From a5563db391b046ebc5638b1a9cdc2911a934c2a9 Mon Sep 17 00:00:00 2001
From: "Rusty.Benson"
Date: Wed, 17 Jul 2019 14:07:13 -0400
Subject: [PATCH 01/14] Add LICENSE.md

---
 LICENSE.md | 157 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 157 insertions(+)
 create mode 100644 LICENSE.md

diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 000000000..0927556b5
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,157 @@
+### GNU LESSER GENERAL PUBLIC LICENSE
+
+Version 3, 29 June 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This version of the GNU Lesser General Public License incorporates the
+terms and conditions of version 3 of the GNU General Public License,
+supplemented by the additional permissions listed below.
+
+#### 0. Additional Definitions.
+
+As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the
+GNU General Public License.
+
+"The Library" refers to a covered work governed by this License, other
+than an Application or a Combined Work as defined below.
+
+An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+#### 1. Exception to Section 3 of the GNU GPL.
+
+You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+#### 2. Conveying Modified Versions.
+
+If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+- a) under this License, provided that you make a good faith effort
+  to ensure that, in the event an Application does not supply the
+  function or data, the facility still operates, and performs
+  whatever part of its purpose remains meaningful, or
+- b) under the GNU GPL, with none of the additional permissions of
+  this License applicable to that copy.
+
+#### 3. Object Code Incorporating Material from Library Header Files.
+
+The object code form of an Application may incorporate material from a
+header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+- a) Give prominent notice with each copy of the object code that
+  the Library is used in it and that the Library and its use are
+  covered by this License.
+- b) Accompany the object code with a copy of the GNU GPL and this
+  license document.
+
+#### 4. Combined Works.
+
+You may convey a Combined Work under terms of your choice that, taken
+together, effectively do not restrict modification of the portions of
+the Library contained in the Combined Work and reverse engineering for
+debugging such modifications, if you also do each of the following:
+
+- a) Give prominent notice with each copy of the Combined Work that
+  the Library is used in it and that the Library and its use are
+  covered by this License.
+- b) Accompany the Combined Work with a copy of the GNU GPL and this
+  license document.
+- c) For a Combined Work that displays copyright notices during
+  execution, include the copyright notice for the Library among
+  these notices, as well as a reference directing the user to the
+  copies of the GNU GPL and this license document.
+- d) Do one of the following:
+  - 0) Convey the Minimal Corresponding Source under the terms of
+    this License, and the Corresponding Application Code in a form
+    suitable for, and under terms that permit, the user to
+    recombine or relink the Application with a modified version of
+    the Linked Version to produce a modified Combined Work, in the
+    manner specified by section 6 of the GNU GPL for conveying
+    Corresponding Source.
+  - 1) Use a suitable shared library mechanism for linking with
+    the Library. A suitable mechanism is one that (a) uses at run
+    time a copy of the Library already present on the user's
+    computer system, and (b) will operate properly with a modified
+    version of the Library that is interface-compatible with the
+    Linked Version.
+- e) Provide Installation Information, but only if you would
+  otherwise be required to provide such information under section 6
+  of the GNU GPL, and only to the extent that such information is
+  necessary to install and execute a modified version of the
+  Combined Work produced by recombining or relinking the Application
+  with a modified version of the Linked Version. (If you use option
+  4d0, the Installation Information must accompany the Minimal
+  Corresponding Source and Corresponding Application Code. If you
+  use option 4d1, you must provide the Installation Information in
+  the manner specified by section 6 of the GNU GPL for conveying
+  Corresponding Source.)
+
+#### 5. Combined Libraries.
+
+You may place library facilities that are a work based on the Library
+side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+- a) Accompany the combined library with a copy of the same work
+  based on the Library, uncombined with any other library
+  facilities, conveyed under the terms of this License.
+- b) Give prominent notice with the combined library that part of it
+  is a work based on the Library, and explaining where to find the
+  accompanying uncombined form of the same work.
+
+#### 6.
Revised Versions of the GNU Lesser General Public License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +as you received it specifies that a certain numbered version of the +GNU Lesser General Public License "or any later version" applies to +it, you have the option of following the terms and conditions either +of that published version or of any later version published by the +Free Software Foundation. If the Library as you received it does not +specify a version number of the GNU Lesser General Public License, you +may choose any version of the GNU Lesser General Public License ever +published by the Free Software Foundation. + +If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. From 75edcb4702672df2aac4f15e76552ccf0ccc2b66 Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Wed, 17 Jul 2019 14:55:38 -0400 Subject: [PATCH 02/14] Renamed driver directories and removed the null version of the nh extensions for the public release --- driver/{coupled => GFDL}/atmosphere.F90 | 0 driver/{fvGFS => SHiELD}/atmosphere.F90 | 0 driver/{fvGFS => SHiELD}/constants.F90 | 0 .../{fvGFS => SHiELD}/lin_cloud_microphys.F90 | 0 model_nh_null/nh_core.F90 | 155 ------------------ 5 files changed, 155 deletions(-) rename driver/{coupled => GFDL}/atmosphere.F90 (100%) rename driver/{fvGFS => SHiELD}/atmosphere.F90 (100%) rename driver/{fvGFS => SHiELD}/constants.F90 (100%) rename driver/{fvGFS => SHiELD}/lin_cloud_microphys.F90 (100%) delete mode 100644 model_nh_null/nh_core.F90 diff --git a/driver/coupled/atmosphere.F90 b/driver/GFDL/atmosphere.F90 similarity index 100% rename from driver/coupled/atmosphere.F90 rename to driver/GFDL/atmosphere.F90 diff --git a/driver/fvGFS/atmosphere.F90 b/driver/SHiELD/atmosphere.F90 similarity index 100% rename from driver/fvGFS/atmosphere.F90 rename to driver/SHiELD/atmosphere.F90 diff --git a/driver/fvGFS/constants.F90 b/driver/SHiELD/constants.F90 similarity index 100% rename from driver/fvGFS/constants.F90 rename to driver/SHiELD/constants.F90 diff --git a/driver/fvGFS/lin_cloud_microphys.F90 b/driver/SHiELD/lin_cloud_microphys.F90 similarity index 100% rename from driver/fvGFS/lin_cloud_microphys.F90 rename to driver/SHiELD/lin_cloud_microphys.F90 diff --git a/model_nh_null/nh_core.F90 b/model_nh_null/nh_core.F90 deleted file mode 100644 index 01728c309..000000000 --- a/model_nh_null/nh_core.F90 +++ /dev/null @@ -1,155 +0,0 @@ -!*********************************************************************** -!* GNU General Public License * -!* This file is a part of fvGFS. * -!* * -!* fvGFS is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* fvGFS is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * -!*********************************************************************** - -module nh_core_mod - - use fms_mod, only: error_mesg, FATAL - use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type - - - implicit none - private - - public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nest_halo_nh - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - -contains - - subroutine update_dz_c(is, ie, js, je, km, ng, dt, dp0, zs, area, ut, vt, gz, ws, & - npx, npy, sw_corner, se_corner, ne_corner, nw_corner, bd, grid_type) -! !INPUT PARAMETERS: - type(fv_grid_bounds_type), intent(IN) :: bd - integer, intent(in):: is, ie, js, je, ng, km, npx, npy, grid_type - logical, intent(IN):: sw_corner, se_corner, ne_corner, nw_corner - real, intent(in):: dt - real, intent(in):: dp0(km) - real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: ut, vt - real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng):: area - real, intent(inout):: gz(is-ng:ie+ng,js-ng:je+ng,km+1) - real, intent(in ):: zs(is-ng:ie+ng, js-ng:je+ng) - real, intent( out):: ws(is-ng:ie+ng, js-ng:je+ng) - - call error_mesg('update_dz_c','The null version of update_dz_c should not be called.',FATAL) - - end subroutine update_dz_c - - - - subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, rarea, & - dp0, zs, zh, crx, cry, xfx, yfx, delz, ws, rdt, gridstruct, bd) - type(fv_grid_bounds_type), intent(IN) :: bd - integer, intent(in):: is, ie, js, je, ng, km, npx, npy - integer, intent(in):: hord - real, intent(in) :: rdt - real, intent(in) :: dp0(km) - real, intent(in) :: area(is-ng:ie+ng,js-ng:je+ng) - real, intent(in) :: rarea(is-ng:ie+ng,js-ng:je+ng) - real, intent(inout):: damp(km+1) - integer, intent(inout):: ndif(km+1) - real, intent(in ) :: zs(is-ng:ie+ng,js-ng:je+ng) - real, intent(inout) :: zh(is-ng:ie+ng,js-ng:je+ng,km+1) - real, intent( out) ::delz(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(inout), dimension(is:ie+1,js-ng:je+ng,km):: crx, xfx - real, intent(inout), dimension(is-ng:ie+ng,js:je+1,km):: cry, yfx - real, intent(inout) :: ws(is:ie,js:je) - type(fv_grid_type), intent(IN), target :: gridstruct - - call error_mesg('update_dz_d','The null version of update_dz_d should not be called.',FATAL) - - end subroutine update_dz_d - - - - - subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & - isd, ied, jsd, jed, akap, cappa, cp, & - ptop, zs, q_con, w, delz, pt, & - delp, zh, pe, ppe, pk3, pk, peln, & - ws, scale_m, p_fac, a_imp, & - use_logp, last_call, fp_out) -!-------------------------------------------- -! !OUTPUT PARAMETERS -! Ouput: gz: grav*height at edges -! pe: full hydrostatic pressure -! ppe: non-hydrostatic pressure perturbation -!-------------------------------------------- - integer, intent(in):: ms, is, ie, js, je, km, ng - integer, intent(in):: isd, ied, jsd, jed - real, intent(in):: dt ! 
the BIG horizontal Lagrangian time step - real, intent(in):: akap, cp, ptop, p_fac, a_imp, scale_m - real, intent(in):: zs(isd:ied,jsd:jed) - logical, intent(in):: last_call, use_logp, fp_out - real, intent(in):: ws(is:ie,js:je) - real, intent(in), dimension(isd:,jsd:,1:):: q_con, cappa - real, intent(in), dimension(isd:ied,jsd:jed,km):: delp, pt - real, intent(inout), dimension(isd:ied,jsd:jed,km+1):: zh - real, intent(inout), dimension(isd:ied,jsd:jed,km):: w - real, intent(inout):: pe(is-1:ie+1,km+1,js-1:je+1) - real, intent(out):: peln(is:ie,km+1,js:je) ! ln(pe) - real, intent(out), dimension(isd:ied,jsd:jed,km+1):: ppe - real, intent(out):: delz(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(out):: pk(is:ie,js:je,km+1) - real, intent(out):: pk3(isd:ied,jsd:jed,km+1) - - call error_mesg('Riem_Solver3','The null version of Riem_Solver3 should not be called.',FATAL) - - end subroutine Riem_Solver3 - - - - subroutine Riem_Solver_c(ms, dt, is, ie, js, je, km, ng, & - akap, cappa, cp, ptop, hs, w3, pt, q_con, & - delp, gz, pef, ws, p_fac, a_imp, scale_m) - integer, intent(in):: is, ie, js, je, ng, km - integer, intent(in):: ms - real, intent(in):: dt, akap, cp, ptop, p_fac, a_imp, scale_m - real, intent(in):: ws(is-ng:ie+ng,js-ng:je+ng) - real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, delp - real, intent(in), dimension(is-ng:,js-ng:,1:):: q_con, cappa - real, intent(in):: hs(is-ng:ie+ng,js-ng:je+ng) - real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: w3 -! OUTPUT PARAMETERS - real, intent(inout), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: gz - real, intent( out), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: pef - - call error_mesg('Riem_Solver_c','The null version of Riem_Solver_c should not be called.',FATAL) - - end subroutine Riem_Solver_c - - - - subroutine nest_halo_nh(ptop, grav, kappa, cp, delp, delz, pt, phis, pkc, gz, pk3, & - npx, npy, npz, nested, pkc_pertn, computepk3, fullhalo, bd) - integer, intent(in) :: npx, npy, npz - logical, intent(in) :: pkc_pertn, computepk3, fullhalo, nested - real, intent(in) :: ptop, kappa, cp, grav - type(fv_grid_bounds_type), intent(IN) :: bd - real, intent(in) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed) - real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: pt, delp, delz - real, intent(inout), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1):: gz, pkc, pk3 - - call error_mesg('nest_halo_nh','The null version of nest_halo_nh should not be called.',FATAL) - - end subroutine nest_halo_nh - -end module nh_core_mod From 1cc80fc880c4d176f0436db39b0650ebe0f5487b Mon Sep 17 00:00:00 2001 From: Seth Underwood Date: Thu, 18 Jul 2019 09:11:14 -0400 Subject: [PATCH 03/14] Update license header for FV3 --- driver/SHiELD/atmosphere.F90 | 6 +++--- driver/SHiELD/constants.F90 | 6 +++--- model/a2b_edge.F90 | 6 +++--- model/boundary.F90 | 6 +++--- model/dyn_core.F90 | 6 +++--- model/fv_arrays.F90 | 6 +++--- model/fv_control.F90 | 6 +++--- model/fv_current_grid.F90 | 6 +++--- model/fv_dynamics.F90 | 6 +++--- model/fv_fill.F90 | 6 +++--- model/fv_grid_utils.F90 | 6 +++--- model/fv_mapz.F90 | 6 +++--- model/fv_nesting.F90 | 6 +++--- model/fv_sg.F90 | 6 +++--- model/fv_tracer2d.F90 | 6 +++--- model/fv_update_phys.F90 | 6 +++--- model/sw_core.F90 | 6 +++--- model/tp_core.F90 | 6 +++--- model_nh/nh_core.F90 | 6 +++--- model_nh/nh_utils.F90 | 6 +++--- tools/external_ic.F90 | 6 +++--- tools/external_sst.F90 | 6 +++--- tools/fv_diagnostics.F90 | 6 +++--- tools/fv_eta.F90 | 6 +++--- tools/fv_grid_tools.F90 | 6 +++--- tools/fv_io.F90 | 6 +++--- 
tools/fv_mp_mod.F90 | 6 +++--- tools/fv_nggps_diag.F90 | 6 +++--- tools/fv_nudge.F90 | 6 +++--- tools/fv_restart.F90 | 6 +++--- tools/fv_surf_map.F90 | 6 +++--- tools/fv_timing.F90 | 6 +++--- tools/init_hydro.F90 | 6 +++--- tools/sim_nc_mod.F90 | 6 +++--- tools/sorted_index.F90 | 6 +++--- tools/test_cases.F90 | 6 +++--- 36 files changed, 108 insertions(+), 108 deletions(-) diff --git a/driver/SHiELD/atmosphere.F90 b/driver/SHiELD/atmosphere.F90 index 0ec5ed000..2fbb40865 100644 --- a/driver/SHiELD/atmosphere.F90 +++ b/driver/SHiELD/atmosphere.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/driver/SHiELD/constants.F90 b/driver/SHiELD/constants.F90 index 6e1c842ee..3cc35b29d 100644 --- a/driver/SHiELD/constants.F90 +++ b/driver/SHiELD/constants.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/a2b_edge.F90 b/model/a2b_edge.F90 index 08c64c85a..da05fd466 100644 --- a/model/a2b_edge.F90 +++ b/model/a2b_edge.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. 
* diff --git a/model/boundary.F90 b/model/boundary.F90 index b42126cf3..97cb06928 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/dyn_core.F90 b/model/dyn_core.F90 index bc699e3cd..ffc703b7f 100644 --- a/model/dyn_core.F90 +++ b/model/dyn_core.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index aa1ee2d15..e9d058243 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_control.F90 b/model/fv_control.F90 index 6ef916ff5..0442572eb 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. 
* !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_current_grid.F90 b/model/fv_current_grid.F90 index 62c00dc87..1a3f2fef9 100644 --- a/model/fv_current_grid.F90 +++ b/model/fv_current_grid.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90 index f35569274..18717bc1b 100644 --- a/model/fv_dynamics.F90 +++ b/model/fv_dynamics.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_fill.F90 b/model/fv_fill.F90 index cc974b7fd..e907641a2 100644 --- a/model/fv_fill.F90 +++ b/model/fv_fill.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. 
* !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index a7074f7bb..c6d627909 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90 index 276a41504..06fdc5bf2 100644 --- a/model/fv_mapz.F90 +++ b/model/fv_mapz.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index eb6784a4f..5d8e4560d 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. 
* diff --git a/model/fv_sg.F90 b/model/fv_sg.F90 index 067d39fd2..93ff34dd7 100644 --- a/model/fv_sg.F90 +++ b/model/fv_sg.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_tracer2d.F90 b/model/fv_tracer2d.F90 index 2f3bd3585..3b1c4b5bf 100644 --- a/model/fv_tracer2d.F90 +++ b/model/fv_tracer2d.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90 index 1ede2a56e..132a7e2de 100644 --- a/model/fv_update_phys.F90 +++ b/model/fv_update_phys.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/sw_core.F90 b/model/sw_core.F90 index 178e14965..25b06d493 100644 --- a/model/sw_core.F90 +++ b/model/sw_core.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. 
* !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model/tp_core.F90 b/model/tp_core.F90 index 313aeaccd..ac55e42a8 100644 --- a/model/tp_core.F90 +++ b/model/tp_core.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model_nh/nh_core.F90 b/model_nh/nh_core.F90 index c45be259b..569704266 100644 --- a/model_nh/nh_core.F90 +++ b/model_nh/nh_core.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/model_nh/nh_utils.F90 b/model_nh/nh_utils.F90 index 92d9c1329..d38f58fb5 100644 --- a/model_nh/nh_utils.F90 +++ b/model_nh/nh_utils.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. 
* !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index 3fbdeb13b..999e10add 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/external_sst.F90 b/tools/external_sst.F90 index 78cbba6f9..6ff80ed79 100644 --- a/tools/external_sst.F90 +++ b/tools/external_sst.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index 7c942ef00..355bae559 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. 
* diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90 index 8e43a2ac6..7c5776b75 100644 --- a/tools/fv_eta.F90 +++ b/tools/fv_eta.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index 6a96850ee..b73ead009 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index a4a2a3290..fe28bf864 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index bc338c3d8..892a2e2a8 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. 
* !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_nggps_diag.F90 b/tools/fv_nggps_diag.F90 index 2efb80048..23c4d5ee6 100644 --- a/tools/fv_nggps_diag.F90 +++ b/tools/fv_nggps_diag.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index 01cbbf11e..826a5fda5 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index d9b0cbe0a..722804cc3 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. 
* !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90 index ba1f900e9..d6e27c306 100644 --- a/tools/fv_surf_map.F90 +++ b/tools/fv_surf_map.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/fv_timing.F90 b/tools/fv_timing.F90 index a2c58c76e..1c6cb5403 100644 --- a/tools/fv_timing.F90 +++ b/tools/fv_timing.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/init_hydro.F90 b/tools/init_hydro.F90 index 66c80fdb9..8cfb99967 100644 --- a/tools/init_hydro.F90 +++ b/tools/init_hydro.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. 
* diff --git a/tools/sim_nc_mod.F90 b/tools/sim_nc_mod.F90 index 5bbd36b09..17696df88 100644 --- a/tools/sim_nc_mod.F90 +++ b/tools/sim_nc_mod.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/sorted_index.F90 b/tools/sorted_index.F90 index 438485a72..c14975a18 100644 --- a/tools/sorted_index.F90 +++ b/tools/sorted_index.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * diff --git a/tools/test_cases.F90 b/tools/test_cases.F90 index e1d4cac68..5b4cc4831 100644 --- a/tools/test_cases.F90 +++ b/tools/test_cases.F90 @@ -1,13 +1,13 @@ !*********************************************************************** !* GNU General Public License * -!* This file is a part of fvGFS. * +!* This file is a part of FV3. * !* * -!* fvGFS is free software; you can redistribute it and/or modify it * +!* FV3 is free software; you can redistribute it and/or modify it * !* and are expected to follow the terms of the GNU General Public * !* License as published by the Free Software Foundation; either * !* version 2 of the License, or (at your option) any later version. * !* * -!* fvGFS is distributed in the hope that it will be useful, but * +!* FV3 is distributed in the hope that it will be useful, but * !* WITHOUT ANY WARRANTY; without even the implied warranty of * !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * !* General Public License for more details. * From 131a732658b8816b153a60b4f4430d3c4c8477a7 Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Thu, 18 Jul 2019 11:27:52 -0400 Subject: [PATCH 04/14] Added a README.md file --- README.md | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..6db43cb95 --- /dev/null +++ b/README.md @@ -0,0 +1,46 @@ +# GFDL_atmos_cubed_sphere + +This is for the FV3 dynamical core and the GFDL Microphysics. 
+
+The source in this branch reflects the codebase delivered to NCEP/EMC for use in GFS. Updates will be forthcoming.
+
+# Where to find information
+
+See the [FV3 documentation and references](https://www.gfdl.noaa.gov/fv3/fv3-documentation-and-references/) for more information.
+
+# Proper usage attribution
+
+Cite either Putman and Lin (2007) or Harris and Lin (2013) when describing a model using the FV3 dynamical core.
+Cite Chen et al (2013) and Zhou et al (2019) if using the GFDL Microphysics.
+
+# What files are what
+
+The top level directory structure groups source code and input files as follows:
+
+| File/directory | Purpose |
+| -------------- | ------- |
+| ```LICENSE.md``` | a copy of the GNU Lesser General Public License, version 3. |
+| ```README.md``` | this file with basic pointers to more information |
+| ```model/``` | contains the source code for the core of the FV3 dynamical core |
+| ```model_nh/``` | contains the source code for non-hydrostatic extensions |
+| ```driver/``` | contains drivers used by different models/modeling systems |
+| ```tools/``` | contains source code of tools used within the core |
+| ```GFDL_tools/``` | contains source code of tools specific to GFDL models |
+
+# Disclaimer
+
+The United States Department of Commerce (DOC) GitHub project code is provided
+on an "as is" basis and the user assumes responsibility for its use. DOC has
+relinquished control of the information and no longer has responsibility to
+protect the integrity, confidentiality, or availability of the information. Any
+claims against the Department of Commerce stemming from the use of its GitHub
+project will be governed by all applicable Federal law. Any reference to
+specific commercial products, processes, or services by service mark,
+trademark, manufacturer, or otherwise, does not constitute or imply their
+endorsement, recommendation or favoring by the Department of Commerce. The
+Department of Commerce seal and logo, or the seal and logo of a DOC bureau,
+shall not be used in any manner to imply endorsement of any commercial product
+or activity by DOC or the United States Government.
+
+This project code is made available through GitHub but is managed by NOAA-GFDL
+at https://gitlab.gfdl.noaa.gov.
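For context on the "null version of the nh extensions" deleted in patch 02 above (model_nh_null/nh_core.F90): that module exposes the same public interface as the real non-hydrostatic core in model_nh/, but every routine aborts if reached, apparently so that a hydrostatic build can link without compiling the non-hydrostatic solver. The following is a minimal sketch of that stub pattern, not the deleted file itself; the routine name and arguments are illustrative only, and standard ERROR STOP stands in for the FMS error_mesg(..., FATAL) call the real code uses, so the sketch compiles stand-alone.

    module nh_core_null_mod
      ! Minimal sketch of the "null module" pattern: present the same
      ! interface as the real non-hydrostatic core, but fail fast if
      ! any entry point is ever reached.
      implicit none
      private
      public :: riem_solver_stub

    contains

      subroutine riem_solver_stub(dt, w)
        real, intent(in)    :: dt      ! acoustic time step (unused)
        real, intent(inout) :: w(:,:)  ! vertical velocity (unused)
        ! A hydrostatic build must never take this code path.
        error stop 'riem_solver_stub: null version should not be called.'
      end subroutine riem_solver_stub

    end module nh_core_null_mod

Selecting model_nh/ or the null directory at build time then toggles non-hydrostatic support without changing any caller, which would explain why each stub carries a fatal "should not be called" guard rather than a silent no-op.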
From 77fd5017c20a3c9bdce04359aa84228f8394a10f Mon Sep 17 00:00:00 2001 From: Seth Underwood Date: Thu, 18 Jul 2019 14:19:41 -0400 Subject: [PATCH 05/14] Replace GPL header with LGPL header --- driver/SHiELD/atmosphere.F90 | 33 ++++++++++++++++----------------- driver/SHiELD/constants.F90 | 33 ++++++++++++++++----------------- model/a2b_edge.F90 | 33 ++++++++++++++++----------------- model/boundary.F90 | 33 ++++++++++++++++----------------- model/dyn_core.F90 | 33 ++++++++++++++++----------------- model/fv_arrays.F90 | 33 ++++++++++++++++----------------- model/fv_control.F90 | 33 ++++++++++++++++----------------- model/fv_current_grid.F90 | 33 ++++++++++++++++----------------- model/fv_dynamics.F90 | 33 ++++++++++++++++----------------- model/fv_fill.F90 | 33 ++++++++++++++++----------------- model/fv_grid_utils.F90 | 33 ++++++++++++++++----------------- model/fv_mapz.F90 | 33 ++++++++++++++++----------------- model/fv_nesting.F90 | 33 ++++++++++++++++----------------- model/fv_sg.F90 | 33 ++++++++++++++++----------------- model/fv_tracer2d.F90 | 33 ++++++++++++++++----------------- model/fv_update_phys.F90 | 33 ++++++++++++++++----------------- model/sw_core.F90 | 33 ++++++++++++++++----------------- model/tp_core.F90 | 33 ++++++++++++++++----------------- model_nh/nh_core.F90 | 33 ++++++++++++++++----------------- model_nh/nh_utils.F90 | 33 ++++++++++++++++----------------- tools/external_ic.F90 | 33 ++++++++++++++++----------------- tools/external_sst.F90 | 33 ++++++++++++++++----------------- tools/fv_diagnostics.F90 | 33 ++++++++++++++++----------------- tools/fv_eta.F90 | 33 ++++++++++++++++----------------- tools/fv_grid_tools.F90 | 33 ++++++++++++++++----------------- tools/fv_io.F90 | 33 ++++++++++++++++----------------- tools/fv_mp_mod.F90 | 33 ++++++++++++++++----------------- tools/fv_nggps_diag.F90 | 33 ++++++++++++++++----------------- tools/fv_nudge.F90 | 33 ++++++++++++++++----------------- tools/fv_restart.F90 | 33 ++++++++++++++++----------------- tools/fv_surf_map.F90 | 33 ++++++++++++++++----------------- tools/fv_timing.F90 | 33 ++++++++++++++++----------------- tools/init_hydro.F90 | 33 ++++++++++++++++----------------- tools/sim_nc_mod.F90 | 33 ++++++++++++++++----------------- tools/sorted_index.F90 | 33 ++++++++++++++++----------------- tools/test_cases.F90 | 33 ++++++++++++++++----------------- 36 files changed, 576 insertions(+), 612 deletions(-) diff --git a/driver/SHiELD/atmosphere.F90 b/driver/SHiELD/atmosphere.F90 index 2fbb40865..5df0b36d8 100644 --- a/driver/SHiELD/atmosphere.F90 +++ b/driver/SHiELD/atmosphere.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. 
* -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module atmosphere_mod #include diff --git a/driver/SHiELD/constants.F90 b/driver/SHiELD/constants.F90 index 3cc35b29d..c931fa7af 100644 --- a/driver/SHiELD/constants.F90 +++ b/driver/SHiELD/constants.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module constants_mod diff --git a/model/a2b_edge.F90 b/model/a2b_edge.F90 index da05fd466..8a5bc43ce 100644 --- a/model/a2b_edge.F90 +++ b/model/a2b_edge.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module a2b_edge_mod diff --git a/model/boundary.F90 b/model/boundary.F90 index 97cb06928..54d2a2018 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module boundary_mod diff --git a/model/dyn_core.F90 b/model/dyn_core.F90 index ffc703b7f..876374d2c 100644 --- a/model/dyn_core.F90 +++ b/model/dyn_core.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. 
* -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module dyn_core_mod diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index e9d058243..59ccac943 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_arrays_mod #include diff --git a/model/fv_control.F90 b/model/fv_control.F90 index 0442572eb..751862e96 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. 
* -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** ! $Id$ ! diff --git a/model/fv_current_grid.F90 b/model/fv_current_grid.F90 index 1a3f2fef9..ae9ae7081 100644 --- a/model/fv_current_grid.F90 +++ b/model/fv_current_grid.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . 
!*********************************************************************** module fv_current_grid_mod diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90 index 18717bc1b..1103ac829 100644 --- a/model/fv_dynamics.F90 +++ b/model/fv_dynamics.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_dynamics_mod use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor diff --git a/model/fv_fill.F90 b/model/fv_fill.F90 index e907641a2..ccd41c8a1 100644 --- a/model/fv_fill.F90 +++ b/model/fv_fill.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. 
+!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_fill_mod diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index c6d627909..4b33fb215 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_grid_utils_mod diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90 index 06fdc5bf2..0d1169c92 100644 --- a/model/fv_mapz.F90 +++ b/model/fv_mapz.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). 
+!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** ! SJL: Apr 12, 2012 ! This revision may actually produce rounding level differences due to the elimination of KS to compute diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index 5d8e4560d..77fc6d7d9 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_nesting_mod diff --git a/model/fv_sg.F90 b/model/fv_sg.F90 index 93ff34dd7..e8aa06be2 100644 --- a/model/fv_sg.F90 +++ b/model/fv_sg.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. 
* -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_sg_mod diff --git a/model/fv_tracer2d.F90 b/model/fv_tracer2d.F90 index 3b1c4b5bf..0cec192f9 100644 --- a/model/fv_tracer2d.F90 +++ b/model/fv_tracer2d.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_tracer2d_mod use tp_core_mod, only: fv_tp_2d, copy_corners diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90 index 132a7e2de..1bf7743fb 100644 --- a/model/fv_update_phys.F90 +++ b/model/fv_update_phys.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. 
* -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_update_phys_mod diff --git a/model/sw_core.F90 b/model/sw_core.F90 index 25b06d493..b77934ce7 100644 --- a/model/sw_core.F90 +++ b/model/sw_core.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module sw_core_mod diff --git a/model/tp_core.F90 b/model/tp_core.F90 index ac55e42a8..e8a6fa29f 100644 --- a/model/tp_core.F90 +++ b/model/tp_core.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. 
* -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module tp_core_mod !BOP diff --git a/model_nh/nh_core.F90 b/model_nh/nh_core.F90 index 569704266..e5d5de122 100644 --- a/model_nh/nh_core.F90 +++ b/model_nh/nh_core.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module nh_core_mod ! Developer: S.-J. 
Lin, NOAA/GFDL diff --git a/model_nh/nh_utils.F90 b/model_nh/nh_utils.F90 index d38f58fb5..c2e103018 100644 --- a/model_nh/nh_utils.F90 +++ b/model_nh/nh_utils.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module nh_utils_mod ! Developer: S.-J. Lin, NOAA/GFDL diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index 999e10add..ed50d0c94 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** #ifdef OVERLOAD_R4 #define _GET_VAR1 get_var1_real diff --git a/tools/external_sst.F90 b/tools/external_sst.F90 index 6ff80ed79..2de1edb84 100644 --- a/tools/external_sst.F90 +++ b/tools/external_sst.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module external_sst_mod diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index 355bae559..78339b310 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). 
+!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_diagnostics_mod diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90 index 7c5776b75..a80c5f393 100644 --- a/tools/fv_eta.F90 +++ b/tools/fv_eta.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_eta_mod use constants_mod, only: kappa, grav, cp_air, rdgas diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index b73ead009..2bdeba832 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. 
* -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_grid_tools_mod diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index fe28bf864..8bf9fb560 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** !!!NOTE: Merging in the seasonal forecast initialization code !!!! has proven problematic in the past, since many conflicts diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index 892a2e2a8..acbf962b6 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. 
* -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** !------------------------------------------------------------------------------ !BOP diff --git a/tools/fv_nggps_diag.F90 b/tools/fv_nggps_diag.F90 index 23c4d5ee6..fff83e986 100644 --- a/tools/fv_nggps_diag.F90 +++ b/tools/fv_nggps_diag.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . 
!*********************************************************************** module fv_nggps_diags_mod diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index 826a5fda5..76bf998de 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** #ifdef OVERLOAD_R4 #define _GET_VAR1 get_var1_real diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index 722804cc3..7d822e5ab 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. 
+!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_restart_mod diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90 index d6e27c306..8ca3b33d3 100644 --- a/tools/fv_surf_map.F90 +++ b/tools/fv_surf_map.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). +!* +!* FMS is free software: you can redistribute it and/or modify it under +!* the terms of the GNU Lesser General Public License as published by +!* the Free Software Foundation, either version 3 of the License, or (at +!* your option) any later version. +!* +!* FMS is distributed in the hope that it will be useful, but WITHOUT +!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +!* for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with FMS. If not, see . !*********************************************************************** module fv_surf_map_mod diff --git a/tools/fv_timing.F90 b/tools/fv_timing.F90 index 1c6cb5403..49ceb5b5b 100644 --- a/tools/fv_timing.F90 +++ b/tools/fv_timing.F90 @@ -1,21 +1,20 @@ !*********************************************************************** -!* GNU General Public License * -!* This file is a part of FV3. * -!* * -!* FV3 is free software; you can redistribute it and/or modify it * -!* and are expected to follow the terms of the GNU General Public * -!* License as published by the Free Software Foundation; either * -!* version 2 of the License, or (at your option) any later version. * -!* * -!* FV3 is distributed in the hope that it will be useful, but * -!* WITHOUT ANY WARRANTY; without even the implied warranty of * -!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * -!* General Public License for more details. * -!* * -!* For the full text of the GNU General Public License, * -!* write to: Free Software Foundation, Inc., * -!* 675 Mass Ave, Cambridge, MA 02139, USA. * -!* or see: http://www.gnu.org/licenses/gpl.html * +!* GNU Lesser General Public License +!* +!* This file is part of the GFDL Flexible Modeling System (FMS). 
+!*
+!* FMS is free software: you can redistribute it and/or modify it under
+!* the terms of the GNU Lesser General Public License as published by
+!* the Free Software Foundation, either version 3 of the License, or (at
+!* your option) any later version.
+!*
+!* FMS is distributed in the hope that it will be useful, but WITHOUT
+!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+!* for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_timing_mod
diff --git a/tools/init_hydro.F90 b/tools/init_hydro.F90
index 8cfb99967..6c89b6cd7 100644
--- a/tools/init_hydro.F90
+++ b/tools/init_hydro.F90
@@ -1,21 +1,20 @@
 !***********************************************************************
-!* GNU General Public License *
-!* This file is a part of FV3. *
-!* *
-!* FV3 is free software; you can redistribute it and/or modify it *
-!* and are expected to follow the terms of the GNU General Public *
-!* License as published by the Free Software Foundation; either *
-!* version 2 of the License, or (at your option) any later version. *
-!* *
-!* FV3 is distributed in the hope that it will be useful, but *
-!* WITHOUT ANY WARRANTY; without even the implied warranty of *
-!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
-!* General Public License for more details. *
-!* *
-!* For the full text of the GNU General Public License, *
-!* write to: Free Software Foundation, Inc., *
-!* 675 Mass Ave, Cambridge, MA 02139, USA. *
-!* or see: http://www.gnu.org/licenses/gpl.html *
+!* GNU Lesser General Public License
+!*
+!* This file is part of the GFDL Flexible Modeling System (FMS).
+!*
+!* FMS is free software: you can redistribute it and/or modify it under
+!* the terms of the GNU Lesser General Public License as published by
+!* the Free Software Foundation, either version 3 of the License, or (at
+!* your option) any later version.
+!*
+!* FMS is distributed in the hope that it will be useful, but WITHOUT
+!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+!* for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 ! $Id$
diff --git a/tools/sim_nc_mod.F90 b/tools/sim_nc_mod.F90
index 17696df88..453824668 100644
--- a/tools/sim_nc_mod.F90
+++ b/tools/sim_nc_mod.F90
@@ -1,21 +1,20 @@
 !***********************************************************************
-!* GNU General Public License *
-!* This file is a part of FV3. *
-!* *
-!* FV3 is free software; you can redistribute it and/or modify it *
-!* and are expected to follow the terms of the GNU General Public *
-!* License as published by the Free Software Foundation; either *
-!* version 2 of the License, or (at your option) any later version. *
-!* *
-!* FV3 is distributed in the hope that it will be useful, but *
-!* WITHOUT ANY WARRANTY; without even the implied warranty of *
-!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
-!* General Public License for more details. *
-!* *
-!* For the full text of the GNU General Public License, *
-!* write to: Free Software Foundation, Inc., *
-!* 675 Mass Ave, Cambridge, MA 02139, USA. *
-!* or see: http://www.gnu.org/licenses/gpl.html *
+!* GNU Lesser General Public License
+!*
+!* This file is part of the GFDL Flexible Modeling System (FMS).
+!*
+!* FMS is free software: you can redistribute it and/or modify it under
+!* the terms of the GNU Lesser General Public License as published by
+!* the Free Software Foundation, either version 3 of the License, or (at
+!* your option) any later version.
+!*
+!* FMS is distributed in the hope that it will be useful, but WITHOUT
+!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+!* for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module sim_nc_mod
diff --git a/tools/sorted_index.F90 b/tools/sorted_index.F90
index c14975a18..32f012efe 100644
--- a/tools/sorted_index.F90
+++ b/tools/sorted_index.F90
@@ -1,21 +1,20 @@
 !***********************************************************************
-!* GNU General Public License *
-!* This file is a part of FV3. *
-!* *
-!* FV3 is free software; you can redistribute it and/or modify it *
-!* and are expected to follow the terms of the GNU General Public *
-!* License as published by the Free Software Foundation; either *
-!* version 2 of the License, or (at your option) any later version. *
-!* *
-!* FV3 is distributed in the hope that it will be useful, but *
-!* WITHOUT ANY WARRANTY; without even the implied warranty of *
-!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
-!* General Public License for more details. *
-!* *
-!* For the full text of the GNU General Public License, *
-!* write to: Free Software Foundation, Inc., *
-!* 675 Mass Ave, Cambridge, MA 02139, USA. *
-!* or see: http://www.gnu.org/licenses/gpl.html *
+!* GNU Lesser General Public License
+!*
+!* This file is part of the GFDL Flexible Modeling System (FMS).
+!*
+!* FMS is free software: you can redistribute it and/or modify it under
+!* the terms of the GNU Lesser General Public License as published by
+!* the Free Software Foundation, either version 3 of the License, or (at
+!* your option) any later version.
+!*
+!* FMS is distributed in the hope that it will be useful, but WITHOUT
+!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+!* for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 !-*- F90 -*-
 module sorted_index_mod
diff --git a/tools/test_cases.F90 b/tools/test_cases.F90
index 5b4cc4831..4506d3ff8 100644
--- a/tools/test_cases.F90
+++ b/tools/test_cases.F90
@@ -1,21 +1,20 @@
 !***********************************************************************
-!* GNU General Public License *
-!* This file is a part of FV3. *
-!* *
-!* FV3 is free software; you can redistribute it and/or modify it *
-!* and are expected to follow the terms of the GNU General Public *
-!* License as published by the Free Software Foundation; either *
-!* version 2 of the License, or (at your option) any later version. *
-!* *
-!* FV3 is distributed in the hope that it will be useful, but *
-!* WITHOUT ANY WARRANTY; without even the implied warranty of *
-!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
-!* General Public License for more details. *
-!* *
-!* For the full text of the GNU General Public License, *
-!* write to: Free Software Foundation, Inc., *
-!* 675 Mass Ave, Cambridge, MA 02139, USA. *
-!* or see: http://www.gnu.org/licenses/gpl.html *
+!* GNU Lesser General Public License
+!*
+!* This file is part of the GFDL Flexible Modeling System (FMS).
+!*
+!* FMS is free software: you can redistribute it and/or modify it under
+!* the terms of the GNU Lesser General Public License as published by
+!* the Free Software Foundation, either version 3 of the License, or (at
+!* your option) any later version.
+!*
+!* FMS is distributed in the hope that it will be useful, but WITHOUT
+!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+!* for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module test_cases_mod

From 19b48bc1f3c6a86ff7eb73fc9c219e8020220c58 Mon Sep 17 00:00:00 2001
From: "Rusty.Benson"
Date: Fri, 19 Jul 2019 13:31:38 -0400
Subject: [PATCH 06/14] fixed license header in every file and added it to
 those without it

---
 GFDL_tools/fv_ada_nudge.F90 | 20 ++++++++++++++++++++
 GFDL_tools/fv_climate_nudge.F90 | 20 ++++++++++++++++++++
 GFDL_tools/fv_cmip_diag.F90 | 20 ++++++++++++++++++++
 GFDL_tools/read_climate_nudge_data.F90 | 21 ++++++++++++++++++++-
 driver/GFDL/atmosphere.F90 | 20 ++++++++++++++++++++
 driver/SHiELD/atmosphere.F90 | 22 ++++++++++++----------
 driver/SHiELD/constants.F90 | 22 ++++++++++++----------
 driver/SHiELD/lin_cloud_microphys.F90 | 20 ++++++++++++++++++++
 model/a2b_edge.F90 | 22 ++++++++++++----------
 model/boundary.F90 | 22 ++++++++++++----------
 model/dyn_core.F90 | 22 ++++++++++++----------
 model/fv_arrays.F90 | 22 ++++++++++++----------
 model/fv_cmp.F90 | 20 ++++++++++++++++++++
 model/fv_control.F90 | 22 ++++++++++++----------
 model/fv_current_grid.F90 | 22 ++++++++++++----------
 model/fv_dynamics.F90 | 22 ++++++++++++----------
 model/fv_fill.F90 | 22 ++++++++++++----------
 model/fv_grid_utils.F90 | 22 ++++++++++++----------
 model/fv_mapz.F90 | 22 ++++++++++++----------
 model/fv_nesting.F90 | 22 ++++++++++++----------
 model/fv_sg.F90 | 22 ++++++++++++----------
 model/fv_tracer2d.F90 | 22 ++++++++++++----------
 model/fv_update_phys.F90 | 22 ++++++++++++----------
 model/sw_core.F90 | 22 ++++++++++++----------
 model/tp_core.F90 | 22 ++++++++++++----------
 model_nh/nh_core.F90 | 22 ++++++++++++----------
 model_nh/nh_utils.F90 | 22 ++++++++++++----------
 tools/external_ic.F90 | 22 ++++++++++++----------
 tools/external_sst.F90 | 22 ++++++++++++----------
 tools/fv_diagnostics.F90 | 22 ++++++++++++----------
 tools/fv_eta.F90 | 22 ++++++++++++----------
 tools/fv_grid_tools.F90 | 22 ++++++++++++----------
 tools/fv_io.F90 | 22 ++++++++++++----------
 tools/fv_mp_mod.F90 | 22 ++++++++++++----------
 tools/fv_nggps_diag.F90 | 22 ++++++++++++----------
 tools/fv_nudge.F90 | 22 ++++++++++++----------
 tools/fv_restart.F90 | 22 ++++++++++++----------
 tools/fv_surf_map.F90 | 22 ++++++++++++----------
 tools/fv_timing.F90 | 22 ++++++++++++----------
 tools/init_hydro.F90 | 22 ++++++++++++----------
 tools/sim_nc_mod.F90 | 22 ++++++++++++----------
 tools/sorted_index.F90 | 22 ++++++++++++----------
 tools/test_cases.F90 | 22 ++++++++++++----------
 43 files changed, 572 insertions(+), 361 deletions(-)

diff --git a/GFDL_tools/fv_ada_nudge.F90 b/GFDL_tools/fv_ada_nudge.F90
index 21986b3c8..9e24ba474 100644
--- a/GFDL_tools/fv_ada_nudge.F90
+++ b/GFDL_tools/fv_ada_nudge.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 #ifdef OVERLOAD_R4
 #define _GET_VAR1 get_var1_real
 #else
diff --git a/GFDL_tools/fv_climate_nudge.F90 b/GFDL_tools/fv_climate_nudge.F90
index 76d304181..1388b7fb6 100644
--- a/GFDL_tools/fv_climate_nudge.F90
+++ b/GFDL_tools/fv_climate_nudge.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 module fv_climate_nudge_mod

 use fms_mod, only: open_namelist_file, check_nml_error, &
diff --git a/GFDL_tools/fv_cmip_diag.F90 b/GFDL_tools/fv_cmip_diag.F90
index 2839d1a48..90e657ac9 100644
--- a/GFDL_tools/fv_cmip_diag.F90
+++ b/GFDL_tools/fv_cmip_diag.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 module fv_cmip_diag_mod

 use mpp_mod, only: input_nml_file
diff --git a/GFDL_tools/read_climate_nudge_data.F90 b/GFDL_tools/read_climate_nudge_data.F90
index 6d32b59f3..842cb7555 100644
--- a/GFDL_tools/read_climate_nudge_data.F90
+++ b/GFDL_tools/read_climate_nudge_data.F90
@@ -1,4 +1,23 @@
-
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 module read_climate_nudge_data_mod

 use fms_mod, only: open_namelist_file, check_nml_error, close_file, &
diff --git a/driver/GFDL/atmosphere.F90 b/driver/GFDL/atmosphere.F90
index ce568683a..75233bcf1 100644
--- a/driver/GFDL/atmosphere.F90
+++ b/driver/GFDL/atmosphere.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 module atmosphere_mod

 #include
diff --git a/driver/SHiELD/atmosphere.F90 b/driver/SHiELD/atmosphere.F90
index 5df0b36d8..ddc186884 100644
--- a/driver/SHiELD/atmosphere.F90
+++ b/driver/SHiELD/atmosphere.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module atmosphere_mod
 #include
diff --git a/driver/SHiELD/constants.F90 b/driver/SHiELD/constants.F90
index c931fa7af..1ee867e15 100644
--- a/driver/SHiELD/constants.F90
+++ b/driver/SHiELD/constants.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module constants_mod
diff --git a/driver/SHiELD/lin_cloud_microphys.F90 b/driver/SHiELD/lin_cloud_microphys.F90
index 3b2f80d3a..9bde12411 100644
--- a/driver/SHiELD/lin_cloud_microphys.F90
+++ b/driver/SHiELD/lin_cloud_microphys.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 !
 ! Cloud micro-physics package for GFDL global cloud resolving model
 ! The algorithms are originally derived from Lin et al 1983. Most of the key
diff --git a/model/a2b_edge.F90 b/model/a2b_edge.F90
index 8a5bc43ce..52b347d8b 100644
--- a/model/a2b_edge.F90
+++ b/model/a2b_edge.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module a2b_edge_mod
diff --git a/model/boundary.F90 b/model/boundary.F90
index 54d2a2018..f0f2ef14b 100644
--- a/model/boundary.F90
+++ b/model/boundary.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module boundary_mod
diff --git a/model/dyn_core.F90 b/model/dyn_core.F90
index 876374d2c..c48cde19a 100644
--- a/model/dyn_core.F90
+++ b/model/dyn_core.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module dyn_core_mod
diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90
index 59ccac943..155f2cec3 100644
--- a/model/fv_arrays.F90
+++ b/model/fv_arrays.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module fv_arrays_mod
 #include
diff --git a/model/fv_cmp.F90 b/model/fv_cmp.F90
index 0a177aed5..ef17eb64a 100644
--- a/model/fv_cmp.F90
+++ b/model/fv_cmp.F90
@@ -1,3 +1,23 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
 module fv_cmp_mod

 use constants_mod, only: pi=>pi_8, rvgas, rdgas, grav, hlv, hlf, cp_air, cp_vapor
diff --git a/model/fv_control.F90 b/model/fv_control.F90
index 751862e96..fd58c9ea6 100644
--- a/model/fv_control.F90
+++ b/model/fv_control.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 ! $Id$
 !
diff --git a/model/fv_current_grid.F90 b/model/fv_current_grid.F90
index ae9ae7081..0c474e7cb 100644
--- a/model/fv_current_grid.F90
+++ b/model/fv_current_grid.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_current_grid_mod
diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90
index 1103ac829..edff41f2d 100644
--- a/model/fv_dynamics.F90
+++ b/model/fv_dynamics.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module fv_dynamics_mod
 use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, omega, rvgas, cp_vapor
diff --git a/model/fv_fill.F90 b/model/fv_fill.F90
index ccd41c8a1..46d5887fc 100644
--- a/model/fv_fill.F90
+++ b/model/fv_fill.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_fill_mod
diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90
index 4b33fb215..579fe65f5 100644
--- a/model/fv_grid_utils.F90
+++ b/model/fv_grid_utils.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_grid_utils_mod
diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90
index 0d1169c92..0b750dda3 100644
--- a/model/fv_mapz.F90
+++ b/model/fv_mapz.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 ! SJL: Apr 12, 2012
 ! This revision may actually produce rounding level differences due to the elimination of KS to compute
diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90
index 77fc6d7d9..cf04cb8ba 100644
--- a/model/fv_nesting.F90
+++ b/model/fv_nesting.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_nesting_mod
diff --git a/model/fv_sg.F90 b/model/fv_sg.F90
index e8aa06be2..7f611ab57 100644
--- a/model/fv_sg.F90
+++ b/model/fv_sg.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_sg_mod
diff --git a/model/fv_tracer2d.F90 b/model/fv_tracer2d.F90
index 0cec192f9..e99fc049b 100644
--- a/model/fv_tracer2d.F90
+++ b/model/fv_tracer2d.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module fv_tracer2d_mod
 use tp_core_mod, only: fv_tp_2d, copy_corners
diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90
index 1bf7743fb..614abbf5a 100644
--- a/model/fv_update_phys.F90
+++ b/model/fv_update_phys.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module fv_update_phys_mod
diff --git a/model/sw_core.F90 b/model/sw_core.F90
index b77934ce7..49f0c305c 100644
--- a/model/sw_core.F90
+++ b/model/sw_core.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module sw_core_mod
diff --git a/model/tp_core.F90 b/model/tp_core.F90
index e8a6fa29f..e446a33f9 100644
--- a/model/tp_core.F90
+++ b/model/tp_core.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module tp_core_mod
 !BOP
diff --git a/model_nh/nh_core.F90 b/model_nh/nh_core.F90
index e5d5de122..26df89c98 100644
--- a/model_nh/nh_core.F90
+++ b/model_nh/nh_core.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module nh_core_mod
 ! Developer: S.-J. Lin, NOAA/GFDL
diff --git a/model_nh/nh_utils.F90 b/model_nh/nh_utils.F90
index c2e103018..36ea4e926 100644
--- a/model_nh/nh_utils.F90
+++ b/model_nh/nh_utils.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 module nh_utils_mod
 ! Developer: S.-J. Lin, NOAA/GFDL
diff --git a/tools/external_ic.F90 b/tools/external_ic.F90
index ed50d0c94..6e1653d57 100644
--- a/tools/external_ic.F90
+++ b/tools/external_ic.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************
 #ifdef OVERLOAD_R4
 #define _GET_VAR1 get_var1_real
diff --git a/tools/external_sst.F90 b/tools/external_sst.F90
index 2de1edb84..d9dd496d3 100644
--- a/tools/external_sst.F90
+++ b/tools/external_sst.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
 !***********************************************************************

 module external_sst_mod
diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90
index 78339b310..e18140cd0 100644
--- a/tools/fv_diagnostics.F90
+++ b/tools/fv_diagnostics.F90
@@ -1,20 +1,22 @@
 !***********************************************************************
 !* GNU Lesser General Public License
 !*
-!* This file is part of the GFDL Flexible Modeling System (FMS).
+!* This file is part of the FV3 dynamical core.
 !*
-!* FMS is free software: you can redistribute it and/or modify it under
-!* the terms of the GNU Lesser General Public License as published by
-!* the Free Software Foundation, either version 3 of the License, or (at
-!* your option) any later version.
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
 !*
-!* FMS is distributed in the hope that it will be useful, but WITHOUT
-!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-!* for more details.
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
 !*
 !* You should have received a copy of the GNU Lesser General Public
-!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** module fv_diagnostics_mod diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90 index a80c5f393..dd59e98b0 100644 --- a/tools/fv_eta.F90 +++ b/tools/fv_eta.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module fv_eta_mod use constants_mod, only: kappa, grav, cp_air, rdgas diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index 2bdeba832..4cd647a42 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** module fv_grid_tools_mod diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index 8bf9fb560..7a083d7c5 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** !!!NOTE: Merging in the seasonal forecast initialization code !!!! has proven problematic in the past, since many conflicts diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index acbf962b6..d68351b92 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** !------------------------------------------------------------------------------ !BOP diff --git a/tools/fv_nggps_diag.F90 b/tools/fv_nggps_diag.F90 index fff83e986..03b62f1a0 100644 --- a/tools/fv_nggps_diag.F90 +++ b/tools/fv_nggps_diag.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module fv_nggps_diags_mod diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index 76bf998de..ad50c06c8 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** #ifdef OVERLOAD_R4 #define _GET_VAR1 get_var1_real diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index 7d822e5ab..0fda414e7 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module fv_restart_mod diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90 index 8ca3b33d3..48f373ac1 100644 --- a/tools/fv_surf_map.F90 +++ b/tools/fv_surf_map.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** module fv_surf_map_mod diff --git a/tools/fv_timing.F90 b/tools/fv_timing.F90 index 49ceb5b5b..2c2302e71 100644 --- a/tools/fv_timing.F90 +++ b/tools/fv_timing.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module fv_timing_mod diff --git a/tools/init_hydro.F90 b/tools/init_hydro.F90 index 6c89b6cd7..b03583f33 100644 --- a/tools/init_hydro.F90 +++ b/tools/init_hydro.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** !
$Id$ diff --git a/tools/sim_nc_mod.F90 b/tools/sim_nc_mod.F90 index 453824668..4727c91f7 100644 --- a/tools/sim_nc_mod.F90 +++ b/tools/sim_nc_mod.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module sim_nc_mod diff --git a/tools/sorted_index.F90 b/tools/sorted_index.F90 index 32f012efe..1d3ea9edd 100644 --- a/tools/sorted_index.F90 +++ b/tools/sorted_index.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>.
!*********************************************************************** !-*- F90 -*- module sorted_index_mod diff --git a/tools/test_cases.F90 b/tools/test_cases.F90 index 4506d3ff8..77e2bff1e 100644 --- a/tools/test_cases.F90 +++ b/tools/test_cases.F90 @@ -1,20 +1,22 @@ !*********************************************************************** !* GNU Lesser General Public License !* -!* This file is part of the GFDL Flexible Modeling System (FMS). +!* This file is part of the FV3 dynamical core. !* -!* FMS is free software: you can redistribute it and/or modify it under -!* the terms of the GNU Lesser General Public License as published by -!* the Free Software Foundation, either version 3 of the License, or (at -!* your option) any later version. +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. !* -!* FMS is distributed in the hope that it will be useful, but WITHOUT -!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -!* for more details. +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. !* !* You should have received a copy of the GNU Lesser General Public -!* License along with FMS. If not, see <http://www.gnu.org/licenses/>. +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** module test_cases_mod From f44db53248c745daaa80e4c85cc534881eff6b37 Mon Sep 17 00:00:00 2001 From: bensonr <6594772+bensonr@users.noreply.github.com> Date: Fri, 21 Feb 2020 11:55:09 -0500 Subject: [PATCH 07/14] Master test (#17) * commit of new version of dycore from Weather and Climate Dynamics Group at GFDL * updated versions of GFDL-specific files from dev/gfdl * updated README.md with current release information * cleaned up a few lines in fv_dynamics * new file RELEASE.md with release notes documenting differences between this and the last release * updated RELEASE.md message * hand merge of diagnostic updates * remove trailing spaces from sources * updates to merge some GFDL specific updates into this public release --- GFDL_tools/fv_ada_nudge.F90 | 10 +- GFDL_tools/fv_climate_nudge.F90 | 23 +- GFDL_tools/fv_cmip_diag.F90 | 125 +- GFDL_tools/read_climate_nudge_data.F90 | 24 +- README.md | 4 +- RELEASE.md | 31 + driver/GFDL/atmosphere.F90 | 538 +- driver/SHiELD/atmosphere.F90 | 986 +++- driver/SHiELD/constants.F90 | 341 -- driver/SHiELD/gfdl_cloud_microphys.F90 | 4699 +++++++++++++++ driver/SHiELD/lin_cloud_microphys.F90 | 1324 ----- model_nh/README => model/README_nh_core | 0 model/a2b_edge.F90 | 68 +- model/boundary.F90 | 1107 +++- model/dyn_core.F90 | 445 +- model/fv_arrays.F90 | 412 +- model/fv_control.F90 | 2080 +++---- model/fv_current_grid.F90 | 251 - model/fv_dynamics.F90 | 185 +- model/fv_fill.F90 | 18 +- model/fv_grid_utils.F90 | 779 ++- model/fv_mapz.F90 | 248 +- model/fv_nesting.F90 | 3214 +++++++--- model/fv_regional_bc.F90 | 5727 ++++++++++++++++++ model/fv_sg.F90 | 94 +- model/fv_tracer2d.F90 | 69 +- model/fv_update_phys.F90 | 575 +- {model_nh => model}/nh_core.F90 | 8 +- {model_nh => model}/nh_utils.F90 | 654 +-
model/sw_core.F90 | 355 +- model/tp_core.F90 | 97 +- tools/external_ic.F90 | 1962 +++-- tools/external_sst.F90 | 4 - tools/fv_diagnostics.F90 | 3401 +++++++++-- tools/fv_eta.F90 | 1928 +++--- tools/fv_eta.h | 999 ++++ tools/fv_grid_tools.F90 | 595 +- tools/fv_io.F90 | 192 +- tools/fv_mp_mod.F90 | 816 +-- tools/fv_nggps_diag.F90 | 46 +- tools/fv_nudge.F90 | 292 +- tools/fv_restart.F90 | 1514 ++--- tools/fv_surf_map.F90 | 179 +- tools/fv_timing.F90 | 18 +- tools/fv_treat_da_inc.F90 | 476 ++ tools/init_hydro.F90 | 66 +- tools/sim_nc_mod.F90 | 14 +- tools/sorted_index.F90 | 79 +- tools/test_cases.F90 | 7210 ++++++++++++----------- 49 files changed, 28871 insertions(+), 15411 deletions(-) create mode 100644 RELEASE.md delete mode 100644 driver/SHiELD/constants.F90 create mode 100644 driver/SHiELD/gfdl_cloud_microphys.F90 delete mode 100644 driver/SHiELD/lin_cloud_microphys.F90 rename model_nh/README => model/README_nh_core (100%) delete mode 100644 model/fv_current_grid.F90 create mode 100644 model/fv_regional_bc.F90 rename {model_nh => model}/nh_core.F90 (97%) rename {model_nh => model}/nh_utils.F90 (77%) create mode 100644 tools/fv_eta.h create mode 100644 tools/fv_treat_da_inc.F90 diff --git a/GFDL_tools/fv_ada_nudge.F90 b/GFDL_tools/fv_ada_nudge.F90 index 9e24ba474..b091e556c 100644 --- a/GFDL_tools/fv_ada_nudge.F90 +++ b/GFDL_tools/fv_ada_nudge.F90 @@ -77,10 +77,10 @@ module fv_ada_nudge_mod real(kind=R_GRID), parameter :: radius = cnst_radius - character(len=*), parameter :: VERSION =& - & '$Id$' - character(len=*), parameter :: TAGNAME =& - & '$Name$' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> + logical :: do_adiabatic_init public fv_ada_nudge, fv_ada_nudge_init, fv_ada_nudge_end, breed_slp_inline_ada @@ -1536,7 +1536,7 @@ subroutine fv_ada_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct 10 call close_file ( unit ) end if #endif - call write_version_number (VERSION, TAGNAME) + call write_version_number ( 'FV_ADA_NUDGE_MOD', version ) if ( master ) then f_unit=stdlog() write( f_unit, nml = fv_ada_nudge_nml ) diff --git a/GFDL_tools/fv_climate_nudge.F90 b/GFDL_tools/fv_climate_nudge.F90 index 1388b7fb6..bc490228d 100644 --- a/GFDL_tools/fv_climate_nudge.F90 +++ b/GFDL_tools/fv_climate_nudge.F90 @@ -45,8 +45,9 @@ module fv_climate_nudge_mod public :: fv_climate_nudge_init, fv_climate_nudge, & fv_climate_nudge_end, do_ps -character(len=128), parameter :: version = '$Id$' -character(len=128), parameter :: tagname = '$Name$' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> type var_state_type integer :: is, ie, js, je, npz @@ -134,11 +135,11 @@ subroutine fv_climate_nudge_init ( Time, axes, flag ) #else if (file_exist('input.nml') ) then unit = open_namelist_file() - ierr=1 + ierr=1 do while (ierr /= 0) - read (unit, nml=fv_climate_nudge_nml, iostat=io, end=10) + read (unit, nml=fv_climate_nudge_nml, iostat=io, end=10) ierr = check_nml_error (io, 'fv_climate_nudge_nml') - enddo + enddo 10 call close_file (unit) endif #endif @@ -146,7 +147,7 @@ subroutine fv_climate_nudge_init ( Time, axes, flag ) !----- write version and namelist to log file ----- unit = stdlog() - call write_version_number (version, tagname) + call write_version_number ('FV_CLIMATE_NUDGE_MOD', version) if (mpp_pe() == mpp_root_pe()) write (unit, nml=fv_climate_nudge_nml) ! initialize flags @@ -340,7 +341,7 @@ subroutine fv_climate_nudge (Time, dt, is, ie, js, je, npz, pfull, & !
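The two hunks above make the same housekeeping change that recurs throughout this patch: the CVS-era '$Id$'/'$Name$' version strings are replaced by an FMS-provided header that defines the character parameter "version", which the module then logs at initialization. A minimal sketch of the convention (the module-name string 'MY_MOD' is illustrative; file_version.h and write_version_number are assumed to come from FMS, as in the hunks above):

    ! version number of this module
    ! Include variable "version" to be written to log file.
    #include<file_version.h>

    subroutine my_mod_init
       ! record this module's version string in the log file at startup
       call write_version_number ( 'MY_MOD', version )
    end subroutine my_mod_init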
vertically dependent factor call get_factor (npz,pfull, factor) - ! first time allocate state + ! first time allocate state if (do_state_alloc) then call var_state_init ( is, ie, js, je, npz, State(1) ) call var_state_init ( is, ie, js, je, npz, State(2) ) @@ -633,7 +634,7 @@ subroutine get_factor (nlev,pfull,factor) factor(k,2) = 0. enddo endif - + ! Specific humidity if (skip_top_q > 0) then do k = 1, skip_top_q @@ -823,7 +824,7 @@ subroutine remap_coef( isd, ied, jsd, jed, lon_in, lat_in, & ! integer, intent(out), dimension(is:ie,js:je ):: id1, id2, jdc real, intent(out), dimension(is:ie,js:je,4):: s2c - + !=============================================================================================== ! local: real:: a1, b1 integer i, j, i1, i2, jc, i0, j0 - !pk0(1) = ak_in(1)**KAPPA + !pk0(1) = ak_in(1)**KAPPA !pn_top = log(ak_in(1)) do i=isd,ied-1 @@ -1006,7 +1007,7 @@ subroutine remap_ps( is, ie, js, je, km, & gz(km+1) = gz_dat(i,j) pk0(km+1) = ph_dat(i,j,km+1)**KAPPA do k=km,1,-1 - gz(k) = gz(k+1) + RDGAS*tp_dat(i,j,k)*(pn_dat(i,j,k+1)-pn_dat(i,j,k)) + gz(k) = gz(k+1) + RDGAS*tp_dat(i,j,k)*(pn_dat(i,j,k+1)-pn_dat(i,j,k)) pk0(k) = ph_dat(i,j,k)**KAPPA enddo if ( phis(i,j) .gt. gz_dat(i,j) ) then diff --git a/GFDL_tools/fv_cmip_diag.F90 b/GFDL_tools/fv_cmip_diag.F90 index 90e657ac9..39cee2e4d 100644 --- a/GFDL_tools/fv_cmip_diag.F90 +++ b/GFDL_tools/fv_cmip_diag.F90 @@ -69,7 +69,9 @@ module fv_cmip_diag_mod !----------------------------------------------------------------------- -type(cmip_diag_id_type) :: ID_ta, ID_ua, ID_va, ID_hus, ID_hur, ID_wap, ID_zg +type(cmip_diag_id_type) :: ID_ta, ID_ua, ID_va, ID_hus, ID_hur, ID_wap, ID_zg, & + ID_u2, ID_v2, ID_t2, ID_wap2, ID_uv, ID_ut, ID_vt, & + ID_uwap, ID_vwap, ID_twap integer :: id_ps, id_orog integer :: id_ua200, id_va200, id_ua850, id_va850, & id_ta500, id_ta700, id_ta850, id_zg500, & @@ -78,8 +80,9 @@ module fv_cmip_diag_mod character(len=5) :: mod_name = 'atmos' -character(len=128) :: version = '$Id$' -character(len=128) :: tagname = '$Name$' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> logical :: module_is_initialized=.false. @@ -131,7 +134,7 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) !----- write version and namelist to log file ----- iunit = stdlog() - call write_version_number ( version, tagname ) + call write_version_number ( 'FV_CMIP_DIAG_MOD', version ) if (mpp_pe() == mpp_root_pe()) write (iunit, nml=fv_cmip_diag_nml) @@ -171,9 +174,48 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) 'Relative Humidity', '%', standard_name='relative_humidity') ID_zg = register_cmip_diag_field_3d (mod_name, 'zg', Time, & - 'Geopotential Height', 'm', standard_name='geopotential_height') + 'Geopotential Height', 'm', standard_name='geopotential_height', axis='half') +!----------------------------------------------------------------------- +!
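The fields registered next (ID_u2 through ID_twap) are pointwise products of pairs of prognostic fields. Archiving time means of these products alongside the means of the individual fields lets eddy covariances be reconstructed offline; for example, the transient-eddy momentum flux follows from

    cov(u,v) = mean(u*v) - mean(u)*mean(v)

evaluated over the archiving interval, and the same identity applies to each of the other product pairs.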
register products of 3D variables (on model levels and pressure levels) + + ID_u2 = register_cmip_diag_field_3d (mod_name, 'u2', Time, & + 'Square of Eastward Wind', 'm2 s-2', standard_name='square_of_eastward_wind') + + ID_v2 = register_cmip_diag_field_3d (mod_name, 'v2', Time, & + 'Square of Northward Wind', 'm2 s-2', standard_name='square_of_northward_wind') + + ID_t2 = register_cmip_diag_field_3d (mod_name, 't2', Time, & + 'Square of Air Temperature', 'K2', standard_name='square_of_air_temperature') + + ID_wap2 = register_cmip_diag_field_3d (mod_name, 'wap2', Time, & + 'Square of Omega', 'Pa2 s-2', standard_name='square_of_omega') + + ID_uv = register_cmip_diag_field_3d (mod_name, 'uv', Time, & + 'Eastward Wind times Northward Wind', 'm2 s-2', & + standard_name='product_of_eastward_wind_and_northward_wind') + ID_ut = register_cmip_diag_field_3d (mod_name, 'ut', Time, & + 'Air Temperature times Eastward Wind', 'K m s-1', & + standard_name='product_of_eastward_wind_and_air_temperature') + + ID_vt = register_cmip_diag_field_3d (mod_name, 'vt', Time, & + 'Air Temperature times Northward Wind', 'K m s-1', & + standard_name='product_of_northward_wind_and_air_temperature') + + ID_uwap = register_cmip_diag_field_3d (mod_name, 'uwap', Time, & + 'Eastward Wind times Omega', 'K m s-1', & + standard_name='product_of_eastward_wind_and_omega') + + ID_vwap = register_cmip_diag_field_3d (mod_name, 'vwap', Time, & + 'Northward Wind times Omega', 'K m s-1', & + standard_name='product_of_northward_wind_and_omega') + + ID_twap = register_cmip_diag_field_3d (mod_name, 'twap', Time, & + 'Air Temperature times Omega', 'K m s-1', & + standard_name='product_of_omega_and_air_temperature') + +!----------------------------------------------------------------------- ! 2D fields id_ps = register_cmip_diag_field_2d (mod_name, 'ps', Time, & @@ -393,89 +435,132 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) if (query_cmip_diag_id(ID_zg)) & used = send_cmip_data_3d (ID_zg, wz, Time, phalf=Atm(n)%peln, opt=1, ext=.true.) +!---------------------------------------------------------------------- + ! 
process product of fields on model levels and/or pressure levels + + if (query_cmip_diag_id(ID_u2)) & + used = send_cmip_data_3d (ID_u2, Atm(n)%ua (isc:iec,jsc:jec,:)*Atm(n)%ua (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_v2)) & + used = send_cmip_data_3d (ID_v2, Atm(n)%va (isc:iec,jsc:jec,:)*Atm(n)%va (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_t2)) & + used = send_cmip_data_3d (ID_t2, Atm(n)%pt (isc:iec,jsc:jec,:)*Atm(n)%pt (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_wap2)) & + used = send_cmip_data_3d (ID_wap2, Atm(n)%omga(isc:iec,jsc:jec,:)*Atm(n)%omga(isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_uv)) & + used = send_cmip_data_3d (ID_uv, Atm(n)%ua (isc:iec,jsc:jec,:)*Atm(n)%va (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_ut)) & + used = send_cmip_data_3d (ID_ut, Atm(n)%ua (isc:iec,jsc:jec,:)*Atm(n)%pt (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_vt)) & + used = send_cmip_data_3d (ID_vt, Atm(n)%va (isc:iec,jsc:jec,:)*Atm(n)%pt (isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_uwap)) & + used = send_cmip_data_3d (ID_uwap, Atm(n)%ua (isc:iec,jsc:jec,:)*Atm(n)%omga(isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_vwap)) & + used = send_cmip_data_3d (ID_vwap, Atm(n)%va (isc:iec,jsc:jec,:)*Atm(n)%omga(isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + + if (query_cmip_diag_id(ID_twap)) & + used = send_cmip_data_3d (ID_twap, Atm(n)%pt (isc:iec,jsc:jec,:)*Atm(n)%omga(isc:iec,jsc:jec,:), & + Time, phalf=Atm(n)%peln, opt=1) + !---------------------------------------------------------------------- ! process 2D fields on specific pressure levels -! +! 
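Each single-level diagnostic that follows interpolates a model-level field to a fixed pressure surface; target pressures are given in Pa (so 500.e2 is 500 hPa), and Atm(n)%peln supplies the log of the interface pressures used for the vertical search. A sketch of how a further level could be added under the same pattern (id_ta200 is hypothetical, not a field this patch registers):

    if (id_ta200 > 0) then ! hypothetical 200-hPa air temperature diagnostic
       call interpolate_vertical (isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, &
                                  Atm(n)%pt(isc:iec,jsc:jec,:), dat2)
       used = send_data (id_ta200, dat2, Time)
    endif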
if (id_ua10 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 10.e2, Atm(n)%peln, & - Atm(n)%ua(isc:iec,jsc:jec,:), dat2) + Atm(n)%ua(isc:iec,jsc:jec,:), dat2) used = send_data (id_ua10, dat2, Time) endif if (id_ua200 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, & - Atm(n)%ua(isc:iec,jsc:jec,:), dat2) + Atm(n)%ua(isc:iec,jsc:jec,:), dat2) used = send_data (id_ua200, dat2, Time) endif if (id_va200 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, & - Atm(n)%va(isc:iec,jsc:jec,:), dat2) + Atm(n)%va(isc:iec,jsc:jec,:), dat2) used = send_data (id_va200, dat2, Time) endif if (id_ua850 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, & - Atm(n)%ua(isc:iec,jsc:jec,:), dat2) + Atm(n)%ua(isc:iec,jsc:jec,:), dat2) used = send_data (id_ua850, dat2, Time) endif if (id_va850 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, & - Atm(n)%va(isc:iec,jsc:jec,:), dat2) + Atm(n)%va(isc:iec,jsc:jec,:), dat2) used = send_data (id_va850, dat2, Time) endif if (id_ta500 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 500.e2, Atm(n)%peln, & - Atm(n)%pt(isc:iec,jsc:jec,:), dat2) + Atm(n)%pt(isc:iec,jsc:jec,:), dat2) used = send_data (id_ta500, dat2, Time) endif if (id_ta700 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 700.e2, Atm(n)%peln, & - Atm(n)%pt(isc:iec,jsc:jec,:), dat2) + Atm(n)%pt(isc:iec,jsc:jec,:), dat2) used = send_data (id_ta700, dat2, Time) endif if (id_ta850 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, & - Atm(n)%pt(isc:iec,jsc:jec,:), dat2) + Atm(n)%pt(isc:iec,jsc:jec,:), dat2) used = send_data (id_ta850, dat2, Time) endif if (id_hus850 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, & - Atm(n)%q(isc:iec,jsc:jec,:,sphum), dat2) + Atm(n)%q(isc:iec,jsc:jec,:,sphum), dat2) used = send_data (id_hus850, dat2, Time) endif if (id_wap500 > 0) then call interpolate_vertical (isc, iec, jsc, jec, npz, 500.e2, Atm(n)%peln, & - Atm(n)%omga(isc:iec,jsc:jec,:), dat2) + Atm(n)%omga(isc:iec,jsc:jec,:), dat2) used = send_data (id_wap500, dat2, Time) endif if (id_zg10 > 0) then - call get_height_given_pressure (isc, iec, jsc, jec, ngc, npz, wz, 1, (/id_zg10/), & + call get_height_given_pressure (isc, iec, jsc, jec, npz, wz, 1, (/id_zg10/), & (/log(10.e2)/), Atm(n)%peln, dat3) used = send_data (id_zg10, dat3(:,:,1), Time) endif if (id_zg100 > 0) then - call get_height_given_pressure (isc, iec, jsc, jec, ngc, npz, wz, 1, (/id_zg100/), & + call get_height_given_pressure (isc, iec, jsc, jec, npz, wz, 1, (/id_zg100/), & (/log(100.e2)/), Atm(n)%peln, dat3) used = send_data (id_zg100, dat3(:,:,1), Time) endif if (id_zg500 > 0) then - call get_height_given_pressure (isc, iec, jsc, jec, ngc, npz, wz, 1, (/id_zg500/), & + call get_height_given_pressure (isc, iec, jsc, jec, npz, wz, 1, (/id_zg500/), & (/log(500.e2)/), Atm(n)%peln, dat3) used = send_data (id_zg500, dat3(:,:,1), Time) endif if (id_zg1000 > 0) then - call get_height_given_pressure (isc, iec, jsc, jec, ngc, npz, wz, 1, (/id_zg1000/), & + call get_height_given_pressure (isc, iec, jsc, jec, npz, wz, 1, (/id_zg1000/), & (/log(1000.e2)/), Atm(n)%peln, dat3) used = send_data (id_zg1000, dat3(:,:,1), Time) endif diff --git a/GFDL_tools/read_climate_nudge_data.F90 b/GFDL_tools/read_climate_nudge_data.F90 index 842cb7555..6122478cd 100644 --- a/GFDL_tools/read_climate_nudge_data.F90 +++ 
b/GFDL_tools/read_climate_nudge_data.F90 @@ -42,8 +42,10 @@ module read_climate_nudge_data_mod module procedure read_climate_nudge_data_3d end interface - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> + real, parameter :: P0 = 1.e5 real, parameter :: D608 = RVGAS/RDGAS - 1. @@ -59,7 +61,7 @@ module read_climate_nudge_data_mod INDEX_U = 8, INDEX_V = 9 character(len=8), dimension(NUM_REQ_FLDS) :: required_field_names = & (/ 'P0 ', 'hyai', 'hybi', 'PHI ', 'PS ', 'T ', 'Q ', 'U ', 'V ' /) - + integer, parameter :: MAXFILES = 53 character(len=256) :: filenames(MAXFILES) character(len=256) :: filename_tails(MAXFILES) @@ -83,7 +85,7 @@ module read_climate_nudge_data_mod integer, dimension(NUM_REQ_FLDS) :: field_index ! varid for variables integer, dimension(NUM_REQ_AXES) :: axis_index ! varid for dimensions type(axistype), dimension(NUM_REQ_FLDS) :: axes - type(fieldtype), dimension(NUM_REQ_FLDS) :: fields + type(fieldtype), dimension(NUM_REQ_FLDS) :: fields end type type(filedata_type), allocatable :: Files(:) @@ -133,7 +135,7 @@ subroutine read_climate_nudge_data_init (nlon, nlat, nlev, ntime) !----- write version and namelist to log file ----- iunit = stdlog() - call write_version_number ( version, tagname ) + call write_version_number ( 'READ_CLIMATE_NUDGE_DATA_MOD', version ) if (mpp_pe() == mpp_root_pe()) write (iunit, nml=read_climate_nudge_data_nml) ! determine the number of files @@ -299,7 +301,7 @@ subroutine read_grid ( lon, lat, ak, bk ) else ak = 0. endif - + call mpp_read(Files(1)%ncid, Files(1)%fields(INDEX_BK), bk) @@ -390,7 +392,7 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) call error_mesg ('read_climate_nudge_data_mod', 'itime out of range', FATAL) endif - ! check dimensions + ! check dimensions if (present(js)) then if (size(dat,1) .ne. global_axis_size(INDEX_LON) .or. & size(dat,2) .ne. sub_domain_latitude_size) then @@ -412,7 +414,7 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) else call error_mesg ('read_climate_nudge_data_mod', 'incorrect field requested in read_climate_nudge_data_2d', FATAL) endif - + ! file index and actual time index in file n = file_index(itime) atime = itime - Files(n)%time_offset @@ -425,9 +427,9 @@ subroutine read_climate_nudge_data_2d (itime, field, dat, is, js) nread = 1 nread(1) = size(dat,1) nread(2) = size(dat,2) - + call mpp_read(Files(n)%ncid, Files(n)%fields(this_index), dat, start, nread) - + ! geopotential height (convert to m2/s2 if necessary) if (field .eq. 'phis') then if (maxval(dat) > 1000.*GRAV) then @@ -487,7 +489,7 @@ subroutine read_climate_nudge_data_3d (itime, field, dat, is, js) else call error_mesg ('read_climate_nudge_data_mod', 'incorrect field requested in read_climate_nudge_data_3d', FATAL) endif - + ! file index and actual time index in file n = file_index(itime) diff --git a/README.md b/README.md index 6db43cb95..9eeb7d3f6 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # GFDL_atmos_cubed_sphere -This is for the FV3 dynamical core and the GFDL Microphysics. +The source contained herein reflects the 201912 release of the Finite Volume Cubed-Sphere Dynamical Core (FV3) from GFDL -The source in this branch reflects the codebase delivered to NCEP/EMC for use in GFS. Updates will be forthcoming. +The GFDL Microphysics is also available via this repository.
# Where to find information diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 000000000..40c37d10b --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,31 @@ +# RELEASE NOTES for FV3: Summary + +FV3-201912-public --- 10 January 2020 +Lucas Harris, GFDL + +This version has been tested against the current SHiELD (formerly fvGFS) physics +and with FMS release candidate 2020.02 from https://github.com/NOAA-GFDL/FMS + +Includes all of the features of the GFDL Release to EMC, as well as: + +- Updated 2017 GFDL Microphysics (from S-J Lin and L Zhou included in GFSv15) +- Updates for GFSv15 ICs (from T Black/J Abeles, EMC) +- Updates to support new nesting capabilities in FMS (from Z Liang) +- Re-written grid nesting code for efficiency and parallelization +- Re-organized fv_eta for improved vertical level selection +- 2018 Stand-alone regional capabilities (from T Black/J Abeles, EMC) +- Refactored model front-end (fv_control, fv_restart) +- Support for point soundings +- And other updates + +# Interface changes + +drivers: renamed 'fvGFS' directory to SHiELD + +atmosphere.F90: 'mytile' is renamed 'mygrid' + +The non-functional gfdl_cloud_microphys.F90 has been removed and replaced with the 2017 public release given to EMC. Also added a proper initialization routine, that includes the use of INTERNAL_FILE_NML and thereby requires the input_nml_file argument. If you do not define the compiler flag INTERNAL_FILE_NML then you can delete this argument. + +The namelist nggps_diag_nml has been eliminated. 'fdiag' is no longer handled by the dynamical core, and should be handled by the physics driver. + +For a complete technical description see the [forthcoming] GFDL Technical Memorandum. diff --git a/driver/GFDL/atmosphere.F90 b/driver/GFDL/atmosphere.F90 index 75233bcf1..cb8e4a684 100644 --- a/driver/GFDL/atmosphere.F90 +++ b/driver/GFDL/atmosphere.F90 @@ -30,72 +30,70 @@ module atmosphere_mod !----------------- ! 
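RELEASE.md above notes that the new gfdl_cloud_microphys initialization reads its namelist through an input_nml_file argument whenever INTERNAL_FILE_NML is defined. A minimal sketch of that guard (the argument, namelist name, and surrounding declarations here are illustrative, not the exact interface of gfdl_cloud_microphys.F90; the namelist declaration itself is omitted):

    subroutine gfdl_cloud_microphys_init (input_nml_file)
       character(len=*), intent(in) :: input_nml_file(:)
       integer :: ios
    #ifdef INTERNAL_FILE_NML
       ! namelist contents are read from the internal file passed in by the driver
       read (input_nml_file, nml=gfdl_cloud_microphys_nml, iostat=ios)
    #else
       ! without the flag, open and read 'input.nml' from a unit instead;
       ! the input_nml_file argument can then be deleted, per the note above
    #endif
    end subroutine gfdl_cloud_microphys_init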
FMS modules: !----------------- -use atmos_co2_mod, only: atmos_co2_rad, co2_radiation_override -use block_control_mod, only: block_control_type -use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks -use time_manager_mod, only: time_type, get_time, set_time, operator(+) -use fms_mod, only: file_exist, open_namelist_file, & - close_file, error_mesg, FATAL, & - check_nml_error, stdlog, & - write_version_number, & - mpp_pe, mpp_root_pe, set_domain, & - mpp_clock_id, mpp_clock_begin, & - mpp_clock_end, CLOCK_SUBCOMPONENT, & - clock_flag_default, nullify_domain -use mpp_mod, only: mpp_error, FATAL, NOTE, input_nml_file, & - mpp_npes, mpp_get_current_pelist, & - mpp_set_current_pelist, stdout, & - mpp_pe, mpp_chksum -use mpp_domains_mod, only: domain2d -use xgrid_mod, only: grid_box_type +use atmos_co2_mod, only: atmos_co2_rad, co2_radiation_override +use block_control_mod, only: block_control_type +use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks +use time_manager_mod, only: time_type, get_time, set_time, operator(+) +use fms_mod, only: file_exist, open_namelist_file, & + close_file, error_mesg, FATAL, & + check_nml_error, stdlog, & + write_version_number, & + mpp_pe, mpp_root_pe, set_domain, & + mpp_clock_id, mpp_clock_begin, & + mpp_clock_end, CLOCK_SUBCOMPONENT, & + clock_flag_default, nullify_domain +use mpp_mod, only: mpp_error, FATAL, NOTE, input_nml_file, & + mpp_npes, mpp_get_current_pelist, & + mpp_set_current_pelist, stdout, & + mpp_pe, mpp_chksum +use mpp_domains_mod, only: domain2d +use xgrid_mod, only: grid_box_type !miz -use diag_manager_mod, only: register_diag_field, send_data -use field_manager_mod, only: MODEL_ATMOS -use tracer_manager_mod, only: get_tracer_index,& - get_number_tracers, & - get_tracer_names, NO_TRACER -use physics_driver_mod, only: surf_diff_type -use physics_types_mod, only: physics_type, & - physics_tendency_type -use radiation_types_mod,only: radiation_type, compute_g_avg -use atmos_cmip_diag_mod,only: atmos_cmip_diag_init, & - register_cmip_diag_field_3d, & - send_cmip_data_3d, cmip_diag_id_type, & - query_cmip_diag_id -#ifndef use_AM3_physics -use atmos_global_diag_mod, only: atmos_global_diag_init, & - atmos_global_diag_end -#endif +use diag_manager_mod, only: register_diag_field, send_data +use field_manager_mod, only: MODEL_ATMOS +use tracer_manager_mod, only: get_tracer_index,& + get_number_tracers, & + get_tracer_names, NO_TRACER +use physics_driver_mod, only: surf_diff_type +use physics_types_mod, only: physics_type, & + physics_tendency_type +use radiation_types_mod, only: radiation_type, compute_g_avg +use atmos_cmip_diag_mod, only: atmos_cmip_diag_init, & + register_cmip_diag_field_3d, & + send_cmip_data_3d, cmip_diag_id_type, & + query_cmip_diag_id +use atmos_global_diag_mod, only: atmos_global_diag_init, & + atmos_global_diag_end !----------------- ! 
FV core modules: !----------------- -use fv_arrays_mod, only: fv_atmos_type -use fv_control_mod, only: fv_init, fv_end, ngrids -use fv_eta_mod, only: get_eta_level -use fv_io_mod, only: fv_io_register_nudge_restart -use fv_dynamics_mod, only: fv_dynamics -use fv_nesting_mod, only: twoway_nesting -use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin -use fv_cmip_diag_mod, only: fv_cmip_diag_init, fv_cmip_diag, fv_cmip_diag_end -use fv_restart_mod, only: fv_restart, fv_write_restart -use fv_timing_mod, only: timing_on, timing_off -use fv_mp_mod, only: switch_current_Atm -use fv_sg_mod, only: fv_subgrid_z -use fv_update_phys_mod, only: fv_update_phys +use fv_arrays_mod, only: fv_atmos_type +use fv_control_mod, only: fv_control_init, fv_end, ngrids +use fv_eta_mod, only: get_eta_level +use fv_io_mod, only: fv_io_register_nudge_restart +use fv_dynamics_mod, only: fv_dynamics +use fv_nesting_mod, only: twoway_nesting +use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin +use fv_cmip_diag_mod, only: fv_cmip_diag_init, fv_cmip_diag, fv_cmip_diag_end +use fv_restart_mod, only: fv_restart, fv_write_restart +use fv_timing_mod, only: timing_on, timing_off +use fv_mp_mod, only: switch_current_Atm +use fv_sg_mod, only: fv_subgrid_z +use fv_update_phys_mod, only: fv_update_phys #if defined (ATMOS_NUDGE) -use atmos_nudge_mod, only: atmos_nudge_init, atmos_nudge_end +use atmos_nudge_mod, only: atmos_nudge_init, atmos_nudge_end #elif defined (CLIMATE_NUDGE) -use fv_climate_nudge_mod,only: fv_climate_nudge_init,fv_climate_nudge_end +use fv_climate_nudge_mod, only: fv_climate_nudge_init,fv_climate_nudge_end #elif defined (ADA_NUDGE) -use fv_ada_nudge_mod, only: fv_ada_nudge_init, fv_ada_nudge_end +use fv_ada_nudge_mod, only: fv_ada_nudge_init, fv_ada_nudge_end #else -use fv_nwp_nudge_mod, only: fv_nwp_nudge_init, fv_nwp_nudge_end, do_adiabatic_init -use amip_interp_mod, only: forecast_mode +use fv_nwp_nudge_mod, only: fv_nwp_nudge_init, fv_nwp_nudge_end, do_adiabatic_init +use amip_interp_mod, only: forecast_mode #endif -use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain -use boundary_mod, only: update_coarse_grid +use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain +use boundary_mod, only: update_coarse_grid implicit none private @@ -118,8 +116,9 @@ module atmosphere_mod !----------------------------------------------------------------------- -character(len=128) :: version = '$Id$' -character(len=128) :: tagname = '$Name$' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> character(len=7) :: mod_name = 'atmos' !---- private data ---- @@ -139,26 +138,28 @@ module atmosphere_mod integer, dimension(:), allocatable :: id_tracerdt_dyn integer :: num_tracers = 0 + !miz !Diagnostics - integer :: id_tdt_dyn, id_qdt_dyn, id_qldt_dyn, id_qidt_dyn, id_qadt_dyn + type(cmip_diag_id_type) :: ID_tnta, ID_tnhusa, ID_tnt, ID_tnhus + integer :: id_udt_dyn, id_vdt_dyn, id_tdt_dyn, id_qdt_dyn + integer :: id_qldt_dyn, id_qidt_dyn, id_qadt_dyn logical :: used character(len=64) :: field real, allocatable :: ttend(:,:,:) real, allocatable :: qtendyyf(:,:,:,:) real, allocatable :: qtend(:,:,:,:) - real :: mv = -1.e10 + real :: mv = -1.e10 !
missing value for diagnostics + integer :: sphum, liq_wat, rainwat, ice_wat, snowwat, graupel !condensate species + integer :: cld_amt !miz - type(cmip_diag_id_type) :: ID_tnta, ID_tnhusa - integer :: mytile = 1 + integer :: mygrid = 1 integer :: p_split = 1 integer, allocatable :: pelist(:) logical, allocatable :: grids_on_this_pe(:) type(fv_atmos_type), allocatable, target :: Atm(:) - integer :: id_udt_dyn, id_vdt_dyn - real, parameter:: w0_big = 60. ! to prevent negative w-tracer diffusion !---dynamics tendencies for use in fv_subgrid_z and during fv_update_phys @@ -207,38 +208,49 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) !NOTE do we still need the second file_exist call? cold_start = (.not.file_exist('INPUT/fv_core.res.nc') .and. .not.file_exist('INPUT/fv_core.res.tile1.nc')) - call fv_init( Atm, dt_atmos, grids_on_this_pe, p_split ) ! allocates Atm components + call fv_control_init( Atm, dt_atmos, mygrid, grids_on_this_pe, p_split ) ! allocates Atm components; sets mygrid - do n=1,ngrids - if (grids_on_this_pe(n)) mytile = n - enddo + Atm(mygrid)%Time_init = Time_init !----- write version and namelist to log file ----- - call write_version_number ( version, tagname ) + call write_version_number ( 'COUPLED/ATMOSPHERE_MOD', version ) !----------------------------------- - npx = Atm(mytile)%npx - npy = Atm(mytile)%npy - npz = Atm(mytile)%npz - ncnst = Atm(mytile)%ncnst - pnats = Atm(mytile)%flagstruct%pnats + npx = Atm(mygrid)%npx + npy = Atm(mygrid)%npy + npz = Atm(mygrid)%npz + ncnst = Atm(mygrid)%ncnst + pnats = Atm(mygrid)%flagstruct%pnats - isc = Atm(mytile)%bd%isc - iec = Atm(mytile)%bd%iec - jsc = Atm(mytile)%bd%jsc - jec = Atm(mytile)%bd%jec + isc = Atm(mygrid)%bd%isc + iec = Atm(mygrid)%bd%iec + jsc = Atm(mygrid)%bd%jsc + jec = Atm(mygrid)%bd%jec - isd = isc - Atm(mytile)%bd%ng - ied = iec + Atm(mytile)%bd%ng - jsd = jsc - Atm(mytile)%bd%ng - jed = jec + Atm(mytile)%bd%ng + isd = isc - Atm(mygrid)%bd%ng + ied = iec + Atm(mygrid)%bd%ng + jsd = jsc - Atm(mygrid)%bd%ng + jed = jec + Atm(mygrid)%bd%ng nq = ncnst-pnats + sphum = get_tracer_index (MODEL_ATMOS, 'sphum' ) + liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat' ) + ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat' ) + rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat' ) + snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat' ) + graupel = get_tracer_index (MODEL_ATMOS, 'graupel' ) + cld_amt = get_tracer_index (MODEL_ATMOS, 'cld_amt' ) + + if (max(sphum,liq_wat,ice_wat,rainwat,snowwat,graupel) > Atm(mygrid)%flagstruct%nwat) then + call mpp_error (FATAL,' atmosphere_init: condensate species are not first in the list of & + &tracers defined in the field_table') + endif ! Allocate grid variables to be used to calculate gradient in 2nd order flux exchange ! This data is only needed for the COARSEST grid. 
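The get_tracer_index checks above encode a layout assumption used throughout the solver: the condensate species must come first in the tracer list, within the first nwat entries. An abbreviated, illustrative field_table ordering consistent with nwat=6 (each entry's longname/units/profile methods are omitted, and the exact order of the non-vapor condensates may differ by configuration):

    "TRACER", "atmos_mod", "sphum"
    "TRACER", "atmos_mod", "liq_wat"
    "TRACER", "atmos_mod", "rainwat"
    "TRACER", "atmos_mod", "ice_wat"
    "TRACER", "atmos_mod", "snowwat"
    "TRACER", "atmos_mod", "graupel"
    "TRACER", "atmos_mod", "cld_amt"

with any additional (chemical or diagnostic) tracers registered after these.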
- call switch_current_Atm(Atm(mytile)) + !call switch_current_Atm(Atm(mygrid)) + call set_domain(Atm(mygrid)%domain) allocate(Grid_box%dx ( isc:iec , jsc:jec+1)) allocate(Grid_box%dy ( isc:iec+1, jsc:jec )) @@ -251,18 +263,18 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) allocate(Grid_box%en2 (3, isc:iec+1, jsc:jec )) allocate(Grid_box%vlon (3, isc:iec , jsc:jec )) allocate(Grid_box%vlat (3, isc:iec , jsc:jec )) - Grid_box%dx ( isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%dx ( isc:iec, jsc:jec+1) - Grid_box%dy ( isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%dy ( isc:iec+1, jsc:jec ) - Grid_box%area ( isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%area ( isc:iec , jsc:jec ) - Grid_box%edge_w( jsc:jec+1) = Atm(mytile)%gridstruct%edge_w( jsc:jec+1) - Grid_box%edge_e( jsc:jec+1) = Atm(mytile)%gridstruct%edge_e( jsc:jec+1) - Grid_box%edge_s( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_s( isc:iec+1) - Grid_box%edge_n( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_n( isc:iec+1) - Grid_box%en1 (:, isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%en1 (:, isc:iec , jsc:jec+1) - Grid_box%en2 (:, isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%en2 (:, isc:iec+1, jsc:jec ) + Grid_box%dx ( isc:iec , jsc:jec+1) = Atm(mygrid)%gridstruct%dx ( isc:iec, jsc:jec+1) + Grid_box%dy ( isc:iec+1, jsc:jec ) = Atm(mygrid)%gridstruct%dy ( isc:iec+1, jsc:jec ) + Grid_box%area ( isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%area ( isc:iec , jsc:jec ) + Grid_box%edge_w( jsc:jec+1) = Atm(mygrid)%gridstruct%edge_w( jsc:jec+1) + Grid_box%edge_e( jsc:jec+1) = Atm(mygrid)%gridstruct%edge_e( jsc:jec+1) + Grid_box%edge_s( isc:iec+1 ) = Atm(mygrid)%gridstruct%edge_s( isc:iec+1) + Grid_box%edge_n( isc:iec+1 ) = Atm(mygrid)%gridstruct%edge_n( isc:iec+1) + Grid_box%en1 (:, isc:iec , jsc:jec+1) = Atm(mygrid)%gridstruct%en1 (:, isc:iec , jsc:jec+1) + Grid_box%en2 (:, isc:iec+1, jsc:jec ) = Atm(mygrid)%gridstruct%en2 (:, isc:iec+1, jsc:jec ) do i = 1,3 - Grid_box%vlon (i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlon (isc:iec , jsc:jec, i ) - Grid_box%vlat (i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlat (isc:iec , jsc:jec, i ) + Grid_box%vlon (i, isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%vlon (isc:iec , jsc:jec, i ) + Grid_box%vlat (i, isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%vlat (isc:iec , jsc:jec, i ) enddo !----- allocate and zero out the dynamics (and accumulated) tendencies @@ -273,58 +285,57 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) !--- allocate pref allocate(pref(npz+1,2), dum1d(npz+1)) - call set_domain ( Atm(mytile)%domain ) - call fv_restart(Atm(mytile)%domain, Atm, dt_atmos, seconds, days, cold_start, Atm(mytile)%gridstruct%grid_type, grids_on_this_pe) + call fv_restart(Atm(mygrid)%domain, Atm, dt_atmos, seconds, days, cold_start, Atm(mygrid)%gridstruct%grid_type, mygrid) fv_time = Time !----- initialize atmos_axes and fv_dynamics diagnostics !I've had trouble getting this to work with multiple grids at a time; worth revisiting? - call fv_diag_init(Atm(mytile:mytile), Atm(mytile)%atmos_axes, Time, npx, npy, npz, Atm(mytile)%flagstruct%p_ref) + call fv_diag_init(Atm(mygrid:mygrid), Atm(mygrid)%atmos_axes, Time, npx, npy, npz, Atm(mygrid)%flagstruct%p_ref) !---------- reference profile ----------- ps1 = 101325. ps2 = 81060. 
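The reference profile computed here evaluates the hybrid sigma-pressure coordinate at two representative surface pressures (101325 Pa and 81060 Pa); at each model interface k, get_eta_level evaluates, in effect,

    p_half(k) = ak(k) + bk(k)*ps

with full-level values derived from adjacent interfaces, so pref(:,1) and pref(:,2) approximate sea-level and elevated-terrain columns.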
pref(npz+1,1) = ps1 pref(npz+1,2) = ps2 - call get_eta_level ( npz, ps1, pref(1,1), dum1d, Atm(mytile)%ak, Atm(mytile)%bk ) - call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mytile)%ak, Atm(mytile)%bk ) + call get_eta_level ( npz, ps1, pref(1,1), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk ) + call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk ) !---- initialize cmip diagnostic output ---- - call atmos_cmip_diag_init ( Atm(mytile)%ak, Atm(mytile)%bk, pref(1,1), Atm(mytile)%atmos_axes, Time ) -#ifndef use_AM3_physics - call atmos_global_diag_init ( Atm(mytile)%atmos_axes, Atm(mytile)%gridstruct%area(isc:iec,jsc:jec) ) -#endif - call fv_cmip_diag_init ( Atm(mytile:mytile), Atm(mytile)%atmos_axes, Time ) + call atmos_cmip_diag_init ( Atm(mygrid)%ak, Atm(mygrid)%bk, pref(1,1), Atm(mygrid)%atmos_axes, Time ) + call atmos_global_diag_init ( Atm(mygrid)%atmos_axes, Atm(mygrid)%gridstruct%area(isc:iec,jsc:jec) ) + call fv_cmip_diag_init ( Atm(mygrid:mygrid), Atm(mygrid)%atmos_axes, Time ) !--- initialize nudging module --- #if defined (ATMOS_NUDGE) - call atmos_nudge_init ( Time, Atm(mytile)%atmos_axes(1:3), flag=do_atmos_nudge ) - if ( do_atmos_nudge .and. Atm(mytile)%flagstruct%nudge ) then + call atmos_nudge_init ( Time, Atm(mygrid)%atmos_axes(1:3), flag=do_atmos_nudge ) + if ( do_atmos_nudge .and. Atm(mygrid)%flagstruct%nudge ) then call mpp_error(NOTE, 'Code compiled with atmospheric nudging, but fv_core_nml nudge is also set to .true.') elseif ( do_atmos_nudge) then call mpp_error(NOTE, 'Code compiled with and using atmospheric nudging') endif - Atm(mytile)%flagstruct%nudge = do_atmos_nudge + Atm(mygrid)%flagstruct%nudge = do_atmos_nudge #elif defined (CLIMATE_NUDGE) - call fv_climate_nudge_init ( Time, Atm(mytile)%atmos_axes(1:3), flag=do_atmos_nudge ) - if ( do_atmos_nudge .and. Atm(1)%flagstruct%nudge ) then + call fv_climate_nudge_init ( Time, Atm(mygrid)%atmos_axes(1:3), flag=do_atmos_nudge ) + if ( do_atmos_nudge .and. 
Atm(mygrid)%flagstruct%nudge ) then call mpp_error(NOTE, 'Code compiled with climate nudging, but fv_core_nml nudge is also set to .true.') elseif ( do_atmos_nudge ) then call mpp_error(NOTE, 'Code compiled with and using climate nudging') endif - Atm(mytile)%flagstruct%nudge = do_atmos_nudge + Atm(mygrid)%flagstruct%nudge = do_atmos_nudge #elif defined (ADA_NUDGE) - if ( Atm(1)%flagstruct%nudge ) then - call fv_ada_nudge_init( Time, Atm(mytile)%atmos_axes, npz, zvir, Atm(1)%ak, Atm(1)%bk, Atm(1)%ts, & - Atm(1)%phis, Atm(1)%gridstruct, Atm(1)%ks, Atm(1)%npx, Atm(1)%neststruct, Atm(1)%bd, Atm(1)%domain) + if ( Atm(mygrid)%flagstruct%nudge ) then + call fv_ada_nudge_init( Time, Atm(mygrid)%atmos_axes, npz, zvir, Atm(mygrid)%ak, Atm(mygrid)%bk, & + Atm(mygrid)%ts, Atm(mygrid)%phis, Atm(mygrid)%gridstruct, Atm(mygrid)%ks, Atm(mygrid)%npx, & + Atm(mygrid)%neststruct, Atm(mygrid)%bd, Atm(mygrid)%domain) call mpp_error(NOTE, 'ADA nudging is active') endif #else !Only do nudging on coarse grid for now - if ( Atm(mytile)%flagstruct%nudge ) then - call fv_nwp_nudge_init( Time, Atm(mytile)%atmos_axes, npz, zvir, Atm(1)%ak, Atm(1)%bk, Atm(1)%ts, & - Atm(1)%phis, Atm(1)%gridstruct, Atm(1)%ks, Atm(1)%npx, Atm(1)%neststruct, Atm(1)%bd) + if ( Atm(mygrid)%flagstruct%nudge ) then + call fv_nwp_nudge_init( Time, Atm(mygrid)%atmos_axes, npz, zvir, Atm(mygrid)%ak, Atm(mygrid)%bk, & + Atm(mygrid)%ts, Atm(mygrid)%phis, Atm(mygrid)%gridstruct, Atm(mygrid)%ks, Atm(mygrid)%npx, & + Atm(mygrid)%neststruct, Atm(mygrid)%bd) call mpp_error(NOTE, 'NWP nudging is active') endif #endif @@ -338,19 +349,19 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) !and so for now we will only define for the coarsest grid !miz - id_udt_dyn =register_diag_field(mod_name,'udt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_udt_dyn =register_diag_field(mod_name,'udt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'udt_dyn', 'm/s/s', missing_value=mv) - id_vdt_dyn =register_diag_field(mod_name,'vdt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_vdt_dyn =register_diag_field(mod_name,'vdt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'vdt_dyn', 'm/s/s', missing_value=mv) - id_tdt_dyn =register_diag_field(mod_name,'tdt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_tdt_dyn =register_diag_field(mod_name,'tdt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'tdt_dyn', 'K/s', missing_value=mv) - id_qdt_dyn =register_diag_field(mod_name,'qdt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_qdt_dyn =register_diag_field(mod_name,'qdt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'qdt_dyn', 'kg/kg/s', missing_value=mv) - id_qldt_dyn =register_diag_field(mod_name,'qldt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_qldt_dyn =register_diag_field(mod_name,'qldt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'qldt_dyn', 'kg/kg/s', missing_value=mv) - id_qidt_dyn =register_diag_field(mod_name,'qidt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_qidt_dyn =register_diag_field(mod_name,'qidt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'qidt_dyn', 'kg/kg/s', missing_value=mv) - id_qadt_dyn =register_diag_field(mod_name,'qadt_dyn', Atm(mytile)%atmos_axes(1:3), & + id_qadt_dyn =register_diag_field(mod_name,'qadt_dyn', Atm(mygrid)%atmos_axes(1:3), & Time,'qadt_dyn', '1/s', missing_value=mv) !--- register cmip tendency fields --- ID_tnta = register_cmip_diag_field_3d (mod_name, 'tnta', Time, & @@ -359,6 +370,12 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) ID_tnhusa = register_cmip_diag_field_3d (mod_name, 'tnhusa', Time, & 'Tendency of Specific Humidity 
due to Advection', 's-1', & standard_name='tendency_of_specific_humidity_due_to_advection') + ID_tnt = register_cmip_diag_field_3d (mod_name, 'tnt', Time, & + 'Tendency of Air Temperature', 'K s-1', & + standard_name='tendency_of_air_temperature') + ID_tnhus = register_cmip_diag_field_3d (mod_name, 'tnhus', Time, & + 'Tendency of Specific Humidity', 's-1', & + standard_name='tendency_of_specific_humidity') !---allocate id_tracer_* allocate (id_tracerdt_dyn (num_tracers)) @@ -367,15 +384,22 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) call get_tracer_names (MODEL_ATMOS, itrac, name = tracer_name, units = tracer_units) if (get_tracer_index(MODEL_ATMOS,tracer_name)>0) then id_tracerdt_dyn(itrac) = register_diag_field(mod_name, TRIM(tracer_name)//'dt_dyn', & - Atm(mytile)%atmos_axes(1:3),Time, & + Atm(mygrid)%atmos_axes(1:3),Time, & TRIM(tracer_name)//' total tendency from advection', & TRIM(tracer_units)//'/s', missing_value = mv) endif enddo if (any(id_tracerdt_dyn(:)>0)) allocate(qtendyyf(isc:iec, jsc:jec,1:npz,num_tracers)) - if ( id_tdt_dyn>0 .or. query_cmip_diag_id(ID_tnta) ) allocate(ttend(isc:iec, jsc:jec, 1:npz)) + if ( id_tdt_dyn>0 .or. query_cmip_diag_id(ID_tnta) .or. query_cmip_diag_id(ID_tnt) ) & + allocate(ttend(isc:iec, jsc:jec, 1:npz)) if ( any((/ id_qdt_dyn, id_qldt_dyn, id_qidt_dyn, id_qadt_dyn /) > 0) .or. & - query_cmip_diag_id(ID_tnhusa) ) allocate(qtend(isc:iec, jsc:jec, 1:npz, 4)) + query_cmip_diag_id(ID_tnhusa) .or. query_cmip_diag_id(ID_tnhus) ) allocate(qtend(isc:iec, jsc:jec, 1:npz, 4)) + +! could zero out diagnostics if tracer field not defined + if (sphum > size(qtend,4)) id_qdt_dyn = 0 + if (liq_wat > size(qtend,4)) id_qldt_dyn = 0 + if (ice_wat > size(qtend,4)) id_qidt_dyn = 0 + if (cld_amt > size(qtend,4)) id_qadt_dyn = 0 !miz ! --- initialize clocks for dynamics, physics_down and physics_up @@ -383,8 +407,9 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Surf_diff, Grid_box) id_subgridz = mpp_clock_id ('FV subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) - n = mytile - call switch_current_Atm(Atm(n)) + call timing_off('ATMOS_INIT') + + call set_domain(Atm(mygrid)%domain) end subroutine atmosphere_init @@ -399,26 +424,24 @@ subroutine atmosphere_dynamics ( Time, surf_diff ) !---- Call FV dynamics ----- call mpp_clock_begin (id_dynam) -!miz -#ifndef use_AM3_physics - Surf_diff%ddp_dyn(:,:,:) = Atm(mytile)%delp(isc:iec, jsc:jec, :) - Surf_diff%tdt_dyn(:,:,:) = Atm(mytile)%pt(isc:iec, jsc:jec, :) - Surf_diff%qdt_dyn(:,:,:) = Atm(mytile)%q (isc:iec, jsc:jec, :, 1) + & - Atm(mytile)%q (isc:iec, jsc:jec, :, 2) + & - Atm(mytile)%q (isc:iec, jsc:jec, :, 3) -#endif -!miz[M d0 - if ( id_tdt_dyn>0 .or. query_cmip_diag_id(ID_tnta) ) ttend(:, :, :) = Atm(mytile)%pt(isc:iec, jsc:jec, :) + Surf_diff%ddp_dyn(:,:,:) = Atm(mygrid)%delp(isc:iec, jsc:jec, :) + Surf_diff%tdt_dyn(:,:,:) = Atm(mygrid)%pt(isc:iec, jsc:jec, :) + Surf_diff%qdt_dyn(:,:,:) = Atm(mygrid)%q (isc:iec, jsc:jec, :, sphum) + & + Atm(mygrid)%q (isc:iec, jsc:jec, :, liq_wat) + & + Atm(mygrid)%q (isc:iec, jsc:jec, :, ice_wat) + +!miz + if ( id_tdt_dyn>0 .or. query_cmip_diag_id(ID_tnta) ) ttend(:, :, :) = Atm(mygrid)%pt(isc:iec, jsc:jec, :) if ( any((/ id_qdt_dyn, id_qldt_dyn, id_qidt_dyn, id_qadt_dyn /) > 0) .or. 
& - query_cmip_diag_id(ID_tnhusa) ) qtend(:, :, :, :) = Atm(mytile)%q (isc:iec, jsc:jec, :, :) + query_cmip_diag_id(ID_tnhusa) ) qtend(:, :, :, :) = Atm(mygrid)%q (isc:iec, jsc:jec, :, :) !miz do itrac = 1, num_tracers if (id_tracerdt_dyn (itrac) >0 ) & - qtendyyf(:,:,:,itrac) = Atm(mytile)%q(isc:iec,jsc:jec,:,itrac) + qtendyyf(:,:,:,itrac) = Atm(mygrid)%q(isc:iec,jsc:jec,:,itrac) enddo - n = mytile + n = mygrid do psc=1,abs(p_split) call timing_on('fv_dynamics') !uc/vc only need be same on coarse grid? However BCs do need to be the same @@ -444,58 +467,42 @@ subroutine atmosphere_dynamics ( Time, surf_diff ) if (ngrids > 1 .and. (psc < p_split .or. p_split < 0)) then call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') endif end do !p_split call mpp_clock_end (id_dynam) -!miz -#ifndef use_AM3_physics - Surf_diff%ddp_dyn(:,:,:) =(Atm(mytile)%delp(isc:iec,jsc:jec,:)-Surf_diff%ddp_dyn(:,:,:))/dt_atmos - Surf_diff%tdt_dyn(:,:,:) =(Atm(mytile)%pt(isc:iec,jsc:jec,:) -Surf_diff%tdt_dyn(:,:,:))/dt_atmos - Surf_diff%qdt_dyn(:,:,:) =(Atm(mytile)%q (isc:iec,jsc:jec,:,1) + & - Atm(mytile)%q (isc:iec,jsc:jec,:,2) + & - Atm(mytile)%q (isc:iec,jsc:jec,:,3) - Surf_diff%qdt_dyn(:,:,:))/dt_atmos -#endif -!miz - if ( id_udt_dyn>0 ) used = send_data( id_udt_dyn, 2.0/dt_atmos*Atm(mytile)%ua(isc:iec,jsc:jec,:), Time) - if ( id_vdt_dyn>0 ) used = send_data( id_vdt_dyn, 2.0/dt_atmos*Atm(mytile)%va(isc:iec,jsc:jec,:), Time) - if ( id_tdt_dyn>0 .or. query_cmip_diag_id(ID_tnta) ) then - ttend = (Atm(mytile)%pt(isc:iec, jsc:jec, :) - ttend(:, :, : ))/dt_atmos - if (id_tdt_dyn>0) used = send_data(id_tdt_dyn, ttend(:,:,:), Time) - if (query_cmip_diag_id(ID_tnta)) used = send_cmip_data_3d (ID_tnta, ttend(:,:,:), Time) - endif + Surf_diff%ddp_dyn(:,:,:) =(Atm(mygrid)%delp(isc:iec,jsc:jec,:)-Surf_diff%ddp_dyn(:,:,:))/dt_atmos + Surf_diff%tdt_dyn(:,:,:) =(Atm(mygrid)%pt(isc:iec,jsc:jec,:) -Surf_diff%tdt_dyn(:,:,:))/dt_atmos + Surf_diff%qdt_dyn(:,:,:) =(Atm(mygrid)%q (isc:iec,jsc:jec,:,sphum) + & + Atm(mygrid)%q (isc:iec,jsc:jec,:,liq_wat) + & + Atm(mygrid)%q (isc:iec,jsc:jec,:,ice_wat) - Surf_diff%qdt_dyn(:,:,:))/dt_atmos - if ( any((/ id_qdt_dyn, id_qldt_dyn, id_qidt_dyn, id_qadt_dyn /) > 0) .or. 
query_cmip_diag_id(ID_tnhusa) ) then - qtend = (Atm(mytile)%q (isc:iec, jsc:jec, :, :)- qtend(:, :, :, :))/dt_atmos - if (id_qdt_dyn > 0) used = send_data(id_qdt_dyn, qtend(:,:,:,1), Time) - if (id_qldt_dyn > 0) used = send_data(id_qldt_dyn, qtend(:,:,:,2), Time) - if (id_qidt_dyn > 0) used = send_data(id_qidt_dyn, qtend(:,:,:,3), Time) - if (id_qadt_dyn > 0) used = send_data(id_qadt_dyn, qtend(:,:,:,4), Time) - if (query_cmip_diag_id(ID_tnhusa)) used = send_cmip_data_3d (ID_tnhusa, qtend(:,:,:,1), Time) - endif +!miz + if (id_udt_dyn>0) used = send_data( id_udt_dyn, 2.0/dt_atmos*Atm(mygrid)%ua(isc:iec,jsc:jec,:), Time) + if (id_vdt_dyn>0) used = send_data( id_vdt_dyn, 2.0/dt_atmos*Atm(mygrid)%va(isc:iec,jsc:jec,:), Time) + if (id_tdt_dyn > 0) used = send_data( id_tdt_dyn, (Atm(mygrid)%pt(isc:iec,jsc:jec,:)-ttend(:,:,:))/dt_atmos, Time) + if (query_cmip_diag_id(ID_tnta)) & + used = send_cmip_data_3d ( ID_tnta, (Atm(mygrid)%pt(isc:iec,jsc:jec,:)-ttend(:,:,:))/dt_atmos, Time) + + if (id_qdt_dyn > 0) used = send_data( id_qdt_dyn , (Atm(mygrid)%q(isc:iec,jsc:jec,:,sphum)-qtend(:,:,:,sphum))/dt_atmos, Time) + if (id_qldt_dyn > 0) used = send_data( id_qldt_dyn, (Atm(mygrid)%q(isc:iec,jsc:jec,:,liq_wat)-qtend(:,:,:,liq_wat))/dt_atmos, Time) + if (id_qidt_dyn > 0) used = send_data( id_qidt_dyn, (Atm(mygrid)%q(isc:iec,jsc:jec,:,ice_wat)-qtend(:,:,:,ice_wat))/dt_atmos, Time) + if (id_qadt_dyn > 0) used = send_data( id_qadt_dyn, (Atm(mygrid)%q(isc:iec,jsc:jec,:,cld_amt)-qtend(:,:,:,cld_amt))/dt_atmos, Time) + if (query_cmip_diag_id(ID_tnhusa)) & + used = send_cmip_data_3d (ID_tnhusa, (Atm(mygrid)%q(isc:iec,jsc:jec,:,sphum)-qtend(:,:,:,sphum))/dt_atmos, Time) !miz do itrac = 1, num_tracers if(id_tracerdt_dyn(itrac)>0) then - qtendyyf(:,:,:,itrac) = (Atm(mytile)%q (isc:iec, jsc:jec, :,itrac)- & - qtendyyf(:,:,:,itrac))/dt_atmos - used = send_data(id_tracerdt_dyn(itrac), qtendyyf(:,:,:,itrac), & - Time) + qtendyyf(:,:,:,itrac) = (Atm(mygrid)%q (isc:iec, jsc:jec, :,itrac)- & + qtendyyf(:,:,:,itrac))/dt_atmos + used = send_data(id_tracerdt_dyn(itrac), qtendyyf(:,:,:,itrac), Time) endif enddo -#ifdef TWOWAY_UPDATE_BEFORE_PHYSICS - if (ngrids > 1) then - call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, kappa, cp_air, zvir, dt_atmos) - call timing_off('TWOWAY_UPDATE') - endif - call nullify_domain() -#endif - !----------------------------------------------------- !--- COMPUTE SUBGRID Z !----------------------------------------------------- @@ -543,26 +550,24 @@ subroutine atmosphere_end (Time, Grid_box )!rab, Radiation, Physics) !rab type (physics_type), intent(inout) :: Physics ! 
initialize domains for writing global physics data - call set_domain ( Atm(mytile)%domain ) + call set_domain ( Atm(mygrid)%domain ) !--- end nudging module --- #if defined (ATMOS_NUDGE) - if ( Atm(mytile)%flagstruct%nudge ) call atmos_nudge_end + if ( Atm(mygrid)%flagstruct%nudge ) call atmos_nudge_end #elif defined (CLIMATE_NUDGE) - if ( Atm(mytile)%flagstruct%nudge ) call fv_climate_nudge_end + if ( Atm(mygrid)%flagstruct%nudge ) call fv_climate_nudge_end #elif defined (ADA_NUDGE) - if ( Atm(mytile)%flagstruct%nudge ) call fv_ada_nudge_end + if ( Atm(mygrid)%flagstruct%nudge ) call fv_ada_nudge_end #else - if ( Atm(mytile)%flagstruct%nudge ) call fv_nwp_nudge_end + if ( Atm(mygrid)%flagstruct%nudge ) call fv_nwp_nudge_end #endif -#ifndef use_AM3_physics call atmos_global_diag_end -#endif call fv_cmip_diag_end call nullify_domain ( ) - call fv_end(Atm, grids_on_this_pe) + call fv_end(Atm, mygrid) deallocate (Atm) deallocate( u_dt, v_dt, t_dt, q_dt, pref, dum1d ) @@ -579,7 +584,7 @@ end subroutine atmosphere_end subroutine atmosphere_restart(timestamp) character(len=*), intent(in) :: timestamp - call fv_write_restart(Atm, grids_on_this_pe, timestamp) + call fv_write_restart(Atm(mygrid), timestamp) end subroutine atmosphere_restart ! @@ -614,15 +619,15 @@ end subroutine atmosphere_pref subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro, do_uni_zfull) !miz integer, intent(out) :: i1, i2, j1, j2, kt logical, intent(out), optional :: p_hydro, hydro, do_uni_zfull !miz - i1 = Atm(mytile)%bd%isc - i2 = Atm(mytile)%bd%iec - j1 = Atm(mytile)%bd%jsc - j2 = Atm(mytile)%bd%jec - kt = Atm(mytile)%npz + i1 = Atm(mygrid)%bd%isc + i2 = Atm(mygrid)%bd%iec + j1 = Atm(mygrid)%bd%jsc + j2 = Atm(mygrid)%bd%jec + kt = Atm(mygrid)%npz - if (present(p_hydro)) p_hydro = Atm(mytile)%flagstruct%phys_hydrostatic - if (present( hydro)) hydro = Atm(mytile)%flagstruct%hydrostatic - if (present(do_uni_zfull)) do_uni_zfull = Atm(mytile)%flagstruct%do_uni_zfull + if (present(p_hydro)) p_hydro = Atm(mygrid)%flagstruct%phys_hydrostatic + if (present( hydro)) hydro = Atm(mygrid)%flagstruct%hydrostatic + if (present(do_uni_zfull)) do_uni_zfull = Atm(mygrid)%flagstruct%do_uni_zfull end subroutine atmosphere_control_data @@ -630,7 +635,7 @@ end subroutine atmosphere_control_data subroutine atmosphere_cell_area (area_out) real, dimension(:,:), intent(out) :: area_out - area_out(1:iec-isc+1, 1:jec-jsc+1) = Atm(mytile)%gridstruct%area (isc:iec,jsc:jec) + area_out(1:iec-isc+1, 1:jec-jsc+1) = Atm(mygrid)%gridstruct%area (isc:iec,jsc:jec) end subroutine atmosphere_cell_area @@ -646,8 +651,8 @@ subroutine atmosphere_grid_center (lon, lat) do j=jsc,jec do i=isc,iec - lon(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%agrid_64(i,j,1) - lat(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%agrid_64(i,j,2) + lon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,1) + lat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,2) enddo end do @@ -672,8 +677,8 @@ subroutine atmosphere_boundary (blon, blat, global) do j=jsc,jec+1 do i=isc,iec+1 - blon(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%grid(i,j,1) - blat(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%grid(i,j,2) + blon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,1) + blat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,2) enddo end do @@ -681,7 +686,7 @@ end subroutine atmosphere_boundary subroutine set_atmosphere_pelist () - call mpp_set_current_pelist(Atm(mytile)%pelist, no_sync=.TRUE.) + call mpp_set_current_pelist(Atm(mygrid)%pelist, no_sync=.TRUE.) 
end subroutine set_atmosphere_pelist @@ -690,7 +695,7 @@ subroutine atmosphere_domain ( fv_domain ) ! returns the domain2d variable associated with the coupling grid ! note: coupling is done using the mass/temperature grid with no halos - fv_domain = Atm(mytile)%domain_for_coupler + fv_domain = Atm(mygrid)%domain_for_coupler end subroutine atmosphere_domain @@ -704,7 +709,7 @@ subroutine get_atmosphere_axes ( axes ) 'get_atmosphere_axes in atmosphere_mod', & 'size of argument is incorrect', FATAL ) - axes (1:size(axes(:))) = Atm(mytile)%atmos_axes (1:size(axes(:))) + axes (1:size(axes(:))) = Atm(mygrid)%atmos_axes (1:size(axes(:))) end subroutine get_atmosphere_axes @@ -727,19 +732,19 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do j=jsc,jec do i=isc,iec - p_surf(i,j) = Atm(mytile)%ps(i,j) - t_bot(i,j) = Atm(mytile)%pt(i,j,npz) - p_bot(i,j) = Atm(mytile)%delp(i,j,npz)/(Atm(mytile)%peln(i,npz+1,j)-Atm(mytile)%peln(i,npz,j)) - z_bot(i,j) = rrg*t_bot(i,j)*(1.+zvir*Atm(mytile)%q(i,j,npz,1)) * & - (1. - Atm(mytile)%pe(i,npz,j)/p_bot(i,j)) + p_surf(i,j) = Atm(mygrid)%ps(i,j) + t_bot(i,j) = Atm(mygrid)%pt(i,j,npz) + p_bot(i,j) = Atm(mygrid)%delp(i,j,npz)/(Atm(mygrid)%peln(i,npz+1,j)-Atm(mygrid)%peln(i,npz,j)) + z_bot(i,j) = rrg*t_bot(i,j)*(1.+zvir*Atm(mygrid)%q(i,j,npz,sphum)) * & + (1. - Atm(mygrid)%pe(i,npz,j)/p_bot(i,j)) enddo enddo if ( present(slp) ) then ! determine 0.8 sigma reference level - sigtop = Atm(mytile)%ak(1)/pstd_mks+Atm(mytile)%bk(1) + sigtop = Atm(mygrid)%ak(1)/pstd_mks+Atm(mygrid)%bk(1) do k = 1, npz - sigbot = Atm(mytile)%ak(k+1)/pstd_mks+Atm(mytile)%bk(k+1) + sigbot = Atm(mygrid)%ak(k+1)/pstd_mks+Atm(mygrid)%bk(k+1) if (sigbot+sigtop > 1.6) then kr = k exit @@ -749,9 +754,9 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do j=jsc,jec do i=isc,iec ! sea level pressure - tref(i,j) = Atm(mytile)%pt(i,j,kr) * (Atm(mytile)%delp(i,j,kr)/ & - ((Atm(mytile)%peln(i,kr+1,j)-Atm(mytile)%peln(i,kr,j))*Atm(mytile)%ps(i,j)))**(-rrg*tlaps) - slp(i,j) = Atm(mytile)%ps(i,j)*(1.+tlaps*Atm(mytile)%phis(i,j)/(tref(i,j)*grav))**(1./(rrg*tlaps)) + tref(i,j) = Atm(mygrid)%pt(i,j,kr) * (Atm(mygrid)%delp(i,j,kr)/ & + ((Atm(mygrid)%peln(i,kr+1,j)-Atm(mygrid)%peln(i,kr,j))*Atm(mygrid)%ps(i,j)))**(-rrg*tlaps) + slp(i,j) = Atm(mygrid)%ps(i,j)*(1.+tlaps*Atm(mygrid)%phis(i,j)/(tref(i,j)*grav))**(1./(rrg*tlaps)) enddo enddo endif @@ -760,7 +765,7 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do m=1,nq do j=jsc,jec do i=isc,iec - tr_bot(i,j,m) = Atm(mytile)%q(i,j,npz,m) + tr_bot(i,j,m) = Atm(mygrid)%q(i,j,npz,m) enddo enddo enddo @@ -777,8 +782,8 @@ subroutine get_bottom_wind ( u_bot, v_bot ) do j=jsc,jec do i=isc,iec - u_bot(i,j) = Atm(mytile)%u_srf(i,j) - v_bot(i,j) = Atm(mytile)%v_srf(i,j) + u_bot(i,j) = Atm(mygrid)%u_srf(i,j) + v_bot(i,j) = Atm(mygrid)%v_srf(i,j) enddo enddo @@ -798,7 +803,7 @@ subroutine get_stock_pe(index, value) integer i,j,k real, pointer :: area(:,:) - area => Atm(mytile)%gridstruct%area + area => Atm(mygrid)%gridstruct%area select case (index) @@ -816,9 +821,9 @@ subroutine get_stock_pe(index, value) do k=1,npz do i=isc,iec ! Warning: the following works only with AM2 physics: water vapor; cloud water, cloud ice. 
- wm(i,j) = wm(i,j) + Atm(mytile)%delp(i,j,k) * ( Atm(mytile)%q(i,j,k,1) + & - Atm(mytile)%q(i,j,k,2) + & - Atm(mytile)%q(i,j,k,3) ) + wm(i,j) = wm(i,j) + Atm(mygrid)%delp(i,j,k) * ( Atm(mygrid)%q(i,j,k,sphum) + & + Atm(mygrid)%q(i,j,k,liq_wat) + & + Atm(mygrid)%q(i,j,k,ice_wat) ) enddo enddo enddo @@ -855,9 +860,9 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) Time_prev = Time Time_next = Time + Time_step_atmos - n = mytile + n = mygrid - call set_domain ( Atm(mytile)%domain ) + call set_domain ( Atm(mygrid)%domain ) !--- put u/v tendencies into haloed arrays u_dt and v_dt !$OMP parallel do default(shared) private(nb, ibs, ibe, jbs, jbe) @@ -874,7 +879,7 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) !--- diagnostic tracers are being updated in-place !--- tracer fields must be returned to the Atm structure - Atm(mytile)%qdiag(ibs:ibe,jbs:jbe,:,:) = Physics_tendency%block(nb)%qdiag + Atm(mygrid)%qdiag(ibs:ibe,jbs:jbe,:,:) = Physics_tendency%block(nb)%qdiag enddo @@ -915,7 +920,8 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) .true., Time_next, Atm(n)%flagstruct%nudge, Atm(n)%gridstruct, & Atm(n)%gridstruct%agrid(:,:,1), Atm(n)%gridstruct%agrid(:,:,2), & Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct, & - Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop, q_dt) + Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop, & + Atm(n)%phys_diag, q_dt) call timing_off('FV_UPDATE_PHYS') call mpp_clock_end (id_dynam) @@ -923,27 +929,33 @@ subroutine atmosphere_state_update (Time, Physics_tendency, Physics, Atm_block) !--- physics tendencies if (ngrids > 1 .and. p_split > 0) then call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') - endif + endif + +!--- cmip6 total tendencies of temperature and specific humidity + if (query_cmip_diag_id(ID_tnt)) & + used = send_cmip_data_3d ( ID_tnt, (Atm(mygrid)%pt(isc:iec,jsc:jec,:)-ttend(:,:,:))/dt_atmos, Time) + if (query_cmip_diag_id(ID_tnhus)) & + used = send_cmip_data_3d (ID_tnhus, (Atm(mygrid)%q(isc:iec,jsc:jec,:,sphum)-qtend(:,:,:,sphum))/dt_atmos, Time) #if !defined(ATMOS_NUDGE) && !defined(CLIMATE_NUDGE) && !defined(ADA_NUDGE) - if ( .not.forecast_mode .and. Atm(mytile)%flagstruct%nudge .and. Atm(mytile)%flagstruct%na_init>0 ) then + if ( .not.forecast_mode .and. Atm(mygrid)%flagstruct%nudge .and. Atm(mygrid)%flagstruct%na_init>0 ) then if(mod(seconds, 21600)==0) call adiabatic_init_drv (Time_prev, Time_next) endif #endif call nullify_domain() !---- diagnostics for FV dynamics ----- - if (Atm(mytile)%flagstruct%print_freq /= -99) then + if (Atm(mygrid)%flagstruct%print_freq /= -99) then call mpp_clock_begin(id_fv_diag) call timing_on('FV_DIAG') fv_time = Time_next call get_time (fv_time, seconds, days) - call fv_diag(Atm(mytile:mytile), zvir, fv_time, Atm(mytile)%flagstruct%print_freq) - call fv_cmip_diag(Atm(mytile:mytile), zvir, fv_time) + call fv_diag(Atm(mygrid:mygrid), zvir, fv_time, Atm(mygrid)%flagstruct%print_freq) + call fv_cmip_diag(Atm(mygrid:mygrid), zvir, fv_time) call timing_off('FV_DIAG') call mpp_clock_end(id_fv_diag) @@ -963,10 +975,10 @@ subroutine adiabatic_init_drv (Time_prev, Time_next) !--------------------------------------------------- ! 
Call the adiabatic forward-backward initialization !--------------------------------------------------- - write(errstr,'(A, I4, A)') 'Performing adiabatic nudging', Atm(mytile)%flagstruct%na_init, ' times' + write(errstr,'(A, I4, A)') 'Performing adiabatic nudging', Atm(mygrid)%flagstruct%na_init, ' times' call mpp_error(NOTE, errstr) - ngc = Atm(mytile)%ng + ngc = Atm(mygrid)%ng isd = isc - ngc ied = iec + ngc jsd = jsc - ngc @@ -981,7 +993,7 @@ subroutine adiabatic_init_drv (Time_prev, Time_next) do_adiabatic_init = .true. - do n=1,Atm(mytile)%flagstruct%na_init + do n=1,Atm(mygrid)%flagstruct%na_init call adiabatic_init(Atm, Time_next, -dt_atmos, u_dt, v_dt, t_dt, q_dt, .false.) ! Backward in time one step fv_time = Time_prev call adiabatic_init(Atm, Time_next, dt_atmos, u_dt, v_dt, t_dt, q_dt, .true. ) ! Forward to the original time @@ -1015,8 +1027,8 @@ subroutine adiabatic_init (Atm, Time, dt_init, u_dt, v_dt, t_dt, q_dt, do_nudge) Time_next = Time + Time_step_atmos - n = mytile - ngc = Atm(mytile)%ng + n = mygrid + ngc = Atm(mygrid)%ng isd = isc - ngc ied = iec + ngc jsd = jsc - ngc @@ -1049,7 +1061,8 @@ subroutine adiabatic_init (Atm, Time, dt_init, u_dt, v_dt, t_dt, q_dt, do_nudge) .true., Time_next, Atm(n)%flagstruct%nudge, Atm(n)%gridstruct, & Atm(n)%gridstruct%agrid(:,:,1), Atm(n)%gridstruct%agrid(:,:,2), & Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct, & - Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop, q_dt) + Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop, & + Atm(n)%phys_diag, q_dt) endif @@ -1072,21 +1085,21 @@ subroutine atmos_physics_driver_inputs (Physics, Atm_block, Physics_tendency) jbs = Atm_block%jbs(nb) jbe = Atm_block%jbe(nb) - Physics%block(nb)%phis = Atm(mytile)%phis(ibs:ibe,jbs:jbe) - Physics%block(nb)%u = Atm(mytile)%ua(ibs:ibe,jbs:jbe,:) - Physics%block(nb)%v = Atm(mytile)%va(ibs:ibe,jbs:jbe,:) - Physics%block(nb)%t = Atm(mytile)%pt(ibs:ibe,jbs:jbe,:) - Physics%block(nb)%q = Atm(mytile)%q(ibs:ibe,jbs:jbe,:,:) - Physics%block(nb)%omega= Atm(mytile)%omga(ibs:ibe,jbs:jbe,:) - Physics%block(nb)%pe = Atm(mytile)%pe(ibs:ibe,:,jbs:jbe) - Physics%block(nb)%peln = Atm(mytile)%peln(ibs:ibe,:,jbs:jbe) - Physics%block(nb)%delp = Atm(mytile)%delp(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%phis = Atm(mygrid)%phis(ibs:ibe,jbs:jbe) + Physics%block(nb)%u = Atm(mygrid)%ua(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%v = Atm(mygrid)%va(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%t = Atm(mygrid)%pt(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%q = Atm(mygrid)%q(ibs:ibe,jbs:jbe,:,:) + Physics%block(nb)%omega= Atm(mygrid)%omga(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%pe = Atm(mygrid)%pe(ibs:ibe,:,jbs:jbe) + Physics%block(nb)%peln = Atm(mygrid)%peln(ibs:ibe,:,jbs:jbe) + Physics%block(nb)%delp = Atm(mygrid)%delp(ibs:ibe,jbs:jbe,:) if (.not.Physics%control%phys_hydrostatic) then - Physics%block(nb)%delz = Atm(mytile)%delz(ibs:ibe,jbs:jbe,:) - Physics%block(nb)%w = Atm(mytile)%w(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%delz = Atm(mygrid)%delz(ibs:ibe,jbs:jbe,:) + Physics%block(nb)%w = Atm(mygrid)%w(ibs:ibe,jbs:jbe,:) endif if (_ALLOCATED(Physics%block(nb)%tmp_4d)) & - Physics%block(nb)%tmp_4d = Atm(mytile)%qdiag(ibs:ibe,jbs:jbe,:,:) + Physics%block(nb)%tmp_4d = Atm(mygrid)%qdiag(ibs:ibe,jbs:jbe,:,:) call fv_compute_p_z (Atm_block%npz, Physics%block(nb)%phis, Physics%block(nb)%pe, & Physics%block(nb)%peln, Physics%block(nb)%delp, Physics%block(nb)%delz, & @@ -1094,9 +1107,9 @@ subroutine atmos_physics_driver_inputs (Physics, Atm_block, Physics_tendency) Physics%block(nb)%p_full, 
Physics%block(nb)%p_half, & Physics%block(nb)%z_full, Physics%block(nb)%z_half, & #ifdef USE_COND - Atm(mytile)%q_con(ibs:ibe,jbs:jbe,:), & + Atm(mygrid)%q_con(ibs:ibe,jbs:jbe,:), & #else - Atm(mytile)%q_con, & + Atm(mygrid)%q_con, & #endif Physics%control%phys_hydrostatic, Physics%control%do_uni_zfull) !miz @@ -1110,7 +1123,7 @@ subroutine atmos_physics_driver_inputs (Physics, Atm_block, Physics_tendency) Physics_tendency%block(nb)%v_dt = v_dt(ibs:ibe,jbs:jbe,:) Physics_tendency%block(nb)%t_dt = t_dt(ibs:ibe,jbs:jbe,:) Physics_tendency%block(nb)%q_dt = q_dt(ibs:ibe,jbs:jbe,:,:) - Physics_tendency%block(nb)%qdiag = Atm(mytile)%qdiag(ibs:ibe,jbs:jbe,:,:) + Physics_tendency%block(nb)%qdiag = Atm(mygrid)%qdiag(ibs:ibe,jbs:jbe,:,:) endif enddo @@ -1134,14 +1147,14 @@ subroutine atmos_radiation_driver_inputs (Time, Radiation, Atm_block) jbs = Atm_block%jbs(nb) jbe = Atm_block%jbe(nb) - Radiation%block(nb)%phis = Atm(mytile)%phis(ibs:ibe,jbs:jbe) - Radiation%block(nb)%t = Atm(mytile)%pt(ibs:ibe,jbs:jbe,:) - Radiation%block(nb)%q = Atm(mytile)%q(ibs:ibe,jbs:jbe,:,:) - Radiation%block(nb)%pe = Atm(mytile)%pe(ibs:ibe,:,jbs:jbe) - Radiation%block(nb)%peln = Atm(mytile)%peln(ibs:ibe,:,jbs:jbe) - Radiation%block(nb)%delp = Atm(mytile)%delp(ibs:ibe,jbs:jbe,:) + Radiation%block(nb)%phis = Atm(mygrid)%phis(ibs:ibe,jbs:jbe) + Radiation%block(nb)%t = Atm(mygrid)%pt(ibs:ibe,jbs:jbe,:) + Radiation%block(nb)%q = Atm(mygrid)%q(ibs:ibe,jbs:jbe,:,:) + Radiation%block(nb)%pe = Atm(mygrid)%pe(ibs:ibe,:,jbs:jbe) + Radiation%block(nb)%peln = Atm(mygrid)%peln(ibs:ibe,:,jbs:jbe) + Radiation%block(nb)%delp = Atm(mygrid)%delp(ibs:ibe,jbs:jbe,:) if (.not.Radiation%control%phys_hydrostatic) & - Radiation%block(nb)%delz = Atm(mytile)%delz(ibs:ibe,jbs:jbe,:) + Radiation%block(nb)%delz = Atm(mygrid)%delz(ibs:ibe,jbs:jbe,:) call fv_compute_p_z (Atm_block%npz, Radiation%block(nb)%phis, Radiation%block(nb)%pe, & Radiation%block(nb)%peln, Radiation%block(nb)%delp, Radiation%block(nb)%delz, & @@ -1149,9 +1162,9 @@ subroutine atmos_radiation_driver_inputs (Time, Radiation, Atm_block) Radiation%block(nb)%p_full, Radiation%block(nb)%p_half, & Radiation%block(nb)%z_full, Radiation%block(nb)%z_half, & #ifdef USE_COND - Atm(mytile)%q_con(ibs:ibe,jbs:jbe,:), & + Atm(mygrid)%q_con(ibs:ibe,jbs:jbe,:), & #else - Atm(mytile)%q_con, & + Atm(mygrid)%q_con, & #endif Radiation%control%phys_hydrostatic, Radiation%control%do_uni_zfull) !miz enddo @@ -1165,6 +1178,7 @@ subroutine atmos_radiation_driver_inputs (Time, Radiation, Atm_block) ! 
phase due to the way in which MPI interacts with nested OpenMP !---------------------------------------------------------------------- call compute_g_avg(Time, 'co2', Radiation, Atm_block) + call compute_g_avg(Time, 'ch4', Radiation, Atm_block) end subroutine atmos_radiation_driver_inputs @@ -1273,8 +1287,8 @@ subroutine reset_atmos_tracers (Physics, Physics_tendency, Atm_block) jbs = Atm_block%jbs(nb) jbe = Atm_block%jbe(nb) - Atm(mytile)%q(ibs:ibe,jbs:jbe,:,:) = Physics%block(nb)%q - Atm(mytile)%qdiag(ibs:ibe,jbs:jbe,:,:) = Physics_tendency%block(nb)%qdiag + Atm(mygrid)%q(ibs:ibe,jbs:jbe,:,:) = Physics%block(nb)%q + Atm(mygrid)%qdiag(ibs:ibe,jbs:jbe,:,:) = Physics_tendency%block(nb)%qdiag enddo end subroutine reset_atmos_tracers diff --git a/driver/SHiELD/atmosphere.F90 b/driver/SHiELD/atmosphere.F90 index ddc186884..8bdb4d80e 100644 --- a/driver/SHiELD/atmosphere.F90 +++ b/driver/SHiELD/atmosphere.F90 @@ -33,7 +33,7 @@ module atmosphere_mod use block_control_mod, only: block_control_type use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks use time_manager_mod, only: time_type, get_time, set_time, operator(+), & - operator(-) + operator(-), operator(/), time_type_to_real use fms_mod, only: file_exist, open_namelist_file, & close_file, error_mesg, FATAL, & check_nml_error, stdlog, & @@ -42,38 +42,42 @@ module atmosphere_mod mpp_clock_id, mpp_clock_begin, & mpp_clock_end, CLOCK_SUBCOMPONENT, & clock_flag_default, nullify_domain -use mpp_mod, only: mpp_error, stdout, FATAL, NOTE, & +use mpp_mod, only: mpp_error, stdout, FATAL, WARNING, NOTE, & input_nml_file, mpp_root_pe, & mpp_npes, mpp_pe, mpp_chksum, & mpp_get_current_pelist, & - mpp_set_current_pelist -use mpp_domains_mod, only: domain2d + mpp_set_current_pelist, mpp_sync +use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE +use mpp_domains_mod, only: domain2d, mpp_update_domains use xgrid_mod, only: grid_box_type use field_manager_mod, only: MODEL_ATMOS use tracer_manager_mod, only: get_tracer_index, get_number_tracers, & - NO_TRACER + NO_TRACER, get_tracer_names use IPD_typedefs, only: IPD_data_type, kind_phys !----------------- ! 
FV core modules: !----------------- -use fv_arrays_mod, only: fv_atmos_type, R_GRID -use fv_control_mod, only: fv_init, fv_end, ngrids +use fv_arrays_mod, only: fv_atmos_type, R_GRID, fv_grid_bounds_type, phys_diag_type +use fv_control_mod, only: fv_control_init, fv_end, ngrids use fv_eta_mod, only: get_eta_level use fv_fill_mod, only: fill_gfs use fv_dynamics_mod, only: fv_dynamics use fv_nesting_mod, only: twoway_nesting -use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin +use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height use fv_nggps_diags_mod, only: fv_nggps_diag_init, fv_nggps_diag use fv_restart_mod, only: fv_restart, fv_write_restart use fv_timing_mod, only: timing_on, timing_off -use fv_mp_mod, only: switch_current_Atm +use fv_mp_mod, only: is_master use fv_sg_mod, only: fv_subgrid_z use fv_update_phys_mod, only: fv_update_phys +use fv_io_mod, only: fv_io_register_nudge_restart use fv_nwp_nudge_mod, only: fv_nwp_nudge_init, fv_nwp_nudge_end, do_adiabatic_init +use fv_regional_mod, only: start_regional_restart, read_new_bc_data +use fv_regional_mod, only: a_step, p_step +use fv_regional_mod, only: current_time_in_seconds use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain -use boundary_mod, only: update_coarse_grid implicit none private @@ -83,25 +87,29 @@ module atmosphere_mod atmosphere_dynamics, atmosphere_state_update !--- utility routines -public :: atmosphere_resolution, atmosphere_boundary, & - atmosphere_grid_center, atmosphere_domain, & +public :: atmosphere_resolution, atmosphere_grid_bdry, & + atmosphere_grid_ctr, atmosphere_domain, & atmosphere_control_data, atmosphere_pref, & - get_atmosphere_axes, get_bottom_mass, & - get_bottom_wind, get_stock_pe, & - set_atmosphere_pelist, get_atmosphere_grid + atmosphere_diag_axes, atmosphere_etalvls, & + atmosphere_hgt, atmosphere_scalar_field_halo, & +!rab atmosphere_tracer_postinit, & +! atmosphere_diss_est, & + atmosphere_nggps_diag, & + get_bottom_mass, get_bottom_wind, & + get_stock_pe, set_atmosphere_pelist !--- physics/radiation data exchange routines public :: atmos_phys_driver_statein !----------------------------------------------------------------------- - -character(len=128) :: version = '$Id$' -character(len=128) :: tagname = '$Name$' -character(len=7) :: mod_name = 'atmos' +! version number of this module +! Include variable "version" to be written to log file. 
+#include<file_version.h>
+character(len=20) :: mod_name = 'SHiELD/atmosphere_mod'
 
 !---- private data ----
 type (time_type) :: Time_step_atmos
- public Atm
+ public Atm, mygrid
 
 !These are convenience variables for local use only, and are set to values in Atm%
 real :: dt_atmos
@@ -117,7 +125,7 @@ module atmosphere_mod
 integer, dimension(:), allocatable :: id_tracerdt_dyn
 integer :: sphum, liq_wat, rainwat, ice_wat, snowwat, graupel !condensate species
- integer :: mytile = 1
+ integer :: mygrid = 1
 integer :: p_split = 1
 integer, allocatable :: pelist(:)
 logical, allocatable :: grids_on_this_pe(:)
@@ -137,12 +145,10 @@ module atmosphere_mod
 
 
- subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy, area)
+ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area)
 type (time_type), intent(in) :: Time_init, Time, Time_step
 type(grid_box_type), intent(inout) :: Grid_box
- real(kind=kind_phys), pointer, dimension(:), intent(inout) :: ak, bk
- real(kind=kind_phys), pointer, dimension(:,:), intent(inout) :: dx, dy, area
-
+ real(kind=kind_phys), pointer, dimension(:,:), intent(inout) :: area
 !--- local variables ---
 integer :: i, n
 integer :: itrac
@@ -150,6 +156,14 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy
 character(len=32) :: tracer_name, tracer_units
 real :: ps1, ps2
+ integer :: nlunit = 9999
+ character (len = 64) :: fn_nml = 'input.nml'
+
+ !For regional
+ a_step = 0
+ current_time_in_seconds = time_type_to_real( Time - Time_init )
+ if (mpp_pe() == 0) write(0,"('atmosphere_init: current_time_seconds = ',f9.1)")current_time_in_seconds
+
 call timing_on('ATMOS_INIT')
 allocate(pelist(mpp_npes()))
 call mpp_get_current_pelist(pelist)
@@ -166,34 +180,30 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy
 !NOTE do we still need the second file_exist call?
 cold_start = (.not.file_exist('INPUT/fv_core.res.nc') .and. .not.file_exist('INPUT/fv_core.res.tile1.nc'))
- call fv_init( Atm, dt_atmos, grids_on_this_pe, p_split ) ! allocates Atm components
+ call fv_control_init( Atm, dt_atmos, mygrid, grids_on_this_pe, p_split ) ! 
allocates Atm components; sets mygrid - do n=1,ngrids - if (grids_on_this_pe(n)) mytile = n - enddo - - Atm(mytile)%Time_init = Time_init + Atm(mygrid)%Time_init = Time_init !----- write version and namelist to log file ----- - call write_version_number ( version, tagname ) + call write_version_number ( 'SHiELD/ATMOSPHERE_MOD', version ) !----------------------------------- - npx = Atm(mytile)%npx - npy = Atm(mytile)%npy - npz = Atm(mytile)%npz - ncnst = Atm(mytile)%ncnst - pnats = Atm(mytile)%flagstruct%pnats + npx = Atm(mygrid)%npx + npy = Atm(mygrid)%npy + npz = Atm(mygrid)%npz + ncnst = Atm(mygrid)%ncnst + pnats = Atm(mygrid)%flagstruct%pnats - isc = Atm(mytile)%bd%isc - iec = Atm(mytile)%bd%iec - jsc = Atm(mytile)%bd%jsc - jec = Atm(mytile)%bd%jec + isc = Atm(mygrid)%bd%isc + iec = Atm(mygrid)%bd%iec + jsc = Atm(mygrid)%bd%jsc + jec = Atm(mygrid)%bd%jec - isd = isc - Atm(mytile)%bd%ng - ied = iec + Atm(mytile)%bd%ng - jsd = jsc - Atm(mytile)%bd%ng - jed = jec + Atm(mytile)%bd%ng + isd = isc - Atm(mygrid)%bd%ng + ied = iec + Atm(mygrid)%bd%ng + jsd = jsc - Atm(mygrid)%bd%ng + jed = jec + Atm(mygrid)%bd%ng nq = ncnst-pnats sphum = get_tracer_index (MODEL_ATMOS, 'sphum' ) @@ -203,14 +213,15 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat' ) graupel = get_tracer_index (MODEL_ATMOS, 'graupel' ) - if (max(sphum,liq_wat,ice_wat,rainwat,snowwat,graupel) > Atm(mytile)%flagstruct%nwat) then + if (max(sphum,liq_wat,ice_wat,rainwat,snowwat,graupel) > Atm(mygrid)%flagstruct%nwat) then call mpp_error (FATAL,' atmosphere_init: condensate species are not first in the list of & &tracers defined in the field_table') endif ! Allocate grid variables to be used to calculate gradient in 2nd order flux exchange ! This data is only needed for the COARSEST grid. 
- call switch_current_Atm(Atm(mytile)) + !call switch_current_Atm(Atm(mygrid)) + call set_domain(Atm(mygrid)%domain) allocate(Grid_box%dx ( isc:iec , jsc:jec+1)) allocate(Grid_box%dy ( isc:iec+1, jsc:jec )) @@ -223,54 +234,44 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy allocate(Grid_box%en2 (3, isc:iec+1, jsc:jec )) allocate(Grid_box%vlon (3, isc:iec , jsc:jec )) allocate(Grid_box%vlat (3, isc:iec , jsc:jec )) - Grid_box%dx ( isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%dx ( isc:iec, jsc:jec+1) - Grid_box%dy ( isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%dy ( isc:iec+1, jsc:jec ) - Grid_box%area ( isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%area ( isc:iec , jsc:jec ) - Grid_box%edge_w( jsc:jec+1) = Atm(mytile)%gridstruct%edge_w( jsc:jec+1) - Grid_box%edge_e( jsc:jec+1) = Atm(mytile)%gridstruct%edge_e( jsc:jec+1) - Grid_box%edge_s( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_s( isc:iec+1) - Grid_box%edge_n( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_n( isc:iec+1) - Grid_box%en1 (:, isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%en1 (:, isc:iec , jsc:jec+1) - Grid_box%en2 (:, isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%en2 (:, isc:iec+1, jsc:jec ) + Grid_box%dx ( isc:iec , jsc:jec+1) = Atm(mygrid)%gridstruct%dx ( isc:iec, jsc:jec+1) + Grid_box%dy ( isc:iec+1, jsc:jec ) = Atm(mygrid)%gridstruct%dy ( isc:iec+1, jsc:jec ) + Grid_box%area ( isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%area ( isc:iec , jsc:jec ) + Grid_box%edge_w( jsc:jec+1) = Atm(mygrid)%gridstruct%edge_w( jsc:jec+1) + Grid_box%edge_e( jsc:jec+1) = Atm(mygrid)%gridstruct%edge_e( jsc:jec+1) + Grid_box%edge_s( isc:iec+1 ) = Atm(mygrid)%gridstruct%edge_s( isc:iec+1) + Grid_box%edge_n( isc:iec+1 ) = Atm(mygrid)%gridstruct%edge_n( isc:iec+1) + Grid_box%en1 (:, isc:iec , jsc:jec+1) = Atm(mygrid)%gridstruct%en1 (:, isc:iec , jsc:jec+1) + Grid_box%en2 (:, isc:iec+1, jsc:jec ) = Atm(mygrid)%gridstruct%en2 (:, isc:iec+1, jsc:jec ) do i = 1,3 - Grid_box%vlon (i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlon (isc:iec , jsc:jec, i ) - Grid_box%vlat (i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlat (isc:iec , jsc:jec, i ) + Grid_box%vlon (i, isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%vlon (isc:iec , jsc:jec, i ) + Grid_box%vlat (i, isc:iec , jsc:jec ) = Atm(mygrid)%gridstruct%vlat (isc:iec , jsc:jec, i ) enddo - allocate (dx (isc:iec , jsc:jec+1)) - allocate (dy (isc:iec+1, jsc:jec )) allocate (area(isc:iec , jsc:jec )) - dx(isc:iec,jsc:jec+1) = Atm(mytile)%gridstruct%dx_64(isc:iec,jsc:jec+1) - dy(isc:iec+1,jsc:jec) = Atm(mytile)%gridstruct%dy_64(isc:iec+1,jsc:jec) - area(isc:iec,jsc:jec) = Atm(mytile)%gridstruct%area_64(isc:iec,jsc:jec) + area(isc:iec,jsc:jec) = Atm(mygrid)%gridstruct%area_64(isc:iec,jsc:jec) !----- allocate and zero out the dynamics (and accumulated) tendencies allocate( u_dt(isd:ied,jsd:jed,npz), & v_dt(isd:ied,jsd:jed,npz), & t_dt(isc:iec,jsc:jec,npz) ) !--- allocate pref - allocate(pref(npz+1,2), dum1d(npz+1)) + allocate(pref(npz+1,2), dum1d(npz+1)) - call set_domain ( Atm(mytile)%domain ) - call fv_restart(Atm(mytile)%domain, Atm, dt_atmos, seconds, days, cold_start, Atm(mytile)%gridstruct%grid_type, grids_on_this_pe) + call fv_restart(Atm(mygrid)%domain, Atm, dt_atmos, seconds, days, cold_start, Atm(mygrid)%gridstruct%grid_type, mygrid) fv_time = Time !----- initialize atmos_axes and fv_dynamics diagnostics !I've had trouble getting this to work with multiple grids at a time; worth revisiting? 
- call fv_diag_init(Atm(mytile:mytile), Atm(mytile)%atmos_axes, Time, npx, npy, npz, Atm(mytile)%flagstruct%p_ref)
- call fv_nggps_diag_init(Atm(mytile:mytile), Atm(mytile)%atmos_axes, Time)
+ call fv_diag_init(Atm(mygrid:mygrid), Atm(mygrid)%atmos_axes, Time, npx, npy, npz, Atm(mygrid)%flagstruct%p_ref)
 
 !---------- reference profile -----------
 ps1 = 101325.
 ps2 = 81060.
 pref(npz+1,1) = ps1
 pref(npz+1,2) = ps2
- call get_eta_level ( npz, ps1, pref(1,1), dum1d, Atm(mytile)%ak, Atm(mytile)%bk )
- call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mytile)%ak, Atm(mytile)%bk )
- allocate (ak(npz+1))
- allocate (bk(npz+1))
- ak(1:npz+1) = Atm(mytile)%ak(npz+1:1:-1)
- bk(1:npz+1) = Atm(mytile)%bk(npz+1:1:-1)
+ call get_eta_level ( npz, ps1, pref(1,1), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk )
+ call get_eta_level ( npz, ps2, pref(1,2), dum1d, Atm(mygrid)%ak, Atm(mygrid)%bk )
 
 ! --- initialize clocks for dynamics, physics_down and physics_up
 id_dynam = mpp_clock_id ('FV dy-core', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT )
@@ -279,14 +280,34 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy
 call timing_off('ATMOS_INIT')
 
- if ( Atm(mytile)%flagstruct%na_init>0 ) then
+! --- initialize the restart of a regional forecast
+ if ( Atm(mygrid)%gridstruct%regional .and. Atm(mygrid)%flagstruct%warm_start ) then
+
+ call start_regional_restart(Atm(1), &
+ isc, iec, jsc, jec, &
+ isd, ied, jsd, jed )
+ endif
+
+
+ if ( Atm(mygrid)%flagstruct%nudge ) then
+ call fv_nwp_nudge_init( Time, Atm(mygrid)%atmos_axes, npz, zvir, Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%ts, &
+ Atm(mygrid)%phis, Atm(mygrid)%gridstruct, Atm(mygrid)%ks, Atm(mygrid)%npx, Atm(mygrid)%neststruct, Atm(mygrid)%bd)
+ call mpp_error(NOTE, 'NWP nudging is active')
+ endif
+ call fv_io_register_nudge_restart ( Atm )
+
+
+ if ( Atm(mygrid)%flagstruct%na_init>0 ) then
 call nullify_domain ( )
- if ( .not. Atm(mytile)%flagstruct%hydrostatic ) then
- call prt_maxmin('Before adi: W', Atm(mytile)%w, isc, iec, jsc, jec, Atm(mytile)%ng, npz, 1.)
+ if ( .not. Atm(mygrid)%flagstruct%hydrostatic ) then
+ call prt_maxmin('Before adi: W', Atm(mygrid)%w, isc, iec, jsc, jec, Atm(mygrid)%ng, npz, 1.)
 endif
- call adiabatic_init(zvir)
- if ( .not. Atm(mytile)%flagstruct%hydrostatic ) then
- call prt_maxmin('After adi: W', Atm(mytile)%w, isc, iec, jsc, jec, Atm(mytile)%ng, npz, 1.)
+ call adiabatic_init(zvir,Atm(mygrid)%flagstruct%nudge_dz)
+ if ( .not. Atm(mygrid)%flagstruct%hydrostatic ) then
+ call prt_maxmin('After adi: W', Atm(mygrid)%w, isc, iec, jsc, jec, Atm(mygrid)%ng, npz, 1.)
+! Not nested? 
+ call prt_height('na_ini Z500', isc,iec, jsc,jec, 3, npz, 500.E2, Atm(mygrid)%phis, Atm(mygrid)%delz, & + Atm(mygrid)%peln, Atm(mygrid)%gridstruct%area_64(isc:iec,jsc:jec), Atm(mygrid)%gridstruct%agrid_64(isc:iec,jsc:jec,2)) endif else call mpp_error(NOTE,'No adiabatic initialization correction in use') @@ -294,12 +315,11 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, ak, bk, dx, dy #ifdef DEBUG call nullify_domain() - call fv_diag(Atm(mytile:mytile), zvir, Time, -1) + call fv_diag(Atm(mygrid:mygrid), zvir, Time, -1) #endif - n = mytile - call switch_current_Atm(Atm(n)) - + call set_domain(Atm(mygrid)%domain) + end subroutine atmosphere_init @@ -364,12 +384,24 @@ subroutine atmosphere_dynamics ( Time ) integer :: itrac, n, psc integer :: k, w_diff, nt_dyn + type(time_type) :: atmos_time + integer :: atmos_time_step !---- Call FV dynamics ----- call mpp_clock_begin (id_dynam) - n = mytile + n = mygrid + a_step = a_step + 1 +! +!*** If this is a regional run then read in the next boundary data when it is time. +! + if(Atm(n)%flagstruct%regional)then + + call read_new_bc_data(Atm(n), Time, Time_step_atmos, p_split, & + isd, ied, jsd, jed ) + endif do psc=1,abs(p_split) + p_step = psc call timing_on('fv_dynamics') !uc/vc only need be same on coarse grid? However BCs do need to be the same call fv_dynamics(npx, npy, npz, nq, Atm(n)%ng, dt_atmos/real(abs(p_split)),& @@ -393,8 +425,9 @@ subroutine atmosphere_dynamics ( Time ) call timing_off('fv_dynamics') if (ngrids > 1 .and. (psc < p_split .or. p_split < 0)) then + call mpp_sync() call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') endif @@ -448,18 +481,22 @@ subroutine atmosphere_end (Time, Grid_box )!rab, Radiation, Physics) !rab type (physics_type), intent(inout) :: Physics ! initialize domains for writing global physics data - call set_domain ( Atm(mytile)%domain ) + call set_domain ( Atm(mygrid)%domain ) + + if ( Atm(mygrid)%flagstruct%nudge ) call fv_nwp_nudge_end + call nullify_domain ( ) if (first_diag) then call timing_on('FV_DIAG') - call fv_diag(Atm(mytile:mytile), zvir, fv_time, Atm(mytile)%flagstruct%print_freq) - call fv_nggps_diag(Atm(mytile:mytile), zvir, fv_time) + call fv_diag(Atm(mygrid:mygrid), zvir, fv_time, Atm(mygrid)%flagstruct%print_freq) + call fv_nggps_diag_init(Atm(mygrid:mygrid), Atm(mygrid)%atmos_axes, fv_time) + call fv_nggps_diag(Atm(mygrid:mygrid), zvir, fv_time) first_diag = .false. call timing_off('FV_DIAG') endif - call fv_end(Atm, grids_on_this_pe) + call fv_end(Atm, mygrid) deallocate (Atm) deallocate( u_dt, v_dt, t_dt, pref, dum1d ) @@ -476,7 +513,7 @@ end subroutine atmosphere_end subroutine atmosphere_restart(timestamp) character(len=*), intent(in) :: timestamp - call fv_write_restart(Atm, grids_on_this_pe, timestamp) + call fv_write_restart(Atm(mygrid), timestamp) end subroutine atmosphere_restart ! 
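! [editor's sketch - illustrative only, not part of this patch] The grid
! accessors in the next hunk (atmosphere_grid_ctr, atmosphere_grid_bdry) all
! copy from the compute domain (isc:iec, jsc:jec) into 1-based local arrays
! with the shift local(i-isc+1, j-jsc+1) = global(i,j). A minimal, runnable
! demonstration of that index shift with hypothetical bounds:
program compute_domain_shift
  implicit none
  integer, parameter :: isc = 13, iec = 16, jsc = 25, jec = 27
  real :: agrid(isc:iec, jsc:jec)    ! field stored on the compute domain
  real :: lon(iec-isc+1, jec-jsc+1)  ! 1-based copy handed back to the coupler
  integer :: i, j
  do j = jsc, jec
     do i = isc, iec
        agrid(i,j) = real(100*i + j)       ! arbitrary test values
     enddo
  enddo
  do j = jsc, jec
     do i = isc, iec
        lon(i-isc+1,j-jsc+1) = agrid(i,j)  ! same shift as atmosphere_grid_ctr
     enddo
  enddo
  print *, lon(1,1) == agrid(isc,jsc), lon(iec-isc+1,jec-jsc+1) == agrid(iec,jec)
end program compute_domain_shift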
@@ -512,19 +549,19 @@ end subroutine atmosphere_pref subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro) integer, intent(out) :: i1, i2, j1, j2, kt logical, intent(out), optional :: p_hydro, hydro - i1 = Atm(mytile)%bd%isc - i2 = Atm(mytile)%bd%iec - j1 = Atm(mytile)%bd%jsc - j2 = Atm(mytile)%bd%jec - kt = Atm(mytile)%npz + i1 = Atm(mygrid)%bd%isc + i2 = Atm(mygrid)%bd%iec + j1 = Atm(mygrid)%bd%jsc + j2 = Atm(mygrid)%bd%jec + kt = Atm(mygrid)%npz - if (present(p_hydro)) p_hydro = Atm(mytile)%flagstruct%phys_hydrostatic - if (present( hydro)) hydro = Atm(mytile)%flagstruct%hydrostatic + if (present(p_hydro)) p_hydro = Atm(mygrid)%flagstruct%phys_hydrostatic + if (present( hydro)) hydro = Atm(mygrid)%flagstruct%hydrostatic end subroutine atmosphere_control_data - subroutine atmosphere_grid_center (lon, lat) + subroutine atmosphere_grid_ctr (lon, lat) !--------------------------------------------------------------- ! returns the longitude and latitude cell centers !--------------------------------------------------------------- @@ -534,15 +571,15 @@ subroutine atmosphere_grid_center (lon, lat) do j=jsc,jec do i=isc,iec - lon(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%agrid_64(i,j,1) - lat(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%agrid_64(i,j,2) + lon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,1) + lat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,2) enddo end do - end subroutine atmosphere_grid_center + end subroutine atmosphere_grid_ctr - subroutine atmosphere_boundary (blon, blat, global) + subroutine atmosphere_grid_bdry (blon, blat, global) !--------------------------------------------------------------- ! returns the longitude and latitude grid box edges ! for either the local PEs grid (default) or the global grid @@ -559,39 +596,35 @@ subroutine atmosphere_boundary (blon, blat, global) do j=jsc,jec+1 do i=isc,iec+1 - blon(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%grid(i,j,1) - blat(i-isc+1,j-jsc+1) = Atm(mytile)%gridstruct%grid(i,j,2) + blon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,1) + blat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,2) enddo end do - end subroutine atmosphere_boundary + end subroutine atmosphere_grid_bdry subroutine set_atmosphere_pelist () - call mpp_set_current_pelist(Atm(mytile)%pelist, no_sync=.TRUE.) + call mpp_set_current_pelist(Atm(mygrid)%pelist, no_sync=.TRUE.) end subroutine set_atmosphere_pelist - subroutine atmosphere_domain ( fv_domain ) + subroutine atmosphere_domain ( fv_domain, layout, regional ) type(domain2d), intent(out) :: fv_domain + integer, intent(out) :: layout(2) + logical, intent(out) :: regional ! returns the domain2d variable associated with the coupling grid ! 
note: coupling is done using the mass/temperature grid with no halos - fv_domain = Atm(mytile)%domain_for_coupler - - end subroutine atmosphere_domain + fv_domain = Atm(mygrid)%domain_for_coupler + layout(1:2) = Atm(mygrid)%layout(1:2) + regional = Atm(mygrid)%flagstruct%regional - subroutine get_atmosphere_grid (dxmax, dxmin) - real(kind=R_GRID), intent(out) :: dxmax, dxmin - - dxmax = Atm(mytile)%gridstruct%da_max - dxmin = Atm(mytile)%gridstruct%da_min - - end subroutine get_atmosphere_grid + end subroutine atmosphere_domain - subroutine get_atmosphere_axes ( axes ) + subroutine atmosphere_diag_axes ( axes ) integer, intent(out) :: axes (:) !----- returns the axis indices for the atmospheric (mass) grid ----- @@ -599,10 +632,234 @@ subroutine get_atmosphere_axes ( axes ) 'get_atmosphere_axes in atmosphere_mod', & 'size of argument is incorrect', FATAL ) - axes (1:size(axes(:))) = Atm(mytile)%atmos_axes (1:size(axes(:))) + axes (1:size(axes(:))) = Atm(mygrid)%atmos_axes (1:size(axes(:))) + + end subroutine atmosphere_diag_axes + + + subroutine atmosphere_etalvls (ak, bk, flip) + real(kind=kind_phys), pointer, dimension(:), intent(inout) :: ak, bk + logical, intent(in) :: flip + + allocate(ak(npz+1)) + allocate(bk(npz+1)) + + if (flip) then + ak(1:npz+1) = Atm(mygrid)%ak(npz+1:1:-1) + bk(1:npz+1) = Atm(mygrid)%bk(npz+1:1:-1) + else + ak(1:npz+1) = Atm(mygrid)%ak(1:npz+1) + bk(1:npz+1) = Atm(mygrid)%bk(1:npz+1) + endif + end subroutine atmosphere_etalvls + + + subroutine atmosphere_hgt (hgt, position, relative, flip) + real(kind=kind_phys), pointer, dimension(:,:,:), intent(inout) :: hgt + character(len=5), intent(in) :: position + logical, intent(in) :: relative + logical, intent(in) :: flip + !--- local variables --- + integer:: lev, k, j, i + real(kind=kind_phys), allocatable, dimension(:,:,:) :: z, dz - end subroutine get_atmosphere_axes + if ((position .ne. "layer") .and. (position .ne. "level")) then + call mpp_error (FATAL, 'atmosphere_hgt:: incorrect position specification') + endif + + allocate(z(iec-isc+1,jec-jsc+1,npz+1)) + allocate(dz(iec-isc+1,jec-jsc+1,npz)) + z = 0 + dz = 0 + + if (Atm(mygrid)%flagstruct%hydrostatic) then + !--- generate dz using hydrostatic assumption + do j = jsc, jec + do i = isc, iec + dz(i-isc+1,j-jsc+1,1:npz) = (rdgas/grav)*Atm(mygrid)%pt(i,j,1:npz) & + * (Atm(mygrid)%peln(i,1:npz,j) - Atm(mygrid)%peln(i,2:npz+1,j)) + enddo + enddo + else + !--- use non-hydrostatic delz directly + do j = jsc, jec + do i = isc, iec + dz(i-isc+1,j-jsc+1,1:npz) = Atm(mygrid)%delz(i,j,1:npz) + enddo + enddo + endif + + !--- calculate geometric heights at the interfaces (levels) + !--- if needed, flip the indexing during this step + if (flip) then + if (.not. relative) then + z(:,:,1) = Atm(mygrid)%phis(:,:)/grav + endif + do k = 2,npz+1 + z(:,:,k) = z(:,:,k-1) - dz(:,:,npz+2-k) + enddo + else + if (.not. relative) then + z(:,:,npz+1) = Atm(mygrid)%phis(:,:)/grav + endif + do k = npz,1,-1 + z(:,:,k) = z(:,:,k+1) - dz(:,:,k) + enddo + endif + + !--- allocate and set either the level or layer height for return + if (position == "level") then + allocate (hgt(iec-isc+1,jec-jsc+1,npz+1)) + hgt = z + elseif (position == "layer") then + allocate (hgt(iec-isc+1,jec-jsc+1,npz)) + hgt(:,:,1:npz) = 0.5d0 * (z(:,:,1:npz) + z(:,:,2:npz+1)) + endif + + deallocate (z) + deallocate (dz) + + end subroutine atmosphere_hgt + + + subroutine atmosphere_scalar_field_halo (data, halo, isize, jsize, ksize, data_p) + !-------------------------------------------------------------------- + ! 
data - output array to return the field with halo (i,j,k)
+ ! optionally input for field already in (i,j,k) form
+ ! sized to include the halo of the field (+ 2*halo)
+ ! halo - size of the halo (1 or 3; must not be greater than 3)
+ ! isize - horizontal resolution in i-dir including haloes
+ ! jsize - horizontal resolution in j-dir including haloes
+ ! ksize - vertical resolution
+ ! data_p - optional input field in packed format (ix,k)
+ !--------------------------------------------------------------------
+ !--- interface variables ---
+ real(kind=kind_phys), dimension(1:isize,1:jsize,ksize), intent(inout) :: data
+ integer, intent(in) :: halo
+ integer, intent(in) :: isize
+ integer, intent(in) :: jsize
+ integer, intent(in) :: ksize
+ real(kind=kind_phys), dimension(:,:), optional, intent(in) :: data_p
+ !--- local variables ---
+ integer :: i, j, k
+ integer :: ic, jc
+ character(len=44) :: modname = 'atmosphere_mod::atmosphere_scalar_field_halo'
+ integer :: mpp_flags
+
+ !--- perform error checking
+ if (halo .gt. 3) call mpp_error(FATAL, modname//' - halo.gt.3 requires extending the MPP domain')
+ ic = isize - 2 * halo
+ jc = jsize - 2 * halo
+
+ !--- if packed data is present, unpack it into the three-dimensional data array
+ if (present(data_p)) then
+ if (ic*jc .ne. size(data_p,1)) call mpp_error(FATAL, modname//' - incorrect sizes for incoming &
+ &variables data and data_p')
+ data = 0.
+!$OMP parallel do default (none) &
+!$OMP shared (data, data_p, halo, ic, jc, ksize) &
+!$OMP private (i, j, k)
+ do k = 1, ksize
+ do j = 1, jc
+ do i = 1, ic
+ data(i+halo, j+halo, k) = data_p(i + (j-1)*ic, k)
+ enddo
+ enddo
+ enddo
+ endif
+
+ mpp_flags = EUPDATE + WUPDATE + SUPDATE + NUPDATE
+ if (halo == 1) then
+ call mpp_update_domains(data, Atm(mygrid)%domain_for_coupler, flags=mpp_flags, complete=.true.)
+ elseif (halo == 3) then
+ call mpp_update_domains(data, Atm(mygrid)%domain, flags=mpp_flags, complete=.true.)
+ else
+ call mpp_error(FATAL, modname//' - unsupported halo size')
+ endif
+
+ !--- fill the halo points when at a corner of the cubed-sphere tile
+ !--- interior domain corners are handled correctly
+ if ( (isc==1) .or. (jsc==1) .or. (iec==npx-1) .or. (jec==npy-1) ) then
+ do k = 1, ksize
+ do j=1,halo
+ do i=1,halo
+ if ((isc== 1) .and. (jsc== 1)) data(halo+1-j ,halo+1-i ,k) = data(halo+i ,halo+1-j ,k) !SW Corner
+ if ((isc== 1) .and. (jec==npy-1)) data(halo+1-j ,halo+jc+i,k) = data(halo+i ,halo+jc+j,k) !NW Corner
+ if ((iec==npx-1) .and. (jsc== 1)) data(halo+ic+j,halo+1-i ,k) = data(halo+ic-i+1,halo+1-j ,k) !SE Corner
+ if ((iec==npx-1) .and. (jec==npy-1)) data(halo+ic+j,halo+jc+i,k) = data(halo+ic-i+1,halo+jc+j,k) !NE Corner
+ enddo
+ enddo
+ enddo
+ endif
+
+ return
+ end subroutine atmosphere_scalar_field_halo
+
+
+ subroutine atmosphere_nggps_diag (Time, init)
+ !----------------------------------------------
+ ! api for output of NCEP/EMC diagnostics
+ !
+ ! if init is present and set to .true.
+ ! will make the initialization call
+ !
+ ! outputs 3D state fields via either
+ ! NCEP write_component or GFDL/FMS diag_manager
+ !----------------------------------------------
+ type(time_type), intent(in) :: Time
+ logical, optional, intent(in) :: init
+
+ if (PRESENT(init)) then
+ if (init) 
then + call fv_nggps_diag_init(Atm(mygrid:mygrid), Atm(mygrid)%atmos_axes, Time) + return + else + call mpp_error(FATAL, 'atmosphere_nggps_diag - calling with init present, but set to .false.') + endif + endif + call fv_nggps_diag(Atm(mygrid:mygrid), zvir, Time) + + end subroutine atmosphere_nggps_diag + + +!--- Need to know the formulation of the mixing ratio being imported into FV3 +!--- in order to adjust it in a consistent manner for advection +!rab subroutine atmosphere_tracer_postinit (IPD_Data, Atm_block) +!rab !--- interface variables --- +!rab type(IPD_data_type), intent(in) :: IPD_Data(:) +!rab type(block_control_type), intent(in) :: Atm_block +!rab !--- local variables --- +!rab integer :: i, j, ix, k, k1, n, nwat, nb, blen +!rab real(kind=kind_phys) :: qwat +!rab +!rab if( nq<3 ) call mpp_error(FATAL, 'GFS phys must have 3 interactive tracers') +!rab +!rab n = mygrid +!rab nwat = Atm(n)%flagstruct%nwat +!rab +!rab!$OMP parallel do default (none) & +!rab!$OMP shared (Atm_block, Atm, IPD_Data, npz, nq, ncnst, n, nwat) & +!rab!$OMP private (nb, k, k1, ix, i, j, qwat) +!rab do nb = 1,Atm_block%nblks +!rab do k = 1, npz +!rab k1 = npz+1-k !reverse the k direction +!rab do ix = 1, Atm_block%blksz(nb) +!rab i = Atm_block%index(nb)%ii(ix) +!rab j = Atm_block%index(nb)%jj(ix) +!rab qwat = sum(Atm(n)%q(i,j,k1,1:nwat)) +!rab Atm(n)%q(i,j,k1,1:nq) = Atm(n)%q(i,j,k1,1:nq) + IPD_Data(nb)%Stateout%gq0(ix,k,1:nq) * (1.0 - qwat) +!rab if (nq .gt. ncnst) then +!rab Atm(n)%qdiag(i,j,k1,nq+1:ncnst) = Atm(n)%qdiag(i,j,k1,nq+1:ncnst) + IPD_Data(nb)%Stateout%gq0(ix,k,nq+1:ncnst) +!rab endif +!rab enddo +!rab enddo +!rab enddo +!rab +!rab call mpp_update_domains (Atm(n)%q, Atm(n)%domain, complete=.true.) +!rab +!rab return +!rab end subroutine atmosphere_tracer_postinit subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) @@ -622,19 +879,19 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do j=jsc,jec do i=isc,iec - p_surf(i,j) = Atm(mytile)%ps(i,j) - t_bot(i,j) = Atm(mytile)%pt(i,j,npz) - p_bot(i,j) = Atm(mytile)%delp(i,j,npz)/(Atm(mytile)%peln(i,npz+1,j)-Atm(mytile)%peln(i,npz,j)) - z_bot(i,j) = rrg*t_bot(i,j)*(1.+zvir*Atm(mytile)%q(i,j,npz,1)) * & - (1. - Atm(mytile)%pe(i,npz,j)/p_bot(i,j)) + p_surf(i,j) = Atm(mygrid)%ps(i,j) + t_bot(i,j) = Atm(mygrid)%pt(i,j,npz) + p_bot(i,j) = Atm(mygrid)%delp(i,j,npz)/(Atm(mygrid)%peln(i,npz+1,j)-Atm(mygrid)%peln(i,npz,j)) + z_bot(i,j) = rrg*t_bot(i,j)*(1.+zvir*Atm(mygrid)%q(i,j,npz,1)) * & + (1. - Atm(mygrid)%pe(i,npz,j)/p_bot(i,j)) enddo enddo if ( present(slp) ) then ! determine 0.8 sigma reference level - sigtop = Atm(mytile)%ak(1)/pstd_mks+Atm(mytile)%bk(1) + sigtop = Atm(mygrid)%ak(1)/pstd_mks+Atm(mygrid)%bk(1) do k = 1, npz - sigbot = Atm(mytile)%ak(k+1)/pstd_mks+Atm(mytile)%bk(k+1) + sigbot = Atm(mygrid)%ak(k+1)/pstd_mks+Atm(mygrid)%bk(k+1) if (sigbot+sigtop > 1.6) then kr = k exit @@ -644,9 +901,9 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do j=jsc,jec do i=isc,iec ! 
sea level pressure - tref(i,j) = Atm(mytile)%pt(i,j,kr) * (Atm(mytile)%delp(i,j,kr)/ & - ((Atm(mytile)%peln(i,kr+1,j)-Atm(mytile)%peln(i,kr,j))*Atm(mytile)%ps(i,j)))**(-rrg*tlaps) - slp(i,j) = Atm(mytile)%ps(i,j)*(1.+tlaps*Atm(mytile)%phis(i,j)/(tref(i,j)*grav))**(1./(rrg*tlaps)) + tref(i,j) = Atm(mygrid)%pt(i,j,kr) * (Atm(mygrid)%delp(i,j,kr)/ & + ((Atm(mygrid)%peln(i,kr+1,j)-Atm(mygrid)%peln(i,kr,j))*Atm(mygrid)%ps(i,j)))**(-rrg*tlaps) + slp(i,j) = Atm(mygrid)%ps(i,j)*(1.+tlaps*Atm(mygrid)%phis(i,j)/(tref(i,j)*grav))**(1./(rrg*tlaps)) enddo enddo endif @@ -655,7 +912,7 @@ subroutine get_bottom_mass ( t_bot, tr_bot, p_bot, z_bot, p_surf, slp ) do m=1,nq do j=jsc,jec do i=isc,iec - tr_bot(i,j,m) = Atm(mytile)%q(i,j,npz,m) + tr_bot(i,j,m) = Atm(mygrid)%q(i,j,npz,m) enddo enddo enddo @@ -672,8 +929,8 @@ subroutine get_bottom_wind ( u_bot, v_bot ) do j=jsc,jec do i=isc,iec - u_bot(i,j) = Atm(mytile)%u_srf(i,j) - v_bot(i,j) = Atm(mytile)%v_srf(i,j) + u_bot(i,j) = Atm(mygrid)%u_srf(i,j) + v_bot(i,j) = Atm(mygrid)%v_srf(i,j) enddo enddo @@ -693,7 +950,7 @@ subroutine get_stock_pe(index, value) integer i,j,k real, pointer :: area(:,:) - area => Atm(mytile)%gridstruct%area + area => Atm(mygrid)%gridstruct%area select case (index) @@ -711,9 +968,9 @@ subroutine get_stock_pe(index, value) do k=1,npz do i=isc,iec ! Warning: the following works only with AM2 physics: water vapor; cloud water, cloud ice. - wm(i,j) = wm(i,j) + Atm(mytile)%delp(i,j,k) * ( Atm(mytile)%q(i,j,k,1) + & - Atm(mytile)%q(i,j,k,2) + & - Atm(mytile)%q(i,j,k,3) ) + wm(i,j) = wm(i,j) + Atm(mygrid)%delp(i,j,k) * ( Atm(mygrid)%q(i,j,k,1) + & + Atm(mygrid)%q(i,j,k,2) + & + Atm(mygrid)%q(i,j,k,3) ) enddo enddo enddo @@ -737,43 +994,60 @@ end subroutine get_stock_pe subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) + !--- interface variables --- type(time_type), intent(in) :: Time type(IPD_data_type), intent(in) :: IPD_Data(:) type(block_control_type), intent(in) :: Atm_block + !--- local variables --- type(time_type) :: Time_prev, Time_next -!--- local variables --- integer :: i, j, ix, k, k1, n, w_diff, nt_dyn, iq - integer :: nb, blen, nwat, dnats + integer :: nb, blen, nwat, dnats, nq_adv real(kind=kind_phys):: rcp, q0, qwat(nq), qt, rdt + character(len=32) :: tracer_name Time_prev = Time Time_next = Time + Time_step_atmos rdt = 1.d0 / dt_atmos - n = mytile + n = mygrid nwat = Atm(n)%flagstruct%nwat - dnats = Atm(mytile)%flagstruct%dnats + dnats = Atm(mygrid)%flagstruct%dnats + nq_adv = nq - dnats + nt_dyn = ncnst-pnats !nothing more than nq if( nq<3 ) call mpp_error(FATAL, 'GFS phys must have 3 interactive tracers') - call set_domain ( Atm(mytile)%domain ) + call set_domain ( Atm(mygrid)%domain ) call timing_on('GFS_TENDENCIES') + + call atmos_phys_qdt_diag(Atm(n)%q, Atm(n)%phys_diag, nt_dyn, dt_atmos, .true.) + !--- put u/v tendencies into haloed arrays u_dt and v_dt -!$OMP parallel do default (none) & -!$OMP shared (rdt, n, nq, dnats, npz, ncnst, nwat, mytile, u_dt, v_dt, t_dt,& +!$OMP parallel do default (none) & +!$OMP shared (rdt, n, nq, dnats, npz, ncnst, nwat, mygrid, u_dt, v_dt, t_dt,& !$OMP Atm, IPD_Data, Atm_block, sphum, liq_wat, rainwat, ice_wat, & -!$OMP snowwat, graupel) & -!$OMP private (nb, blen, i, j, k, k1, ix, q0, qwat, qt) +!$OMP snowwat, graupel, nq_adv) & +!$OMP private (nb, blen, i, j, k, k1, ix, q0, qwat, qt,tracer_name) do nb = 1,Atm_block%nblks !SJL: perform vertical filling to fix the negative humidity if the SAS convection scheme is used ! 
This call may be commented out if RAS or other positivity-preserving CPS is used. blen = Atm_block%blksz(nb) - call fill_gfs(blen, npz, IPD_Data(nb)%Statein%prsi, IPD_Data(nb)%Stateout%gq0, 1.e-9_kind_phys) + if (Atm(n)%flagstruct%fill_gfs) call fill_gfs(blen, npz, IPD_Data(nb)%Statein%prsi, IPD_Data(nb)%Stateout%gq0, 1.e-9_kind_phys) + +!LMH 28sep18: If the name of a tracer ends in 'nopbl' then do NOT update it; + !override this by setting Stateout%gq0(:,:,iq) to the input value + do iq = 1, nq + call get_tracer_names (MODEL_ATMOS, iq, tracer_name) + if (index(tracer_name, 'nopbl') > 0) then + IPD_Data(nb)%Stateout%gq0(:,:,iq) = IPD_Data(nb)%Statein%qgrs(:,:,iq) + endif + enddo + do k = 1, npz - k1 = npz+1-k !reverse the k direction + k1 = npz+1-k !reverse the k direction do ix = 1, blen i = Atm_block%index(nb)%ii(ix) j = Atm_block%index(nb)%jj(ix) @@ -783,20 +1057,21 @@ subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) ! SJL notes: ! ---- DO not touch the code below; dry mass conservation may change due to 64bit <-> 32bit conversion ! GFS total air mass = dry_mass + water_vapor (condensate excluded) -! GFS mixing ratios = tracer_mass / (air_mass + vapor_mass) +! GFS mixing ratios = tracer_mass / (dry_mass + vapor_mass) ! FV3 total air mass = dry_mass + [water_vapor + condensate ] ! FV3 mixing ratios = tracer_mass / (dry_mass+vapor_mass+cond_mass) q0 = IPD_Data(nb)%Statein%prsi(ix,k) - IPD_Data(nb)%Statein%prsi(ix,k+1) - qwat(1:nq-dnats) = q0*IPD_Data(nb)%Stateout%gq0(ix,k,1:nq-dnats) + qwat(1:nq_adv) = q0*IPD_Data(nb)%Stateout%gq0(ix,k,1:nq_adv) ! ********************************************************************************************************** ! Dry mass: the following way of updating delp is key to mass conservation with hybrid 32-64 bit computation ! ********************************************************************************************************** -! The following example is for 2 water species. +! The following example is for 2 water species. ! q0 = Atm(n)%delp(i,j,k1)*(1.-(Atm(n)%q(i,j,k1,1)+Atm(n)%q(i,j,k1,2))) + q1 + q2 qt = sum(qwat(1:nwat)) - q0 = Atm(n)%delp(i,j,k1)*(1.-sum(Atm(n)%q(i,j,k1,1:nwat))) + qt + q0 = Atm(n)%delp(i,j,k1)*(1.-sum(Atm(n)%q(i,j,k1,1:nwat))) + qt Atm(n)%delp(i,j,k1) = q0 - Atm(n)%q(i,j,k1,1:nq-dnats) = qwat(1:nq-dnats) / q0 + Atm(n)%q(i,j,k1,1:nq_adv) = qwat(1:nq_adv) / q0 + if (dnats .gt. 0) Atm(n)%q(i,j,k1,nq_adv+1:nq) = IPD_Data(nb)%Stateout%gq0(ix,k,nq_adv+1:nq) enddo enddo @@ -819,21 +1094,21 @@ subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) !--- See Note in statein... do iq = nq+1, ncnst do k = 1, npz - k1 = npz+1-k !reverse the k direction + k1 = npz+1-k !reverse the k direction do ix = 1, blen i = Atm_block%index(nb)%ii(ix) j = Atm_block%index(nb)%jj(ix) - Atm(mytile)%qdiag(i,j,k1,iq) = IPD_Data(nb)%Stateout%gq0(ix,k,iq) + Atm(mygrid)%qdiag(i,j,k1,iq) = IPD_Data(nb)%Stateout%gq0(ix,k,iq) enddo enddo enddo enddo ! nb-loop + call atmos_phys_qdt_diag(Atm(n)%q, Atm(n)%phys_diag, nt_dyn, dt_atmos, .false.) 
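The delp/tracer update in this loop is easier to see in isolation: moist physics may change the water masses of a layer but must leave its dry-air mass untouched, so the new layer mass is rebuilt from the conserved dry mass plus the updated water masses, and only then are the mixing ratios re-normalized. A minimal single-cell sketch of that bookkeeping (all numbers illustrative; two water species, as in the comment above):

    program dry_mass_demo
      implicit none
      integer, parameter :: nwat = 2
      real :: delp, q(nwat), qwat(nwat), dry0, qt

      delp = 900.0                 ! layer mass (Pa), FV3 convention: dry + all water
      q    = (/ 5.0e-3, 1.0e-4 /)  ! FV3 mixing ratios: tracer mass / total mass
      qwat = (/ 5.4, 0.2 /)        ! updated water masses from physics (Pa), i.e. q0*gq0 above

      dry0 = delp*(1. - sum(q))    ! dry-air mass; physics must not change this
      qt   = sum(qwat)
      delp = dry0 + qt             ! rebuild total mass from the conserved dry mass
      q    = qwat/delp             ! back to FV3 convention (ratio to total mass)

      ! by construction the dry mass is exactly recovered:
      print *, 'dry mass before/after:', dry0, delp*(1. - sum(q))
    end program dry_mass_demo
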
call timing_off('GFS_TENDENCIES') w_diff = get_tracer_index (MODEL_ATMOS, 'w_diff' ) - nt_dyn = ncnst-pnats !nothing more than nq if ( w_diff /= NO_TRACER ) then nt_dyn = nt_dyn - 1 endif @@ -871,21 +1146,22 @@ subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) .true., Time_next, Atm(n)%flagstruct%nudge, Atm(n)%gridstruct, & Atm(n)%gridstruct%agrid(:,:,1), Atm(n)%gridstruct%agrid(:,:,2), & Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct, & - Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop) + Atm(n)%neststruct, Atm(n)%bd, Atm(n)%domain, Atm(n)%ptop, Atm(n)%phys_diag) call timing_off('FV_UPDATE_PHYS') call mpp_clock_end (id_dynam) !--- nesting update after updating atmospheric variables with !--- physics tendencies if (ngrids > 1 .and. p_split > 0) then + call mpp_sync() call timing_on('TWOWAY_UPDATE') - call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) + call twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, fv_time, mygrid) call timing_off('TWOWAY_UPDATE') - endif + endif call nullify_domain() !---- diagnostics for FV dynamics ----- - if (Atm(mytile)%flagstruct%print_freq /= -99) then + if (Atm(mygrid)%flagstruct%print_freq /= -99) then call mpp_clock_begin(id_fv_diag) fv_time = Time_next @@ -893,16 +1169,8 @@ subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) call nullify_domain() call timing_on('FV_DIAG') - call fv_diag(Atm(mytile:mytile), zvir, fv_time, Atm(mytile)%flagstruct%print_freq) + call fv_diag(Atm(mygrid:mygrid), zvir, fv_time, Atm(mygrid)%flagstruct%print_freq) first_diag = .false. - - fv_time = Time_next - Atm(n)%Time_init - call get_time (fv_time, seconds, days) - !--- perform diagnostics on GFS fdiag schedule - if (ANY(Atm(mytile)%fdiag(:) == (real(days)*24. + real(seconds)/3600.))) then - if (mpp_pe() == mpp_root_pe()) write(6,*) 'NGGPS:FV3 DIAG STEP', (real(days)*24. + real(seconds)/3600.) - call fv_nggps_diag(Atm(mytile:mytile), zvir, Time_next) - endif call timing_off('FV_DIAG') call mpp_clock_end(id_fv_diag) @@ -911,9 +1179,10 @@ subroutine atmosphere_state_update (Time, IPD_Data, Atm_block) end subroutine atmosphere_state_update - subroutine adiabatic_init(zvir) - real, allocatable, dimension(:,:,:):: u0, v0, t0, dp0 + subroutine adiabatic_init(zvir,nudge_dz) + real, allocatable, dimension(:,:,:):: u0, v0, t0, dz0, dp0 real, intent(in):: zvir + logical, intent(inout):: nudge_dz ! real, parameter:: wt = 1. ! was 2. real, parameter:: wt = 2. 
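In the adiabatic_init passes below, every nudging update has the form x = xt*(x + wt*x0) with xt = 1/(1+wt), i.e. a weighted mean of the current state and the initial condition; each application shrinks the departure from the IC by the factor 1/(1+wt) (a third, for wt = 2). A small sketch of that contraction (values illustrative):

    program nudge_weight_demo
      implicit none
      real, parameter :: wt = 2., xt = 1./(1.+wt)   ! same weights as below
      real :: x0, x
      integer :: m
      x0 = 280.                    ! initial-condition value
      x  = 286.                    ! value drifted by a forward-backward dynamics pass
      do m = 1, 4
         x = xt*(x + wt*x0)        ! weighted mean pulling x toward the IC
         print *, m, x - x0        ! departure shrinks by the factor xt each pass
      enddo
    end program nudge_weight_demo
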
!*********** @@ -933,18 +1202,18 @@ subroutine adiabatic_init(zvir) xt = 1./(1.+wt) - write(errstr,'(A, I4, A)') 'Performing adiabatic init', Atm(mytile)%flagstruct%na_init, ' times' + write(errstr,'(A, I4, A)') 'Performing adiabatic init', Atm(mygrid)%flagstruct%na_init, ' times' call mpp_error(NOTE, errstr) sphum = get_tracer_index (MODEL_ATMOS, 'sphum' ) - npz = Atm(mytile)%npz + npz = Atm(mygrid)%npz - isc = Atm(mytile)%bd%isc - iec = Atm(mytile)%bd%iec - jsc = Atm(mytile)%bd%jsc - jec = Atm(mytile)%bd%jec + isc = Atm(mygrid)%bd%isc + iec = Atm(mygrid)%bd%iec + jsc = Atm(mygrid)%bd%jsc + jec = Atm(mygrid)%bd%jec - ngc = Atm(mytile)%ng + ngc = Atm(mygrid)%ng isd = isc - ngc ied = iec + ngc jsd = jsc - ngc @@ -955,165 +1224,201 @@ subroutine adiabatic_init(zvir) allocate ( u0(isc:iec, jsc:jec+1, npz) ) allocate ( v0(isc:iec+1,jsc:jec, npz) ) - allocate ( t0(isc:iec,jsc:jec, npz) ) allocate (dp0(isc:iec,jsc:jec, npz) ) -!$omp parallel do default (none) & -!$omp shared (npz, jsc, jec, isc, iec, n, sphum, u0, v0, t0, dp0, Atm, zvir, mytile) & -!$omp private (k, j, i) + if ( Atm(mygrid)%flagstruct%hydrostatic ) nudge_dz = .false. + + if ( nudge_dz ) then + allocate (dz0(isc:iec,jsc:jec, npz) ) + else + allocate ( t0(isc:iec,jsc:jec, npz) ) + endif + +!$omp parallel do default (none) & +!$omp shared (nudge_dz, npz, jsc, jec, isc, iec, n, sphum, u0, v0, t0, dz0, dp0, Atm, zvir, mygrid) & +!$omp private (k, j, i) do k=1,npz do j=jsc,jec+1 do i=isc,iec - u0(i,j,k) = Atm(mytile)%u(i,j,k) + u0(i,j,k) = Atm(mygrid)%u(i,j,k) enddo enddo do j=jsc,jec do i=isc,iec+1 - v0(i,j,k) = Atm(mytile)%v(i,j,k) + v0(i,j,k) = Atm(mygrid)%v(i,j,k) enddo enddo - do j=jsc,jec - do i=isc,iec - t0(i,j,k) = Atm(mytile)%pt(i,j,k)*(1.+zvir*Atm(mytile)%q(i,j,k,sphum)) ! virt T - dp0(i,j,k) = Atm(mytile)%delp(i,j,k) + if ( nudge_dz ) then + do j=jsc,jec + do i=isc,iec + dp0(i,j,k) = Atm(mygrid)%delp(i,j,k) + dz0(i,j,k) = Atm(mygrid)%delz(i,j,k) + enddo enddo - enddo + else + do j=jsc,jec + do i=isc,iec + t0(i,j,k) = Atm(mygrid)%pt(i,j,k)*(1.+zvir*Atm(mygrid)%q(i,j,k,sphum)) ! virt T + dp0(i,j,k) = Atm(mygrid)%delp(i,j,k) + enddo + enddo + endif enddo - do m=1,Atm(mytile)%flagstruct%na_init + do m=1,Atm(mygrid)%flagstruct%na_init ! 
Forward call - call fv_dynamics(Atm(mytile)%npx, Atm(mytile)%npy, npz, nq, Atm(mytile)%ng, dt_atmos, 0., & - Atm(mytile)%flagstruct%fill, Atm(mytile)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & - Atm(mytile)%ptop, Atm(mytile)%ks, nq, Atm(mytile)%flagstruct%n_split, & - Atm(mytile)%flagstruct%q_split, Atm(mytile)%u, Atm(mytile)%v, Atm(mytile)%w, & - Atm(mytile)%delz, Atm(mytile)%flagstruct%hydrostatic, & - Atm(mytile)%pt, Atm(mytile)%delp, Atm(mytile)%q, Atm(mytile)%ps, & - Atm(mytile)%pe, Atm(mytile)%pk, Atm(mytile)%peln, Atm(mytile)%pkz, Atm(mytile)%phis, & - Atm(mytile)%q_con, Atm(mytile)%omga, Atm(mytile)%ua, Atm(mytile)%va, Atm(mytile)%uc, Atm(mytile)%vc, & - Atm(mytile)%ak, Atm(mytile)%bk, Atm(mytile)%mfx, Atm(mytile)%mfy, & - Atm(mytile)%cx, Atm(mytile)%cy, Atm(mytile)%ze0, Atm(mytile)%flagstruct%hybrid_z, & - Atm(mytile)%gridstruct, Atm(mytile)%flagstruct, & - Atm(mytile)%neststruct, Atm(mytile)%idiag, Atm(mytile)%bd, Atm(mytile)%parent_grid, & - Atm(mytile)%domain) + call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & + Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & + Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & + Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & + Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & + Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & + Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%mfx, Atm(mygrid)%mfy, & + Atm(mygrid)%cx, Atm(mygrid)%cy, Atm(mygrid)%ze0, Atm(mygrid)%flagstruct%hybrid_z, & + Atm(mygrid)%gridstruct, Atm(mygrid)%flagstruct, & + Atm(mygrid)%neststruct, Atm(mygrid)%idiag, Atm(mygrid)%bd, Atm(mygrid)%parent_grid, & + Atm(mygrid)%domain) ! 
Backward - call fv_dynamics(Atm(mytile)%npx, Atm(mytile)%npy, npz, nq, Atm(mytile)%ng, -dt_atmos, 0., & - Atm(mytile)%flagstruct%fill, Atm(mytile)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & - Atm(mytile)%ptop, Atm(mytile)%ks, nq, Atm(mytile)%flagstruct%n_split, & - Atm(mytile)%flagstruct%q_split, Atm(mytile)%u, Atm(mytile)%v, Atm(mytile)%w, & - Atm(mytile)%delz, Atm(mytile)%flagstruct%hydrostatic, & - Atm(mytile)%pt, Atm(mytile)%delp, Atm(mytile)%q, Atm(mytile)%ps, & - Atm(mytile)%pe, Atm(mytile)%pk, Atm(mytile)%peln, Atm(mytile)%pkz, Atm(mytile)%phis, & - Atm(mytile)%q_con, Atm(mytile)%omga, Atm(mytile)%ua, Atm(mytile)%va, Atm(mytile)%uc, Atm(mytile)%vc, & - Atm(mytile)%ak, Atm(mytile)%bk, Atm(mytile)%mfx, Atm(mytile)%mfy, & - Atm(mytile)%cx, Atm(mytile)%cy, Atm(mytile)%ze0, Atm(mytile)%flagstruct%hybrid_z, & - Atm(mytile)%gridstruct, Atm(mytile)%flagstruct, & - Atm(mytile)%neststruct, Atm(mytile)%idiag, Atm(mytile)%bd, Atm(mytile)%parent_grid, & - Atm(mytile)%domain) + call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & + Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & + Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & + Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & + Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & + Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & + Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%mfx, Atm(mygrid)%mfy, & + Atm(mygrid)%cx, Atm(mygrid)%cy, Atm(mygrid)%ze0, Atm(mygrid)%flagstruct%hybrid_z, & + Atm(mygrid)%gridstruct, Atm(mygrid)%flagstruct, & + Atm(mygrid)%neststruct, Atm(mygrid)%idiag, Atm(mygrid)%bd, Atm(mygrid)%parent_grid, & + Atm(mygrid)%domain) ! Nudging back to IC !$omp parallel do default (none) & -!$omp shared (pref, q00, p00,npz, jsc, jec, isc, iec, n, sphum, Atm, u0, v0, t0, dp0, xt, zvir, mytile) & -!$omp private (i, j, k) +!$omp shared (pref, npz, jsc, jec, isc, iec, n, sphum, Atm, u0, v0, t0, dp0, xt, zvir, mygrid, nudge_dz, dz0) & +!$omp private (i, j, k, p00, q00) do k=1,npz do j=jsc,jec+1 do i=isc,iec - Atm(mytile)%u(i,j,k) = xt*(Atm(mytile)%u(i,j,k) + wt*u0(i,j,k)) + Atm(mygrid)%u(i,j,k) = xt*(Atm(mygrid)%u(i,j,k) + wt*u0(i,j,k)) enddo enddo do j=jsc,jec do i=isc,iec+1 - Atm(mytile)%v(i,j,k) = xt*(Atm(mytile)%v(i,j,k) + wt*v0(i,j,k)) + Atm(mygrid)%v(i,j,k) = xt*(Atm(mygrid)%v(i,j,k) + wt*v0(i,j,k)) enddo enddo - if( Atm(mytile)%flagstruct%nudge_qv ) then + if( Atm(mygrid)%flagstruct%nudge_qv ) then ! SJL note: Nudging water vaport towards HALOE climatology: ! In case of better IC (IFS) this step may not be necessary - p00 = Atm(mytile)%pe(isc,k,jsc) - if ( p00 < 30.E2 ) then - if ( p00 < 1. ) then - q00 = q1_h2o - elseif ( p00 <= 7. .and. p00 >= 1. ) then - q00 = q1_h2o + (q7_h2o-q1_h2o)*log(pref(k,1)/1.)/log(7.) - elseif ( p00 < 100. .and. p00 >= 7. ) then - q00 = q7_h2o + (q100_h2o-q7_h2o)*log(pref(k,1)/7.)/log(100./7.) - elseif ( p00 < 1000. .and. p00 >= 100. ) then - q00 = q100_h2o + (q1000_h2o-q100_h2o)*log(pref(k,1)/1.E2)/log(10.) - elseif ( p00 < 2000. .and. p00 >= 1000. ) then - q00 = q1000_h2o + (q2000_h2o-q1000_h2o)*log(pref(k,1)/1.E3)/log(2.) 
- else - q00 = q2000_h2o + (q3000_h2o-q2000_h2o)*log(pref(k,1)/2.E3)/log(1.5) + p00 = Atm(mygrid)%pe(isc,k,jsc) + if ( p00 < 30.E2 ) then + if ( p00 < 1. ) then + q00 = q1_h2o + elseif ( p00 <= 7. .and. p00 >= 1. ) then + q00 = q1_h2o + (q7_h2o-q1_h2o)*log(pref(k,1)/1.)/log(7.) + elseif ( p00 < 100. .and. p00 >= 7. ) then + q00 = q7_h2o + (q100_h2o-q7_h2o)*log(pref(k,1)/7.)/log(100./7.) + elseif ( p00 < 1000. .and. p00 >= 100. ) then + q00 = q100_h2o + (q1000_h2o-q100_h2o)*log(pref(k,1)/1.E2)/log(10.) + elseif ( p00 < 2000. .and. p00 >= 1000. ) then + q00 = q1000_h2o + (q2000_h2o-q1000_h2o)*log(pref(k,1)/1.E3)/log(2.) + else + q00 = q2000_h2o + (q3000_h2o-q2000_h2o)*log(pref(k,1)/2.E3)/log(1.5) + endif + do j=jsc,jec + do i=isc,iec + Atm(mygrid)%q(i,j,k,sphum) = xt*(Atm(mygrid)%q(i,j,k,sphum) + wt*q00) + enddo + enddo endif + endif + if ( nudge_dz ) then do j=jsc,jec do i=isc,iec - Atm(mytile)%q(i,j,k,sphum) = xt*(Atm(mytile)%q(i,j,k,sphum) + wt*q00) + Atm(mygrid)%delp(i,j,k) = xt*(Atm(mygrid)%delp(i,j,k) + wt*dp0(i,j,k)) + Atm(mygrid)%delz(i,j,k) = xt*(Atm(mygrid)%delz(i,j,k) + wt*dz0(i,j,k)) enddo enddo - endif - endif - do j=jsc,jec - do i=isc,iec - Atm(mytile)%pt(i,j,k) = xt*(Atm(mytile)%pt(i,j,k) + wt*t0(i,j,k)/(1.+zvir*Atm(mytile)%q(i,j,k,sphum))) - Atm(mytile)%delp(i,j,k) = xt*(Atm(mytile)%delp(i,j,k) + wt*dp0(i,j,k)) + else + do j=jsc,jec + do i=isc,iec + Atm(mygrid)%pt(i,j,k) = xt*(Atm(mygrid)%pt(i,j,k) + wt*t0(i,j,k)/(1.+zvir*Atm(mygrid)%q(i,j,k,sphum))) + Atm(mygrid)%delp(i,j,k) = xt*(Atm(mygrid)%delp(i,j,k) + wt*dp0(i,j,k)) + enddo enddo - enddo + endif + enddo ! Backward - call fv_dynamics(Atm(mytile)%npx, Atm(mytile)%npy, npz, nq, Atm(mytile)%ng, -dt_atmos, 0., & - Atm(mytile)%flagstruct%fill, Atm(mytile)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & - Atm(mytile)%ptop, Atm(mytile)%ks, nq, Atm(mytile)%flagstruct%n_split, & - Atm(mytile)%flagstruct%q_split, Atm(mytile)%u, Atm(mytile)%v, Atm(mytile)%w, & - Atm(mytile)%delz, Atm(mytile)%flagstruct%hydrostatic, & - Atm(mytile)%pt, Atm(mytile)%delp, Atm(mytile)%q, Atm(mytile)%ps, & - Atm(mytile)%pe, Atm(mytile)%pk, Atm(mytile)%peln, Atm(mytile)%pkz, Atm(mytile)%phis, & - Atm(mytile)%q_con, Atm(mytile)%omga, Atm(mytile)%ua, Atm(mytile)%va, Atm(mytile)%uc, Atm(mytile)%vc, & - Atm(mytile)%ak, Atm(mytile)%bk, Atm(mytile)%mfx, Atm(mytile)%mfy, & - Atm(mytile)%cx, Atm(mytile)%cy, Atm(mytile)%ze0, Atm(mytile)%flagstruct%hybrid_z, & - Atm(mytile)%gridstruct, Atm(mytile)%flagstruct, & - Atm(mytile)%neststruct, Atm(mytile)%idiag, Atm(mytile)%bd, Atm(mytile)%parent_grid, & - Atm(mytile)%domain) + call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, -dt_atmos, 0., & + Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & + Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & + Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & + Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & + Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & + Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%mfx, Atm(mygrid)%mfy, & + Atm(mygrid)%cx, Atm(mygrid)%cy, Atm(mygrid)%ze0, Atm(mygrid)%flagstruct%hybrid_z, & + Atm(mygrid)%gridstruct, Atm(mygrid)%flagstruct, & + Atm(mygrid)%neststruct, Atm(mygrid)%idiag, Atm(mygrid)%bd, Atm(mygrid)%parent_grid, & + 
Atm(mygrid)%domain) ! Forward call - call fv_dynamics(Atm(mytile)%npx, Atm(mytile)%npy, npz, nq, Atm(mytile)%ng, dt_atmos, 0., & - Atm(mytile)%flagstruct%fill, Atm(mytile)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & - Atm(mytile)%ptop, Atm(mytile)%ks, nq, Atm(mytile)%flagstruct%n_split, & - Atm(mytile)%flagstruct%q_split, Atm(mytile)%u, Atm(mytile)%v, Atm(mytile)%w, & - Atm(mytile)%delz, Atm(mytile)%flagstruct%hydrostatic, & - Atm(mytile)%pt, Atm(mytile)%delp, Atm(mytile)%q, Atm(mytile)%ps, & - Atm(mytile)%pe, Atm(mytile)%pk, Atm(mytile)%peln, Atm(mytile)%pkz, Atm(mytile)%phis, & - Atm(mytile)%q_con, Atm(mytile)%omga, Atm(mytile)%ua, Atm(mytile)%va, Atm(mytile)%uc, Atm(mytile)%vc, & - Atm(mytile)%ak, Atm(mytile)%bk, Atm(mytile)%mfx, Atm(mytile)%mfy, & - Atm(mytile)%cx, Atm(mytile)%cy, Atm(mytile)%ze0, Atm(mytile)%flagstruct%hybrid_z, & - Atm(mytile)%gridstruct, Atm(mytile)%flagstruct, & - Atm(mytile)%neststruct, Atm(mytile)%idiag, Atm(mytile)%bd, Atm(mytile)%parent_grid, & - Atm(mytile)%domain) + call fv_dynamics(Atm(mygrid)%npx, Atm(mygrid)%npy, npz, nq, Atm(mygrid)%ng, dt_atmos, 0., & + Atm(mygrid)%flagstruct%fill, Atm(mygrid)%flagstruct%reproduce_sum, kappa, cp_air, zvir, & + Atm(mygrid)%ptop, Atm(mygrid)%ks, nq, Atm(mygrid)%flagstruct%n_split, & + Atm(mygrid)%flagstruct%q_split, Atm(mygrid)%u, Atm(mygrid)%v, Atm(mygrid)%w, & + Atm(mygrid)%delz, Atm(mygrid)%flagstruct%hydrostatic, & + Atm(mygrid)%pt, Atm(mygrid)%delp, Atm(mygrid)%q, Atm(mygrid)%ps, & + Atm(mygrid)%pe, Atm(mygrid)%pk, Atm(mygrid)%peln, Atm(mygrid)%pkz, Atm(mygrid)%phis, & + Atm(mygrid)%q_con, Atm(mygrid)%omga, Atm(mygrid)%ua, Atm(mygrid)%va, Atm(mygrid)%uc, Atm(mygrid)%vc, & + Atm(mygrid)%ak, Atm(mygrid)%bk, Atm(mygrid)%mfx, Atm(mygrid)%mfy, & + Atm(mygrid)%cx, Atm(mygrid)%cy, Atm(mygrid)%ze0, Atm(mygrid)%flagstruct%hybrid_z, & + Atm(mygrid)%gridstruct, Atm(mygrid)%flagstruct, & + Atm(mygrid)%neststruct, Atm(mygrid)%idiag, Atm(mygrid)%bd, Atm(mygrid)%parent_grid, & + Atm(mygrid)%domain) ! Nudging back to IC !$omp parallel do default (none) & -!$omp shared (npz, jsc, jec, isc, iec, n, sphum, Atm, u0, v0, t0, dp0, xt, zvir, mytile) & -!$omp private (i, j, k) +!$omp shared (nudge_dz,npz, jsc, jec, isc, iec, n, sphum, Atm, u0, v0, t0, dz0, dp0, xt, zvir, mygrid) & +!$omp private (i, j, k) do k=1,npz do j=jsc,jec+1 do i=isc,iec - Atm(mytile)%u(i,j,k) = xt*(Atm(mytile)%u(i,j,k) + wt*u0(i,j,k)) + Atm(mygrid)%u(i,j,k) = xt*(Atm(mygrid)%u(i,j,k) + wt*u0(i,j,k)) enddo enddo do j=jsc,jec do i=isc,iec+1 - Atm(mytile)%v(i,j,k) = xt*(Atm(mytile)%v(i,j,k) + wt*v0(i,j,k)) + Atm(mygrid)%v(i,j,k) = xt*(Atm(mygrid)%v(i,j,k) + wt*v0(i,j,k)) enddo enddo - do j=jsc,jec + if ( nudge_dz ) then + do j=jsc,jec do i=isc,iec - Atm(mytile)%pt(i,j,k) = xt*(Atm(mytile)%pt(i,j,k) + wt*t0(i,j,k)/(1.+zvir*Atm(mytile)%q(i,j,k,sphum))) - Atm(mytile)%delp(i,j,k) = xt*(Atm(mytile)%delp(i,j,k) + wt*dp0(i,j,k)) + Atm(mygrid)%delp(i,j,k) = xt*(Atm(mygrid)%delp(i,j,k) + wt*dp0(i,j,k)) + Atm(mygrid)%delz(i,j,k) = xt*(Atm(mygrid)%delz(i,j,k) + wt*dz0(i,j,k)) enddo - enddo + enddo + else + do j=jsc,jec + do i=isc,iec + Atm(mygrid)%pt(i,j,k) = xt*(Atm(mygrid)%pt(i,j,k) + wt*t0(i,j,k)/(1.+zvir*Atm(mygrid)%q(i,j,k,sphum))) + Atm(mygrid)%delp(i,j,k) = xt*(Atm(mygrid)%delp(i,j,k) + wt*dp0(i,j,k)) + enddo + enddo + endif enddo enddo deallocate ( u0 ) deallocate ( v0 ) - deallocate ( t0 ) deallocate (dp0 ) + if ( allocated(t0) ) deallocate ( t0 ) + if ( allocated(dz0) ) deallocate ( dz0 ) do_adiabatic_init = .false. 
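The nudge_qv branch above selects a climatological water-vapor value by piecewise-linear interpolation in log pressure between anchors at roughly 1, 7, 100, 1000, 2000 and 3000 Pa. The same mapping as a standalone function (the anchor mixing ratios q1..q3000 are placeholders here; the model takes q1_h2o..q3000_h2o from module data):

    real function haloe_q(p)
      ! piecewise interpolation in log-p, mirroring the q00 branch above
      implicit none
      real, intent(in) :: p          ! pressure (Pa)
      real, parameter :: q1 = 3.e-6, q7 = 4.e-6, q100 = 5.e-6, &
                         q1000 = 4.e-6, q2000 = 4.e-6, q3000 = 5.e-6   ! placeholders
      if (p < 1.) then
         haloe_q = q1
      elseif (p <= 7.) then
         haloe_q = q1 + (q7-q1)*log(p/1.)/log(7.)
      elseif (p < 100.) then
         haloe_q = q7 + (q100-q7)*log(p/7.)/log(100./7.)
      elseif (p < 1000.) then
         haloe_q = q100 + (q1000-q100)*log(p/1.e2)/log(10.)
      elseif (p < 2000.) then
         haloe_q = q1000 + (q2000-q1000)*log(p/1.e3)/log(2.)
      else
         haloe_q = q2000 + (q3000-q2000)*log(p/2.e3)/log(1.5)
      endif
    end function haloe_q

Each segment is continuous with its neighbors at the breakpoints; at p = 7, for example, the second branch reduces exactly to q7.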
call timing_off('adiabatic_init') @@ -1136,28 +1441,30 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) ! Local GFS-phys consistent parameters: !-------------------------------------- real(kind=kind_phys), parameter:: p00 = 1.e5 - real(kind=kind_phys), parameter:: qmin = 1.0e-10 + real(kind=kind_phys), parameter:: qmin = 1.0e-10 real(kind=kind_phys):: pk0inv, ptop, pktop real(kind=kind_phys) :: rTv, dm, qgrs_rad - integer :: nb, blen, npz, i, j, k, ix, k1 + integer :: nb, blen, npz, i, j, k, ix, k1, dnats, nq_adv !!! NOTES: lmh 6nov15 !!! - "Layer" means "layer mean", ie. the average value in a layer !!! - "Level" means "level interface", ie the point values at the top or bottom of a layer - ptop = _DBL_(_RL_(Atm(mytile)%ak(1))) + ptop = _DBL_(_RL_(Atm(mygrid)%ak(1))) pktop = (ptop/p00)**kappa pk0inv = (1.0_kind_phys/p00)**kappa npz = Atm_block%npz + dnats = Atm(mygrid)%flagstruct%dnats + nq_adv = nq - dnats !--------------------------------------------------------------------- ! use most up to date atmospheric properties when running serially !--------------------------------------------------------------------- -!$OMP parallel do default (none) & +!$OMP parallel do default (none) & !$OMP shared (Atm_block, Atm, IPD_Data, npz, nq, ncnst, sphum, liq_wat, & !$OMP ice_wat, rainwat, snowwat, graupel, pk0inv, ptop, & -!$OMP pktop, zvir, mytile) & +!$OMP pktop, zvir, mygrid, dnats, nq_adv) & !$OMP private (dm, nb, blen, i, j, ix, k1, rTv, qgrs_rad) do nb = 1,Atm_block%nblks @@ -1178,39 +1485,43 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) !Indices for FV's vertical coordinate, for which 1 = top !here, k is the index for GFS's vertical coordinate, for which 1 = bottom k1 = npz+1-k ! flipping the index - IPD_Data(nb)%Statein%tgrs(ix,k) = _DBL_(_RL_(Atm(mytile)%pt(i,j,k1))) - IPD_Data(nb)%Statein%ugrs(ix,k) = _DBL_(_RL_(Atm(mytile)%ua(i,j,k1))) - IPD_Data(nb)%Statein%vgrs(ix,k) = _DBL_(_RL_(Atm(mytile)%va(i,j,k1))) - IPD_Data(nb)%Statein%vvl(ix,k) = _DBL_(_RL_(Atm(mytile)%omga(i,j,k1))) - IPD_Data(nb)%Statein%prsl(ix,k) = _DBL_(_RL_(Atm(mytile)%delp(i,j,k1))) ! Total mass + IPD_Data(nb)%Statein%tgrs(ix,k) = _DBL_(_RL_(Atm(mygrid)%pt(i,j,k1))) + IPD_Data(nb)%Statein%ugrs(ix,k) = _DBL_(_RL_(Atm(mygrid)%ua(i,j,k1))) + IPD_Data(nb)%Statein%vgrs(ix,k) = _DBL_(_RL_(Atm(mygrid)%va(i,j,k1))) + IPD_Data(nb)%Statein%vvl(ix,k) = _DBL_(_RL_(Atm(mygrid)%omga(i,j,k1))) + IPD_Data(nb)%Statein%prsl(ix,k) = _DBL_(_RL_(Atm(mygrid)%delp(i,j,k1))) ! Total mass - if (.not.Atm(mytile)%flagstruct%hydrostatic .and. (.not.Atm(mytile)%flagstruct%use_hydro_pressure)) & - IPD_Data(nb)%Statein%phii(ix,k+1) = IPD_Data(nb)%Statein%phii(ix,k) - _DBL_(_RL_(Atm(mytile)%delz(i,j,k1)*grav)) + if (.not.Atm(mygrid)%flagstruct%hydrostatic .and. (.not.Atm(mygrid)%flagstruct%use_hydro_pressure)) & + IPD_Data(nb)%Statein%phii(ix,k+1) = IPD_Data(nb)%Statein%phii(ix,k) - _DBL_(_RL_(Atm(mygrid)%delz(i,j,k1)*grav)) ! Convert to tracer mass: - IPD_Data(nb)%Statein%qgrs(ix,k,1:nq) = _DBL_(_RL_(Atm(mytile)%q(i,j,k1,1:nq))) & + IPD_Data(nb)%Statein%qgrs(ix,k,1:nq_adv) = _DBL_(_RL_(Atm(mygrid)%q(i,j,k1,1:nq_adv))) & * IPD_Data(nb)%Statein%prsl(ix,k) + + if (dnats .gt. 0) & + IPD_Data(nb)%Statein%qgrs(ix,k,nq_adv+1:nq) = _DBL_(_RL_(Atm(mygrid)%q(i,j,k1,nq_adv+1:nq))) !--- SHOULD THESE BE CONVERTED TO MASS SINCE THE DYCORE DOES NOT TOUCH THEM IN ANY WAY??? !--- See Note in state update... 
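The question in the comment above comes down to the mass-weighting convention: the advected tracers are first converted to mass (ratio times layer mass), the condensate mass is then removed from prsl, and the later division by the reduced prsl is what turns FV3's ratios-per-total-mass into GFS's ratios-per-(dry air + vapor). The diagnostic tracers in slots nq+1:ncnst skip that round trip because the dycore never advects them. A one-layer sketch of the conversion (values illustrative):

    program gfs_ratio_demo
      implicit none
      real :: delp_tot, q_fv3, cond, tmass, delp_gfs, q_gfs
      delp_tot = 900.0                 ! FV3 layer mass: dry + vapor + condensate (Pa)
      q_fv3    = 5.0e-3                ! FV3 mixing ratio: tracer mass / total mass
      cond     = 0.9                   ! condensate mass in the layer (Pa)
      tmass    = q_fv3*delp_tot        ! tracer mass (Pa), convention-independent
      delp_gfs = delp_tot - cond       ! remove condensates: dry + vapor only
      q_gfs    = tmass/delp_gfs        ! GFS mixing ratio: per (dry + vapor)
      print *, q_fv3, q_gfs            ! q_gfs comes out slightly larger, as expected
    end program gfs_ratio_demo
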
- IPD_Data(nb)%Statein%qgrs(ix,k,nq+1:ncnst) = _DBL_(_RL_(Atm(mytile)%qdiag(i,j,k1,nq+1:ncnst))) + if ( ncnst > nq) & + IPD_Data(nb)%Statein%qgrs(ix,k,nq+1:ncnst) = _DBL_(_RL_(Atm(mygrid)%qdiag(i,j,k1,nq+1:ncnst))) ! Remove the contribution of condensates to delp (mass): - if ( Atm(mytile)%flagstruct%nwat .eq. 2 ) then ! GFS - IPD_Data(nb)%Statein%prsl(ix,k) = IPD_Data(nb)%Statein%prsl(ix,k) & - - IPD_Data(nb)%Statein%qgrs(ix,k,liq_wat) - elseif ( Atm(mytile)%flagstruct%nwat .eq. 6 ) then + if ( Atm(mygrid)%flagstruct%nwat .eq. 6 ) then IPD_Data(nb)%Statein%prsl(ix,k) = IPD_Data(nb)%Statein%prsl(ix,k) & - IPD_Data(nb)%Statein%qgrs(ix,k,liq_wat) & - IPD_Data(nb)%Statein%qgrs(ix,k,ice_wat) & - IPD_Data(nb)%Statein%qgrs(ix,k,rainwat) & - IPD_Data(nb)%Statein%qgrs(ix,k,snowwat) & - IPD_Data(nb)%Statein%qgrs(ix,k,graupel) + else !variable condensate numbers + IPD_Data(nb)%Statein%prsl(ix,k) = IPD_Data(nb)%Statein%prsl(ix,k) & + - sum(IPD_Data(nb)%Statein%qgrs(ix,k,2:Atm(mygrid)%flagstruct%nwat)) endif enddo enddo ! Re-compute pressure (dry_mass + water_vapor) derived fields: do i=1,blen - IPD_Data(nb)%Statein%prsi(i,npz+1) = ptop + IPD_Data(nb)%Statein%prsi(i,npz+1) = ptop enddo do k=npz,1,-1 do i=1,blen @@ -1218,8 +1529,8 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) + IPD_Data(nb)%Statein%prsl(i,k) IPD_Data(nb)%Statein%prsik(i,k) = log( IPD_Data(nb)%Statein%prsi(i,k) ) ! Redefine mixing ratios for GFS == tracer_mass / (dry_air_mass + water_vapor_mass) - IPD_Data(nb)%Statein%qgrs(i,k,1:ncnst) = IPD_Data(nb)%Statein%qgrs(i,k,1:ncnst) & - / IPD_Data(nb)%Statein%prsl(i,k) + IPD_Data(nb)%Statein%qgrs(i,k,1:nq_adv) = IPD_Data(nb)%Statein%qgrs(i,k,1:nq_adv) & + / IPD_Data(nb)%Statein%prsl(i,k) enddo enddo do i=1,blen @@ -1232,7 +1543,7 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) ! Geo-potential at interfaces: qgrs_rad = max(qmin,IPD_Data(nb)%Statein%qgrs(i,k,sphum)) rTv = rdgas*IPD_Data(nb)%Statein%tgrs(i,k)*(1.+zvir*qgrs_rad) - if ( Atm(mytile)%flagstruct%hydrostatic .or. Atm(mytile)%flagstruct%use_hydro_pressure ) & + if ( Atm(mygrid)%flagstruct%hydrostatic .or. Atm(mygrid)%flagstruct%use_hydro_pressure ) & IPD_Data(nb)%Statein%phii(i,k+1) = IPD_Data(nb)%Statein%phii(i,k) & + rTv*(IPD_Data(nb)%Statein%prsik(i,k) & - IPD_Data(nb)%Statein%prsik(i,k+1)) @@ -1242,21 +1553,12 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) - IPD_Data(nb)%Statein%phii(i,k)) !!! Ensure subgrid MONOTONICITY of Pressure: SJL 09/11/2016 - if ( .not.Atm(mytile)%flagstruct%hydrostatic ) then -#ifdef ALT_METHOD + if ( .not.Atm(mygrid)%flagstruct%hydrostatic ) then ! If violated, replaces it with hydrostatic pressure - if (IPD_Data(nb)%Statein%prsl(i,k).ge.IPD_Data(nb)%Statein%prsi(i,k) .or. & - IPD_Data(nb)%Statein%prsl(i,k).le.IPD_Data(nb)%Statein%prsi(i,k+1)) then - IPD_Data(nb)%Statein%prsl(i,k) = dm / (IPD_Data(nb)%Statein%prsik(i,k) & - - IPD_Data(nb)%Statein%prsik(i,k+1)) - endif - -#else IPD_Data(nb)%Statein%prsl(i,k) = min(IPD_Data(nb)%Statein%prsl(i,k), & IPD_Data(nb)%Statein%prsi(i,k) - 0.01*dm) IPD_Data(nb)%Statein%prsl(i,k) = max(IPD_Data(nb)%Statein%prsl(i,k), & IPD_Data(nb)%Statein%prsi(i,k+1) + 0.01*dm) -#endif endif enddo enddo @@ -1278,10 +1580,10 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) IPD_Data(nb)%Statein%prsik(i,npz+1) = pktop ! TOA enddo - if ( Atm(mytile)%flagstruct%hydrostatic .or. Atm(mytile)%flagstruct%use_hydro_pressure ) then + if ( Atm(mygrid)%flagstruct%hydrostatic .or. 
Atm(mygrid)%flagstruct%use_hydro_pressure ) then do k=2,npz do i=1,blen - IPD_Data(nb)%Statein%prsik(i,k) = exp( kappa*IPD_Data(nb)%Statein%prsik(i,k) )*pk0inv + IPD_Data(nb)%Statein%prsik(i,k) = exp( kappa*IPD_Data(nb)%Statein%prsik(i,k) )*pk0inv enddo enddo endif @@ -1289,4 +1591,62 @@ subroutine atmos_phys_driver_statein (IPD_Data, Atm_block) end subroutine atmos_phys_driver_statein + subroutine atmos_phys_qdt_diag(q, phys_diag, nq, dt, begin) + + integer, intent(IN) :: nq + real, intent(IN) :: dt + logical, intent(IN) :: begin + real, intent(IN) :: q(isd:ied,jsd:jed,npz,nq) + type(phys_diag_type), intent(INOUT) :: phys_diag + + integer sphum, liq_wat, ice_wat ! GFDL AM physics + integer rainwat, snowwat, graupel ! GFDL Cloud Microphysics + + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') + liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') + ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') + rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') + snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') + graupel = get_tracer_index (MODEL_ATMOS, 'graupel') + + if (begin) then + if (allocated(phys_diag%phys_qv_dt)) phys_diag%phys_qv_dt = q(isc:iec,jsc:jec,:,sphum) + if (allocated(phys_diag%phys_ql_dt)) then + if (liq_wat < 0) call mpp_error(FATAL, " phys_ql_dt needs at least one liquid water tracer defined") + phys_diag%phys_ql_dt = q(isc:iec,jsc:jec,:,liq_wat) + endif + if (allocated(phys_diag%phys_qi_dt)) then + if (ice_wat < 0) then + call mpp_error(WARNING, " phys_qi_dt needs at least one ice water tracer defined") + phys_diag%phys_qi_dt = 0. + endif + phys_diag%phys_qi_dt = q(isc:iec,jsc:jec,:,ice_wat) + endif + else + if (allocated(phys_diag%phys_qv_dt)) phys_diag%phys_qv_dt = q(isc:iec,jsc:jec,:,sphum) - phys_diag%phys_qv_dt + if (allocated(phys_diag%phys_ql_dt)) then + phys_diag%phys_ql_dt = q(isc:iec,jsc:jec,:,liq_wat) - phys_diag%phys_ql_dt + endif + if (allocated(phys_diag%phys_qi_dt)) then + phys_diag%phys_qi_dt = q(isc:iec,jsc:jec,:,ice_wat) - phys_diag%phys_qv_dt + endif + endif + + if (allocated(phys_diag%phys_ql_dt)) then + if (rainwat > 0) phys_diag%phys_ql_dt = q(isc:iec,jsc:jec,:,rainwat) + phys_diag%phys_ql_dt + endif + if (allocated(phys_diag%phys_qi_dt)) then + if (snowwat > 0) phys_diag%phys_qi_dt = q(isc:iec,jsc:jec,:,snowwat) + phys_diag%phys_qi_dt + if (graupel > 0) phys_diag%phys_qi_dt = q(isc:iec,jsc:jec,:,graupel) + phys_diag%phys_qi_dt + endif + + if (.not. begin) then + if (allocated(phys_diag%phys_qv_dt)) phys_diag%phys_qv_dt = phys_diag%phys_qv_dt / dt + if (allocated(phys_diag%phys_ql_dt)) phys_diag%phys_ql_dt = phys_diag%phys_ql_dt / dt + if (allocated(phys_diag%phys_qi_dt)) phys_diag%phys_qi_dt = phys_diag%phys_qi_dt / dt + endif + + + end subroutine atmos_phys_qdt_diag + end module atmosphere_mod diff --git a/driver/SHiELD/constants.F90 b/driver/SHiELD/constants.F90 deleted file mode 100644 index 1ee867e15..000000000 --- a/driver/SHiELD/constants.F90 +++ /dev/null @@ -1,341 +0,0 @@ -!*********************************************************************** -!* GNU Lesser General Public License -!* -!* This file is part of the FV3 dynamical core. -!* -!* The FV3 dynamical core is free software: you can redistribute it -!* and/or modify it under the terms of the -!* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or -!* (at your option) any later version. 
-!* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -!* See the GNU General Public License for more details. -!* -!* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. -!* If not, see . -!*********************************************************************** - -module constants_mod - -!---variable for strong typing grid parameters -use platform_mod, only: r8_kind -! -! Defines useful constants for Earth. -! - -! -! Constants are defined as real parameters. -! Constants are accessed through the "use" statement. -! - -implicit none -private - -character(len=128) :: version='$Id$' -character(len=128) :: tagname='$Name$' -!dummy variable to use in HUGE initializations -real :: realnumber - -!------------ physical constants --------------- -! -! radius of the earth -! -! -! rotation rate of the planet (earth) -! -! -! acceleration due to gravity -! -! -! gas constant for dry air -! -! -! RDGAS / CP_AIR -! -! -! specific heat capacity of dry air at constant pressure -! -! -! specific heat capacity taken from McDougall (2002) "Potential Enthalpy ..." -! -! -! average density of sea water -! -! -! reciprocal of average density of sea water -! -! -! (kg/m^3)*(cal/kg/deg C)(joules/cal) = (joules/m^3/deg C) -! - - -#ifdef GFS_PHYS -! real(kind=r8_kind), public, parameter :: RADIUS = 6376000.0_r8_kind -! SJL: the following are from fv3_gfsphysics/gfs_physics/physics/physcons.f90 -real, public, parameter :: RADIUS = 6.3712e+6_r8_kind -real(kind=r8_kind), public, parameter :: PI_8 = 3.1415926535897931_r8_kind -real, public, parameter :: PI = 3.1415926535897931_r8_kind -real, public, parameter :: OMEGA = 7.2921e-5 -real, public, parameter :: GRAV = 9.80665_r8_kind -real(kind=r8_kind), public, parameter :: GRAV_8 = 9.80665_r8_kind -real, public, parameter :: RDGAS = 287.05_r8_kind -real, public, parameter :: RVGAS = 461.50_r8_kind -! Extra: -real, public, parameter :: HLV = 2.5e6_r8_kind -real, public, parameter :: HLF = 3.3358e5_r8_kind -real, public, parameter :: con_cliq =4.1855e+3_r8_kind ! spec heat H2O liq (J/kg/K) -real, public, parameter :: con_csol =2.1060e+3_r8_kind ! spec heat H2O ice (J/kg/K) -#else - -#ifdef SMALL_EARTH -#ifdef DCMIP - real, private, paramter :: small_fac = 1._r8_kind / 120._r8_kind #only needed for supercell test -#else -#ifdef HIWPP -#ifdef SUPER_K - real, private, parameter :: small_fac = 1._r8_kind / 120._r8_kind -#else - real, private, parameter :: small_fac = 1._r8_kind / 166.7_r8_kind -#endif -#else - real, private, parameter :: small_fac = 1._r8_kind / 10._r8_kind -#endif -#endif -#else - real, private, parameter :: small_fac = 1._r8_kind -#endif - -real, public, parameter :: RADIUS = 6371e+3_r8_kind * small_fac -real(kind=8), public, parameter :: PI_8 = 3.141592653589793_r8_kind -real, public, parameter :: PI = 3.141592653589793_r8_kind -real, public, parameter :: OMEGA = 7.292e-5_r8_kind / small_fac -real, public, parameter :: GRAV = 9.8060226_r8_kind -real, public, parameter :: RDGAS = 287.04_r8_kind -real, public, parameter :: RVGAS = 461.60_r8_kind -! Extra: -real, public, parameter :: HLV = 2.501e6_r8_kind -real, public, parameter :: HLF = 3.50e5_r8_kind -#endif -real, public, parameter :: CP_AIR = 1004.6_r8_kind -real, public, parameter :: CP_VAPOR = 4.0_r8_kind*RVGAS -real, public, parameter :: KAPPA = RDGAS/CP_AIR -!!! 
real, public, parameter :: STEFAN = 5.670400e-8_r8_kind -real, public, parameter :: STEFAN = 5.67051e-8_r8_kind - -real, public, parameter :: CP_OCEAN = 3989.24495292815_r8_kind -real, public, parameter :: RHO0 = 1.035e3_r8_kind -real, public, parameter :: RHO0R = 1.0_r8_kind/RHO0 -real, public, parameter :: RHO_CP = RHO0*CP_OCEAN - -!rabreal, public, parameter :: KAPPA = 2._r8_kind/7._r8_kind -!rabreal, public, parameter :: GRAV = 9.80_r8_kind -!rabreal, public, parameter :: CP_AIR = RDGAS/KAPPA - -!------------ water vapor constants --------------- -! -! Humidity factor. Controls the humidity content of the atmosphere through -! the Saturation Vapour Pressure expression when using DO_SIMPLE. -! -! -! gas constant for water vapor -! -! -! specific heat capacity of water vapor at constant pressure -! -! -! density of liquid water -! -! -! latent heat of evaporation -! -! -! latent heat of fusion -! -! -! latent heat of sublimation -! -! -! temp where fresh water freezes -! - -real, public, parameter :: ES0 = 1.0_r8_kind -real, public, parameter :: DENS_H2O = 1000._r8_kind -real, public, parameter :: HLS = HLV + HLF -real, public, parameter :: TFREEZE = 273.15_r8_kind - -!rabreal, public, parameter :: RVGAS = 461.50_r8_kind -!rabreal, public, parameter :: HLV = 2.500e6_r8_kind -!rabreal, public, parameter :: HLF = 3.34e5_r8_kind -!rabreal, public, parameter :: HLS = HLV + HLF -!rabreal, public, parameter :: TFREEZE = 273.16_r8_kind - -!-------------- radiation constants ----------------- - -! -! molecular weight of air -! -! -! molecular weight of water -! -! -! molecular weight of ozone -! -! -! molecular weight of carbon -! -! molecular weight of carbon dioxide -! -! molecular weight of molecular oxygen -! -! molecular weight of CFC-11 (CCl3F) -! -! molecular weight of CFC-21 (CCl2F2) -! -! -! diffusivity factor -! -! -! seconds in a day -! -! -! Avogadro's number -! -! -! mean sea level pressure -! -! -! mean sea level pressure -! - -real, public, parameter :: WTMAIR = 2.896440E+01_r8_kind -real, public, parameter :: WTMH2O = WTMAIR*(RDGAS/RVGAS) !pjp OK to change value because not used yet. -!real, public, parameter :: WTMO3 = 47.99820E+01_r8_kind -real, public, parameter :: WTMOZONE = 47.99820_r8_kind -real, public, parameter :: WTMC = 12.00000_r8_kind -real, public, parameter :: WTMCO2 = 44.00995_r8_kind -real, public, parameter :: WTMO2 = 31.9988_r8_kind -real, public, parameter :: WTMCFC11 = 137.3681_r8_kind -real, public, parameter :: WTMCFC12 = 120.9135_r8_kind -real, public, parameter :: DIFFAC = 1.660000E+00_r8_kind -real, public, parameter :: SECONDS_PER_DAY = 8.640000E+04_r8_kind, SECONDS_PER_HOUR = 3600._r8_kind, SECONDS_PER_MINUTE=60._r8_kind -real, public, parameter :: AVOGNO = 6.023000E+23_r8_kind -real, public, parameter :: PSTD = 1.013250E+06_r8_kind -real, public, parameter :: PSTD_MKS = 101325.0_r8_kind - -! -! factor used to convert flux divergence to heating rate in degrees per day -! -! -! factor used to convert flux divergence to heating rate in degrees per day -! -! -! mixing ratio of molecular oxygen in air -! -! -! reference atmospheric density -! -! -! minimum value allowed as argument to log function -! 
- -real, public, parameter :: RADCON = ((1.0E+02*GRAV)/(1.0E+04*CP_AIR))*SECONDS_PER_DAY -real, public, parameter :: RADCON_MKS = (GRAV/CP_AIR)*SECONDS_PER_DAY -real, public, parameter :: O2MIXRAT = 2.0953E-01_r8_kind -real, public, parameter :: RHOAIR = 1.292269_r8_kind -real, public, parameter :: ALOGMIN = -50.0_r8_kind - -!------------ miscellaneous constants --------------- -! -! Stefan-Boltzmann constant -! -! -! Von Karman constant -! -! -! ratio of circle circumference to diameter -! -! -! degrees per radian -! -! -! radians per degree -! -! -! equal to RAD_TO_DEG. Named RADIAN for backward compatability. -! -! -! converts rho*g*z (in mks) to dbars: 1dbar = 10^4 (kg/m^3)(m/s^2)m -! -! -! degrees Kelvin at zero Celsius -! -! -! a small number to prevent divide by zero exceptions -! - -real, public, parameter :: VONKARM = 0.40_r8_kind -real, public, parameter :: RAD_TO_DEG=180._r8_kind/PI -real, public, parameter :: DEG_TO_RAD=PI/180._r8_kind -real, public, parameter :: RADIAN = RAD_TO_DEG -real, public, parameter :: C2DBARS = 1.e-4_r8_kind -real, public, parameter :: KELVIN = 273.15_r8_kind -real, public, parameter :: EPSLN = 1.0e-15_r8_kind - -!rabreal, public, parameter :: STEFAN = 5.6734e-8_r8_kind -!rabreal, public, parameter :: EPSLN = 1.0e-40_r8_kind -!rabreal, public, parameter :: PI = 3.14159265358979323846_r8_kind - -!----------------------------------------------------------------------- -! version and tagname published -! so that write_version_number can be called for constants_mod by fms_init -public :: version, tagname -!----------------------------------------------------------------------- -public :: constants_init - -contains - -subroutine constants_init - -! dummy routine. - -end subroutine constants_init - -end module constants_mod - -! - -! -! 1. Renaming of constants. -! -! -! 2. Additional constants. -! -! -! Constants have been declared as type REAL, PARAMETER. -! -! The value a constant can not be changed in a users program. -! New constants can be defined in terms of values from the -! constants module using a parameter statement.

-!
-! The name given to a particular constant may be changed.
-!
-! Constants can be used on the right side on an assignment statement
-! (their value can not be reassigned).
-!
-!
-!
-!
-!    use constants_mod, only:  TFREEZE, grav_new => GRAV
-!    real, parameter :: grav_inv = 1.0 / grav_new
-!    tempc(:,:,:) = tempk(:,:,:) - TFREEZE
-!    geopotential(:,:) = height(:,:) * grav_new
-!
-!
-!
-
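The constants module removed above also carried the small-planet scalings used for idealized tests (DCMIP supercell, HIWPP): the radius is multiplied by small_fac and the rotation rate divided by it, which shortens advective time scales while leaving the product RADIUS*OMEGA, and with it the rotational velocity scale, unchanged. A sketch of that invariance (the small_fac value is illustrative, matching the supercell setting above):

    program small_earth_demo
      implicit none
      real(kind=8), parameter :: small_fac = 1.d0/120.d0       ! e.g. the supercell setting
      real(kind=8), parameter :: radius = 6371.d3*small_fac    ! reduced radius (m)
      real(kind=8), parameter :: omega  = 7.292d-5/small_fac   ! increased rotation (1/s)
      ! radius*omega is identical to the unscaled 6371e3 * 7.292e-5
      print *, 'radius*omega =', radius*omega
    end program small_earth_demo
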
- diff --git a/driver/SHiELD/gfdl_cloud_microphys.F90 b/driver/SHiELD/gfdl_cloud_microphys.F90 new file mode 100644 index 000000000..d671a7af8 --- /dev/null +++ b/driver/SHiELD/gfdl_cloud_microphys.F90 @@ -0,0 +1,4699 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** +! ======================================================================= +! cloud micro - physics package for gfdl global cloud resolving model +! the algorithms are originally derived from lin et al 1983. most of the +! key elements have been simplified / improved. this code at this stage +! bears little to no similarity to the original lin mp in zetac. +! therefore, it is best to be called gfdl micro - physics (gfdl mp) . +! developer: shian-jiann lin, linjiong zhou +! ======================================================================= + +module gfdl_cloud_microphys_mod + + ! use mpp_mod, only: stdlog, mpp_pe, mpp_root_pe, mpp_clock_id, & + ! mpp_clock_begin, mpp_clock_end, clock_routine, & + ! input_nml_file + ! use diag_manager_mod, only: register_diag_field, send_data + ! use time_manager_mod, only: time_type, get_time + ! use constants_mod, only: grav, rdgas, rvgas, cp_air, hlv, hlf, pi => pi_8 + ! use fms_mod, only: write_version_number, open_namelist_file, & + ! check_nml_error, file_exist, close_file + + implicit none + + private + + public gfdl_cloud_microphys_driver, gfdl_cloud_microphys_init, gfdl_cloud_microphys_end + public wqs1, wqs2, qs_blend, wqsat_moist, wqsat2_moist + public qsmith_init, qsmith, es2_table1d, es3_table1d, esw_table1d + public setup_con, wet_bulb + public cloud_diagnosis + + real :: missing_value = - 1.e10 + + logical :: module_is_initialized = .false. + logical :: qsmith_tables_initialized = .false. + + character (len = 17) :: mod_name = 'gfdl_cloud_microphys' + + real, parameter :: grav = 9.80665 ! gfs: acceleration due to gravity + real, parameter :: rdgas = 287.05 ! gfs: gas constant for dry air + real, parameter :: rvgas = 461.50 ! gfs: gas constant for water vapor + real, parameter :: cp_air = 1004.6 ! gfs: heat capacity of dry air at constant pressure + real, parameter :: hlv = 2.5e6 ! gfs: latent heat of evaporation + real, parameter :: hlf = 3.3358e5 ! gfs: latent heat of fusion + real, parameter :: pi = 3.1415926535897931 ! gfs: ratio of circle circumference to diameter + + ! real, parameter :: rdgas = 287.04 ! gfdl: gas constant for dry air + + ! real, parameter :: cp_air = rdgas * 7. / 2. ! 1004.675, heat capacity of dry air at constant pressure + real, parameter :: cp_vap = 4.0 * rvgas ! 1846.0, heat capacity of water vapore at constnat pressure + ! real, parameter :: cv_air = 717.56 ! 
satoh value + real, parameter :: cv_air = cp_air - rdgas ! 717.55, heat capacity of dry air at constant volume + ! real, parameter :: cv_vap = 1410.0 ! emanuel value + real, parameter :: cv_vap = 3.0 * rvgas ! 1384.5, heat capacity of water vapor at constant volume + + ! the following two are from emanuel's book "atmospheric convection" + ! real, parameter :: c_ice = 2106.0 ! heat capacity of ice at 0 deg c: c = c_ice + 7.3 * (t - tice) + ! real, parameter :: c_liq = 4190.0 ! heat capacity of water at 0 deg c + + real, parameter :: c_ice = 1972.0 ! gfdl: heat capacity of ice at - 15 deg c + real, parameter :: c_liq = 4185.5 ! gfdl: heat capacity of water at 15 deg c + ! real, parameter :: c_liq = 4218.0 ! ifs: heat capacity of liquid at 0 deg c + + real, parameter :: eps = rdgas / rvgas ! 0.6219934995 + real, parameter :: zvir = rvgas / rdgas - 1. ! 0.6077338443 + + real, parameter :: t_ice = 273.16 ! freezing temperature + real, parameter :: table_ice = 273.16 ! freezing point for qs table + + ! real, parameter :: e00 = 610.71 ! gfdl: saturation vapor pressure at 0 deg c + real, parameter :: e00 = 611.21 ! ifs: saturation vapor pressure at 0 deg c + + real, parameter :: dc_vap = cp_vap - c_liq ! - 2339.5, isobaric heating / cooling + real, parameter :: dc_ice = c_liq - c_ice ! 2213.5, isobaric heating / colling + + real, parameter :: hlv0 = hlv ! gfs: evaporation latent heat coefficient at 0 deg c + ! real, parameter :: hlv0 = 2.501e6 ! emanuel appendix - 2 + real, parameter :: hlf0 = hlf ! gfs: fussion latent heat coefficient at 0 deg c + ! real, parameter :: hlf0 = 3.337e5 ! emanuel + + real, parameter :: lv0 = hlv0 - dc_vap * t_ice! 3.13905782e6, evaporation latent heat coefficient at 0 deg k + real, parameter :: li00 = hlf0 - dc_ice * t_ice! - 2.7105966e5, fussion latend heat coefficient at 0 deg k + + real, parameter :: d2ice = dc_vap + dc_ice ! - 126, isobaric heating / cooling + real, parameter :: li2 = lv0 + li00 ! 2.86799816e6, sublimation latent heat coefficient at 0 deg k + + real, parameter :: qrmin = 1.e-8 ! min value for ??? + real, parameter :: qvmin = 1.e-20 ! min value for water vapor (treated as zero) + real, parameter :: qcmin = 1.e-12 ! min value for cloud condensates + + real, parameter :: vr_min = 1.e-3 ! min fall speed for rain + real, parameter :: vf_min = 1.e-5 ! min fall speed for cloud ice, snow, graupel + + real, parameter :: dz_min = 1.e-2 ! use for correcting flipped height + + real, parameter :: sfcrho = 1.2 ! surface air density + real, parameter :: rhor = 1.e3 ! density of rain water, lin83 + + real :: cracs, csacr, cgacr, cgacs, csacw, craci, csaci, cgacw, cgaci, cracw ! constants for accretions + real :: acco (3, 4) ! constants for accretions + real :: cssub (5), cgsub (5), crevp (5), cgfr (2), csmlt (5), cgmlt (5) + + real :: es0, ces0 + real :: pie, rgrav, fac_rc + real :: c_air, c_vap + + real :: lati, latv, lats, lat2, lcp, icp, tcp ! used in bigg mechanism and wet bulk + + real :: d0_vap ! the same as dc_vap, except that cp_vap can be cp_vap or cv_vap + real :: lv00 ! the same as lv0, except that cp_vap can be cp_vap or cv_vap + + ! cloud microphysics switchers + + integer :: icloud_f = 0 ! cloud scheme + integer :: irain_f = 0 ! cloud water to rain auto conversion scheme + + logical :: de_ice = .false. ! to prevent excessive build - up of cloud ice from external sources + logical :: sedi_transport = .true. ! transport of momentum in sedimentation + logical :: do_sedi_w = .false. ! 
transport of vertical motion in sedimentation + logical :: do_sedi_heat = .true. ! transport of heat in sedimentation + logical :: prog_ccn = .false. ! do prognostic ccn (yi ming's method) + logical :: do_qa = .true. ! do inline cloud fraction + logical :: rad_snow = .true. ! consider snow in cloud fraciton calculation + logical :: rad_graupel = .true. ! consider graupel in cloud fraction calculation + logical :: rad_rain = .true. ! consider rain in cloud fraction calculation + logical :: fix_negative = .false. ! fix negative water species + logical :: do_setup = .true. ! setup constants and parameters + logical :: p_nonhydro = .false. ! perform hydrosatic adjustment on air density + + real, allocatable :: table (:), table2 (:), table3 (:), tablew (:) + real, allocatable :: des (:), des2 (:), des3 (:), desw (:) + + logical :: tables_are_initialized = .false. + + ! logical :: master + ! integer :: id_rh, id_vtr, id_vts, id_vtg, id_vti, id_rain, id_snow, id_graupel, & + ! id_ice, id_prec, id_cond, id_var, id_droplets + ! integer :: gfdl_mp_clock ! clock for timing of driver routine + + real, parameter :: dt_fr = 8. ! homogeneous freezing of all cloud water at t_wfr - dt_fr + ! minimum temperature water can exist (moore & molinero nov. 2011, nature) + ! dt_fr can be considered as the error bar + + real :: p_min = 100. ! minimum pressure (pascal) for mp to operate + + ! slj, the following parameters are for cloud - resolving resolution: 1 - 5 km + + ! qi0_crt = 0.8e-4 + ! qs0_crt = 0.6e-3 + ! c_psaci = 0.1 + ! c_pgacs = 0.1 + + ! ----------------------------------------------------------------------- + ! namelist parameters + ! ----------------------------------------------------------------------- + + real :: cld_min = 0.05 ! minimum cloud fraction + real :: tice = 273.16 ! set tice = 165. to trun off ice - phase phys (kessler emulator) + + real :: t_min = 178. ! min temp to freeze - dry all water vapor + real :: t_sub = 184. ! min temp for sublimation of cloud ice + real :: mp_time = 150. ! maximum micro - physics time step (sec) + + ! relative humidity increment + + real :: rh_inc = 0.25 ! rh increment for complete evaporation of cloud water and cloud ice + real :: rh_inr = 0.25 ! rh increment for minimum evaporation of rain + real :: rh_ins = 0.25 ! rh increment for sublimation of snow + + ! conversion time scale + + real :: tau_r2g = 900. ! rain freezing during fast_sat + real :: tau_smlt = 900. ! snow melting + real :: tau_g2r = 600. ! graupel melting to rain + real :: tau_imlt = 600. ! cloud ice melting + real :: tau_i2s = 1000. ! cloud ice to snow auto - conversion + real :: tau_l2r = 900. ! cloud water to rain auto - conversion + real :: tau_v2l = 150. ! water vapor to cloud water (condensation) + real :: tau_l2v = 300. ! cloud water to water vapor (evaporation) + real :: tau_g2v = 900. ! grapuel sublimation + real :: tau_v2g = 21600. ! grapuel deposition -- make it a slow process + + ! horizontal subgrid variability + + real :: dw_land = 0.20 ! base value for subgrid deviation / variability over land + real :: dw_ocean = 0.10 ! base value for ocean + + ! prescribed ccn + + real :: ccn_o = 90. ! ccn over ocean (cm^ - 3) + real :: ccn_l = 270. ! ccn over land (cm^ - 3) + + real :: rthresh = 10.0e-6 ! critical cloud drop radius (micro m) + + ! ----------------------------------------------------------------------- + ! wrf / wsm6 scheme: qi_gen = 4.92e-11 * (1.e3 * exp (0.1 * tmp)) ** 1.33 + ! optimized: qi_gen = 4.92e-11 * exp (1.33 * log (1.e3 * exp (0.1 * tmp))) + ! 
qi_gen ~ 4.808e-7 at 0 c; 1.818e-6 at - 10 c, 9.82679e-5 at - 40c + ! the following value is constructed such that qc_crt = 0 at zero c and @ - 10c matches + ! wrf / wsm6 ice initiation scheme; qi_crt = qi_gen * min (qi_lim, 0.1 * tmp) / den + ! ----------------------------------------------------------------------- + + real :: sat_adj0 = 0.90 ! adjustment factor (0: no, 1: full) during fast_sat_adj + + real :: qc_crt = 5.0e-8 ! mini condensate mixing ratio to allow partial cloudiness + + real :: qi_lim = 1. ! cloud ice limiter to prevent large ice build up + + real :: ql_mlt = 2.0e-3 ! max value of cloud water allowed from melted cloud ice + real :: qs_mlt = 1.0e-6 ! max cloud water due to snow melt + + real :: ql_gen = 1.0e-3 ! max cloud water generation during remapping step if fast_sat_adj = .t. + real :: qi_gen = 1.82e-6 ! max cloud ice generation during remapping step + + ! cloud condensate upper bounds: "safety valves" for ql & qi + + real :: ql0_max = 2.0e-3 ! max cloud water value (auto converted to rain) + real :: qi0_max = 1.0e-4 ! max cloud ice value (by other sources) + + real :: qi0_crt = 1.0e-4 ! cloud ice to snow autoconversion threshold (was 1.e-4) + ! qi0_crt is highly dependent on horizontal resolution + real :: qr0_crt = 1.0e-4 ! rain to snow or graupel / hail threshold + ! lfo used * mixing ratio * = 1.e-4 (hail in lfo) + real :: qs0_crt = 1.0e-3 ! snow to graupel density threshold (0.6e-3 in purdue lin scheme) + + real :: c_paut = 0.55 ! autoconversion cloud water to rain (use 0.5 to reduce autoconversion) + real :: c_psaci = 0.02 ! accretion: cloud ice to snow (was 0.1 in zetac) + real :: c_piacr = 5.0 ! accretion: rain to ice: + real :: c_cracw = 0.9 ! rain accretion efficiency + real :: c_pgacs = 2.0e-3 ! snow to graupel "accretion" eff. (was 0.1 in zetac) + + ! decreasing clin to reduce csacw (so as to reduce cloud water --- > snow) + + real :: alin = 842.0 ! "a" in lin1983 + real :: clin = 4.8 ! "c" in lin 1983, 4.8 -- > 6. (to ehance ql -- > qs) + + ! fall velocity tuning constants: + + logical :: const_vi = .false. ! if .t. the constants are specified by v * _fac + logical :: const_vs = .false. ! if .t. the constants are specified by v * _fac + logical :: const_vg = .false. ! if .t. the constants are specified by v * _fac + logical :: const_vr = .false. ! if .t. the constants are specified by v * _fac + + ! good values: + + real :: vi_fac = 1. ! if const_vi: 1 / 3 + real :: vs_fac = 1. ! if const_vs: 1. + real :: vg_fac = 1. ! if const_vg: 2. + real :: vr_fac = 1. ! if const_vr: 4. + + ! upper bounds of fall speed (with variable speed option) + + real :: vi_max = 0.5 ! max fall speed for ice + real :: vs_max = 5.0 ! max fall speed for snow + real :: vg_max = 8.0 ! max fall speed for graupel + real :: vr_max = 12. ! max fall speed for rain + + ! cloud microphysics switchers + + logical :: fast_sat_adj = .false. ! has fast saturation adjustments + logical :: z_slope_liq = .true. ! use linear mono slope for autocconversions + logical :: z_slope_ice = .false. ! use linear mono slope for autocconversions + logical :: use_ccn = .false. ! must be true when prog_ccn is false + logical :: use_ppm = .false. ! use ppm fall scheme + logical :: mono_prof = .true. ! perform terminal fall with mono ppm scheme + logical :: mp_print = .false. ! cloud microphysics debugging printout + + ! real :: global_area = - 1. + + real :: log_10, tice0, t_wfr + + ! ----------------------------------------------------------------------- + ! namelist + ! 
 +
 + namelist / gfdl_cloud_microphysics_nml / &
 + mp_time, t_min, t_sub, tau_r2g, tau_smlt, tau_g2r, dw_land, dw_ocean, &
 + vi_fac, vr_fac, vs_fac, vg_fac, ql_mlt, do_qa, fix_negative, vi_max, &
 + vs_max, vg_max, vr_max, qs_mlt, qs0_crt, qi_gen, ql0_max, qi0_max, &
 + qi0_crt, qr0_crt, fast_sat_adj, rh_inc, rh_ins, rh_inr, const_vi, &
 + const_vs, const_vg, const_vr, use_ccn, rthresh, ccn_l, ccn_o, qc_crt, &
 + tau_g2v, tau_v2g, sat_adj0, c_piacr, tau_imlt, tau_v2l, tau_l2v, &
 + tau_i2s, tau_l2r, qi_lim, ql_gen, c_paut, c_psaci, c_pgacs, &
 + z_slope_liq, z_slope_ice, prog_ccn, c_cracw, alin, clin, tice, &
 + rad_snow, rad_graupel, rad_rain, cld_min, use_ppm, mono_prof, &
 + do_sedi_heat, sedi_transport, do_sedi_w, de_ice, icloud_f, irain_f, mp_print
 +
 + public &
 + mp_time, t_min, t_sub, tau_r2g, tau_smlt, tau_g2r, dw_land, dw_ocean, &
 + vi_fac, vr_fac, vs_fac, vg_fac, ql_mlt, do_qa, fix_negative, vi_max, &
 + vs_max, vg_max, vr_max, qs_mlt, qs0_crt, qi_gen, ql0_max, qi0_max, &
 + qi0_crt, qr0_crt, fast_sat_adj, rh_inc, rh_ins, rh_inr, const_vi, &
 + const_vs, const_vg, const_vr, use_ccn, rthresh, ccn_l, ccn_o, qc_crt, &
 + tau_g2v, tau_v2g, sat_adj0, c_piacr, tau_imlt, tau_v2l, tau_l2v, &
 + tau_i2s, tau_l2r, qi_lim, ql_gen, c_paut, c_psaci, c_pgacs, &
 + z_slope_liq, z_slope_ice, prog_ccn, c_cracw, alin, clin, tice, &
 + rad_snow, rad_graupel, rad_rain, cld_min, use_ppm, mono_prof, &
 + do_sedi_heat, sedi_transport, do_sedi_w, de_ice, icloud_f, irain_f, mp_print
 +
 +contains
 +
 +! -----------------------------------------------------------------------
 +! the driver of the gfdl cloud microphysics
 +! -----------------------------------------------------------------------
 +
 +!subroutine gfdl_cloud_microphys_driver (qv, ql, qr, qi, qs, qg, qa, qn, &
 +! qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt, qa_dt, &
 +! pt_dt, pt, w, uin, vin, udt, vdt, dz, delp, area, dt_in, &
 +! land, rain, snow, ice, graupel, &
 +! hydrostatic, phys_hydrostatic, &
 +! iis, iie, jjs, jje, kks, kke, ktop, kbot, time)
 +
 +subroutine gfdl_cloud_microphys_driver (qv, ql, qr, qi, qs, qg, qa, qn, &
 + qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt, qa_dt, pt_dt, pt, w, &
 + uin, vin, udt, vdt, dz, delp, area, dt_in, land, rain, snow, ice, &
 + graupel, hydrostatic, phys_hydrostatic, iis, iie, jjs, jje, kks, &
 + kke, ktop, kbot, seconds)
 +
 + implicit none
 +
 + logical, intent (in) :: hydrostatic, phys_hydrostatic
 + integer, intent (in) :: iis, iie, jjs, jje ! physics window
 + integer, intent (in) :: kks, kke ! vertical dimension
 + integer, intent (in) :: ktop, kbot ! vertical compute domain
 + integer, intent (in) :: seconds
 +
 + real, intent (in) :: dt_in ! physics time step
 +
 + real, intent (in), dimension (:, :) :: area ! cell area
 + real, intent (in), dimension (:, :) :: land ! land fraction
 +
 + real, intent (in), dimension (:, :, :) :: delp, dz, uin, vin
 + real, intent (in), dimension (:, :, :) :: pt, qv, ql, qr, qg, qa, qn
 +
 + real, intent (inout), dimension (:, :, :) :: qi, qs
 + real, intent (inout), dimension (:, :, :) :: pt_dt, qa_dt, udt, vdt, w
 + real, intent (inout), dimension (:, :, :) :: qv_dt, ql_dt, qr_dt
 + real, intent (inout), dimension (:, :, :) :: qi_dt, qs_dt, qg_dt
 +
 + real, intent (out), dimension (:, :) :: rain, snow, ice, graupel
 +
 + ! logical :: used
 +
 + real :: mpdt, rdt, dts, convt, tot_prec
 +
 + integer :: i, j, k
 + integer :: is, ie, js, je ! physics window
 + integer :: ks, ke ! vertical dimension
 + integer :: days, ntimes
 +
 + real, dimension (iie - iis + 1, jje - jjs + 1) :: prec_mp, prec1, cond, w_var, rh0
 +
 + real, dimension (iie - iis + 1, jje - jjs + 1, kke - kks + 1) :: vt_r, vt_s, vt_g, vt_i, qn2
 +
 + real, dimension (size (pt, 1), size (pt, 3)) :: m2_rain, m2_sol
 +
 + real :: allmax
 +
 + is = 1
 + js = 1
 + ks = 1
 + ie = iie - iis + 1
 + je = jje - jjs + 1
 + ke = kke - kks + 1
 +
 + ! call mpp_clock_begin (gfdl_mp_clock)
 +
 + ! -----------------------------------------------------------------------
 + ! define heat capacity of dry air and water vapor based on hydrostatic or non - hydrostatic setup
 + ! -----------------------------------------------------------------------
 +
 + if (phys_hydrostatic .or. hydrostatic) then
 + c_air = cp_air
 + c_vap = cp_vap
 + p_nonhydro = .false.
 + else
 + c_air = cv_air
 + c_vap = cv_vap
 + p_nonhydro = .true.
 + endif
 + d0_vap = c_vap - c_liq
 + lv00 = hlv0 - d0_vap * t_ice
 +
 + if (hydrostatic) do_sedi_w = .false.
 +
 + ! -----------------------------------------------------------------------
 + ! define latent heat coefficient used in wet bulb and bigg mechanism
 + ! -----------------------------------------------------------------------
 +
 + latv = hlv
 + lati = hlf
 + lats = latv + lati
 + lat2 = lats * lats
 +
 + lcp = latv / cp_air
 + icp = lati / cp_air
 + tcp = (latv + lati) / cp_air
 +
 + ! tendency zero out for am moist processes should be done outside the driver
 +
 + ! -----------------------------------------------------------------------
 + ! define cloud microphysics sub time step
 + ! -----------------------------------------------------------------------
 +
 + mpdt = min (dt_in, mp_time)
 + rdt = 1. / dt_in
 + ntimes = nint (dt_in / mpdt)
 +
 + ! small time step:
 + dts = dt_in / real (ntimes)
 +
 + ! call get_time (time, seconds, days)
 +
 + ! -----------------------------------------------------------------------
 + ! initialize precipitation
 + ! -----------------------------------------------------------------------
 +
 + do j = js, je
 + do i = is, ie
 + graupel (i, j) = 0.
 + rain (i, j) = 0.
 + snow (i, j) = 0.
 + ice (i, j) = 0.
 + cond (i, j) = 0.
 + enddo
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! major cloud microphysics
 + ! -----------------------------------------------------------------------
 +
 + do j = js, je
 + call mpdrv (hydrostatic, uin, vin, w, delp, pt, qv, ql, qr, qi, qs, qg, &
 + qa, qn, dz, is, ie, js, je, ks, ke, ktop, kbot, j, dt_in, ntimes, &
 + rain (:, j), snow (:, j), graupel (:, j), ice (:, j), m2_rain, &
 + m2_sol, cond (:, j), area (:, j), land (:, j), udt, vdt, pt_dt, &
 + qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt, qa_dt, w_var, vt_r, &
 + vt_s, vt_g, vt_i, qn2)
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! no clouds allowed above ktop
 + ! -----------------------------------------------------------------------
 +
 + if (ks < ktop) then
 + do k = ks, ktop
 + if (do_qa) then
 + do j = js, je
 + do i = is, ie
 + qa_dt (i, j, k) = 0.
 + enddo
 + enddo
 + else
 + do j = js, je
 + do i = is, ie
 + ! qa_dt (i, j, k) = - qa (i, j, k) * rdt
 + qa_dt (i, j, k) = 0. ! gfs
 + enddo
 + enddo
 + endif
 + enddo
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! diagnostic output
 + ! -----------------------------------------------------------------------
 +
 + ! if (id_vtr > 0) then
 + ! used = send_data (id_vtr, vt_r, time, is_in = iis, js_in = jjs)
 + ! endif
 +
 + ! if (id_vts > 0) then
 + ! used = send_data (id_vts, vt_s, time, is_in = iis, js_in = jjs)
 + ! 
endif + + ! if (id_vtg > 0) then + ! used = send_data (id_vtg, vt_g, time, is_in = iis, js_in = jjs) + ! endif + + ! if (id_vti > 0) then + ! used = send_data (id_vti, vt_i, time, is_in = iis, js_in = jjs) + ! endif + + ! if (id_droplets > 0) then + ! used = send_data (id_droplets, qn2, time, is_in = iis, js_in = jjs) + ! endif + + ! if (id_var > 0) then + ! used = send_data (id_var, w_var, time, is_in = iis, js_in = jjs) + ! endif + + ! convert to mm / day + + convt = 86400. * rdt * rgrav + do j = js, je + do i = is, ie + rain (i, j) = rain (i, j) * convt + snow (i, j) = snow (i, j) * convt + ice (i, j) = ice (i, j) * convt + graupel (i, j) = graupel (i, j) * convt + prec_mp (i, j) = rain (i, j) + snow (i, j) + ice (i, j) + graupel (i, j) + enddo + enddo + + ! if (id_cond > 0) then + ! do j = js, je + ! do i = is, ie + ! cond (i, j) = cond (i, j) * rgrav + ! enddo + ! enddo + ! used = send_data (id_cond, cond, time, is_in = iis, js_in = jjs) + ! endif + + ! if (id_snow > 0) then + ! used = send_data (id_snow, snow, time, iis, jjs) + ! used = send_data (id_snow, snow, time, is_in = iis, js_in = jjs) + ! if (mp_print .and. seconds == 0) then + ! tot_prec = g_sum (snow, is, ie, js, je, area, 1) + ! if (master) write (*, *) 'mean snow = ', tot_prec + ! endif + ! endif + ! + ! if (id_graupel > 0) then + ! used = send_data (id_graupel, graupel, time, iis, jjs) + ! used = send_data (id_graupel, graupel, time, is_in = iis, js_in = jjs) + ! if (mp_print .and. seconds == 0) then + ! tot_prec = g_sum (graupel, is, ie, js, je, area, 1) + ! if (master) write (*, *) 'mean graupel = ', tot_prec + ! endif + ! endif + ! + ! if (id_ice > 0) then + ! used = send_data (id_ice, ice, time, iis, jjs) + ! used = send_data (id_ice, ice, time, is_in = iis, js_in = jjs) + ! if (mp_print .and. seconds == 0) then + ! tot_prec = g_sum (ice, is, ie, js, je, area, 1) + ! if (master) write (*, *) 'mean ice_mp = ', tot_prec + ! endif + ! endif + ! + ! if (id_rain > 0) then + ! used = send_data (id_rain, rain, time, iis, jjs) + ! used = send_data (id_rain, rain, time, is_in = iis, js_in = jjs) + ! if (mp_print .and. seconds == 0) then + ! tot_prec = g_sum (rain, is, ie, js, je, area, 1) + ! if (master) write (*, *) 'mean rain = ', tot_prec + ! endif + ! endif + ! + ! if (id_rh > 0) then !not used? + ! used = send_data (id_rh, rh0, time, iis, jjs) + ! used = send_data (id_rh, rh0, time, is_in = iis, js_in = jjs) + ! endif + ! + ! + ! if (id_prec > 0) then + ! used = send_data (id_prec, prec_mp, time, iis, jjs) + ! used = send_data (id_prec, prec_mp, time, is_in = iis, js_in = jjs) + ! endif + + ! if (mp_print) then + ! prec1 (:, :) = prec1 (:, :) + prec_mp (:, :) + ! if (seconds == 0) then + ! prec1 (:, :) = prec1 (:, :) * dt_in / 86400. + ! tot_prec = g_sum (prec1, is, ie, js, je, area, 1) + ! if (master) write (*, *) 'daily prec_mp = ', tot_prec + ! prec1 (:, :) = 0. + ! endif + ! endif + + ! call mpp_clock_end (gfdl_mp_clock) + +end subroutine gfdl_cloud_microphys_driver + +! ----------------------------------------------------------------------- +! gfdl cloud microphysics, major program +! lin et al., 1983, jam, 1065 - 1092, and +! rutledge and hobbs, 1984, jas, 2949 - 2972 +! terminal fall is handled lagrangianly by conservative fv algorithm +! pt: temperature (k) +! 6 water species: +! 1) qv: water vapor (kg / kg) +! 2) ql: cloud water (kg / kg) +! 3) qr: rain (kg / kg) +! 4) qi: cloud ice (kg / kg) +! 5) qs: snow (kg / kg) +! 6) qg: graupel (kg / kg) +! 
----------------------------------------------------------------------- + +subroutine mpdrv (hydrostatic, uin, vin, w, delp, pt, qv, ql, qr, qi, qs, & + qg, qa, qn, dz, is, ie, js, je, ks, ke, ktop, kbot, j, dt_in, ntimes, & + rain, snow, graupel, ice, m2_rain, m2_sol, cond, area1, land, & + u_dt, v_dt, pt_dt, qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt, qa_dt, & + w_var, vt_r, vt_s, vt_g, vt_i, qn2) + + implicit none + + logical, intent (in) :: hydrostatic + + integer, intent (in) :: j, is, ie, js, je, ks, ke + integer, intent (in) :: ntimes, ktop, kbot + + real, intent (in) :: dt_in + + real, intent (in), dimension (is:) :: area1, land + + real, intent (in), dimension (is:, js:, ks:) :: uin, vin, delp, pt, dz + real, intent (in), dimension (is:, js:, ks:) :: qv, ql, qr, qg, qa, qn + + real, intent (inout), dimension (is:, js:, ks:) :: qi, qs + real, intent (inout), dimension (is:, js:, ks:) :: u_dt, v_dt, w, pt_dt, qa_dt + real, intent (inout), dimension (is:, js:, ks:) :: qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt + + real, intent (inout), dimension (is:) :: rain, snow, ice, graupel, cond + + real, intent (out), dimension (is:, js:) :: w_var + + real, intent (out), dimension (is:, js:, ks:) :: vt_r, vt_s, vt_g, vt_i, qn2 + + real, intent (out), dimension (is:, ks:) :: m2_rain, m2_sol + + real, dimension (ktop:kbot) :: qvz, qlz, qrz, qiz, qsz, qgz, qaz + real, dimension (ktop:kbot) :: vtiz, vtsz, vtgz, vtrz + real, dimension (ktop:kbot) :: dp0, dp1, dz0, dz1 + real, dimension (ktop:kbot) :: qv0, ql0, qr0, qi0, qs0, qg0, qa0 + real, dimension (ktop:kbot) :: t0, den, den0, tz, p1, denfac + real, dimension (ktop:kbot) :: ccn, c_praut, m1_rain, m1_sol, m1 + real, dimension (ktop:kbot) :: u0, v0, u1, v1, w1 + + real :: cpaut, rh_adj, rh_rain + real :: r1, s1, i1, g1, rdt, ccn0 + real :: dt_rain, dts + real :: s_leng, t_land, t_ocean, h_var + real :: cvm, tmp, omq + real :: dqi, qio, qin + + integer :: i, k, n + + dts = dt_in / real (ntimes) + dt_rain = dts * 0.5 + rdt = 1. / dt_in + + ! ----------------------------------------------------------------------- + ! use local variables + ! ----------------------------------------------------------------------- + + do i = is, ie + + do k = ktop, kbot + qiz (k) = qi (i, j, k) + qsz (k) = qs (i, j, k) + enddo + + ! ----------------------------------------------------------------------- + ! this is to prevent excessive build - up of cloud ice from external sources + ! ----------------------------------------------------------------------- + + if (de_ice) then + do k = ktop, kbot + qio = qiz (k) - dt_in * qi_dt (i, j, k) ! original qi before phys + qin = max (qio, qi0_max) ! adjusted value + if (qiz (k) > qin) then + qsz (k) = qsz (k) + qiz (k) - qin + qiz (k) = qin + dqi = (qin - qio) * rdt ! modified qi tendency + qs_dt (i, j, k) = qs_dt (i, j, k) + qi_dt (i, j, k) - dqi + qi_dt (i, j, k) = dqi + qi (i, j, k) = qiz (k) + qs (i, j, k) = qsz (k) + endif + enddo + endif + + do k = ktop, kbot + + t0 (k) = pt (i, j, k) + tz (k) = t0 (k) + dp1 (k) = delp (i, j, k) + dp0 (k) = dp1 (k) ! moist air mass * grav + + ! ----------------------------------------------------------------------- + ! convert moist mixing ratios to dry mixing ratios + ! ----------------------------------------------------------------------- + + qvz (k) = qv (i, j, k) + qlz (k) = ql (i, j, k) + qrz (k) = qr (i, j, k) + qgz (k) = qg (i, j, k) + + ! dp1: dry air_mass + ! dp1 (k) = dp1 (k) * (1. - (qvz (k) + qlz (k) + qrz (k) + qiz (k) + qsz (k) + qgz (k))) + dp1 (k) = dp1 (k) * (1. - qvz (k)) ! 
gfs
 + omq = dp0 (k) / dp1 (k)
 +
 + qvz (k) = qvz (k) * omq
 + qlz (k) = qlz (k) * omq
 + qrz (k) = qrz (k) * omq
 + qiz (k) = qiz (k) * omq
 + qsz (k) = qsz (k) * omq
 + qgz (k) = qgz (k) * omq
 +
 + qa0 (k) = qa (i, j, k)
 + qaz (k) = 0.
 + dz0 (k) = dz (i, j, k)
 +
 + den0 (k) = - dp1 (k) / (grav * dz0 (k)) ! density of dry air
 + p1 (k) = den0 (k) * rdgas * t0 (k) ! dry air pressure
 +
 + ! -----------------------------------------------------------------------
 + ! save a copy of old value for computing tendencies
 + ! -----------------------------------------------------------------------
 +
 + qv0 (k) = qvz (k)
 + ql0 (k) = qlz (k)
 + qr0 (k) = qrz (k)
 + qi0 (k) = qiz (k)
 + qs0 (k) = qsz (k)
 + qg0 (k) = qgz (k)
 +
 + ! -----------------------------------------------------------------------
 + ! for sedi_momentum
 + ! -----------------------------------------------------------------------
 +
 + m1 (k) = 0.
 + u0 (k) = uin (i, j, k)
 + v0 (k) = vin (i, j, k)
 + u1 (k) = u0 (k)
 + v1 (k) = v0 (k)
 +
 + enddo
 +
 + if (do_sedi_w) then
 + do k = ktop, kbot
 + w1 (k) = w (i, j, k)
 + enddo
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! calculate cloud condensation nuclei (ccn)
 + ! the following is based on klein eq. 15
 + ! -----------------------------------------------------------------------
 +
 + cpaut = c_paut * 0.104 * grav / 1.717e-5
 +
 + if (prog_ccn) then
 + do k = ktop, kbot
 + ! convert # / cc to # / m^3
 + ccn (k) = qn (i, j, k) * 1.e6
 + c_praut (k) = cpaut * (ccn (k) * rhor) ** (- 1. / 3.)
 + enddo
 + use_ccn = .false.
 + else
 + ccn0 = (ccn_l * land (i) + ccn_o * (1. - land (i))) * 1.e6
 + if (use_ccn) then
 + ! -----------------------------------------------------------------------
 + ! ccn is formulated as ccn = ccn_surface * (den / den_surface)
 + ! -----------------------------------------------------------------------
 + ccn0 = ccn0 * rdgas * tz (kbot) / p1 (kbot)
 + endif
 + tmp = cpaut * (ccn0 * rhor) ** (- 1. / 3.)
 + do k = ktop, kbot
 + c_praut (k) = tmp
 + ccn (k) = ccn0
 + enddo
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! calculate horizontal subgrid variability
 + ! total water subgrid deviation in horizontal direction
 + ! default area dependent form: use dx ~ 100 km as the base
 + ! -----------------------------------------------------------------------
 +
 + s_leng = sqrt (sqrt (area1 (i) / 1.e10))
 + t_land = dw_land * s_leng
 + t_ocean = dw_ocean * s_leng
 + h_var = t_land * land (i) + t_ocean * (1. - land (i))
 + h_var = min (0.20, max (0.01, h_var))
 + ! if (id_var > 0) w_var (i, j) = h_var
 +
 + ! -----------------------------------------------------------------------
 + ! relative humidity increment
 + ! -----------------------------------------------------------------------
 +
 + rh_adj = 1. - h_var - rh_inc
 + rh_rain = max (0.35, rh_adj - rh_inr) ! rh_inr = 0.25
 +
 + ! -----------------------------------------------------------------------
 + ! fix all negative water species
 + ! -----------------------------------------------------------------------
 +
 + if (fix_negative) &
 + call neg_adj (ktop, kbot, tz, dp1, qvz, qlz, qrz, qiz, qsz, qgz)
 +
 + m2_rain (:, :) = 0.
 + m2_sol (:, :) = 0.
 +
 + do n = 1, ntimes
 +
 + ! -----------------------------------------------------------------------
 + ! define air density based on hydrostatic or non - hydrostatic setup
 + ! -----------------------------------------------------------------------
 +
 + if (p_nonhydro) then
 + do k = ktop, kbot
 + dz1 (k) = dz0 (k)
 + den (k) = den0 (k) ! 
dry air density remains the same + denfac (k) = sqrt (sfcrho / den (k)) + enddo + else + do k = ktop, kbot + dz1 (k) = dz0 (k) * tz (k) / t0 (k) ! hydrostatic balance + den (k) = den0 (k) * dz0 (k) / dz1 (k) + denfac (k) = sqrt (sfcrho / den (k)) + enddo + endif + + ! ----------------------------------------------------------------------- + ! time - split warm rain processes: 1st pass + ! ----------------------------------------------------------------------- + + call warm_rain (dt_rain, ktop, kbot, dp1, dz1, tz, qvz, qlz, qrz, qiz, qsz, & + qgz, den, denfac, ccn, c_praut, rh_rain, vtrz, r1, m1_rain, w1, h_var) + + rain (i) = rain (i) + r1 + + do k = ktop, kbot + m2_rain (i, k) = m2_rain (i, k) + m1_rain (k) + m1 (k) = m1 (k) + m1_rain (k) + enddo + + ! ----------------------------------------------------------------------- + ! sedimentation of cloud ice, snow, and graupel + ! ----------------------------------------------------------------------- + + call fall_speed (ktop, kbot, den, qsz, qiz, qgz, qlz, tz, vtsz, vtiz, vtgz) + + call terminal_fall (dts, ktop, kbot, tz, qvz, qlz, qrz, qgz, qsz, qiz, & + dz1, dp1, den, vtgz, vtsz, vtiz, r1, g1, s1, i1, m1_sol, w1) + + rain (i) = rain (i) + r1 ! from melted snow & ice that reached the ground + snow (i) = snow (i) + s1 + graupel (i) = graupel (i) + g1 + ice (i) = ice (i) + i1 + + ! ----------------------------------------------------------------------- + ! heat transportation during sedimentation + ! ----------------------------------------------------------------------- + + if (do_sedi_heat) & + call sedi_heat (ktop, kbot, dp1, m1_sol, dz1, tz, qvz, qlz, qrz, qiz, & + qsz, qgz, c_ice) + + ! ----------------------------------------------------------------------- + ! time - split warm rain processes: 2nd pass + ! ----------------------------------------------------------------------- + + call warm_rain (dt_rain, ktop, kbot, dp1, dz1, tz, qvz, qlz, qrz, qiz, qsz, & + qgz, den, denfac, ccn, c_praut, rh_rain, vtrz, r1, m1_rain, w1, h_var) + + rain (i) = rain (i) + r1 + + do k = ktop, kbot + m2_rain (i, k) = m2_rain (i, k) + m1_rain (k) + m2_sol (i, k) = m2_sol (i, k) + m1_sol (k) + m1 (k) = m1 (k) + m1_rain (k) + m1_sol (k) + enddo + + ! ----------------------------------------------------------------------- + ! ice - phase microphysics + ! ----------------------------------------------------------------------- + + call icloud (ktop, kbot, tz, p1, qvz, qlz, qrz, qiz, qsz, qgz, dp1, den, & + denfac, vtsz, vtgz, vtrz, qaz, rh_adj, rh_rain, dts, h_var) + + enddo + + m2_rain (i, :) = m2_rain (i, :) * rdt * rgrav + m2_sol (i, :) = m2_sol (i, :) * rdt * rgrav + + ! ----------------------------------------------------------------------- + ! momentum transportation during sedimentation + ! note: dp1 is dry mass; dp0 is the old moist (total) mass + ! ----------------------------------------------------------------------- + + if (sedi_transport) then + do k = ktop + 1, kbot + u1 (k) = (dp0 (k) * u1 (k) + m1 (k - 1) * u1 (k - 1)) / (dp0 (k) + m1 (k - 1)) + v1 (k) = (dp0 (k) * v1 (k) + m1 (k - 1) * v1 (k - 1)) / (dp0 (k) + m1 (k - 1)) + u_dt (i, j, k) = u_dt (i, j, k) + (u1 (k) - u0 (k)) * rdt + v_dt (i, j, k) = v_dt (i, j, k) + (v1 (k) - v0 (k)) * rdt + enddo + endif + + if (do_sedi_w) then + do k = ktop, kbot + w (i, j, k) = w1 (k) + enddo + endif + + ! ----------------------------------------------------------------------- + ! update moist air mass (actually hydrostatic pressure) + ! convert to dry mixing ratios + ! 
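-----------------------------------------------------------------------
 +
 + ! note: the qvz .. qgz updated by the sub - stepped physics above are on
 + ! the dry - mass base (see the dp1 / dp0 note in the momentum section);
 + ! omq = dp1 / dp0 rescales each increment back to the host model's air
 + ! mass base, e.g. for vapor (restating the loop's first line below):
 + ! qv_dt = qv_dt + rdt * (qvz - qv0) * dp1 / dp0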
 +
 + do k = ktop, kbot
 + omq = dp1 (k) / dp0 (k)
 + qv_dt (i, j, k) = qv_dt (i, j, k) + rdt * (qvz (k) - qv0 (k)) * omq
 + ql_dt (i, j, k) = ql_dt (i, j, k) + rdt * (qlz (k) - ql0 (k)) * omq
 + qr_dt (i, j, k) = qr_dt (i, j, k) + rdt * (qrz (k) - qr0 (k)) * omq
 + qi_dt (i, j, k) = qi_dt (i, j, k) + rdt * (qiz (k) - qi0 (k)) * omq
 + qs_dt (i, j, k) = qs_dt (i, j, k) + rdt * (qsz (k) - qs0 (k)) * omq
 + qg_dt (i, j, k) = qg_dt (i, j, k) + rdt * (qgz (k) - qg0 (k)) * omq
 + cvm = c_air + qvz (k) * c_vap + (qrz (k) + qlz (k)) * c_liq + (qiz (k) + qsz (k) + qgz (k)) * c_ice
 + pt_dt (i, j, k) = pt_dt (i, j, k) + rdt * (tz (k) - t0 (k)) * cvm / cp_air
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! update cloud fraction tendency
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + if (do_qa) then
 + qa_dt (i, j, k) = 0.
 + else
 + qa_dt (i, j, k) = qa_dt (i, j, k) + rdt * (qaz (k) / real (ntimes) - qa0 (k))
 + endif
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! fms diagnostics:
 + ! -----------------------------------------------------------------------
 +
 + ! if (id_cond > 0) then
 + ! do k = ktop, kbot ! total condensate
 + ! cond (i) = cond (i) + dp1 (k) * (qlz (k) + qrz (k) + qsz (k) + qiz (k) + qgz (k))
 + ! enddo
 + ! endif
 + !
 + ! if (id_vtr > 0) then
 + ! do k = ktop, kbot
 + ! vt_r (i, j, k) = vtrz (k)
 + ! enddo
 + ! endif
 + !
 + ! if (id_vts > 0) then
 + ! do k = ktop, kbot
 + ! vt_s (i, j, k) = vtsz (k)
 + ! enddo
 + ! endif
 + !
 + ! if (id_vtg > 0) then
 + ! do k = ktop, kbot
 + ! vt_g (i, j, k) = vtgz (k)
 + ! enddo
 + ! endif
 + !
 + ! if (id_vti > 0) then
 + ! do k = ktop, kbot
 + ! vt_i (i, j, k) = vtiz (k)
 + ! enddo
 + ! endif
 + !
 + ! if (id_droplets > 0) then
 + ! do k = ktop, kbot
 + ! qn2 (i, j, k) = ccn (k)
 + ! enddo
 + ! endif
 +
 + enddo
 +
 +end subroutine mpdrv
 +
 +! -----------------------------------------------------------------------
 +! sedimentation of heat
 +! -----------------------------------------------------------------------
 +
 +subroutine sedi_heat (ktop, kbot, dm, m1, dz, tz, qv, ql, qr, qi, qs, qg, cw)
 +
 + implicit none
 +
 + ! input q fields are dry mixing ratios, and dm is dry air mass
 +
 + integer, intent (in) :: ktop, kbot
 +
 + real, intent (in), dimension (ktop:kbot) :: dm, m1, dz, qv, ql, qr, qi, qs, qg
 +
 + real, intent (inout), dimension (ktop:kbot) :: tz
 +
 + real, intent (in) :: cw ! heat capacity
 +
 + real, dimension (ktop:kbot) :: dgz, cvn
 +
 + real :: tmp
 +
 + integer :: k
 +
 + do k = ktop, kbot
 + dgz (k) = - 0.5 * grav * dz (k) ! > 0
 + cvn (k) = dm (k) * (cv_air + qv (k) * cv_vap + (qr (k) + ql (k)) * &
 + c_liq + (qi (k) + qs (k) + qg (k)) * c_ice)
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! sjl, july 2014
 + ! assumption: the ke in the falling condensates is negligible compared to the potential energy
 + ! that was unaccounted for. local thermal equilibrium is assumed, and the loss in pe is transformed
 + ! into internal energy (to heat the whole grid box)
 + ! backward time - implicit upwind transport scheme:
 + ! dm here is dry air mass
 + ! -----------------------------------------------------------------------
 +
 + k = ktop
 + tmp = cvn (k) + m1 (k) * cw
 + tz (k) = (tmp * tz (k) + m1 (k) * dgz (k)) / tmp
 +
 + ! -----------------------------------------------------------------------
 + ! implicit algorithm: can't be vectorized
 + ! 
needs an inner i - loop for vectorization + ! ----------------------------------------------------------------------- + + do k = ktop + 1, kbot + tz (k) = ((cvn (k) + cw * (m1 (k) - m1 (k - 1))) * tz (k) + m1 (k - 1) * & + cw * tz (k - 1) + dgz (k) * (m1 (k - 1) + m1 (k))) / (cvn (k) + cw * m1 (k)) + enddo + +end subroutine sedi_heat + +! ----------------------------------------------------------------------- +! warm rain cloud microphysics +! ----------------------------------------------------------------------- + +subroutine warm_rain (dt, ktop, kbot, dp, dz, tz, qv, ql, qr, qi, qs, qg, & + den, denfac, ccn, c_praut, rh_rain, vtr, r1, m1_rain, w1, h_var) + + implicit none + + integer, intent (in) :: ktop, kbot + + real, intent (in) :: dt ! time step (s) + real, intent (in) :: rh_rain, h_var + + real, intent (in), dimension (ktop:kbot) :: dp, dz, den + real, intent (in), dimension (ktop:kbot) :: denfac, ccn, c_praut + + real, intent (inout), dimension (ktop:kbot) :: tz, vtr + real, intent (inout), dimension (ktop:kbot) :: qv, ql, qr, qi, qs, qg + real, intent (inout), dimension (ktop:kbot) :: m1_rain, w1 + + real, intent (out) :: r1 + + real, parameter :: so3 = 7. / 3. + + real, dimension (ktop:kbot) :: dl, dm + real, dimension (ktop:kbot + 1) :: ze, zt + + real :: sink, dq, qc0, qc + real :: qden + real :: zs = 0. + real :: dt5 + + integer :: k + + ! fall velocity constants: + + real, parameter :: vconr = 2503.23638966667 + real, parameter :: normr = 25132741228.7183 + real, parameter :: thr = 1.e-8 + + logical :: no_fall + + dt5 = 0.5 * dt + + ! ----------------------------------------------------------------------- + ! terminal speed of rain + ! ----------------------------------------------------------------------- + + m1_rain (:) = 0. + + call check_column (ktop, kbot, qr, no_fall) + + if (no_fall) then + vtr (:) = vf_min + r1 = 0. + else + + ! ----------------------------------------------------------------------- + ! fall speed of rain + ! ----------------------------------------------------------------------- + + if (const_vr) then + vtr (:) = vr_fac ! ifs_2016: 4.0 + else + do k = ktop, kbot + qden = qr (k) * den (k) + if (qr (k) < thr) then + vtr (k) = vr_min + else + vtr (k) = vr_fac * vconr * sqrt (min (10., sfcrho / den (k))) * & + exp (0.2 * log (qden / normr)) + vtr (k) = min (vr_max, max (vr_min, vtr (k))) + endif + enddo + endif + + ze (kbot + 1) = zs + do k = kbot, ktop, - 1 + ze (k) = ze (k + 1) - dz (k) ! dz < 0 + enddo + + ! ----------------------------------------------------------------------- + ! evaporation and accretion of rain for the first 1 / 2 time step + ! ----------------------------------------------------------------------- + + ! if (.not. fast_sat_adj) & + call revap_racc (ktop, kbot, dt5, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, h_var) + + if (do_sedi_w) then + do k = ktop, kbot + dm (k) = dp (k) * (1. + qv (k) + ql (k) + qr (k) + qi (k) + qs (k) + qg (k)) + enddo + endif + + ! ----------------------------------------------------------------------- + ! mass flux induced by falling rain + ! 
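-----------------------------------------------------------------------
 +
 + ! two fall schemes are selectable here: with the default use_ppm = .false.
 + ! the backward - implicit flux form (implicit_fall) is used; otherwise the
 + ! lagrangian ppm scheme remaps rain along interface trajectories zt built
 + ! from time - centered fall speeds, with dz_min keeping interfaces from
 + ! crossing (see the zt construction just below)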
 +
 + if (use_ppm) then
 + zt (ktop) = ze (ktop)
 + do k = ktop + 1, kbot
 + zt (k) = ze (k) - dt5 * (vtr (k - 1) + vtr (k))
 + enddo
 + zt (kbot + 1) = zs - dt * vtr (kbot)
 +
 + do k = ktop, kbot
 + if (zt (k + 1) >= zt (k)) zt (k + 1) = zt (k) - dz_min
 + enddo
 + call lagrangian_fall_ppm (ktop, kbot, zs, ze, zt, dp, qr, r1, m1_rain, mono_prof)
 + else
 + call implicit_fall (dt, ktop, kbot, ze, vtr, dp, qr, r1, m1_rain)
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! vertical velocity transportation during sedimentation
 + ! -----------------------------------------------------------------------
 +
 + if (do_sedi_w) then
 + w1 (ktop) = (dm (ktop) * w1 (ktop) + m1_rain (ktop) * vtr (ktop)) / (dm (ktop) - m1_rain (ktop))
 + do k = ktop + 1, kbot
 + w1 (k) = (dm (k) * w1 (k) - m1_rain (k - 1) * vtr (k - 1) + m1_rain (k) * vtr (k)) &
 + / (dm (k) + m1_rain (k - 1) - m1_rain (k))
 + enddo
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! heat transportation during sedimentation
 + ! -----------------------------------------------------------------------
 +
 + if (do_sedi_heat) &
 + call sedi_heat (ktop, kbot, dp, m1_rain, dz, tz, qv, ql, qr, qi, qs, qg, c_liq)
 +
 + ! -----------------------------------------------------------------------
 + ! evaporation and accretion of rain for the remaining 1 / 2 time step
 + ! -----------------------------------------------------------------------
 +
 + call revap_racc (ktop, kbot, dt5, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, h_var)
 +
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! auto - conversion
 + ! assuming linear subgrid vertical distribution of cloud water
 + ! following lin et al. 1994, mwr
 + ! -----------------------------------------------------------------------
 +
 + if (irain_f /= 0) then
 +
 + ! -----------------------------------------------------------------------
 + ! no subgrid variability
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + qc0 = fac_rc * ccn (k)
 + if (tz (k) > t_wfr) then
 + if (use_ccn) then
 + ! -----------------------------------------------------------------------
 + ! ccn is formulated as ccn = ccn_surface * (den / den_surface)
 + ! -----------------------------------------------------------------------
 + qc = qc0
 + else
 + qc = qc0 / den (k)
 + endif
 + dq = ql (k) - qc
 + if (dq > 0.) then
 + sink = min (dq, dt * c_praut (k) * den (k) * exp (so3 * log (ql (k))))
 + ql (k) = ql (k) - sink
 + qr (k) = qr (k) + sink
 + endif
 + endif
 + enddo
 +
 + else
 +
 + ! -----------------------------------------------------------------------
 + ! with subgrid variability
 + ! -----------------------------------------------------------------------
 +
 + call linear_prof (kbot - ktop + 1, ql (ktop), dl (ktop), z_slope_liq, h_var)
 +
 + do k = ktop, kbot
 + qc0 = fac_rc * ccn (k)
 + if (tz (k) > t_wfr + dt_fr) then
 + dl (k) = min (max (1.e-6, dl (k)), 0.5 * ql (k))
 + ! --------------------------------------------------------------------
 + ! as in klein's gfdl am2 stratiform scheme (with subgrid variations)
 + ! --------------------------------------------------------------------
 + if (use_ccn) then
 + ! --------------------------------------------------------------------
 + ! ccn is formulated as ccn = ccn_surface * (den / den_surface)
 + ! --------------------------------------------------------------------
 + qc = qc0
 + else
 + qc = qc0 / den (k)
 + endif
 + dq = 0.5 * (ql (k) + dl (k) - qc)
 + ! --------------------------------------------------------------------
 + ! dq = dl if qc == q_minus = ql - dl
 + ! dq = 0 if qc == q_plus = ql + dl
 + ! --------------------------------------------------------------------
 + if (dq > 0.) then ! q_plus > qc
 + ! --------------------------------------------------------------------
 + ! revised continuous form: linearly decays (with subgrid dl) to zero at qc == ql + dl
 + ! --------------------------------------------------------------------
 + sink = min (1., dq / dl (k)) * dt * c_praut (k) * den (k) * exp (so3 * log (ql (k)))
 + ql (k) = ql (k) - sink
 + qr (k) = qr (k) + sink
 + endif
 + endif
 + enddo
 + endif
 +
 +end subroutine warm_rain
 +
 +! -----------------------------------------------------------------------
 +! evaporation of rain
 +! -----------------------------------------------------------------------
 +
 +subroutine revap_racc (ktop, kbot, dt, tz, qv, ql, qr, qi, qs, qg, den, denfac, rh_rain, h_var)
 +
 + implicit none
 +
 + integer, intent (in) :: ktop, kbot
 +
 + real, intent (in) :: dt ! time step (s)
 + real, intent (in) :: rh_rain, h_var
 +
 + real, intent (in), dimension (ktop:kbot) :: den, denfac
 +
 + real, intent (inout), dimension (ktop:kbot) :: tz, qv, qr, ql, qi, qs, qg
 +
 + real, dimension (ktop:kbot) :: lhl, cvm, q_liq, q_sol, lcpk
 +
 + real :: dqv, qsat, dqsdt, evap, t2, qden, q_plus, q_minus, sink
 + real :: qpz, dq, dqh, tin
 +
 + integer :: k
 +
 + do k = ktop, kbot
 +
 + if (tz (k) > t_wfr .and. qr (k) > qrmin) then
 +
 + ! -----------------------------------------------------------------------
 + ! define heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + lhl (k) = lv00 + d0_vap * tz (k)
 + q_liq (k) = ql (k) + qr (k)
 + q_sol (k) = qi (k) + qs (k) + qg (k)
 + cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + lcpk (k) = lhl (k) / cvm (k)
 +
 + tin = tz (k) - lcpk (k) * ql (k) ! presence of clouds suppresses the rain evap
 + qpz = qv (k) + ql (k)
 + qsat = wqs2 (tin, den (k), dqsdt)
 + dqh = max (ql (k), h_var * max (qpz, qcmin))
 + dqh = min (dqh, 0.2 * qpz) ! new limiter
 + dqv = qsat - qv (k) ! use this to prevent super - saturating the grid box
 + q_minus = qpz - dqh
 + q_plus = qpz + dqh
 +
 + ! -----------------------------------------------------------------------
 + ! qsat must be > q_minus to activate evaporation
 + ! qsat must be < q_plus to activate accretion
 + ! -----------------------------------------------------------------------
 +
 + ! -----------------------------------------------------------------------
 + ! rain evaporation
 + ! -----------------------------------------------------------------------
 +
 + if (dqv > qvmin .and. qsat > q_minus) then
 + if (qsat > q_plus) then
 + dq = qsat - qpz
 + else
 + ! -----------------------------------------------------------------------
 + ! q_minus < qsat < q_plus
 + ! dq == dqh if qsat == q_minus
 + ! -----------------------------------------------------------------------
 + dq = 0.25 * (q_minus - qsat) ** 2 / dqh
 + endif
 + qden = qr (k) * den (k)
 + t2 = tin * tin
 + evap = crevp (1) * t2 * dq * (crevp (2) * sqrt (qden) + crevp (3) * &
 + exp (0.725 * log (qden))) / (crevp (4) * t2 + crevp (5) * qsat * den (k))
 + evap = min (qr (k), dt * evap, dqv / (1. + lcpk (k) * dqsdt))
 + ! -----------------------------------------------------------------------
 + ! alternative minimum evap in dry environmental air
 + ! sink = min (qr (k), dim (rh_rain * qsat, qv (k)) / (1. + lcpk (k) * dqsdt))
 + ! evap = max (evap, sink)
 + ! -----------------------------------------------------------------------
 + qr (k) = qr (k) - evap
 + qv (k) = qv (k) + evap
 + q_liq (k) = q_liq (k) - evap
 + cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz (k) = tz (k) - evap * lhl (k) / cvm (k)
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! accretion: pracc
 + ! -----------------------------------------------------------------------
 +
 + ! if (qr (k) > qrmin .and. ql (k) > 1.e-7 .and. qsat < q_plus) then
 + if (qr (k) > qrmin .and. ql (k) > 1.e-6 .and. qsat < q_minus) then
 + sink = dt * denfac (k) * cracw * exp (0.95 * log (qr (k) * den (k)))
 + sink = sink / (1. + sink) * ql (k)
 + ql (k) = ql (k) - sink
 + qr (k) = qr (k) + sink
 + endif
 +
 + endif ! warm - rain
 + enddo
 +
 +end subroutine revap_racc
 +
 +! -----------------------------------------------------------------------
 +! definition of vertical subgrid variability
 +! used for cloud ice and cloud water autoconversion
 +! qi -- > qs & ql -- > qr
 +! edges: qe == qbar + / - dm
 +! -----------------------------------------------------------------------
 +
 +subroutine linear_prof (km, q, dm, z_var, h_var)
 +
 + implicit none
 +
 + integer, intent (in) :: km
 +
 + real, intent (in) :: q (km), h_var
 +
 + real, intent (out) :: dm (km)
 +
 + logical, intent (in) :: z_var
 +
 + real :: dq (km)
 +
 + integer :: k
 +
 + if (z_var) then
 + do k = 2, km
 + dq (k) = 0.5 * (q (k) - q (k - 1))
 + enddo
 + dm (1) = 0.
 +
 + ! -----------------------------------------------------------------------
 + ! use twice the strength of the positive definiteness limiter (lin et al 1994)
 + ! -----------------------------------------------------------------------
 +
 + do k = 2, km - 1
 + dm (k) = 0.5 * min (abs (dq (k) + dq (k + 1)), 0.5 * q (k))
 + if (dq (k) * dq (k + 1) <= 0.) then
 + if (dq (k) > 0.) then ! local max
 + dm (k) = min (dm (k), dq (k), - dq (k + 1))
 + else
 + dm (k) = 0.
 + endif
 + endif
 + enddo
 + dm (km) = 0.
 +
 + ! -----------------------------------------------------------------------
 + ! impose a presumed background horizontal variability that is proportional to the value itself
 + ! -----------------------------------------------------------------------
 +
 + do k = 1, km
 + dm (k) = max (dm (k), qvmin, h_var * q (k))
 + enddo
 + else
 + do k = 1, km
 + dm (k) = max (qvmin, h_var * q (k))
 + enddo
 + endif
 +
 +end subroutine linear_prof
 +
 +! =======================================================================
 +! ice cloud microphysics processes
 +! bulk cloud micro - physics; processes splitting
 +! with some un - split sub - grouping
 +! time implicit (when possible) accretion and autoconversion
 +! author: shian - jiann lin, gfdl
 +! =======================================================================
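 +!
 +! process order in icloud below (a reading guide to the code): instant
 +! melting / homogeneous freezing first, then the vertical subgrid profile
 +! of cloud ice (linear_prof), then the warm (tc >= 0) snow / graupel
 +! melting and the cold (tc < 0) accretion / autoconversion / freezing
 +! blocks, and finally the temperature - sensitive adjustments in
 +! subgrid_z_proc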
 +
 +subroutine icloud (ktop, kbot, tzk, p1, qvk, qlk, qrk, qik, qsk, qgk, dp1, &
 + den, denfac, vts, vtg, vtr, qak, rh_adj, rh_rain, dts, h_var)
 +
 + implicit none
 +
 + integer, intent (in) :: ktop, kbot
 +
 + real, intent (in), dimension (ktop:kbot) :: p1, dp1, den, denfac, vts, vtg, vtr
 +
 + real, intent (inout), dimension (ktop:kbot) :: tzk, qvk, qlk, qrk, qik, qsk, qgk, qak
 +
 + real, intent (in) :: rh_adj, rh_rain, dts, h_var
 +
 + real, dimension (ktop:kbot) :: lcpk, icpk, tcpk, di, lhl, lhi
 + real, dimension (ktop:kbot) :: cvm, q_liq, q_sol
 +
 + real :: rdts, fac_g2v, fac_v2g, fac_i2s, fac_imlt
 + real :: tz, qv, ql, qr, qi, qs, qg, melt
 + real :: pracs, psacw, pgacw, psacr, pgacr, pgaci, praci, psaci
 + real :: pgmlt, psmlt, pgfr, pgaut, psaut, pgsub
 + real :: tc, tsq, dqs0, qden, qim, qsm
 + real :: dt5, factor, sink, qi_crt
 + real :: tmp, qsw, qsi, dqsdt, dq
 + real :: dtmp, qc, q_plus, q_minus
 +
 + integer :: k
 +
 + dt5 = 0.5 * dts
 +
 + rdts = 1. / dts
 +
 + ! -----------------------------------------------------------------------
 + ! define conversion scalar / factor
 + ! -----------------------------------------------------------------------
 +
 + fac_i2s = 1. - exp (- dts / tau_i2s)
 + fac_g2v = 1. - exp (- dts / tau_g2v)
 + fac_v2g = 1. - exp (- dts / tau_v2g)
 +
 + fac_imlt = 1. - exp (- dt5 / tau_imlt)
 +
 + ! -----------------------------------------------------------------------
 + ! define heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + lhi (k) = li00 + dc_ice * tzk (k)
 + q_liq (k) = qlk (k) + qrk (k)
 + q_sol (k) = qik (k) + qsk (k) + qgk (k)
 + cvm (k) = c_air + qvk (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + icpk (k) = lhi (k) / cvm (k)
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! sources of cloud ice: pihom, cold rain, and the sat_adj
 + ! (initiation plus deposition)
 + ! sources of snow: cold rain, auto conversion + accretion (from cloud ice)
 + ! sat_adj (deposition; requires pre - existing snow) ; initial snow comes from auto conversion
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + if (tzk (k) > tice .and. qik (k) > qcmin) then
 +
 + ! -----------------------------------------------------------------------
 + ! pimlt: instant melting of cloud ice
 + ! -----------------------------------------------------------------------
 +
 + melt = min (qik (k), fac_imlt * (tzk (k) - tice) / icpk (k))
 + tmp = min (melt, dim (ql_mlt, qlk (k))) ! max ql amount
 + qlk (k) = qlk (k) + tmp
 + qrk (k) = qrk (k) + melt - tmp
 + qik (k) = qik (k) - melt
 + q_liq (k) = q_liq (k) + melt
 + q_sol (k) = q_sol (k) - melt
 + cvm (k) = c_air + qvk (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tzk (k) = tzk (k) - melt * lhi (k) / cvm (k)
 +
 + elseif (tzk (k) < t_wfr .and. qlk (k) > qcmin) then
 +
 + ! -----------------------------------------------------------------------
 + ! pihom: homogeneous freezing of cloud water into cloud ice
 + ! this is the 1st occurrence of liquid water freezing in the split mp process
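 + ! t_wfr is about 40 k below tice (see the [ - 40, - 48] note in
 + ! subgrid_z_proc); the factor below ramps the frozen fraction linearly to
 + ! one over the dt_fr = 8 k band, so all cloud water is frozen by t_wfr - dt_fr
 + ! -----------------------------------------------------------------------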
 +
 + dtmp = t_wfr - tzk (k)
 + factor = min (1., dtmp / dt_fr)
 + sink = min (qlk (k) * factor, dtmp / icpk (k))
 + qi_crt = qi_gen * min (qi_lim, 0.1 * (tice - tzk (k))) / den (k)
 + tmp = min (sink, dim (qi_crt, qik (k)))
 + qlk (k) = qlk (k) - sink
 + qsk (k) = qsk (k) + sink - tmp
 + qik (k) = qik (k) + tmp
 + q_liq (k) = q_liq (k) - sink
 + q_sol (k) = q_sol (k) + sink
 + cvm (k) = c_air + qvk (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tzk (k) = tzk (k) + sink * lhi (k) / cvm (k)
 +
 + endif
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! vertical subgrid variability
 + ! -----------------------------------------------------------------------
 +
 + call linear_prof (kbot - ktop + 1, qik (ktop), di (ktop), z_slope_ice, h_var)
 +
 + ! -----------------------------------------------------------------------
 + ! update heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + lhl (k) = lv00 + d0_vap * tzk (k)
 + lhi (k) = li00 + dc_ice * tzk (k)
 + lcpk (k) = lhl (k) / cvm (k)
 + icpk (k) = lhi (k) / cvm (k)
 + tcpk (k) = lcpk (k) + icpk (k)
 + enddo
 +
 + do k = ktop, kbot
 +
 + ! -----------------------------------------------------------------------
 + ! do nothing above p_min
 + ! -----------------------------------------------------------------------
 +
 + if (p1 (k) < p_min) cycle
 +
 + tz = tzk (k)
 + qv = qvk (k)
 + ql = qlk (k)
 + qi = qik (k)
 + qr = qrk (k)
 + qs = qsk (k)
 + qg = qgk (k)
 +
 + pgacr = 0.
 + pgacw = 0.
 + tc = tz - tice
 +
 + if (tc .ge. 0.) then
 +
 + ! -----------------------------------------------------------------------
 + ! melting of snow
 + ! -----------------------------------------------------------------------
 +
 + dqs0 = ces0 / p1 (k) - qv
 +
 + if (qs > qcmin) then
 +
 + ! -----------------------------------------------------------------------
 + ! psacw: accretion of cloud water by snow
 + ! only rate is used (for snow melt) since tc > 0.
 + ! -----------------------------------------------------------------------
 +
 + if (ql > qrmin) then
 + factor = denfac (k) * csacw * exp (0.8125 * log (qs * den (k)))
 + psacw = factor / (1. + dts * factor) * ql ! rate
 + else
 + psacw = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! psacr: accretion of rain by melted snow
 + ! pracs: accretion of snow by rain
 + ! -----------------------------------------------------------------------
 +
 + if (qr > qrmin) then
 + psacr = min (acr3d (vts (k), vtr (k), qr, qs, csacr, acco (1, 2), &
 + den (k)), qr * rdts)
 + pracs = acr3d (vtr (k), vts (k), qs, qr, cracs, acco (1, 1), den (k))
 + else
 + psacr = 0.
 + pracs = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! total snow sink:
 + ! psmlt: snow melt (due to rain accretion)
 + ! -----------------------------------------------------------------------
 +
 + psmlt = max (0., smlt (tc, dqs0, qs * den (k), psacw, psacr, csmlt, &
 + den (k), denfac (k)))
 + sink = min (qs, dts * (psmlt + pracs), tc / icpk (k))
 + qs = qs - sink
 + ! sjl, 20170321:
 + tmp = min (sink, dim (qs_mlt, ql)) ! max ql due to snow melt
 + ql = ql + tmp
 + qr = qr + sink - tmp
 + ! qr = qr + sink
 + ! sjl, 20170321:
 + q_liq (k) = q_liq (k) + sink
 + q_sol (k) = q_sol (k) - sink
 + cvm (k) = c_air + qv * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz = tz - sink * lhi (k) / cvm (k)
 + tc = tz - tice
 +
 + endif
 +
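 + ! bookkeeping pattern used throughout icloud: a phase - change sink first
 + ! moves mass between q_liq and q_sol, cvm is recomputed from the new
 + ! composition, and only then is tz adjusted by sink * lhi (k) / cvm (k),
 + ! charging the latent heat against the updated heat capacity
 +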
 + ! -----------------------------------------------------------------------
 + ! update heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + lhi (k) = li00 + dc_ice * tz
 + icpk (k) = lhi (k) / cvm (k)
 +
 + ! -----------------------------------------------------------------------
 + ! melting of graupel
 + ! -----------------------------------------------------------------------
 +
 + if (qg > qcmin .and. tc > 0.) then
 +
 + ! -----------------------------------------------------------------------
 + ! pgacr: accretion of rain by graupel
 + ! -----------------------------------------------------------------------
 +
 + if (qr > qrmin) &
 + pgacr = min (acr3d (vtg (k), vtr (k), qr, qg, cgacr, acco (1, 3), &
 + den (k)), rdts * qr)
 +
 + ! -----------------------------------------------------------------------
 + ! pgacw: accretion of cloud water by graupel
 + ! -----------------------------------------------------------------------
 +
 + qden = qg * den (k)
 + if (ql > qrmin) then
 + factor = cgacw * qden / sqrt (den (k) * sqrt (sqrt (qden)))
 + pgacw = factor / (1. + dts * factor) * ql ! rate
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! pgmlt: graupel melt
 + ! -----------------------------------------------------------------------
 +
 + pgmlt = dts * gmlt (tc, dqs0, qden, pgacw, pgacr, cgmlt, den (k))
 + pgmlt = min (max (0., pgmlt), qg, tc / icpk (k))
 + qg = qg - pgmlt
 + qr = qr + pgmlt
 + q_liq (k) = q_liq (k) + pgmlt
 + q_sol (k) = q_sol (k) - pgmlt
 + cvm (k) = c_air + qv * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz = tz - pgmlt * lhi (k) / cvm (k)
 +
 + endif
 +
 + else
 +
 + ! -----------------------------------------------------------------------
 + ! cloud ice proc:
 + ! -----------------------------------------------------------------------
 +
 + ! -----------------------------------------------------------------------
 + ! psaci: accretion of cloud ice by snow
 + ! -----------------------------------------------------------------------
 +
 + if (qi > 3.e-7) then ! cloud ice sink terms
 +
 + if (qs > 1.e-7) then
 + ! -----------------------------------------------------------------------
 + ! sjl added (following lin eq. 23) the temperature dependency
 + ! to reduce accretion, use esi = exp (0.05 * tc) as in hong et al 2004
 + ! -----------------------------------------------------------------------
 + factor = dts * denfac (k) * csaci * exp (0.05 * tc + 0.8125 * log (qs * den (k)))
 + psaci = factor / (1. + factor) * qi
 + else
 + psaci = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! psaut: autoconversion: cloud ice -- > snow
 + ! -----------------------------------------------------------------------
 +
 + ! -----------------------------------------------------------------------
 + ! similar to lfo 1983: eq. 21 solved implicitly
 + ! threshold from wsm6 scheme, hong et al 2004, eq (13) : qi0_crt ~0.8e-4
 + ! -----------------------------------------------------------------------
 +
 + qim = qi0_crt / den (k)
 +
 + ! -----------------------------------------------------------------------
 + ! assuming linear subgrid vertical distribution of cloud ice
 + ! the mismatch computation following lin et al. 1994, mwr
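 + ! with subgrid edges qi - di .. qi + di, dq below is the average excess
 + ! above the qim threshold: the full qi - qim when even the lower edge
 + ! exceeds qim, or the partial overlap 0.25 * (q_plus - qim) ** 2 / di (k)
 + ! when only part of the layer does
 + ! -----------------------------------------------------------------------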
 +
 + if (const_vi) then
 + tmp = fac_i2s
 + else
 + tmp = fac_i2s * exp (0.025 * tc)
 + endif
 +
 + di (k) = max (di (k), qrmin)
 + q_plus = qi + di (k)
 + if (q_plus > (qim + qrmin)) then
 + if (qim > (qi - di (k))) then
 + dq = (0.25 * (q_plus - qim) ** 2) / di (k)
 + else
 + dq = qi - qim
 + endif
 + psaut = tmp * dq
 + else
 + psaut = 0.
 + endif
 + ! -----------------------------------------------------------------------
 + ! sink is no greater than 75% of qi
 + ! -----------------------------------------------------------------------
 + sink = min (0.75 * qi, psaci + psaut)
 + qi = qi - sink
 + qs = qs + sink
 +
 + ! -----------------------------------------------------------------------
 + ! pgaci: accretion of cloud ice by graupel
 + ! -----------------------------------------------------------------------
 +
 + if (qg > 1.e-6) then
 + ! -----------------------------------------------------------------------
 + ! factor = dts * cgaci / sqrt (den (k)) * exp (0.05 * tc + 0.875 * log (qg * den (k)))
 + ! simplified form: remove temp dependency & set the exponent "0.875" -- > 1
 + ! -----------------------------------------------------------------------
 + factor = dts * cgaci * sqrt (den (k)) * qg
 + pgaci = factor / (1. + factor) * qi
 + qi = qi - pgaci
 + qg = qg + pgaci
 + endif
 +
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! cold - rain proc:
 + ! -----------------------------------------------------------------------
 +
 + ! -----------------------------------------------------------------------
 + ! rain to ice, snow, graupel processes:
 + ! -----------------------------------------------------------------------
 +
 + tc = tz - tice
 +
 + if (qr > 1.e-7 .and. tc < 0.) then
 +
 + ! -----------------------------------------------------------------------
 + ! * sink * terms to qr: psacr + pgfr
 + ! source terms to qs: psacr
 + ! source terms to qg: pgfr
 + ! -----------------------------------------------------------------------
 +
 + ! -----------------------------------------------------------------------
 + ! psacr: accretion of rain by snow
 + ! -----------------------------------------------------------------------
 +
 + if (qs > 1.e-7) then ! if snow exists
 + psacr = dts * acr3d (vts (k), vtr (k), qr, qs, csacr, acco (1, 2), den (k))
 + else
 + psacr = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! pgfr: rain freezing -- > graupel
 + ! -----------------------------------------------------------------------
 +
 + pgfr = dts * cgfr (1) / den (k) * (exp (- cgfr (2) * tc) - 1.) * &
 + exp (1.75 * log (qr * den (k)))
 +
 + ! -----------------------------------------------------------------------
 + ! total sink to qr
 + ! -----------------------------------------------------------------------
 +
 + sink = psacr + pgfr
 + factor = min (sink, qr, - tc / icpk (k)) / max (sink, qrmin)
 +
 + psacr = factor * psacr
 + pgfr = factor * pgfr
 +
 + sink = psacr + pgfr
 + qr = qr - sink
 + qs = qs + psacr
 + qg = qg + pgfr
 + q_liq (k) = q_liq (k) - sink
 + q_sol (k) = q_sol (k) + sink
 + cvm (k) = c_air + qv * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz = tz + sink * lhi (k) / cvm (k)
 +
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! update heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + lhi (k) = li00 + dc_ice * tz
 + icpk (k) = lhi (k) / cvm (k)
 +
 + ! -----------------------------------------------------------------------
 + ! graupel production terms:
 + ! -----------------------------------------------------------------------
 +
 + if (qs > 1.e-7) then
 +
 + ! -----------------------------------------------------------------------
 + ! accretion: snow -- > graupel
 + ! -----------------------------------------------------------------------
 +
 + if (qg > qrmin) then
 + sink = dts * acr3d (vtg (k), vts (k), qs, qg, cgacs, acco (1, 4), den (k))
 + else
 + sink = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! autoconversion snow -- > graupel
 + ! -----------------------------------------------------------------------
 +
 + qsm = qs0_crt / den (k)
 + if (qs > qsm) then
 + factor = dts * 1.e-3 * exp (0.09 * (tz - tice))
 + sink = sink + factor / (1. + factor) * (qs - qsm)
 + endif
 + sink = min (qs, sink)
 + qs = qs - sink
 + qg = qg + sink
 +
 + endif ! snow existed
 +
 + if (qg > 1.e-7 .and. tz < tice0) then
 +
 + ! -----------------------------------------------------------------------
 + ! pgacw: accretion of cloud water by graupel
 + ! -----------------------------------------------------------------------
 +
 + if (ql > 1.e-6) then
 + qden = qg * den (k)
 + factor = dts * cgacw * qden / sqrt (den (k) * sqrt (sqrt (qden)))
 + pgacw = factor / (1. + factor) * ql
 + else
 + pgacw = 0.
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! pgacr: accretion of rain by graupel
 + ! -----------------------------------------------------------------------
 +
 + if (qr > 1.e-6) then
 + pgacr = min (dts * acr3d (vtg (k), vtr (k), qr, qg, cgacr, acco (1, 3), &
 + den (k)), qr)
 + else
 + pgacr = 0.
 + endif
 +
 + sink = pgacr + pgacw
 + factor = min (sink, dim (tice, tz) / icpk (k)) / max (sink, qrmin)
 + pgacr = factor * pgacr
 + pgacw = factor * pgacw
 +
 + sink = pgacr + pgacw
 + qg = qg + sink
 + qr = qr - pgacr
 + ql = ql - pgacw
 + q_liq (k) = q_liq (k) - sink
 + q_sol (k) = q_sol (k) + sink
 + cvm (k) = c_air + qv * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz = tz + sink * lhi (k) / cvm (k)
 +
 + endif
 +
 + endif
 +
 + tzk (k) = tz
 + qvk (k) = qv
 + qlk (k) = ql
 + qik (k) = qi
 + qrk (k) = qr
 + qsk (k) = qs
 + qgk (k) = qg
 +
 + enddo
 +
 + ! -----------------------------------------------------------------------
 + ! subgrid cloud microphysics
 + ! -----------------------------------------------------------------------
 +
 + call subgrid_z_proc (ktop, kbot, p1, den, denfac, dts, rh_adj, tzk, qvk, &
 + qlk, qrk, qik, qsk, qgk, qak, h_var, rh_rain)
 +
 +end subroutine icloud
 +
 +! =======================================================================
 +! temperature sensitive high vertical resolution processes
 +! =======================================================================
 +
 +subroutine subgrid_z_proc (ktop, kbot, p1, den, denfac, dts, rh_adj, tz, qv, &
 + ql, qr, qi, qs, qg, qa, h_var, rh_rain)
 +
 + implicit none
 +
 + integer, intent (in) :: ktop, kbot
 +
 + real, intent (in), dimension (ktop:kbot) :: p1, den, denfac
 +
 + real, intent (in) :: dts, rh_adj, h_var, rh_rain
 +
 + real, intent (inout), dimension (ktop:kbot) :: tz, qv, ql, qr, qi, qs, qg, qa
 +
 + real, dimension (ktop:kbot) :: lcpk, icpk, tcpk, tcp3, lhl, lhi
 + real, dimension (ktop:kbot) :: cvm, q_liq, q_sol, q_cond
 +
 + real :: fac_v2l, fac_l2v
 +
 + real :: pidep, qi_crt
 +
 + ! -----------------------------------------------------------------------
 + ! qstar over water may be accurate only down to - 80 deg c with ~10% uncertainty
 + ! must not be too large to allow psc
 + ! -----------------------------------------------------------------------
 +
 + real :: rh, rqi, tin, qsw, qsi, qpz, qstar
 + real :: dqsdt, dwsdt, dq, dq0, factor, tmp
 + real :: q_plus, q_minus, dt_evap, dt_pisub
 + real :: evap, sink, tc, pisub, q_adj, dtmp
 + real :: pssub, pgsub, tsq, qden, fac_g2v, fac_v2g
 +
 + integer :: k
 +
 + if (fast_sat_adj) then
 + dt_evap = 0.5 * dts
 + else
 + dt_evap = dts
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! define conversion scalar / factor
 + ! -----------------------------------------------------------------------
 +
 + fac_v2l = 1. - exp (- dt_evap / tau_v2l)
 + fac_l2v = 1. - exp (- dt_evap / tau_l2v)
 +
 + fac_g2v = 1. - exp (- dts / tau_g2v)
 + fac_v2g = 1. - exp (- dts / tau_v2g)
 +
 + ! -----------------------------------------------------------------------
 + ! define heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + do k = ktop, kbot
 + lhl (k) = lv00 + d0_vap * tz (k)
 + lhi (k) = li00 + dc_ice * tz (k)
 + q_liq (k) = ql (k) + qr (k)
 + q_sol (k) = qi (k) + qs (k) + qg (k)
 + cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + lcpk (k) = lhl (k) / cvm (k)
 + icpk (k) = lhi (k) / cvm (k)
 + tcpk (k) = lcpk (k) + icpk (k)
 + tcp3 (k) = lcpk (k) + icpk (k) * min (1., dim (tice, tz (k)) / (tice - t_wfr))
 + enddo
 +
 + do k = ktop, kbot
 +
 + if (p1 (k) < p_min) cycle
 +
 + ! -----------------------------------------------------------------------
 + ! instant deposit all water vapor to cloud ice when temperature is super low
 + ! -----------------------------------------------------------------------
 +
 + if (tz (k) < t_min) then
 + sink = dim (qv (k), 1.e-7)
 + qv (k) = qv (k) - sink
 + qi (k) = qi (k) + sink
 + q_sol (k) = q_sol (k) + sink
 + cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
 + tz (k) = tz (k) + sink * (lhl (k) + lhi (k)) / cvm (k)
 + if (.not. do_qa) qa (k) = qa (k) + 1. ! air fully saturated; 100 % cloud cover
 + cycle
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! update heat capacity and latent heat coefficient
 + ! -----------------------------------------------------------------------
 +
 + lhl (k) = lv00 + d0_vap * tz (k)
 + lhi (k) = li00 + dc_ice * tz (k)
 + lcpk (k) = lhl (k) / cvm (k)
 + icpk (k) = lhi (k) / cvm (k)
 + tcpk (k) = lcpk (k) + icpk (k)
 + tcp3 (k) = lcpk (k) + icpk (k) * min (1., dim (tice, tz (k)) / (tice - t_wfr))
 +
 + ! -----------------------------------------------------------------------
 + ! instant evaporation / sublimation of all clouds if rh < rh_adj -- > cloud free
 + ! -----------------------------------------------------------------------
 +
 + qpz = qv (k) + ql (k) + qi (k)
 + tin = tz (k) - (lhl (k) * (ql (k) + qi (k)) + lhi (k) * qi (k)) / (c_air + &
 + qpz * c_vap + qr (k) * c_liq + (qs (k) + qg (k)) * c_ice)
 + if (tin > t_sub + 6.) then
 + rh = qpz / iqs1 (tin, den (k))
 + if (rh < rh_adj) then ! qpz / rh_adj < qs
 + tz (k) = tin
 + qv (k) = qpz
 + ql (k) = 0.
 + qi (k) = 0.
 + cycle ! cloud free
 + endif
 + endif
 +
 + ! -----------------------------------------------------------------------
 + ! cloud water < -- > vapor adjustment:
 + ! -----------------------------------------------------------------------
 +
 + qsw = wqs2 (tz (k), den (k), dwsdt)
 + dq0 = qsw - qv (k)
 + if (dq0 > 0.) then
 + ! SJL 20170703 added ql factor to prevent the situation of high ql and low RH
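 + ! (e.g. with the default tau_l2v = 300. and a full sub - step dt_evap =
 + ! dts = 150., fac_l2v = 1. - exp (- 150. / 300.) ~ 0.39, so at most about
 + ! 39% of the diagnosed deficit can evaporate per step; variants kept below)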
+ ! factor = min (1., fac_l2v * sqrt (max (0., ql (k)) / 1.e-5) * 10. * dq0 / qsw)
+ ! factor = fac_l2v
+ ! factor = 1
+ factor = min (1., fac_l2v * (10. * dq0 / qsw)) ! the rh dependent factor = 1 at 90%
+ evap = min (ql (k), factor * dq0 / (1. + tcp3 (k) * dwsdt))
+ else ! condense all excess vapor into cloud water
+ ! -----------------------------------------------------------------------
+ ! evap = fac_v2l * dq0 / (1. + tcp3 (k) * dwsdt)
+ ! sjl, 20161108
+ ! -----------------------------------------------------------------------
+ evap = dq0 / (1. + tcp3 (k) * dwsdt)
+ endif
+ qv (k) = qv (k) + evap
+ ql (k) = ql (k) - evap
+ q_liq (k) = q_liq (k) - evap
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) - evap * lhl (k) / cvm (k)
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhi (k) = li00 + dc_ice * tz (k)
+ icpk (k) = lhi (k) / cvm (k)
+
+ ! -----------------------------------------------------------------------
+ ! enforce complete freezing below - 48 c
+ ! -----------------------------------------------------------------------
+
+ dtmp = t_wfr - tz (k) ! [ - 40, - 48]
+ if (dtmp > 0. .and. ql (k) > qcmin) then
+ sink = min (ql (k), ql (k) * dtmp * 0.125, dtmp / icpk (k))
+ ql (k) = ql (k) - sink
+ qi (k) = qi (k) + sink
+ q_liq (k) = q_liq (k) - sink
+ q_sol (k) = q_sol (k) + sink
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) + sink * lhi (k) / cvm (k)
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhi (k) = li00 + dc_ice * tz (k)
+ icpk (k) = lhi (k) / cvm (k)
+
+ ! -----------------------------------------------------------------------
+ ! bigg mechanism
+ ! -----------------------------------------------------------------------
+
+ if (fast_sat_adj) then
+ dt_pisub = 0.5 * dts
+ else
+ dt_pisub = dts
+ tc = tice - tz (k)
+ if (ql (k) > qrmin .and. tc > 0.) then
+ sink = 3.3333e-10 * dts * (exp (0.66 * tc) - 1.) * den (k) * ql (k) * ql (k)
+ sink = min (ql (k), tc / icpk (k), sink)
+ ql (k) = ql (k) - sink
+ qi (k) = qi (k) + sink
+ q_liq (k) = q_liq (k) - sink
+ q_sol (k) = q_sol (k) + sink
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) + sink * lhi (k) / cvm (k)
+ endif ! significant ql existed
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhl (k) = lv00 + d0_vap * tz (k)
+ lhi (k) = li00 + dc_ice * tz (k)
+ lcpk (k) = lhl (k) / cvm (k)
+ icpk (k) = lhi (k) / cvm (k)
+ tcpk (k) = lcpk (k) + icpk (k)
+
+ ! -----------------------------------------------------------------------
+ ! sublimation / deposition of ice
+ ! -----------------------------------------------------------------------
+
+ if (tz (k) < tice) then
+ qsi = iqs2 (tz (k), den (k), dqsdt)
+ dq = qv (k) - qsi
+ sink = dq / (1. + tcpk (k) * dqsdt)
+ if (qi (k) > qrmin) then
+ ! eq 9, hong et al. 2004, mwr
+ ! for a and b, see dudhia 1989: page 3103 eq (b7) and (b8)
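+ ! note: the numerical constants below absorb the a and b coefficients; the
+ ! denominator combines the heat conduction term (~ lat2 / tz ** 2) with a
+ ! vapor diffusion term, so pidep relaxes qi toward ice saturation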
+ pidep = dt_pisub * dq * 349138.78 * exp (0.875 * log (qi (k) * den (k))) &
+ / (qsi * den (k) * lat2 / (0.0243 * rvgas * tz (k) ** 2) + 4.42478e4)
+ else
+ pidep = 0.
+ endif
+ if (dq > 0.) then ! vapor - > ice
+ tmp = tice - tz (k)
+ ! 20160912: the following should produce more ice at higher altitude
+ ! qi_crt = 4.92e-11 * exp (1.33 * log (1.e3 * exp (0.1 * tmp))) / den (k)
+ qi_crt = qi_gen * min (qi_lim, 0.1 * tmp) / den (k)
+ sink = min (sink, max (qi_crt - qi (k), pidep), tmp / tcpk (k))
+ else ! ice -- > vapor
+ pidep = pidep * min (1., dim (tz (k), t_sub) * 0.2)
+ sink = max (pidep, sink, - qi (k))
+ endif
+ qv (k) = qv (k) - sink
+ qi (k) = qi (k) + sink
+ q_sol (k) = q_sol (k) + sink
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) + sink * (lhl (k) + lhi (k)) / cvm (k)
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhl (k) = lv00 + d0_vap * tz (k)
+ lhi (k) = li00 + dc_ice * tz (k)
+ lcpk (k) = lhl (k) / cvm (k)
+ icpk (k) = lhi (k) / cvm (k)
+ tcpk (k) = lcpk (k) + icpk (k)
+
+ ! -----------------------------------------------------------------------
+ ! sublimation / deposition of snow
+ ! this process happens for the full temperature range
+ ! -----------------------------------------------------------------------
+
+ if (qs (k) > qrmin) then
+ qsi = iqs2 (tz (k), den (k), dqsdt)
+ qden = qs (k) * den (k)
+ tmp = exp (0.65625 * log (qden))
+ tsq = tz (k) * tz (k)
+ dq = (qsi - qv (k)) / (1. + tcpk (k) * dqsdt)
+ pssub = cssub (1) * tsq * (cssub (2) * sqrt (qden) + cssub (3) * tmp * &
+ sqrt (denfac (k))) / (cssub (4) * tsq + cssub (5) * qsi * den (k))
+ pssub = (qsi - qv (k)) * dts * pssub
+ if (pssub > 0.) then ! qs -- > qv, sublimation
+ pssub = min (pssub * min (1., dim (tz (k), t_sub) * 0.2), qs (k))
+ else
+ if (tz (k) > tice) then
+ pssub = 0. ! no deposition
+ else
+ pssub = max (pssub, dq, (tz (k) - tice) / tcpk (k))
+ endif
+ endif
+ qs (k) = qs (k) - pssub
+ qv (k) = qv (k) + pssub
+ q_sol (k) = q_sol (k) - pssub
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) - pssub * (lhl (k) + lhi (k)) / cvm (k)
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhl (k) = lv00 + d0_vap * tz (k)
+ lhi (k) = li00 + dc_ice * tz (k)
+ lcpk (k) = lhl (k) / cvm (k)
+ icpk (k) = lhi (k) / cvm (k)
+ tcpk (k) = lcpk (k) + icpk (k)
+
+ ! -----------------------------------------------------------------------
+ ! simplified 2 - way graupel sublimation - deposition mechanism
+ ! -----------------------------------------------------------------------
+
+ if (qg (k) > qrmin) then
+ qsi = iqs2 (tz (k), den (k), dqsdt)
+ dq = (qv (k) - qsi) / (1. + tcpk (k) * dqsdt)
+ pgsub = (qv (k) / qsi - 1.) * qg (k)
+ if (pgsub > 0.) then ! deposition
+ if (tz (k) > tice) then
+ pgsub = 0. ! no deposition
+ else
+ pgsub = min (fac_v2g * pgsub, 0.2 * dq, ql (k) + qr (k), &
+ (tice - tz (k)) / tcpk (k))
+ endif
+ else ! sublimation
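+ ! note: sublimation is relaxed over the tau_g2v time scale via fac_g2v and
+ ! ramps from zero at t_sub to full strength 10 deg above it (the ice and
+ ! snow sublimation terms above use a sharper 5 deg ramp)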
+ pgsub = max (fac_g2v * pgsub, dq) * min (1., dim (tz (k), t_sub) * 0.1)
+ endif
+ qg (k) = qg (k) + pgsub
+ qv (k) = qv (k) - pgsub
+ q_sol (k) = q_sol (k) + pgsub
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) + pgsub * (lhl (k) + lhi (k)) / cvm (k)
+ endif
+
+#ifdef USE_MIN_EVAP
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhl (k) = lv00 + d0_vap * tz (k)
+ lcpk (k) = lhl (k) / cvm (k)
+
+ ! -----------------------------------------------------------------------
+ ! * minimum evap of rain in dry environmental air
+ ! -----------------------------------------------------------------------
+
+ if (qr (k) > qcmin) then
+ qsw = wqs2 (tz (k), den (k), dqsdt)
+ sink = min (qr (k), dim (rh_rain * qsw, qv (k)) / (1. + lcpk (k) * dqsdt))
+ qv (k) = qv (k) + sink
+ qr (k) = qr (k) - sink
+ q_liq (k) = q_liq (k) - sink
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) - sink * lhl (k) / cvm (k)
+ endif
+#endif
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ lhl (k) = lv00 + d0_vap * tz (k)
+ cvm (k) = c_air + (qv (k) + q_liq (k) + q_sol (k)) * c_vap
+ lcpk (k) = lhl (k) / cvm (k)
+
+ ! -----------------------------------------------------------------------
+ ! compute cloud fraction
+ ! -----------------------------------------------------------------------
+
+ ! -----------------------------------------------------------------------
+ ! combine water species
+ ! -----------------------------------------------------------------------
+
+ if (do_qa) cycle
+
+ if (rad_snow) then
+ q_sol (k) = qi (k) + qs (k)
+ else
+ q_sol (k) = qi (k)
+ endif
+ if (rad_rain) then
+ q_liq (k) = ql (k) + qr (k)
+ else
+ q_liq (k) = ql (k)
+ endif
+ q_cond (k) = q_liq (k) + q_sol (k)
+
+ qpz = qv (k) + q_cond (k) ! qpz is conserved
+
+ ! -----------------------------------------------------------------------
+ ! use the "liquid - frozen water temperature" (tin) to compute saturated specific humidity
+ ! -----------------------------------------------------------------------
+
+ tin = tz (k) - (lcpk (k) * q_cond (k) + icpk (k) * q_sol (k)) ! minimum temperature
+ ! tin = tz (k) - ((lv00 + d0_vap * tz (k)) * q_cond (k) + &
+ ! (li00 + dc_ice * tz (k)) * q_sol (k)) / (c_air + qpz * c_vap)
+
+ ! -----------------------------------------------------------------------
+ ! determine saturated specific humidity
+ ! -----------------------------------------------------------------------
+
+ if (tin <= t_wfr) then
+ ! ice phase:
+ qstar = iqs1 (tin, den (k))
+ elseif (tin >= tice) then
+ ! liquid phase:
+ qstar = wqs1 (tin, den (k))
+ else
+ ! mixed phase:
+ qsi = iqs1 (tin, den (k))
+ qsw = wqs1 (tin, den (k))
+ if (q_cond (k) > 3.e-6) then
+ rqi = q_sol (k) / q_cond (k)
+ else
+ ! -----------------------------------------------------------------------
+ ! mostly liquid water q_cond (k) at initial cloud development stage
+ ! -----------------------------------------------------------------------
+ rqi = (tice - tin) / (tice - t_wfr)
+ endif
+ qstar = rqi * qsi + (1. - rqi) * qsw
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! assuming subgrid linear distribution in horizontal; this is effectively a smoother for the
+ ! binary cloud scheme
+ ! -----------------------------------------------------------------------
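+ ! note: for a top - hat pdf of total water with half - width dq centered on
+ ! qpz, the cloud fraction is the part of the distribution above qstar:
+ ! 1 when qstar < q_minus, (q_plus - qstar) / (2 * dq) when q_minus <
+ ! qstar < q_plus (subject to the qc_crt condensate threshold), and 0
+ ! (cloud free) when qstar > q_plus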
+
+ if (qpz > qrmin) then
+ ! partial cloudiness by pdf:
+ dq = max (qcmin, h_var * qpz)
+ q_plus = qpz + dq ! cloud free if qstar > q_plus
+ q_minus = qpz - dq
+ if (qstar < q_minus) then
+ qa (k) = qa (k) + 1. ! air fully saturated; 100 % cloud cover
+ elseif (qstar < q_plus .and. q_cond (k) > qc_crt) then
+ qa (k) = qa (k) + (q_plus - qstar) / (dq + dq) ! partial cloud cover
+ ! qa (k) = sqrt (qa (k) + (q_plus - qstar) / (dq + dq))
+ endif
+ endif
+
+ enddo
+
+end subroutine subgrid_z_proc
+
+! =======================================================================
+! rain evaporation
+! =======================================================================
+
+subroutine revap_rac1 (hydrostatic, is, ie, dt, tz, qv, ql, qr, qi, qs, qg, den, hvar)
+
+ implicit none
+
+ logical, intent (in) :: hydrostatic
+
+ integer, intent (in) :: is, ie
+
+ real, intent (in) :: dt ! time step (s)
+
+ real, intent (in), dimension (is:ie) :: den, hvar, qi, qs, qg
+
+ real, intent (inout), dimension (is:ie) :: tz, qv, qr, ql
+
+ real, dimension (is:ie) :: lcp2, denfac, q_liq, q_sol, cvm, lhl
+
+ real :: dqv, qsat, dqsdt, evap, qden, q_plus, q_minus, sink
+ real :: tin, t2, qpz, dq, dqh
+
+ integer :: i
+
+ ! -----------------------------------------------------------------------
+ ! define latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ do i = is, ie
+ lhl (i) = lv00 + d0_vap * tz (i)
+ q_liq (i) = ql (i) + qr (i)
+ q_sol (i) = qi (i) + qs (i) + qg (i)
+ cvm (i) = c_air + qv (i) * c_vap + q_liq (i) * c_liq + q_sol (i) * c_ice
+ lcp2 (i) = lhl (i) / cvm (i)
+ ! denfac (i) = sqrt (sfcrho / den (i))
+ enddo
+
+ do i = is, ie
+ if (qr (i) > qrmin .and. tz (i) > t_wfr) then
+ qpz = qv (i) + ql (i)
+ tin = tz (i) - lcp2 (i) * ql (i) ! presence of clouds suppresses the rain evap
+ qsat = wqs2 (tin, den (i), dqsdt)
+ dqh = max (ql (i), hvar (i) * max (qpz, qcmin))
+ dqv = qsat - qv (i)
+ q_minus = qpz - dqh
+ q_plus = qpz + dqh
+
+ ! -----------------------------------------------------------------------
+ ! qsat must be > q_minus to activate evaporation
+ ! qsat must be < q_plus to activate accretion
+ ! -----------------------------------------------------------------------
+
+ ! -----------------------------------------------------------------------
+ ! rain evaporation
+ ! -----------------------------------------------------------------------
+
+ if (dqv > qvmin .and. qsat > q_minus) then
+ if (qsat > q_plus) then
+ dq = qsat - qpz
+ else
+ ! q_minus < qsat < q_plus
+ ! dq == dqh if qsat == q_minus
+ dq = 0.25 * (q_minus - qsat) ** 2 / dqh
+ endif
+ qden = qr (i) * den (i)
+ t2 = tin * tin
+ evap = crevp (1) * t2 * dq * (crevp (2) * sqrt (qden) + crevp (3) * exp (0.725 * log (qden))) &
+ / (crevp (4) * t2 + crevp (5) * qsat * den (i))
+ evap = min (qr (i), dt * evap, dqv / (1. + lcp2 (i) * dqsdt))
+ qr (i) = qr (i) - evap
+ qv (i) = qv (i) + evap
+ q_liq (i) = q_liq (i) - evap
+ cvm (i) = c_air + qv (i) * c_vap + q_liq (i) * c_liq + q_sol (i) * c_ice
+ tz (i) = tz (i) - evap * lhl (i) / cvm (i)
+ endif
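+
+ ! note: evaporation (qsat > q_minus) and accretion (qsat < q_plus) are
+ ! not mutually exclusive; both act in the same layer whenever the
+ ! subgrid distribution straddles saturation, i.e. q_minus < qsat < q_plus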
+
+ ! -----------------------------------------------------------------------
+ ! accretion: pracc
+ ! -----------------------------------------------------------------------
+
+ if (qr (i) > qrmin .and. ql (i) > 1.e-8 .and. qsat < q_plus) then
+ denfac (i) = sqrt (sfcrho / den (i))
+ sink = dt * denfac (i) * cracw * exp (0.95 * log (qr (i) * den (i)))
+ sink = sink / (1. + sink) * ql (i)
+ ql (i) = ql (i) - sink
+ qr (i) = qr (i) + sink
+ endif
+ endif
+ enddo
+
+end subroutine revap_rac1
+
+! =======================================================================
+! compute terminal fall speed
+! consider cloud ice, snow, and graupel's melting during fall
+! =======================================================================
+
+subroutine terminal_fall (dtm, ktop, kbot, tz, qv, ql, qr, qg, qs, qi, dz, dp, &
+ den, vtg, vts, vti, r1, g1, s1, i1, m1_sol, w1)
+
+ implicit none
+
+ integer, intent (in) :: ktop, kbot
+
+ real, intent (in) :: dtm ! time step (s)
+
+ real, intent (in), dimension (ktop:kbot) :: vtg, vts, vti, den, dp, dz
+
+ real, intent (inout), dimension (ktop:kbot) :: qv, ql, qr, qg, qs, qi, tz, m1_sol, w1
+
+ real, intent (out) :: r1, g1, s1, i1
+
+ real, dimension (ktop:kbot + 1) :: ze, zt
+
+ real :: qsat, dqsdt, dt5, evap, dtime
+ real :: factor, frac
+ real :: tmp, precip, tc, sink
+
+ real, dimension (ktop:kbot) :: lcpk, icpk, cvm, q_liq, q_sol, lhl, lhi
+ real, dimension (ktop:kbot) :: m1, dm
+
+ real :: zs = 0.
+ real :: fac_imlt
+
+ integer :: k, k0, m
+
+ logical :: no_fall
+
+ dt5 = 0.5 * dtm
+ fac_imlt = 1. - exp (- dt5 / tau_imlt)
+
+ ! -----------------------------------------------------------------------
+ ! define heat capacity and latent heat coefficient
+ ! -----------------------------------------------------------------------
+
+ do k = ktop, kbot
+ m1_sol (k) = 0.
+ lhl (k) = lv00 + d0_vap * tz (k)
+ lhi (k) = li00 + dc_ice * tz (k)
+ q_liq (k) = ql (k) + qr (k)
+ q_sol (k) = qi (k) + qs (k) + qg (k)
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ lcpk (k) = lhl (k) / cvm (k)
+ icpk (k) = lhi (k) / cvm (k)
+ enddo
+
+ ! -----------------------------------------------------------------------
+ ! find significant melting level
+ ! -----------------------------------------------------------------------
+
+ k0 = kbot
+ do k = ktop, kbot - 1
+ if (tz (k) > tice) then
+ k0 = k
+ exit
+ endif
+ enddo
+
+ ! -----------------------------------------------------------------------
+ ! melting of cloud_ice (before fall) :
+ ! -----------------------------------------------------------------------
+
+ do k = k0, kbot
+ tc = tz (k) - tice
+ if (qi (k) > qcmin .and. tc > 0.) then
+ sink = min (qi (k), fac_imlt * tc / icpk (k))
+ tmp = min (sink, dim (ql_mlt, ql (k)))
+ ql (k) = ql (k) + tmp
+ qr (k) = qr (k) + sink - tmp
+ qi (k) = qi (k) - sink
+ q_liq (k) = q_liq (k) + sink
+ q_sol (k) = q_sol (k) - sink
+ cvm (k) = c_air + qv (k) * c_vap + q_liq (k) * c_liq + q_sol (k) * c_ice
+ tz (k) = tz (k) - sink * lhi (k) / cvm (k)
+ tc = tz (k) - tice
+ endif
+ enddo
+
+ ! -----------------------------------------------------------------------
+ ! turn off melting when cloud microphysics time step is small
+ ! -----------------------------------------------------------------------
+
+ if (dtm < 60.) k0 = kbot
+
+ ! sjl, turn off melting of falling cloud ice, snow and graupel
+ k0 = kbot
+ ! sjl, turn off melting of falling cloud ice, snow and graupel
+
+ ze (kbot + 1) = zs
+ do k = kbot, ktop, - 1
+ ze (k) = ze (k + 1) - dz (k) ! dz < 0
+ enddo
+
+ zt (ktop) = ze (ktop)
+
+ ! -----------------------------------------------------------------------
+ ! update heat capacity and latent heat coefficient
+ ! 
----------------------------------------------------------------------- + + do k = k0, kbot + lhi (k) = li00 + dc_ice * tz (k) + icpk (k) = lhi (k) / cvm (k) + enddo + + ! ----------------------------------------------------------------------- + ! melting of falling cloud ice into rain + ! ----------------------------------------------------------------------- + + call check_column (ktop, kbot, qi, no_fall) + + if (vi_fac < 1.e-5 .or. no_fall) then + i1 = 0. + else + + do k = ktop + 1, kbot + zt (k) = ze (k) - dt5 * (vti (k - 1) + vti (k)) + enddo + zt (kbot + 1) = zs - dtm * vti (kbot) + + do k = ktop, kbot + if (zt (k + 1) >= zt (k)) zt (k + 1) = zt (k) - dz_min + enddo + + if (k0 < kbot) then + do k = kbot - 1, k0, - 1 + if (qi (k) > qrmin) then + do m = k + 1, kbot + if (zt (k + 1) >= ze (m)) exit + if (zt (k) < ze (m + 1) .and. tz (m) > tice) then + dtime = min (1.0, (ze (m) - ze (m + 1)) / (max (vr_min, vti (k)) * tau_imlt)) + sink = min (qi (k) * dp (k) / dp (m), dtime * (tz (m) - tice) / icpk (m)) + tmp = min (sink, dim (ql_mlt, ql (m))) + ql (m) = ql (m) + tmp + qr (m) = qr (m) - tmp + sink + tz (m) = tz (m) - sink * icpk (m) + qi (k) = qi (k) - sink * dp (m) / dp (k) + endif + enddo + endif + enddo + endif + + if (do_sedi_w) then + do k = ktop, kbot + dm (k) = dp (k) * (1. + qv (k) + ql (k) + qr (k) + qi (k) + qs (k) + qg (k)) + enddo + endif + + if (use_ppm) then + call lagrangian_fall_ppm (ktop, kbot, zs, ze, zt, dp, qi, i1, m1_sol, mono_prof) + else + call implicit_fall (dtm, ktop, kbot, ze, vti, dp, qi, i1, m1_sol) + endif + + if (do_sedi_w) then + w1 (ktop) = (dm (ktop) * w1 (ktop) + m1_sol (ktop) * vti (ktop)) / (dm (ktop) - m1_sol (ktop)) + do k = ktop + 1, kbot + w1 (k) = (dm (k) * w1 (k) - m1_sol (k - 1) * vti (k - 1) + m1_sol (k) * vti (k)) & + / (dm (k) + m1_sol (k - 1) - m1_sol (k)) + enddo + endif + + endif + + ! ----------------------------------------------------------------------- + ! melting of falling snow into rain + ! ----------------------------------------------------------------------- + + r1 = 0. + + call check_column (ktop, kbot, qs, no_fall) + + if (no_fall) then + s1 = 0. + else + + do k = ktop + 1, kbot + zt (k) = ze (k) - dt5 * (vts (k - 1) + vts (k)) + enddo + zt (kbot + 1) = zs - dtm * vts (kbot) + + do k = ktop, kbot + if (zt (k + 1) >= zt (k)) zt (k + 1) = zt (k) - dz_min + enddo + + if (k0 < kbot) then + do k = kbot - 1, k0, - 1 + if (qs (k) > qrmin) then + do m = k + 1, kbot + if (zt (k + 1) >= ze (m)) exit + dtime = min (dtm, (ze (m) - ze (m + 1)) / (vr_min + vts (k))) + if (zt (k) < ze (m + 1) .and. tz (m) > tice) then + dtime = min (1.0, dtime / tau_smlt) + sink = min (qs (k) * dp (k) / dp (m), dtime * (tz (m) - tice) / icpk (m)) + tz (m) = tz (m) - sink * icpk (m) + qs (k) = qs (k) - sink * dp (m) / dp (k) + if (zt (k) < zs) then + r1 = r1 + sink * dp (m) ! precip as rain + else + ! qr source here will fall next time step (therefore, can evap) + qr (m) = qr (m) + sink + endif + endif + if (qs (k) < qrmin) exit + enddo + endif + enddo + endif + + if (do_sedi_w) then + do k = ktop, kbot + dm (k) = dp (k) * (1. 
+ qv (k) + ql (k) + qr (k) + qi (k) + qs (k) + qg (k)) + enddo + endif + + if (use_ppm) then + call lagrangian_fall_ppm (ktop, kbot, zs, ze, zt, dp, qs, s1, m1, mono_prof) + else + call implicit_fall (dtm, ktop, kbot, ze, vts, dp, qs, s1, m1) + endif + + do k = ktop, kbot + m1_sol (k) = m1_sol (k) + m1 (k) + enddo + + if (do_sedi_w) then + w1 (ktop) = (dm (ktop) * w1 (ktop) + m1 (ktop) * vts (ktop)) / (dm (ktop) - m1 (ktop)) + do k = ktop + 1, kbot + w1 (k) = (dm (k) * w1 (k) - m1 (k - 1) * vts (k - 1) + m1 (k) * vts (k)) & + / (dm (k) + m1 (k - 1) - m1 (k)) + enddo + endif + + endif + + ! ---------------------------------------------- + ! melting of falling graupel into rain + ! ---------------------------------------------- + + call check_column (ktop, kbot, qg, no_fall) + + if (no_fall) then + g1 = 0. + else + + do k = ktop + 1, kbot + zt (k) = ze (k) - dt5 * (vtg (k - 1) + vtg (k)) + enddo + zt (kbot + 1) = zs - dtm * vtg (kbot) + + do k = ktop, kbot + if (zt (k + 1) >= zt (k)) zt (k + 1) = zt (k) - dz_min + enddo + + if (k0 < kbot) then + do k = kbot - 1, k0, - 1 + if (qg (k) > qrmin) then + do m = k + 1, kbot + if (zt (k + 1) >= ze (m)) exit + dtime = min (dtm, (ze (m) - ze (m + 1)) / vtg (k)) + if (zt (k) < ze (m + 1) .and. tz (m) > tice) then + dtime = min (1., dtime / tau_g2r) + sink = min (qg (k) * dp (k) / dp (m), dtime * (tz (m) - tice) / icpk (m)) + tz (m) = tz (m) - sink * icpk (m) + qg (k) = qg (k) - sink * dp (m) / dp (k) + if (zt (k) < zs) then + r1 = r1 + sink * dp (m) + else + qr (m) = qr (m) + sink + endif + endif + if (qg (k) < qrmin) exit + enddo + endif + enddo + endif + + if (do_sedi_w) then + do k = ktop, kbot + dm (k) = dp (k) * (1. + qv (k) + ql (k) + qr (k) + qi (k) + qs (k) + qg (k)) + enddo + endif + + if (use_ppm) then + call lagrangian_fall_ppm (ktop, kbot, zs, ze, zt, dp, qg, g1, m1, mono_prof) + else + call implicit_fall (dtm, ktop, kbot, ze, vtg, dp, qg, g1, m1) + endif + + do k = ktop, kbot + m1_sol (k) = m1_sol (k) + m1 (k) + enddo + + if (do_sedi_w) then + w1 (ktop) = (dm (ktop) * w1 (ktop) + m1 (ktop) * vtg (ktop)) / (dm (ktop) - m1 (ktop)) + do k = ktop + 1, kbot + w1 (k) = (dm (k) * w1 (k) - m1 (k - 1) * vtg (k - 1) + m1 (k) * vtg (k)) & + / (dm (k) + m1 (k - 1) - m1 (k)) + enddo + endif + + endif + +end subroutine terminal_fall + +! ======================================================================= +! check if water species large enough to fall +! ======================================================================= + +subroutine check_column (ktop, kbot, q, no_fall) + + implicit none + + integer, intent (in) :: ktop, kbot + + real, intent (in) :: q (ktop:kbot) + + logical, intent (out) :: no_fall + + integer :: k + + no_fall = .true. + + do k = ktop, kbot + if (q (k) > qrmin) then + no_fall = .false. + exit + endif + enddo + +end subroutine check_column + +! ======================================================================= +! time - implicit monotonic scheme +! developed by sj lin, 2016 +! 
======================================================================= + +subroutine implicit_fall (dt, ktop, kbot, ze, vt, dp, q, precip, m1) + + implicit none + + integer, intent (in) :: ktop, kbot + + real, intent (in) :: dt + + real, intent (in), dimension (ktop:kbot + 1) :: ze + + real, intent (in), dimension (ktop:kbot) :: vt, dp + + real, intent (inout), dimension (ktop:kbot) :: q + + real, intent (out), dimension (ktop:kbot) :: m1 + + real, intent (out) :: precip + + real, dimension (ktop:kbot) :: dz, qm, dd + + integer :: k + + do k = ktop, kbot + dz (k) = ze (k) - ze (k + 1) + dd (k) = dt * vt (k) + q (k) = q (k) * dp (k) + enddo + + ! ----------------------------------------------------------------------- + ! sedimentation: non - vectorizable loop + ! ----------------------------------------------------------------------- + + qm (ktop) = q (ktop) / (dz (ktop) + dd (ktop)) + do k = ktop + 1, kbot + qm (k) = (q (k) + dd (k - 1) * qm (k - 1)) / (dz (k) + dd (k)) + enddo + + ! ----------------------------------------------------------------------- + ! qm is density at this stage + ! ----------------------------------------------------------------------- + + do k = ktop, kbot + qm (k) = qm (k) * dz (k) + enddo + + ! ----------------------------------------------------------------------- + ! output mass fluxes: non - vectorizable loop + ! ----------------------------------------------------------------------- + + m1 (ktop) = q (ktop) - qm (ktop) + do k = ktop + 1, kbot + m1 (k) = m1 (k - 1) + q (k) - qm (k) + enddo + precip = m1 (kbot) + + ! ----------------------------------------------------------------------- + ! update: + ! ----------------------------------------------------------------------- + + do k = ktop, kbot + q (k) = qm (k) / dp (k) + enddo + +end subroutine implicit_fall + +! ======================================================================= +! lagrangian scheme +! developed by sj lin, ???? +! ======================================================================= + +subroutine lagrangian_fall_ppm (ktop, kbot, zs, ze, zt, dp, q, precip, m1, mono) + + implicit none + + integer, intent (in) :: ktop, kbot + + real, intent (in) :: zs + + logical, intent (in) :: mono + + real, intent (in), dimension (ktop:kbot + 1) :: ze, zt + + real, intent (in), dimension (ktop:kbot) :: dp + + ! m1: flux + real, intent (inout), dimension (ktop:kbot) :: q, m1 + + real, intent (out) :: precip + + real, dimension (ktop:kbot) :: qm, dz + + real :: a4 (4, ktop:kbot) + + real :: pl, pr, delz, esl + + integer :: k, k0, n, m + + real, parameter :: r3 = 1. / 3., r23 = 2. / 3. + + ! ----------------------------------------------------------------------- + ! density: + ! ----------------------------------------------------------------------- + + do k = ktop, kbot + dz (k) = zt (k) - zt (k + 1) ! note: dz is positive + q (k) = q (k) * dp (k) + a4 (1, k) = q (k) / dz (k) + qm (k) = 0. + enddo + + ! ----------------------------------------------------------------------- + ! construct vertical profile with zt as coordinate + ! ----------------------------------------------------------------------- + + call cs_profile (a4 (1, ktop), dz (ktop), kbot - ktop + 1, mono) + + k0 = ktop + do k = ktop, kbot + do n = k0, kbot + if (ze (k) <= zt (n) .and. ze (k) >= zt (n + 1)) then + pl = (zt (n) - ze (k)) / dz (n) + if (zt (n + 1) <= ze (k + 1)) then + ! 
entire new grid is within the original grid + pr = (zt (n) - ze (k + 1)) / dz (n) + qm (k) = a4 (2, n) + 0.5 * (a4 (4, n) + a4 (3, n) - a4 (2, n)) * (pr + pl) - & + a4 (4, n) * r3 * (pr * (pr + pl) + pl ** 2) + qm (k) = qm (k) * (ze (k) - ze (k + 1)) + k0 = n + goto 555 + else + qm (k) = (ze (k) - zt (n + 1)) * (a4 (2, n) + 0.5 * (a4 (4, n) + & + a4 (3, n) - a4 (2, n)) * (1. + pl) - a4 (4, n) * (r3 * (1. + pl * (1. + pl)))) + if (n < kbot) then + do m = n + 1, kbot + ! locate the bottom edge: ze (k + 1) + if (ze (k + 1) < zt (m + 1)) then + qm (k) = qm (k) + q (m) + else + delz = zt (m) - ze (k + 1) + esl = delz / dz (m) + qm (k) = qm (k) + delz * (a4 (2, m) + 0.5 * esl * & + (a4 (3, m) - a4 (2, m) + a4 (4, m) * (1. - r23 * esl))) + k0 = m + goto 555 + endif + enddo + endif + goto 555 + endif + endif + enddo + 555 continue + enddo + + m1 (ktop) = q (ktop) - qm (ktop) + do k = ktop + 1, kbot + m1 (k) = m1 (k - 1) + q (k) - qm (k) + enddo + precip = m1 (kbot) + + ! convert back to * dry * mixing ratio: + ! dp must be dry air_mass (because moist air mass will be changed due to terminal fall) . + + do k = ktop, kbot + q (k) = qm (k) / dp (k) + enddo + +end subroutine lagrangian_fall_ppm + +subroutine cs_profile (a4, del, km, do_mono) + + implicit none + + integer, intent (in) :: km ! vertical dimension + + real, intent (in) :: del (km) + + logical, intent (in) :: do_mono + + real, intent (inout) :: a4 (4, km) + + real, parameter :: qp_min = 1.e-6 + + real :: gam (km) + real :: q (km + 1) + real :: d4, bet, a_bot, grat, pmp, lac + real :: pmp_1, lac_1, pmp_2, lac_2 + real :: da1, da2, a6da + + integer :: k + + logical extm (km) + + grat = del (2) / del (1) ! grid ratio + bet = grat * (grat + 0.5) + q (1) = (2. * grat * (grat + 1.) * a4 (1, 1) + a4 (1, 2)) / bet + gam (1) = (1. + grat * (grat + 1.5)) / bet + + do k = 2, km + d4 = del (k - 1) / del (k) + bet = 2. + 2. * d4 - gam (k - 1) + q (k) = (3. * (a4 (1, k - 1) + d4 * a4 (1, k)) - q (k - 1)) / bet + gam (k) = d4 / bet + enddo + + a_bot = 1. + d4 * (d4 + 1.5) + q (km + 1) = (2. * d4 * (d4 + 1.) * a4 (1, km) + a4 (1, km - 1) - a_bot * q (km)) & + / (d4 * (d4 + 0.5) - a_bot * gam (km)) + + do k = km, 1, - 1 + q (k) = q (k) - gam (k) * q (k + 1) + enddo + + ! ----------------------------------------------------------------------- + ! apply constraints + ! ----------------------------------------------------------------------- + + do k = 2, km + gam (k) = a4 (1, k) - a4 (1, k - 1) + enddo + + ! ----------------------------------------------------------------------- + ! apply large - scale constraints to all fields if not local max / min + ! ----------------------------------------------------------------------- + + ! ----------------------------------------------------------------------- + ! top: + ! ----------------------------------------------------------------------- + + q (1) = max (q (1), 0.) + q (2) = min (q (2), max (a4 (1, 1), a4 (1, 2))) + q (2) = max (q (2), min (a4 (1, 1), a4 (1, 2)), 0.) + + ! ----------------------------------------------------------------------- + ! interior: + ! ----------------------------------------------------------------------- + + do k = 3, km - 1 + if (gam (k - 1) * gam (k + 1) > 0.) then + q (k) = min (q (k), max (a4 (1, k - 1), a4 (1, k))) + q (k) = max (q (k), min (a4 (1, k - 1), a4 (1, k))) + else + if (gam (k - 1) > 0.) then + ! there exists a local max + q (k) = max (q (k), min (a4 (1, k - 1), a4 (1, k))) + else + ! 
there exists a local min + q (k) = min (q (k), max (a4 (1, k - 1), a4 (1, k))) + q (k) = max (q (k), 0.0) + endif + endif + enddo + + ! ----------------------------------------------------------------------- + ! bottom : + ! ----------------------------------------------------------------------- + + q (km) = min (q (km), max (a4 (1, km - 1), a4 (1, km))) + q (km) = max (q (km), min (a4 (1, km - 1), a4 (1, km)), 0.) + ! q (km + 1) = max (q (km + 1), 0.) + + ! ----------------------------------------------------------------------- + ! f (s) = al + s * [ (ar - al) + a6 * (1 - s) ] (0 <= s <= 1) + ! ----------------------------------------------------------------------- + + do k = 1, km - 1 + a4 (2, k) = q (k) + a4 (3, k) = q (k + 1) + enddo + + do k = 2, km - 1 + if (gam (k) * gam (k + 1) > 0.0) then + extm (k) = .false. + else + extm (k) = .true. + endif + enddo + + if (do_mono) then + do k = 3, km - 2 + if (extm (k)) then + ! positive definite constraint only if true local extrema + if (a4 (1, k) < qp_min .or. extm (k - 1) .or. extm (k + 1)) then + a4 (2, k) = a4 (1, k) + a4 (3, k) = a4 (1, k) + endif + else + a4 (4, k) = 6. * a4 (1, k) - 3. * (a4 (2, k) + a4 (3, k)) + if (abs (a4 (4, k)) > abs (a4 (2, k) - a4 (3, k))) then + ! check within the smooth region if subgrid profile is non - monotonic + pmp_1 = a4 (1, k) - 2.0 * gam (k + 1) + lac_1 = pmp_1 + 1.5 * gam (k + 2) + a4 (2, k) = min (max (a4 (2, k), min (a4 (1, k), pmp_1, lac_1)), & + max (a4 (1, k), pmp_1, lac_1)) + pmp_2 = a4 (1, k) + 2.0 * gam (k) + lac_2 = pmp_2 - 1.5 * gam (k - 1) + a4 (3, k) = min (max (a4 (3, k), min (a4 (1, k), pmp_2, lac_2)), & + max (a4 (1, k), pmp_2, lac_2)) + endif + endif + enddo + else + do k = 3, km - 2 + if (extm (k)) then + if (a4 (1, k) < qp_min .or. extm (k - 1) .or. extm (k + 1)) then + a4 (2, k) = a4 (1, k) + a4 (3, k) = a4 (1, k) + endif + endif + enddo + endif + + do k = 1, km - 1 + a4 (4, k) = 6. * a4 (1, k) - 3. * (a4 (2, k) + a4 (3, k)) + enddo + + k = km - 1 + if (extm (k)) then + a4 (2, k) = a4 (1, k) + a4 (3, k) = a4 (1, k) + a4 (4, k) = 0. + else + da1 = a4 (3, k) - a4 (2, k) + da2 = da1 ** 2 + a6da = a4 (4, k) * da1 + if (a6da < - da2) then + a4 (4, k) = 3. * (a4 (2, k) - a4 (1, k)) + a4 (3, k) = a4 (2, k) - a4 (4, k) + elseif (a6da > da2) then + a4 (4, k) = 3. * (a4 (3, k) - a4 (1, k)) + a4 (2, k) = a4 (3, k) - a4 (4, k) + endif + endif + + call cs_limiters (km - 1, a4) + + ! ----------------------------------------------------------------------- + ! bottom layer: + ! ----------------------------------------------------------------------- + + a4 (2, km) = a4 (1, km) + a4 (3, km) = a4 (1, km) + a4 (4, km) = 0. + +end subroutine cs_profile + +subroutine cs_limiters (km, a4) + + implicit none + + integer, intent (in) :: km + + real, intent (inout) :: a4 (4, km) ! ppm array + + real, parameter :: r12 = 1. / 12. + + integer :: k + + ! ----------------------------------------------------------------------- + ! positive definite constraint + ! ----------------------------------------------------------------------- + + do k = 1, km + if (abs (a4 (3, k) - a4 (2, k)) < - a4 (4, k)) then + if ((a4 (1, k) + 0.25 * (a4 (3, k) - a4 (2, k)) ** 2 / a4 (4, k) + a4 (4, k) * r12) < 0.) then + if (a4 (1, k) < a4 (3, k) .and. a4 (1, k) < a4 (2, k)) then + a4 (3, k) = a4 (1, k) + a4 (2, k) = a4 (1, k) + a4 (4, k) = 0. + elseif (a4 (3, k) > a4 (2, k)) then + a4 (4, k) = 3. * (a4 (2, k) - a4 (1, k)) + a4 (3, k) = a4 (2, k) - a4 (4, k) + else + a4 (4, k) = 3. 
* (a4 (3, k) - a4 (1, k))
+ a4 (2, k) = a4 (3, k) - a4 (4, k)
+ endif
+ endif
+ endif
+ enddo
+
+end subroutine cs_limiters
+
+! =======================================================================
+! calculation of vertical fall speed
+! =======================================================================
+
+subroutine fall_speed (ktop, kbot, den, qs, qi, qg, ql, tk, vts, vti, vtg)
+
+ implicit none
+
+ integer, intent (in) :: ktop, kbot
+
+ real, intent (in), dimension (ktop:kbot) :: den, qs, qi, qg, ql, tk
+ real, intent (out), dimension (ktop:kbot) :: vts, vti, vtg
+
+ ! fall velocity constants:
+
+ real, parameter :: thi = 1.0e-8 ! cloud ice threshold for terminal fall
+ real, parameter :: thg = 1.0e-8
+ real, parameter :: ths = 1.0e-8
+
+ real, parameter :: aa = - 4.14122e-5
+ real, parameter :: bb = - 0.00538922
+ real, parameter :: cc = - 0.0516344
+ real, parameter :: dd = 0.00216078
+ real, parameter :: ee = 1.9714
+
+ ! marshall - palmer constants
+
+ real, parameter :: vcons = 6.6280504
+ real, parameter :: vcong = 87.2382675
+ real, parameter :: norms = 942477796.076938
+ real, parameter :: normg = 5026548245.74367
+
+ real, dimension (ktop:kbot) :: qden, tc, rhof
+
+ real :: vi0
+
+ integer :: k
+
+ ! -----------------------------------------------------------------------
+ ! marshall - palmer formula
+ ! -----------------------------------------------------------------------
+
+ ! -----------------------------------------------------------------------
+ ! try the local air density -- for global model; the true value could be
+ ! much smaller than sfcrho over high mountains
+ ! -----------------------------------------------------------------------
+
+ do k = ktop, kbot
+ rhof (k) = sqrt (min (10., sfcrho / den (k)))
+ enddo
+
+ ! -----------------------------------------------------------------------
+ ! ice:
+ ! -----------------------------------------------------------------------
+
+ if (const_vi) then
+ vti (:) = vi_fac
+ else
+ ! -----------------------------------------------------------------------
+ ! use deng and mace (2008, grl), which gives smaller fall speed than hd90 formula
+ ! -----------------------------------------------------------------------
+ vi0 = 0.01 * vi_fac
+ do k = ktop, kbot
+ if (qi (k) < thi) then ! this is needed as the fall - speed may be problematic for small qi
+ vti (k) = vf_min
+ else
+ tc (k) = tk (k) - tice
+ vti (k) = (3. + log10 (qi (k) * den (k))) * (tc (k) * (aa * tc (k) + bb) + cc) + dd * tc (k) + ee
+ vti (k) = vi0 * exp (log_10 * vti (k))
+ vti (k) = min (vi_max, max (vf_min, vti (k)))
+ endif
+ enddo
+ endif
+
+ ! -----------------------------------------------------------------------
+ ! snow:
+ ! -----------------------------------------------------------------------
+
+ if (const_vs) then
+ vts (:) = vs_fac ! 1. ifs_2016
+ else
+ do k = ktop, kbot
+ if (qs (k) < ths) then
+ vts (k) = vf_min
+ else
+ vts (k) = vs_fac * vcons * rhof (k) * exp (0.0625 * log (qs (k) * den (k) / norms))
+ vts (k) = min (vs_max, max (vf_min, vts (k)))
+ endif
+ enddo
+ endif
+
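+ ! note: the marshall - palmer forms used here give
+ ! vts ~ (qs * den / norms) ** 0.0625 and vtg ~ (qg * den / normg) ** 0.125,
+ ! so graupel fall speed responds more strongly to mixing ratio than snow
+ ! fall speed does
+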
+ ! -----------------------------------------------------------------------
+ ! graupel:
+ ! -----------------------------------------------------------------------
+
+ if (const_vg) then
+ vtg (:) = vg_fac ! 2.
+ else
+ do k = ktop, kbot
+ if (qg (k) < thg) then
+ vtg (k) = vf_min
+ else
+ vtg (k) = vg_fac * vcong * rhof (k) * sqrt (sqrt (sqrt (qg (k) * den (k) / normg)))
+ vtg (k) = min (vg_max, max (vf_min, vtg (k)))
+ endif
+ enddo
+ endif
+
+end subroutine fall_speed
+
+! =======================================================================
+! setup gfdl cloud microphysics parameters
+! =======================================================================
+
+subroutine setupm
+
+ implicit none
+
+ real :: gcon, cd, scm3, pisq, act (8)
+ real :: vdifu, tcond
+ real :: visk
+ real :: ch2o, hltf
+ real :: hlts, hltc, ri50
+
+ real, parameter :: gam263 = 1.456943, gam275 = 1.608355, gam290 = 1.827363, &
+ gam325 = 2.54925, gam350 = 3.323363, gam380 = 4.694155, &
+ gam425 = 8.285063, gam450 = 11.631769, gam480 = 17.837789, &
+ gam625 = 184.860962, gam680 = 496.604067
+
+ ! intercept parameters
+
+ real, parameter :: rnzr = 8.0e6 ! lin83
+ real, parameter :: rnzs = 3.0e6 ! lin83
+ real, parameter :: rnzg = 4.0e6 ! rh84
+
+ ! density parameters
+
+ real, parameter :: rhos = 0.1e3 ! lin83 (snow density; 1 / 10 of water)
+ real, parameter :: rhog = 0.4e3 ! rh84 (graupel density)
+ real, parameter :: acc (3) = (/ 5.0, 2.0, 0.5 /)
+
+ real den_rc
+
+ integer :: i, k
+
+ pie = 4. * atan (1.0)
+
+ ! s. klein's formula (eq 16) from am2
+
+ fac_rc = (4. / 3.) * pie * rhor * rthresh ** 3
+
+ if (prog_ccn) then
+ ! if (master) write (*, *) 'prog_ccn option is .t.'
+ else
+ den_rc = fac_rc * ccn_o * 1.e6
+ ! if (master) write (*, *) 'mp: for ccn_o = ', ccn_o, 'ql_rc = ', den_rc
+ den_rc = fac_rc * ccn_l * 1.e6
+ ! if (master) write (*, *) 'mp: for ccn_l = ', ccn_l, 'ql_rc = ', den_rc
+ endif
+
+ vdifu = 2.11e-5
+ tcond = 2.36e-2
+
+ visk = 1.259e-5
+ hlts = 2.8336e6
+ hltc = 2.5e6
+ hltf = 3.336e5
+
+ ch2o = 4.1855e3
+ ri50 = 1.e-4
+
+ pisq = pie * pie
+ scm3 = (visk / vdifu) ** (1. / 3.)
+
+ cracs = pisq * rnzr * rnzs * rhos
+ csacr = pisq * rnzr * rnzs * rhor
+ cgacr = pisq * rnzr * rnzg * rhor
+ cgacs = pisq * rnzg * rnzs * rhos
+ cgacs = cgacs * c_pgacs
+
+ ! act: 1 - 2:racs (s - r) ; 3 - 4:sacr (r - s) ;
+ ! 5 - 6:gacr (r - g) ; 7 - 8:gacs (s - g)
+
+ act (1) = pie * rnzs * rhos
+ act (2) = pie * rnzr * rhor
+ act (6) = pie * rnzg * rhog
+ act (3) = act (2)
+ act (4) = act (1)
+ act (5) = act (2)
+ act (7) = act (1)
+ act (8) = act (6)
+
+ do i = 1, 3
+ do k = 1, 4
+ acco (i, k) = acc (i) / (act (2 * k - 1) ** ((7 - i) * 0.25) * act (2 * k) ** (i * 0.25))
+ enddo
+ enddo
+
+ gcon = 40.74 * sqrt (sfcrho) ! 44.628
+
+ csacw = pie * rnzs * clin * gam325 / (4. * act (1) ** 0.8125)
+ ! decreasing csacw to reduce cloud water --- > snow
+
+ craci = pie * rnzr * alin * gam380 / (4. * act (2) ** 0.95)
+ csaci = csacw * c_psaci
+
+ cgacw = pie * rnzg * gam350 * gcon / (4. * act (6) ** 0.875)
+ ! cgaci = cgacw * 0.1
+
+ ! sjl, may 28, 2012
+ cgaci = cgacw * 0.05
+ ! sjl, may 28, 2012
+
+ cracw = craci ! cracw = 3.27206196043822
+ cracw = c_cracw * cracw
+
+ ! subl and revp: five constants for three separate processes
+
+ cssub (1) = 2. * pie * vdifu * tcond * rvgas * rnzs
+ cgsub (1) = 2. * pie * vdifu * tcond * rvgas * rnzg
+ crevp (1) = 2. 
* pie * vdifu * tcond * rvgas * rnzr + cssub (2) = 0.78 / sqrt (act (1)) + cgsub (2) = 0.78 / sqrt (act (6)) + crevp (2) = 0.78 / sqrt (act (2)) + cssub (3) = 0.31 * scm3 * gam263 * sqrt (clin / visk) / act (1) ** 0.65625 + cgsub (3) = 0.31 * scm3 * gam275 * sqrt (gcon / visk) / act (6) ** 0.6875 + crevp (3) = 0.31 * scm3 * gam290 * sqrt (alin / visk) / act (2) ** 0.725 + cssub (4) = tcond * rvgas + cssub (5) = hlts ** 2 * vdifu + cgsub (4) = cssub (4) + crevp (4) = cssub (4) + cgsub (5) = cssub (5) + crevp (5) = hltc ** 2 * vdifu + + cgfr (1) = 20.e2 * pisq * rnzr * rhor / act (2) ** 1.75 + cgfr (2) = 0.66 + + ! smlt: five constants (lin et al. 1983) + + csmlt (1) = 2. * pie * tcond * rnzs / hltf + csmlt (2) = 2. * pie * vdifu * rnzs * hltc / hltf + csmlt (3) = cssub (2) + csmlt (4) = cssub (3) + csmlt (5) = ch2o / hltf + + ! gmlt: five constants + + cgmlt (1) = 2. * pie * tcond * rnzg / hltf + cgmlt (2) = 2. * pie * vdifu * rnzg * hltc / hltf + cgmlt (3) = cgsub (2) + cgmlt (4) = cgsub (3) + cgmlt (5) = ch2o / hltf + + es0 = 6.107799961e2 ! ~6.1 mb + ces0 = eps * es0 + +end subroutine setupm + +! ======================================================================= +! initialization of gfdl cloud microphysics +! ======================================================================= + +!subroutine gfdl_cloud_microphys_init (id, jd, kd, axes, time) +subroutine gfdl_cloud_microphys_init (me, master, nlunit, input_nml_file, logunit, fn_nml) + + implicit none + + integer, intent (in) :: me + integer, intent (in) :: master + integer, intent (in) :: nlunit + integer, intent (in) :: logunit + + character (len = 64), intent (in) :: fn_nml + character (len = *), intent (in) :: input_nml_file (:) + + integer :: ios + logical :: exists + + ! integer, intent (in) :: id, jd, kd + ! integer, intent (in) :: axes (4) + ! type (time_type), intent (in) :: time + + ! integer :: unit, io, ierr, k, logunit + ! logical :: flag + ! real :: tmp, q1, q2 + + ! master = (mpp_pe () .eq.mpp_root_pe ()) + + !#ifdef internal_file_nml + ! read (input_nml_file, nml = gfdl_cloud_microphys_nml, iostat = io) + ! ierr = check_nml_error (io, 'gfdl_cloud_microphys_nml') + !#else + ! if (file_exist ('input.nml')) then + ! unit = open_namelist_file () + ! io = 1 + ! do while (io .ne. 0) + ! read (unit, nml = gfdl_cloud_microphys_nml, iostat = io, end = 10) + ! ierr = check_nml_error (io, 'gfdl_cloud_microphys_nml') + ! enddo + !10 call close_file (unit) + ! endif + !#endif + ! call write_version_number ('gfdl_cloud_microphys_mod', version) + ! logunit = stdlog () + +#ifdef INTERNAL_FILE_NML + read (input_nml_file, nml = gfdl_cloud_microphysics_nml) +#else + inquire (file = trim (fn_nml), exist = exists) + if (.not. exists) then + write (6, *) 'gfdl - mp :: namelist file: ', trim (fn_nml), ' does not exist' + stop + else + open (unit = nlunit, file = fn_nml, readonly, status = 'old', iostat = ios) + endif + rewind (nlunit) + read (nlunit, nml = gfdl_cloud_microphysics_nml) + close (nlunit) +#endif + + ! write version number and namelist to log file + + if (me == master) then + write (logunit, *) " ================================================================== " + write (logunit, *) "gfdl_cloud_microphys_mod" + write (logunit, nml = gfdl_cloud_microphysics_nml) + endif + + if (do_setup) then + call setup_con + call setupm + do_setup = .false. + endif + + log_10 = log (10.) + + tice0 = tice - 0.01 + t_wfr = tice - 40.0 ! supercooled water can exist down to - 48 c, which is the "absolute" + + ! 
if (master) write (logunit, nml = gfdl_cloud_microphys_nml) + ! + ! id_vtr = register_diag_field (mod_name, 'vt_r', axes (1:3), time, & + ! 'rain fall speed', 'm / s', missing_value = missing_value) + ! id_vts = register_diag_field (mod_name, 'vt_s', axes (1:3), time, & + ! 'snow fall speed', 'm / s', missing_value = missing_value) + ! id_vtg = register_diag_field (mod_name, 'vt_g', axes (1:3), time, & + ! 'graupel fall speed', 'm / s', missing_value = missing_value) + ! id_vti = register_diag_field (mod_name, 'vt_i', axes (1:3), time, & + ! 'ice fall speed', 'm / s', missing_value = missing_value) + + ! id_droplets = register_diag_field (mod_name, 'droplets', axes (1:3), time, & + ! 'droplet number concentration', '# / m3', missing_value = missing_value) + ! id_rh = register_diag_field (mod_name, 'rh_lin', axes (1:2), time, & + ! 'relative humidity', 'n / a', missing_value = missing_value) + + ! id_rain = register_diag_field (mod_name, 'rain_lin', axes (1:2), time, & + ! 'rain_lin', 'mm / day', missing_value = missing_value) + ! id_snow = register_diag_field (mod_name, 'snow_lin', axes (1:2), time, & + ! 'snow_lin', 'mm / day', missing_value = missing_value) + ! id_graupel = register_diag_field (mod_name, 'graupel_lin', axes (1:2), time, & + ! 'graupel_lin', 'mm / day', missing_value = missing_value) + ! id_ice = register_diag_field (mod_name, 'ice_lin', axes (1:2), time, & + ! 'ice_lin', 'mm / day', missing_value = missing_value) + ! id_prec = register_diag_field (mod_name, 'prec_lin', axes (1:2), time, & + ! 'prec_lin', 'mm / day', missing_value = missing_value) + + ! if (master) write (*, *) 'prec_lin diagnostics initialized.', id_prec + + ! id_cond = register_diag_field (mod_name, 'cond_lin', axes (1:2), time, & + ! 'total condensate', 'kg / m ** 2', missing_value = missing_value) + ! id_var = register_diag_field (mod_name, 'var_lin', axes (1:2), time, & + ! 'subgrid variance', 'n / a', missing_value = missing_value) + + ! call qsmith_init + + ! testing the water vapor tables + + ! if (mp_debug .and. master) then + ! write (*, *) 'testing water vapor tables in gfdl_cloud_microphys' + ! tmp = tice - 90. + ! do k = 1, 25 + ! q1 = wqsat_moist (tmp, 0., 1.e5) + ! q2 = qs1d_m (tmp, 0., 1.e5) + ! write (*, *) nint (tmp - tice), q1, q2, 'dq = ', q1 - q2 + ! tmp = tmp + 5. + ! enddo + ! endif + + ! if (master) write (*, *) 'gfdl_cloud_micrphys diagnostics initialized.' + + ! gfdl_mp_clock = mpp_clock_id ('gfdl_cloud_microphys', grain = clock_routine) + + module_is_initialized = .true. + +end subroutine gfdl_cloud_microphys_init + +! ======================================================================= +! end of gfdl cloud microphysics +! ======================================================================= + +subroutine gfdl_cloud_microphys_end + + implicit none + + deallocate (table) + deallocate (table2) + deallocate (table3) + deallocate (tablew) + deallocate (des) + deallocate (des2) + deallocate (des3) + deallocate (desw) + + tables_are_initialized = .false. + +end subroutine gfdl_cloud_microphys_end + +! ======================================================================= +! qsmith table initialization +! ======================================================================= + +subroutine setup_con + + implicit none + + ! master = (mpp_pe () .eq.mpp_root_pe ()) + + rgrav = 1. / grav + + if (.not. qsmith_tables_initialized) call qsmith_init + + qsmith_tables_initialized = .true. + +end subroutine setup_con + +! 
======================================================================= +! accretion function (lin et al. 1983) +! ======================================================================= + +real function acr3d (v1, v2, q1, q2, c, cac, rho) + + implicit none + + real, intent (in) :: v1, v2, c, rho + real, intent (in) :: q1, q2 ! mixing ratio!!! + real, intent (in) :: cac (3) + + real :: t1, s1, s2 + + ! integer :: k + ! + ! real :: a + ! + ! a = 0.0 + ! do k = 1, 3 + ! a = a + cac (k) * ((q1 * rho) ** ((7 - k) * 0.25) * (q2 * rho) ** (k * 0.25)) + ! enddo + ! acr3d = c * abs (v1 - v2) * a / rho + + ! optimized + + t1 = sqrt (q1 * rho) + s1 = sqrt (q2 * rho) + s2 = sqrt (s1) ! s1 = s2 ** 2 + acr3d = c * abs (v1 - v2) * q1 * s2 * (cac (1) * t1 + cac (2) * sqrt (t1) * s2 + cac (3) * s1) + +end function acr3d + +! ======================================================================= +! melting of snow function (lin et al. 1983) +! note: psacw and psacr must be calc before smlt is called +! ======================================================================= + +real function smlt (tc, dqs, qsrho, psacw, psacr, c, rho, rhofac) + + implicit none + + real, intent (in) :: tc, dqs, qsrho, psacw, psacr, c (5), rho, rhofac + + smlt = (c (1) * tc / rho - c (2) * dqs) * (c (3) * sqrt (qsrho) + & + c (4) * qsrho ** 0.65625 * sqrt (rhofac)) + c (5) * tc * (psacw + psacr) + +end function smlt + +! ======================================================================= +! melting of graupel function (lin et al. 1983) +! note: pgacw and pgacr must be calc before gmlt is called +! ======================================================================= + +real function gmlt (tc, dqs, qgrho, pgacw, pgacr, c, rho) + + implicit none + + real, intent (in) :: tc, dqs, qgrho, pgacw, pgacr, c (5), rho + + gmlt = (c (1) * tc / rho - c (2) * dqs) * (c (3) * sqrt (qgrho) + & + c (4) * qgrho ** 0.6875 / rho ** 0.25) + c (5) * tc * (pgacw + pgacr) + +end function gmlt + +! ======================================================================= +! initialization +! prepare saturation water vapor pressure tables +! ======================================================================= + +subroutine qsmith_init + + implicit none + + integer, parameter :: length = 2621 + + integer :: i + + if (.not. tables_are_initialized) then + + ! master = (mpp_pe () .eq. mpp_root_pe ()) + ! if (master) print *, ' gfdl mp: initializing qs tables' + + ! debug code + ! print *, mpp_pe (), allocated (table), allocated (table2), & + ! allocated (table3), allocated (tablew), allocated (des), & + ! allocated (des2), allocated (des3), allocated (desw) + ! end debug code + + ! generate es table (dt = 0.1 deg. c) + + allocate (table (length)) + allocate (table2 (length)) + allocate (table3 (length)) + allocate (tablew (length)) + allocate (des (length)) + allocate (des2 (length)) + allocate (des3 (length)) + allocate (desw (length)) + + call qs_table (length) + call qs_table2 (length) + call qs_table3 (length) + call qs_tablew (length) + + do i = 1, length - 1 + des (i) = max (0., table (i + 1) - table (i)) + des2 (i) = max (0., table2 (i + 1) - table2 (i)) + des3 (i) = max (0., table3 (i + 1) - table3 (i)) + desw (i) = max (0., tablew (i + 1) - tablew (i)) + enddo + des (length) = des (length - 1) + des2 (length) = des2 (length - 1) + des3 (length) = des3 (length - 1) + desw (length) = desw (length - 1) + + tables_are_initialized = .true. + + endif + +end subroutine qsmith_init + +! 
=======================================================================
+! compute the saturated specific humidity for table ii
+! =======================================================================
+
+real function wqs1 (ta, den)
+
+ implicit none
+
+ ! pure water phase; universal dry / moist formula using air density
+ ! input "den" can be either dry or moist air density
+
+ real, intent (in) :: ta, den
+
+ real :: es, ap1, tmin
+
+ integer :: it
+
+ tmin = table_ice - 160.
+ ap1 = 10. * dim (ta, tmin) + 1.
+ ap1 = min (2621., ap1)
+ it = ap1
+ es = tablew (it) + (ap1 - it) * desw (it)
+ wqs1 = es / (rvgas * ta * den)
+
+end function wqs1
+
+! =======================================================================
+! compute the gradient of saturated specific humidity for table ii
+! =======================================================================
+
+real function wqs2 (ta, den, dqdt)
+
+ implicit none
+
+ ! pure water phase; universal dry / moist formula using air density
+ ! input "den" can be either dry or moist air density
+
+ real, intent (in) :: ta, den
+
+ real, intent (out) :: dqdt
+
+ real :: es, ap1, tmin
+
+ integer :: it
+
+ tmin = table_ice - 160.
+
+ if (.not. tables_are_initialized) call qsmith_init
+
+ ap1 = 10. * dim (ta, tmin) + 1.
+ ap1 = min (2621., ap1)
+ it = ap1
+ es = tablew (it) + (ap1 - it) * desw (it)
+ wqs2 = es / (rvgas * ta * den)
+ it = ap1 - 0.5
+ ! finite diff, del_t = 0.1:
+ dqdt = 10. * (desw (it) + (ap1 - it) * (desw (it + 1) - desw (it))) / (rvgas * ta * den)
+
+end function wqs2
+
+! =======================================================================
+! compute wet bulb temperature
+! =======================================================================
+
+real function wet_bulb (q, t, den)
+
+ implicit none
+
+ real, intent (in) :: t, q, den
+
+ real :: qs, tp, dqdt
+
+ wet_bulb = t
+ qs = wqs2 (wet_bulb, den, dqdt)
+ tp = 0.5 * (qs - q) / (1. + lcp * dqdt) * lcp
+ wet_bulb = wet_bulb - tp
+
+ ! tp is negative if super - saturated
+ if (tp > 0.01) then
+ qs = wqs2 (wet_bulb, den, dqdt)
+ tp = (qs - q) / (1. + lcp * dqdt) * lcp
+ wet_bulb = wet_bulb - tp
+ endif
+
+end function wet_bulb
+
+! =======================================================================
+! compute the saturated specific humidity for table iii
+! =======================================================================
+
+real function iqs1 (ta, den)
+
+ implicit none
+
+ ! water - ice phase; universal dry / moist formula using air density
+ ! input "den" can be either dry or moist air density
+
+ real, intent (in) :: ta, den
+
+ real :: es, ap1, tmin
+
+ integer :: it
+
+ tmin = table_ice - 160.
+ ap1 = 10. * dim (ta, tmin) + 1.
+ ap1 = min (2621., ap1)
+ it = ap1
+ es = table2 (it) + (ap1 - it) * des2 (it)
+ iqs1 = es / (rvgas * ta * den)
+
+end function iqs1
+
+! =======================================================================
+! compute the gradient of saturated specific humidity for table iii
+! =======================================================================
+
+real function iqs2 (ta, den, dqdt)
+
+ implicit none
+
+ ! water - ice phase; universal dry / moist formula using air density
+ ! input "den" can be either dry or moist air density
+
+ real, intent (in) :: ta, den
+
+ real, intent (out) :: dqdt
+
+ real :: es, ap1, tmin
+
+ integer :: it
+
+ tmin = table_ice - 160.
+ ap1 = 10. * dim (ta, tmin) + 1. 
+ ap1 = min (2621., ap1) + it = ap1 + es = table2 (it) + (ap1 - it) * des2 (it) + iqs2 = es / (rvgas * ta * den) + it = ap1 - 0.5 + dqdt = 10. * (des2 (it) + (ap1 - it) * (des2 (it + 1) - des2 (it))) / (rvgas * ta * den) + +end function iqs2 + +! ======================================================================= +! compute the gradient of saturated specific humidity for table iii +! ======================================================================= + +real function qs1d_moist (ta, qv, pa, dqdt) + + implicit none + + real, intent (in) :: ta, pa, qv + + real, intent (out) :: dqdt + + real :: es, ap1, tmin, eps10 + + integer :: it + + tmin = table_ice - 160. + eps10 = 10. * eps + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es = table2 (it) + (ap1 - it) * des2 (it) + qs1d_moist = eps * es * (1. + zvir * qv) / pa + it = ap1 - 0.5 + dqdt = eps10 * (des2 (it) + (ap1 - it) * (des2 (it + 1) - des2 (it))) * (1. + zvir * qv) / pa + +end function qs1d_moist + +! ======================================================================= +! compute the gradient of saturated specific humidity for table ii +! ======================================================================= + +real function wqsat2_moist (ta, qv, pa, dqdt) + + implicit none + + real, intent (in) :: ta, pa, qv + + real, intent (out) :: dqdt + + real :: es, ap1, tmin, eps10 + + integer :: it + + tmin = table_ice - 160. + eps10 = 10. * eps + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es = tablew (it) + (ap1 - it) * desw (it) + wqsat2_moist = eps * es * (1. + zvir * qv) / pa + it = ap1 - 0.5 + dqdt = eps10 * (desw (it) + (ap1 - it) * (desw (it + 1) - desw (it))) * (1. + zvir * qv) / pa + +end function wqsat2_moist + +! ======================================================================= +! compute the saturated specific humidity for table ii +! ======================================================================= + +real function wqsat_moist (ta, qv, pa) + + implicit none + + real, intent (in) :: ta, pa, qv + + real :: es, ap1, tmin + + integer :: it + + tmin = table_ice - 160. + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es = tablew (it) + (ap1 - it) * desw (it) + wqsat_moist = eps * es * (1. + zvir * qv) / pa + +end function wqsat_moist + +! ======================================================================= +! compute the saturated specific humidity for table iii +! ======================================================================= + +real function qs1d_m (ta, qv, pa) + + implicit none + + real, intent (in) :: ta, pa, qv + + real :: es, ap1, tmin + + integer :: it + + tmin = table_ice - 160. + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es = table2 (it) + (ap1 - it) * des2 (it) + qs1d_m = eps * es * (1. + zvir * qv) / pa + +end function qs1d_m + +! ======================================================================= +! computes the difference in saturation vapor * density * between water and ice +! ======================================================================= + +real function d_sat (ta, den) + + implicit none + + real, intent (in) :: ta, den + + real :: es_w, es_i, ap1, tmin + + integer :: it + + tmin = table_ice - 160. + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es_w = tablew (it) + (ap1 - it) * desw (it) + es_i = table2 (it) + (ap1 - it) * des2 (it) + d_sat = dim (es_w, es_i) / (rvgas * ta * den) ! take positive difference + +end function d_sat + +! 
======================================================================= +! compute the saturated water vapor pressure for table ii +! ======================================================================= + +real function esw_table (ta) + + implicit none + + real, intent (in) :: ta + + real :: ap1, tmin + + integer :: it + + tmin = table_ice - 160. + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + esw_table = tablew (it) + (ap1 - it) * desw (it) + +end function esw_table + +! ======================================================================= +! compute the saturated water vapor pressure for table iii +! ======================================================================= + +real function es2_table (ta) + + implicit none + + real, intent (in) :: ta + + real :: ap1, tmin + + integer :: it + + tmin = table_ice - 160. + ap1 = 10. * dim (ta, tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es2_table = table2 (it) + (ap1 - it) * des2 (it) + +end function es2_table + +! ======================================================================= +! compute the saturated water vapor pressure for table ii +! ======================================================================= + +subroutine esw_table1d (ta, es, n) + + implicit none + + integer, intent (in) :: n + + real, intent (in) :: ta (n) + + real, intent (out) :: es (n) + + real :: ap1, tmin + + integer :: i, it + + tmin = table_ice - 160. + + do i = 1, n + ap1 = 10. * dim (ta (i), tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es (i) = tablew (it) + (ap1 - it) * desw (it) + enddo + +end subroutine esw_table1d + +! ======================================================================= +! compute the saturated water vapor pressure for table iii +! ======================================================================= + +subroutine es2_table1d (ta, es, n) + + implicit none + + integer, intent (in) :: n + + real, intent (in) :: ta (n) + + real, intent (out) :: es (n) + + real :: ap1, tmin + + integer :: i, it + + tmin = table_ice - 160. + + do i = 1, n + ap1 = 10. * dim (ta (i), tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es (i) = table2 (it) + (ap1 - it) * des2 (it) + enddo + +end subroutine es2_table1d + +! ======================================================================= +! compute the saturated water vapor pressure for table iv +! ======================================================================= + +subroutine es3_table1d (ta, es, n) + + implicit none + + integer, intent (in) :: n + + real, intent (in) :: ta (n) + + real, intent (out) :: es (n) + + real :: ap1, tmin + + integer :: i, it + + tmin = table_ice - 160. + + do i = 1, n + ap1 = 10. * dim (ta (i), tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es (i) = table3 (it) + (ap1 - it) * des3 (it) + enddo + +end subroutine es3_table1d + +! ======================================================================= +! saturation water vapor pressure table ii +! 1 - phase table +! ======================================================================= + +subroutine qs_tablew (n) + + implicit none + + integer, intent (in) :: n + + real :: delt = 0.1 + real :: tmin, tem, fac0, fac1, fac2 + + integer :: i + + tmin = table_ice - 160. + + ! ----------------------------------------------------------------------- + ! compute es over water + ! 
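-----------------------------------------------------------------------
+    ! the loop below is the exact clausius - clapeyron integral for a
+    ! latent heat varying linearly in t, l (t) = lv0 + dc_vap * t:
+    !     es (t) = e00 * exp ((dc_vap * log (t / t_ice)
+    !                        + lv0 * (t - t_ice) / (t * t_ice)) / rvgas)
+    ! fac0, fac1, fac2 assemble this exponent in pieces
+    !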
-----------------------------------------------------------------------
+
+    do i = 1, n
+        tem = tmin + delt * real (i - 1)
+        fac0 = (tem - t_ice) / (tem * t_ice)
+        fac1 = fac0 * lv0
+        fac2 = (dc_vap * log (tem / t_ice) + fac1) / rvgas
+        tablew (i) = e00 * exp (fac2)
+    enddo
+
+end subroutine qs_tablew
+
+! =======================================================================
+! saturation water vapor pressure table iii
+! 2 - phase table
+! =======================================================================
+
+subroutine qs_table2 (n)
+
+    implicit none
+
+    integer, intent (in) :: n
+
+    real :: delt = 0.1
+    real :: tmin, tem0, tem1, fac0, fac1, fac2
+
+    integer :: i, i0, i1
+
+    tmin = table_ice - 160.
+
+    do i = 1, n
+        tem0 = tmin + delt * real (i - 1)
+        fac0 = (tem0 - t_ice) / (tem0 * t_ice)
+        if (i <= 1600) then
+            ! -----------------------------------------------------------------------
+            ! compute es over ice between - 160 deg c and 0 deg c.
+            ! -----------------------------------------------------------------------
+            fac1 = fac0 * li2
+            fac2 = (d2ice * log (tem0 / t_ice) + fac1) / rvgas
+        else
+            ! -----------------------------------------------------------------------
+            ! compute es over water between 0 deg c and 102 deg c.
+            ! -----------------------------------------------------------------------
+            fac1 = fac0 * lv0
+            fac2 = (dc_vap * log (tem0 / t_ice) + fac1) / rvgas
+        endif
+        table2 (i) = e00 * exp (fac2)
+    enddo
+
+    ! -----------------------------------------------------------------------
+    ! smoother around 0 deg c
+    ! -----------------------------------------------------------------------
+
+    i0 = 1600
+    i1 = 1601
+    ! 1 - 2 - 1 smoothing across the ice - water transition, using
+    ! table2's own entries
+    tem0 = 0.25 * (table2 (i0 - 1) + 2. * table2 (i0) + table2 (i0 + 1))
+    tem1 = 0.25 * (table2 (i1 - 1) + 2. * table2 (i1) + table2 (i1 + 1))
+    table2 (i0) = tem0
+    table2 (i1) = tem1
+
+end subroutine qs_table2
+
+! =======================================================================
+! saturation water vapor pressure table iv
+! 2 - phase table with " - 2 c" as the transition point
+! =======================================================================
+
+subroutine qs_table3 (n)
+
+    implicit none
+
+    integer, intent (in) :: n
+
+    real :: delt = 0.1
+    real :: esbasw, tbasw, esbasi, tmin, tem, aa, b, c, d, e
+    real :: tem0, tem1
+
+    integer :: i, i0, i1
+
+    esbasw = 1013246.0
+    tbasw = table_ice + 100.
+    esbasi = 6107.1
+    tmin = table_ice - 160.
+
+    do i = 1, n
+        tem = tmin + delt * real (i - 1)
+        ! if (i <= 1600) then
+        if (i <= 1580) then ! change to - 2 c
+            ! -----------------------------------------------------------------------
+            ! compute es over ice between - 160 deg c and - 2 deg c.
+            ! see smithsonian meteorological tables page 350.
+            ! -----------------------------------------------------------------------
+            aa = - 9.09718 * (table_ice / tem - 1.)
+            b = - 3.56654 * alog10 (table_ice / tem)
+            c = 0.876793 * (1. - tem / table_ice)
+            e = alog10 (esbasi)
+            table3 (i) = 0.1 * 10 ** (aa + b + c + e)
+        else
+            ! -----------------------------------------------------------------------
+            ! compute es over water between - 2 deg c and 102 deg c.
+            ! see smithsonian meteorological tables page 350.
+            ! -----------------------------------------------------------------------
+            aa = - 7.90298 * (tbasw / tem - 1.)
+            b = 5.02808 * alog10 (tbasw / tem)
+            c = - 1.3816e-7 * (10 ** ((1. - tem / tbasw) * 11.344) - 1.)
+            d = 8.1328e-3 * (10 ** ((tbasw / tem - 1.) * (- 3.49149)) - 1.)
+            e = alog10 (esbasw)
+            table3 (i) = 0.1 * 10 ** (aa + b + c + d + e)
+        endif
+    enddo
+
+    !
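-----------------------------------------------------------------------
+    ! the smithsonian ice and water fits above do not meet exactly at
+    ! the - 2 deg c transition, so the two entries straddling it are
+    ! 1 - 2 - 1 averaged below to remove the jump (and the kink in des3)
+    !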
-----------------------------------------------------------------------
+    ! smoother around - 2 deg c
+    ! -----------------------------------------------------------------------
+
+    i0 = 1580
+    i1 = 1581
+    ! 1 - 2 - 1 smoothing across the transition, using table3's own entries
+    tem0 = 0.25 * (table3 (i0 - 1) + 2. * table3 (i0) + table3 (i0 + 1))
+    tem1 = 0.25 * (table3 (i1 - 1) + 2. * table3 (i1) + table3 (i1 + 1))
+    table3 (i0) = tem0
+    table3 (i1) = tem1
+
+end subroutine qs_table3
+
+! =======================================================================
+! compute the saturated specific humidity for table i (blended)
+! note: this routine is based on "moist" mixing ratio
+! =======================================================================
+
+real function qs_blend (t, p, q)
+
+    implicit none
+
+    real, intent (in) :: t, p, q
+
+    real :: es, ap1, tmin
+
+    integer :: it
+
+    tmin = table_ice - 160.
+    ap1 = 10. * dim (t, tmin) + 1.
+    ap1 = min (2621., ap1)
+    it = ap1
+    es = table (it) + (ap1 - it) * des (it)
+    qs_blend = eps * es * (1. + zvir * q) / p
+
+end function qs_blend
+
+! =======================================================================
+! saturation water vapor pressure table i
+! 3 - phase table
+! =======================================================================
+
+subroutine qs_table (n)
+
+    implicit none
+
+    integer, intent (in) :: n
+
+    real :: delt = 0.1
+    real :: tmin, tem, esh20
+    real :: wice, wh2o, fac0, fac1, fac2
+    real :: esupc (200)
+
+    integer :: i
+
+    tmin = table_ice - 160.
+
+    ! -----------------------------------------------------------------------
+    ! compute es over ice between - 160 deg c and 0 deg c.
+    ! -----------------------------------------------------------------------
+
+    do i = 1, 1600
+        tem = tmin + delt * real (i - 1)
+        fac0 = (tem - t_ice) / (tem * t_ice)
+        fac1 = fac0 * li2
+        fac2 = (d2ice * log (tem / t_ice) + fac1) / rvgas
+        table (i) = e00 * exp (fac2)
+    enddo
+
+    ! -----------------------------------------------------------------------
+    ! compute es over water between - 20 deg c and 102 deg c.
+    ! -----------------------------------------------------------------------
+
+    do i = 1, 1221
+        tem = 253.16 + delt * real (i - 1)
+        fac0 = (tem - t_ice) / (tem * t_ice)
+        fac1 = fac0 * lv0
+        fac2 = (dc_vap * log (tem / t_ice) + fac1) / rvgas
+        esh20 = e00 * exp (fac2)
+        if (i <= 200) then
+            esupc (i) = esh20
+        else
+            table (i + 1400) = esh20
+        endif
+    enddo
+
+    ! -----------------------------------------------------------------------
+    ! derive blended es over ice and supercooled water between - 20 deg c and 0 deg c
+    ! -----------------------------------------------------------------------
+
+    do i = 1, 200
+        tem = 253.16 + delt * real (i - 1)
+        ! linear blending weights: wice goes from 1 at - 20 deg c to 0 at 0 deg c
+        wice = 0.05 * (table_ice - tem)
+        wh2o = 0.05 * (tem - 253.16)
+        table (i + 1400) = wice * table (i + 1400) + wh2o * esupc (i)
+    enddo
+
+end subroutine qs_table
+
+! =======================================================================
+! compute the saturated specific humidity and its gradient
+! input t in deg k, p in pa; p = rho rdry tv, moist pressure
+! =======================================================================
+
+subroutine qsmith (im, km, ks, t, p, q, qs, dqdt)
+
+    implicit none
+
+    integer, intent (in) :: im, km, ks
+
+    real, intent (in), dimension (im, km) :: t, p, q
+
+    real, intent (out), dimension (im, km) :: qs
+
+    real, intent (out), dimension (im, km), optional :: dqdt
+
+    real :: eps10, ap1, tmin
+
+    real, dimension (im, km) :: es
+
+    integer :: i, k, it
+
+    tmin = table_ice - 160.
+    eps10 = 10. 
* eps + + if (.not. tables_are_initialized) then + call qsmith_init + endif + + do k = ks, km + do i = 1, im + ap1 = 10. * dim (t (i, k), tmin) + 1. + ap1 = min (2621., ap1) + it = ap1 + es (i, k) = table (it) + (ap1 - it) * des (it) + qs (i, k) = eps * es (i, k) * (1. + zvir * q (i, k)) / p (i, k) + enddo + enddo + + if (present (dqdt)) then + do k = ks, km + do i = 1, im + ap1 = 10. * dim (t (i, k), tmin) + 1. + ap1 = min (2621., ap1) - 0.5 + it = ap1 + dqdt (i, k) = eps10 * (des (it) + (ap1 - it) * (des (it + 1) - des (it))) * (1. + zvir * q (i, k)) / p (i, k) + enddo + enddo + endif + +end subroutine qsmith + +! ======================================================================= +! fix negative water species +! this is designed for 6 - class micro - physics schemes +! ======================================================================= + +subroutine neg_adj (ktop, kbot, pt, dp, qv, ql, qr, qi, qs, qg) + + implicit none + + integer, intent (in) :: ktop, kbot + + real, intent (in), dimension (ktop:kbot) :: dp + + real, intent (inout), dimension (ktop:kbot) :: pt, qv, ql, qr, qi, qs, qg + + real, dimension (ktop:kbot) :: lcpk, icpk + + real :: dq, cvm + + integer :: k + + ! ----------------------------------------------------------------------- + ! define heat capacity and latent heat coefficient + ! ----------------------------------------------------------------------- + + do k = ktop, kbot + cvm = c_air + qv (k) * c_vap + (qr (k) + ql (k)) * c_liq + (qi (k) + qs (k) + qg (k)) * c_ice + lcpk (k) = (lv00 + d0_vap * pt (k)) / cvm + icpk (k) = (li00 + dc_ice * pt (k)) / cvm + enddo + + do k = ktop, kbot + + ! ----------------------------------------------------------------------- + ! ice phase: + ! ----------------------------------------------------------------------- + + ! if cloud ice < 0, borrow from snow + if (qi (k) < 0.) then + qs (k) = qs (k) + qi (k) + qi (k) = 0. + endif + ! if snow < 0, borrow from graupel + if (qs (k) < 0.) then + qg (k) = qg (k) + qs (k) + qs (k) = 0. + endif + ! if graupel < 0, borrow from rain + if (qg (k) < 0.) then + qr (k) = qr (k) + qg (k) + pt (k) = pt (k) - qg (k) * icpk (k) ! heating + qg (k) = 0. + endif + + ! ----------------------------------------------------------------------- + ! liquid phase: + ! ----------------------------------------------------------------------- + + ! if rain < 0, borrow from cloud water + if (qr (k) < 0.) then + ql (k) = ql (k) + qr (k) + qr (k) = 0. + endif + ! if cloud water < 0, borrow from water vapor + if (ql (k) < 0.) then + qv (k) = qv (k) + ql (k) + pt (k) = pt (k) - ql (k) * lcpk (k) ! heating + ql (k) = 0. + endif + + enddo + + ! ----------------------------------------------------------------------- + ! fix water vapor; borrow from below + ! ----------------------------------------------------------------------- + + do k = ktop, kbot - 1 + if (qv (k) < 0.) then + qv (k + 1) = qv (k + 1) + qv (k) * dp (k) / dp (k + 1) + qv (k) = 0. + endif + enddo + + ! ----------------------------------------------------------------------- + ! bottom layer; borrow from above + ! ----------------------------------------------------------------------- + + if (qv (kbot) < 0. .and. qv (kbot - 1) > 0.) then + dq = min (- qv (kbot) * dp (kbot), qv (kbot - 1) * dp (kbot - 1)) + qv (kbot - 1) = qv (kbot - 1) - dq / dp (kbot - 1) + qv (kbot) = qv (kbot) + dq / dp (kbot) + endif + +end subroutine neg_adj + +! ======================================================================= +! compute global sum +! 
quick local sum algorithm +! ======================================================================= + +!real function g_sum (p, ifirst, ilast, jfirst, jlast, area, mode) +! +! use mpp_mod, only: mpp_sum +! +! implicit none +! +! integer, intent (in) :: ifirst, ilast, jfirst, jlast +! integer, intent (in) :: mode ! if == 1 divided by area +! +! real, intent (in), dimension (ifirst:ilast, jfirst:jlast) :: p, area +! +! integer :: i, j +! +! real :: gsum +! +! if (global_area < 0.) then +! global_area = 0. +! do j = jfirst, jlast +! do i = ifirst, ilast +! global_area = global_area + area (i, j) +! enddo +! enddo +! call mpp_sum (global_area) +! endif +! +! gsum = 0. +! do j = jfirst, jlast +! do i = ifirst, ilast +! gsum = gsum + p (i, j) * area (i, j) +! enddo +! enddo +! call mpp_sum (gsum) +! +! if (mode == 1) then +! g_sum = gsum / global_area +! else +! g_sum = gsum +! endif +! +!end function g_sum + +! ======================================================================= +! interpolate to a prescribed height +! ======================================================================= + +subroutine interpolate_z (is, ie, js, je, km, zl, hgt, a3, a2) + + implicit none + + integer, intent (in) :: is, ie, js, je, km + + real, intent (in), dimension (is:ie, js:je, km) :: a3 + + real, intent (in), dimension (is:ie, js:je, km + 1) :: hgt ! hgt (k) > hgt (k + 1) + + real, intent (in) :: zl + + real, intent (out), dimension (is:ie, js:je) :: a2 + + real, dimension (km) :: zm ! middle layer height + + integer :: i, j, k + + !$omp parallel do default (none) shared (is, ie, js, je, km, hgt, zl, a2, a3) private (zm) + + do j = js, je + do i = is, ie + do k = 1, km + zm (k) = 0.5 * (hgt (i, j, k) + hgt (i, j, k + 1)) + enddo + if (zl >= zm (1)) then + a2 (i, j) = a3 (i, j, 1) + elseif (zl <= zm (km)) then + a2 (i, j) = a3 (i, j, km) + else + do k = 1, km - 1 + if (zl <= zm (k) .and. zl >= zm (k + 1)) then + a2 (i, j) = a3 (i, j, k) + (a3 (i, j, k + 1) - a3 (i, j, k)) * (zm (k) - zl) / (zm (k) - zm (k + 1)) + exit + endif + enddo + endif + enddo + enddo + +end subroutine interpolate_z + +! ======================================================================= +! radius of cloud species diagnosis +! ======================================================================= + +subroutine cloud_diagnosis (is, ie, js, je, den, qw, qi, qr, qs, qg, t, & + qcw, qci, qcr, qcs, qcg, rew, rei, rer, res, reg) + + implicit none + + integer, intent (in) :: is, ie, js, je + + real, intent (in), dimension (is:ie, js:je) :: den, t + real, intent (in), dimension (is:ie, js:je) :: qw, qi, qr, qs, qg ! units: kg / kg + + real, intent (out), dimension (is:ie, js:je) :: qcw, qci, qcr, qcs, qcg ! units: kg / m^3 + real, intent (out), dimension (is:ie, js:je) :: rew, rei, rer, res, reg ! units: micron + + integer :: i, j + + real :: lambdar, lambdas, lambdag + + real :: rhow = 1.0e3, rhor = 1.0e3, rhos = 1.0e2, rhog = 4.0e2 + real :: n0r = 8.0e6, n0s = 3.0e6, n0g = 4.0e6 + real :: alphar = 0.8, alphas = 0.25, alphag = 0.5 + real :: gammar = 17.837789, gammas = 8.2850630, gammag = 11.631769 + real :: qmin = 1.0e-5, ccn = 1.0e8, beta = 1.22 + + ! real :: rewmin = 1.0, rewmax = 25.0 + ! real :: reimin = 10.0, reimax = 300.0 + ! real :: rermin = 25.0, rermax = 225.0 + ! real :: resmin = 300, resmax = 1000.0 + ! 
real :: regmin = 1000.0, regmax = 1.0e5 + real :: rewmin = 5.0, rewmax = 10.0 + real :: reimin = 10.0, reimax = 150.0 + real :: rermin = 0.0, rermax = 10000.0 + real :: resmin = 0.0, resmax = 10000.0 + real :: regmin = 0.0, regmax = 10000.0 + + do j = js, je + do i = is, ie + + ! ----------------------------------------------------------------------- + ! cloud water (martin et al., 1994) + ! ----------------------------------------------------------------------- + + if (qw (i, j) .gt. qmin) then + qcw (i, j) = den (i, j) * qw (i, j) + rew (i, j) = exp (1.0 / 3.0 * log ((3 * qcw (i, j)) / (4 * pi * rhow * ccn))) * 1.0e6 + rew (i, j) = max (rewmin, min (rewmax, rew (i, j))) + else + qcw (i, j) = 0.0 + rew (i, j) = rewmin + endif + + ! ----------------------------------------------------------------------- + ! cloud ice (heymsfield and mcfarquhar, 1996) + ! ----------------------------------------------------------------------- + + if (qi (i, j) .gt. qmin) then + qci (i, j) = den (i, j) * qi (i, j) + if (t (i, j) - tice .lt. - 50) then + rei (i, j) = beta / 9.917 * exp ((1 - 0.891) * log (1.0e3 * qci (i, j))) * 1.0e3 + elseif (t (i, j) - tice .lt. - 40) then + rei (i, j) = beta / 9.337 * exp ((1 - 0.920) * log (1.0e3 * qci (i, j))) * 1.0e3 + elseif (t (i, j) - tice .lt. - 30) then + rei (i, j) = beta / 9.208 * exp ((1 - 0.945) * log (1.0e3 * qci (i, j))) * 1.0e3 + else + rei (i, j) = beta / 9.387 * exp ((1 - 0.969) * log (1.0e3 * qci (i, j))) * 1.0e3 + endif + rei (i, j) = max (reimin, min (reimax, rei (i, j))) + else + qci (i, j) = 0.0 + rei (i, j) = reimin + endif + + ! ----------------------------------------------------------------------- + ! rain (lin et al., 1983) + ! ----------------------------------------------------------------------- + + if (qr (i, j) .gt. qmin) then + qcr (i, j) = den (i, j) * qr (i, j) + lambdar = exp (0.25 * log (pi * rhor * n0r / qcr (i, j))) + rer (i, j) = 0.5 * exp (log (gammar / 6) / alphar) / lambdar * 1.0e6 + rer (i, j) = max (rermin, min (rermax, rer (i, j))) + else + qcr (i, j) = 0.0 + rer (i, j) = rermin + endif + + ! ----------------------------------------------------------------------- + ! snow (lin et al., 1983) + ! ----------------------------------------------------------------------- + + if (qs (i, j) .gt. qmin) then + qcs (i, j) = den (i, j) * qs (i, j) + lambdas = exp (0.25 * log (pi * rhos * n0s / qcs (i, j))) + res (i, j) = 0.5 * exp (log (gammas / 6) / alphas) / lambdas * 1.0e6 + res (i, j) = max (resmin, min (resmax, res (i, j))) + else + qcs (i, j) = 0.0 + res (i, j) = resmin + endif + + ! ----------------------------------------------------------------------- + ! graupel (lin et al., 1983) + ! ----------------------------------------------------------------------- + + if (qg (i, j) .gt. qmin) then + qcg (i, j) = den (i, j) * qg (i, j) + lambdag = exp (0.25 * log (pi * rhog * n0g / qcg (i, j))) + reg (i, j) = 0.5 * exp (log (gammag / 6) / alphag) / lambdag * 1.0e6 + reg (i, j) = max (regmin, min (regmax, reg (i, j))) + else + qcg (i, j) = 0.0 + reg (i, j) = regmin + endif + + enddo + enddo + +end subroutine cloud_diagnosis + +end module gfdl_cloud_microphys_mod diff --git a/driver/SHiELD/lin_cloud_microphys.F90 b/driver/SHiELD/lin_cloud_microphys.F90 deleted file mode 100644 index 9bde12411..000000000 --- a/driver/SHiELD/lin_cloud_microphys.F90 +++ /dev/null @@ -1,1324 +0,0 @@ -!*********************************************************************** -!* GNU Lesser General Public License -!* -!* This file is part of the FV3 dynamical core. 
-!* -!* The FV3 dynamical core is free software: you can redistribute it -!* and/or modify it under the terms of the -!* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or -!* (at your option) any later version. -!* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -!* See the GNU General Public License for more details. -!* -!* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. -!* If not, see . -!*********************************************************************** -! -! Cloud micro-physics package for GFDL global cloud resolving model -! The algorithms are originally derived from Lin et al 1983. Most of the key -! elements have been simplified/improved. This code at this stage bears little -! to no similarity to the original Lin MP in Zeta. Therefore, it is best to be called -! GFDL Micro-Physics (GFDL MP). -! Developer: Shian-Jiann Lin -! -module lin_cld_microphys_mod -! use mpp_mod, only: stdlog, mpp_pe, mpp_root_pe, mpp_clock_id, & -! mpp_clock_begin, mpp_clock_end, CLOCK_ROUTINE, & -! input_nml_file, mpp_max -! use diag_manager_mod, only: register_diag_field, send_data -! use time_manager_mod, only: time_type, get_time -! use constants_mod, only: grav, rdgas, rvgas, cp_air, cp_vapor, hlv, hlf, kappa, pi=>pi_8 -! use fms_mod, only: write_version_number, open_namelist_file, & -! check_nml_error, file_exist, close_file, & -! error_mesg, FATAL - - implicit none - private - - public lin_cld_microphys_driver, lin_cld_microphys_init, lin_cld_microphys_end, wqs1, wqs2, qs_blend - public qsmith_init, qsmith, es2_table1d, es3_table1d, esw_table1d, wqsat_moist, wqsat2_moist - public setup_con, wet_bulb - public cloud_diagnosis - public cracw - real :: missing_value = -1.e10 - logical :: module_is_initialized = .false. - logical :: qsmith_tables_initialized = .false. - character(len=17) :: mod_name = 'lin_cld_microphys' - -!==== constants_mod ==== -integer, public, parameter :: R_GRID=8 -real, parameter :: grav = 9.80665_R_GRID -real, parameter :: rdgas = 287.05_R_GRID -real, parameter :: rvgas = 461.50_R_GRID -real, parameter :: cp_air = 1004.6_R_GRID -real, parameter :: cp_vapor = 4.0_R_GRID*RVGAS -real, parameter :: hlv = 2.5e6_R_GRID -real, parameter :: hlf = 3.3358e5_R_GRID -real, parameter :: kappa = rdgas/cp_air -real, parameter :: pi = 3.1415926535897931_R_GRID -!==== constants_mod ==== - -!==== fms constants ==================== -!!! real, parameter :: latv = hlv ! = 2.500e6 -!!! real, parameter:: cv_air = 717.56 ! Satoh value -!!! real, parameter :: lati = hlf ! = 3.34e5 -!!! real, parameter :: lats = latv+lati ! = 2.834E6 -! rdgas = 287.04; rvgas = 461.50 -! cp_air =rdgas * 7./2. = 1006.64 ! heat capacity at constant pressure (j/kg/k) -! The following two are from Emanuel's book "Atmospheric Convection" -!!! real, parameter :: c_liq = 4190. ! heat capacity of water at 0C - ! - real, parameter :: eps = rdgas/rvgas ! = 0.621971831 - real, parameter :: zvir = rvgas/rdgas-1. ! = 0.607789855 - real, parameter :: table_ice = 273.16 ! freezing point for qs table - real, parameter :: cv_air = cp_air - rdgas ! = rdgas * (7/2-1) = 2.5*rdgas=717.68 -! real, parameter:: e00 = 610.71 ! saturation vapor pressure at T0 - real, parameter:: e00 = 611.21 ! 
IFS: saturation vapor pressure at T0 - real, parameter:: c_liq = 4.1855e+3 ! heat capacity of water at 0C -! real, parameter:: c_liq = 4218. ! ECMWF-IFS -! real, parameter:: c_ice = 2106. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) - real, parameter:: c_ice = 1972. ! heat capacity of ice at -15 C - real, parameter:: cp_vap = cp_vapor ! 1846. -! real, parameter:: cv_vap = 1410.0 ! Emanuel value -! For consistency, cv_vap derived FMS constants: - real, parameter:: cv_vap = cp_vap - rvgas ! 1384.5 - real, parameter:: dc_ice = c_liq - c_ice ! = 2084 - real, parameter:: dc_vap = cp_vap - c_liq ! = -2344. isobaric heating/cooling -! Values at 0 Deg C -! GFS value - real, parameter:: hlv0 = 2.5e6 -! real, parameter:: hlv0 = 2.501e6 ! Emanuel Appendix-2 -! GFS value - real, parameter:: hlf0 = 3.3358e5 -! real, parameter:: hlf0 = 3.337e5 ! Emanuel - real, parameter:: t_ice = 273.16 -! Latent heat at absolute zero: - real, parameter:: li00 = hlf0 - dc_ice*t_ice ! = -2.355446e5 - real, parameter:: Lv0 = hlv0 - dc_vap*t_ice ! = 3.141264e6 - - real, parameter:: d2ice = cp_vap - c_ice - real, parameter:: Li2 = hlv0+hlf0 - d2ice*t_ice - - real, parameter :: qrmin = 1.e-8 - real, parameter :: qvmin = 1.e-20 ! min value for water vapor (treated as zero) - real, parameter :: qcmin = 1.e-12 ! min value for cloud condensates - real, parameter :: sfcrho = 1.2 ! surface air density - real, parameter :: vr_min = 1.e-3 ! minimum fall speed for rain/graupel - real, parameter :: vf_min = 1.0E-5 - real, parameter :: rhor = 1.0e3 ! LFO83 - real, parameter :: dz_min = 1.e-2 - real :: cracs, csacr, cgacr, cgacs, acco(3,4), csacw, & - craci, csaci, cgacw, cgaci, cracw, cssub(5), cgsub(5), & - crevp(5), cgfr(2), csmlt(5), cgmlt(5) - real :: es0, ces0 - real :: pie, rgrav, fac_rc - real :: lcp, icp, tcp - real :: lv00, d0_vap, c_air, c_vap - - logical :: de_ice = .false. ! - logical :: sedi_transport = .true. ! - logical :: do_sedi_w = .false. - logical :: do_sedi_heat = .true. ! - logical :: prog_ccn = .false. ! do prognostic CCN (Yi Ming's method) - logical :: do_qa = .true. ! do inline cloud fraction - logical :: rad_snow =.true. - logical :: rad_graupel =.true. - logical :: rad_rain =.true. - logical :: fix_negative =.false. - logical :: do_setup=.true. - logical :: master - logical :: p_nonhydro = .false. - - real, allocatable:: table(:), table2(:), table3(:), tablew(:), des(:), des2(:), des3(:), desw(:) - logical :: tables_are_initialized = .false. - - integer:: id_rh, id_vtr, id_vts, id_vtg, id_vti, id_rain, id_snow, id_graupel, & - id_ice, id_prec, id_cond, id_var, id_droplets - real:: lati, latv, lats - - real, parameter :: dt_fr = 8. ! homogeneous freezing of all cloud water at t_wfr - dt_fr - ! minimum temperature water can exist (Moore & Molinero Nov. 2011, Nature) - ! dt_fr can be considered as the error bar - integer :: lin_cld_mp_clock ! clock for timing of driver routine - - real :: t_snow_melt = 16. ! snow melt tempearture scale factor - real :: t_grau_melt = 32. ! graupel melt tempearture scale factor - real :: p_min = 100. ! minimum pressure (Pascal) for MP to operate - -! For cloud-resolving: 1-5 km -! qi0_crt = 0.8E-4 -! qs0_crt = 0.6E-3 -! c_psaci = 0.1 -! c_pgacs = 0.1 -!---------------------- -! namelist parameters: -!---------------------- - real :: cld_min = 0.05 - real :: tice = 273.16 ! set tice = 165. to trun off ice-phase phys (Kessler emulator) - - real :: qc_crt = 5.0e-8 ! minimum condensate mixing ratio to allow partial cloudiness - real :: t_min = 178. ! 
Min temp to freeze-dry all water vapor - real :: t_sub = 184. ! Min temp for sublimation of cloud ice - real :: mp_time = 150. ! maximum micro-physics time step (sec) - - real :: rh_inc = 0.25 ! rh increment for complete evap of ql and qi - real :: rh_inr = 0.25 - real :: rh_ins = 0.25 ! rh increment for sublimation of snow - -! The following 3 time scales are for melting during terminal falls - real :: tau_r = 900. ! rain freezing time scale during fast_sat - real :: tau_s = 900. ! snow melt - real :: tau_g = 600. ! graupel melt - real :: tau_mlt = 600. ! ice melting time-scale - -! Fast MP: - real :: tau_i2s = 1000. ! ice2snow auto-conversion time scale (sec) - real :: tau_l2r = 900. -! cloud water - real :: tau_v2l = 150. ! vapor --> cloud water (condensation) time scale - real :: tau_l2v = 300. ! cloud water --> vapor (evaporation) time scale -! Graupel - real :: tau_g2v = 900. ! Grapuel sublimation time scale - real :: tau_v2g = 21600. ! Grapuel deposition -- make it a slow process - - real :: dw_land = 0.20 ! base value for subgrid deviation/variability over land - real :: dw_ocean = 0.10 ! base value for ocean - real :: ccn_o = 90. - real :: ccn_l = 270. - real :: rthresh = 10.0e-6 ! critical cloud drop radius (micro m) - -!------------------------------------------------------------- -! WRF/WSM6 scheme: qi_gen = 4.92e-11 * (1.e3*exp(0.1*tmp))**1.33 -! optimized: qi_gen = 4.92e-11 * exp( 1.33*log(1.e3*exp(0.1*tmp)) ) -! qi_gen ~ 4.808e-7 at 0 C; 1.818e-6 at -10 C, 9.82679e-5 at -40C -! the following value is constructed such that qc_crt = 0 at zero C and @ -10C matches -! WRF/WSM6 ice initiation scheme; qi_crt = qi_gen*min(qi_lim, 0.1*tmp) / den -! - real :: qi_gen = 1.82E-6 - real :: qi_lim = 1. - real :: ql_mlt = 2.0e-3 ! max value of cloud water allowed from melted cloud ice - real :: ql_gen = 1.0e-3 ! max ql generation during remapping step if fast_sat_adj = .T. - real :: sat_adj0 = 0.90 ! adjustment factor (0: no, 1: full) during fast_sat_adj - -! Cloud condensate upper bounds: "safety valves" for ql & qi - real :: ql0_max = 2.0e-3 ! max ql value (auto converted to rain) - real :: qi0_max = 1.0e-4 ! max qi value (by other sources) - - real :: qi0_crt = 1.0e-4 ! ice --> snow autocon threshold (was 1.E-4) - ! qi0_crt is highly dependent on horizontal resolution - real :: qr0_crt = 1.0e-4 ! rain --> snow or graupel/hail threshold - ! LFO used *mixing ratio* = 1.E-4 (hail in LFO) - real :: c_paut = 0.55 ! autoconversion ql --> qr (use 0.5 to reduce autoconversion) - real :: c_psaci = 0.02 ! accretion: cloud ice --> snow (was 0.1 in Zetac) - real :: c_piacr = 5.0 ! accretion: rain --> ice: - real :: c_cracw = 0.9 ! rain accretion efficiency - -! Decreasing clin to reduce csacw (so as to reduce cloud water ---> snow) - real:: alin = 842.0 - real:: clin = 4.8 ! 4.8 --> 6. (to ehance ql--> qs) - -!----------------- -! Graupel control: -!----------------- - real :: qs0_crt = 1.0e-3 ! snow --> graupel density threshold (0.6e-3 in Purdue Lin scheme) - real :: c_pgacs = 2.0e-3 ! snow --> graupel "accretion" eff. (was 0.1 in Zetac) - -! fall velocity tuning constants: - logical :: const_vi = .false. ! If .T. the constants are specified by v*_fac - logical :: const_vs = .false. - logical :: const_vg = .false. - logical :: const_vr = .false. - ! Good values: - real :: vi_fac = 1. ! If const_vi: 1/3 - real :: vs_fac = 1. ! If const_vs: 1. - real :: vg_fac = 1. ! If const_vg: 2. - real :: vr_fac = 1. ! If const_vr: 4. -! 
Upper bounds of fall speed (with variable speed option) - real :: vi_max = 0.5 ! max fall speed for ice - real :: vs_max = 5.0 ! max fall speed for snow - real :: vg_max = 8.0 ! max fall speed for graupel - real :: vr_max = 12. ! max fall speed for rain - - logical :: fast_sat_adj = .false. - logical :: z_slope_liq = .true. ! use linear mono slope for autocconversions - logical :: z_slope_ice = .false. ! use linear mono slope for autocconversions - logical :: use_ccn = .false. - logical :: use_ppm = .false. - logical :: mono_prof = .true. ! perform terminal fall with mono ppm scheme - logical :: mp_print = .false. - - real:: global_area = -1. - - real:: tice0, t_wfr - real:: log_10 - - public mp_time, t_min, t_sub, tau_r, tau_s, tau_g, dw_land, dw_ocean, & - vi_fac, vr_fac, vs_fac, vg_fac, ql_mlt, do_qa, fix_negative, & - vi_max, vs_max, vg_max, vr_max, & - qs0_crt, qi_gen, ql0_max, qi0_max, qi0_crt, qr0_crt, fast_sat_adj, & - rh_inc, rh_ins, rh_inr, const_vi, const_vs, const_vg, const_vr, & - use_ccn, rthresh, ccn_l, ccn_o, qc_crt, tau_g2v, tau_v2g, sat_adj0, & - c_piacr, tau_mlt, tau_v2l, tau_l2v, tau_i2s, tau_l2r, qi_lim, ql_gen, & - c_paut, c_psaci, c_pgacs, z_slope_liq, z_slope_ice, prog_ccn, & - c_cracw, alin, clin, tice, rad_snow, rad_graupel, rad_rain, & - cld_min, use_ppm, mono_prof, do_sedi_heat, sedi_transport, & - do_sedi_w, de_ice, mp_print - -!---- version number ----- - character(len=128) :: version = '$Id: lin_cloud_microphys.F90,v 21.0.2.1 2014/12/18 21:14:54 Lucas.Harris Exp $' - character(len=128) :: tagname = '$Name: $' - - contains - - - subroutine lin_cld_microphys_driver(qv, ql, qr, qi, qs, qg, qa, qn, & - qv_dt, ql_dt, qr_dt, qi_dt, qs_dt, qg_dt, qa_dt, & - pt_dt, pt, w, uin, vin, udt, vdt, dz, delp, area, dt_in, & - land, rain, snow, ice, graupel, & - hydrostatic, phys_hydrostatic, & - iis,iie, jjs,jje, kks,kke, ktop, kbot, seconds) -! kks == 1; kke == kbot == npz - logical, intent(in):: hydrostatic, phys_hydrostatic - integer, intent(in):: iis,iie, jjs,jje ! physics window - integer, intent(in):: kks,kke ! vertical dimension - integer, intent(in):: ktop, kbot ! vertical compute domain - integer, intent(in):: seconds - real, intent(in):: dt_in - - real, intent(in ), dimension(:,:) :: area - real, intent(in ), dimension(:,:) :: land !land fraction - real, intent(out ), dimension(:,:) :: rain, snow, ice, graupel - real, intent(in ), dimension(:,:,:):: delp, dz, uin, vin - real, intent(in ), dimension(:,:,:):: pt, qv, ql, qr, qg, qa, qn - real, intent(inout), dimension(:,:,:):: qi, qs - real, intent(inout), dimension(:,:,:):: pt_dt, qa_dt, udt, vdt, w - real, intent(inout), dimension(:,:,:):: qv_dt, ql_dt, qr_dt, qi_dt, & - qs_dt, qg_dt - - - end subroutine lin_cld_microphys_driver - - - subroutine check_column(ktop, kbot, q, no_fall) - integer, intent(in):: ktop, kbot - real, intent(in):: q(ktop:kbot) - logical, intent(out):: no_fall -! local: - integer k - - no_fall = .true. - do k=ktop, kbot - if ( q(k) > qrmin ) then - no_fall = .false. 
- exit - endif - enddo - - end subroutine check_column - - - - subroutine setupm - - real :: gcon, cd, scm3, pisq, act(8), acc(3) - real :: vdifu, tcond - real :: visk - real :: ch2o, hltf - real :: hlts, hltc, ri50 - - real :: gam263, gam275, gam290, & - gam325, gam350, gam380, & - gam425, gam450, gam480, & - gam625, gam680 - - data gam263/1.456943/, gam275/1.608355/, gam290/1.827363/ & - gam325/2.54925/, gam350/3.323363/, gam380/4.694155/ & - gam425/8.285063/, gam450/11.631769/, gam480/17.837789/ & - gam625/184.860962/, gam680/496.604067/ -! -! physical constants (mks) -! - real :: rnzr, rnzs, rnzg, rhos, rhog - !Intercept parameters - data rnzr /8.0e6/ ! lin83 - data rnzs /3.0e6/ ! lin83 - data rnzg /4.0e6/ ! rh84 - !Density parameters - data rhos /0.1e3/ ! lin83 (snow density; 1/10 of water) - data rhog /0.4e3/ ! rh84 (graupel density) - data acc/5.0,2.0,0.5/ - - real den_rc - integer :: k, i - - pie = 4.*atan(1.0) - -! S. Klein's formular (EQ 16) from AM2 - fac_rc = (4./3.)*pie*rhor*rthresh**3 - - if ( prog_ccn ) then -! if(master) write(*,*) 'prog_ccn option is .T.' - else - den_rc = fac_rc * ccn_o*1.e6 -! if(master) write(*,*) 'MP: rthresh=', rthresh, 'vi_fac=', vi_fac -! if(master) write(*,*) 'MP: for ccn_o=', ccn_o, 'ql_rc=', den_rc - den_rc = fac_rc * ccn_l*1.e6 -! if(master) write(*,*) 'MP: for ccn_l=', ccn_l, 'ql_rc=', den_rc - endif - - vdifu=2.11e-5 - tcond=2.36e-2 - - visk=1.259e-5 - hlts=2.8336e6 - hltc=2.5e6 - hltf=3.336e5 - - ch2o=4.1855e3 - ri50=1.e-4 - - pisq = pie*pie - scm3 = (visk/vdifu)**(1./3.) -! - cracs = pisq*rnzr*rnzs*rhos - csacr = pisq*rnzr*rnzs*rhor - cgacr = pisq*rnzr*rnzg*rhor - cgacs = pisq*rnzg*rnzs*rhos - cgacs = cgacs*c_pgacs -! -! act: 1-2:racs(s-r); 3-4:sacr(r-s); -! 5-6:gacr(r-g); 7-8:gacs(s-g) -! - act(1) = pie * rnzs * rhos - act(2) = pie * rnzr * rhor - act(6) = pie * rnzg * rhog - act(3) = act(2) - act(4) = act(1) - act(5) = act(2) - act(7) = act(1) - act(8) = act(6) - - do i=1,3 - do k=1,4 - acco(i,k) = acc(i)/(act(2*k-1)**((7-i)*0.25)*act(2*k)**(i*0.25)) - enddo - enddo -! - gcon = 40.74 * sqrt( sfcrho ) ! 44.628 -! - csacw = pie*rnzs*clin*gam325/(4.*act(1)**0.8125) -! Decreasing csacw to reduce cloud water ---> snow - - craci = pie*rnzr*alin*gam380/(4.*act(2)**0.95) - csaci = csacw * c_psaci -! - cgacw = pie*rnzg*gam350*gcon/(4.*act(6)**0.875) -! cgaci = cgacw*0.1 -! SJL, May 28, 2012 - cgaci = cgacw*0.05 -! - cracw = craci ! cracw= 3.27206196043822 - cracw = c_cracw * cracw -! -! subl and revp: five constants for three separate processes -! - cssub(1) = 2.*pie*vdifu*tcond*rvgas*rnzs - cgsub(1) = 2.*pie*vdifu*tcond*rvgas*rnzg - crevp(1) = 2.*pie*vdifu*tcond*rvgas*rnzr - cssub(2) = 0.78/sqrt(act(1)) - cgsub(2) = 0.78/sqrt(act(6)) - crevp(2) = 0.78/sqrt(act(2)) - cssub(3) = 0.31*scm3*gam263*sqrt(clin/visk)/act(1)**0.65625 - cgsub(3) = 0.31*scm3*gam275*sqrt(gcon/visk)/act(6)**0.6875 - crevp(3) = 0.31*scm3*gam290*sqrt(alin/visk)/act(2)**0.725 - cssub(4) = tcond*rvgas - cssub(5) = hlts**2*vdifu - cgsub(4) = cssub(4) - crevp(4) = cssub(4) - cgsub(5) = cssub(5) - crevp(5) = hltc**2*vdifu -! - cgfr(1) = 20.e2*pisq*rnzr*rhor/act(2)**1.75 - cgfr(2) = 0.66 -! -!sk ******************************************************************** -!sk smlt: five constants ( lin et al. 1983 ) - csmlt(1) = 2.*pie*tcond*rnzs/hltf - csmlt(2) = 2.*pie*vdifu*rnzs*hltc/hltf - csmlt(3) = cssub(2) - csmlt(4) = cssub(3) - csmlt(5) = ch2o/hltf -!sk ******************************************************************** -! 
gmlt: five constants - cgmlt(1) = 2.*pie*tcond*rnzg/hltf - cgmlt(2) = 2.*pie*vdifu*rnzg*hltc/hltf - cgmlt(3) = cgsub(2) - cgmlt(4) = cgsub(3) - cgmlt(5) = ch2o/hltf -!sk ******************************************************************** - es0 = 6.107799961e2 ! ~6.1 mb - ces0 = eps*es0 - - end subroutine setupm - - - subroutine lin_cld_microphys_init - - end subroutine lin_cld_microphys_init - - - - subroutine lin_cld_microphys_end - - deallocate ( table ) - deallocate ( table2 ) - deallocate ( table3 ) - deallocate ( tablew ) - deallocate ( des ) - deallocate ( des2 ) - deallocate ( des3 ) - deallocate ( desw ) - - tables_are_initialized = .false. - - end subroutine lin_cld_microphys_end - - - - subroutine setup_con - -! master = (mpp_pe().eq.mpp_root_pe()) - rgrav = 1./ grav - - if ( .not. qsmith_tables_initialized ) call qsmith_init - qsmith_tables_initialized = .true. - - end subroutine setup_con - - - - real function acr3d(v1, v2, q1, q2, c, cac, rho) - real, intent(in) :: v1, v2, c, rho - real, intent(in) :: q1, q2 ! mixing ratio!!! - real, intent(in) :: cac(3) - real :: t1, s1, s2 -!integer :: k -! real:: a -! a=0.0 -! do k=1,3 -! a = a + cac(k)*( (q1*rho)**((7-k)*0.25) * (q2*rho)**(k*0.25) ) -! enddo -! acr3d = c * abs(v1-v2) * a/rho -!---------- -! Optimized -!---------- - t1 = sqrt(q1*rho) - s1 = sqrt(q2*rho) - s2 = sqrt(s1) ! s1 = s2**2 - acr3d = c*abs(v1-v2)*q1*s2*(cac(1)*t1 + cac(2)*sqrt(t1)*s2 + cac(3)*s1) - - end function acr3d - - - - - real function smlt(tc, dqs, qsrho,psacw,psacr,c,rho, rhofac) - real, intent(in):: tc,dqs,qsrho,psacw,psacr,c(5),rho, rhofac - - smlt = (c(1)*tc/rho-c(2)*dqs) * (c(3)*sqrt(qsrho)+ & - c(4)*qsrho**0.65625*sqrt(rhofac)) + c(5)*tc*(psacw+psacr) - - end function smlt - - - real function gmlt(tc, dqs,qgrho,pgacw,pgacr,c, rho) - real, intent(in):: tc,dqs,qgrho,pgacw,pgacr,c(5),rho - -! note: pgacw and pgacr must be calc before gmlt is called -! - gmlt = (c(1)*tc/rho-c(2)*dqs) * (c(3)*sqrt(qgrho)+ & - c(4)*qgrho**0.6875/rho**0.25) + c(5)*tc*(pgacw+pgacr) - end function gmlt - - - subroutine qsmith_init - integer, parameter:: length=2621 - integer i - - if( .not. tables_are_initialized ) then - -! master = (mpp_pe().eq.mpp_root_pe()) -! if (master) print*, ' lin MP: initializing qs tables' -!!! DEBUG CODE -! print*, mpp_pe(), allocated(table), allocated(table2), allocated(table3), allocated(tablew), allocated(des), allocated(des2), allocated(des3), allocated(desw) -!!! END DEBUG CODE - -! generate es table (dt = 0.1 deg. c) - allocate ( table( length) ) - allocate ( table2(length) ) - allocate ( table3(length) ) - allocate ( tablew(length) ) - allocate ( des (length) ) - allocate ( des2(length) ) - allocate ( des3(length) ) - allocate ( desw(length) ) - - call qs_table (length ) - call qs_table2(length ) - call qs_table3(length ) - call qs_tablew(length ) - - do i=1,length-1 - des(i) = max(0., table(i+1) - table(i)) - des2(i) = max(0., table2(i+1) - table2(i)) - des3(i) = max(0., table3(i+1) - table3(i)) - desw(i) = max(0., tablew(i+1) - tablew(i)) - enddo - des(length) = des(length-1) - des2(length) = des2(length-1) - des3(length) = des3(length-1) - desw(length) = desw(length-1) - - tables_are_initialized = .true. - endif - - end subroutine qsmith_init - - real function wqs1(ta, den) -! Pure water phase; universal dry/moist formular using air density -! Input "den" can be either dry or moist air density - real, intent(in):: ta, den -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - ap1 = 10.*dim(ta, tmin) + 1. 
- ap1 = min(2621., ap1) - it = ap1 - es = tablew(it) + (ap1-it)*desw(it) - wqs1 = es / (rvgas*ta*den) - - end function wqs1 - - real function wqs2(ta, den, dqdt) -! Pure water phase; universal dry/moist formular using air density -! Input "den" can be either dry or moist air density - real, intent(in):: ta, den - real, intent(out):: dqdt -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - if (.not. tables_are_initialized) call qsmith_init - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = tablew(it) + (ap1-it)*desw(it) - wqs2 = es / (rvgas*ta*den) - it = ap1 - 0.5 -! Finite diff, del_T = 0.1: - dqdt = 10.*(desw(it) + (ap1-it)*(desw(it+1)-desw(it))) / (rvgas*ta*den) - - end function wqs2 - - real function wet_bulb(q, t, den) -! Liquid phase only - real, intent(in):: t, q, den - real:: qs, tp, dqdt - - wet_bulb = t - qs = wqs2(wet_bulb, den, dqdt) - tp = 0.5*(qs-q)/(1.+lcp*dqdt)*lcp - wet_bulb = wet_bulb - tp -! tp is negative if super-saturated - if ( tp > 0.01 ) then - qs = wqs2(wet_bulb, den, dqdt) - tp = (qs-q)/(1.+lcp*dqdt)*lcp - wet_bulb = wet_bulb - tp - endif - - end function wet_bulb - real function iqs1(ta, den) -! water-ice phase; universal dry/moist formular using air density -! Input "den" can be either dry or moist air density - real, intent(in):: ta, den -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = table2(it) + (ap1-it)*des2(it) - iqs1 = es / (rvgas*ta*den) - - end function iqs1 - - real function iqs2(ta, den, dqdt) -! water-ice phase; universal dry/moist formular using air density -! Input "den" can be either dry or moist air density - real, intent(in):: ta, den - real, intent(out):: dqdt -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = table2(it) + (ap1-it)*des2(it) - iqs2 = es / (rvgas*ta*den) - it = ap1 - 0.5 - dqdt = 10.*(des2(it) + (ap1-it)*(des2(it+1)-des2(it))) / (rvgas*ta*den) - - end function iqs2 - - real function qs1d_moist(ta, qv, pa, dqdt) -! 2-phase tabel - real, intent(in):: ta, pa, qv - real, intent(out):: dqdt -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - real, parameter:: eps10 = 10.*eps - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = table2(it) + (ap1-it)*des2(it) - qs1d_moist = eps*es*(1.+zvir*qv)/pa - it = ap1 - 0.5 - dqdt = eps10*(des2(it) + (ap1-it)*(des2(it+1)-des2(it)))*(1.+zvir*qv)/pa - - end function qs1d_moist - - real function wqsat2_moist(ta, qv, pa, dqdt) -! Pure water phase - real, intent(in):: ta, pa, qv - real, intent(out):: dqdt -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - real, parameter:: eps10 = 10.*eps - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = tablew(it) + (ap1-it)*desw(it) - wqsat2_moist = eps*es*(1.+zvir*qv)/pa - dqdt = eps10*(desw(it) + (ap1-it)*(desw(it+1)-desw(it)))*(1.+zvir*qv)/pa - - end function wqsat2_moist - - real function wqsat_moist(ta, qv, pa) -! Pure water phase - real, intent(in):: ta, pa, qv -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = tablew(it) + (ap1-it)*desw(it) - wqsat_moist = eps*es*(1.+zvir*qv)/pa - - end function wqsat_moist - - real function qs1d_m(ta, qv, pa) -! 
2-phase tabel - real, intent(in):: ta, pa, qv -! local: - real es, ap1 - real, parameter:: tmin=table_ice - 160. - real, parameter:: eps10 = 10.*eps - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = table2(it) + (ap1-it)*des2(it) - qs1d_m = eps*es*(1.+zvir*qv)/pa - - end function qs1d_m - - real function d_sat(ta) -! Computes the difference in saturation vapor *density* between water and ice - real, intent(in):: ta - real, parameter:: tmin=table_ice - 160. - real es_w, es_i, ap1 - integer it - - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 -! over Water: - es_w = tablew(it) + (ap1-it)*desw(it) -! over Ice: - es_i = table2(it) + (ap1-it)*des2(it) - d_sat = dim(es_w, es_i)/(rvgas*ta) ! Take positive difference - - end function d_sat - - - real function esw_table(ta) -! pure water phase table - real, intent(in):: ta - real, parameter:: tmin=table_ice - 160. - real ap1 - integer it - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - esw_table = tablew(it) + (ap1-it)*desw(it) - end function esw_table - - - real function es2_table(ta) -! two-phase table - real, intent(in):: ta - real, parameter:: tmin=table_ice - 160. - real ap1 - integer it - ap1 = 10.*dim(ta, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es2_table = table2(it) + (ap1-it)*des2(it) - end function es2_table - - - subroutine esw_table1d(ta, es, n) - integer, intent(in):: n -! For waterphase only - real, intent(in):: ta(n) - real, intent(out):: es(n) - real, parameter:: tmin=table_ice - 160. - real ap1 - integer i, it - - do i=1, n - ap1 = 10.*dim(ta(i), tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es(i) = tablew(it) + (ap1-it)*desw(it) - enddo - end subroutine esw_table1d - - - - subroutine es2_table1d(ta, es, n) - integer, intent(in):: n -! two-phase table with -2C as the transition point for ice-water phase -! For sea ice model - real, intent(in):: ta(n) - real, intent(out):: es(n) - real, parameter:: tmin=table_ice - 160. - real ap1 - integer i, it - - do i=1, n - ap1 = 10.*dim(ta(i), tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es(i) = table2(it) + (ap1-it)*des2(it) - enddo - end subroutine es2_table1d - - - subroutine es3_table1d(ta, es, n) - integer, intent(in):: n -! two-phase table with -2C as the transition point for ice-water phase - real, intent(in):: ta(n) - real, intent(out):: es(n) - real, parameter:: tmin=table_ice - 160. - real ap1 - integer i, it - - do i=1, n - ap1 = 10.*dim(ta(i), tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es(i) = table3(it) + (ap1-it)*des3(it) - enddo - end subroutine es3_table1d - - - - subroutine qs_tablew(n) -! Over water - integer, intent(in):: n - real:: delt=0.1 - real esbasw, tbasw, esbasi, tbasi, tmin, tem - integer i - -! constants - esbasw = 1013246.0 - tbasw = table_ice + 100. ! 373.16 - esbasi = 6107.1 - tbasi = table_ice - tmin = tbasi - 160. - - do i=1,n - tem = tmin+delt*real(i-1) -! compute es over water - tablew(i) = e00*exp((dc_vap*log(tem/t_ice)+Lv0*(tem-t_ice)/(tem*t_ice))/rvgas) - enddo - - end subroutine qs_tablew - - - subroutine qs_table2(n) -! 2-phase table - integer, intent(in):: n - real:: delt=0.1 - real esbasw, tbasw, esbasi, tbasi, tmin, tem - integer :: i0, i1 - real :: tem0, tem1 - integer i - -! constants - esbasw = 1013246.0 - tbasw = table_ice + 100. ! 373.16 - esbasi = 6107.1 - tbasi = table_ice - tmin = tbasi - 160. - - do i=1,n - tem = tmin+delt*real(i-1) - if ( i<= 1600 ) then -! compute es over ice between -160c and 0 c. 
- table2(i) = e00*exp((d2ice*log(tem/t_ice)+Li2*(tem-t_ice)/(tem*t_ice))/rvgas) - else -! compute es over water between 0c and 102c. - table2(i) = e00*exp((dc_vap*log(tem/t_ice)+Lv0*(tem-t_ice)/(tem*t_ice))/rvgas) - endif - enddo - -!---------- -! smoother -!---------- - i0 = 1600; i1 = 1601 - tem0 = 0.25*(table2(i0-1) + 2.*table(i0) + table2(i0+1)) - tem1 = 0.25*(table2(i1-1) + 2.*table(i1) + table2(i1+1)) - table2(i0) = tem0 - table2(i1) = tem1 - - end subroutine qs_table2 - - - - subroutine qs_table3(n) -! 2-phase table with "-2 C" as the transition point - integer, intent(in):: n - real:: delt=0.1 - real esbasw, tbasw, esbasi, tbasi, tmin, tem, aa, b, c, d, e - integer :: i0, i1 - real :: tem0, tem1 - integer i - -! constants - esbasw = 1013246.0 - tbasw = table_ice + 100. ! 373.16 - esbasi = 6107.1 - tbasi = table_ice ! 273.16 - tmin = tbasi - 160. - - do i=1,n - tem = tmin+delt*real(i-1) -! if ( i<= 1600 ) then - if ( i<= 1580 ) then ! to -2 C -! compute es over ice between -160c and 0 c. -! see smithsonian meteorological tables page 350. - aa = -9.09718 *(tbasi/tem-1.) - b = -3.56654 *alog10(tbasi/tem) - c = 0.876793*(1.-tem/tbasi) - e = alog10(esbasi) - table3(i) = 0.1 * 10**(aa+b+c+e) - else -! compute es over water between -2c and 102c. -! see smithsonian meteorological tables page 350. - aa = -7.90298*(tbasw/tem-1.) - b = 5.02808*alog10(tbasw/tem) - c = -1.3816e-07*(10**((1.-tem/tbasw)*11.344)-1.) - d = 8.1328e-03*(10**((tbasw/tem-1.)*(-3.49149))-1.) - e = alog10(esbasw) - table3(i) = 0.1 * 10**(aa+b+c+d+e) - endif - enddo - -!---------- -! smoother -!---------- - i0 = 1580 - tem0 = 0.25*(table3(i0-1) + 2.*table(i0) + table3(i0+1)) - i1 = 1581 - tem1 = 0.25*(table3(i1-1) + 2.*table(i1) + table3(i1+1)) - table3(i0) = tem0 - table3(i1) = tem1 - - end subroutine qs_table3 - - - real function qs_blend(t, p, q) -! Note: this routine is based on "moist" mixing ratio -! Blended mixed phase table - real, intent(in):: t, p, q - real es, ap1 - real, parameter:: tmin=table_ice - 160. - integer it - - ap1 = 10.*dim(t, tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es = table(it) + (ap1-it)*des(it) - qs_blend = eps*es*(1.+zvir*q)/p - - end function qs_blend - - subroutine qs_table(n) - integer, intent(in):: n - real esupc(200) - real:: delt=0.1 - real esbasw, tbasw, esbasi, tbasi, tmin, tem, esh20 - real wice, wh2o - integer i - -! constants - esbasw = 1013246.0 - tbasw = table_ice + 100. ! 373.16 - esbasi = 6107.1 - tbasi = table_ice ! 273.16 - -! compute es over ice between -160c and 0 c. - tmin = tbasi - 160. -! see smithsonian meteorological tables page 350. - do i=1,1600 - tem = tmin+delt*real(i-1) - table(i) = e00*exp((d2ice*log(tem/t_ice)+Li2*(tem-t_ice)/(tem*t_ice))/rvgas) - enddo - -! compute es over water between -20c and 102c. -! see smithsonian meteorological tables page 350. - do i=1,1221 - tem = 253.16+delt*real(i-1) - esh20 = e00*exp((dc_vap*log(tem/t_ice)+Lv0*(tem-t_ice)/(tem*t_ice))/rvgas) - if (i <= 200) then - esupc(i) = esh20 - else - table(i+1400) = esh20 - endif - enddo - -! derive blended es over ice and supercooled water between -20c and 0c - do i=1,200 - tem = 253.16+delt*real(i-1) - wice = 0.05*(table_ice-tem) - wh2o = 0.05*(tem-253.16) - table(i+1400) = wice*table(i+1400)+wh2o*esupc(i) - enddo - - end subroutine qs_table - - - subroutine qsmith(im, km, ks, t, p, q, qs, dqdt) -! 
input t in deg k; p (pa) : moist pressure - integer, intent(in):: im, km, ks - real, intent(in),dimension(im,km):: t, p, q - real, intent(out),dimension(im,km):: qs - real, intent(out), optional:: dqdt(im,km) -! local: - real, parameter:: eps10 = 10.*eps - real es(im,km) - real ap1 - real, parameter:: tmin=table_ice - 160. - integer i, k, it - - if( .not. tables_are_initialized ) then - call qsmith_init - endif - - do k=ks,km - do i=1,im - ap1 = 10.*dim(t(i,k), tmin) + 1. - ap1 = min(2621., ap1) - it = ap1 - es(i,k) = table(it) + (ap1-it)*des(it) - qs(i,k) = eps*es(i,k)*(1.+zvir*q(i,k))/p(i,k) - enddo - enddo - - if ( present(dqdt) ) then - do k=ks,km - do i=1,im - ap1 = 10.*dim(t(i,k), tmin) + 1. - ap1 = min(2621., ap1) - 0.5 - it = ap1 - dqdt(i,k) = eps10*(des(it)+(ap1-it)*(des(it+1)-des(it)))*(1.+zvir*q(i,k))/p(i,k) - enddo - enddo - endif - - end subroutine qsmith - - - subroutine neg_adj(ktop, kbot, pt, dp, qv, ql, qr, qi, qs, qg) -! 1d version: -! this is designed for 6-class micro-physics schemes - integer, intent(in):: ktop, kbot - real, intent(in):: dp(ktop:kbot) - real, intent(inout), dimension(ktop:kbot):: & - pt, qv, ql, qr, qi, qs, qg -! local: - real lcpk(ktop:kbot), icpk(ktop:kbot) - real dq, tmp1, cvm - integer k - - do k=ktop,kbot - cvm = c_air + qv(k)*c_vap + (qr(k)+ql(k))*c_liq + (qi(k)+qs(k)+qg(k))*c_ice - lcpk(k) = (lv00+d0_vap*pt(k)) / cvm - icpk(k) = (li00+dc_ice*pt(k)) / cvm - enddo - - do k=ktop, kbot -!----------- -! ice-phase: -!----------- -! if ice<0 borrow from snow - if( qi(k) < 0. ) then - qs(k) = qs(k) + qi(k) - qi(k) = 0. - endif -! if snow<0 borrow from graupel - if( qs(k) < 0. ) then - qg(k) = qg(k) + qs(k) - qs(k) = 0. - endif -! if graupel < 0 then borrow from rain - if ( qg(k) < 0. ) then - qr(k) = qr(k) + qg(k) - pt(k) = pt(k) - qg(k)*icpk(k) ! heating - qg(k) = 0. - endif - -! liquid phase: -! fix negative rain by borrowing from cloud water - if ( qr(k) < 0. ) then - ql(k) = ql(k) + qr(k) - qr(k) = 0. - endif -! fix negative cloud water with vapor - if ( ql(k) < 0. ) then - qv(k) = qv(k) + ql(k) - pt(k) = pt(k) - ql(k)*lcpk(k) - ql(k) = 0. - endif - enddo - -!----------------------------------- -! fix water vapor; borrow from below -!----------------------------------- - do k=ktop,kbot-1 - if( qv(k) < 0. ) then - qv(k+1) = qv(k+1) + qv(k)*dp(k)/dp(k+1) - qv(k ) = 0. - endif - enddo - -! bottom layer; borrow from above - if( qv(kbot) < 0. .and. qv(kbot-1)>0.) then - dq = min(-qv(kbot)*dp(kbot), qv(kbot-1)*dp(kbot-1)) - qv(kbot-1) = qv(kbot-1) - dq/dp(kbot-1) - qv(kbot ) = qv(kbot ) + dq/dp(kbot ) - endif -! if qv is still < 0 - - end subroutine neg_adj - - -! real function g_sum(p, ifirst, ilast, jfirst, jlast, area, mode) -!!------------------------- -!! Quick local sum algorithm -!!------------------------- -! use mpp_mod, only: mpp_sum -! integer, intent(IN) :: ifirst, ilast -! integer, intent(IN) :: jfirst, jlast -! integer, intent(IN) :: mode ! if ==1 divided by area -! real, intent(IN) :: p(ifirst:ilast,jfirst:jlast) ! field to be summed -! real, intent(IN) :: area(ifirst:ilast,jfirst:jlast) -! integer :: i,j -! real gsum -! -! if( global_area < 0. ) then -! global_area = 0. -! do j=jfirst,jlast -! do i=ifirst,ilast -! global_area = global_area + area(i,j) -! enddo -! enddo -! call mpp_sum(global_area) -! end if -! -! gsum = 0. -! do j=jfirst,jlast -! do i=ifirst,ilast -! gsum = gsum + p(i,j)*area(i,j) -! enddo -! enddo -! call mpp_sum(gsum) -! -! if ( mode==1 ) then -! g_sum = gsum / global_area -! else -! g_sum = gsum -! endif -! -! 
end function g_sum - - subroutine interpolate_z(is, ie, js, je, km, zl, hght, a3, a2) - - integer, intent(in):: is, ie, js, je, km - real, intent(in):: hght(is:ie,js:je,km+1) ! hght(k) > hght(k+1) - real, intent(in):: a3(is:ie,js:je,km) - real, intent(in):: zl - real, intent(out):: a2(is:ie,js:je) -! local: - real zm(km) - integer i,j,k - - -!$OMP parallel do default(none) shared(is,ie,js,je,km,hght,zl,a2,a3) private(zm) - do j=js,je - do 1000 i=is,ie - do k=1,km - zm(k) = 0.5*(hght(i,j,k)+hght(i,j,k+1)) - enddo - if( zl >= zm(1) ) then - a2(i,j) = a3(i,j,1) - elseif ( zl <= zm(km) ) then - a2(i,j) = a3(i,j,km) - else - do k=1,km-1 - if( zl <= zm(k) .and. zl >= zm(k+1) ) then - a2(i,j) = a3(i,j,k) + (a3(i,j,k+1)-a3(i,j,k))*(zm(k)-zl)/(zm(k)-zm(k+1)) - go to 1000 - endif - enddo - endif -1000 continue - enddo - - end subroutine interpolate_z - - subroutine cloud_diagnosis(is, ie, js, je, den, qw, qi, qr, qs, qg, T, qcw, qci, qcr, qcs, qcg, rew, rei, rer, res, reg) - - implicit none - - integer, intent(in) :: is, ie, js, je - real, dimension(is:ie,js:je), intent(in) :: den, T - real, dimension(is:ie,js:je), intent(in) :: qw, qi, qr, qs, qg ! units: kg/kg - real, dimension(is:ie,js:je), intent(out) :: qcw, qci, qcr, qcs, qcg ! units: kg/m^3 - real, dimension(is:ie,js:je), intent(out) :: rew, rei, rer, res, reg ! units: micron - - integer :: i, j - real :: lambdar, lambdas, lambdag - - real :: rhow = 1.0E3, rhor = 1.0E3, rhos = 1.0E2, rhog = 4.0E2 - real :: n0r = 8.0E6, n0s = 3.0E6, n0g = 4.0E6 - real :: alphar = 0.8, alphas = 0.25, alphag = 0.5 - real :: gammar = 17.837789, gammas = 8.2850630, gammag = 11.631769 - real :: qmin = 1.0E-5, ccn = 1.0E8, beta = 1.22 - -! real :: rewmin = 1.0, rewmax = 25.0 -! real :: reimin = 10.0, reimax = 300.0 -! real :: rermin = 25.0, rermax = 225.0 -! real :: resmin = 300, resmax = 1000.0 -! real :: regmin = 1000.0, regmax = 1.0E5 - real :: rewmin = 5.0, rewmax = 10.0 - real :: reimin = 10.0, reimax = 150.0 - real :: rermin = 0.0, rermax = 10000.0 - real :: resmin = 0.0, resmax = 10000.0 - real :: regmin = 0.0, regmax = 10000.0 - - do j = js, je - do i = is, ie - -! cloud water (Martin et al., 1994) - if (qw(i,j) .gt. qmin) then - qcw(i,j) = den(i,j) * qw(i,j) - rew(i,j) = exp(1.0 / 3.0 * log((3 * qcw(i,j)) / (4 * pi * rhow * ccn))) * 1.0E6 - rew(i,j) = max(rewmin, min(rewmax, rew(i,j))) - else - qcw(i,j) = 0.0 - rew(i,j) = rewmin - end if - -! cloud ice (Heymsfield and McFarquhar, 1996) - if (qi(i,j) .gt. qmin) then - qci(i,j) = den(i,j) * qi(i,j) - if (T(i,j) - tice .lt. -50) then - rei(i,j) = beta / 9.917 * exp((1 - 0.891) * log(1.0E3 * qci(i,j))) * 1.0E3 - elseif (T(i,j) - tice .lt. -40) then - rei(i,j) = beta / 9.337 * exp((1 - 0.920) * log(1.0E3 * qci(i,j))) * 1.0E3 - elseif (T(i,j) - tice .lt. -30) then - rei(i,j) = beta / 9.208 * exp((1 - 0.945) * log(1.0E3 * qci(i,j))) * 1.0E3 - else - rei(i,j) = beta / 9.387 * exp((1 - 0.969) * log(1.0E3 * qci(i,j))) * 1.0E3 - end if - rei(i,j) = max(reimin, min(reimax, rei(i,j))) - else - qci(i,j) = 0.0 - rei(i,j) = reimin - end if - -! rain (Lin et al., 1983) - if (qr(i,j) .gt. qmin) then - qcr(i,j) = den(i,j) * qr(i,j) - lambdar = exp(0.25 * log(pi * rhor * n0r / qcr(i,j))) - rer(i,j) = 0.5 * exp(log(gammar / 6) / alphar) / lambdar * 1.0E6 - rer(i,j) = max(rermin, min(rermax, rer(i,j))) - else - qcr(i,j) = 0.0 - rer(i,j) = rermin - end if - -! snow (Lin et al., 1983) - if (qs(i,j) .gt. 
qmin) then - qcs(i,j) = den(i,j) * qs(i,j) - lambdas = exp(0.25 * log(pi * rhos * n0s / qcs(i,j))) - res(i,j) = 0.5 * exp(log(gammas / 6) / alphas) / lambdas * 1.0E6 - res(i,j) = max(resmin, min(resmax, res(i,j))) - else - qcs(i,j) = 0.0 - res(i,j) = resmin - end if - -! graupel (Lin et al., 1983) - if (qg(i,j) .gt. qmin) then - qcg(i,j) = den(i,j) * qg(i,j) - lambdag = exp(0.25 * log(pi * rhog * n0g / qcg(i,j))) - reg(i,j) = 0.5 * exp(log(gammag / 6) / alphag) / lambdag * 1.0E6 - reg(i,j) = max(regmin, min(regmax, reg(i,j))) - else - qcg(i,j) = 0.0 - reg(i,j) = regmin - end if - - end do - end do - - end subroutine cloud_diagnosis - -end module lin_cld_microphys_mod diff --git a/model_nh/README b/model/README_nh_core similarity index 100% rename from model_nh/README rename to model/README_nh_core diff --git a/model/a2b_edge.F90 b/model/a2b_edge.F90 index 52b347d8b..25fddd548 100644 --- a/model/a2b_edge.F90 +++ b/model/a2b_edge.F90 @@ -44,10 +44,6 @@ module a2b_edge_mod private public :: a2b_ord2, a2b_ord4 -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains #ifndef USE_OLD_ALGORITHM @@ -99,7 +95,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace ! Corners: ! 3-way extrapolation - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js-2,je+2 do i=is,ie+1 @@ -180,13 +176,13 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace qx(npx-1,j) = (3.*(qin(npx-2,j)+g_in*qin(npx-1,j)) - (g_in*qx(npx,j)+qx(npx-2,j)))/(2.+2.*g_in) enddo endif - + end if !------------ ! Y-Interior: !------------ - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js,je+1 @@ -242,7 +238,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace end if !-------------------------------------- - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js, je+1 do i=is,ie+1 @@ -281,14 +277,14 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace enddo endif - + do j=max(2,js),min(npy-1,je+1) do i=max(3,is),min(npx-2,ie+1) qyy(i,j) = a2*(qy(i-2,j)+qy(i+1,j)) + a1*(qy(i-1,j)+qy(i,j)) enddo if ( is==1 ) qyy(2,j) = c1*(qy(1,j)+qy(2,j))+c2*(qout(1,j)+qyy(3,j)) if((ie+1)==npx) qyy(npx-1,j) = c1*(qy(npx-2,j)+qy(npx-1,j))+c2*(qout(npx,j)+qyy(npx-2,j)) - + do i=max(2,is),min(npx-1,ie+1) qout(i,j) = 0.5*(qxx(i,j) + qyy(i,j)) ! averaging enddo @@ -312,7 +308,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace qy(i,j) = b1*(qin(i,j-1)+qin(i,j)) + b2*(qin(i,j-2)+qin(i,j+1)) enddo enddo - + do j=js,je+1 do i=is,ie+1 qout(i,j) = 0.5*( a1*(qx(i,j-1)+qx(i,j ) + qy(i-1,j)+qy(i, j)) + & @@ -330,9 +326,9 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace enddo endif endif - + end subroutine a2b_ord4 - + #else ! Working version: @@ -448,7 +444,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace !------------ ! X-Interior: !------------ - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js-2,je+2 do i=is, ie+1 @@ -519,7 +515,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace !------------ ! 
Y-Interior: !------------ - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js,je+1 do i=is-2, ie+2 @@ -587,7 +583,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace end if - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js,je+1 do i=is,ie+1 @@ -607,7 +603,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace else - + do j=max(3,js),min(npy-2,je+1) do i=max(2,is),min(npx-1,ie+1) qxx(i,j) = a2*(qx(i,j-2)+qx(i,j+1)) + a1*(qx(i,j-1)+qx(i,j)) @@ -625,14 +621,14 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace enddo endif - + do j=max(2,js),min(npy-1,je+1) do i=max(3,is),min(npx-2,ie+1) qyy(i,j) = a2*(qy(i-2,j)+qy(i+1,j)) + a1*(qy(i-1,j)+qy(i,j)) enddo if ( is==1 ) qyy(2,j) = c1*(qy(1,j)+qy(2,j))+c2*(qout(1,j)+qyy(3,j)) if((ie+1)==npx) qyy(npx-1,j) = c1*(qy(npx-2,j)+qy(npx-1,j))+c2*(qout(npx,j)+qyy(npx-2,j)) - + do i=max(2,is),min(npx-1,ie+1) qout(i,j) = 0.5*(qxx(i,j) + qyy(i,j)) ! averaging enddo @@ -656,7 +652,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace qy(i,j) = b1*(qin(i,j-1)+qin(i,j)) + b2*(qin(i,j-2)+qin(i,j+1)) enddo enddo - + do j=js,je+1 do i=is,ie+1 qout(i,j) = 0.5*( a1*(qx(i,j-1)+qx(i,j ) + qy(i-1,j)+qy(i, j)) + & @@ -674,7 +670,7 @@ subroutine a2b_ord4(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace enddo endif endif - + end subroutine a2b_ord4 #endif @@ -689,7 +685,7 @@ subroutine a2b_ord2(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace real q1(npx), q2(npy) integer :: i,j integer :: is1, js1, is2, js2, ie1, je1 - + real, pointer, dimension(:,:,:) :: grid, agrid real, pointer, dimension(:,:) :: dxa, dya @@ -707,7 +703,7 @@ subroutine a2b_ord2(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace if (gridstruct%grid_type < 3) then - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then do j=js-2,je+1+2 do i=is-2,ie+1+2 @@ -789,7 +785,7 @@ subroutine a2b_ord2(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace endif - + if ( present(replace) ) then if ( replace ) then do j=js,je+1 @@ -799,7 +795,7 @@ subroutine a2b_ord2(qin, qout, gridstruct, npx, npy, is, ie, js, je, ng, replace enddo endif endif - + end subroutine a2b_ord2 real function extrap_corner ( p0, p1, p2, q1, q2 ) @@ -906,13 +902,13 @@ subroutine a2b_ord4(qin, qout, grid, agrid, npx, npy, is, ie, js, je, ng, replac qin(0,npy ) = qin(-1,npy-1) endif - qout(i,j) = van2(1, i,j)*qin(i-2,j-2) + van2(2, i,j)*qin(i-1,j-2) + & - van2(3, i,j)*qin(i ,j-2) + van2(4, i,j)*qin(i+1,j-2) + & - van2(5, i,j)*qin(i-2,j-1) + van2(6, i,j)*qin(i-1,j-1) + & - van2(7, i,j)*qin(i ,j-1) + van2(8, i,j)*qin(i+1,j-1) + & - van2(9, i,j)*qin(i-2,j ) + van2(10,i,j)*qin(i-1,j ) + & - van2(11,i,j)*qin(i ,j ) + van2(12,i,j)*qin(i+1,j ) + & - van2(13,i,j)*qin(i-2,j+1) + van2(14,i,j)*qin(i-1,j+1) + & + qout(i,j) = van2(1, i,j)*qin(i-2,j-2) + van2(2, i,j)*qin(i-1,j-2) + & + van2(3, i,j)*qin(i ,j-2) + van2(4, i,j)*qin(i+1,j-2) + & + van2(5, i,j)*qin(i-2,j-1) + van2(6, i,j)*qin(i-1,j-1) + & + van2(7, i,j)*qin(i ,j-1) + van2(8, i,j)*qin(i+1,j-1) + & + van2(9, i,j)*qin(i-2,j ) + van2(10,i,j)*qin(i-1,j ) + & + van2(11,i,j)*qin(i ,j ) + van2(12,i,j)*qin(i+1,j ) + & + van2(13,i,j)*qin(i-2,j+1) + van2(14,i,j)*qin(i-1,j+1) + & van2(15,i,j)*qin(i ,j+1) + van2(16,i,j)*qin(i+1,j+1) 123 continue enddo @@ -944,7 +940,7 @@ subroutine a2b_ord4(qin, qout, grid, agrid, npx, npy, is, ie, js, je, ng, replac extrap_corner(p0, 
agrid(0,npy-1,1:2), agrid(-1,npy-2,1:2), qin(0,npy-1), qin(-1,npy-2)) + & extrap_corner(p0, agrid(1,npy, 1:2), agrid( 2,npy+1,1:2), qin(1,npy ), qin( 2,npy+1)))*r3 endif - + else ! grid_type>=3 !------------------------ @@ -962,7 +958,7 @@ subroutine a2b_ord4(qin, qout, grid, agrid, npx, npy, is, ie, js, je, ng, replac qy(i,j) = b1*(qin(i,j-1)+qin(i,j)) + b2*(qin(i,j-2)+qin(i,j+1)) enddo enddo - + do j=js,je+1 do i=is,ie+1 qout(i,j) = 0.5*( a1*(qx(i,j-1)+qx(i,j ) + qy(i-1,j)+qy(i, j)) + & @@ -981,8 +977,8 @@ subroutine a2b_ord4(qin, qout, grid, agrid, npx, npy, is, ie, js, je, ng, replac enddo endif endif - + end subroutine a2b_ord4 #endif - + end module a2b_edge_mod diff --git a/model/boundary.F90 b/model/boundary.F90 index f0f2ef14b..9b3c7a056 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -20,12 +20,13 @@ !*********************************************************************** module boundary_mod - use fv_mp_mod, only: ng, isc,jsc,iec,jec, isd,jsd,ied,jed, is,js,ie,je, is_master + use fv_mp_mod, only: is_master use constants_mod, only: grav use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST use mpp_domains_mod, only: mpp_global_field, mpp_get_pelist + use mpp_domains_mod, only: AGRID, BGRID_NE, CGRID_NE, DGRID_NE use mpp_mod, only: mpp_error, FATAL, mpp_sum, mpp_sync, mpp_npes, mpp_broadcast, WARNING, mpp_pe use fv_mp_mod, only: mp_bcst @@ -45,12 +46,24 @@ module boundary_mod interface nested_grid_BC module procedure nested_grid_BC_2d - module procedure nested_grid_BC_mpp - module procedure nested_grid_BC_mpp_send +! module procedure nested_grid_BC_mpp_2d + module procedure nested_grid_BC_mpp_3d + module procedure nested_grid_BC_mpp_send_2d + module procedure nested_grid_BC_mpp_send_3d module procedure nested_grid_BC_2D_mpp module procedure nested_grid_BC_3d + module procedure nested_grid_BC_mpp_3d_vector end interface + interface nested_grid_BC_send + module procedure nested_grid_BC_send_scalar + module procedure nested_grid_BC_send_vector + end interface + + interface nested_grid_BC_recv + module procedure nested_grid_BC_recv_scalar + module procedure nested_grid_BC_recv_vector + end interface interface fill_nested_grid module procedure fill_nested_grid_2d @@ -60,6 +73,7 @@ module boundary_mod interface update_coarse_grid module procedure update_coarse_grid_mpp module procedure update_coarse_grid_mpp_2d + module procedure update_coarse_grid_mpp_vector end interface contains @@ -106,7 +120,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else debug = .false. end if - + if (is == 1) then if (pd) then @@ -134,7 +148,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) end do end if - + end if if (js == 1) then @@ -164,7 +178,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) end do end if - + end if if (ie == npx - 1) then @@ -173,7 +187,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=jstart,jend+jstag do i=ie+1+istag,ied+istag - + if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. & q(ie+istag,j) < q(ie+istag-1,j)) then q(i,j) = q(i-1,j) @@ -244,7 +258,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) ) end if - + if (real(j) <= 1. - q(i,1)/(q(i,2) - q(i,1) + 1.e-12) .and. 
q(i,2) > q(i,1)) then q(i,j) = q(i,j) + 0.5*q(i,j+1) @@ -259,10 +273,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=jsd,0 do i=isd,0 - + q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) ) + & 0.5*( real(2-j)*q(i,1) - real(1-j)*q(i,2) ) - + end do end do @@ -291,7 +305,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else q(i,j) = q(i,j) + 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) end if - + end do end do @@ -299,10 +313,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=isd,0 - + q(i,j) = 0.5*( real(2-i)*q(1,j) - real(1-i)*q(2,j) ) + & 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) - + end do end do @@ -316,8 +330,8 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=ie+1+istag,ied+istag - - + + if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. & q(ie+istag-1,j) > q(ie+istag,j)) then q(i,j) = 0.5*q(i-1,j) @@ -331,7 +345,7 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) else q(i,j) = q(i,j) + 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) end if - + end do end do @@ -339,10 +353,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=je+1+jstag,jed+jstag do i=ie+1+istag,ied+istag - + q(i,j) = 0.5*( real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j) ) + & 0.5*( real(j - (je+jstag-1))*q(i,je+jstag) + real((je+jstag) - j)*q(i,je+jstag-1) ) - + end do end do @@ -356,22 +370,22 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=0,jsd,-1 do i=ie+1+istag,ied+istag - - + + if (real(i) >= ie+istag + q(ie+istag,j)/(q(ie+istag-1,j)-q(ie+istag,j)+1.e-12) .and. & q(ie+istag-1,j) > q(ie+istag,j)) then q(i,j) = 0.5*q(i-1,j) else q(i,j) = 0.5*(real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j)) end if - + if (real(j) <= 1. - q(i,1)/(q(i,2) - q(i,1) + 1.e-12) .and. 
& q(i,2) > q(i,1)) then q(i,j) = q(i,j) + 0.5*q(i,j+1) else q(i,j) = q(i,j) + 0.5*(real(2-j)*q(i,1) - real(1-j)*q(i,2)) end if - + end do end do @@ -380,10 +394,10 @@ subroutine extrapolation_BC(q, istag, jstag, npx, npy, bd, pd_in, debug_in) do j=jsd,0 do i=ie+1+istag,ied+istag - + q(i,j) = 0.5*( real(i - (ie+istag-1))*q(ie+istag,j) + real((ie+istag) - i)*q(ie+istag-1,j) ) + & 0.5*( real(2-j)*q(i,1) - real(1-j)*q(i,2) ) - + end do end do @@ -399,7 +413,7 @@ subroutine fill_nested_grid_2D(var_nest, var_coarse, ind, wt, istag, jstag, & type(fv_grid_bounds_type), intent(IN) :: bd real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag), intent(INOUT) :: var_nest - real, dimension(isg:ieg+istag,jsg:jeg+jstag), intent(IN) :: var_coarse + real, dimension(isg:ieg+istag,jsg:jeg+jstag), intent(IN) :: var_coarse integer, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,2), intent(IN) :: ind real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,4), intent(IN) :: wt integer, intent(IN) :: istag, jstag, isg, ieg, jsg, jeg @@ -452,13 +466,13 @@ subroutine fill_nested_grid_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do end subroutine fill_nested_grid_2D - + subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & isg, ieg, jsg, jeg, npz, bd, istart_in, iend_in, jstart_in, jend_in) @@ -519,7 +533,7 @@ subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -527,9 +541,38 @@ subroutine fill_nested_grid_3D(var_nest, var_coarse, ind, wt, istag, jstag, & end do end subroutine fill_nested_grid_3D - - subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, jstag, & - npx, npy, npz, bd, isg, ieg, jsg, jeg, nstep_in, nsplit_in, proc_in) + +!!$ subroutine nested_grid_BC_mpp_2d(var_nest, nest_domain, ind, wt, istag, jstag, & +!!$ npx, npy, bd, isg, ieg, jsg, jeg, nstep_in, nsplit_in, proc_in) +!!$ +!!$ type(fv_grid_bounds_type), intent(IN) :: bd +!!$ real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag), intent(INOUT) :: var_nest +!!$ real, dimension(isg:ieg+istag,jsg:jeg+jstag), intent(IN) :: var_coarse +!!$ type(nest_domain_type), intent(INOUT) :: nest_domain +!!$ integer, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,2), intent(IN) :: ind +!!$ real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,4), intent(IN) :: wt +!!$ integer, intent(IN) :: istag, jstag, npx, npy, isg, ieg, jsg, jeg +!!$ integer, intent(IN), OPTIONAL :: nstep_in, nsplit_in +!!$ logical, intent(IN), OPTIONAL :: proc_in +!!$ +!!$ real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,1) :: var_nest_3d +!!$ +!!$ integer :: i,j +!!$ +!!$ do j=bd%jsd,bd%jed+jstag +!!$ do i=bd%isd,bd%ied+istag +!!$ var_nest_3d(i,j,1) = var_nest(i,j) +!!$ enddo +!!$ enddo +!!$ +!!$ call nested_grid_BC_mpp_3d(var_nest_3d, nest_domain, ind, wt, istag, jstag, & +!!$ npx, npy, 1, bd, isg, ieg, jsg, jeg, nstep_in, nsplit_in, proc_in) +!!$ +!!$ +!!$ end subroutine nested_grid_BC_mpp_2d + + subroutine nested_grid_BC_mpp_3d(var_nest, var_coarse, nest_domain, ind, wt, istag, jstag, & + npx, npy, npz, bd, isg, ieg, jsg, jeg, nest_level, nstep_in, nsplit_in, proc_in) type(fv_grid_bounds_type), intent(IN) :: bd real, 
dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz), intent(INOUT) :: var_nest @@ -538,6 +581,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, integer, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,2), intent(IN) :: ind real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,4), intent(IN) :: wt integer, intent(IN) :: istag, jstag, npx, npy, npz, isg, ieg, jsg, jeg + integer, intent(IN) :: nest_level integer, intent(IN), OPTIONAL :: nstep_in, nsplit_in logical, intent(IN), OPTIONAL :: proc_in @@ -584,13 +628,13 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, end if call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c, & - WEST, position=position) + WEST, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c, & - EAST, position=position) + EAST, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c, & - SOUTH, position=position) + SOUTH, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c, & - NORTH, position=position) + NORTH, nest_level=nest_level, position=position) if( iew_c .GE. isw_c .AND. jew_c .GE. jsw_c ) then allocate(wbuffer(isw_c:iew_c, jsw_c:jew_c,npz)) @@ -622,12 +666,14 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, call timing_on ('COMM_TOTAL') - call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, position=position) + call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, & + nest_level=nest_level, position=position) call timing_off('COMM_TOTAL') if (process) then if (is == 1) then +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,isd,ind,var_nest,wt,wbuffer) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=isd,0 @@ -639,7 +685,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*wbuffer(ic, jc, k) + & wt(i,j,2)*wbuffer(ic, jc+1,k) + & wt(i,j,3)*wbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*wbuffer(ic+1,jc, k) + wt(i,j,4)*wbuffer(ic+1,jc, k) end do end do @@ -660,6 +706,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, iend = ied end if +!OMP parallel do default(none) shared(npz,jsd,istart,iend,istag,ind,var_nest,wt,sbuffer) private(ic,jc) do k=1,npz do j=jsd,0 do i=istart,iend+istag @@ -671,7 +718,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*sbuffer(ic, jc, k) + & wt(i,j,2)*sbuffer(ic, jc+1,k) + & wt(i,j,3)*sbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*sbuffer(ic+1,jc, k) + wt(i,j,4)*sbuffer(ic+1,jc, k) end do end do @@ -680,6 +727,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, if (ie == npx-1) then +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,npx,ied,istag,ind,var_nest,wt,ebuffer) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=npx+istag,ied+istag @@ -691,7 +739,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*ebuffer(ic, jc, k) + & wt(i,j,2)*ebuffer(ic, jc+1,k) + & wt(i,j,3)*ebuffer(ic+1,jc+1,k) + & - wt(i,j,4)*ebuffer(ic+1,jc, k) + wt(i,j,4)*ebuffer(ic+1,jc, k) end do end do @@ -712,6 +760,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, iend = ied end if +!OMP 
parallel do default(none) shared(npz,jstag,npy,jed,istart,iend,istag,ind,var_nest,wt,nbuffer) private(ic,jc) do k=1,npz do j=npy+jstag,jed+jstag do i=istart,iend+istag @@ -723,7 +772,7 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, wt(i,j,1)*nbuffer(ic, jc, k) + & wt(i,j,2)*nbuffer(ic, jc+1,k) + & wt(i,j,3)*nbuffer(ic+1,jc+1,k) + & - wt(i,j,4)*nbuffer(ic+1,jc, k) + wt(i,j,4)*nbuffer(ic+1,jc, k) end do end do @@ -734,13 +783,323 @@ subroutine nested_grid_BC_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, deallocate(wbuffer, ebuffer, sbuffer, nbuffer) - end subroutine nested_grid_BC_mpp + end subroutine nested_grid_BC_mpp_3d + + subroutine get_vector_position(position_x, position_y, gridtype) + integer, intent(OUT) :: position_x, position_y + integer, optional, intent(IN) :: gridtype + + integer :: grid_offset_type + + grid_offset_type = AGRID + if(present(gridtype)) grid_offset_type = gridtype + + select case(grid_offset_type) + case (AGRID) + position_x = CENTER + position_y = CENTER + case (BGRID_NE) + position_x = CORNER + position_y = CORNER + case (CGRID_NE) + position_x = EAST + position_y = NORTH + case (DGRID_NE) + position_y = EAST + position_x = NORTH + case default + call mpp_error(FATAL, "get_vector_position: invalid value of gridtype") + end select + + + end subroutine get_vector_position + + subroutine init_buffer(nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, npz, nest_level, position) + type(nest_domain_type), intent(INOUT) :: nest_domain + real, allocatable, dimension(:,:,:), intent(OUT) :: wbuffer, sbuffer, ebuffer, nbuffer + integer, intent(IN) :: npz, position, nest_level + integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c + integer :: ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c + integer :: iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c + integer :: isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c + + call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c, & + WEST, nest_level=nest_level, position=position) + call mpp_get_C2F_index(nest_domain, ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c, & + EAST, nest_level=nest_level, position=position) + call mpp_get_C2F_index(nest_domain, iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c, & + SOUTH, nest_level=nest_level, position=position) + call mpp_get_C2F_index(nest_domain, isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c, & + NORTH, nest_level=nest_level, position=position) + + if( iew_c .GE. isw_c .AND. jew_c .GE. jsw_c ) then + allocate(wbuffer(isw_c:iew_c, jsw_c:jew_c,npz)) + else + allocate(wbuffer(1,1,1)) + endif + wbuffer = 0 + + if( iee_c .GE. ise_c .AND. jee_c .GE. jse_c ) then + allocate(ebuffer(ise_c:iee_c, jse_c:jee_c,npz)) + else + allocate(ebuffer(1,1,1)) + endif + ebuffer = 0 + + if( ies_c .GE. iss_c .AND. jes_c .GE. jss_c ) then + allocate(sbuffer(iss_c:ies_c, jss_c:jes_c,npz)) + else + allocate(sbuffer(1,1,1)) + endif + sbuffer = 0 + + if( ien_c .GE. isn_c .AND. jen_c .GE. 
jsn_c ) then + allocate(nbuffer(isn_c:ien_c, jsn_c:jen_c,npz)) + else + allocate(nbuffer(1,1,1)) + endif + nbuffer = 0 + + end subroutine init_buffer + + + subroutine nested_grid_BC_mpp_3d_vector(u_nest, v_nest, u_coarse, v_coarse, nest_domain, ind_u, ind_v, wt_u, wt_v, & + istag_u, jstag_u, istag_v, jstag_v, npx, npy, npz, bd, isg, ieg, jsg, jeg, nest_level, nstep_in, nsplit_in, proc_in, & + flags, gridtype) + + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: istag_u, jstag_u, istag_v, jstag_v, npx, npy, npz, isg, ieg, jsg, jeg + real, dimension(bd%isd:bd%ied+istag_u,bd%jsd:bd%jed+jstag_u,npz), intent(INOUT) :: u_nest + real, dimension(bd%isd:bd%ied+istag_v,bd%jsd:bd%jed+jstag_v,npz), intent(INOUT) :: v_nest + real, dimension(isg:ieg+istag_u,jsg:jeg+jstag_u,npz), intent(IN) :: u_coarse + real, dimension(isg:ieg+istag_v,jsg:jeg+jstag_v,npz), intent(IN) :: v_coarse + type(nest_domain_type), intent(INOUT) :: nest_domain + integer, dimension(bd%isd:bd%ied+istag_u,bd%jsd:bd%jed+jstag_u,2), intent(IN) :: ind_u + integer, dimension(bd%isd:bd%ied+istag_v,bd%jsd:bd%jed+jstag_v,2), intent(IN) :: ind_v + real, dimension(bd%isd:bd%ied+istag_u,bd%jsd:bd%jed+jstag_u,4), intent(IN) :: wt_u + real, dimension(bd%isd:bd%ied+istag_v,bd%jsd:bd%jed+jstag_v,4), intent(IN) :: wt_v + integer, intent(IN) :: nest_level + integer, intent(IN), OPTIONAL :: nstep_in, nsplit_in + logical, intent(IN), OPTIONAL :: proc_in + integer, intent(IN), OPTIONAL :: flags, gridtype + + real, allocatable :: wbufferx(:,:,:), wbuffery(:,:,:) + real, allocatable :: ebufferx(:,:,:), ebuffery(:,:,:) + real, allocatable :: sbufferx(:,:,:), sbuffery(:,:,:) + real, allocatable :: nbufferx(:,:,:), nbuffery(:,:,:) + + integer :: i,j, ic, jc, istart, iend, k + + integer :: position_x, position_y + logical :: process + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + if (PRESENT(proc_in)) then + process = proc_in + else + process = .true. 
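+! process defaults to .true. (interpolate the received BC data onto the
+! nest halo); a PE may pass proc_in=.false. to take part in the
+! mpp_update_nest_fine communication below without doing the interpolation.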
+ endif
+
+ call get_vector_position(position_x, position_y, gridtype)
+ call init_buffer(nest_domain, wbufferx, sbufferx, ebufferx, nbufferx, npz, nest_level, position_x)
+ call init_buffer(nest_domain, wbuffery, sbuffery, ebuffery, nbuffery, npz, nest_level, position_y)
- subroutine nested_grid_BC_mpp_send(var_coarse, nest_domain, istag, jstag)
+ call timing_on ('COMM_TOTAL')
+ call mpp_update_nest_fine(u_coarse, v_coarse, nest_domain, wbufferx, wbuffery, sbufferx, sbuffery, &
+ ebufferx, ebuffery, nbufferx, nbuffery, flags=flags, nest_level=nest_level, gridtype=gridtype)
+ call timing_off('COMM_TOTAL')
+
+ if (process) then
+
+ if (is == 1) then
+!OMP parallel do default(none) shared(npz,jsd,jed,jstag_u,jstag_v,isd,ind_u,ind_v,u_nest,v_nest,wt_u,wt_v,wbufferx,wbuffery) private(ic,jc)
+ do k=1,npz
+ do j=jsd,jed+jstag_u
+ do i=isd,0
+
+ ic = ind_u(i,j,1)
+ jc = ind_u(i,j,2)
+
+ u_nest(i,j,k) = &
+ wt_u(i,j,1)*wbufferx(ic, jc, k) + &
+ wt_u(i,j,2)*wbufferx(ic, jc+1,k) + &
+ wt_u(i,j,3)*wbufferx(ic+1,jc+1,k) + &
+ wt_u(i,j,4)*wbufferx(ic+1,jc, k)
+
+ end do
+ end do
+ do j=jsd,jed+jstag_v
+ do i=isd,0
+
+ ic = ind_v(i,j,1)
+ jc = ind_v(i,j,2)
+
+ v_nest(i,j,k) = &
+ wt_v(i,j,1)*wbuffery(ic, jc, k) + &
+ wt_v(i,j,2)*wbuffery(ic, jc+1,k) + &
+ wt_v(i,j,3)*wbuffery(ic+1,jc+1,k) + &
+ wt_v(i,j,4)*wbuffery(ic+1,jc, k)
+
+ end do
+ end do
+ end do
+
+ end if
+
+ if (js == 1) then
+
+ if (is == 1) then
+ istart = is
+ else
+ istart = isd
+ end if
+
+ if (ie == npx-1) then
+ iend = ie
+ else
+ iend = ied
+ end if
+
+!OMP parallel do default(none) shared(npz,jsd,istart,iend,istag_u,istag_v,ind_u,ind_v,u_nest,v_nest,wt_u,wt_v,sbufferx,sbuffery) private(ic,jc)
+ do k=1,npz
+ do j=jsd,0
+ do i=istart,iend+istag_u
+
+ ic = ind_u(i,j,1)
+ jc = ind_u(i,j,2)
+
+ u_nest(i,j,k) = &
+ wt_u(i,j,1)*sbufferx(ic, jc, k) + &
+ wt_u(i,j,2)*sbufferx(ic, jc+1,k) + &
+ wt_u(i,j,3)*sbufferx(ic+1,jc+1,k) + &
+ wt_u(i,j,4)*sbufferx(ic+1,jc, k)
+
+ end do
+ end do
+ do j=jsd,0
+ do i=istart,iend+istag_v
+
+ ic = ind_v(i,j,1)
+ jc = ind_v(i,j,2)
+
+ v_nest(i,j,k) = &
+ wt_v(i,j,1)*sbuffery(ic, jc, k) + &
+ wt_v(i,j,2)*sbuffery(ic, jc+1,k) + &
+ wt_v(i,j,3)*sbuffery(ic+1,jc+1,k) + &
+ wt_v(i,j,4)*sbuffery(ic+1,jc, k)
+
+ end do
+ end do
+ end do
+ end if
+
+
+ if (ie == npx-1) then
+!OMP parallel do default(none) shared(npz,jsd,jed,jstag_u,jstag_v,npx,ied,istag_u,istag_v,ind_u,ind_v,u_nest,v_nest,wt_u,wt_v,ebufferx,ebuffery) private(ic,jc)
+ do k=1,npz
+ do j=jsd,jed+jstag_u
+ do i=npx+istag_u,ied+istag_u
+
+ ic = ind_u(i,j,1)
+ jc = ind_u(i,j,2)
+
+ u_nest(i,j,k) = &
+ wt_u(i,j,1)*ebufferx(ic, jc, k) + &
+ wt_u(i,j,2)*ebufferx(ic, jc+1,k) + &
+ wt_u(i,j,3)*ebufferx(ic+1,jc+1,k) + &
+ wt_u(i,j,4)*ebufferx(ic+1,jc, k)
+
+ end do
+ end do
+ do j=jsd,jed+jstag_v
+ do i=npx+istag_v,ied+istag_v
+
+ ic = ind_v(i,j,1)
+ jc = ind_v(i,j,2)
+
+ v_nest(i,j,k) = &
+ wt_v(i,j,1)*ebuffery(ic, jc, k) + &
+ wt_v(i,j,2)*ebuffery(ic, jc+1,k) + &
+ wt_v(i,j,3)*ebuffery(ic+1,jc+1,k) + &
+ wt_v(i,j,4)*ebuffery(ic+1,jc, k)
+
+ end do
+ end do
+ end do
+ end if
+
+ if (je == npy-1) then
+
+ if (is == 1) then
+ istart = is
+ else
+ istart = isd
+ end if
+
+ if (ie == npx-1) then
+ iend = ie
+ else
+ iend = ied
+ end if
+
+!OMP parallel do default(none) shared(npz,jstag_u,jstag_v,npy,jed,istart,iend,istag_u,istag_v,ind_u,ind_v,u_nest,v_nest,wt_u,wt_v,nbufferx,nbuffery) private(ic,jc)
+ do k=1,npz
+ do j=npy+jstag_u,jed+jstag_u
+ do i=istart,iend+istag_u
+
+ ic = ind_u(i,j,1)
+ jc = ind_u(i,j,2)
+
+ u_nest(i,j,k) = &
+ wt_u(i,j,1)*nbufferx(ic, jc, k) + &
+ wt_u(i,j,2)*nbufferx(ic, jc+1,k) + &
+ wt_u(i,j,3)*nbufferx(ic+1,jc+1,k) + &
+ wt_u(i,j,4)*nbufferx(ic+1,jc, k)
+
+ end do
+ end do
+ do j=npy+jstag_v,jed+jstag_v
+ do i=istart,iend+istag_v
+
+ ic = ind_v(i,j,1)
+ jc = ind_v(i,j,2)
+
+ v_nest(i,j,k) = &
+ wt_v(i,j,1)*nbuffery(ic, jc, k) + &
+ wt_v(i,j,2)*nbuffery(ic, jc+1,k) + &
+ wt_v(i,j,3)*nbuffery(ic+1,jc+1,k) + &
+ wt_v(i,j,4)*nbuffery(ic+1,jc, k)
+
+ end do
+ end do
+ end do
+ end if
+
+ endif !process
+
+ deallocate(wbufferx, ebufferx, sbufferx, nbufferx)
+ deallocate(wbuffery, ebuffery, sbuffery, nbuffery)
+
+ end subroutine nested_grid_BC_mpp_3d_vector
+
+
+ subroutine nested_grid_BC_mpp_send_3d(var_coarse, nest_domain, istag, jstag, nest_level)
 real, dimension(:,:,:), intent(IN) :: var_coarse
 type(nest_domain_type), intent(INOUT) :: nest_domain
 integer, intent(IN) :: istag, jstag
+ integer, intent(IN) :: nest_level
 real, allocatable :: wbuffer(:,:,:)
 real, allocatable :: ebuffer(:,:,:)
@@ -773,16 +1132,62 @@ subroutine nested_grid_BC_mpp_send(var_coarse, nest_domain, istag, jstag)
 call timing_on ('COMM_TOTAL')
- call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, position=position)
+ call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position)
 call timing_off('COMM_TOTAL')
 deallocate(wbuffer, ebuffer, sbuffer, nbuffer)
- end subroutine nested_grid_BC_mpp_send
+ end subroutine nested_grid_BC_mpp_send_3d
+
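+! As in the 3d variant above, the 2d send below passes 1x1 dummy receive
+! buffers to mpp_update_nest_fine: the coarse-grid PEs only send, and the
+! nested PEs receive these data through the matching receive-side calls
+! (e.g. nested_grid_BC_recv or nested_grid_BC_2D_mpp).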
+ subroutine nested_grid_BC_mpp_send_2d(var_coarse, nest_domain, istag, jstag, nest_level)
+
+ real, dimension(:,:), intent(IN) :: var_coarse
+ type(nest_domain_type), intent(INOUT) :: nest_domain
+ integer, intent(IN) :: istag, jstag
+ integer, intent(IN) :: nest_level
+
+ real, allocatable :: wbuffer(:,:)
+ real, allocatable :: ebuffer(:,:)
+ real, allocatable :: sbuffer(:,:)
+ real, allocatable :: nbuffer(:,:)
+
+ integer :: i,j, ic, jc, istart, iend, k
+
+ integer :: position
+
+
+ if (istag == 1 .and. jstag == 1) then
+ position = CORNER
+ else if (istag == 0 .and. jstag == 1) then
+ position = NORTH
+ else if (istag == 1 .and. jstag == 0) then
+ position = EAST
+ else
+ position = CENTER
+ end if
+
+
+ allocate(wbuffer(1,1))
+
+ allocate(ebuffer(1,1))
+
+ allocate(sbuffer(1,1))
+
+ allocate(nbuffer(1,1))
+
+
+ call timing_on ('COMM_TOTAL')
+ call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position)
+ call timing_off('COMM_TOTAL')
+
+
+ deallocate(wbuffer, ebuffer, sbuffer, nbuffer)
+
+ end subroutine nested_grid_BC_mpp_send_2d
 subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, istag, jstag, &
- npx, npy, bd, isg, ieg, jsg, jeg, nstep_in, nsplit_in, proc_in)
+ npx, npy, bd, isg, ieg, jsg, jeg, nest_level, nstep_in, nsplit_in, proc_in)
 type(fv_grid_bounds_type), intent(IN) :: bd
 real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag), intent(INOUT) :: var_nest
@@ -791,6 +1196,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist
 integer, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,2), intent(IN) :: ind
 real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,4), intent(IN) :: wt
 integer, intent(IN) :: istag, jstag, npx, npy, isg, ieg, jsg, jeg
+ integer, intent(IN), OPTIONAL :: nest_level
 integer, intent(IN), OPTIONAL :: nstep_in, nsplit_in
 logical, intent(IN), OPTIONAL :: proc_in
@@ -804,6 +1210,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist
 real, allocatable :: nbuffer(:,:)
 integer :: i,j, ic, jc, istart, iend, k
+ integer :: nl !nest_level (no initializer: an initializer would imply SAVE)
 integer :: position
 logical :: process
@@ -826,6 +1233,11 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist
 process = .true.
 endif
+ nl = 1
+ if (PRESENT(nest_level)) then
+ nl = nest_level
+ endif
+
 if (istag == 1 .and. jstag == 1) then
 position = CORNER
 else if (istag == 0 .and. jstag == 1) then
@@ -837,13 +1248,13 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist
 end if
 call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c, &
- WEST, position=position)
+ WEST, nest_level=nl, position=position)
 call mpp_get_C2F_index(nest_domain, ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c, &
- EAST, position=position)
+ EAST, nest_level=nl, position=position)
 call mpp_get_C2F_index(nest_domain, iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c, &
- SOUTH, position=position)
+ SOUTH, nest_level=nl, position=position)
 call mpp_get_C2F_index(nest_domain, isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c, &
- NORTH, position=position)
+ NORTH, nest_level=nl, position=position)
 if( iew_c .GE. isw_c .AND. jew_c .GE. 
jsw_c ) then allocate(wbuffer(isw_c:iew_c, jsw_c:jew_c)) @@ -874,7 +1285,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist nbuffer = 0 call timing_on ('COMM_TOTAL') - call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, position=position) + call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nl, position=position) call timing_off('COMM_TOTAL') if (process) then @@ -890,7 +1301,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*wbuffer(ic, jc) + & wt(i,j,2)*wbuffer(ic, jc+1) + & wt(i,j,3)*wbuffer(ic+1,jc+1) + & - wt(i,j,4)*wbuffer(ic+1,jc) + wt(i,j,4)*wbuffer(ic+1,jc) end do end do @@ -920,7 +1331,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*sbuffer(ic, jc) + & wt(i,j,2)*sbuffer(ic, jc+1) + & wt(i,j,3)*sbuffer(ic+1,jc+1) + & - wt(i,j,4)*sbuffer(ic+1,jc) + wt(i,j,4)*sbuffer(ic+1,jc) end do end do @@ -938,7 +1349,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*ebuffer(ic, jc) + & wt(i,j,2)*ebuffer(ic, jc+1) + & wt(i,j,3)*ebuffer(ic+1,jc+1) + & - wt(i,j,4)*ebuffer(ic+1,jc) + wt(i,j,4)*ebuffer(ic+1,jc) end do end do @@ -968,7 +1379,7 @@ subroutine nested_grid_BC_2D_mpp(var_nest, var_coarse, nest_domain, ind, wt, ist wt(i,j,1)*nbuffer(ic, jc) + & wt(i,j,2)*nbuffer(ic, jc+1) + & wt(i,j,3)*nbuffer(ic+1,jc+1) + & - wt(i,j,4)*nbuffer(ic+1,jc) + wt(i,j,4)*nbuffer(ic+1,jc) end do end do @@ -1026,7 +1437,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1056,7 +1467,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1074,7 +1485,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1105,7 +1516,7 @@ subroutine nested_grid_BC_2D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc) + & wt(i,j,2)*var_coarse(ic, jc+1) + & wt(i,j,3)*var_coarse(ic+1,jc+1) + & - wt(i,j,4)*var_coarse(ic+1,jc) + wt(i,j,4)*var_coarse(ic+1,jc) end do end do @@ -1151,6 +1562,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & end if if (is == 1) then +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,isd,ind,var_nest,wt,var_coarse) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=isd,0 @@ -1162,7 +1574,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1183,6 +1595,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & iend = ied end if +!OMP parallel do default(none) shared(npz,jsd,istart,iend,istag,ind,var_nest,wt,var_coarse) private(ic,jc) do k=1,npz do j=jsd,0 do i=istart,iend+istag @@ -1194,7 +1607,7 @@ subroutine 
nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1203,6 +1616,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & if (ie == npx-1) then +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,npx,ied,istag,ind,var_nest,wt,var_coarse) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=npx+istag,ied+istag @@ -1214,7 +1628,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1235,6 +1649,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & iend = ied end if +!OMP parallel do default(none) shared(npz,npy,jed,jstag,istart,iend,istag,ind,var_nest,wt,var_coarse) private(ic,jc) do k=1,npz do j=npy+jstag,jed+jstag do i=istart,iend+istag @@ -1246,7 +1661,7 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & wt(i,j,1)*var_coarse(ic, jc, k) + & wt(i,j,2)*var_coarse(ic, jc+1,k) + & wt(i,j,3)*var_coarse(ic+1,jc+1,k) + & - wt(i,j,4)*var_coarse(ic+1,jc, k) + wt(i,j,4)*var_coarse(ic+1,jc, k) end do end do @@ -1257,11 +1672,12 @@ subroutine nested_grid_BC_3D(var_nest, var_coarse, ind, wt, istag, jstag, & end subroutine nested_grid_BC_3D - subroutine nested_grid_BC_send(var_coarse, nest_domain, istag, jstag) + subroutine nested_grid_BC_send_scalar(var_coarse, nest_domain, istag, jstag, nest_level) real, dimension(:,:,:), intent(IN) :: var_coarse type(nest_domain_type), intent(INOUT) :: nest_domain integer, intent(IN) :: istag, jstag + integer, intent(IN) :: nest_level integer :: position @@ -1282,28 +1698,29 @@ subroutine nested_grid_BC_send(var_coarse, nest_domain, istag, jstag) end if call timing_on ('COMM_TOTAL') - call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, position=position) + call mpp_update_nest_fine(var_coarse, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level=nest_level, position=position) call timing_off('COMM_TOTAL') - end subroutine nested_grid_BC_send + end subroutine nested_grid_BC_send_scalar - subroutine nested_grid_BC_recv(nest_domain, istag, jstag, npz, & - bd, nest_BC_buffers) + subroutine nested_grid_BC_recv_scalar(nest_domain, istag, jstag, npz, & + bd, nest_BC_buffers, nest_level) type(fv_grid_bounds_type), intent(IN) :: bd type(nest_domain_type), intent(INOUT) :: nest_domain integer, intent(IN) :: istag, jstag, npz + integer, intent(IN) :: nest_level type(fv_nest_BC_type_3d), intent(INOUT), target :: nest_BC_buffers - + real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var_coarse_dummy integer :: position - integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c - integer :: ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c - integer :: iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c - integer :: isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c +!!$ integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c +!!$ integer :: ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c +!!$ integer :: iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c +!!$ integer :: isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c integer :: i,j, k @@ -1318,80 +1735,152 @@ subroutine 
nested_grid_BC_recv(nest_domain, istag, jstag, npz, & end if if (.not. allocated(nest_BC_buffers%west_t1) ) then + call init_nest_bc_type(nest_domain, nest_BC_buffers, npz, nest_level, position) + endif + + call timing_on ('COMM_TOTAL') + call mpp_update_nest_fine(var_coarse_dummy, nest_domain, nest_BC_buffers%west_t1, nest_BC_buffers%south_t1, & + nest_BC_buffers%east_t1, nest_BC_buffers%north_t1, nest_level=nest_level, position=position) + call timing_off('COMM_TOTAL') + + end subroutine nested_grid_BC_recv_scalar + + subroutine nested_grid_BC_send_vector(u_coarse, v_coarse, nest_domain, nest_level, flags, gridtype) + real, dimension(:,:,:), intent(IN) :: u_coarse, v_coarse + type(nest_domain_type), intent(INOUT) :: nest_domain + integer, intent(IN) :: nest_level + integer, optional, intent(IN) :: flags, gridtype + + real :: wbufferx(1,1,1), wbuffery(1,1,1) + real :: ebufferx(1,1,1), ebuffery(1,1,1) + real :: sbufferx(1,1,1), sbuffery(1,1,1) + real :: nbufferx(1,1,1), nbuffery(1,1,1) + + integer :: nl = 1 + + call timing_on ('COMM_TOTAL') + call mpp_update_nest_fine(u_coarse, v_coarse, nest_domain, wbufferx,wbuffery, sbufferx, sbuffery, & + ebufferx, ebuffery, nbufferx, nbuffery, nest_level=nest_level, flags=flags, gridtype=gridtype) + call timing_off('COMM_TOTAL') + + end subroutine nested_grid_BC_send_vector + + subroutine init_nest_bc_type(nest_domain, nest_BC_buffers, npz, nest_level, position) + type(nest_domain_type), intent(INOUT) :: nest_domain + type(fv_nest_BC_type_3d), intent(INOUT) :: nest_BC_buffers + integer, intent(IN) :: npz, position, nest_level + + integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c + integer :: ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c + integer :: iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c + integer :: isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c + integer :: i, j, k call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c, & - WEST, position=position) + WEST, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, ise_f, iee_f, jse_f, jee_f, ise_c, iee_c, jse_c, jee_c, & - EAST, position=position) + EAST, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, iss_f, ies_f, jss_f, jes_f, iss_c, ies_c, jss_c, jes_c, & - SOUTH, position=position) + SOUTH, nest_level=nest_level, position=position) call mpp_get_C2F_index(nest_domain, isn_f, ien_f, jsn_f, jen_f, isn_c, ien_c, jsn_c, jen_c, & - NORTH, position=position) + NORTH, nest_level=nest_level, position=position) if( iew_c .GE. isw_c .AND. jew_c .GE. jsw_c ) then If (.not. allocated(nest_BC_buffers%west_t1)) allocate(nest_BC_buffers%west_t1(isw_c:iew_c, jsw_c:jew_c,npz)) !compatible with first touch principle +!OMP parallel do default(none) shared(npz,jsw_c,jew_c,isw_c,iew_c,nest_BC_buffers) do k=1,npz do j=jsw_c,jew_c do i=isw_c,iew_c - nest_BC_buffers%west_t1(i,j,k) = 0. + nest_BC_buffers%west_t1(i,j,k) = 1.e24 + enddo enddo enddo - enddo else allocate(nest_BC_buffers%west_t1(1,1,1)) - nest_BC_buffers%west_t1(1,1,1) = 0. + nest_BC_buffers%west_t1(1,1,1) = 1.e24 endif if( iee_c .GE. ise_c .AND. jee_c .GE. jse_c ) then If (.not. allocated(nest_BC_buffers%east_t1)) allocate(nest_BC_buffers%east_t1(ise_c:iee_c, jse_c:jee_c,npz)) +!OMP parallel do default(none) shared(npz,jse_c,jee_c,ise_c,iee_c,nest_BC_buffers) do k=1,npz do j=jse_c,jee_c do i=ise_c,iee_c - nest_BC_buffers%east_t1(i,j,k) = 0. 
+ nest_BC_buffers%east_t1(i,j,k) = 1.e24 enddo enddo enddo else allocate(nest_BC_buffers%east_t1(1,1,1)) - nest_BC_buffers%east_t1(1,1,1) = 0. + nest_BC_buffers%east_t1(1,1,1) = 1.e24 endif if( ies_c .GE. iss_c .AND. jes_c .GE. jss_c ) then If (.not. allocated(nest_BC_buffers%south_t1)) allocate(nest_BC_buffers%south_t1(iss_c:ies_c, jss_c:jes_c,npz)) +!OMP parallel do default(none) shared(npz,jss_c,jes_c,iss_c,ies_c,nest_BC_buffers) do k=1,npz do j=jss_c,jes_c do i=iss_c,ies_c - nest_BC_buffers%south_t1(i,j,k) = 0. + nest_BC_buffers%south_t1(i,j,k) = 1.e24 enddo enddo enddo else allocate(nest_BC_buffers%south_t1(1,1,1)) - nest_BC_buffers%south_t1(1,1,1) = 0. + nest_BC_buffers%south_t1(1,1,1) = 1.e24 endif if( ien_c .GE. isn_c .AND. jen_c .GE. jsn_c ) then If (.not. allocated(nest_BC_buffers%north_t1)) allocate(nest_BC_buffers%north_t1(isn_c:ien_c, jsn_c:jen_c,npz)) +!OMP parallel do default(none) shared(npz,jsn_c,jen_c,isn_c,ien_c,nest_BC_buffers) do k=1,npz do j=jsn_c,jen_c do i=isn_c,ien_c - nest_BC_buffers%north_t1(i,j,k) = 0. + nest_BC_buffers%north_t1(i,j,k) = 1.e24 enddo enddo enddo else allocate(nest_BC_buffers%north_t1(1,1,1)) - nest_BC_buffers%north_t1(1,1,1) = 0 + nest_BC_buffers%north_t1(1,1,1) = 1.e24 endif + + end subroutine init_nest_bc_type + + subroutine nested_grid_BC_recv_vector(nest_domain, npz, bd, nest_BC_u_buffers, nest_BC_v_buffers, nest_level, flags, gridtype) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(nest_domain_type), intent(INOUT) :: nest_domain + integer, intent(IN) :: npz + type(fv_nest_BC_type_3d), intent(INOUT), target :: nest_BC_u_buffers, nest_BC_v_buffers + integer, intent(IN) :: nest_level + integer, optional, intent(IN) :: flags, gridtype + + real, dimension(1,1,npz) :: u_coarse_dummy, v_coarse_dummy + + integer :: i,j, k + integer :: position_x, position_y + + call get_vector_position(position_x, position_y, gridtype) + + if (.not. allocated(nest_BC_u_buffers%west_t1) ) then + call init_nest_bc_type(nest_domain, nest_BC_u_buffers, npz, nest_level, position_x) + endif + if (.not. allocated(nest_BC_v_buffers%west_t1) ) then + call init_nest_bc_type(nest_domain, nest_BC_v_buffers, npz, nest_level, position_y) endif call timing_on ('COMM_TOTAL') - call mpp_update_nest_fine(var_coarse_dummy, nest_domain, nest_BC_buffers%west_t1, nest_BC_buffers%south_t1, nest_BC_buffers%east_t1, nest_BC_buffers%north_t1, position=position) + call mpp_update_nest_fine(u_coarse_dummy, v_coarse_dummy, nest_domain, & + nest_BC_u_buffers%west_t1, nest_BC_v_buffers%west_t1, nest_BC_u_buffers%south_t1, nest_BC_v_buffers%south_t1, & + nest_BC_u_buffers%east_t1, nest_BC_v_buffers%east_t1, nest_BC_u_buffers%north_t1, nest_BC_v_buffers%north_t1, & + nest_level, flags, gridtype) call timing_off('COMM_TOTAL') - end subroutine nested_grid_BC_recv + end subroutine nested_grid_BC_recv_vector + subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & npx, npy, npz, bd, nest_BC, nest_BC_buffers, pd_in) @@ -1406,7 +1895,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & !!NOTE: if declaring an ALLOCATABLE array with intent(OUT), the resulting dummy array !! will NOT be allocated! This goes for allocatable members of derived types as well. 
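!! A minimal sketch of that pitfall (hypothetical names, not part of this
!! module): an intent(OUT) allocatable dummy is automatically deallocated
!! on entry, so the callee must allocate it before use:
!!
!!   subroutine get_buf(buf, n)
!!     real, allocatable, intent(OUT) :: buf(:)  ! deallocated on entry
!!     integer, intent(IN) :: n
!!     allocate(buf(n))                          ! must re-allocate here
!!     buf = 0.
!!   end subroutine get_buf
!!
!! Declaring the buffers intent(INOUT), as below, preserves whatever
!! allocation the caller has already done.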
type(fv_nest_BC_type_3d), intent(INOUT), target :: nest_BC, nest_BC_buffers - + real, dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var_coarse_dummy real, dimension(:,:,:), pointer :: var_east, var_west, var_south, var_north @@ -1451,7 +1940,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & !To do this more securely, instead of using is/etc we could use the fine-grid indices defined above if (is == 1 ) then -!$NO-MP parallel do default(none) shared(npz,isd,ied,jsd,jed,jstag,ind,var_west,wt,buf_west) private(ic,jc) +!$OMP parallel do default(none) shared(npz,isd,ied,jsd,jed,jstag,ind,var_west,wt,buf_west) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=isd,0 @@ -1464,14 +1953,14 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_west(ic, jc,k) + & wt(i,j,2)*buf_west(ic, jc+1,k) + & wt(i,j,3)*buf_west(ic+1,jc+1,k) + & - wt(i,j,4)*buf_west(ic+1,jc,k) + wt(i,j,4)*buf_west(ic+1,jc,k) end do end do end do if (pd) then -!$NO-MP parallel do default(none) shared(npz,jsd,jed,jstag,isd,var_west,nest_BC) +!$OMP parallel do default(none) shared(npz,jsd,jed,jstag,isd,var_west,nest_BC) do k=1,npz do j=jsd,jed+jstag do i=isd,0 @@ -1479,7 +1968,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & var_west(i,j,k) = max(var_west(i,j,k), 0.5*nest_BC%west_t0(i,j,k)) end do end do - end do + end do endif end if @@ -1498,7 +1987,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & iend = ied end if -!$NO-MP parallel do default(none) shared(npz,istart,iend,jsd,jed,istag,ind,var_south,wt,buf_south) private(ic,jc) +!$OMP parallel do default(none) shared(npz,istart,iend,jsd,jed,istag,ind,var_south,wt,buf_south) private(ic,jc) do k=1,npz do j=jsd,0 do i=istart,iend+istag @@ -1511,14 +2000,14 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_south(ic, jc,k) + & wt(i,j,2)*buf_south(ic, jc+1,k) + & wt(i,j,3)*buf_south(ic+1,jc+1,k) + & - wt(i,j,4)*buf_south(ic+1,jc,k) + wt(i,j,4)*buf_south(ic+1,jc,k) end do end do end do if (pd) then -!$NO-MP parallel do default(none) shared(npz,jsd,jed,istart,iend,istag,var_south,nest_BC) +!$OMP parallel do default(none) shared(npz,jsd,jed,istart,iend,istag,var_south,nest_BC) do k=1,npz do j=jsd,0 do i=istart,iend+istag @@ -1527,7 +2016,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do endif end if @@ -1535,7 +2024,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & if (ie == npx-1 ) then -!$NO-MP parallel do default(none) shared(npx,npz,isd,ied,jsd,jed,istag,jstag,ind,var_east,wt,buf_east) private(ic,jc) +!$OMP parallel do default(none) shared(npx,npz,isd,ied,jsd,jed,istag,jstag,ind,var_east,wt,buf_east) private(ic,jc) do k=1,npz do j=jsd,jed+jstag do i=npx+istag,ied+istag @@ -1548,14 +2037,14 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_east(ic, jc,k) + & wt(i,j,2)*buf_east(ic, jc+1,k) + & wt(i,j,3)*buf_east(ic+1,jc+1,k) + & - wt(i,j,4)*buf_east(ic+1,jc,k) + wt(i,j,4)*buf_east(ic+1,jc,k) end do end do end do if (pd) then -!$NO-MP parallel do default(none) shared(npx,npz,jsd,jed,istag,jstag,ied,var_east,nest_BC) +!$OMP parallel do default(none) shared(npx,npz,jsd,jed,istag,jstag,ied,var_east,nest_BC) do k=1,npz do j=jsd,jed+jstag do i=npx+istag,ied+istag @@ -1564,7 +2053,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do 
endif end if @@ -1583,7 +2072,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & iend = ied end if -!$NO-MP parallel do default(none) shared(npy,npz,istart,iend,jsd,jed,istag,jstag,ind,var_north,wt,buf_north) private(ic,jc) +!$OMP parallel do default(none) shared(npy,npz,istart,iend,jsd,jed,istag,jstag,ind,var_north,wt,buf_north) private(ic,jc) do k=1,npz do j=npy+jstag,jed+jstag do i=istart,iend+istag @@ -1596,14 +2085,14 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & wt(i,j,1)*buf_north(ic, jc,k) + & wt(i,j,2)*buf_north(ic, jc+1,k) + & wt(i,j,3)*buf_north(ic+1,jc+1,k) + & - wt(i,j,4)*buf_north(ic+1,jc,k) + wt(i,j,4)*buf_north(ic+1,jc,k) end do end do end do if (pd) then -!$NO-MP parallel do default(none) shared(npy,npz,jsd,jed,istart,iend,istag,jstag,ied,var_north,nest_BC) +!$OMP parallel do default(none) shared(npy,npz,jsd,jed,istart,iend,istag,jstag,ied,var_north,nest_BC) do k=1,npz do j=npy+jstag,jed+jstag do i=istart,iend+istag @@ -1612,7 +2101,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end do end do - end do + end do endif end if @@ -1620,7 +2109,7 @@ subroutine nested_grid_BC_save_proc(nest_domain, ind, wt, istag, jstag, & end subroutine nested_grid_BC_save_proc - ! A NOTE ON BCTYPE: currently only an interpolation BC is implemented, + ! A NOTE ON BCTYPE: currently only an interpolation BC is implemented, ! bctype >= 2 currently correspond ! to a flux BC on the tracers ONLY, which is implemented in fv_tracer. @@ -1633,7 +2122,7 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & integer, intent(IN) :: istag, jstag, npx, npy, npz real, intent(IN) :: split, step integer, intent(IN) :: bctype - + type(fv_nest_BC_type_3D), intent(IN), target :: BC real, pointer, dimension(:,:,:) :: var_t0, var_t1 @@ -1658,13 +2147,13 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & if (is == 1 ) then var_t0 => BC%west_t0 var_t1 => BC%west_t1 +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,isd,var_nest,var_t0,var_t1,split,step,denom) do k=1,npz do j=jsd,jed+jstag do i=isd,0 var_nest(i,j,k) = (var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom end do - - end do + end do end do end if @@ -1684,10 +2173,10 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & var_t0 => BC%south_t0 var_t1 => BC%south_t1 +!OMP parallel do default(none) shared(npz,jsd,istart,iend,istag,var_nest,var_t0,var_t1,split,step,denom) do k=1,npz do j=jsd,0 - do i=istart,iend+istag - + do i=istart,iend+istag var_nest(i,j,k) = (var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom end do end do @@ -1698,15 +2187,14 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & if (ie == npx-1 ) then var_t0 => BC%east_t0 var_t1 => BC%east_t1 +!OMP parallel do default(none) shared(npz,jsd,jed,jstag,npx,isd,istag,var_nest,var_t0,var_t1,split,step,denom) do k=1,npz do j=jsd,jed+jstag - do i=npx+istag,ied+istag - var_nest(i,j,k) = (var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom - - end do + do i=npx+istag,ied+istag + var_nest(i,j,k) = (var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom + end do end do end do - end if if (je == npy-1 ) then @@ -1725,14 +2213,13 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & var_t0 => BC%north_t0 var_t1 => BC%north_t1 +!OMP parallel do default(none) shared(npz,npy,jed,jstag,istart,iend,istag,var_nest,var_t0,var_t1,split,step,denom) do k=1,npz do j=npy+jstag,jed+jstag - do i=istart,iend+istag - - var_nest(i,j,k) = 
(var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom - - end do - end do + do i=istart,iend+istag + var_nest(i,j,k) = (var_t0(i,j,k)*(split-step) + step*var_t1(i,j,k))*denom + end do + end do end do end if @@ -1740,71 +2227,73 @@ subroutine nested_grid_BC_apply_intT(var_nest, istag, jstag, & end subroutine nested_grid_BC_apply_intT - subroutine update_coarse_grid_mpp_2d(var_coarse, var_nest, nest_domain, ind_update, dx, dy, area, & - isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, isu, ieu, jsu, jeu, npx, npy, & - istag, jstag, r, nestupdate, upoff, nsponge, parent_proc, child_proc, parent_grid) + subroutine update_coarse_grid_mpp_2d(var_coarse, var_nest, nest_domain, dx, dy, area, & + bd, isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, isu, ieu, jsu, jeu, npx, npy, & + istag, jstag, r, nestupdate, upoff, nsponge, parent_proc, child_proc, parent_grid, nest_level) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n integer, intent(IN) :: isu, ieu, jsu, jeu integer, intent(IN) :: istag, jstag, r, nestupdate, upoff, nsponge - integer, intent(IN) :: ind_update(isd_p:ied_p+1,jsd_p:jed_p+1,2) integer, intent(IN) :: npx, npy - real, intent(IN) :: var_nest(is_n:ie_n+istag,js_n:je_n+jstag) - real, intent(INOUT) :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag) - real, intent(IN) :: dx(isd:ied,jsd:jed+1) - real, intent(IN) :: dy(isd:ied+1,jsd:jed) - real, intent(IN) :: area(isd:ied,jsd:jed) + real, intent(IN), target :: var_nest(is_n:ie_n+istag,js_n:je_n+jstag) + real, intent(INOUT), target :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag) + real, intent(IN) :: dx(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real, intent(IN) :: dy(bd%isd:bd%ied+1,bd%jsd:bd%jed) + real, intent(IN) :: area(bd%isd:bd%ied,bd%jsd:bd%jed) logical, intent(IN) :: parent_proc, child_proc - type(fv_atmos_type), intent(INOUT) :: parent_grid + type(fv_atmos_type), pointer, intent(IN) :: parent_grid type(nest_domain_type), intent(INOUT) :: nest_domain + integer, intent(IN) :: nest_level real :: var_nest_3d(is_n:ie_n+istag,js_n:je_n+jstag,1) real :: var_coarse_3d(isd_p:ied_p+istag,jsd_p:jed_p+jstag,1) + pointer(ptr_nest, var_nest_3d) + pointer(ptr_coarse, var_coarse_3d) - if (child_proc .and. size(var_nest) > 1) var_nest_3d(is_n:ie_n+istag,js_n:je_n+jstag,1) = var_nest(is_n:ie_n+istag,js_n:je_n+jstag) - if (parent_proc .and. size(var_coarse) > 1) var_coarse_3d(isd_p:ied_p+istag,jsd_p:jed_p,1) = var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag) + if (child_proc .and. size(var_nest) > 1) ptr_nest = LOC(var_nest) + if (parent_proc .and. size(var_coarse) > 1) ptr_coarse = LOC(var_coarse) call update_coarse_grid_mpp(var_coarse_3d, var_nest_3d, & - nest_domain, ind_update, dx, dy, area, & - isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, & + nest_domain, dx, dy, area, & + bd, isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, & isu, ieu, jsu, jeu, npx, npy, 1, & istag, jstag, r, nestupdate, upoff, nsponge, & - parent_proc, child_proc, parent_grid) - - if (size(var_coarse) > 1 .and. 
parent_proc) var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag) = var_coarse_3d(isd_p:ied_p+istag,jsd_p:jed_p,1) + parent_proc, child_proc, parent_grid, nest_level ) end subroutine update_coarse_grid_mpp_2d - subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, dx, dy, area, & - isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, & + subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, area, & + bd, isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, & isu, ieu, jsu, jeu, npx, npy, npz, & - istag, jstag, r, nestupdate, upoff, nsponge, parent_proc, child_proc, parent_grid) + istag, jstag, r, nestupdate, upoff, nsponge, & + parent_proc, child_proc, parent_grid, nest_level) !This routine assumes the coarse and nested grids are properly ! aligned, and that in particular for odd refinement ratios all - ! coarse-grid points coincide with nested-grid points + ! coarse-grid cells (faces) coincide with nested-grid cells (faces) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n integer, intent(IN) :: isu, ieu, jsu, jeu integer, intent(IN) :: istag, jstag, npx, npy, npz, r, nestupdate, upoff, nsponge - integer, intent(IN) :: ind_update(isd_p:ied_p+1,jsd_p:jed_p+1,2) real, intent(IN) :: var_nest(is_n:ie_n+istag,js_n:je_n+jstag,npz) real, intent(INOUT) :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag,npz) - real, intent(IN) :: area(isd:ied,jsd:jed) - real, intent(IN) :: dx(isd:ied,jsd:jed+1) - real, intent(IN) :: dy(isd:ied+1,jsd:jed) + real, intent(IN) :: area(bd%isd:bd%ied,bd%jsd:bd%jed) + real, intent(IN) :: dx(bd%isd:bd%ied,bd%jsd:bd%jed+1) + real, intent(IN) :: dy(bd%isd:bd%ied+1,bd%jsd:bd%jed) logical, intent(IN) :: parent_proc, child_proc - type(fv_atmos_type), intent(INOUT) :: parent_grid - + type(fv_atmos_type), pointer, intent(IN) :: parent_grid type(nest_domain_type), intent(INOUT) :: nest_domain + integer, intent(IN) :: nest_level integer :: in, jn, ini, jnj, s, qr integer :: is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f integer :: istart, istop, jstart, jstop, ishift, jshift, j, i, k real :: val - real, allocatable, dimension(:,:,:) :: nest_dat - real :: var_nest_send(is_n:ie_n+istag,js_n:je_n+jstag,npz) + real, allocatable, dimension(:,:,:) :: coarse_dat_send + real, allocatable :: coarse_dat_recv(:,:,:) integer :: position if (istag == 1 .and. jstag == 1) then @@ -1817,47 +2306,105 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, position = CENTER end if - call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, position=position) - if (ie_f > is_f .and. je_f > js_f) then - allocate(nest_dat (is_f:ie_f, js_f:je_f,npz)) - else - allocate(nest_dat(1,1,1)) + call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, nest_level=nest_level, position=position) + if (child_proc) then + allocate(coarse_dat_send(is_c:ie_c, js_c:je_c,npz)) + coarse_dat_send = -1200. endif - nest_dat = -600 + allocate(coarse_dat_recv(isd_p:ied_p+istag, jsd_p:jed_p+jstag, npz)) if (child_proc) then -!! 
IF an area average (for istag == jstag == 0) or a linear average then multiply in the areas before sending data + call fill_coarse_data_send(coarse_dat_send, var_nest, dx, dy, area, & + bd, is_c, ie_c, js_c, je_c, is_f, js_f, is_n, ie_n, js_n, je_n, & + npx, npy, npz, istag, jstag, r, nestupdate) + endif + + call timing_on('COMM_TOTAL') + call mpp_update_nest_coarse(field_in=coarse_dat_send, nest_domain=nest_domain, field_out=coarse_dat_recv, & + nest_level=nest_level, position=position) + + if (allocated(coarse_dat_send)) then + deallocate(coarse_dat_send) + end if + + call timing_off('COMM_TOTAL') + + s = r/2 !rounds down (since r > 0) + qr = r*upoff + nsponge - s + + if (parent_proc .and. .not. (ie_c < is_c .or. je_c < js_c)) then + call fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, & + is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid) + endif + + if (allocated(coarse_dat_recv)) deallocate(coarse_dat_recv) + + end subroutine update_coarse_grid_mpp + + subroutine fill_coarse_data_send(coarse_dat_send, var_nest, dx, dy, area, & + bd, is_c, ie_c, js_c, je_c, is_f, js_f, is_n, ie_n, js_n, je_n, & + npx, npy, npz, istag, jstag, r, nestupdate) + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: is_c, ie_c, js_c, je_c, is_n, ie_n, js_n, je_n + integer, intent(IN) :: is_f, js_f + integer, intent(IN) :: istag, jstag + integer, intent(IN) :: npx, npy, npz, r, nestupdate + real, intent(INOUT) :: coarse_dat_send(is_c:ie_c,js_c:je_c,npz) + real, intent(IN) :: var_nest(is_n:ie_n+istag,js_n:je_n+jstag,npz) + real, intent(IN) :: area(bd%isd:bd%ied,bd%jsd:bd%jed) + real, intent(IN) :: dx(bd%isd:bd%ied,bd%jsd:bd%jed+1) + real, intent(IN) :: dy(bd%isd:bd%ied+1,bd%jsd:bd%jed) + integer :: in, jn, ini, jnj, k, j, i + real :: val + + if (istag == 0 .and. jstag == 0) then select case (nestupdate) case (1,2,6,7,8) - -!$NO-MP parallel do default(none) shared(npz,js_n,je_n,is_n,ie_n,var_nest_send,var_nest,area) - do k=1,npz - do j=js_n,je_n - do i=is_n,ie_n +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,js_f,is_f,coarse_dat_send,var_nest,area,r) private(in,jn,val) + do k=1,npz + jn = js_f + do j=js_c,je_c + in = is_f + do i=is_c,ie_c - var_nest_send(i,j,k) = var_nest(i,j,k)*area(i,j) + val = 0. + do jnj=jn,jn+r-1 + do ini=in,in+r-1 + val = val + var_nest(ini,jnj,k)*area(ini,jnj) + end do + end do + coarse_dat_send(i,j,k) = val !divide area on coarse grid + in = in + r end do + jn = jn + r end do end do end select else if (istag == 0 .and. jstag > 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) -!$NO-MP parallel do default(none) shared(npz,js_n,je_n,is_n,ie_n,var_nest_send,var_nest,dx) +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,js_f,is_f,coarse_dat_send,var_nest,dx,r) private(in,jn,val) do k=1,npz - do j=js_n,je_n+1 - do i=is_n,ie_n + jn = js_f + do j=js_c,je_c!+1 + in = is_f + do i=is_c,ie_c + val = 0. + do ini=in,in+r-1 + val = val + var_nest(ini,jn,k)*dx(ini,jn) + end do + coarse_dat_send(i,j,k) = val - var_nest_send(i,j,k) = var_nest(i,j,k)*dx(i,j) - + in = in + r end do + jn = jn + r end do end do @@ -1868,18 +2415,26 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end select else if (istag > 0 .and. 
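For cell-mean fields (istag == jstag == 0) the child integrates var*area over each r-by-r block of nest cells before sending, and the parent divides by the coarse-cell area afterward (see fill_var_coarse below), which makes the two-way update conservative. A tiny self-contained check of that identity, with illustrative numbers and r = 2:

    program conservative_restriction
      implicit none
      real :: fine(2,2), farea(2,2), carea, coarse
      fine  = reshape([1., 2., 3., 4.], [2,2])  ! nest-cell means
      farea = 1.                                ! nest-cell areas
      carea = sum(farea)                        ! coarse cell covers the block exactly
      coarse = sum(fine*farea)/carea            ! area-weighted mean = 2.5
      ! the coarse cell holds the same integral as the fine cells it replaces:
      print *, coarse, sum(fine*farea) - coarse*carea   ! 2.5, 0.0
    end program conservative_restriction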
jstag == 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average -!$NO-MP parallel do default(none) shared(npz,js_n,je_n,is_n,ie_n,var_nest_send,var_nest,dy) +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,js_f,is_f,coarse_dat_send,var_nest,dy,r) private(in,jn,val) do k=1,npz - do j=js_n,je_n - do i=is_n,ie_n+1 + jn = js_f + do j=js_c,je_c + in = is_f + do i=is_c,ie_c!+1 - var_nest_send(i,j,k) = var_nest(i,j,k)*dy(i,j) + val = 0. + do jnj=jn,jn+r-1 + val = val + var_nest(in,jnj,k)*dy(in,jnj) + end do + coarse_dat_send(i,j,k) = val + in = in + r end do + jn = jn + r end do end do @@ -1890,53 +2445,41 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end select else - + call mpp_error(FATAL, "Cannot have both nonzero istag and jstag.") - endif endif - call timing_on('COMM_TOTAL') - call mpp_update_nest_coarse(var_nest_send, nest_domain, nest_dat, position=position) - call timing_off('COMM_TOTAL') - s = r/2 !rounds down (since r > 0) - qr = r*upoff + nsponge - s - - if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then - if (istag == 0 .and. jstag == 0) then - - select case (nestupdate) - case (1,2,6,7,8) ! 1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update + end subroutine fill_coarse_data_send -!$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) & -!$NO-MP private(in,jn,val) - do k=1,npz - do j=jsu,jeu - do i=isu,ieu + subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, & + is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid) - in = ind_update(i,j,1) - jn = ind_update(i,j,2) + !This routine assumes the coarse and nested grids are properly + ! aligned, and that in particular for odd refinement ratios all + ! coarse-grid cells (faces) coincide with nested-grid cells (faces) -!!$ if (in < max(1+qr,is_f) .or. in > min(npx-1-qr-r+1,ie_f) .or. & -!!$ jn < max(1+qr,js_f) .or. jn > min(npy-1-qr-r+1,je_f)) then -!!$ write(mpp_pe()+3000,'(A, 14I6)') 'SKIP: ', i, j, in, jn, 1+qr, is_f, ie_f, js_f, je_f, npy-1-qr-r+1, isu, ieu, jsu, jeu -!!$ cycle -!!$ endif + integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p + integer, intent(IN) :: is_c, ie_c, js_c, je_c + integer, intent(IN) :: istag, jstag + integer, intent(IN) :: npx, npy, npz, nestupdate + real, intent(INOUT) :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag,npz) + real, intent(INOUT) :: coarse_dat_recv(isd_p:ied_p+istag,jsd_p:jed_p+jstag,npz) + type(fv_atmos_type), intent(IN) :: parent_grid - val = 0. - do jnj=jn,jn+r-1 - do ini=in,in+r-1 - val = val + nest_dat(ini,jnj,k) - end do - end do + integer :: i, j, k - !var_coarse(i,j,k) = val/r**2. + if (istag == 0 .and. jstag == 0) then - !!! CLEANUP: Couldn't rarea and rdx and rdy be built into the weight arrays? - !!! Two-way updates do not yet have weights, tho - var_coarse(i,j,k) = val*parent_grid%gridstruct%rarea(i,j) + select case (nestupdate) + case (1,2,6,7,8) ! 
1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) + do k=1,npz + do j=js_c,je_c + do i=is_c,ie_c + var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rarea(i,j) end do end do end do @@ -1952,32 +2495,14 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, else if (istag == 0 .and. jstag > 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) -!$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) & -!$NO-MP private(in,jn,val) +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz - do j=jsu,jeu+1 - do i=isu,ieu - - in = ind_update(i,j,1) - jn = ind_update(i,j,2) - -!!$ if (in < max(1+qr,is_f) .or. in > min(npx-1-qr-r+1,ie_f) .or. & -!!$ jn < max(1+qr+s,js_f) .or. jn > min(npy-1-qr-s+1,je_f)) then -!!$ write(mpp_pe()+3000,'(A, 14I)') 'SKIP u: ', i, j, in, jn, 1+qr, is_f, ie_f, js_f, je_f, npy-1-qr-s+1, isu, ieu, jsu, jeu -!!$ cycle -!!$ endif - - val = 0. - do ini=in,in+r-1 - val = val + nest_dat(ini,jn,k) - end do - -! var_coarse(i,j,k) = val/r - var_coarse(i,j,k) = val*parent_grid%gridstruct%rdx(i,j) - + do j=js_c,je_c+1 + do i=is_c,ie_c + var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdx(i,j) end do end do end do @@ -1990,32 +2515,14 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, else if (istag > 0 .and. jstag == 0) then - select case (nestupdate) + select case (nestupdate) case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average -!$NO-MP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,ind_update,nest_dat,parent_grid,var_coarse,r) & -!$NO-MP private(in,jn,val) +!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz - do j=jsu,jeu - do i=isu,ieu+1 - - in = ind_update(i,j,1) - jn = ind_update(i,j,2) - -!!$ if (in < max(1+qr+s,is_f) .or. in > min(npx-1-qr-s+1,ie_f) .or. & -!!$ jn < max(1+qr,js_f) .or. jn > min(npy-1-qr-r+1,je_f)) then -!!$ write(mpp_pe()+3000,'(A, 14I6)') 'SKIP v: ', i, j, in, jn, 1+qr, is_f, ie_f, js_f, je_f, npx-1-qr-s+1, isu, ieu, jsu, jeu -!!$ cycle -!!$ endif - - val = 0. - do jnj=jn,jn+r-1 - val = val + nest_dat(in,jnj,k) - end do - -! var_coarse(i,j,k) = val/r - var_coarse(i,j,k) = val*parent_grid%gridstruct%rdy(i,j) - + do j=js_c,je_c + do i=is_c,ie_c+1 + var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdy(i,j) end do end do end do @@ -2029,11 +2536,93 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, ind_update, end if + end subroutine fill_var_coarse + + subroutine update_coarse_grid_mpp_vector(u_coarse, v_coarse, u_nest, v_nest, nest_domain, dx, dy, area, & + bd, isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n, & + isu, ieu, jsu, jeu, npx, npy, npz, istag_u, jstag_u, istag_v, jstag_v, & + r, nestupdate, upoff, nsponge, & + parent_proc, child_proc, parent_grid, nest_level, flags, gridtype) + + !This routine assumes the coarse and nested grids are properly + ! aligned, and that in particular for odd refinement ratios all + ! 
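fill_var_coarse completes the average begun in fill_coarse_data_send: the parent multiplies the received block sums by its precomputed reciprocal metrics (rarea for cell means, rdx/rdy for face means) rather than dividing point by point. A sketch with hypothetical numbers, assuming r = 3:

    program reciprocal_metric
      implicit none
      real :: val, area_coarse, rarea
      area_coarse = 9.0e8      ! a ~30 km x ~30 km coarse cell, in m**2
      rarea = 1./area_coarse   ! precomputed once in the grid structure
      val = 2.7e11             ! sum of var*area over the 3x3 nest cells
      print *, val*rarea       ! 300.0: the coarse-cell mean
    end program reciprocal_metric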
coarse-grid cells (faces) coincide with nested-grid cells (faces) + + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p, is_n, ie_n, js_n, je_n + integer, intent(IN) :: isu, ieu, jsu, jeu + integer, intent(IN) :: istag_u, jstag_u, istag_v, jstag_v + integer, intent(IN) :: npx, npy, npz, r, nestupdate, upoff, nsponge + real, intent(IN) :: u_nest(is_n:ie_n+istag_u,js_n:je_n+jstag_u,npz) + real, intent(INOUT) :: u_coarse(isd_p:ied_p+istag_u,jsd_p:jed_p+jstag_u,npz) + real, intent(IN) :: v_nest(is_n:ie_n+istag_v,js_n:je_n+jstag_v,npz) + real, intent(INOUT) :: v_coarse(isd_p:ied_p+istag_v,jsd_p:jed_p+jstag_v,npz) + real, intent(IN) :: area(bd%isd:bd%ied,bd%jsd:bd%jed) + real, intent(IN) :: dx(bd%isd:bd%ied,bd%jsd:bd%jed+1) + real, intent(IN) :: dy(bd%isd:bd%ied+1,bd%jsd:bd%jed) + logical, intent(IN) :: parent_proc, child_proc + type(fv_atmos_type), intent(INOUT) :: parent_grid + integer, intent(IN) :: nest_level + type(nest_domain_type), intent(INOUT) :: nest_domain + integer, optional, intent(IN) :: flags, gridtype + + integer :: s, qr + integer :: is_cx, ie_cx, js_cx, je_cx, is_fx, ie_fx, js_fx, je_fx + integer :: is_cy, ie_cy, js_cy, je_cy, is_fy, ie_fy, js_fy, je_fy + integer :: istart, istop, jstart, jstop, ishift, jshift, j, i, k + real :: val + real, allocatable, dimension(:,:,:) :: coarse_dat_send_u, coarse_dat_send_v + real, allocatable :: coarse_dat_recv_u(:,:,:), coarse_dat_recv_v(:,:,:) + integer :: position_x, position_y + + call get_vector_position(position_x, position_y, gridtype) + + call mpp_get_F2C_index(nest_domain, is_cx, ie_cx, js_cx, je_cx, is_fx, ie_fx, js_fx, je_fx, & + nest_level=nest_level, position=position_x) + call mpp_get_F2C_index(nest_domain, is_cy, ie_cy, js_cy, je_cy, is_fy, ie_fy, js_fy, je_fy, & + nest_level=nest_level, position=position_y) + if (child_proc) then + allocate(coarse_dat_send_u(is_cx:ie_cx, js_cx:je_cx,npz)) + allocate(coarse_dat_send_v(is_cy:ie_cy, js_cy:je_cy,npz)) + coarse_dat_send_u = -1200. + coarse_dat_send_v = -1200. endif - deallocate(nest_dat) - - end subroutine update_coarse_grid_mpp + allocate(coarse_dat_recv_u(isd_p:ied_p+istag_u, jsd_p:jed_p+jstag_u, npz)) + allocate(coarse_dat_recv_v(isd_p:ied_p+istag_v, jsd_p:jed_p+jstag_v, npz)) + + if (child_proc) then + call fill_coarse_data_send(coarse_dat_send_u, u_nest, dx, dy, area, & + bd, is_cx, ie_cx, js_cx, je_cx, is_fx, js_fx, is_n, ie_n, js_n, je_n, & + npx, npy, npz, istag_u, jstag_u, r, nestupdate) + call fill_coarse_data_send(coarse_dat_send_v, v_nest, dx, dy, area, & + bd, is_cy, ie_cy, js_cy, je_cy, is_fy, js_fy, is_n, ie_n, js_n, je_n, & + npx, npy, npz, istag_v, jstag_v, r, nestupdate) + endif + + call timing_on('COMM_TOTAL') + call mpp_update_nest_coarse(coarse_dat_send_u, coarse_dat_send_v, nest_domain, coarse_dat_recv_u, & + coarse_dat_recv_v, nest_level, flags, gridtype) + + if (allocated(coarse_dat_send_u)) deallocate(coarse_dat_send_u) + if (allocated(coarse_dat_send_v)) deallocate(coarse_dat_send_v) + + call timing_off('COMM_TOTAL') + + s = r/2 !rounds down (since r > 0) + qr = r*upoff + nsponge - s + + if (parent_proc .and. .not. (ie_cx < is_cx .or. je_cx < js_cx)) then + call fill_var_coarse(u_coarse, coarse_dat_recv_u, isd_p, ied_p, jsd_p, jed_p, & + is_cx, ie_cx, js_cx, je_cx, npx, npy, npz, istag_u, jstag_u, nestupdate, parent_grid) + endif + if (parent_proc .and. .not. (ie_cy < is_cy .or. 
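update_coarse_grid_mpp_vector pairs u and v in one exchange so mpp_update_nest_coarse can apply the proper vector treatment via the optional flags/gridtype arguments. A hypothetical D-grid call (u lives on north/south faces, so istag_u=0, jstag_u=1; v on east/west faces, so istag_v=1, jstag_v=0; DGRID_NE from mpp_domains_mod; the argument values are illustrative):

    call update_coarse_grid_mpp_vector(u_coarse, v_coarse, u_nest, v_nest, &
         nest_domain, dx, dy, area, bd, isd_p, ied_p, jsd_p, jed_p, &
         is_n, ie_n, js_n, je_n, isu, ieu, jsu, jeu, npx, npy, npz, &
         0, 1, 1, 0, r, nestupdate, upoff, nsponge, &
         parent_proc, child_proc, parent_grid, nest_level, gridtype=DGRID_NE)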
je_cy < js_cy)) then + call fill_var_coarse(v_coarse, coarse_dat_recv_v, isd_p, ied_p, jsd_p, jed_p, & + is_cy, ie_cy, js_cy, je_cy, npx, npy, npz, istag_v, jstag_v, nestupdate, parent_grid) + endif + + if (allocated(coarse_dat_recv_u)) deallocate(coarse_dat_recv_u) + if (allocated(coarse_dat_recv_v)) deallocate(coarse_dat_recv_v) + end subroutine update_coarse_grid_mpp_vector - end module boundary_mod diff --git a/model/dyn_core.F90 b/model/dyn_core.F90 index c48cde19a..86e49c8f3 100644 --- a/model/dyn_core.F90 +++ b/model/dyn_core.F90 @@ -21,7 +21,7 @@ module dyn_core_mod use constants_mod, only: rdgas, radius, cp_air, pi - use mpp_mod, only: mpp_pe + use mpp_mod, only: mpp_pe use mpp_domains_mod, only: CGRID_NE, DGRID_NE, mpp_get_boundary, mpp_update_domains, & domain2d use mpp_parameter_mod, only: CORNER @@ -30,7 +30,7 @@ module dyn_core_mod use fv_mp_mod, only: group_halo_update_type use sw_core_mod, only: c_sw, d_sw use a2b_edge_mod, only: a2b_ord2, a2b_ord4 - use nh_core_mod, only: Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d, nest_halo_nh + use nh_core_mod, only: Riem_Solver3, Riem_Solver_C, update_dz_c, update_dz_d, nh_bc use tp_core_mod, only: copy_corners use fv_timing_mod, only: timing_on, timing_off use fv_diagnostics_mod, only: prt_maxmin, fv_time, prt_mxm @@ -44,13 +44,18 @@ module dyn_core_mod #endif use diag_manager_mod, only: send_data use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_nest_type, fv_diag_type, & - fv_grid_bounds_type, R_GRID + fv_grid_bounds_type, R_GRID, fv_nest_BC_type_3d use boundary_mod, only: extrapolation_BC, nested_grid_BC_apply_intT + use fv_regional_mod, only: regional_boundary_update + use fv_regional_mod, only: current_time_in_seconds, bc_time_interval + use fv_regional_mod, only: delz_regBC ! TEMPORARY --- lmh #ifdef SW_DYNAMICS use test_cases_mod, only: test_case, case9_forcing1, case9_forcing2 #endif + use fv_regional_mod, only: dump_field, exch_uv, H_STAGGER, U_STAGGER, V_STAGGER + use fv_regional_mod, only: a_step, p_step, k_step, n_step implicit none private @@ -66,21 +71,18 @@ module dyn_core_mod real(kind=R_GRID), parameter :: cnst_0p20=0.20d0 real, allocatable :: rf(:) + integer:: k_rf = 0 logical:: RFF_initialized = .false. integer :: kmax=1 -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains !----------------------------------------------------------------------- ! 
dyn_core :: FV Lagrangian dynamics driver
!-----------------------------------------------------------------------
-
- subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, cappa, grav, hydrostatic, &
- u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
+
+ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_map, n_split, zvir, cp, akap, cappa, grav, hydrostatic, &
+ u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
 uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, &
 ks, gridstruct, flagstruct, neststruct, idiag, bd, domain, &
 init_step, i_pack, end_step, time_total)
@@ -88,7 +90,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,
 integer, intent(IN) :: npy
 integer, intent(IN) :: npz
 integer, intent(IN) :: ng, nq, sphum
- integer, intent(IN) :: n_split
+ integer, intent(IN) :: n_map, n_split
 real , intent(IN) :: bdt
 real , intent(IN) :: zvir, cp, akap, grav
 real , intent(IN) :: ptop
@@ -102,15 +104,15 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,
 real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz):: u ! D grid zonal wind (m/s)
 real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz):: v ! D grid meridional wind (m/s)
 real, intent(inout) :: w( bd%isd:,bd%jsd:,1:) ! vertical vel. (m/s)
- real, intent(inout) :: delz(bd%isd:,bd%jsd:,1:) ! delta-height (m, negative)
+ real, intent(inout) :: delz(bd%is:,bd%js:,1:) ! delta-height (m, negative)
 real, intent(inout) :: cappa(bd%isd:,bd%jsd:,1:) ! moist kappa
 real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K)
 real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal)
- real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) !
+ real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) !
 real, intent(in), optional:: time_total ! total time (seconds) since start

!-----------------------------------------------------------------------
-! Auxilliary pressure arrays:
+! Auxiliary pressure arrays:
! The 5 vars below can be re-computed from delp and ptop.
!-----------------------------------------------------------------------
! dyn_aux:
@@ -178,6 +180,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,
 real :: dt, dt2, rdt
 real :: d2_divg
 real :: k1k, rdg, dtmp, delt
+ real :: recip_k_split_n_split
+ real :: reg_bc_update_time
 logical :: last_step, remap_step
 logical used
 real :: split_timestep_bc
@@ -207,6 +211,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,
 beta = flagstruct%beta
 rdg = -rdgas / grav
 cv_air = cp_air - rdgas
+ recip_k_split_n_split=1./real(flagstruct%k_split*n_split)

! Indexes:
 iep1 = ie + 1
@@ -219,7 +224,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,

!$OMP parallel do default(none) shared(npz,dp_ref,ak,bk)
 do k=1,npz
- dp_ref(k) = ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.E5
+ dp_ref(k) = ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.E5
 enddo

!$OMP parallel do default(none) shared(isd,ied,jsd,jed,zs,phis,rgrav)
@@ -304,7 +309,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap,
 if ( flagstruct%fv_debug ) then
 if(is_master()) write(*,*) 'n_split loop, it=', it
 if ( .not.
flagstruct%hydrostatic ) & - call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) + call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) endif if (gridstruct%nested) then @@ -329,31 +334,38 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, call timing_off('COMM_TOTAL') if ( it==1 ) then - if (gridstruct%nested) then -!$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,gz,zs,delz) - do j=jsd,jed + if (gridstruct%bounded_domain) then +!$OMP parallel do default(none) shared(isd,ied,jsd,jed,gz,zs,npz) + do j=jsd,jed do i=isd,ied gz(i,j,npz+1) = zs(i,j) enddo - do k=npz,1,-1 - do i=isd,ied - gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k) - enddo enddo - enddo + if (gridstruct%nested) then + call gz_bc(gz,neststruct%delz_BC,bd,npx,npy,npz,split_timestep_BC, real(n_split*flagstruct%k_split)) + endif + if (gridstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + if (is_master() .and. flagstruct%fv_debug) print*, ' REG_BC_UPDATE_TIME: ', it, current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + call gz_bc(gz, delz_regBC,bd,npx,npy,npz,mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600.) + endif else -!$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zs,delz) - do j=js,je +!$OMP parallel do default(none) shared(is,ie,js,je,gz,zs,npz) + do j=js,je do i=is,ie gz(i,j,npz+1) = zs(i,j) enddo + enddo + endif + +!$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,delz) + do j=js,je do k=npz,1,-1 do i=is,ie gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k) enddo enddo enddo - endif call timing_on('COMM_TOTAL') call start_group_halo_update(i_pack(5), gz, domain) call timing_off('COMM_TOTAL') @@ -397,7 +409,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, else last_step = .false. endif - + call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(8), domain) if( .not. hydrostatic ) & @@ -432,11 +444,26 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, call nested_grid_BC_apply_intT(ptc, & 0, 0, npx, npy, npz, bd, split_timestep_BC+0.5, real(n_split*flagstruct%k_split), & neststruct%pt_BC, bctype=neststruct%nestbctype ) +#endif + endif + if (flagstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt + call regional_boundary_update(delpc, 'delp', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) +#ifndef SW_DYNAMICS + call regional_boundary_update(ptc, 'pt', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) #endif endif if ( hydrostatic ) then call geopk(ptop, pe, peln, delpc, pkc, gz, phis, ptc, q_con, pkz, npz, akap, .true., & - gridstruct%nested, .false., npx, npy, flagstruct%a2b_ord, bd) + gridstruct%bounded_domain, .false., npx, npy, flagstruct%a2b_ord, bd) else #ifndef SW_DYNAMICS if ( it == 1 ) then @@ -455,7 +482,19 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, enddo enddo - else + else + + if (gridstruct%bounded_domain) then + if (gridstruct%nested) then + call gz_bc(gz,neststruct%delz_BC,bd,npx,npy,npz,split_timestep_BC, real(n_split*flagstruct%k_split)) + endif + if (gridstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + if (is_master() .and. 
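The regional BC target time used here and in the updates below is the elapsed forecast time at which the boundary data are needed within the sub-stepping. A worked example with illustrative numbers:

    reg_bc_update_time = current_time_in_seconds + bdt*(n_map-1) + (it-1)*dt
    ! e.g. current_time_in_seconds = 3600., bdt = 450., n_map = 3, it = 2,
    !      n_split = 5  =>  dt = bdt/n_split = 90.
    !      => reg_bc_update_time = 3600. + 900. + 90. = 4590. s into the run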
flagstruct%fv_debug) print*, ' REG_BC_UPDATE_TIME: ', it, current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + call gz_bc(gz, delz_regBC,bd,npx,npy,npz,mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600.) + endif + endif + !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz) do k=1, npz+1 do j=jsd,jed @@ -464,6 +503,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, enddo enddo enddo + endif call timing_on('UPDATE_DZ_C') call update_dz_c(is, ie, js, je, npz, ng, dt2, dp_ref, zs, gridstruct%area, ut, vt, gz, ws3, & @@ -479,15 +519,22 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, call timing_off('Riem_Solver') if (gridstruct%nested) then - call nested_grid_BC_apply_intT(delz, & - 0, 0, npx, npy, npz, bd, split_timestep_BC+0.5, real(n_split*flagstruct%k_split), & - neststruct%delz_BC, bctype=neststruct%nestbctype ) + call nh_bc(ptop, grav, akap, cp, delpc, neststruct%delz_BC, ptc, phis, & +#ifdef USE_COND + q_con, & +#ifdef MOIST_CAPPA + cappa, & +#endif +#endif + pkc, gz, pk3, & + split_timestep_BC+0.5, real(n_split*flagstruct%k_split), & + npx, npy, npz, gridstruct%bounded_domain, .false., .false., .false., bd) + endif + if (flagstruct%regional) then - !Compute gz/pkc - !NOTE: nominally only need to compute quantities one out in the halo for p_grad_c - !(instead of entire halo) - call nest_halo_nh(ptop, grav, akap, cp, delpc, delz, ptc, phis, & + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt + call nh_bc(ptop, grav, akap, cp, delpc, delz_regBC, ptc, phis, & #ifdef USE_COND q_con, & #ifdef MOIST_CAPPA @@ -495,7 +542,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, #endif #endif pkc, gz, pk3, & - npx, npy, npz, gridstruct%nested, .false., .false., .false., bd) + mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600., & + npx, npy, npz, gridstruct%bounded_domain, .false., .false., .false., bd) endif @@ -527,37 +575,72 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, ! domain and of each processor element. We must either ! apply an interpolated BC, or extrapolate into the ! boundary halo - ! NOTE: + ! NOTE: !The update_domains calls for uc and vc need to go BEFORE the BCs to ensure cross-restart !bitwise-consistent solutions when doing the spatial extrapolation; should not make a !difference for interpolated BCs from the coarse grid. call nested_grid_BC_apply_intT(vc, & - 0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), & + 0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), & neststruct%vc_BC, bctype=neststruct%nestbctype ) call nested_grid_BC_apply_intT(uc, & 1, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), & neststruct%uc_BC, bctype=neststruct%nestbctype ) - !QUESTION: What to do with divgd in nested halo? call nested_grid_BC_apply_intT(divgd, & 1, 1, npx, npy, npz, bd, split_timestep_bc, real(n_split*flagstruct%k_split), & neststruct%divg_BC, bctype=neststruct%nestbctype ) -!!$ if (is == 1 .and. js == 1) then -!!$ do j=jsd,5 -!!$ write(mpp_pe()+2000,*) j, divg(isd:5,j,1) -!!$ endif end if - if ( gridstruct%nested .and. 
flagstruct%inline_q ) then + if (flagstruct%regional) then + + !call exch_uv(domain, bd, npz, vc, uc) + call mpp_update_domains(uc, vc, domain, gridtype=CGRID_NE) + + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(0.5+(it-1))*dt + call regional_boundary_update(vc, 'vc', & + isd, ied, jsd, jed+1, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + call regional_boundary_update(uc, 'uc', & + isd, ied+1, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + call mpp_update_domains(uc, vc, domain, gridtype=CGRID_NE) +!!! Currently divgd is always 0.0 in the regional domain boundary area. + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + call regional_boundary_update(divgd, 'divgd', & + isd, ied+1, jsd, jed+1, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + endif + + if ( flagstruct%inline_q ) then + if ( gridstruct%nested ) then do iq=1,nq call nested_grid_BC_apply_intT(q(isd:ied,jsd:jed,:,iq), & 0, 0, npx, npy, npz, bd, split_timestep_BC+1, real(n_split*flagstruct%k_split), & neststruct%q_BC(iq), bctype=neststruct%nestbctype ) end do endif + if (flagstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + do iq=1,nq + call regional_boundary_update(q(:,:,:,iq), 'q', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + enddo + endif + + endif + call timing_on('d_sw') !$OMP parallel do default(none) shared(npz,flagstruct,nord_v,pfull,damp_vt,hydrostatic,last_step, & @@ -608,7 +691,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, nord_w=0; damp_w = d2_divg if ( flagstruct%do_vort_damp ) then ! damping on delp and vorticity: - nord_v(k)=0; + nord_v(k)=0; #ifndef HIWPP damp_vt(k) = 0.5*d2_divg #endif @@ -618,7 +701,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, nord_k=0; d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k2) nord_w=0; damp_w = d2_divg if ( flagstruct%do_vort_damp ) then - nord_v(k)=0; + nord_v(k)=0; #ifndef HIWPP damp_vt(k) = 0.5*d2_divg #endif @@ -694,6 +777,10 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, endif enddo ! end openMP k-loop + if (flagstruct%regional) then + call mpp_update_domains(uc, vc, domain, gridtype=CGRID_NE) + call mpp_update_domains(u , v , domain, gridtype=DGRID_NE) + endif call timing_off('d_sw') if( flagstruct%fill_dp ) call mix_dp(hydrostatic, w, delp, pt, npz, ak, bk, .false., flagstruct%fv_debug, bd) @@ -736,7 +823,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, call timing_off('COMM_TOTAL') if ( flagstruct%fv_debug ) then if ( .not. 
flagstruct%hydrostatic ) & - call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) + call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) endif !Want to move this block into the hydro/nonhydro branch above and merge the two if structures @@ -754,24 +841,48 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, #ifdef USE_COND call nested_grid_BC_apply_intT(q_con, & 0, 0, npx, npy, npz, bd, split_timestep_BC+1, real(n_split*flagstruct%k_split), & - neststruct%q_con_BC, bctype=neststruct%nestbctype ) + neststruct%q_con_BC, bctype=neststruct%nestbctype ) #endif #endif end if + if (flagstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+(it-1)*dt + call regional_boundary_update(delp, 'delp', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) +#ifndef SW_DYNAMICS + call regional_boundary_update(pt, 'pt', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + +#ifdef USE_COND + call regional_boundary_update(q_con, 'q_con', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) +#endif + +#endif + endif if ( hydrostatic ) then call geopk(ptop, pe, peln, delp, pkc, gz, phis, pt, q_con, pkz, npz, akap, .false., & - gridstruct%nested, .true., npx, npy, flagstruct%a2b_ord, bd) + gridstruct%bounded_domain, .true., npx, npy, flagstruct%a2b_ord, bd) else #ifndef SW_DYNAMICS call timing_on('UPDATE_DZ') call update_dz_d(nord_v, damp_vt, flagstruct%hord_tm, is, ie, js, je, npz, ng, npx, npy, gridstruct%area, & - gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, delz, ws, rdt, gridstruct, bd) + gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, ws, rdt, gridstruct, bd) call timing_off('UPDATE_DZ') if ( flagstruct%fv_debug ) then if ( .not. flagstruct%hydrostatic ) & - call prt_mxm('delz updated', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) + call prt_mxm('delz updated', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) endif if (idiag%id_ws>0 .and. 
last_step) then @@ -781,7 +892,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, - + call timing_on('Riem_Solver') call Riem_Solver3(flagstruct%m_split, dt, is, ie, js, je, npz, ng, & @@ -808,22 +919,35 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, else call pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp) endif - if (gridstruct%nested) then - call nested_grid_BC_apply_intT(delz, & - 0, 0, npx, npy, npz, bd, split_timestep_BC+1., real(n_split*flagstruct%k_split), & - neststruct%delz_BC, bctype=neststruct%nestbctype ) - - !Compute gz/pkc/pk3; note that now pkc should be nonhydro pert'n pressure - call nest_halo_nh(ptop, grav, akap, cp, delp, delz, pt, phis, & + + if (gridstruct%nested) then + call nh_bc(ptop, grav, akap, cp, delp, neststruct%delz_BC, pt, phis, & +#ifdef USE_COND + q_con, & +#ifdef MOIST_CAPPA + cappa, & +#endif +#endif + pkc, gz, pk3, & + split_timestep_BC+1., real(n_split*flagstruct%k_split), & + npx, npy, npz, gridstruct%bounded_domain, .true., .true., .true., bd) + endif + + if (flagstruct%regional) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+it*dt + call nh_bc(ptop, grav, akap, cp, delp, delz_regBC, pt, phis, & #ifdef USE_COND q_con, & #ifdef MOIST_CAPPA cappa, & #endif #endif - pkc, gz, pk3, npx, npy, npz, gridstruct%nested, .true., .true., .true., bd) + pkc, gz, pk3, & + mod(reg_bc_update_time,bc_time_interval*3600.), bc_time_interval*3600., & + npx, npy, npz, gridstruct%bounded_domain, .true., .true., .true., bd) + + endif - endif call timing_on('COMM_TOTAL') call complete_group_halo_update(i_pack(4), domain) call timing_off('COMM_TOTAL') @@ -837,9 +961,9 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, enddo if ( gridstruct%square_domain ) then call timing_on('COMM_TOTAL') - call complete_group_halo_update(i_pack(5), domain) + call complete_group_halo_update(i_pack(5), domain) call timing_off('COMM_TOTAL') - endif + endif #endif SW_DYNAMICS endif ! end hydro check @@ -902,11 +1026,6 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, endif call timing_off('PG_D') -! Inline Rayleigh friction here? -#ifdef USE_SUPER_RAY - if( flagstruct%tau > 0. ) & - call Rayleigh_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, ptop, hydrostatic, flagstruct%rf_cutoff, bd) -#endif !------------------------------------------------------------------------------------------------------- if ( flagstruct%breed_vortex_inline ) then @@ -936,7 +1055,7 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, !------------------------------------------------------------------------------------------------------- call timing_on('COMM_TOTAL') - if( it==n_split .and. gridstruct%grid_type<4 .and. .not. gridstruct%nested) then + if( it==n_split .and. gridstruct%grid_type<4 .and. .not. gridstruct%bounded_domain) then ! Prevent accumulation of rounding errors at overlapped domain edges: call mpp_get_boundary(u, v, domain, ebuffery=ebuffer, & nbufferx=nbuffer, gridtype=DGRID_NE ) @@ -1033,6 +1152,33 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, end if + if (flagstruct%regional) then + +#ifndef SW_DYNAMICS + if (.not. 
hydrostatic) then + reg_bc_update_time=current_time_in_seconds+bdt*(n_map-1)+it*dt + call regional_boundary_update(w, 'w', & + isd, ied, jsd, jed, ubound(w,3), & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + endif +#endif SW_DYNAMICS + + call regional_boundary_update(u, 'u', & + isd, ied, jsd, jed+1, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + call regional_boundary_update(v, 'v', & + isd, ied+1, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + + call mpp_update_domains(u, v, domain, gridtype=DGRID_NE) + end if + !----------------------------------------------------- enddo ! time split loop !----------------------------------------------------- @@ -1081,8 +1227,8 @@ subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, do k=1,n_con delt = abs(bdt*flagstruct%delt_max) ! Sponge layers: -! if ( k == 1 ) delt = 2.0*delt -! if ( k == 2 ) delt = 1.5*delt + if ( k == 1 ) delt = 0.1*delt + if ( k == 2 ) delt = 0.5*delt do j=js,je do i=is,ie #ifdef MOIST_CAPPA @@ -1313,7 +1459,7 @@ subroutine adv_pe(ua, va, pem, om, gridstruct, bd, npx, npy, npz, ng) do j=js,je do i=is,ie do n=1,3 - v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j) + v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j) enddo enddo enddo @@ -1437,9 +1583,9 @@ subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, n real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! perturbation pressure real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! p**kappa real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! g * h -real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz) - type(fv_grid_type), intent(INOUT), target :: gridstruct +type(fv_grid_type), intent(INOUT), target :: gridstruct ! Local: real wk1(bd%isd:bd%ied, bd%jsd:bd%jed) real wk(bd%is: bd%ie+1,bd%js: bd%je+1) @@ -1456,26 +1602,24 @@ subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, n ied = bd%ied jsd = bd%jsd jed = bd%jed - + if ( use_logp ) then top_value = peln1 else top_value = ptk endif -!Remember that not all compilers set pp to zero by default -!$OMP parallel do default(none) shared(is,ie,js,je,pp,pk,top_value) -do j=js,je+1 - do i=is,ie+1 - pp(i,j,1) = 0. - pk(i,j,1) = top_value - enddo -enddo - -!$OMP parallel do default(none) shared(isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) & +!$OMP parallel do default(none) shared(top_value,isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) & !$OMP private(wk1) do k=1,npz+1 - if ( k/=1 ) then + if ( k==1 ) then + do j=js,je+1 + do i=is,ie+1 + pp(i,j,1) = 0. + pk(i,j,1) = top_value + enddo + enddo + else call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.) call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.) endif @@ -1537,9 +1681,9 @@ subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, n real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! perturbation pressure real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! p**kappa real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! g * h -! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! 
real, intent(inout) :: dv(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) -real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz) type(fv_grid_type), intent(INOUT), target :: gridstruct ! Local: @@ -1558,7 +1702,7 @@ subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, n ied = bd%ied jsd = bd%jsd jed = bd%jed - + if ( use_logp ) then top_value = peln1 else @@ -1643,7 +1787,7 @@ end subroutine split_p_grad subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, & - ptop, hydrostatic, a2b_ord, d_ext) + ptop, hydrostatic, a2b_ord, d_ext) integer, intent(IN) :: ng, npx, npy, npz, a2b_ord real, intent(IN) :: dt, ptop, d_ext @@ -1653,7 +1797,7 @@ subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, np real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed ,npz) -real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) type(fv_grid_type), intent(INOUT), target :: gridstruct ! Local: @@ -1786,7 +1930,7 @@ subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, real, intent(in):: divg2(bd%is:bd%ie+1,bd%js:bd%je+1) real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1) -real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) type(fv_grid_type), intent(INOUT), target :: gridstruct @@ -1839,7 +1983,7 @@ subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, enddo !$OMP parallel do default(none) shared(npz,is,ie,js,je,pk,u,beta,gz,divg2,alpha, & -!$OMP gridstruct,v,dt,du,dv) & +!$OMP gridstruct,v,dt,du,dv) & !$OMP private(wk) do k=1,npz @@ -1944,14 +2088,14 @@ subroutine mix_dp(hydrostatic, w, delp, pt, km, ak, bk, CG, fv_debug, bd) ip = ip + 1 endif enddo - if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip - ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip + if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip + ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip 1000 continue end subroutine mix_dp - subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, nested, computehalo, npx, npy, a2b_ord, bd) + subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, bounded_domain, computehalo, npx, npy, a2b_ord, bd) integer, intent(IN) :: km, npx, npy, a2b_ord real , intent(IN) :: akap, ptop @@ -1959,7 +2103,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, real , intent(IN) :: hs(bd%isd:bd%ied,bd%jsd:bd%jed) real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp real, intent(IN), dimension(bd%isd:,bd%jsd:,1:):: q_con - logical, intent(IN) :: CG, nested, computehalo + logical, intent(IN) :: CG, bounded_domain, computehalo ! 
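geopk now keys its extended index ranges off bounded_domain rather than the nested flag alone. The intent of the bounds logic that follows, sketched with the routine's own names (an annotation, not a second implementation): D-grid calls and 4th-order averaging need pressures two rows into the halo, and on a bounded domain whose edge coincides with the model boundary, computehalo widens the range to the full data domain:

    ! ifirst = is-2; ilast = ie+2; jfirst = js-2; jlast = je+2
    ! if (bounded_domain .and. computehalo) then
    !   if (is == 1)     ifirst = isd   ! west edge: fill the whole halo
    !   if (ie == npx-1) ilast  = ied   ! east edge
    !   if (js == 1)     jfirst = jsd   ! south edge
    !   if (je == npy-1) jlast  = jed   ! north edge
    ! endif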
!OUTPUT PARAMETERS real, intent(OUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km+1):: gz, pk real, intent(OUT) :: pe(bd%is-1:bd%ie+1,km+1,bd%js-1:bd%je+1) @@ -1988,7 +2132,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, jsd = bd%jsd jed = bd%jed - if ( (.not. CG .and. a2b_ord==4) .or. (nested .and. .not. CG) ) then ! D-Grid + if ( (.not. CG .and. a2b_ord==4) .or. (bounded_domain .and. .not. CG) ) then ! D-Grid ifirst = is-2; ilast = ie+2 jfirst = js-2; jlast = je+2 else @@ -1996,7 +2140,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, jfirst = js-1; jlast = je+1 endif - if (nested .and. computehalo) then + if (bounded_domain .and. computehalo) then if (is == 1) ifirst = isd if (ie == npx-1) ilast = ied if (js == 1) jfirst = jsd @@ -2027,7 +2171,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, #endif if( j>(js-2) .and. j<(je+2) ) then - do i=max(ifirst,is-1), min(ilast,ie+1) + do i=max(ifirst,is-1), min(ilast,ie+1) pe(i,1,j) = ptop enddo endif @@ -2037,7 +2181,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, do i=ifirst, ilast p1d(i) = p1d(i) + delp(i,j,k-1) logp(i) = log(p1d(i)) - pk(i,j,k) = exp( akap*logp(i) ) + pk(i,j,k) = exp( akap*logp(i) ) #ifdef USE_COND peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) pkg(i,k) = exp( akap*log(peg(i,k)) ) @@ -2045,7 +2189,7 @@ subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, enddo if( j>(js-2) .and. j<(je+2) ) then - do i=max(ifirst,is-1), min(ilast,ie+1) + do i=max(ifirst,is-1), min(ilast,ie+1) pe(i,k,j) = p1d(i) enddo if( j>=js .and. j<=je) then @@ -2118,7 +2262,7 @@ subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd) ! rarea => gridstruct%rarea ! del6_u => gridstruct%del6_u ! del6_v => gridstruct%del6_v - + ! sw_corner => gridstruct%sw_corner ! nw_corner => gridstruct%nw_corner ! se_corner => gridstruct%se_corner @@ -2161,7 +2305,7 @@ subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd) q(1,npy,k) = q(1,je,k) endif - if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, & + if(nt>0 .and. (.not. gridstruct%bounded_domain)) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner ) do j=js-nt,je+nt do i=is-nt,ie+1+nt @@ -2173,7 +2317,7 @@ subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd) enddo enddo - if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, & + if(nt>0 .and. (.not. 
gridstruct%bounded_domain)) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nt,je+1+nt do i=is-nt,ie+nt @@ -2290,5 +2434,92 @@ subroutine Rayleigh_fast(dt, npx, npy, npz, pfull, tau, u, v, w, & end subroutine Rayleigh_fast + subroutine gz_bc(gz,delzBC,bd,npx,npy,npz,step,split) + + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: npx, npy, npz + real, intent(INOUT) :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1) + type(fv_nest_BC_type_3d), intent(IN) :: delzBC + real, intent(IN) :: step, split + + real :: a1, a2 + integer i, j, k + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + integer :: istart, iend + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + a1 = (split-step)/split + a2 = step/split + + if (is == 1) then +!$OMP parallel do default(none) shared(jsd,jed,npz,isd,delzBC,gz,a1,a2) + do j=jsd,jed + do k=npz,1,-1 + do i=isd,0 + gz(i,j,k) = gz(i,j,k+1) - (delzBC%west_t1(i,j,k)*a2 + delzBC%west_t0(i,j,k)*a1) + enddo + enddo + enddo + endif + + if (ie == npx-1) then +!$OMP parallel do default(none) shared(jsd,jed,npz,npx,ied,delzBC,gz,a1,a2) + do j=jsd,jed + do k=npz,1,-1 + do i=npx,ied + gz(i,j,k) = gz(i,j,k+1) - (delzBC%east_t1(i,j,k)*a2 + delzBC%east_t0(i,j,k)*a1) + enddo + enddo + enddo + endif + + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then +!$OMP parallel do default(none) shared(jsd,npz,istart,iend,delzBC,gz,a1,a2) + do j=jsd,0 + do k=npz,1,-1 + do i=istart,iend + gz(i,j,k) = gz(i,j,k+1) - (delzBC%south_t1(i,j,k)*a2 + delzBC%south_t0(i,j,k)*a1) + !if (gz(i,j,k) <= gz(i,j,k+1) .or. abs(gz(i,j,k)) > 1.e6) print*, ' BAD GZ (bc): ', i, j, k, gz(i,j,k:k+1), delzBC%west_t1(i,j,k), delzBC%west_t0(i,j,k) + enddo + enddo + enddo + endif + + if (je == npy-1) then +!$OMP parallel do default(none) shared(npy,jed,npz,istart,iend,delzBC,gz,a1,a2) + do j=npy,jed + do k=npz,1,-1 + do i=istart,iend + gz(i,j,k) = gz(i,j,k+1) - (delzBC%north_t1(i,j,k)*a2 + delzBC%north_t0(i,j,k)*a1) + !if (gz(i,j,k) <= gz(i,j,k+1) .or. 
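gz_bc fills the boundary rows of gz by blending the two delz BC time levels with weights a1 = (split-step)/split and a2 = step/split, then stacking the result hydrostatically from the surface. A single-column sketch (illustrative names):

    gz(npz+1) = zs                             ! surface height
    do k = npz, 1, -1
      delz_bc = a1*delz_t0(k) + a2*delz_t1(k)  ! time-interpolated layer thickness
      gz(k) = gz(k+1) - delz_bc                ! delz < 0, so heights increase upward
    enddo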
!   abs(gz(i,j,k)) > 1.e6) print*, ' BAD GZ (bc): ', i, j, k, gz(i,j,k:k+1), delzBC%north_t1(i,j,k), delzBC%north_t0(i,j,k)
+ enddo
+ enddo
+ enddo
+ endif
+
+ end subroutine gz_bc
+
 end module dyn_core_mod
diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90
index 155f2cec3..e112817a6 100644
--- a/model/fv_arrays.F90
+++ b/model/fv_arrays.F90
@@ -24,7 +24,6 @@ module fv_arrays_mod
 use fms_io_mod, only: restart_file_type
 use time_manager_mod, only: time_type
 use horiz_interp_type_mod, only: horiz_interp_type
- use mpp_domains_mod, only: nest_domain_type
 use mpp_mod, only: mpp_broadcast
 use platform_mod, only: r8_kind
 public
@@ -53,16 +52,20 @@ module fv_arrays_mod
 id_tq, id_rh, id_c15, id_c25, id_c35, id_c45, &
 id_f15, id_f25, id_f35, id_f45, id_ctp, &
 id_ppt, id_ts, id_tb, id_ctt, id_pmask, id_pmaskv2, &
- id_delp, id_delz, id_zratio, id_ws, id_iw, id_lw, &
+ id_delp, id_delz, id_ws, id_iw, id_lw, &
 id_pfhy, id_pfnh, &
- id_qn, id_qn200, id_qn500, id_qn850, id_qp, id_mdt, id_qdt, id_aam, id_amdt, &
- id_acly, id_acl, id_acl2, id_dbz, id_maxdbz, id_basedbz, id_dbz4km
+ id_qn, id_qn200, id_qn500, id_qn850, id_qp, id_mdt, &
+ id_qdt, id_aam, id_amdt, &
+ id_acly, id_acl, id_acl2, &
+ id_dbz, id_maxdbz, id_basedbz, id_dbz4km, id_dbztop, id_dbz_m10C, &
+ id_ctz, id_w1km, id_wmaxup, id_wmaxdn, id_cape, id_cin

! Selected p-level fields from 3D variables:
 integer :: id_vort200, id_vort500, id_w500, id_w700
- integer :: id_vort850, id_w850, id_x850, id_srh, id_srh25, id_srh01, &
+ integer :: id_vort850, id_w850, id_x850, id_srh25, &
 id_uh03, id_uh25, id_theta_e, &
 id_w200, id_s200, id_sl12, id_sl13, id_w5km, id_rain5km, id_w2500m
+ integer :: id_srh1, id_srh3, id_ustm, id_vstm

! NGGPS 31-level diag
 integer, allocatable :: id_u(:), id_v(:), id_t(:), id_h(:), id_q(:), id_omg(:)
@@ -70,11 +73,13 @@ module fv_arrays_mod
! IPCC diag
 integer :: id_rh10, id_rh50, id_rh100, id_rh200, id_rh250, id_rh300, &
 id_rh500, id_rh700, id_rh850, id_rh925, id_rh1000
+ integer :: id_dp10, id_dp50, id_dp100, id_dp200, id_dp250, id_dp300, &
+ id_dp500, id_dp700, id_dp850, id_dp925, id_dp1000

 integer :: id_rh1000_cmip, id_rh925_cmip, id_rh850_cmip, id_rh700_cmip, id_rh500_cmip, &
 id_rh300_cmip, id_rh250_cmip, id_rh100_cmip, id_rh50_cmip, id_rh10_cmip

- integer :: id_hght
+ integer :: id_hght3d, id_any_hght
 integer :: id_u100m, id_v100m, id_w100m

! For initial conditions:
@@ -91,6 +96,19 @@ module fv_arrays_mod
 real, allocatable :: zxg(:,:)
 real, allocatable :: pt1(:)

+ integer :: id_prer, id_prei, id_pres, id_preg
+ integer :: id_qv_dt_gfdlmp, id_T_dt_gfdlmp, id_ql_dt_gfdlmp, id_qi_dt_gfdlmp
+ integer :: id_u_dt_gfdlmp, id_v_dt_gfdlmp
+ integer :: id_t_dt_phys, id_qv_dt_phys, id_ql_dt_phys, id_qi_dt_phys, id_u_dt_phys, id_v_dt_phys
+ integer :: id_intqv, id_intql, id_intqi, id_intqr, id_intqs, id_intqg
+
+! ESM/CM 3-D diagnostics
+ integer :: id_uq, id_vq, id_wq, id_iuq, id_ivq, id_iwq, & ! moisture flux & vertical integral
+ id_ut, id_vt, id_wt, id_iut, id_ivt, id_iwt, & ! heat flux
+ id_uu, id_uv, id_vv, id_ww, & ! momentum flux
+ id_iuu, id_iuv, id_iuw, id_ivv, id_ivw, id_iww ! vertical integral of momentum flux
+
+ integer :: id_uw, id_vw, id_hw, id_qvw, id_qlw, id_qiw, id_o3w

 logical :: initialized = .false.
 real sphum, liq_wat, ice_wat ! GFDL physics
@@ -115,7 +133,7 @@ module fv_arrays_mod
 real, allocatable, dimension(:,:,:) :: grid, agrid
 real, allocatable, dimension(:,:) :: area, area_c
- real, allocatable, dimension(:,:) :: rarea, rarea_c
+ real, allocatable, dimension(:,:) :: rarea, rarea_c
 real, allocatable, dimension(:,:) :: sina, cosa
 real, allocatable, dimension(:,:,:) :: e1,e2
@@ -196,9 +214,9 @@ module fv_arrays_mod
 real, allocatable :: fC(:,:), f0(:,:)

 integer, dimension(:,:,:), allocatable :: iinta, jinta, iintb, jintb
-
+
 !Scalar data
-
+
 integer :: npx_g, npy_g, ntiles_g ! global domain

 real(kind=R_GRID) :: global_area
@@ -209,7 +227,7 @@ module fv_arrays_mod
 real :: acapN, acapS
 real :: globalarea ! total Global Area
-
+
 logical :: latlon = .false.
 logical :: cubed_sphere = .false.
 logical :: have_south_pole = .false.
@@ -221,8 +239,15 @@ module fv_arrays_mod

 !! Convenience pointers

- integer, pointer :: grid_type
- logical, pointer :: nested
+ integer, pointer :: grid_type !< Which type of grid to use. If 0, the equidistant gnomonic
+ !< cubed-sphere will be used. If 4, a doubly-periodic
+ !< f-plane cartesian grid will be used. If -1, the grid is read
+ !< from INPUT/grid_spec.nc. Values 2, 3, 5, 6, and 7 are not
+ !< supported and will likely not run. The default value is 0.
+
+ logical, pointer :: nested !< Whether this is a nested grid. .false. by default.
+ logical, pointer :: regional !< Is this a (stand-alone) limited area regional domain?
+ logical :: bounded_domain !< Is this a regional or nested domain?

 end type fv_grid_type
@@ -254,12 +279,12 @@ module fv_arrays_mod
! -> moved to grid_tools

! Momentum (or KE) options:
- integer :: hord_mt = 9 ! the best option for Gnomonic grids
+ integer :: hord_mt = 9 ! the best option for Gnomonic grids
 integer :: kord_mt = 8 ! vertical mapping option for (u,v)
 integer :: kord_wz = 8 ! vertical mapping option for w

! Vorticity & w transport options:
- integer :: hord_vt = 9 ! 10 not recommended (noisy case-5)
+ integer :: hord_vt = 9 ! 10 not recommended (noisy case-5)

! Heat & air mass (delp) transport options:
 integer :: hord_tm = 9 ! virtual potential temperature
@@ -267,14 +292,14 @@ module fv_arrays_mod
 integer :: kord_tm =-8 !

! Tracer transport options:
- integer :: hord_tr = 12 !11: PPM mono constraint (Lin 2004); fast
+ integer :: hord_tr = 12 !11: PPM mono constraint (Lin 2004); fast
 !12: Huynh 2nd constraint (Lin 2004) +
 ! positive definite (Lin & Rood 1996); slower
 !>12: positive definite only (Lin & Rood 1996); fastest
- integer :: kord_tr = 8 !
- real :: scale_z = 0. ! diff_z = scale_z**2 * 0.25
- real :: w_max = 75. ! max w (m/s) threshold for hydostatiic adjustment
- real :: z_min = 0.05 ! min ratio of dz_nonhydrostatic/dz_hydrostatic
+ integer :: kord_tr = 8 !
+ real :: scale_z = 0. ! diff_z = scale_z**2 * 0.25 (only used for Riemann solver)
+ real :: w_max = 75. ! max w (m/s) threshold for hydrostatic adjustment (not used)
+ real :: z_min = 0.05 ! min ratio of dz_nonhydrostatic/dz_hydrostatic (not used?)

 integer :: nord=1 ! 0: del-2, 1: del-4, 2: del-6, 3: del-8 divergence damping
! Alternative setting for high-res: nord=1; d4_bg = 0.075
@@ -285,7 +310,7 @@ module fv_arrays_mod
 real :: d4_bg = 0.16 ! coefficient for background del-4(6) divergence damping
! for stability, d4_bg must be <=0.16 if nord=3
 real :: vtdm4 = 0.0 ! coefficient for del-4 vorticity damping
- real :: trdm2 = 0.0 ! coefficient for del-2 tracer damping
+ real :: trdm2 = 0.0 ! coefficient for del-2 tracer damping !! WARNING !! buggy
 real :: d2_bg_k1 = 4. !
factor for d2_bg (k=1) real :: d2_bg_k2 = 2. ! factor for d2_bg (k=2) real :: d2_divg_max_k1 = 0.15 ! d2_divg max value (k=1) @@ -299,39 +324,40 @@ module fv_arrays_mod logical :: full_zs_filter=.false.! perform full filtering of topography (in external_ic only ) logical :: consv_am = .false. ! Apply Angular Momentum Correction (to zonal wind component) - logical :: do_sat_adj= .false. ! - logical :: do_f3d = .false. ! + logical :: do_sat_adj= .false. ! + logical :: do_f3d = .false. ! logical :: no_dycore = .false. ! skip the dycore - logical :: convert_ke = .false. - logical :: do_vort_damp = .false. - logical :: use_old_omega = .true. + logical :: convert_ke = .false. + logical :: do_vort_damp = .false. + logical :: use_old_omega = .true. ! PG off centering: real :: beta = 0.0 ! 0.5 is "neutral" but it may not be stable #ifdef SW_DYNAMICS integer :: n_sponge = 0 ! Number of sponge layers at the top of the atmosphere - real :: d_ext = 0. + real :: d_ext = 0. integer :: nwat = 0 ! Number of water species - logical :: warm_start = .false. + logical :: warm_start = .false. logical :: inline_q = .true. logical :: adiabatic = .true. ! Run without physics (full or idealized). #else integer :: n_sponge = 1 ! Number of sponge layers at the top of the atmosphere real :: d_ext = 0.02 ! External model damping (was 0.02) integer :: nwat = 3 ! Number of water species - logical :: warm_start = .true. + logical :: warm_start = .true. ! Set to .F. if cold_start is desired (including terrain generation) logical :: inline_q = .false. logical :: adiabatic = .false. ! Run without physics (full or idealized). #endif !----------------------------------------------------------- -! Grid shifting, rotation, and the Schmidt transformation: +! Grid shifting, rotation, and cube transformations: !----------------------------------------------------------- real :: shift_fac = 18. ! shift west by 180/shift_fac = 10 degrees -! Defaults for Schmidt transformation: - logical :: do_schmidt = .false. +! Defaults for Schmidt/cube transformation: + logical :: do_schmidt = .false. + logical :: do_cube_transform = .false. real(kind=R_GRID) :: stretch_fac = 1. ! No stretching - real(kind=R_GRID) :: target_lat = -90. ! -90: no grid rotation - real(kind=R_GRID) :: target_lon = 0. ! + real(kind=R_GRID) :: target_lat = -90. ! -90: no grid rotation + real(kind=R_GRID) :: target_lon = 0. ! !----------------------------------------------------------------------------------------------- ! Example #1a: US regional climate simulation, center located over Oklahoma city: (262.4, 35.4) @@ -343,7 +369,7 @@ module fv_arrays_mod ! stretching factor: 5-10 !----------------------------------------------------------------------------------------------- - logical :: reset_eta = .false. + logical :: reset_eta = .false. real :: p_fac = 0.05 real :: a_imp = 0.75 ! Off center parameter for the implicit solver [0.5,1.0] integer :: n_split = 0 ! Number of time splits for the lagrangian dynamics @@ -364,9 +390,9 @@ module fv_arrays_mod ! C2000: ~5 90 18 (5 s) 2 !=================================================== ! The nonhydrostatic algorithm is described in Lin 2006, QJ, (submitted) -! C2000 should easily scale to at least 6 * 100 * 100 = 60,000 CPUs +! C2000 should easily scale to at least 6 * 100 * 100 = 60,000 CPUs ! For a 1024 system: try 6 x 13 * 13 = 1014 CPUs - + integer :: q_split = 0 ! Number of time splits for tracer transport integer :: print_freq = 0 ! Print max/min of selected fields @@ -374,23 +400,33 @@ module fv_arrays_mod ! 
positive n: every n hours ! negative n: every time step + logical :: write_3d_diags = .true. !whether to write large 3d outputs + !on this grid !------------------------------------------ ! Model Domain parameters !------------------------------------------ integer :: npx ! Number of Grid Points in X- dir integer :: npy ! Number of Grid Points in Y- dir integer :: npz ! Number of Vertical Levels +#ifdef USE_GFSL63 + character(24) :: npz_type = 'gfs' ! Option for selecting vertical level setup (gfs levels, when available, by default) +#else + character(24) :: npz_type = '' ! Option for selecting vertical level setup (empty by default) +#endif integer :: npz_rst = 0 ! Original Vertical Levels (in the restart) ! 0: no change (default) integer :: ncnst = 0 ! Number of advected consituents integer :: pnats = 0 ! Number of non-advected consituents integer :: dnats = 0 ! Number of non-advected consituents (as seen by dynamics) - integer :: ntiles = 1 ! Number or tiles that make up the Grid + integer :: dnrts = -1 ! Number of non-remapped consituents. Only makes sense for dnrts <= dnats + integer :: ntiles = 1 ! Number or tiles that make up the Grid integer :: ndims = 2 ! Lat-Lon Dims for Grid in Radians integer :: nf_omega = 1 ! Filter omega "nf_omega" times integer :: fv_sg_adj = -1 ! Perform grid-scale dry adjustment if > 0 ! Relaxzation time scale (sec) if positive + real :: sg_cutoff = -1 ! cutoff level for fv_sg_adj (2dz filter; overrides n_sponge) integer :: na_init = 0 ! Perform adiabatic initialization + logical :: nudge_dz = .false. ! Whether to nudge delz in the adiabatic initialization real :: p_ref = 1.E5 real :: dry_mass = 98290. integer :: nt_prog = 0 @@ -412,6 +448,7 @@ module fv_arrays_mod logical :: fill = .false. logical :: fill_dp = .false. logical :: fill_wz = .false. + logical :: fill_gfs = .true. ! default behavior logical :: check_negative = .false. logical :: non_ortho = .true. logical :: moist_phys = .true. ! Run with moist physics @@ -445,17 +482,20 @@ module fv_arrays_mod !-------------------------------------------------------------------------------------- logical :: nudge = .false. ! Perform nudging logical :: nudge_ic = .false. ! Perform nudging on IC - logical :: ncep_ic = .false. ! use NCEP ICs - logical :: nggps_ic = .false. ! use NGGPS ICs - logical :: ecmwf_ic = .false. ! use ECMWF ICs - logical :: gfs_phil = .false. ! if .T., compute geopotential inside of GFS physics + logical :: ncep_ic = .false. ! use NCEP ICs + logical :: nggps_ic = .false. ! use NGGPS ICs + logical :: ecmwf_ic = .false. ! use ECMWF ICs + logical :: gfs_phil = .false. ! if .T., compute geopotential inside of GFS physics (not used?) logical :: agrid_vel_rst = .false. ! if .T., include ua/va (agrid winds) in the restarts - logical :: use_new_ncep = .false. ! use the NCEP ICs created after 2014/10/22, if want to read CWAT - logical :: use_ncep_phy = .false. ! if .T., separate CWAT by weights of liq_wat and liq_ice in FV_IC + logical :: use_new_ncep = .false. ! use the NCEP ICs created after 2014/10/22, if want to read CWAT (not used??) + logical :: use_ncep_phy = .false. ! if .T., separate CWAT by weights of liq_wat and liq_ice in FV_IC (not used??) logical :: fv_diag_ic = .false. ! reconstruct IC from fv_diagnostics on lat-lon grid logical :: external_ic = .false. ! use ICs from external sources; e.g. lat-lon FV core ! or NCEP re-analysis; both vertical remapping & horizontal ! (lat-lon to cubed sphere) interpolation will be done + logical :: external_eta = .false. ! 
allow the use of externally defined ak/bk values and not + ! require coefficients to be defined vi set_eta + logical :: read_increment = .false. ! read in analysis increment and add to restart ! Default restart files from the "Memphis" latlon FV core: character(len=128) :: res_latlon_dynamics = 'INPUT/fv_rst.res.nc' character(len=128) :: res_latlon_tracers = 'INPUT/atmos_tracers.res.nc' @@ -469,7 +509,7 @@ module fv_arrays_mod logical :: use_hydro_pressure = .false. ! GFS control logical :: do_uni_zfull = .false. ! compute zfull as a simply average of two zhalf logical :: hybrid_z = .false. ! use hybrid_z for remapping - logical :: Make_NH = .false. ! Initialize (w, delz) from hydro restart file + logical :: Make_NH = .false. ! Initialize (w, delz) from hydro restart file logical :: make_hybrid_z = .false. ! transform hydrostatic eta-coord IC into non-hydrostatic hybrid_z logical :: nudge_qv = .false. ! Nudge the water vapor (during na_init) above 30 mb towards HALOE climatology real :: add_noise = -1. !Amplitude of random noise added upon model startup; <=0 means no noise added @@ -484,13 +524,17 @@ module fv_arrays_mod real(kind=R_GRID) :: deglon_start = -30., deglon_stop = 30., & ! boundaries of latlon patch deglat_start = -30., deglat_stop = 30. - !Convenience pointers + logical :: regional = .false. !< Default setting for the regional domain. + + integer :: bc_update_interval = 3 !< Default setting for interval (hours) between external regional BC data files. + + !Convenience pointers integer, pointer :: grid_number !f1p logical :: adj_mass_vmr = .false. !TER: This is to reproduce answers for verona patch. This default can be changed ! to .true. in the next city release if desired - + !integer, pointer :: test_case !real, pointer :: alpha @@ -522,19 +566,28 @@ module fv_arrays_mod end type fv_nest_BC_type_4D + type nest_level_type + !Interpolation arrays for grid nesting + logical :: on_level ! indicate if current processor on this level. + logical :: do_remap_BC + integer, allocatable, dimension(:,:,:) :: ind_h, ind_u, ind_v, ind_b ! I don't think these are necessary since BC interpolation is done locally + real, allocatable, dimension(:,:,:) :: wt_h, wt_u, wt_v, wt_b + end type nest_level_type + type fv_nest_type !nested grid flags: integer :: refinement = 3 !Refinement wrt parent - integer :: parent_tile = 1 !Tile (of cubed sphere) in which nested grid lies + integer :: parent_tile = 1 !Tile (of cubed sphere) in which nested grid lies logical :: nested = .false. integer :: nestbctype = 1 integer :: nsponge = 0 - integer :: nestupdate = 0 - logical :: twowaynest = .false. + integer :: nestupdate = 0 + logical :: twowaynest = .false. integer :: ioffset, joffset !Position of nest within parent grid + integer :: nlevel = 0 ! levels down from top-most domain integer :: nest_timestep = 0 !Counter for nested-grid timesteps integer :: tracer_nest_timestep = 0 !Counter for nested-grid timesteps @@ -543,15 +596,19 @@ module fv_arrays_mod integer :: refinement_of_global = 1 integer :: npx_global integer :: upoff = 1 ! currently the same for all variables - integer :: isu = -999, ieu = -1000, jsu = -999, jeu = -1000 ! limits of update regions on coarse grid + integer :: isu = -999, ieu = -1000, jsu = -999, jeu = -1000 ! limits of update regions on coarse grid + real :: update_blend = 1. ! 
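! ---------------------------------------------------------------------
! The nest_level_type introduced above keeps one set of interpolation
! weights per nesting level. A self-contained toy of the same
! allocatable-component pattern (types and sizes invented for the demo,
! not taken from the model):
program nest_level_pattern
   implicit none
   type :: level_t
      logical :: on_level = .false.
      real, allocatable :: wt_h(:,:,:)
   end type level_t
   type(level_t), allocatable :: nest(:)
   integer :: num_nest_level
   num_nest_level = 2
   allocate(nest(num_nest_level))
   allocate(nest(1)%wt_h(8,8,4))   ! weights only where this PE participates
   nest(1)%on_level = .true.
   print *, size(nest), nest(1)%on_level, allocated(nest(2)%wt_h)
end program nest_level_pattern
! ---------------------------------------------------------------------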
option for controlling how much "blending" is done during two-way update + logical, allocatable :: do_remap_BC(:) - type(nest_domain_type) :: nest_domain !Structure holding link from this grid to its parent - type(nest_domain_type), allocatable :: nest_domain_all(:) + !nest_domain now a global structure defined in fv_mp_mod + !type(nest_domain_type) :: nest_domain !Structure holding link from this grid to its parent + !type(nest_domain_type), allocatable :: nest_domain_all(:) + integer :: num_nest_level ! number of nest levels. + type(nest_level_type), allocatable :: nest(:) ! store data for each level. !Interpolation arrays for grid nesting integer, allocatable, dimension(:,:,:) :: ind_h, ind_u, ind_v, ind_b real, allocatable, dimension(:,:,:) :: wt_h, wt_u, wt_v, wt_b - integer, allocatable, dimension(:,:,:) :: ind_update_h !These arrays are not allocated by allocate_fv_atmos_type; but instead !allocated for all grids, regardless of whether the grid is @@ -560,7 +617,7 @@ module fv_arrays_mod logical :: parent_proc, child_proc logical :: parent_of_twoway = .false. - + !These are for time-extrapolated BCs type(fv_nest_BC_type_3D) :: delp_BC, u_BC, v_BC, uc_BC, vc_BC, divg_BC type(fv_nest_BC_type_3D), allocatable, dimension(:) :: q_BC @@ -574,12 +631,27 @@ module fv_arrays_mod #endif #endif + !points to same parent grid as does Atm%parent_grid + type(fv_atmos_type), pointer :: parent_grid => NULL() + + !These are for tracer flux BCs logical :: do_flux_BCs, do_2way_flux_BCs !For a parent grid; determine whether there is a need to send BCs type(restart_file_type) :: BCfile_ne, BCfile_sw end type fv_nest_type + type phys_diag_type + + real, _ALLOCATABLE :: phys_t_dt(:,:,:) + real, _ALLOCATABLE :: phys_qv_dt(:,:,:) + real, _ALLOCATABLE :: phys_ql_dt(:,:,:) + real, _ALLOCATABLE :: phys_qi_dt(:,:,:) + real, _ALLOCATABLE :: phys_u_dt(:,:,:) + real, _ALLOCATABLE :: phys_v_dt(:,:,:) + + end type phys_diag_type + interface allocate_fv_nest_BC_type module procedure allocate_fv_nest_BC_type_3D module procedure allocate_fv_nest_BC_type_3D_Atm @@ -595,22 +667,41 @@ module fv_arrays_mod integer :: isd, ied, jsd, jed integer :: isc, iec, jsc, jec - integer :: ng + integer :: ng = 3 !default end type fv_grid_bounds_type + type fv_regional_bc_bounds_type + + integer :: is_north ,ie_north ,js_north ,je_north & + ,is_south ,ie_south ,js_south ,je_south & + ,is_east ,ie_east ,js_east ,je_east & + ,is_west ,ie_west ,js_west ,je_west + + integer :: is_north_uvs ,ie_north_uvs ,js_north_uvs ,je_north_uvs & + ,is_south_uvs ,ie_south_uvs ,js_south_uvs ,je_south_uvs & + ,is_east_uvs ,ie_east_uvs ,js_east_uvs ,je_east_uvs & + ,is_west_uvs ,ie_west_uvs ,js_west_uvs ,je_west_uvs + + integer :: is_north_uvw ,ie_north_uvw ,js_north_uvw ,je_north_uvw & + ,is_south_uvw ,ie_south_uvw ,js_south_uvw ,je_south_uvw & + ,is_east_uvw ,ie_east_uvw ,js_east_uvw ,je_east_uvw & + ,is_west_uvw ,ie_west_uvw ,js_west_uvw ,je_west_uvw + + end type fv_regional_bc_bounds_type type fv_atmos_type logical :: allocated = .false. logical :: dummy = .false. ! same as grids_on_this_pe(n) integer :: grid_number = 1 + character(len=32) :: nml_filename = "input.nml" !Timestep-related variables. type(time_type) :: Time_init, Time, Run_length, Time_end, Time_step_atmos #ifdef GFS_PHYS - !--- used for GFS PHYSICS only + !--- DUMMY for backwards-compatibility. Will be removed real, dimension(2048) :: fdiag = 0. #endif @@ -667,7 +758,7 @@ module fv_arrays_mod real, _ALLOCATABLE :: sgh(:,:) _NULL ! 
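! ---------------------------------------------------------------------
! Note on the _ALLOCATABLE / _NULL tokens used in phys_diag_type and
! elsewhere in fv_arrays: they are preprocessor macros, conventionally
! supplied by an FMS platform header roughly as
!
!   #define _ALLOCATABLE allocatable
!   #define _NULL
!
! or, for pointer-based builds,
!
!   #define _ALLOCATABLE pointer
!   #define _NULL => null()
!
! The exact definitions live in the build headers, not in this patch;
! the two variants above are shown only for orientation.
! ---------------------------------------------------------------------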
Terrain standard deviation real, _ALLOCATABLE :: oro(:,:) _NULL ! land fraction (1: all land; 0: all water) real, _ALLOCATABLE :: ts(:,:) _NULL ! skin temperature (sst) from NCEP/GFS (K) -- tile - + !----------------------------------------------------------------------- ! Others: !----------------------------------------------------------------------- @@ -691,7 +782,7 @@ module fv_arrays_mod real, _ALLOCATABLE :: cy(:,:,:) _NULL type(fv_flags_type) :: flagstruct - + !! Convenience pointers integer, pointer :: npx, npy, npz, ncnst, ng @@ -699,18 +790,20 @@ module fv_arrays_mod type(fv_grid_bounds_type) :: bd + type(fv_regional_bc_bounds_type) :: regional_bc_bounds type(domain2D) :: domain #if defined(SPMD) type(domain2D) :: domain_for_coupler ! domain used in coupled model with halo = 1. - integer :: num_contact, npes_per_tile, tile, npes_this_grid + !global tile and tile_of_mosaic only have a meaning for the CURRENT pe + integer :: num_contact, npes_per_tile, global_tile, tile_of_mosaic, npes_this_grid integer :: layout(2), io_layout(2) = (/ 1,1 /) #endif !These do not actually belong to the grid, but to the process !integer :: masterproc - !integer :: gid + !integer :: gid !!!!!!!!!!!!!!!! ! From fv_grid_tools @@ -720,7 +813,7 @@ module fv_arrays_mod real :: ptop type(fv_grid_type) :: gridstruct - + !!!!!!!!!!!!!!!! !fv_diagnostics! @@ -739,19 +832,16 @@ module fv_arrays_mod !Hold on to coarse-grid global grid, so we don't have to waste processor time getting it again when starting to do grid nesting real(kind=R_GRID), allocatable, dimension(:,:,:,:) :: grid_global - integer :: atmos_axes(4) + integer :: atmos_axes(4) + type(phys_diag_type) :: phys_diag end type fv_atmos_type -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie_in, js_in, je_in, & - npx_in, npy_in, npz_in, ndims_in, ncnst_in, nq_in, ng_in, dummy, alloc_2d, ngrids_in) + npx_in, npy_in, npz_in, ndims_in, ncnst_in, nq_in, dummy, alloc_2d, ngrids_in) !WARNING: Before calling this routine, be sure to have set up the ! 
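! ---------------------------------------------------------------------
! Interface note: this patch drops ng_in from allocate_fv_atmos_type;
! the halo width now comes from Atm%bd%ng (default 3 in
! fv_grid_bounds_type). Call sites change accordingly; a sketch with
! placeholder arguments:
!
!   before:
!     call allocate_fv_atmos_type(Atm(n), isd,ied,jsd,jed, is,ie,js,je, &
!                                 npx, npy, npz, ndims, ncnst, nq, ng,  &
!                                 dummy, alloc_2d, ngrids)
!   after:
!     call allocate_fv_atmos_type(Atm(n), isd,ied,jsd,jed, is,ie,js,je, &
!                                 npx, npy, npz, ndims, ncnst, nq,      &
!                                 dummy, alloc_2d, ngrids)
! ---------------------------------------------------------------------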
proper domain parameters from the namelists (as is done in @@ -760,7 +850,7 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie implicit none type(fv_atmos_type), intent(INOUT), target :: Atm integer, intent(IN) :: isd_in, ied_in, jsd_in, jed_in, is_in, ie_in, js_in, je_in - integer, intent(IN) :: npx_in, npy_in, npz_in, ndims_in, ncnst_in, nq_in, ng_in + integer, intent(IN) :: npx_in, npy_in, npz_in, ndims_in, ncnst_in, nq_in logical, intent(IN) :: dummy, alloc_2d integer, intent(IN) :: ngrids_in integer:: isd, ied, jsd, jed, is, ie, js, je @@ -775,71 +865,67 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie if (Atm%allocated) return if (dummy) then - isd = 0 - ied= -1 - jsd= 0 - jed= -1 - is= 0 - ie= -1 - js= 0 - je= -1 - npx= 1 - npy= 1 - npz= 1 - ndims= 1 - ncnst= 1 + isd = 0 + ied= -1 + jsd= 0 + jed= -1 + is= 0 + ie= -1 + js= 0 + je= -1 + npx= 1 + npy= 1 + npz= 1 + ndims= 1 + ncnst= 1 nq= 1 - ng = 1 else - isd = isd_in - ied= ied_in - jsd= jsd_in - jed= jed_in - is= is_in - ie= ie_in - js= js_in - je= je_in - npx= npx_in - npy= npy_in - npz= npz_in - ndims= ndims_in - ncnst= ncnst_in + isd = isd_in + ied= ied_in + jsd= jsd_in + jed= jed_in + is= is_in + ie= ie_in + js= js_in + je= je_in + npx= npx_in + npy= npy_in + npz= npz_in + ndims= ndims_in + ncnst= ncnst_in nq= nq_in - ng = ng_in endif if ((.not. dummy) .or. alloc_2d) then - isd_2d = isd_in - ied_2d= ied_in - jsd_2d= jsd_in - jed_2d= jed_in - is_2d= is_in - ie_2d= ie_in - js_2d= js_in - je_2d= je_in - npx_2d= npx_in - npy_2d= npy_in - npz_2d= npz_in - ndims_2d= ndims_in - ncnst_2d= ncnst_in - nq_2d= nq_in - ng_2d = ng_in + isd_2d = isd_in + ied_2d= ied_in + jsd_2d= jsd_in + jed_2d= jed_in + is_2d= is_in + ie_2d= ie_in + js_2d= js_in + je_2d= je_in + npx_2d= npx_in + npy_2d= npy_in + npz_2d= npz_in + ndims_2d= ndims_in + ncnst_2d= ncnst_in + nq_2d= nq_in else - isd_2d = 0 - ied_2d= -1 - jsd_2d= 0 - jed_2d= -1 - is_2d= 0 - ie_2d= -1 - js_2d= 0 - je_2d= -1 - npx_2d= 1 - npy_2d= 1 - npz_2d= 0 !for ak, bk - ndims_2d= 1 - ncnst_2d= 1 - nq_2d= 1 - ng_2d = 1 + isd_2d = 0 + ied_2d= -1 + jsd_2d= 0 + jed_2d= -1 + is_2d= 0 + ie_2d= -1 + js_2d= 0 + je_2d= -1 + npx_2d= 1 + npy_2d= 1 + npz_2d= npz_in !for ak, bk, which are 1D arrays and thus OK to allocate + ndims_2d= 1 + ncnst_2d= 1 + nq_2d= 1 endif !This should be set up in fv_mp_mod @@ -858,8 +944,6 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie !!$ Atm%bd%jsc = Atm%bd%js !!$ Atm%bd%jec = Atm%bd%je - Atm%bd%ng = ng - !Convenience pointers Atm%npx => Atm%flagstruct%npx Atm%npy => Atm%flagstruct%npy @@ -921,11 +1005,11 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie if ( Atm%flagstruct%hydrostatic ) then !Note length-one initialization if hydrostatic = .true. allocate ( Atm%w(isd:isd, jsd:jsd ,1) ) - allocate ( Atm%delz(isd:isd, jsd:jsd ,1) ) + allocate ( Atm%delz(is:is, js:js ,1) ) allocate ( Atm%ze0(is:is, js:js ,1) ) else allocate ( Atm%w(isd:ied, jsd:jed ,npz ) ) - allocate ( Atm%delz(isd:ied, jsd:jed ,npz) ) + allocate ( Atm%delz(is:ie, js:je ,npz) ) if( Atm%flagstruct%hybrid_z ) then allocate ( Atm%ze0(is:ie, js:je ,npz+1) ) else @@ -940,11 +1024,10 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie allocate ( Atm%q_con(isd:isd,jsd:jsd,1) ) #endif -#ifndef NO_TOUCH_MEM ! Notes by SJL ! Place the memory in the optimal shared mem space ! 
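! ---------------------------------------------------------------------
! The hunk above shrinks delz from the haloed data domain to the
! compute domain. A standalone illustration of the memory difference
! (toy sizes; the real bounds come from the domain decomposition):
program delz_bounds_demo
   implicit none
   integer, parameter :: is=1, ie=48, js=1, je=48, ng=3, npz=63
   integer, parameter :: isd=is-ng, ied=ie+ng, jsd=js-ng, jed=je+ng
   real, allocatable :: w(:,:,:), delz(:,:,:)
   allocate ( w(isd:ied, jsd:jed, npz) )   ! still haloed, as before
   allocate ( delz(is:ie, js:je, npz) )    ! compute domain only, per this patch
   print *, 'w cells:', size(w), ' delz cells:', size(delz)
end program delz_bounds_demo
! ---------------------------------------------------------------------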
This will help the scaling with OpenMP -!$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,Atm,nq,ncnst) +!$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,Atm,nq,ncnst) do k=1, npz do j=jsd, jed do i=isd, ied @@ -956,13 +1039,13 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie enddo do j=jsd, jed+1 do i=isd, ied - Atm%u(i,j,k) = real_big + Atm%u(i,j,k) = 0. Atm%vc(i,j,k) = real_big enddo enddo do j=jsd, jed do i=isd, ied+1 - Atm%v(i,j,k) = real_big + Atm%v(i,j,k) = 0. Atm%uc(i,j,k) = real_big enddo enddo @@ -970,6 +1053,10 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie do j=jsd, jed do i=isd, ied Atm%w(i,j,k) = real_big + enddo + enddo + do j=js, je + do i=is, ie Atm%delz(i,j,k) = real_big enddo enddo @@ -989,37 +1076,42 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie enddo enddo enddo -#endif + do j=js, je + do i=is, ie + Atm%ts(i,j) = 300. + Atm%phis(i,j) = real_big + enddo + enddo allocate ( Atm%gridstruct% area(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) ! Cell Centered allocate ( Atm%gridstruct% area_64(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) ! Cell Centered allocate ( Atm%gridstruct%rarea(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) ! Cell Centered - + allocate ( Atm%gridstruct% area_c(isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) ! Cell Corners allocate ( Atm%gridstruct% area_c_64(isd_2d:ied_2d+1,jsd_2d:jed_2d+1) )! Cell Corners allocate ( Atm%gridstruct%rarea_c(isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) ! Cell Corners - + allocate ( Atm%gridstruct% dx(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct% dx_64(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct%rdx(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct% dy(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dy_64(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct%rdy(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) - + allocate ( Atm%gridstruct% dxc(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dxc_64(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct%rdxc(isd_2d:ied_2d+1,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dyc(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct% dyc_64(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct%rdyc(isd_2d:ied_2d ,jsd_2d:jed_2d+1) ) - + allocate ( Atm%gridstruct% dxa(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dxa_64(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct%rdxa(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dya(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct% dya_64(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) allocate ( Atm%gridstruct%rdya(isd_2d:ied_2d ,jsd_2d:jed_2d ) ) - + allocate ( Atm%gridstruct%grid (isd_2d:ied_2d+1,jsd_2d:jed_2d+1,1:ndims_2d) ) allocate ( Atm%gridstruct%grid_64 (isd_2d:ied_2d+1,jsd_2d:jed_2d+1,1:ndims_2d) ) allocate ( Atm%gridstruct%agrid(isd_2d:ied_2d ,jsd_2d:jed_2d ,1:ndims_2d) ) @@ -1029,7 +1121,7 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie allocate ( Atm%gridstruct%rsina(is_2d:ie_2d+1,js_2d:je_2d+1) ) ! Why is the size different? allocate ( Atm%gridstruct% cosa(isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) ! COS(angle of intersection) allocate ( Atm%gridstruct% cosa_64(isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) ! 
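! ---------------------------------------------------------------------
! The k-loop above reflects the "first touch" idea behind SJL's note:
! pages tend to be mapped near the thread that first writes them, which
! helps OpenMP scaling on NUMA nodes. A minimal standalone version of
! the same pattern (toy array; compile with OpenMP enabled):
program first_touch_demo
   implicit none
   integer, parameter :: nx=64, ny=64, nz=8
   real, allocatable :: q(:,:,:)
   integer :: i, j, k
   allocate(q(nx,ny,nz))
!$OMP parallel do default(none) shared(q)
   do k=1,nz
      do j=1,ny
         do i=1,nx
            q(i,j,k) = 0.         ! first write decides page placement
         enddo
      enddo
   enddo
   print *, sum(q)
end program first_touch_demo
! ---------------------------------------------------------------------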
COS(angle of intersection) - + allocate ( Atm%gridstruct% e1(3,isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) allocate ( Atm%gridstruct% e2(3,isd_2d:ied_2d+1,jsd_2d:jed_2d+1) ) @@ -1127,6 +1219,7 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie if (Atm%neststruct%nested) then + allocate(Atm%neststruct%ind_h(isd:ied,jsd:jed,4)) allocate(Atm%neststruct%ind_u(isd:ied,jsd:jed+1,4)) allocate(Atm%neststruct%ind_v(isd:ied+1,jsd:jed,4)) @@ -1169,27 +1262,31 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie #endif - if (Atm%neststruct%twowaynest) allocate(& - Atm%neststruct%ind_update_h( & - Atm%parent_grid%bd%isd:Atm%parent_grid%bd%ied+1, & - Atm%parent_grid%bd%jsd:Atm%parent_grid%bd%jed+1,2)) - end if !--- Do the memory allocation only for nested model if( ngrids_in > 1 ) then if (Atm%flagstruct%grid_type < 4) then if (Atm%neststruct%nested) then - allocate(Atm%grid_global(1-ng_2d:npx_2d +ng_2d,1-ng_2d:npy_2d +ng_2d,2,1)) + allocate(Atm%grid_global(1-Atm%ng:npx_2d +Atm%ng,1-Atm%ng:npy_2d +Atm%ng,2,1)) else - allocate(Atm%grid_global(1-ng_2d:npx_2d +ng_2d,1-ng_2d:npy_2d +ng_2d,2,1:6)) + allocate(Atm%grid_global(1-Atm%ng:npx_2d +Atm%ng,1-Atm%ng:npy_2d +Atm%ng,2,1:6)) endif end if endif + + !!Convenience pointers + Atm%gridstruct%nested => Atm%neststruct%nested + Atm%gridstruct%grid_type => Atm%flagstruct%grid_type + Atm%flagstruct%grid_number => Atm%grid_number + Atm%gridstruct%regional => Atm%flagstruct%regional + Atm%gridstruct%bounded_domain = Atm%flagstruct%regional .or. Atm%neststruct%nested + if (Atm%neststruct%nested) Atm%neststruct%parent_grid => Atm%parent_grid + Atm%allocated = .true. if (dummy) Atm%dummy = .true. - + end subroutine allocate_fv_atmos_type subroutine deallocate_fv_atmos_type(Atm) @@ -1237,30 +1334,30 @@ subroutine deallocate_fv_atmos_type(Atm) deallocate ( Atm%gridstruct% area ) ! Cell Centered deallocate ( Atm%gridstruct%rarea ) ! Cell Centered - + deallocate ( Atm%gridstruct% area_c ) ! Cell Corners deallocate ( Atm%gridstruct%rarea_c ) ! Cell Corners - + deallocate ( Atm%gridstruct% dx ) deallocate ( Atm%gridstruct%rdx ) deallocate ( Atm%gridstruct% dy ) deallocate ( Atm%gridstruct%rdy ) - + deallocate ( Atm%gridstruct% dxc ) deallocate ( Atm%gridstruct%rdxc ) deallocate ( Atm%gridstruct% dyc ) deallocate ( Atm%gridstruct%rdyc ) - + deallocate ( Atm%gridstruct% dxa ) deallocate ( Atm%gridstruct%rdxa ) deallocate ( Atm%gridstruct% dya ) deallocate ( Atm%gridstruct%rdya ) - + deallocate ( Atm%gridstruct%grid ) deallocate ( Atm%gridstruct%agrid ) deallocate ( Atm%gridstruct%sina ) ! SIN(angle of intersection) deallocate ( Atm%gridstruct%cosa ) ! COS(angle of intersection) - + deallocate ( Atm%gridstruct% e1 ) deallocate ( Atm%gridstruct% e2 ) @@ -1392,15 +1489,12 @@ subroutine deallocate_fv_atmos_type(Atm) endif #endif - - if (Atm%neststruct%twowaynest) deallocate(Atm%neststruct%ind_update_h) - end if if (Atm%flagstruct%grid_type < 4) then if(allocated(Atm%grid_global)) deallocate(Atm%grid_global) end if - + Atm%allocated = .false. end subroutine deallocate_fv_atmos_type diff --git a/model/fv_control.F90 b/model/fv_control.F90 index fd58c9ea6..29fc68420 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -18,7 +18,6 @@ !* License along with the FV3 dynamical core. !* If not, see . !*********************************************************************** -! $Id$ ! !---------------- ! 
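! ---------------------------------------------------------------------
! The "convenience pointers" block above aliases flags stored in one
! structure from another (e.g. Atm%gridstruct%regional =>
! Atm%flagstruct%regional), so either name reads the same storage. A
! toy version of the pattern:
program alias_pointer_demo
   implicit none
   type :: flags_t
      logical :: regional = .false.
   end type flags_t
   type :: grid_t
      logical, pointer :: regional => null()
   end type grid_t
   type(flags_t), target :: flagstruct
   type(grid_t) :: gridstruct
   gridstruct%regional => flagstruct%regional
   flagstruct%regional = .true.
   print *, gridstruct%regional   ! .true. -- both names see one flag
end program alias_pointer_demo
! ---------------------------------------------------------------------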
FV control panel
!----------------
@@ -30,11 +29,12 @@ module fv_control_mod
 use field_manager_mod, only: MODEL_ATMOS
 use fms_mod, only: write_version_number, open_namelist_file, &
 check_nml_error, close_file, file_exist
+ use fms_io_mod, only: set_domain
 use mpp_mod, only: FATAL, mpp_error, mpp_pe, stdlog, &
 mpp_npes, mpp_get_current_pelist, &
 input_nml_file, get_unit, WARNING, &
 read_ascii_file, INPUT_STR_LENGTH
- use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain
+ use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_tile_id
 use tracer_manager_mod, only: tm_get_number_tracers => get_number_tracers, &
 tm_get_tracer_index => get_tracer_index, &
 tm_get_tracer_indices => get_tracer_indices, &
@@ -50,456 +50,1024 @@ module fv_control_mod
 use fv_grid_utils_mod, only: grid_utils_init, grid_utils_end, ptop_min
 use fv_eta_mod, only: set_eta
 use fv_grid_tools_mod, only: init_grid
- use fv_mp_mod, only: mp_start, mp_assign_gid, domain_decomp
- use fv_mp_mod, only: ng, switch_current_Atm
- use fv_mp_mod, only: broadcast_domains, mp_barrier, is_master, setup_master
-!!! CLEANUP: should be replaced by a getter function?
- use test_cases_mod, only: test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size
+ use fv_mp_mod, only: mp_start, domain_decomp, mp_assign_gid, global_nest_domain
+ use fv_mp_mod, only: broadcast_domains, mp_barrier, is_master, setup_master, grids_master_procs, tile_fine
+ use fv_mp_mod, only: MAX_NNEST, MAX_NTILE
+ !use test_cases_mod, only: test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size
 use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt
 use mpp_domains_mod, only: domain2D
 use mpp_domains_mod, only: mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain
- use mpp_domains_mod, only: mpp_get_C2F_index, mpp_get_F2C_index, mpp_broadcast_domain
+ use mpp_domains_mod, only: mpp_get_C2F_index, mpp_get_F2C_index
 use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST, WEST, SOUTH
- use mpp_mod, only: mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, mpp_declare_pelist, mpp_root_pe, mpp_recv, mpp_sync_self, mpp_broadcast, read_input_nml
+ use mpp_mod, only: mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, &
+ mpp_declare_pelist, mpp_root_pe, mpp_recv, mpp_sync_self, read_input_nml, &
+ mpp_max
 use fv_diagnostics_mod, only: fv_diag_init_gn

 implicit none
 private

-!-----------------------------------------------------------------------
-! Grid descriptor file setup
-!-----------------------------------------------------------------------
-!------------------------------------------
-! Model Domain parameters
-!
See fv_arrays.F90 for descriptions -!------------------------------------------ -!CLEANUP module pointers - character(len=80) , pointer :: grid_name - character(len=120), pointer :: grid_file - integer, pointer :: grid_type - integer , pointer :: hord_mt - integer , pointer :: kord_mt - integer , pointer :: kord_wz - integer , pointer :: hord_vt - integer , pointer :: hord_tm - integer , pointer :: hord_dp - integer , pointer :: kord_tm - integer , pointer :: hord_tr - integer , pointer :: kord_tr - real , pointer :: scale_z - real , pointer :: w_max - real , pointer :: z_min - - integer , pointer :: nord - integer , pointer :: nord_tr - real , pointer :: dddmp - real , pointer :: d2_bg - real , pointer :: d4_bg - real , pointer :: vtdm4 - real , pointer :: trdm2 - real , pointer :: d2_bg_k1 - real , pointer :: d2_bg_k2 - real , pointer :: d2_divg_max_k1 - real , pointer :: d2_divg_max_k2 - real , pointer :: damp_k_k1 - real , pointer :: damp_k_k2 - integer , pointer :: n_zs_filter - integer , pointer :: nord_zs_filter - logical , pointer :: full_zs_filter - - logical , pointer :: consv_am - logical , pointer :: do_sat_adj - logical , pointer :: do_f3d - logical , pointer :: no_dycore - logical , pointer :: convert_ke - logical , pointer :: do_vort_damp - logical , pointer :: use_old_omega -! PG off centering: - real , pointer :: beta - integer , pointer :: n_sponge - real , pointer :: d_ext - integer , pointer :: nwat - logical , pointer :: warm_start - logical , pointer :: inline_q - real , pointer :: shift_fac - logical , pointer :: do_schmidt - real(kind=R_GRID) , pointer :: stretch_fac - real(kind=R_GRID) , pointer :: target_lat - real(kind=R_GRID) , pointer :: target_lon - - logical , pointer :: reset_eta - real , pointer :: p_fac - real , pointer :: a_imp - integer , pointer :: n_split - ! 
Default - integer , pointer :: m_split - integer , pointer :: k_split - logical , pointer :: use_logp - - integer , pointer :: q_split - integer , pointer :: print_freq - - integer , pointer :: npx - integer , pointer :: npy - integer , pointer :: npz - integer , pointer :: npz_rst - - integer , pointer :: ncnst - integer , pointer :: pnats - integer , pointer :: dnats - integer , pointer :: ntiles - integer , pointer :: nf_omega - integer , pointer :: fv_sg_adj - - integer , pointer :: na_init - real , pointer :: p_ref - real , pointer :: dry_mass - integer , pointer :: nt_prog - integer , pointer :: nt_phys - real , pointer :: tau_h2o - - real , pointer :: delt_max - real , pointer :: d_con - real , pointer :: ke_bg - real , pointer :: consv_te - real , pointer :: tau - real , pointer :: rf_cutoff - logical , pointer :: filter_phys - logical , pointer :: dwind_2d - logical , pointer :: breed_vortex_inline - logical , pointer :: range_warn - logical , pointer :: fill - logical , pointer :: fill_dp - logical , pointer :: fill_wz - logical , pointer :: check_negative - logical , pointer :: non_ortho - logical , pointer :: adiabatic - logical , pointer :: moist_phys - logical , pointer :: do_Held_Suarez - logical , pointer :: do_reed_physics - logical , pointer :: reed_cond_only - logical , pointer :: reproduce_sum - logical , pointer :: adjust_dry_mass - logical , pointer :: fv_debug - logical , pointer :: srf_init - logical , pointer :: mountain - logical , pointer :: remap_t - logical , pointer :: z_tracer - - logical , pointer :: old_divg_damp - logical , pointer :: fv_land - logical , pointer :: nudge - logical , pointer :: nudge_ic - logical , pointer :: ncep_ic - logical , pointer :: nggps_ic - logical , pointer :: ecmwf_ic - logical , pointer :: gfs_phil - logical , pointer :: agrid_vel_rst - logical , pointer :: use_new_ncep - logical , pointer :: use_ncep_phy - logical , pointer :: fv_diag_ic - logical , pointer :: external_ic - character(len=128) , pointer :: res_latlon_dynamics - character(len=128) , pointer :: res_latlon_tracers - logical , pointer :: hydrostatic - logical , pointer :: phys_hydrostatic - logical , pointer :: use_hydro_pressure - logical , pointer :: do_uni_zfull !miz - logical , pointer :: adj_mass_vmr ! f1p - logical , pointer :: hybrid_z - logical , pointer :: Make_NH - logical , pointer :: make_hybrid_z - logical , pointer :: nudge_qv - real, pointer :: add_noise - - integer , pointer :: a2b_ord - integer , pointer :: c2l_ord - - integer, pointer :: ndims - - real(kind=R_GRID), pointer :: dx_const - real(kind=R_GRID), pointer :: dy_const - real(kind=R_GRID), pointer :: deglon_start, deglon_stop, & ! boundaries of latlon patch - deglat_start, deglat_stop - real(kind=R_GRID), pointer :: deglat - - logical, pointer :: nested, twowaynest - integer, pointer :: parent_tile, refinement, nestbctype, nestupdate, nsponge, ioffset, joffset - real, pointer :: s_weight - - integer, pointer :: layout(:), io_layout(:) - - integer :: ntilesMe ! Number of tiles on this process =1 for now - #ifdef OVERLOAD_R4 real :: too_big = 1.E8 #else real :: too_big = 1.E35 #endif - public :: fv_init, fv_end + public :: fv_control_init, fv_end integer, public :: ngrids = 1 - integer, public, allocatable :: pelist_all(:) - integer :: commID, max_refinement_of_global = 1. - integer :: gid - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - - real :: umax = 350. ! 
max wave speed for grid_type>3 - integer :: parent_grid_num = -1 + integer :: commID, global_commID integer :: halo_update_type = 1 ! 1 for two-interfaces non-block ! 2 for block ! 3 for four-interfaces non-block +! version number of this module +! Include variable "version" to be written to log file. +#include + contains !------------------------------------------------------------------------------- - - subroutine fv_init(Atm, dt_atmos, grids_on_this_pe, p_split) - type(fv_atmos_type), allocatable, intent(inout), target :: Atm(:) - real, intent(in) :: dt_atmos - logical, allocatable, intent(INOUT) :: grids_on_this_pe(:) - integer, intent(INOUT) :: p_split + subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) + + type(fv_atmos_type), allocatable, intent(inout), target :: Atm(:) + real, intent(in) :: dt_atmos + integer, intent(OUT) :: this_grid + logical, allocatable, intent(OUT) :: grids_on_this_pe(:) + + integer, intent(INOUT) :: p_split + character(100) :: pe_list_name, errstring + integer :: n, npes, pecounter, i, num_family, ntiles_nest_all + integer, allocatable :: global_pelist(:) + integer, dimension(MAX_NNEST) :: grid_pes = 0 + integer, dimension(MAX_NNEST) :: grid_coarse = -1 + integer, dimension(MAX_NNEST) :: nest_refine = 3 + integer, dimension(MAX_NNEST) :: nest_ioffsets = -999, nest_joffsets = -999 + integer, dimension(MAX_NNEST) :: all_npx = 0 + integer, dimension(MAX_NNEST) :: all_npy = 0 + integer, dimension(MAX_NNEST) :: all_npz = 0 + integer, dimension(MAX_NNEST) :: all_ntiles = 0 + !integer, dimension(MAX_NNEST) :: tile_fine = 0 + integer, dimension(MAX_NNEST) :: icount_coarse = 1 + integer, dimension(MAX_NNEST) :: jcount_coarse = 1 + integer, dimension(MAX_NNEST) :: nest_level = 0 + integer, dimension(MAX_NNEST) :: tile_coarse = 0 + integer, dimension(MAX_NTILE) :: npes_nest_tile = 0 + + real :: sdt + integer :: unit, ens_root_pe, tile_id(1) + + !!!!!!!!!! POINTERS FOR READING NAMELISTS !!!!!!!!!! + + !------------------------------------------ + ! Model Domain parameters + ! See fv_arrays.F90 for descriptions + !------------------------------------------ + !CLEANUP module pointers + character(len=80) , pointer :: grid_name + character(len=120), pointer :: grid_file + integer, pointer :: grid_type + integer , pointer :: hord_mt + integer , pointer :: kord_mt + integer , pointer :: kord_wz + integer , pointer :: hord_vt + integer , pointer :: hord_tm + integer , pointer :: hord_dp + integer , pointer :: kord_tm + integer , pointer :: hord_tr + integer , pointer :: kord_tr + real , pointer :: scale_z + real , pointer :: w_max + real , pointer :: z_min + + integer , pointer :: nord + integer , pointer :: nord_tr + real , pointer :: dddmp + real , pointer :: d2_bg + real , pointer :: d4_bg + real , pointer :: vtdm4 + real , pointer :: trdm2 + real , pointer :: d2_bg_k1 + real , pointer :: d2_bg_k2 + real , pointer :: d2_divg_max_k1 + real , pointer :: d2_divg_max_k2 + real , pointer :: damp_k_k1 + real , pointer :: damp_k_k2 + integer , pointer :: n_zs_filter + integer , pointer :: nord_zs_filter + logical , pointer :: full_zs_filter + + logical , pointer :: consv_am + logical , pointer :: do_sat_adj + logical , pointer :: do_f3d + logical , pointer :: no_dycore + logical , pointer :: convert_ke + logical , pointer :: do_vort_damp + logical , pointer :: use_old_omega + ! 
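! ---------------------------------------------------------------------
! The per-grid arrays declared above (grid_pes, grid_coarse,
! tile_coarse, nest_refine, nest_ioffsets, nest_joffsets) are filled
! from &fv_nest_nml, read further below. An illustrative two-grid
! namelist -- one global parent plus one 3x nest on tile 6 -- with
! made-up PE counts and offsets (entry n describes grid n; the first
! column is ignored for a top-level grid):
!
!   &fv_nest_nml
!       grid_pes      = 96, 48
!       grid_coarse   = 0, 1
!       tile_coarse   = 0, 6
!       nest_refine   = 0, 3
!       nest_ioffsets = 0, 11
!       nest_joffsets = 0, 11
!   /
!
! grid_coarse(n) <= 0 marks a top-level grid, per the ngrids scan above.
! ---------------------------------------------------------------------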
PG off centering: + real , pointer :: beta + integer , pointer :: n_sponge + real , pointer :: d_ext + integer , pointer :: nwat + logical , pointer :: warm_start + logical , pointer :: inline_q + real , pointer :: shift_fac + logical , pointer :: do_schmidt, do_cube_transform + real(kind=R_GRID) , pointer :: stretch_fac + real(kind=R_GRID) , pointer :: target_lat + real(kind=R_GRID) , pointer :: target_lon + + logical , pointer :: reset_eta + real , pointer :: p_fac + real , pointer :: a_imp + integer , pointer :: n_split + ! Default + integer , pointer :: m_split + integer , pointer :: k_split + logical , pointer :: use_logp + + integer , pointer :: q_split + integer , pointer :: print_freq + logical , pointer :: write_3d_diags + + integer , pointer :: npx + integer , pointer :: npy + integer , pointer :: npz + character(len=24), pointer :: npz_type + integer , pointer :: npz_rst + + integer , pointer :: ncnst + integer , pointer :: pnats + integer , pointer :: dnats + integer , pointer :: dnrts + integer , pointer :: ntiles + integer , pointer :: nf_omega + integer , pointer :: fv_sg_adj + real , pointer :: sg_cutoff + + integer , pointer :: na_init + logical , pointer :: nudge_dz + real , pointer :: p_ref + real , pointer :: dry_mass + integer , pointer :: nt_prog + integer , pointer :: nt_phys + real , pointer :: tau_h2o + + real , pointer :: delt_max + real , pointer :: d_con + real , pointer :: ke_bg + real , pointer :: consv_te + real , pointer :: tau + real , pointer :: rf_cutoff + logical , pointer :: filter_phys + logical , pointer :: dwind_2d + logical , pointer :: breed_vortex_inline + logical , pointer :: range_warn + logical , pointer :: fill + logical , pointer :: fill_dp + logical , pointer :: fill_wz + logical , pointer :: fill_gfs + logical , pointer :: check_negative + logical , pointer :: non_ortho + logical , pointer :: adiabatic + logical , pointer :: moist_phys + logical , pointer :: do_Held_Suarez + logical , pointer :: do_reed_physics + logical , pointer :: reed_cond_only + logical , pointer :: reproduce_sum + logical , pointer :: adjust_dry_mass + logical , pointer :: fv_debug + logical , pointer :: srf_init + logical , pointer :: mountain + logical , pointer :: remap_t + logical , pointer :: z_tracer + + logical , pointer :: old_divg_damp + logical , pointer :: fv_land + logical , pointer :: nudge + logical , pointer :: nudge_ic + logical , pointer :: ncep_ic + logical , pointer :: nggps_ic + logical , pointer :: ecmwf_ic + logical , pointer :: gfs_phil + logical , pointer :: agrid_vel_rst + logical , pointer :: use_new_ncep + logical , pointer :: use_ncep_phy + logical , pointer :: fv_diag_ic + logical , pointer :: external_ic + logical , pointer :: external_eta + logical , pointer :: read_increment + logical , pointer :: hydrostatic + logical , pointer :: phys_hydrostatic + logical , pointer :: use_hydro_pressure + logical , pointer :: do_uni_zfull !miz + logical , pointer :: adj_mass_vmr ! f1p + logical , pointer :: hybrid_z + logical , pointer :: Make_NH + logical , pointer :: make_hybrid_z + logical , pointer :: nudge_qv + real, pointer :: add_noise + + integer , pointer :: a2b_ord + integer , pointer :: c2l_ord + + integer, pointer :: ndims + + real(kind=R_GRID), pointer :: dx_const + real(kind=R_GRID), pointer :: dy_const + real(kind=R_GRID), pointer :: deglon_start, deglon_stop, & ! 
boundaries of latlon patch + deglat_start, deglat_stop + real(kind=R_GRID), pointer :: deglat + + logical, pointer :: nested, twowaynest + logical, pointer :: regional + integer, pointer :: bc_update_interval + integer, pointer :: parent_tile, refinement, nestbctype, nestupdate, nsponge, ioffset, joffset + real, pointer :: s_weight, update_blend + + integer, pointer :: layout(:), io_layout(:) + + !!!!!!!!!! END POINTERS !!!!!!!!!!!!!!!!!!!!!!!!!!!! + + this_grid = -1 ! default + call mp_assign_gid + ens_root_pe = mpp_root_pe() + + ! 1. read nesting namelists + call read_namelist_nest_nml + call read_namelist_fv_nest_nml + + ! 2. Set up Atm and PElists + + ngrids = 1 + do n=2,MAX_NNEST + if (grid_coarse(n) <= 0) then + exit + endif + ngrids = ngrids + 1 + enddo + allocate(Atm(ngrids)) + npes = mpp_npes() ! now on global pelist + + allocate(global_pelist(npes)) + call mpp_get_current_pelist(global_pelist, commID=global_commID) ! for commID + + + allocate(grids_master_procs(ngrids)) + pecounter = 0 + allocate(grids_on_this_pe(ngrids)) + grids_on_this_pe(:) = .false. + + do n=1,ngrids + + if (ngrids == 1 .or. grid_pes(n) == 0) then + grid_pes(n) = npes - sum(grid_pes) + if (grid_pes(n) == 0) then + if ( n > 1 ) then + call mpp_error(FATAL, 'Only one zero entry in grid_pes permitted.') + else + grid_pes(n) = npes + endif + endif + endif + + allocate(Atm(n)%pelist(grid_pes(n))) + grids_master_procs(n) = pecounter + do i=1,grid_pes(n) + if (pecounter >= npes) then + if (mpp_pe() == 0) then + print*, 'ngrids = ', ngrids, ', grid_pes = ', grid_pes(1:ngrids) + endif + call mpp_error(FATAL, 'grid_pes assigns more PEs than are available.') + endif + Atm(n)%pelist(i) = pecounter + ens_root_pe !TODO PELIST set up by mpp_define_nest_domains??? + pecounter = pecounter + 1 + Atm(n)%npes_this_grid = grid_pes(n) + enddo + Atm(n)%grid_number = n + + !TODO: we are required to use PE name for reading INTERNAL namelist + ! and the actual file name for EXTERNAL namelists. Need to clean up this code + if (n == 1) then + pe_list_name = '' + else + write(pe_list_name,'(A4, I2.2)') 'nest', n + endif + call mpp_declare_pelist(Atm(n)%pelist, pe_list_name) + !If nest need to re-initialize internal NML + if (n > 1) then + Atm(n)%nml_filename = 'input_'//trim(pe_list_name)//'.nml' + else + Atm(n)%nml_filename = 'input.nml' + endif + if (.not. file_exist(Atm(n)%nml_filename)) then + call mpp_error(FATAL, "Could not find nested grid namelist "//Atm(n)%nml_filename) + endif + enddo + + do n=1,ngrids + !ONE grid per pe + if (ANY(mpp_pe() == Atm(n)%pelist)) then + if (this_grid > 0) then + print*, mpp_pe(), this_grid, n + call mpp_error(FATAL, " Grid assigned to multiple pes") + endif + call mpp_set_current_pelist(Atm(n)%pelist) + call setup_master(Atm(n)%pelist) + this_grid = n + grids_on_this_pe(n) = .true. + endif + Atm(n)%neststruct%nested = ( grid_coarse(n) > 0 ) + + if (Atm(n)%neststruct%nested) then + if ( grid_coarse(n) > ngrids .or. grid_coarse(n) == n .or. 
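! ---------------------------------------------------------------------
! The naming convention established above maps grid 2 to pelist name
! 'nest02' and namelist file 'input_nest02.nml'. A standalone check of
! the same formatting:
program nest_name_demo
   implicit none
   character(100) :: pe_list_name
   character(len=32) :: nml_filename
   integer :: n
   n = 2
   write(pe_list_name,'(A4, I2.2)') 'nest', n
   nml_filename = 'input_'//trim(pe_list_name)//'.nml'
   print *, trim(nml_filename)   ! prints: input_nest02.nml
end program nest_name_demo
! ---------------------------------------------------------------------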
grid_coarse(n) < 1) then + write(errstring,'(2(A,I3))') "Could not find parent grid #", grid_coarse(n), ' for grid #', n + call mpp_error(FATAL, errstring) + endif + Atm(n)%parent_grid => Atm(grid_coarse(n)) + + Atm(n)%neststruct%ioffset = nest_ioffsets(n) + Atm(n)%neststruct%joffset = nest_joffsets(n) + Atm(n)%neststruct%parent_tile = tile_coarse(n) + Atm(n)%neststruct%refinement = nest_refine(n) + + else + + Atm(n)%neststruct%ioffset = -999 + Atm(n)%neststruct%joffset = -999 + Atm(n)%neststruct%parent_tile = -1 + Atm(n)%neststruct%refinement = -1 + + endif + + enddo + + if (pecounter /= npes) then + if (mpp_pe() == 0) then + print*, 'npes = ', npes, ', grid_pes = ', grid_pes(1:ngrids) + call mpp_error(FATAL, 'grid_pes in fv_nest_Nml does not assign all of the available PEs') + endif + endif - integer :: i, j, k, n, p - real :: sdt + ! 3pre. + call timing_init + call timing_on('TOTAL') -! tracers - integer :: num_family ! output of register_tracers + ! 3. Read namelists, do option processing and I/O - integer :: isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg, jeg, upoff, jind - integer :: ic, jc + call set_namelist_pointers(Atm(this_grid)) + call fv_diag_init_gn(Atm(this_grid)) +#ifdef INTERNAL_FILE_NML + if (this_grid .gt. 1) then + write(Atm(this_grid)%nml_filename,'(A4, I2.2)') 'nest', this_grid + if (.not. file_exist('input_'//trim(Atm(this_grid)%nml_filename)//'.nml')) then + call mpp_error(FATAL, "Could not find nested grid namelist "//'input_'//trim(Atm(this_grid)%nml_filename)//'.nml') + endif + else + Atm(this_grid)%nml_filename = '' + endif + call read_input_nml(Atm(this_grid)%nml_filename) !re-reads into internal namelist +#endif + call read_namelist_fv_grid_nml + call read_namelist_fv_core_nml(Atm(this_grid)) ! do options processing here too? + !TODO test_case_nml moved to test_cases + call mpp_get_current_pelist(Atm(this_grid)%pelist, commID=commID) ! for commID + call mp_start(commID,halo_update_type) + + ! 4. Set up domains + ! This should make use of new fv_nest_nml namelists + !!!! TODO TEMPORARY location for this code + if (Atm(this_grid)%neststruct%nested) then + + if ( Atm(this_grid)%flagstruct%consv_te > 0.) then + call mpp_error(FATAL, 'The global energy fixer cannot be used on a nested grid. consv_te must be set to 0.') + end if + + if (mod(Atm(this_grid)%flagstruct%npx-1 , Atm(this_grid)%neststruct%refinement) /= 0 .or. & + mod(Atm(this_grid)%flagstruct%npy-1, Atm(this_grid)%neststruct%refinement) /= 0) then + call mpp_error(FATAL, 'npx or npy not an even refinement of its coarse grid.') + endif - gid = mpp_pe() + endif - call init_nesting(Atm, grids_on_this_pe, p_split) + !Now only one call to mpp_define_nest_domains for ALL nests + ! set up nest_level, tile_fine, tile_coarse + ! need number of tiles, npx, and npy on each grid + ! 
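! ---------------------------------------------------------------------
! How the all_* exchange coded just below works: each PE fills only its
! own grid's slot (all other entries stay at their initial 0), then an
! element-wise max over the global pelist makes every slot valid on
! every PE. Condensed from the code that follows:
!
!   all_npx(:) = 0                                ! from the declaration
!   all_npx(this_grid) = npx                      ! my grid's contribution
!   call mpp_max(all_npx, ngrids, global_pelist)  ! now valid on all PEs
!
! This avoids a dedicated broadcast per grid at the cost of one
! reduction per shared quantity.
! ---------------------------------------------------------------------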
need to define a global PElist + + all_ntiles(this_grid) = ntiles + call mpp_max(all_ntiles, ngrids, global_pelist) + + all_npx(this_grid) = npx + call mpp_max(all_npx, ngrids, global_pelist) + + all_npy(this_grid) = npy + call mpp_max(all_npy, ngrids, global_pelist) + + all_npz(this_grid) = npz + call mpp_max(all_npz, ngrids, global_pelist) + + ntiles_nest_all = 0 + do n=1,ngrids + if (n/=this_grid) then + Atm(n)%flagstruct%npx = all_npx(n) + Atm(n)%flagstruct%npy = all_npy(n) + Atm(n)%flagstruct%npz = all_npz(n) + Atm(n)%flagstruct%ntiles = all_ntiles(n) + endif + npes_nest_tile(ntiles_nest_all+1:ntiles_nest_all+all_ntiles(n)) = & + Atm(n)%npes_this_grid / all_ntiles(n) + ntiles_nest_all = ntiles_nest_all + all_ntiles(n) + + if (n > 1) then + tile_fine(n) = all_ntiles(n) + tile_fine(n-1) + if (tile_coarse(n) < 1) then !set automatically; only works for single tile parents + tile_coarse(n) = tile_fine(grid_coarse(n)) + endif + icount_coarse(n) = all_npx(n)/nest_refine(n) + jcount_coarse(n) = all_npy(n)/nest_refine(n) + nest_level(n) = nest_level(grid_coarse(n)) + 1 + else + tile_fine(n) = all_ntiles(n) + nest_level(n) = 0 + endif + enddo + + if (mpp_pe() == 0) then + print*, ' NESTING TREE' + do n=1,ngrids + write(*,'(12i4)') n, nest_level(n), nest_ioffsets(n), nest_joffsets(n), icount_coarse(n), jcount_coarse(n), tile_fine(n), tile_coarse(n), nest_refine(n), all_ntiles(n), all_npx(n), all_npy(n) + write(*,*) + enddo + print*, npes_nest_tile(1:ntiles_nest_all) + print*, '' + endif - !This call is needed to set up the pointers for fv_current_grid, even for a single-grid run - !call switch_current_Atm(Atm(1), .false.) - call setup_pointers(Atm(1)) + ! 5. domain_decomp() + call domain_decomp(Atm(this_grid)%flagstruct%npx,Atm(this_grid)%flagstruct%npy,Atm(this_grid)%flagstruct%ntiles,& + Atm(this_grid)%flagstruct%grid_type,Atm(this_grid)%neststruct%nested, & + Atm(this_grid)%layout,Atm(this_grid)%io_layout,Atm(this_grid)%bd,Atm(this_grid)%tile_of_mosaic, & + Atm(this_grid)%gridstruct%square_domain,Atm(this_grid)%npes_per_tile,Atm(this_grid)%domain, & + Atm(this_grid)%domain_for_coupler,Atm(this_grid)%num_contact,Atm(this_grid)%pelist) + call set_domain(Atm(this_grid)%domain) + call broadcast_domains(Atm,Atm(this_grid)%pelist,size(Atm(this_grid)%pelist)) + do n=1,ngrids + tile_id = mpp_get_tile_id(Atm(n)%domain) + Atm(n)%global_tile = tile_id(1) ! only meaningful locally + Atm(n)%npes_per_tile = size(Atm(n)%pelist)/Atm(n)%flagstruct%ntiles ! domain decomp doesn't set this globally + enddo + + ! 6. Set up domain and Atm structure + call tm_register_tracers (MODEL_ATMOS, Atm(this_grid)%flagstruct%ncnst, Atm(this_grid)%flagstruct%nt_prog, & + Atm(this_grid)%flagstruct%pnats, num_family) + if(is_master()) then + write(*,*) 'ncnst=', ncnst,' num_prog=',Atm(this_grid)%flagstruct%nt_prog,' pnats=',Atm(this_grid)%flagstruct%pnats,' dnats=',dnats,& + ' num_family=',num_family + print*, '' + endif + if (dnrts < 0) dnrts = dnats + + do n=1,ngrids + !FIXME still setting up dummy structures for other grids for convenience reasons + !isc, etc. 
set in domain_decomp + call allocate_fv_atmos_type(Atm(n), & + Atm(n)%bd%isd, Atm(n)%bd%ied, & + Atm(n)%bd%jsd, Atm(n)%bd%jed, & + Atm(n)%bd%isc, Atm(n)%bd%iec, & + Atm(n)%bd%jsc, Atm(n)%bd%jec, & + Atm(n)%flagstruct%npx, Atm(n)%flagstruct%npy, Atm(n)%flagstruct%npz, & + Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ncnst, Atm(n)%flagstruct%ncnst-Atm(n)%flagstruct%pnats, & + n/=this_grid, n==this_grid, ngrids) !TODO don't need both of the last arguments + enddo + if ( (Atm(this_grid)%bd%iec-Atm(this_grid)%bd%isc+1).lt.4 .or. (Atm(this_grid)%bd%jec-Atm(this_grid)%bd%jsc+1).lt.4 ) then + if (is_master()) write(*,'(6I6)') Atm(this_grid)%bd%isc, Atm(this_grid)%bd%iec, Atm(this_grid)%bd%jsc, Atm(this_grid)%bd%jec, this_grid + call mpp_error(FATAL,'Domain Decomposition: Cubed Sphere compute domain has a & + &minium requirement of 4 points in X and Y, respectively') + end if + + + !Tile_coarse is needed to determine which processors are needed to send around their + ! data for computing the interpolation coefficients + if (ngrids > 1) then + !reset to universal pelist + call mpp_set_current_pelist( global_pelist ) + !Except for npes_nest_tile all arrays should be just the nests and should NOT include the top level + call mpp_define_nest_domains(global_nest_domain, Atm(this_grid)%domain, & + ngrids-1, nest_level=nest_level(2:ngrids) , & + istart_coarse=nest_ioffsets(2:ngrids), jstart_coarse=nest_joffsets(2:ngrids), & + icount_coarse=icount_coarse(2:ngrids), jcount_coarse=jcount_coarse(2:ngrids), & + npes_nest_tile=npes_nest_tile(1:ntiles_nest_all), & + tile_fine=tile_fine(2:ngrids), tile_coarse=tile_coarse(2:ngrids), & + x_refine=nest_refine(2:ngrids), y_refine=nest_refine(2:ngrids), name="global_nest_domain") + call mpp_set_current_pelist(Atm(this_grid)%pelist) -! Start up MPI + endif - !call mp_assign_gid + allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) !only temporary? + do n=1,ngrids + Atm(this_grid)%neststruct%child_grids(n) = (grid_coarse(n) == this_grid) + allocate(Atm(n)%neststruct%do_remap_bc(ngrids)) + Atm(n)%neststruct%do_remap_bc(:) = .false. + enddo + Atm(this_grid)%neststruct%parent_proc = ANY(tile_coarse == Atm(this_grid)%global_tile) + !Atm(this_grid)%neststruct%child_proc = ANY(Atm(this_grid)%pelist == gid) !this means a nested grid +!!$ if (Atm(this_grid)%neststruct%nestbctype > 1) then +!!$ call mpp_error(FATAL, 'nestbctype > 1 not yet implemented') +!!$ Atm(this_grid)%neststruct%upoff = 0 +!!$ endif +!!$ end if +!!$ +!!$ do nn=1,size(Atm) +!!$ if (n == 1) allocate(Atm(nn)%neststruct%nest_domain_all(size(Atm))) +!!$ Atm(nn)%neststruct%nest_domain_all(n) = Atm(this_grid)%neststruct%nest_domain +!!$ enddo + + if (Atm(this_grid)%gridstruct%bounded_domain .and. is_master()) print*, & + ' Bounded domain: nested = ', Atm(this_grid)%neststruct%nested, ', regional = ', Atm(this_grid)%flagstruct%regional + + ! 7. Init_grid() (including two-way nesting) + call init_grid(Atm(this_grid), Atm(this_grid)%flagstruct%grid_name, Atm(this_grid)%flagstruct%grid_file, & + Atm(this_grid)%flagstruct%npx, Atm(this_grid)%flagstruct%npy, Atm(this_grid)%flagstruct%npz, Atm(this_grid)%flagstruct%ndims, Atm(this_grid)%flagstruct%ntiles, Atm(this_grid)%ng, tile_coarse) + + + ! 8. grid_utils_init() + ! Initialize the SW (2D) part of the model + call grid_utils_init(Atm(this_grid), Atm(this_grid)%flagstruct%npx, Atm(this_grid)%flagstruct%npy, Atm(this_grid)%flagstruct%npz, Atm(this_grid)%flagstruct%non_ortho, Atm(this_grid)%flagstruct%grid_type, Atm(this_grid)%flagstruct%c2l_ord) + + ! 
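! ---------------------------------------------------------------------
! Worked example of the damping diagnostics printed in the block below,
! with assumed inputs: dt_atmos = 1800 s, k_split = 2, n_split = 6,
! p_split = 1 gives
!
!   sdt = 1800 / (6 * 2 * 1) = 150 s
!
! and, taking d_ext = 0.02 with da_min_c ~ 1.e9 m**2 for illustration,
! an external-mode del-2 coefficient of
!
!   d_ext * da_min_c / sdt = 0.02 * 1.e9 / 150. ~ 1.3e5 m**2/s
!
! The numbers are placeholders; only the formulas come from the code.
! ---------------------------------------------------------------------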
Finish up initialization; write damping coefficients dependent upon + + if ( is_master() ) then + sdt = dt_atmos/real(Atm(this_grid)%flagstruct%n_split*Atm(this_grid)%flagstruct%k_split*abs(p_split)) + write(*,*) ' ' + write(*,*) 'Divergence damping Coefficients' + write(*,*) 'For small dt=', sdt + write(*,*) 'External mode del-2 (m**2/s)=', Atm(this_grid)%flagstruct%d_ext*Atm(this_grid)%gridstruct%da_min_c/sdt + write(*,*) 'Internal mode del-2 SMAG dimensionless coeff=', Atm(this_grid)%flagstruct%dddmp + write(*,*) 'Internal mode del-2 background diff=', Atm(this_grid)%flagstruct%d2_bg*Atm(this_grid)%gridstruct%da_min_c/sdt + + if (nord==1) then + write(*,*) 'Internal mode del-4 background diff=', Atm(this_grid)%flagstruct%d4_bg + write(*,*) 'Vorticity del-4 (m**4/s)=', (Atm(this_grid)%flagstruct%vtdm4*Atm(this_grid)%gridstruct%da_min)**2/sdt*1.E-6 + endif + if (Atm(this_grid)%flagstruct%nord==2) write(*,*) 'Internal mode del-6 background diff=', Atm(this_grid)%flagstruct%d4_bg + if (Atm(this_grid)%flagstruct%nord==3) write(*,*) 'Internal mode del-8 background diff=', Atm(this_grid)%flagstruct%d4_bg + write(*,*) 'tracer del-2 diff=', Atm(this_grid)%flagstruct%trdm2 + + write(*,*) 'Vorticity del-4 (m**4/s)=', (Atm(this_grid)%flagstruct%vtdm4*Atm(this_grid)%gridstruct%da_min)**2/sdt*1.E-6 + write(*,*) 'beta=', Atm(this_grid)%flagstruct%beta + write(*,*) ' ' + endif - ! Initialize timing routines - call timing_init - call timing_on('TOTAL') - ! Setup the run from namelist - ntilesMe = size(Atm(:)) !Full number of Atm arrays; one less than number of grids, if multiple grids +!!$ Atm(this_grid)%ts = 300. +!!$ Atm(this_grid)%phis = too_big +!!$ ! The following statements are to prevent the phantom corner regions from +!!$ ! growing instability +!!$ Atm(this_grid)%u = 0. +!!$ Atm(this_grid)%v = 0. +!!$ Atm(this_grid)%ua = too_big +!!$ Atm(this_grid)%va = too_big +!!$ - call run_setup(Atm,dt_atmos, grids_on_this_pe, p_split) ! initializes domain_decomp + !Initialize restart + call fv_restart_init() +! if ( reset_eta ) then +! do n=1, ntilesMe +! call set_eta(npz, Atm(this_grid)%ks, ptop, Atm(this_grid)%ak, Atm(this_grid)%bk, Atm(this_grid)%flagstruct%npz_type) +! enddo +! if(is_master()) write(*,*) "Hybrid sigma-p coordinate has been reset" +! endif - do n=1,ntilesMe - - !In a single-grid run this will still be needed to correctly set the domain - call switch_current_Atm(Atm(n)) - call setup_pointers(Atm(n)) - - target_lon = target_lon * pi/180. - target_lat = target_lat * pi/180. -!-------------------------------------------------- -! override number of tracers by reading field_table -!-------------------------------------------------- + contains + + subroutine set_namelist_pointers(Atm) + type(fv_atmos_type), intent(INOUT), target :: Atm + + !This routine associates the MODULE flag pointers with the ARRAY flag variables for the grid active on THIS pe so the flags can be read in from the namelist. 
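! ---------------------------------------------------------------------
! A standalone toy of the pattern this routine enables: a namelist
! group may list pointers, and as long as each pointer is associated
! before the READ, values land directly in the flag structure. Types
! and names here are invented for the demo:
program nml_pointer_demo
   implicit none
   type :: flags_t
      integer :: npx = 0
   end type flags_t
   type(flags_t), target :: flagstruct
   integer, pointer :: npx
   namelist /demo_nml/ npx
   npx => flagstruct%npx     ! associate first, as set_namelist_pointers does
   npx = 97                  ! a read(unit, nml=demo_nml) would do the same
   print *, flagstruct%npx   ! prints 97
end program nml_pointer_demo
! ---------------------------------------------------------------------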
+ + grid_type => Atm%flagstruct%grid_type + grid_name => Atm%flagstruct%grid_name + grid_file => Atm%flagstruct%grid_file + hord_mt => Atm%flagstruct%hord_mt + kord_mt => Atm%flagstruct%kord_mt + kord_wz => Atm%flagstruct%kord_wz + hord_vt => Atm%flagstruct%hord_vt + hord_tm => Atm%flagstruct%hord_tm + hord_dp => Atm%flagstruct%hord_dp + kord_tm => Atm%flagstruct%kord_tm + hord_tr => Atm%flagstruct%hord_tr + kord_tr => Atm%flagstruct%kord_tr + scale_z => Atm%flagstruct%scale_z + w_max => Atm%flagstruct%w_max + z_min => Atm%flagstruct%z_min + nord => Atm%flagstruct%nord + nord_tr => Atm%flagstruct%nord_tr + dddmp => Atm%flagstruct%dddmp + d2_bg => Atm%flagstruct%d2_bg + d4_bg => Atm%flagstruct%d4_bg + vtdm4 => Atm%flagstruct%vtdm4 + trdm2 => Atm%flagstruct%trdm2 + d2_bg_k1 => Atm%flagstruct%d2_bg_k1 + d2_bg_k2 => Atm%flagstruct%d2_bg_k2 + d2_divg_max_k1 => Atm%flagstruct%d2_divg_max_k1 + d2_divg_max_k2 => Atm%flagstruct%d2_divg_max_k2 + damp_k_k1 => Atm%flagstruct%damp_k_k1 + damp_k_k2 => Atm%flagstruct%damp_k_k2 + n_zs_filter => Atm%flagstruct%n_zs_filter + nord_zs_filter => Atm%flagstruct%nord_zs_filter + full_zs_filter => Atm%flagstruct%full_zs_filter + consv_am => Atm%flagstruct%consv_am + do_sat_adj => Atm%flagstruct%do_sat_adj + do_f3d => Atm%flagstruct%do_f3d + no_dycore => Atm%flagstruct%no_dycore + convert_ke => Atm%flagstruct%convert_ke + do_vort_damp => Atm%flagstruct%do_vort_damp + use_old_omega => Atm%flagstruct%use_old_omega + beta => Atm%flagstruct%beta + n_sponge => Atm%flagstruct%n_sponge + d_ext => Atm%flagstruct%d_ext + nwat => Atm%flagstruct%nwat + use_logp => Atm%flagstruct%use_logp + warm_start => Atm%flagstruct%warm_start + inline_q => Atm%flagstruct%inline_q + shift_fac => Atm%flagstruct%shift_fac + do_schmidt => Atm%flagstruct%do_schmidt + do_cube_transform => Atm%flagstruct%do_cube_transform + stretch_fac => Atm%flagstruct%stretch_fac + target_lat => Atm%flagstruct%target_lat + target_lon => Atm%flagstruct%target_lon + regional => Atm%flagstruct%regional + bc_update_interval => Atm%flagstruct%bc_update_interval + reset_eta => Atm%flagstruct%reset_eta + p_fac => Atm%flagstruct%p_fac + a_imp => Atm%flagstruct%a_imp + n_split => Atm%flagstruct%n_split + m_split => Atm%flagstruct%m_split + k_split => Atm%flagstruct%k_split + use_logp => Atm%flagstruct%use_logp + q_split => Atm%flagstruct%q_split + print_freq => Atm%flagstruct%print_freq + write_3d_diags => Atm%flagstruct%write_3d_diags + npx => Atm%flagstruct%npx + npy => Atm%flagstruct%npy + npz => Atm%flagstruct%npz + npz_type => Atm%flagstruct%npz_type + npz_rst => Atm%flagstruct%npz_rst + ncnst => Atm%flagstruct%ncnst + pnats => Atm%flagstruct%pnats + dnats => Atm%flagstruct%dnats + dnrts => Atm%flagstruct%dnrts + ntiles => Atm%flagstruct%ntiles + nf_omega => Atm%flagstruct%nf_omega + fv_sg_adj => Atm%flagstruct%fv_sg_adj + sg_cutoff => Atm%flagstruct%sg_cutoff + na_init => Atm%flagstruct%na_init + nudge_dz => Atm%flagstruct%nudge_dz + p_ref => Atm%flagstruct%p_ref + dry_mass => Atm%flagstruct%dry_mass + nt_prog => Atm%flagstruct%nt_prog + nt_phys => Atm%flagstruct%nt_phys + tau_h2o => Atm%flagstruct%tau_h2o + delt_max => Atm%flagstruct%delt_max + d_con => Atm%flagstruct%d_con + ke_bg => Atm%flagstruct%ke_bg + consv_te => Atm%flagstruct%consv_te + tau => Atm%flagstruct%tau + rf_cutoff => Atm%flagstruct%rf_cutoff + filter_phys => Atm%flagstruct%filter_phys + dwind_2d => Atm%flagstruct%dwind_2d + breed_vortex_inline => Atm%flagstruct%breed_vortex_inline + range_warn => Atm%flagstruct%range_warn + fill => 
Atm%flagstruct%fill + fill_dp => Atm%flagstruct%fill_dp + fill_wz => Atm%flagstruct%fill_wz + fill_gfs => Atm%flagstruct%fill_gfs + check_negative => Atm%flagstruct%check_negative + non_ortho => Atm%flagstruct%non_ortho + adiabatic => Atm%flagstruct%adiabatic + moist_phys => Atm%flagstruct%moist_phys + do_Held_Suarez => Atm%flagstruct%do_Held_Suarez + do_reed_physics => Atm%flagstruct%do_reed_physics + reed_cond_only => Atm%flagstruct%reed_cond_only + reproduce_sum => Atm%flagstruct%reproduce_sum + adjust_dry_mass => Atm%flagstruct%adjust_dry_mass + fv_debug => Atm%flagstruct%fv_debug + srf_init => Atm%flagstruct%srf_init + mountain => Atm%flagstruct%mountain + remap_t => Atm%flagstruct%remap_t + z_tracer => Atm%flagstruct%z_tracer + old_divg_damp => Atm%flagstruct%old_divg_damp + fv_land => Atm%flagstruct%fv_land + nudge => Atm%flagstruct%nudge + nudge_ic => Atm%flagstruct%nudge_ic + ncep_ic => Atm%flagstruct%ncep_ic + nggps_ic => Atm%flagstruct%nggps_ic + ecmwf_ic => Atm%flagstruct%ecmwf_ic + gfs_phil => Atm%flagstruct%gfs_phil + agrid_vel_rst => Atm%flagstruct%agrid_vel_rst + use_new_ncep => Atm%flagstruct%use_new_ncep + use_ncep_phy => Atm%flagstruct%use_ncep_phy + fv_diag_ic => Atm%flagstruct%fv_diag_ic + external_ic => Atm%flagstruct%external_ic + external_eta => Atm%flagstruct%external_eta + read_increment => Atm%flagstruct%read_increment + + hydrostatic => Atm%flagstruct%hydrostatic + phys_hydrostatic => Atm%flagstruct%phys_hydrostatic + use_hydro_pressure => Atm%flagstruct%use_hydro_pressure + do_uni_zfull => Atm%flagstruct%do_uni_zfull !miz + adj_mass_vmr => Atm%flagstruct%adj_mass_vmr !f1p + hybrid_z => Atm%flagstruct%hybrid_z + Make_NH => Atm%flagstruct%Make_NH + make_hybrid_z => Atm%flagstruct%make_hybrid_z + nudge_qv => Atm%flagstruct%nudge_qv + add_noise => Atm%flagstruct%add_noise + a2b_ord => Atm%flagstruct%a2b_ord + c2l_ord => Atm%flagstruct%c2l_ord + ndims => Atm%flagstruct%ndims + + dx_const => Atm%flagstruct%dx_const + dy_const => Atm%flagstruct%dy_const + deglon_start => Atm%flagstruct%deglon_start + deglon_stop => Atm%flagstruct%deglon_stop + deglat_start => Atm%flagstruct%deglat_start + deglat_stop => Atm%flagstruct%deglat_stop + + deglat => Atm%flagstruct%deglat + + nested => Atm%neststruct%nested + twowaynest => Atm%neststruct%twowaynest + parent_tile => Atm%neststruct%parent_tile + refinement => Atm%neststruct%refinement + nestbctype => Atm%neststruct%nestbctype + nestupdate => Atm%neststruct%nestupdate + nsponge => Atm%neststruct%nsponge + s_weight => Atm%neststruct%s_weight + ioffset => Atm%neststruct%ioffset + joffset => Atm%neststruct%joffset + update_blend => Atm%neststruct%update_blend + + layout => Atm%layout + io_layout => Atm%io_layout + end subroutine set_namelist_pointers + + + subroutine read_namelist_nest_nml + + integer :: f_unit, ios, ierr, dum + namelist /nest_nml/ dum ! ngrids, ntiles, nest_pes, p_split !emptied lmh 7may2019 - !not sure if this works with multiple grids - call tm_register_tracers (MODEL_ATMOS, ncnst, nt_prog, pnats, num_family) - if(is_master()) then - write(*,*) 'ncnst=', ncnst,' num_prog=',nt_prog,' pnats=',pnats,' dnats=',dnats,' num_family=',num_family - print*, '' - endif +#ifdef INTERNAL_FILE_NML + read (input_nml_file,nest_nml,iostat=ios) + ierr = check_nml_error(ios,'nest_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + read (f_unit,nest_nml,iostat=ios) + ierr = check_nml_error(ios,'nest_nml') + call close_file(f_unit) +#endif + if (ierr > 0) then + call mpp_error(FATAL, " &nest_nml is depreciated. 
Please use &fv_nest_nml instead.") + endif - if (grids_on_this_pe(n)) then - call allocate_fv_atmos_type(Atm(n), Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed, & - Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, & - npx, npy, npz, ndims, ncnst, ncnst-pnats, ng, .false., grids_on_this_pe(n), ngrids) + end subroutine read_namelist_nest_nml - if (grids_on_this_pe(n)) then - - call switch_current_Atm(Atm(n)) - call setup_pointers(Atm(n)) - - if ( (Atm(n)%bd%iec-Atm(n)%bd%isc+1).lt.4 .or. (Atm(n)%bd%jec-Atm(n)%bd%jsc+1).lt.4 ) then - if (is_master()) write(*,'(6I6)') Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, n - call mpp_error(FATAL,'Domain Decomposition: Cubed Sphere compute domain has a & - &minium requirement of 4 points in X and Y, respectively') - end if - - endif - - !!CLEANUP: Convenience pointers - Atm(n)%gridstruct%nested => Atm(n)%neststruct%nested - Atm(n)%gridstruct%grid_type => Atm(n)%flagstruct%grid_type - Atm(n)%flagstruct%grid_number => Atm(n)%grid_number - - call init_grid(Atm(n), grid_name, grid_file, npx, npy, npz, ndims, ntiles, ng) - - ! Initialize the SW (2D) part of the model - !!!CLEANUP: this call could definitely use some cleaning up - call grid_utils_init(Atm(n), npx, npy, npz, non_ortho, grid_type, c2l_ord) - - !!!CLEANUP: Are these correctly writing out on all pes? - if ( is_master() ) then - sdt = dt_atmos/real(n_split*k_split*abs(p_split)) - write(*,*) ' ' - write(*,*) 'Divergence damping Coefficients' - write(*,*) 'For small dt=', sdt - write(*,*) 'External mode del-2 (m**2/s)=', d_ext*Atm(n)%gridstruct%da_min_c/sdt - write(*,*) 'Internal mode del-2 SMAG dimensionless coeff=', dddmp - write(*,*) 'Internal mode del-2 background diff=', d2_bg*Atm(n)%gridstruct%da_min_c/sdt - - if (nord==1) then - write(*,*) 'Internal mode del-4 background diff=', d4_bg - write(*,*) 'Vorticity del-4 (m**4/s)=', (vtdm4*Atm(n)%gridstruct%da_min)**2/sdt*1.E-6 - endif - if (nord==2) write(*,*) 'Internal mode del-6 background diff=', d4_bg - if (nord==3) write(*,*) 'Internal mode del-8 background diff=', d4_bg - write(*,*) 'tracer del-2 diff=', trdm2 - - write(*,*) 'Vorticity del-4 (m**4/s)=', (vtdm4*Atm(n)%gridstruct%da_min)**2/sdt*1.E-6 - write(*,*) 'beta=', beta - write(*,*) ' ' - endif - - - Atm(n)%ts = 300. - Atm(n)%phis = too_big - ! The following statements are to prevent the phatom corner regions from - ! growing instability - Atm(n)%u = 0. - Atm(n)%v = 0. - Atm(n)%ua = too_big - Atm(n)%va = too_big - - else !this grid is NOT defined on this pe - - !Allocate dummy arrays - call allocate_fv_atmos_type(Atm(n), Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed, & - Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, & - npx, npy, npz, ndims, ncnst, ncnst-pnats, ng, .true., .false., ngrids) - - !Need to SEND grid_global to any child grids; this is received in setup_aligned_nest in fv_grid_tools - if (Atm(n)%neststruct%nested) then - - call mpp_get_global_domain( Atm(n)%parent_grid%domain, & - isg, ieg, jsg, jeg) - - !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the - ! nested PEs instead of sending it around. 
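! ---------------------------------------------------------------------
! Why the dummy variable works in read_namelist_nest_nml above: the
! &nest_nml group now declares only the unused integer "dum", so a
! legacy input.nml that still sets ngrids/ntiles/nest_pes/p_split there
! produces a namelist read error, which check_nml_error surfaces as
! ierr > 0 and the code escalates to the FATAL message. A standalone
! sketch of the same trap, using plain iostat instead of the FMS
! helpers (file name assumed to exist):
program deprecated_group_demo
   implicit none
   integer :: dum, ios, unit
   namelist /nest_nml/ dum
   open(newunit=unit, file='input.nml', status='old', action='read')
   read(unit, nml=nest_nml, iostat=ios)   ! ios > 0 if the group sets unknown names
   close(unit)
   if (ios > 0) print *, '&nest_nml is deprecated; use &fv_nest_nml instead'
end program deprecated_group_demo
! ---------------------------------------------------------------------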
- if (gid == Atm(n)%parent_grid%pelist(1)) then - call mpp_send(Atm(n)%parent_grid%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile), & - size(Atm(n)%parent_grid%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile)), & - Atm(n)%pelist(1)) !send to p_ind in setup_aligned_nest - call mpp_sync_self() - endif - - if (Atm(n)%neststruct%twowaynest) then - - !This in reality should be very simple. With the - ! restriction that only the compute domain data is - ! sent from the coarse grid, we can compute - ! exactly which coarse grid cells should use - ! which nested-grid data. We then don't need to send around p_ind. - - Atm(n)%neststruct%ind_update_h = -99999 - - if (Atm(n)%parent_grid%tile == Atm(n)%neststruct%parent_tile) then - - isc_p = Atm(n)%parent_grid%bd%isc - iec_p = Atm(n)%parent_grid%bd%iec - jsc_p = Atm(n)%parent_grid%bd%jsc - jec_p = Atm(n)%parent_grid%bd%jec - upoff = Atm(n)%neststruct%upoff - - Atm(n)%neststruct%jsu = jsc_p - Atm(n)%neststruct%jeu = jsc_p-1 - do j=jsc_p,jec_p+1 - if (j < joffset+upoff) then - do i=isc_p,iec_p+1 - Atm(n)%neststruct%ind_update_h(i,j,2) = -9999 - enddo - Atm(n)%neststruct%jsu = Atm(n)%neststruct%jsu + 1 - elseif (j > joffset + (npy-1)/refinement - upoff) then - do i=isc_p,iec_p+1 - Atm(n)%neststruct%ind_update_h(i,j,2) = -9999 - enddo - else - jind = (j - joffset)*refinement + 1 - do i=isc_p,iec_p+1 - Atm(n)%neststruct%ind_update_h(i,j,2) = jind - enddo - if ( (j < joffset + (npy-1)/refinement - upoff) .and. j <= jec_p) Atm(n)%neststruct%jeu = j - endif - !write(mpp_pe()+4000,*) j, joffset, upoff, Atm(n)%neststruct%ind_update_h(isc_p,j,2) - enddo - - Atm(n)%neststruct%isu = isc_p - Atm(n)%neststruct%ieu = isc_p-1 - do i=isc_p,iec_p+1 - if (i < ioffset+upoff) then - Atm(n)%neststruct%ind_update_h(i,:,1) = -9999 - Atm(n)%neststruct%isu = Atm(n)%neststruct%isu + 1 - elseif (i > ioffset + (npx-1)/refinement - upoff) then - Atm(n)%neststruct%ind_update_h(i,:,1) = -9999 - else - Atm(n)%neststruct%ind_update_h(i,:,1) = (i-ioffset)*refinement + 1 - if ( (i < ioffset + (npx-1)/refinement - upoff) .and. i <= iec_p) Atm(n)%neststruct%ieu = i - end if - !write(mpp_pe()+5000,*) i, ioffset, upoff, Atm(n)%neststruct%ind_update_h(i,jsc_p,1) - enddo - - end if - - - end if - - endif - endif - end do - - ! Initialize restart functions - call fv_restart_init() + subroutine read_namelist_fv_nest_nml -! if ( reset_eta ) then -! do n=1, ntilesMe -! call set_eta(npz, Atm(n)%ks, ptop, Atm(n)%ak, Atm(n)%bk) -! enddo -! if(is_master()) write(*,*) "Hybrid sigma-p coordinate has been reset" -! endif + integer :: f_unit, ios, ierr + namelist /fv_nest_nml/ grid_pes, grid_coarse, tile_coarse, nest_refine, & + nest_ioffsets, nest_joffsets, p_split + +#ifdef INTERNAL_FILE_NML + read (input_nml_file,fv_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_nest_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + read (f_unit,fv_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_nest_nml') + call close_file(f_unit) +#endif - if (ntilesMe > 1) call switch_current_Atm(Atm(1)) - if (ntilesMe > 1) call setup_pointers(Atm(1)) + end subroutine read_namelist_fv_nest_nml - end subroutine fv_init -!------------------------------------------------------------------------------- + subroutine read_namelist_fv_grid_nml + + integer :: f_unit, ios, ierr + ! 
local version of these variables to allow PGI compiler to compile + character(len=80) :: grid_name = '' + character(len=120) :: grid_file = '' + namelist /fv_grid_nml/ grid_name, grid_file + +#ifdef INTERNAL_FILE_NML + ! Read Main namelist + read (input_nml_file,fv_grid_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_grid_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + ! Read Main namelist + read (f_unit,fv_grid_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_grid_nml') + rewind (f_unit) +#endif + call write_version_number ( 'FV_CONTROL_MOD', version ) + unit = stdlog() + write(unit, nml=fv_grid_nml) + + !Basic option processing + if (len_trim(grid_file) /= 0) Atm(this_grid)%flagstruct%grid_file = grid_file + if (len_trim(grid_name) /= 0) Atm(this_grid)%flagstruct%grid_name = grid_name + + + end subroutine read_namelist_fv_grid_nml + + subroutine read_namelist_fv_core_nml(Atm) + + type(fv_atmos_type), intent(inout) :: Atm + integer :: f_unit, ios, ierr + real :: dim0 = 180. ! base dimension + real :: dt0 = 1800. ! base time step + real :: ns0 = 5. ! base nsplit for base dimension + real :: dimx, dl, dp, dxmin, dymin, d_fac + real :: umax = 350. ! max wave speed for grid_type>3 + + integer :: n0split + + ! local version of these variables to allow PGI compiler to compile + character(len=128) :: res_latlon_dynamics = '' + character(len=128) :: res_latlon_tracers = '' + + namelist /fv_core_nml/npx, npy, ntiles, npz, npz_type, npz_rst, layout, io_layout, ncnst, nwat, & + use_logp, p_fac, a_imp, k_split, n_split, m_split, q_split, print_freq, write_3d_diags, & + do_schmidt, do_cube_transform, & + hord_mt, hord_vt, hord_tm, hord_dp, hord_tr, shift_fac, stretch_fac, target_lat, target_lon, & + kord_mt, kord_wz, kord_tm, kord_tr, fv_debug, fv_land, nudge, do_sat_adj, do_f3d, & + external_ic, read_increment, ncep_ic, nggps_ic, ecmwf_ic, use_new_ncep, use_ncep_phy, fv_diag_ic, & + external_eta, res_latlon_dynamics, res_latlon_tracers, scale_z, w_max, z_min, & + dddmp, d2_bg, d4_bg, vtdm4, trdm2, d_ext, delt_max, beta, non_ortho, n_sponge, & + warm_start, adjust_dry_mass, mountain, d_con, ke_bg, nord, nord_tr, convert_ke, use_old_omega, & + dry_mass, grid_type, do_Held_Suarez, do_reed_physics, reed_cond_only, & + consv_te, fill, filter_phys, fill_dp, fill_wz, fill_gfs, consv_am, & + range_warn, dwind_2d, inline_q, z_tracer, reproduce_sum, adiabatic, do_vort_damp, no_dycore, & + tau, tau_h2o, rf_cutoff, nf_omega, hydrostatic, fv_sg_adj, sg_cutoff, breed_vortex_inline, & + na_init, nudge_dz, hybrid_z, Make_NH, n_zs_filter, nord_zs_filter, full_zs_filter, reset_eta, & + pnats, dnats, dnrts, a2b_ord, remap_t, p_ref, d2_bg_k1, d2_bg_k2, & + c2l_ord, dx_const, dy_const, umax, deglat, & + deglon_start, deglon_stop, deglat_start, deglat_stop, & + phys_hydrostatic, use_hydro_pressure, make_hybrid_z, old_divg_damp, add_noise, & + nested, twowaynest, nudge_qv, & + nestbctype, nestupdate, nsponge, s_weight, & + check_negative, nudge_ic, halo_update_type, gfs_phil, agrid_vel_rst, & + do_uni_zfull, adj_mass_vmr, update_blend, regional, bc_update_interval + +#ifdef INTERNAL_FILE_NML + ! Read FVCORE namelist + read (input_nml_file,fv_core_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_core_nml') + ! Reset input_file_nml to default behavior (CHECK do we still need this???) + !call read_input_nml +#else + f_unit = open_namelist_file(Atm%nml_filename) + ! 
Read FVCORE namelist + read (f_unit,fv_core_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_core_nml') + call close_file(f_unit) +#endif + call write_version_number ( 'FV_CONTROL_MOD', version ) + unit = stdlog() + write(unit, nml=fv_core_nml) + + if (len_trim(res_latlon_dynamics) /= 0) Atm%flagstruct%res_latlon_dynamics = res_latlon_dynamics + if (len_trim(res_latlon_tracers) /= 0) Atm%flagstruct%res_latlon_tracers = res_latlon_tracers + + !*** single tile for Cartesian grids + if (grid_type>3) then + ntiles=1 + non_ortho = .false. + nf_omega = 0 + endif + + if (.not. (nested .or. regional)) Atm%neststruct%npx_global = npx + + ! Define n_split if not in namelist + if (ntiles==6) then + dimx = 4.0*(npx-1) + if ( hydrostatic ) then + if ( npx >= 120 ) ns0 = 6 + else + if ( npx <= 45 ) then + ns0 = 6 + elseif ( npx <=90 ) then + ns0 = 7 + else + ns0 = 8 + endif + endif + else + dimx = max ( npx, 2*(npy-1) ) + endif + + if (grid_type < 4) then + n0split = nint ( ns0*abs(dt_atmos)*dimx/(dt0*dim0) + 0.49 ) + elseif (grid_type == 4 .or. grid_type == 7) then + n0split = nint ( 2.*umax*dt_atmos/sqrt(dx_const**2 + dy_const**2) + 0.49 ) + elseif (grid_type == 5 .or. grid_type == 6) then + if (grid_type == 6) then + deglon_start = 0.; deglon_stop = 360. + endif + dl = (deglon_stop-deglon_start)*pi/(180.*(npx-1)) + dp = (deglat_stop-deglat_start)*pi/(180.*(npy-1)) + + dxmin=dl*radius*min(cos(deglat_start*pi/180.-Atm%bd%ng*dp), & + cos(deglat_stop *pi/180.+Atm%bd%ng*dp)) + dymin=dp*radius + n0split = nint ( 2.*umax*dt_atmos/sqrt(dxmin**2 + dymin**2) + 0.49 ) + endif + n0split = max ( 1, n0split ) + + if ( n_split == 0 ) then + n_split = nint( real(n0split)/real(k_split*abs(p_split)) * stretch_fac + 0.5 ) + if(is_master()) write(*,*) 'For k_split (remapping)=', k_split + if(is_master()) write(*,198) 'n_split is set to ', n_split, ' for resolution-dt=',npx,npy,ntiles,dt_atmos + else + if(is_master()) write(*,199) 'Using n_split from the namelist: ', n_split + endif + if (is_master() .and. n == 1 .and. abs(p_split) > 1) then + write(*,199) 'Using p_split = ', p_split + endif + + if (old_divg_damp) then + if (is_master()) write(*,*) " fv_control: using AM2/AM3 damping methods " + d2_bg_k1 = 6. ! factor for d2_bg (k=1) - default(4.) + d2_bg_k2 = 4. ! factor for d2_bg (k=2) - default(2.) + d2_divg_max_k1 = 0.02 ! d2_divg max value (k=1) - default(0.05) + d2_divg_max_k2 = 0.01 ! d2_divg max value (k=2) - default(0.02) + damp_k_k1 = 0. ! damp_k value (k=1) - default(0.05) + damp_k_k2 = 0. ! damp_k value (k=2) - default(0.025) + elseif (n_sponge == 0 ) then + if ( d2_bg_k1 > 1. ) d2_bg_k1 = 0.20 + if ( d2_bg_k2 > 1. ) d2_bg_k2 = 0.015 + endif + + if ( .not.hydrostatic ) then + if ( m_split==0 ) then + m_split = 1. + abs(dt_atmos)/real(k_split*n_split*abs(p_split)) + if (abs(a_imp) < 0.5) then + if(is_master()) write(*,199) 'm_split is set to ', m_split + endif + endif + if(is_master()) then + write(*,*) 'Off center implicit scheme param=', a_imp + write(*,*) ' p_fac=', p_fac + endif + endif + + if(is_master()) then + if (n_sponge >= 0) write(*,199) 'Using n_sponge : ', n_sponge + write(*,197) 'Using non_ortho : ', non_ortho + endif + +197 format(A,l7) +198 format(A,i2.2,A,i4.4,'x',i4.4,'x',i1.1,'-',f9.3) +199 format(A,i3.3) + + !if (.not. (nested .or. regional)) alpha = alpha*pi !TODO for test_case_nml + + !allocate(Atm%neststruct%child_grids(size(Atm))) !TODO want to remove + !Atm(N)%neststruct%child_grids = .false. + + target_lon = target_lon * pi/180. + target_lat = target_lat * pi/180. 
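! [Editor's note] A self-contained sketch of the default-n_split arithmetic
! above. All settings are assumed for illustration (a non-hydrostatic C96
! case: npx = npy = 97, ntiles = 6, hence ns0 = 8 and dimx = 4*(npx-1) = 384;
! dt_atmos = 450 s; k_split = 2; p_split = 1; stretch_fac = 1.) and are not
! taken from this patch:

    program n_split_example
      implicit none
      ! base dimension, base time step, base nsplit -- as declared above
      real    :: dim0 = 180., dt0 = 1800., ns0 = 8., stretch_fac = 1.
      real    :: dt_atmos = 450., dimx
      integer :: npx = 97, k_split = 2, p_split = 1, n0split, n_split
      dimx    = 4.0*(npx-1)
      ! same rounding as the code above: nint(4.76) = 5
      n0split = max(1, nint( ns0*abs(dt_atmos)*dimx/(dt0*dim0) + 0.49 ))
      ! nint(5./2. + 0.5) = 3 acoustic sub-steps per vertical-remapping step
      n_split = nint( real(n0split)/real(k_split*abs(p_split))*stretch_fac + 0.5 )
      print *, 'n0split =', n0split, ', n_split =', n_split
    end program n_split_example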
+ + end subroutine read_namelist_fv_core_nml + + + end subroutine fv_control_init !------------------------------------------------------------------------------- - - subroutine fv_end(Atm, grids_on_this_pe) + + subroutine fv_end(Atm, this_grid) type(fv_atmos_type), intent(inout) :: Atm(:) - logical, intent(INOUT) :: grids_on_this_pe(:) + integer, intent(IN) :: this_grid integer :: n call timing_off('TOTAL') - call timing_prt( gid ) + call timing_prt( mpp_pe() ) - call fv_restart_end(Atm, grids_on_this_pe) + call fv_restart_end(Atm(this_grid)) call fv_io_exit() ! Free temporary memory from sw_core routines - ! Deallocate call grid_utils_end - do n = 1, ntilesMe + do n = 1, ngrids call deallocate_fv_atmos_type(Atm(n)) end do @@ -507,730 +1075,4 @@ subroutine fv_end(Atm, grids_on_this_pe) end subroutine fv_end !------------------------------------------------------------------------------- -!------------------------------------------------------------------------------- -! -! run_setup :: initialize run from namelist -! - subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) - type(fv_atmos_type), intent(inout), target :: Atm(:) - real, intent(in) :: dt_atmos - logical, intent(INOUT) :: grids_on_this_pe(:) - integer, intent(INOUT) :: p_split - - character(len=80) :: filename, tracerName, errString, nested_grid_filename - integer :: ios, ierr, f_unit, unit - logical :: exists - - real :: dim0 = 180. ! base dimension - real :: dt0 = 1800. ! base time step - real :: ns0 = 5. ! base nsplit for base dimension - ! For cubed sphere 5 is better - !real :: umax = 350. ! max wave speed for grid_type>3 ! Now defined above - real :: dimx, dl, dp, dxmin, dymin, d_fac - - integer :: n0split - integer :: n, nn, i - - integer :: pe_counter - -! local version of these variables to allow PGI compiler to compile - character(len=128) :: res_latlon_dynamics = '' - character(len=128) :: res_latlon_tracers = '' - character(len=80) :: grid_name = '' - character(len=120) :: grid_file = '' - - namelist /fv_grid_nml/ grid_name, grid_file - namelist /fv_core_nml/npx, npy, ntiles, npz, npz_rst, layout, io_layout, ncnst, nwat, & - use_logp, p_fac, a_imp, k_split, n_split, m_split, q_split, print_freq, do_schmidt, & - hord_mt, hord_vt, hord_tm, hord_dp, hord_tr, shift_fac, stretch_fac, target_lat, target_lon, & - kord_mt, kord_wz, kord_tm, kord_tr, fv_debug, fv_land, nudge, do_sat_adj, do_f3d, & - external_ic, ncep_ic, nggps_ic, ecmwf_ic, use_new_ncep, use_ncep_phy, fv_diag_ic, & - res_latlon_dynamics, res_latlon_tracers, scale_z, w_max, z_min, & - dddmp, d2_bg, d4_bg, vtdm4, trdm2, d_ext, delt_max, beta, non_ortho, n_sponge, & - warm_start, adjust_dry_mass, mountain, d_con, ke_bg, nord, nord_tr, convert_ke, use_old_omega, & - dry_mass, grid_type, do_Held_Suarez, do_reed_physics, reed_cond_only, & - consv_te, fill, filter_phys, fill_dp, fill_wz, consv_am, & - range_warn, dwind_2d, inline_q, z_tracer, reproduce_sum, adiabatic, do_vort_damp, no_dycore, & - tau, tau_h2o, rf_cutoff, nf_omega, hydrostatic, fv_sg_adj, breed_vortex_inline, & - na_init, hybrid_z, Make_NH, n_zs_filter, nord_zs_filter, full_zs_filter, reset_eta, & - pnats, dnats, a2b_ord, remap_t, p_ref, d2_bg_k1, d2_bg_k2, & - c2l_ord, dx_const, dy_const, umax, deglat, & - deglon_start, deglon_stop, deglat_start, deglat_stop, & - phys_hydrostatic, use_hydro_pressure, make_hybrid_z, old_divg_damp, add_noise, & - nested, twowaynest, parent_grid_num, parent_tile, nudge_qv, & - refinement, nestbctype, nestupdate, nsponge, s_weight, & - ioffset, joffset, 
check_negative, nudge_ic, halo_update_type, gfs_phil, agrid_vel_rst, & - do_uni_zfull, adj_mass_vmr - - namelist /test_case_nml/test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size - -#ifdef GFS_PHYS - real, dimension(2048) :: fdiag = 0. - namelist /nggps_diag_nml/ fdiag -#endif - - pe_counter = mpp_root_pe() - -! Make alpha = 0 the default: - alpha = 0. - bubble_do = .false. - test_case = 11 ! (USGS terrain) - - filename = "input.nml" - - inquire(file=filename,exist=exists) - if (.not. exists) then ! This will be replaced with fv_error wrapper - if(is_master()) write(*,*) "file ",trim(filename)," doesn't exist" - call mpp_error(FATAL,'FV core terminating 1') - endif - -#ifdef INTERNAL_FILE_NML -! rewind (f_unit) - ! Read Main namelist - read (input_nml_file,fv_grid_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_grid_nml') - ! Read Test_Case namelist - read (input_nml_file,test_case_nml,iostat=ios) - ierr = check_nml_error(ios,'test_case_nml') -#else - f_unit=open_namelist_file() - rewind (f_unit) - ! Read Main namelist - read (f_unit,fv_grid_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_grid_nml') - rewind (f_unit) -#endif - - unit = stdlog() - write(unit, nml=fv_grid_nml) - - do n=1,size(Atm) - - call switch_current_Atm(Atm(n), .false.) - call setup_pointers(Atm(n)) - Atm(n)%grid_number = n - if (grids_on_this_pe(n)) then - call fv_diag_init_gn(Atm(n)) - endif - -#ifdef INTERNAL_FILE_NML - if (size(Atm) > 1) then - call mpp_error(FATAL, "Nesting not implemented with INTERNAL_FILE_NML") - endif - ! Read FVCORE namelist - read (input_nml_file,fv_core_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_core_nml') - ! Read Test_Case namelist - read (input_nml_file,test_case_nml,iostat=ios) - ierr = check_nml_error(ios,'test_case_nml') -#ifdef GFS_PHYS - ! Read NGGPS_DIAG namelist - read (input_nml_file,nggps_diag_nml,iostat=ios) - ierr = check_nml_error(ios,'nggps_diag_nml') -!--- check fdiag to see if it is an interval or a list - if (nint(fdiag(2)) == 0) then - Atm(n)%fdiag(1) = fdiag(1) - do i = 2, size(fdiag,1) - Atm(n)%fdiag(i) = Atm(n)%fdiag(i-1) + fdiag(1) - enddo - else - atm(n)%fdiag = fdiag - endif -#endif -#else - if (size(Atm) == 1) then - f_unit = open_namelist_file() - else if (n == 1) then - f_unit = open_namelist_file('input.nml') - else - write(nested_grid_filename,'(A10, I2.2, A4)') 'input_nest', n, '.nml' - f_unit = open_namelist_file(nested_grid_filename) - endif - - ! Read FVCORE namelist - read (f_unit,fv_core_nml,iostat=ios) - ierr = check_nml_error(ios,'fv_core_nml') - - ! Read Test_Case namelist - rewind (f_unit) - read (f_unit,test_case_nml,iostat=ios) - ierr = check_nml_error(ios,'test_case_nml') -#ifdef GFS_PHYS - ! 
Read NGGPS_DIAG namelist - rewind (f_unit) - read (f_unit,nggps_diag_nml,iostat=ios) - ierr = check_nml_error(ios,'nggps_diag_nml') -!--- check fdiag to see if it is an interval or a list - if (nint(fdiag(2)) == 0) then - Atm(n)%fdiag(1) = fdiag(1) - do i = 2, size(fdiag,1) - Atm(n)%fdiag(i) = Atm(n)%fdiag(i-1) + fdiag(1) - enddo - else - atm(n)%fdiag = fdiag - endif -#endif - call close_file(f_unit) -#endif - if (len_trim(grid_file) /= 0) Atm(n)%flagstruct%grid_file = grid_file - if (len_trim(grid_name) /= 0) Atm(n)%flagstruct%grid_name = grid_name - if (len_trim(res_latlon_dynamics) /= 0) Atm(n)%flagstruct%res_latlon_dynamics = res_latlon_dynamics - if (len_trim(res_latlon_tracers) /= 0) Atm(n)%flagstruct%res_latlon_tracers = res_latlon_tracers - - write(unit, nml=fv_core_nml) - write(unit, nml=test_case_nml) -#ifdef GFS_PHYS - write(unit, nml=nggps_diag_nml) -#endif - - !*** single tile for Cartesian grids - if (grid_type>3) then - ntiles=1 - non_ortho = .false. - nf_omega = 0 - endif - - if (.not. nested) Atm(n)%neststruct%npx_global = npx - - ! Define n_split if not in namelist - if (ntiles==6) then - dimx = 4.0*(npx-1) - if ( hydrostatic ) then - if ( npx >= 120 ) ns0 = 6 - else - if ( npx <= 45 ) then - ns0 = 6 - elseif ( npx <=90 ) then - ns0 = 7 - else - ns0 = 8 - endif - endif - else - dimx = max ( npx, 2*(npy-1) ) - endif - - if (grid_type < 4) then - n0split = nint ( ns0*abs(dt_atmos)*dimx/(dt0*dim0) + 0.49 ) - elseif (grid_type == 4 .or. grid_type == 7) then - n0split = nint ( 2.*umax*dt_atmos/sqrt(dx_const**2 + dy_const**2) + 0.49 ) - elseif (grid_type == 5 .or. grid_type == 6) then - if (grid_type == 6) then - deglon_start = 0.; deglon_stop = 360. - endif - dl = (deglon_stop-deglon_start)*pi/(180.*(npx-1)) - dp = (deglat_stop-deglat_start)*pi/(180.*(npy-1)) - - dxmin=dl*radius*min(cos(deglat_start*pi/180.-ng*dp), & - cos(deglat_stop *pi/180.+ng*dp)) - dymin=dp*radius - n0split = nint ( 2.*umax*dt_atmos/sqrt(dxmin**2 + dymin**2) + 0.49 ) - endif - n0split = max ( 1, n0split ) - - if ( n_split == 0 ) then - n_split = nint( real(n0split)/real(k_split*abs(p_split)) * stretch_fac + 0.5 ) - if(is_master()) write(*,*) 'For k_split (remapping)=', k_split - if(is_master()) write(*,198) 'n_split is set to ', n_split, ' for resolution-dt=',npx,npy,ntiles,dt_atmos - else - if(is_master()) write(*,199) 'Using n_split from the namelist: ', n_split - endif - if (is_master() .and. n == 1 .and. abs(p_split) > 1) then - write(*,199) 'Using p_split = ', p_split - endif - - if (Atm(n)%neststruct%nested) then - do i=1,n-1 - if (Atm(i)%grid_number == parent_grid_num) then - Atm(n)%parent_grid => Atm(i) - exit - end if - end do - if (.not. associated(Atm(n)%parent_grid)) then - write(errstring,'(2(A,I3))') "Could not find parent grid #", parent_grid_num, ' for grid #', n - call mpp_error(FATAL, errstring) - end if - - !Note that if a gnomonic grid has a parent it is a NESTED gnomonic grid and therefore only has one tile - if ( Atm(n)%parent_grid%flagstruct%grid_type < 3 .and. & - .not. associated(Atm(n)%parent_grid%parent_grid)) then - if (parent_tile > 6 .or. parent_tile < 1) then - call mpp_error(FATAL, 'parent tile must be between 1 and 6 if the parent is a cubed-sphere grid') - end if - else - if (parent_tile /= 1) then - call mpp_error(FATAL, 'parent tile must be 1 if the parent is not a cubed-sphere grid') - end if - end if - - if ( refinement < 1 ) call mpp_error(FATAL, 'grid refinement must be positive') - - if (nestupdate == 1 .or. 
nestupdate == 2) then - - if (mod(npx-1,refinement) /= 0 .or. mod(npy-1,refinement) /= 0) then - call mpp_error(WARNING, 'npx-1 or npy-1 is not evenly divisible by the refinement ratio; averaging update cannot be mass-conservative.') - end if - - end if - - if ( consv_te > 0.) then - call mpp_error(FATAL, 'The global energy fixer cannot be used on a nested grid. consv_te must be set to 0.') - end if - - Atm(n)%neststruct%refinement_of_global = Atm(n)%neststruct%refinement * Atm(n)%parent_grid%neststruct%refinement_of_global - max_refinement_of_global = max(Atm(n)%neststruct%refinement_of_global,max_refinement_of_global) - Atm(n)%neststruct%npx_global = Atm(n)%neststruct%refinement * Atm(n)%parent_grid%neststruct%npx_global - - else - Atm(n)%neststruct%ioffset = -999 - Atm(n)%neststruct%joffset = -999 - Atm(n)%neststruct%parent_tile = -1 - Atm(n)%neststruct%refinement = -1 - end if - - if (Atm(n)%neststruct%nested) then - if (Atm(n)%flagstruct%grid_type >= 4 .and. Atm(n)%parent_grid%flagstruct%grid_type >= 4) then - Atm(n)%flagstruct%dx_const = Atm(n)%parent_grid%flagstruct%dx_const / real(Atm(n)%neststruct%refinement) - Atm(n)%flagstruct%dy_const = Atm(n)%parent_grid%flagstruct%dy_const / real(Atm(n)%neststruct%refinement) - end if - end if - - -!---------------------------------------- -! Adjust divergence damping coefficients: -!---------------------------------------- -! d_fac = real(n0split)/real(n_split) -! dddmp = dddmp * d_fac -! d2_bg = d2_bg * d_fac -! d4_bg = d4_bg * d_fac -! d_ext = d_ext * d_fac -! vtdm4 = vtdm4 * d_fac - if (old_divg_damp) then - if (is_master()) write(*,*) " fv_control: using original values for divergence damping " - d2_bg_k1 = 6. ! factor for d2_bg (k=1) - default(4.) - d2_bg_k2 = 4. ! factor for d2_bg (k=2) - default(2.) - d2_divg_max_k1 = 0.02 ! d2_divg max value (k=1) - default(0.05) - d2_divg_max_k2 = 0.01 ! d2_divg max value (k=2) - default(0.02) - damp_k_k1 = 0. ! damp_k value (k=1) - default(0.05) - damp_k_k2 = 0. ! damp_k value (k=2) - default(0.025) - elseif (n_sponge == 0 ) then - if ( d2_bg_k1 > 1. ) d2_bg_k1 = 0.20 - if ( d2_bg_k2 > 1. ) d2_bg_k2 = 0.015 - endif - -! if ( beta < 1.e-5 ) beta = 0. ! beta < 0 is used for non-hydrostatic "one_grad_p" - - if ( .not.hydrostatic ) then - if ( m_split==0 ) then - m_split = 1. + abs(dt_atmos)/real(k_split*n_split*abs(p_split)) - if (abs(a_imp) < 0.5) then - if(is_master()) write(*,199) 'm_split is set to ', m_split - endif - endif - if(is_master()) then - write(*,*) 'Off center implicit scheme param=', a_imp - write(*,*) ' p_fac=', p_fac - endif - endif - - if(is_master()) then - if (n_sponge >= 0) write(*,199) 'Using n_sponge : ', n_sponge - write(*,197) 'Using non_ortho : ', non_ortho - endif - - 197 format(A,l7) - 198 format(A,i2.2,A,i4.4,'x',i4.4,'x',i1.1,'-',f9.3) - 199 format(A,i3.3) - - if (.not. nested) alpha = alpha*pi - - - allocate(Atm(n)%neststruct%child_grids(size(Atm))) - Atm(N)%neststruct%child_grids = .false. - - !Broadcast data - - !Check layout - - enddo - - !Set pelists - do n=1,size(Atm) - if (ANY(Atm(n)%pelist == gid)) then - call mpp_set_current_pelist(Atm(n)%pelist) - call mpp_get_current_pelist(Atm(n)%pelist, commID=commID) - call mp_start(commID,halo_update_type) - endif - - if (Atm(n)%neststruct%nested) then - Atm(n)%neststruct%parent_proc = ANY(Atm(n)%parent_grid%pelist == gid) - Atm(n)%neststruct%child_proc = ANY(Atm(n)%pelist == gid) - endif - enddo - - do n=1,size(Atm) - - call switch_current_Atm(Atm(n),.false.) - call setup_pointers(Atm(n)) - !! 
CLEANUP: WARNING not sure what changes to domain_decomp may cause - call domain_decomp(npx,npy,ntiles,grid_type,nested,Atm(n),layout,io_layout) - enddo - - !!! CLEANUP: This sets the pelist to ALL, which is also - !!! required for the define_nest_domains step in the next loop. - !!! Later the pelist must be reset to the 'local' pelist. - call broadcast_domains(Atm) - - do n=1,size(Atm) - call switch_current_Atm(Atm(n)) - call setup_pointers(Atm(n)) - - if (nested) then - if (mod(npx-1 , refinement) /= 0 .or. mod(npy-1, refinement) /= 0) & - call mpp_error(FATAL, 'npx or npy not an even refinement of its coarse grid.') - - !Pelist needs to be set to ALL (which should have been done - !in broadcast_domains) to get this to work - call mpp_define_nest_domains(Atm(n)%neststruct%nest_domain, Atm(n)%domain, Atm(parent_grid_num)%domain, & - 7, parent_tile, & - 1, npx-1, 1, npy-1, & !Grid cells, not points - ioffset, ioffset + (npx-1)/refinement - 1, & - joffset, joffset + (npy-1)/refinement - 1, & - (/ (i,i=0,mpp_npes()-1) /), extra_halo = 0, name="nest_domain") !What pelist to use? - call mpp_define_nest_domains(Atm(n)%neststruct%nest_domain, Atm(n)%domain, Atm(parent_grid_num)%domain, & - 7, parent_tile, & - 1, npx-1, 1, npy-1, & !Grid cells, not points - ioffset, ioffset + (npx-1)/refinement - 1, & - joffset, joffset + (npy-1)/refinement - 1, & - (/ (i,i=0,mpp_npes()-1) /), extra_halo = 0, name="nest_domain") !What pelist to use? -! (/ (i,i=0,mpp_npes()-1) /), extra_halo = 2, name="nest_domain_for_BC") !What pelist to use? - - Atm(parent_grid_num)%neststruct%child_grids(n) = .true. - - if (Atm(n)%neststruct%nestbctype > 1) then - - call mpp_error(FATAL, 'nestbctype > 1 not yet implemented') - - !This check is due to a bug which has not yet been identified. Beware. -! if (Atm(n)%parent_grid%flagstruct%hord_tr == 7) & -! call mpp_error(FATAL, "Flux-form nested BCs (nestbctype > 1) should not use hord_tr == 7 (on parent grid), since there is no guarantee of tracer mass conservation with this option.") - -!!$ if (Atm(n)%flagstruct%q_split > 0 .and. Atm(n)%parent_grid%flagstruct%q_split > 0) then -!!$ if (mod(Atm(n)%flagstruct%q_split,Atm(n)%parent_grid%flagstruct%q_split) /= 0) call mpp_error(FATAL, & -!!$ "Flux-form nested BCs (nestbctype > 1) require q_split on the nested grid to be evenly divisible by that on the coarse grid.") -!!$ endif -!!$ if (mod((Atm(n)%npx-1),Atm(n)%neststruct%refinement) /= 0 .or. mod((Atm(n)%npy-1),Atm(n)%neststruct%refinement) /= 0) call mpp_error(FATAL, & -!!$ "Flux-form nested BCs (nestbctype > 1) requires npx and npy to be one more than a multiple of the refinement ratio.") -!!$ Atm(n)%parent_grid%neststruct%do_flux_BCs = .true. -!!$ if (Atm(n)%neststruct%nestbctype == 3 .or. Atm(n)%neststruct%nestbctype == 4) Atm(n)%parent_grid%neststruct%do_2way_flux_BCs = .true. 
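! [Editor's note] For the mpp_define_nest_domains calls in the removed code
! above, the coarse-grid side of the nest spans the index range
!    ioffset .. ioffset + (npx-1)/refinement - 1   (and likewise in j).
! A worked example under assumed, illustrative values (not from this patch):
! a 193x193 nest (npx = npy = 193) with refinement = 3 and
! ioffset = joffset = 11 occupies (193-1)/3 = 64 coarse cells per direction,
! i.e. parent-tile cells i = 11..74 and j = 11..74.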
- Atm(n)%neststruct%upoff = 0 - endif - - end if - - do nn=1,size(Atm) - if (n == 1) allocate(Atm(nn)%neststruct%nest_domain_all(size(Atm))) - Atm(nn)%neststruct%nest_domain_all(n) = Atm(n)%neststruct%nest_domain - enddo - - end do - - do n=1,size(Atm) - if (ANY(Atm(n)%pelist == gid)) then - call mpp_set_current_pelist(Atm(n)%pelist) - endif - enddo - - end subroutine run_setup - - subroutine init_nesting(Atm, grids_on_this_pe, p_split) - - type(fv_atmos_type), intent(inout), allocatable :: Atm(:) - logical, allocatable, intent(INOUT) :: grids_on_this_pe(:) - integer, intent(INOUT) :: p_split - character(100) :: pe_list_name - integer :: nest_pes(100) - integer :: n, npes, ntiles, pecounter, i - integer, allocatable :: pelist(:) - integer :: f_unit, ios, ierr - - !This is an OPTIONAL namelist, that needs to be read before everything else - namelist /nest_nml/ ngrids, ntiles, nest_pes, p_split - - call mp_assign_gid - - nest_pes = 0 - ntiles = -999 - -#ifdef INTERNAL_FILE_NML - read (input_nml_file,nest_nml,iostat=ios) - ierr = check_nml_error(ios,'nest_nml') -#else - f_unit=open_namelist_file() - rewind (f_unit) - read (f_unit,nest_nml,iostat=ios) - ierr = check_nml_error(ios,'nest_nml') - call close_file(f_unit) -#endif - - if (ntiles /= -999) ngrids = ntiles - if (ngrids > 10) call mpp_error(FATAL, "More than 10 nested grids not supported") - - allocate(Atm(ngrids)) - - allocate(grids_on_this_pe(ngrids)) - grids_on_this_pe = .false. !initialization - - npes = mpp_npes() - - ! Need to get a global pelist to send data around later? - allocate( pelist_all(npes) ) - pelist_all = (/ (i,i=0,npes-1) /) - pelist_all = pelist_all + mpp_root_pe() - - if (ngrids == 1) then - - !Set up the single pelist - allocate(Atm(1)%pelist(npes)) - Atm(1)%pelist = (/(i, i=0, npes-1)/) - Atm(1)%pelist = Atm(1)%pelist + mpp_root_pe() - call mpp_declare_pelist(Atm(1)%pelist) - call mpp_set_current_pelist(Atm(1)%pelist) - !Now set in domain_decomp - !masterproc = Atm(1)%pelist(1) - call setup_master(Atm(1)%pelist) - grids_on_this_pe(1) = .true. - Atm(1)%npes_this_grid = npes - - else - - pecounter = mpp_root_pe() - do n=1,ngrids - if (n == 1) then - pe_list_name = '' - else - write(pe_list_name,'(A4, I2.2)') 'nest', n - endif - - if (nest_pes(n) == 0) then - if (n < ngrids) call mpp_error(FATAL, 'Only nest_pes(ngrids) in nest_nml can be zero; preceeding values must be nonzero.') - allocate(Atm(n)%pelist(npes-pecounter)) - Atm(n)%pelist = (/(i, i=pecounter, npes-1)/) - if (n > 1) then - call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) - !Make sure nested-grid input file exists - if (.not. file_exist('input_'//trim(pe_list_name)//'.nml')) then - call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") - endif - endif - exit - else - allocate(Atm(n)%pelist(nest_pes(n))) - Atm(n)%pelist = (/ (i, i=pecounter, pecounter+nest_pes(n)-1) /) - if (Atm(n)%pelist(nest_pes(n)) >= npes) then - call mpp_error(FATAL, 'PEs assigned by nest_pes in nest_nml exceeds number of available PEs.') - endif - - call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) - !Make sure nested-grid input file exists - if (n > 1) then - if (.not. 
file_exist('input_'//trim(pe_list_name)//'.nml')) then - call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") - endif - endif - pecounter = pecounter+nest_pes(n) - endif - enddo - - !Set pelists - do n=1,ngrids - Atm(n)%npes_this_grid = size(Atm(n)%pelist) - if (ANY(gid == Atm(n)%pelist)) then - call mpp_set_current_pelist(Atm(n)%pelist) - !now set in domain_decomp - !masterproc = Atm(n)%pelist(1) - call setup_master(Atm(n)%pelist) - grids_on_this_pe(n) = .true. -#if defined (INTERNAL_FILE_NML) - if (n > 1) call read_input_nml -#else - !Namelist file read in fv_control.F90 -#endif - exit - endif - enddo - - if (pecounter /= npes) then - call mpp_error(FATAL, 'nest_pes in nest_nml does not assign all of the available PEs.') - endif - endif - - !Layout is checked later, in fv_control - - end subroutine init_nesting - - subroutine setup_pointers(Atm) - - type(fv_atmos_type), intent(INOUT), target :: Atm - - !This routine associates the MODULE flag pointers with the ARRAY flag variables for the grid active on THIS pe so the flags can be read in from the namelist. - - res_latlon_dynamics => Atm%flagstruct%res_latlon_dynamics - res_latlon_tracers => Atm%flagstruct%res_latlon_tracers - - grid_type => Atm%flagstruct%grid_type - grid_name => Atm%flagstruct%grid_name - grid_file => Atm%flagstruct%grid_file - hord_mt => Atm%flagstruct%hord_mt - kord_mt => Atm%flagstruct%kord_mt - kord_wz => Atm%flagstruct%kord_wz - hord_vt => Atm%flagstruct%hord_vt - hord_tm => Atm%flagstruct%hord_tm - hord_dp => Atm%flagstruct%hord_dp - kord_tm => Atm%flagstruct%kord_tm - hord_tr => Atm%flagstruct%hord_tr - kord_tr => Atm%flagstruct%kord_tr - scale_z => Atm%flagstruct%scale_z - w_max => Atm%flagstruct%w_max - z_min => Atm%flagstruct%z_min - nord => Atm%flagstruct%nord - nord_tr => Atm%flagstruct%nord_tr - dddmp => Atm%flagstruct%dddmp - d2_bg => Atm%flagstruct%d2_bg - d4_bg => Atm%flagstruct%d4_bg - vtdm4 => Atm%flagstruct%vtdm4 - trdm2 => Atm%flagstruct%trdm2 - d2_bg_k1 => Atm%flagstruct%d2_bg_k1 - d2_bg_k2 => Atm%flagstruct%d2_bg_k2 - d2_divg_max_k1 => Atm%flagstruct%d2_divg_max_k1 - d2_divg_max_k2 => Atm%flagstruct%d2_divg_max_k2 - damp_k_k1 => Atm%flagstruct%damp_k_k1 - damp_k_k2 => Atm%flagstruct%damp_k_k2 - n_zs_filter => Atm%flagstruct%n_zs_filter - nord_zs_filter => Atm%flagstruct%nord_zs_filter - full_zs_filter => Atm%flagstruct%full_zs_filter - consv_am => Atm%flagstruct%consv_am - do_sat_adj => Atm%flagstruct%do_sat_adj - do_f3d => Atm%flagstruct%do_f3d - no_dycore => Atm%flagstruct%no_dycore - convert_ke => Atm%flagstruct%convert_ke - do_vort_damp => Atm%flagstruct%do_vort_damp - use_old_omega => Atm%flagstruct%use_old_omega - beta => Atm%flagstruct%beta - n_sponge => Atm%flagstruct%n_sponge - d_ext => Atm%flagstruct%d_ext - nwat => Atm%flagstruct%nwat - use_logp => Atm%flagstruct%use_logp - warm_start => Atm%flagstruct%warm_start - inline_q => Atm%flagstruct%inline_q - shift_fac => Atm%flagstruct%shift_fac - do_schmidt => Atm%flagstruct%do_schmidt - stretch_fac => Atm%flagstruct%stretch_fac - target_lat => Atm%flagstruct%target_lat - target_lon => Atm%flagstruct%target_lon - reset_eta => Atm%flagstruct%reset_eta - p_fac => Atm%flagstruct%p_fac - a_imp => Atm%flagstruct%a_imp - n_split => Atm%flagstruct%n_split - m_split => Atm%flagstruct%m_split - k_split => Atm%flagstruct%k_split - use_logp => Atm%flagstruct%use_logp - q_split => Atm%flagstruct%q_split - print_freq => Atm%flagstruct%print_freq - npx => Atm%flagstruct%npx - npy => Atm%flagstruct%npy - npz => 
Atm%flagstruct%npz - npz_rst => Atm%flagstruct%npz_rst - ncnst => Atm%flagstruct%ncnst - pnats => Atm%flagstruct%pnats - dnats => Atm%flagstruct%dnats - ntiles => Atm%flagstruct%ntiles - nf_omega => Atm%flagstruct%nf_omega - fv_sg_adj => Atm%flagstruct%fv_sg_adj - na_init => Atm%flagstruct%na_init - p_ref => Atm%flagstruct%p_ref - dry_mass => Atm%flagstruct%dry_mass - nt_prog => Atm%flagstruct%nt_prog - nt_phys => Atm%flagstruct%nt_phys - tau_h2o => Atm%flagstruct%tau_h2o - delt_max => Atm%flagstruct%delt_max - d_con => Atm%flagstruct%d_con - ke_bg => Atm%flagstruct%ke_bg - consv_te => Atm%flagstruct%consv_te - tau => Atm%flagstruct%tau - rf_cutoff => Atm%flagstruct%rf_cutoff - filter_phys => Atm%flagstruct%filter_phys - dwind_2d => Atm%flagstruct%dwind_2d - breed_vortex_inline => Atm%flagstruct%breed_vortex_inline - range_warn => Atm%flagstruct%range_warn - fill => Atm%flagstruct%fill - fill_dp => Atm%flagstruct%fill_dp - fill_wz => Atm%flagstruct%fill_wz - check_negative => Atm%flagstruct%check_negative - non_ortho => Atm%flagstruct%non_ortho - adiabatic => Atm%flagstruct%adiabatic - moist_phys => Atm%flagstruct%moist_phys - do_Held_Suarez => Atm%flagstruct%do_Held_Suarez - do_reed_physics => Atm%flagstruct%do_reed_physics - reed_cond_only => Atm%flagstruct%reed_cond_only - reproduce_sum => Atm%flagstruct%reproduce_sum - adjust_dry_mass => Atm%flagstruct%adjust_dry_mass - fv_debug => Atm%flagstruct%fv_debug - srf_init => Atm%flagstruct%srf_init - mountain => Atm%flagstruct%mountain - remap_t => Atm%flagstruct%remap_t - z_tracer => Atm%flagstruct%z_tracer - old_divg_damp => Atm%flagstruct%old_divg_damp - fv_land => Atm%flagstruct%fv_land - nudge => Atm%flagstruct%nudge - nudge_ic => Atm%flagstruct%nudge_ic - ncep_ic => Atm%flagstruct%ncep_ic - nggps_ic => Atm%flagstruct%nggps_ic - ecmwf_ic => Atm%flagstruct%ecmwf_ic - gfs_phil => Atm%flagstruct%gfs_phil - agrid_vel_rst => Atm%flagstruct%agrid_vel_rst - use_new_ncep => Atm%flagstruct%use_new_ncep - use_ncep_phy => Atm%flagstruct%use_ncep_phy - fv_diag_ic => Atm%flagstruct%fv_diag_ic - external_ic => Atm%flagstruct%external_ic - - hydrostatic => Atm%flagstruct%hydrostatic - phys_hydrostatic => Atm%flagstruct%phys_hydrostatic - use_hydro_pressure => Atm%flagstruct%use_hydro_pressure - do_uni_zfull => Atm%flagstruct%do_uni_zfull !miz - adj_mass_vmr => Atm%flagstruct%adj_mass_vmr !f1p - hybrid_z => Atm%flagstruct%hybrid_z - Make_NH => Atm%flagstruct%Make_NH - make_hybrid_z => Atm%flagstruct%make_hybrid_z - nudge_qv => Atm%flagstruct%nudge_qv - add_noise => Atm%flagstruct%add_noise - a2b_ord => Atm%flagstruct%a2b_ord - c2l_ord => Atm%flagstruct%c2l_ord - ndims => Atm%flagstruct%ndims - - dx_const => Atm%flagstruct%dx_const - dy_const => Atm%flagstruct%dy_const - deglon_start => Atm%flagstruct%deglon_start - deglon_stop => Atm%flagstruct%deglon_stop - deglat_start => Atm%flagstruct%deglat_start - deglat_stop => Atm%flagstruct%deglat_stop - - deglat => Atm%flagstruct%deglat - - nested => Atm%neststruct%nested - twowaynest => Atm%neststruct%twowaynest - parent_tile => Atm%neststruct%parent_tile - refinement => Atm%neststruct%refinement - nestbctype => Atm%neststruct%nestbctype - nestupdate => Atm%neststruct%nestupdate - nsponge => Atm%neststruct%nsponge - s_weight => Atm%neststruct%s_weight - ioffset => Atm%neststruct%ioffset - joffset => Atm%neststruct%joffset - - layout => Atm%layout - io_layout => Atm%io_layout - end subroutine setup_pointers - - end module fv_control_mod diff --git a/model/fv_current_grid.F90 b/model/fv_current_grid.F90 
deleted file mode 100644 index 0c474e7cb..000000000 --- a/model/fv_current_grid.F90 +++ /dev/null @@ -1,251 +0,0 @@ -!*********************************************************************** -!* GNU Lesser General Public License -!* -!* This file is part of the FV3 dynamical core. -!* -!* The FV3 dynamical core is free software: you can redistribute it -!* and/or modify it under the terms of the -!* GNU Lesser General Public License as published by the -!* Free Software Foundation, either version 3 of the License, or -!* (at your option) any later version. -!* -!* The FV3 dynamical core is distributed in the hope that it will be -!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty -!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -!* See the GNU General Public License for more details. -!* -!* You should have received a copy of the GNU Lesser General Public -!* License along with the FV3 dynamical core. -!* If not, see . -!*********************************************************************** -module fv_current_grid_mod - -#ifdef FV_CURRENT_GRID - -#include - use mpp_domains_mod, only: domain2d - use fms_io_mod, only: restart_file_type - use fv_arrays_mod, only: fv_atmos_type, fv_diag_type, max_step - use time_manager_mod, only: time_type - - implicit none - public - - type(fv_atmos_type), pointer :: current_Atm - integer, pointer :: grid_number - - !Timestep-related variables. - !Each grid should have its own set of timing utilities - - type(time_type) , pointer :: Time_init, Time, Run_length, Time_end, Time_step_atmos - - logical , pointer :: grid_active - - !----------------------------------------------------------------------- - ! Five prognostic state variables for the f-v dynamics - !----------------------------------------------------------------------- - ! dyn_state: - ! D-grid prognostatic variables: u, v, and delp (and other scalars) - ! - ! o--------u(i,j+1)----------o - ! | | | - ! | | | - ! v(i,j)------scalar(i,j)----v(i+1,j) - ! | | | - ! | | | - ! o--------u(i,j)------------o - ! - ! The C grid component is "diagnostic" in that it is predicted every time step - ! from the D grid variables. - real, pointer :: u(:,:,:) ! D grid zonal wind (m/s) - real, pointer :: v(:,:,:) ! D grid meridional wind (m/s) - real, pointer :: pt(:,:,:) ! temperature (K) - real, pointer :: delp(:,:,:) ! pressure thickness (pascal) - real, pointer :: q(:,:,:,:) ! specific humidity and constituents - - !---------------------- - ! non-hydrostatic state: - !---------------------------------------------------------------------- - real, pointer :: w(:,:,:) ! cell center vertical wind (m/s) - real, pointer :: delz(:,:,:) ! layer thickness (meters) - real, pointer :: ze0(:,:,:) ! height at layer edges for remapping - - !----------------------------------------------------------------------- - ! Auxilliary pressure arrays: - ! The 5 vars below can be re-computed from delp and ptop. - !----------------------------------------------------------------------- - ! dyn_aux: - real, pointer :: ps (:,:) ! Surface pressure (pascal) - real, pointer :: pe (:,:,: ) ! edge pressure (pascal) - real, pointer :: pk (:,:,:) ! pe**cappa - real, pointer :: peln(:,:,:) ! ln(pe) - real, pointer :: pkz (:,:,:) ! finite-volume mean pk -#ifdef PKC - real, pointer :: pkc (:,:,:) ! finite-volume edge pk -#endif - - ! For phys coupling: - real, pointer :: u_srf(:,:) ! Surface u-wind - real, pointer :: v_srf(:,:) ! Surface v-wind - real, pointer :: sgh(:,:) ! 
Terrain standard deviation - real, pointer :: oro(:,:) ! land fraction (1: all land; 0: all water) - real, pointer :: ts(:,:) ! skin temperature (sst) from NCEP/GFS (K) -- tile - - !----------------------------------------------------------------------- - ! Others: - !----------------------------------------------------------------------- - real, pointer :: phis(:,:) ! Surface geopotential (g*Z_surf) - real, pointer :: omga(:,:,:) ! Vertical pressure velocity (pa/s) - real, pointer :: ua(:,:,:) ! (ua, va) are mostly used as the A grid winds - real, pointer :: va(:,:,:) - real, pointer :: uc(:,:,:) ! (uc, vc) are mostly used as the C grid winds - real, pointer :: vc(:,:,:) - - real, pointer :: ak(:) - real, pointer :: bk(:) - - ! Accumulated Mass flux arrays - real, pointer :: mfx(:,:,:) - real, pointer :: mfy(:,:,:) - ! Accumulated Courant number arrays - real, pointer :: cx(:,:,:) - real, pointer :: cy(:,:,:) - - - -!!!!!!!!!!!!!!!!!! -! From fv_mp_mod ! -!!!!!!!!!!!!!!!!!! - - integer, pointer, dimension(:) :: pelist - - integer , pointer :: ng !this SHOULD be a constant, but structure elements are not allowed to be constants - type(domain2D) , pointer :: domain -#if defined(SPMD) - - type(domain2D) , pointer :: domain_for_coupler ! domain used in coupled model with halo = 1. - - integer , pointer :: num_contact, npes_per_tile, tile, npes_this_grid - -#endif - -!!!!!!!!!!!!!!!! -!fv_diagnostics! -!!!!!!!!!!!!!!!! - - type(fv_diag_type), pointer :: idiag - - -!!!!!!!!!!!!!!!!!!!!!! - ! From fv_grid_utils ! -!!!!!!!!!!!!!!!!!!!!!! - - - real , pointer :: ptop - - -!!!!!!!!!!!!!! -! From fv_io ! -!!!!!!!!!!!!!! - type(restart_file_type) , pointer :: Fv_restart, SST_restart, Fv_tile_restart, & - Rsf_restart, Mg_restart, Lnd_restart, Tra_restart - - - !Hold on to coarse-grid global grid, so we don't have to waste processor time getting it again when starting to do grid nesting - real, dimension(:,:,:,:) , pointer :: grid_global - - - integer, pointer :: atmos_axes(:) - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - -contains - - subroutine switch_current_grid_pointers(Atm) - type(fv_atmos_type), intent(IN), target :: Atm - - grid_number => Atm%grid_number - - Time_init => Atm%Time_init - Time => Atm%Time - Run_length => Atm%Run_length - Time_end => Atm%Time_end - Time_step_atmos => Atm%Time_step_atmos - grid_active => Atm%grid_active - u => Atm%u - v => Atm%v - pt => Atm%pt - delp => Atm%delp - q => Atm%q - w => Atm%w - delz => Atm%delz - ze0 => Atm%ze0 - ps => Atm%ps - pe => Atm%pe - pk => Atm%pk - peln => Atm%peln - pkz => Atm%pkz -#ifdef PKC - pkc => Atm%pkc -#endif - u_srf => Atm%u_srf - v_srf => Atm%v_srf - sgh => Atm%sgh - oro => Atm%oro - ts => Atm%ts - phis => Atm%phis - omga => Atm%omga - ua => Atm%ua - va => Atm%va - uc => Atm%uc - vc => Atm%vc - ak => Atm%ak - bk => Atm%bk - mfx => Atm%mfx - mfy => Atm%mfy - cx => Atm%cx - cy => Atm%cy - isc => Atm%isc - iec => Atm%iec - jsc => Atm%jsc - jec => Atm%jec - - pelist => Atm%pelist - ng => Atm%ng - domain => Atm%domain - domain_for_coupler => Atm%domain_for_coupler - num_contact => Atm%num_contact - npes_per_tile => Atm%npes_per_tile - tile => Atm%tile - npes_this_grid => Atm%npes_this_grid - is => Atm%is - ie => Atm%ie - js => Atm%js - je => Atm%je - isd => Atm%isd - ied => Atm%ied - jsd => Atm%jsd - jed => Atm%jed - isc => Atm%isc - iec => Atm%iec - jsc => Atm%jsc - jec => Atm%jec - - idiag => Atm%idiag - Fv_restart => Atm%Fv_restart - SST_restart => Atm%SST_restart - 
Fv_tile_restart => Atm%Fv_tile_restart - Rsf_restart => Atm%Rsf_restart - Mg_restart => Atm%Mg_restart - Lnd_restart => Atm%Lnd_restart - Tra_restart => Atm%Tra_restart - - grid_global => Atm%grid_global - atmos_axes => Atm%atmos_axes - end subroutine switch_current_grid_pointers - -#endif - - end module fv_current_grid_mod diff --git a/model/fv_dynamics.F90 b/model/fv_dynamics.F90 index edff41f2d..3d630b48c 100644 --- a/model/fv_dynamics.F90 +++ b/model/fv_dynamics.F90 @@ -37,6 +37,10 @@ module fv_dynamics_mod use tracer_manager_mod, only: get_tracer_index use fv_sg_mod, only: neg_adj3 use fv_nesting_mod, only: setup_nested_grid_BCs + use fv_regional_mod, only: regional_boundary_update, set_regional_BCs + use fv_regional_mod, only: dump_field, H_STAGGER, U_STAGGER, V_STAGGER + use fv_regional_mod, only: a_step, p_step, k_step + use fv_regional_mod, only: current_time_in_seconds use boundary_mod, only: nested_grid_BC_apply_intT use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, fv_grid_bounds_type use fv_nwp_nudge_mod, only: do_adiabatic_init @@ -47,6 +51,7 @@ module fv_dynamics_mod logical :: bad_range = .false. real, allocatable :: rf(:) integer :: kmax=1 + real :: agrav #ifdef HIWPP real, allocatable:: u00(:,:,:), v00(:,:,:) @@ -54,16 +59,12 @@ module fv_dynamics_mod private public :: fv_dynamics -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains !----------------------------------------------------------------------- ! fv_dynamics :: FV dynamical core driver !----------------------------------------------------------------------- - + subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, & reproduce_sum, kappa, cp_air, zvir, ptop, ks, ncnst, n_split, & q_split, u, v, w, delz, hydrostatic, pt, delp, q, & @@ -99,12 +100,12 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K) real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal) real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) ! specific humidity and constituents - real, intent(inout) :: delz(bd%isd:,bd%jsd:,1:) ! delta-height (m); non-hydrostatic only + real, intent(inout) :: delz(bd%is:,bd%js:,1:) ! delta-height (m); non-hydrostatic only real, intent(inout) :: ze0(bd%is:, bd%js: ,1:) ! height at edges (m); non-hydrostatic ! ze0 no longer used !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -114,7 +115,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je) ! ln(pe) real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) ! finite-volume mean pk real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:) - + !----------------------------------------------------------------------- ! 
Others: !----------------------------------------------------------------------- @@ -137,7 +138,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, type(fv_flags_type), intent(INOUT) :: flagstruct type(fv_nest_type), intent(INOUT) :: neststruct type(domain2d), intent(INOUT) :: domain - type(fv_atmos_type), intent(INOUT) :: parent_grid + type(fv_atmos_type), pointer, intent(IN) :: parent_grid type(fv_diag_type), intent(IN) :: idiag ! Local Arrays @@ -150,8 +151,9 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, real, dimension(bd%is:bd%ie):: cvm real, allocatable :: dp1(:,:,:), dtdt_m(:,:,:), cappa(:,:,:) real:: akap, rdg, ph1, ph2, mdt, gam, amdt, u0 + real:: recip_k_split,reg_bc_update_time integer:: kord_tracer(ncnst) - integer :: i,j,k, n, iq, n_map, nq, nwat, k_split + integer :: i,j,k, n, iq, n_map, nq, nr, nwat, k_split integer :: sphum, liq_wat = -999, ice_wat = -999 ! GFDL physics integer :: rainwat = -999, snowwat = -999, graupel = -999, cld_amt = -999 integer :: theta_d = -999 @@ -175,12 +177,14 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, agrav = 1. / grav dt2 = 0.5*bdt k_split = flagstruct%k_split + recip_k_split=1./real(k_split) nwat = flagstruct%nwat nq = nq_tot - flagstruct%dnats + nr = nq_tot - flagstruct%dnrts rdg = -rdgas * agrav allocate ( dp1(isd:ied, jsd:jed, 1:npz) ) - - + + #ifdef MOIST_CAPPA allocate ( cappa(isd:ied,jsd:jed,npz) ) call init_ijk_mem(isd,ied, jsd,jed, npz, cappa, 0.) @@ -188,38 +192,39 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, allocate ( cappa(isd:isd,jsd:jsd,1) ) cappa = 0. #endif - !We call this BEFORE converting pt to virtual potential temperature, + !We call this BEFORE converting pt to virtual potential temperature, !since we interpolate on (regular) temperature rather than theta. if (gridstruct%nested .or. ANY(neststruct%child_grids)) then call timing_on('NEST_BCs') call setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & - u, v, w, pt, delp, delz, q, uc, vc, pkz, & - neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, & - gridstruct, flagstruct, neststruct, & - neststruct%nest_timestep, neststruct%tracer_nest_timestep, & - domain, bd, nwat) - -#ifndef SW_DYNAMICS - if (gridstruct%nested) then - !Correct halo values have now been set up for BCs; we can go ahead and apply them too... - call nested_grid_BC_apply_intT(pt, & - 0, 0, npx, npy, npz, bd, 1., 1., & - neststruct%pt_BC, bctype=neststruct%nestbctype ) + u, v, w, pt, delp, delz, q, uc, vc, & #ifdef USE_COND - call nested_grid_BC_apply_intT(q_con, & - 0, 0, npx, npy, npz, bd, 1., 1., & - neststruct%q_con_BC, bctype=neststruct%nestbctype ) + q_con, & #ifdef MOIST_CAPPA - call nested_grid_BC_apply_intT(cappa, & - 0, 0, npx, npy, npz, bd, 1., 1., & - neststruct%cappa_BC, bctype=neststruct%nestbctype ) + cappa, & #endif #endif - endif -#endif + neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, & + gridstruct, flagstruct, neststruct, & + neststruct%nest_timestep, neststruct%tracer_nest_timestep, & + domain, parent_grid, bd, nwat, ak, bk) + call timing_off('NEST_BCs') endif + ! For the regional domain set values valid the beginning of the + ! current large timestep at the boundary points of the pertinent + ! prognostic arrays. + + if (flagstruct%regional) then + call timing_on('Regional_BCs') + + reg_bc_update_time=current_time_in_seconds + call set_regional_BCs & !<-- Insert values into the boundary region valid for the start of this large timestep. 
+ (delp,delz,w,pt,q_con,cappa,q,u,v,uc,vc, bd, npz, ncnst, reg_bc_update_time ) + + call timing_off('Regional_BCs') + endif if ( flagstruct%no_dycore ) then if ( nwat.eq.2 .and. (.not.hydrostatic) ) then @@ -273,7 +278,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, enddo else !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,zvir,q,q_con,sphum,liq_wat, & -!$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, & +!$OMP rainwat,ice_wat,snowwat,graupel,pkz,flagstruct, & !$OMP cappa,kappa,rdg,delp,pt,delz,nwat) & !$OMP private(cvm) do k=1,npz @@ -315,7 +320,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #endif call prt_mxm('PS', ps, is, ie, js, je, ng, 1, 0.01, gridstruct%area_64, domain) call prt_mxm('T_dyn_b', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) - if ( .not. hydrostatic) call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) + if ( .not. hydrostatic) call prt_mxm('delz', delz, is, ie, js, je, 0, npz, 1., gridstruct%area_64, domain) call prt_mxm('delp_b ', delp, is, ie, js, je, ng, npz, 0.01, gridstruct%area_64, domain) call prt_mxm('pk_b', pk, is, ie, js, je, 0, npz+1, 1.,gridstruct%area_64, domain) call prt_mxm('pkz_b', pkz,is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain) @@ -346,7 +351,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if( flagstruct%tau > 0. ) then if ( gridstruct%grid_type<4 ) then call Rayleigh_Super(abs(bdt), npx, npy, npz, ks, pfull, phis, flagstruct%tau, u, v, w, pt, & - ua, va, delz, gridstruct%agrid, cp_air, rdgas, ptop, hydrostatic, (.not. neststruct%nested), flagstruct%rf_cutoff, gridstruct, domain, bd) + ua, va, delz, gridstruct%agrid, cp_air, rdgas, ptop, hydrostatic, & + .not. gridstruct%bounded_domain, flagstruct%rf_cutoff, gridstruct, domain, bd) else call Rayleigh_Friction(abs(bdt), npx, npy, npz, ks, pfull, flagstruct%tau, u, v, w, pt, & ua, va, delz, cp_air, rdgas, ptop, hydrostatic, .true., flagstruct%rf_cutoff, gridstruct, domain, bd) @@ -410,6 +416,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, call timing_on('FV_DYN_LOOP') do n_map=1, k_split ! first level of time-split + k_step = n_map call timing_on('COMM_TOTAL') #ifdef USE_COND call start_group_halo_update(i_pack(11), q_con, domain) @@ -444,8 +451,8 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #endif call timing_on('DYN_CORE') - call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, n_split, zvir, cp_air, akap, cappa, grav, hydrostatic, & - u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, & + call dyn_core(npx, npy, npz, ng, sphum, nq, mdt, n_map, n_split, zvir, cp_air, akap, cappa, grav, hydrostatic, & + u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, & uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, ks, & gridstruct, flagstruct, neststruct, idiag, bd, & domain, n_map==1, i_pack, last_step, time_total) @@ -453,24 +460,24 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #ifdef SW_DYNAMICS -!$OMP parallel do default(none) shared(is,ie,js,je,delp,agrav) +!!$OMP parallel do default(none) shared(is,ie,js,je,ps,delp,agrav) do j=js,je do i=is,ie ps(i,j) = delp(i,j,1) * agrav enddo enddo #else - if( .not. flagstruct%inline_q .and. nq /= 0 ) then + if( .not. flagstruct%inline_q .and. nq /= 0 ) then !-------------------------------------------------------- ! 
Perform large-time-step scalar transport using the accumulated CFL and ! mass fluxes call timing_on('tracer_2d') !!! CLEANUP: merge these two calls? - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then call tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, & flagstruct%hord_tr, q_split, mdt, idiag%id_divg, i_pack(10), & flagstruct%nord_tr, flagstruct%trdm2, & - k_split, neststruct, parent_grid) + k_split, neststruct, parent_grid, n_map) else if ( flagstruct%z_tracer ) then call tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, nq, & @@ -484,23 +491,25 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, endif call timing_off('tracer_2d') +#ifdef FILL2D if ( flagstruct%hord_tr<8 .and. flagstruct%moist_phys ) then call timing_on('Fill2D') if ( liq_wat > 0 ) & - call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,liq_wat), delp, gridstruct%area, domain, neststruct%nested, npx, npy) + call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,liq_wat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( rainwat > 0 ) & - call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,rainwat), delp, gridstruct%area, domain, neststruct%nested, npx, npy) + call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,rainwat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( ice_wat > 0 ) & - call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,ice_wat), delp, gridstruct%area, domain, neststruct%nested, npx, npy) + call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,ice_wat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( snowwat > 0 ) & - call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,snowwat), delp, gridstruct%area, domain, neststruct%nested, npx, npy) + call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,snowwat), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) if ( graupel > 0 ) & - call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,graupel), delp, gridstruct%area, domain, neststruct%nested, npx, npy) + call fill2D(is, ie, js, je, ng, npz, q(isd,jsd,1,graupel), delp, gridstruct%area, domain, gridstruct%bounded_domain, npx, npy) call timing_off('Fill2D') endif +#endif if( last_step .and. idiag%id_divg>0 ) then - used = send_data(idiag%id_divg, dp1, fv_time) + used = send_data(idiag%id_divg, dp1, fv_time) if(flagstruct%fv_debug) call prt_mxm('divg', dp1, is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain) endif endif @@ -513,7 +522,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, ! Eulerian coordinate. !------------------------------------------------------------------------ - do iq=1,nq + do iq=1,nr kord_tracer(iq) = flagstruct%kord_tr if ( iq==cld_amt ) kord_tracer(iq) = 9 ! 
monotonic enddo @@ -525,15 +534,27 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, #endif call Lagrangian_to_Eulerian(last_step, consv_te, ps, pe, delp, & - pkz, pk, mdt, bdt, npz, is,ie,js,je, isd,ied,jsd,jed, & - nq, nwat, sphum, q_con, u, v, w, delz, pt, q, phis, & + pkz, pk, mdt, bdt, npx, npy, npz, is,ie,js,je, isd,ied,jsd,jed, & + nr, nwat, sphum, q_con, u, v, w, delz, pt, q, phis, & zvir, cp_air, akap, cappa, flagstruct%kord_mt, flagstruct%kord_wz, & kord_tracer, flagstruct%kord_tm, peln, te_2d, & ng, ua, va, omga, dp1, ws, fill, reproduce_sum, & idiag%id_mdt>0, dtdt_m, ptop, ak, bk, pfull, gridstruct, domain, & flagstruct%do_sat_adj, hydrostatic, hybrid_z, do_omega, & - flagstruct%adiabatic, do_adiabatic_init) + flagstruct%adiabatic, do_adiabatic_init, & + flagstruct%c2l_ord, bd, flagstruct%fv_debug, & + flagstruct%moist_phys) + if ( flagstruct%fv_debug ) then + if (is_master()) write(*,'(A, I3, A1, I3)') 'finished k_split ', n_map, '/', k_split + call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) + call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) + endif #ifdef AVEC_TIMERS call avec_timer_stop(6) #endif @@ -544,6 +565,14 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, 0, 0, npx, npy, npz, bd, real(n_map+1), real(k_split), & neststruct%cappa_BC, bctype=neststruct%nestbctype ) endif + if ( flagstruct%regional .and. .not. last_step) then + reg_bc_update_time=current_time_in_seconds+(n_map+1)*mdt + call regional_boundary_update(cappa, 'cappa', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time ) + endif #endif if( last_step ) then @@ -566,6 +595,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, end if #endif enddo ! n_map loop + call timing_off('FV_DYN_LOOP') if ( idiag%id_mdt > 0 .and. (.not.do_adiabatic_init) ) then ! 
Output temperature tendency due to inline moist physics: @@ -605,15 +635,6 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, q(isd,jsd,1,snowwat), & q(isd,jsd,1,graupel), check_negative=flagstruct%check_negative) endif - if ( flagstruct%fv_debug ) then - call prt_mxm('T_dyn_a3', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain) - call prt_mxm('SPHUM_dyn', q(isd,jsd,1,sphum ), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - call prt_mxm('liq_wat_dyn', q(isd,jsd,1,liq_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - call prt_mxm('rainwat_dyn', q(isd,jsd,1,rainwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - call prt_mxm('ice_wat_dyn', q(isd,jsd,1,ice_wat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - call prt_mxm('snowwat_dyn', q(isd,jsd,1,snowwat), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - call prt_mxm('graupel_dyn', q(isd,jsd,1,graupel), is, ie, js, je, ng, npz, 1.,gridstruct%area_64, domain) - endif endif if( (flagstruct%consv_am.or.idiag%id_amdt>0.or.idiag%id_aam>0) .and. (.not.do_adiabatic_init) ) then @@ -622,14 +643,14 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if( idiag%id_aam>0 ) then used = send_data(idiag%id_aam, te_2d, fv_time) if ( prt_minmax ) then - gam = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0) + gam = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0) if( is_master() ) write(6,*) 'Total AAM =', gam endif endif endif if( (flagstruct%consv_am.or.idiag%id_amdt>0) .and. (.not.do_adiabatic_init) ) then -!$OMP parallel do default(none) shared(is,ie,js,je,te_2d,teq,dt2,ps2,ps,idiag) +!$OMP parallel do default(none) shared(is,ie,js,je,te_2d,teq,dt2,ps2,ps,idiag) do j=js,je do i=is,ie ! Note: the mountain torque computation contains also numerical error @@ -640,7 +661,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if( idiag%id_amdt>0 ) used = send_data(idiag%id_amdt, te_2d/bdt, fv_time) if ( flagstruct%consv_am .or. prt_minmax ) then - amdt = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.) + amdt = g_sum( domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.) u0 = -radius*amdt/g_sum( domain, m_fac, is, ie, js, je, ng, gridstruct%area_64, 0,reproduce=.true.) if(is_master() .and. prt_minmax) & write(6,*) 'Dynamic AM tendency (Hadleys)=', amdt/(bdt*1.e18), 'del-u (per day)=', u0*86400./bdt @@ -671,7 +692,7 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, endif 911 call cubed_to_latlon(u, v, ua, va, gridstruct, & - npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%nested, flagstruct%c2l_ord, bd) + npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%bounded_domain, flagstruct%c2l_ord, bd) deallocate(dp1) deallocate(cappa) @@ -685,21 +706,23 @@ subroutine fv_dynamics(npx, npy, npz, nq_tot, ng, bdt, consv_te, fill, if ( flagstruct%range_warn ) then call range_check('UA_dyn', ua, is, ie, js, je, ng, npz, gridstruct%agrid, & - -280., 280., bad_range) + -280., 280., bad_range, fv_time) call range_check('VA_dyn', ua, is, ie, js, je, ng, npz, gridstruct%agrid, & - -280., 280., bad_range) + -280., 280., bad_range, fv_time) call range_check('TA_dyn', pt, is, ie, js, je, ng, npz, gridstruct%agrid, & - 150., 335., bad_range) + 150., 335., bad_range, fv_time) if ( .not. 
hydrostatic ) & call range_check('W_dyn', w, is, ie, js, je, ng, npz, gridstruct%agrid, & - -50., 100., bad_range) + -50., 100., bad_range, fv_time) endif end subroutine fv_dynamics + subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & - ua, va, delz, agrid, cp, rg, ptop, hydrostatic, conserve, rf_cutoff, gridstruct, domain, bd) + ua, va, delz, agrid, cp, rg, ptop, hydrostatic, & + conserve, rf_cutoff, gridstruct, domain, bd) real, intent(in):: dt real, intent(in):: tau ! time scale (days) real, intent(in):: cp, rg, ptop, rf_cutoff @@ -712,9 +735,9 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) ! D grid meridional wind (m/s) real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) ! cell center vertical wind (m/s) real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! temp - real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: ) ! delta-height (m); non-hydrostatic only + real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: delz(bd%is: ,bd%js: ,1: ) ! delta-height (m); non-hydrostatic only real, intent(in) :: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2) real, intent(in) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed) ! Surface geopotential (g*Z_surf) type(fv_grid_type), intent(IN) :: gridstruct @@ -782,7 +805,7 @@ subroutine Rayleigh_Super(dt, npx, npy, npz, ks, pm, phis, tau, u, v, w, pt, & RF_initialized = .true. endif - call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested) + call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain) allocate( u2f(isd:ied,jsd:jed,kmax) ) @@ -883,9 +906,9 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) ! D grid meridional wind (m/s) real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) ! cell center vertical wind (m/s) real, intent(inout):: pt(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! temp - real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! - real, intent(inout):: delz(bd%isd: ,bd%jsd: ,1: ) ! delta-height (m); non-hydrostatic only + real, intent(inout):: ua(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: va(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! + real, intent(inout):: delz(bd%is: ,bd%js: ,1: ) ! delta-height (m); non-hydrostatic only type(fv_grid_type), intent(IN) :: gridstruct type(domain2d), intent(INOUT) :: domain ! 
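Rayleigh_Super and Rayleigh_Friction relax the winds with a damping strength that grows from zero at rf_cutoff to its maximum at the model top, applied implicitly so the update is stable for any time step. A minimal standalone sketch of that idea follows; the sin**2 profile shape, the constants, and the pressure levels are illustrative assumptions, not the exact expressions in this file.

! Standalone sketch of Rayleigh damping near the model top (illustrative
! only; profile shape and constants are assumptions, not Rayleigh_Super's
! exact code).
program rayleigh_sketch
  implicit none
  integer, parameter :: kmax = 10
  real, parameter :: pi = 3.14159265358979
  real :: pm(kmax)          ! layer-mean pressure (Pa), top to bottom
  real :: u(kmax)           ! zonal wind (m/s)
  real :: rf, dt, tau0, ptop, rf_cutoff
  integer :: k

  dt        = 900.          ! dynamics time step (s)
  tau0      = 10.*86400.    ! damping time scale at the top (s)
  ptop      = 100.          ! model top pressure (Pa)
  rf_cutoff = 50.e2         ! no damping below this pressure (Pa)
  u         = 50.

  do k = 1, kmax
     pm(k) = ptop*(rf_cutoff/ptop)**(real(k-1)/real(kmax-1))
  enddo

  do k = 1, kmax
     if ( pm(k) < rf_cutoff ) then
        ! sin**2 profile: full strength at ptop, tapering to zero at rf_cutoff
        rf = dt/tau0 * sin(0.5*pi*log(rf_cutoff/pm(k))/log(rf_cutoff/ptop))**2
        u(k) = u(k)/(1.+rf)      ! implicit update; stable for any dt
     endif
     write(*,'(a,i3,2f12.3)') ' k, pm, u =', k, pm(k), u(k)
  enddo
end program rayleigh_sketch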
local: @@ -898,7 +921,7 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + is = bd%is ie = bd%ie js = bd%js @@ -928,7 +951,7 @@ subroutine Rayleigh_Friction(dt, npx, npy, npz, ks, pm, tau, u, v, w, pt, & allocate( u2f(isd:ied,jsd:jed,kmax) ) - call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested) + call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain) !$OMP parallel do default(none) shared(is,ie,js,je,kmax,u2f,hydrostatic,ua,va,w) do k=1,kmax @@ -1026,8 +1049,8 @@ subroutine compute_aam(npz, is, ie, js, je, isd, ied, jsd, jed, gridstruct, bd, real, dimension(is:ie):: r1, r2, dm integer i, j, k - call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%nested) - + call c2l_ord2(u, v, ua, va, gridstruct, npz, gridstruct%grid_type, bd, gridstruct%bounded_domain) + !$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,aam,m_fac,ps,ptop,delp,agrav,ua) & !$OMP private(r1, r2, dm) do j=js,je diff --git a/model/fv_fill.F90 b/model/fv_fill.F90 index 46d5887fc..5742e2961 100644 --- a/model/fv_fill.F90 +++ b/model/fv_fill.F90 @@ -28,10 +28,6 @@ module fv_fill_mod public fill_gfs public fill2D -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine fillz(im, km, nq, q, dp) @@ -85,20 +81,20 @@ subroutine fillz(im, km, nq, q, dp) zfix(i) = .true. if ( q(i,k-1,ic) > 0. ) then ! Borrow from above - dq = min ( q(i,k-1,ic)*dp(i,k-1), -q(i,k,ic)*dp(i,k) ) + dq = min ( q(i,k-1,ic)*dp(i,k-1), -q(i,k,ic)*dp(i,k) ) q(i,k-1,ic) = q(i,k-1,ic) - dq/dp(i,k-1) q(i,k ,ic) = q(i,k ,ic) + dq/dp(i,k ) endif if ( q(i,k,ic)<0.0 .and. q(i,k+1,ic)>0. ) then ! Borrow from below: - dq = min ( q(i,k+1,ic)*dp(i,k+1), -q(i,k,ic)*dp(i,k) ) + dq = min ( q(i,k+1,ic)*dp(i,k+1), -q(i,k,ic)*dp(i,k) ) q(i,k+1,ic) = q(i,k+1,ic) - dq/dp(i,k+1) q(i,k ,ic) = q(i,k ,ic) + dq/dp(i,k ) endif endif enddo enddo - + ! Bottom layer k = km do i=1,im @@ -108,7 +104,7 @@ subroutine fillz(im, km, nq, q, dp) qup = q(i,k-1,ic)*dp(i,k-1) qly = -q(i,k ,ic)*dp(i,k ) dup = min(qly, qup) - q(i,k-1,ic) = q(i,k-1,ic) - dup/dp(i,k-1) + q(i,k-1,ic) = q(i,k-1,ic) - dup/dp(i,k-1) q(i,k, ic) = q(i,k, ic) + dup/dp(i,k ) endif enddo @@ -184,11 +180,11 @@ subroutine fill_gfs(im, km, pe2, q, q_min) end subroutine fill_gfs - subroutine fill2D(is, ie, js, je, ng, km, q, delp, area, domain, nested, npx, npy) + subroutine fill2D(is, ie, js, je, ng, km, q, delp, area, domain, bounded_domain, npx, npy) ! This is a diffusive type filling algorithm type(domain2D), intent(INOUT) :: domain integer, intent(in):: is, ie, js, je, ng, km, npx, npy - logical, intent(IN):: nested + logical, intent(IN):: bounded_domain real, intent(in):: area(is-ng:ie+ng, js-ng:je+ng) real, intent(in):: delp(is-ng:ie+ng, js-ng:je+ng, km) real, intent(inout):: q(is-ng:ie+ng, js-ng:je+ng, km) @@ -200,7 +196,7 @@ subroutine fill2D(is, ie, js, je, ng, km, q, delp, area, domain, nested, npx, np integer:: i, j, k integer :: is1, ie1, js1, je1 - if (nested) then + if (bounded_domain) then if (is == 1) then is1 = is-1 else diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index 579fe65f5..9e150859a 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -19,7 +19,7 @@ !* If not, see . 
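The fillz changes above are whitespace cleanups, but the algorithm they sit in is worth a standalone illustration: a negative mixing ratio q(k) is repaired by borrowing tracer mass dq = q*dp from the layer above and, if needed, the layer below, so the column integral is unchanged. A minimal single-column sketch, with made-up values:

! Single-column sketch of the mass-conserving "borrowing" used in fillz
! (the column values below are illustrative).
program fillz_sketch
  implicit none
  integer, parameter :: km = 4
  real :: q(km), dp(km), dq, total0, total1
  integer :: k

  q  = (/ 3.e-6, -1.e-6, 4.e-6, 2.e-6 /)   ! one layer is negative
  dp = (/ 100., 200., 200., 300. /)        ! layer thickness (Pa)
  total0 = sum(q*dp)

  do k = 2, km-1
     if ( q(k) < 0. ) then
        if ( q(k-1) > 0. ) then            ! borrow from above
           dq = min( q(k-1)*dp(k-1), -q(k)*dp(k) )
           q(k-1) = q(k-1) - dq/dp(k-1)
           q(k)   = q(k)   + dq/dp(k)
        endif
        if ( q(k) < 0. .and. q(k+1) > 0. ) then  ! then from below
           dq = min( q(k+1)*dp(k+1), -q(k)*dp(k) )
           q(k+1) = q(k+1) - dq/dp(k+1)
           q(k)   = q(k)   + dq/dp(k)
        endif
     endif
  enddo

  total1 = sum(q*dp)
  write(*,*) 'q after fill:', q
  write(*,*) 'column mass before/after:', total0, total1   ! identical
end program fillz_sketch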
!*********************************************************************** module fv_grid_utils_mod - + #include use constants_mod, only: omega, pi=>pi_8, cnst_radius=>radius use mpp_mod, only: FATAL, mpp_error, WARNING @@ -32,7 +32,7 @@ module fv_grid_utils_mod use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_grid_bounds_type, & R_GRID use fv_eta_mod, only: set_eta - use fv_mp_mod, only: ng, is_master + use fv_mp_mod, only: is_master use fv_mp_mod, only: mp_reduce_sum, mp_reduce_min, mp_reduce_max use fv_mp_mod, only: fill_corners, XDir, YDir use fv_timing_mod, only: timing_on, timing_off @@ -54,15 +54,16 @@ module fv_grid_utils_mod real, parameter:: ptop_min=1.d-8 - public f_p + public f_p public ptop_min, big_number !CLEANUP: OK to keep since they are constants? public cos_angle - public latlon2xyz, gnomonic_grids, & + public update_dwinds_phys, update2d_dwinds_phys, latlon2xyz, gnomonic_grids, & global_mx, unit_vect_latlon, & cubed_to_latlon, c2l_ord2, g_sum, global_qsum, great_circle_dist, & v_prod, get_unit_vect2, project_sphere_v public mid_pt_sphere, mid_pt_cart, vect_cross, grid_utils_init, grid_utils_end, & - spherical_angle, cell_center2, get_area, inner_prod, fill_ghost, direct_transform, & + spherical_angle, cell_center2, get_area, inner_prod, fill_ghost, & + direct_transform, cube_transform, & make_eta_level, expand_cell, cart_to_latlon, intp_great_circle, normalize_vect, & dist2side_latlon, spherical_linear_interpolation, get_latlon_vector public symm_grid @@ -74,10 +75,6 @@ module fv_grid_utils_mod MODULE PROCEDURE fill_ghost_r8 END INTERFACE -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) @@ -88,13 +85,13 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) integer, intent(in):: grid_type, c2l_order ! ! Super (composite) grid: - + ! 9---4---8 ! | | ! 1 5 3 ! | | ! 6---2---7 - + real(kind=R_GRID) grid3(3,Atm%bd%isd:Atm%bd%ied+1,Atm%bd%jsd:Atm%bd%jed+1) real(kind=R_GRID) p1(3), p2(3), p3(3), p4(3), pp(3), ex(3), ey(3), e1(3), e2(3) real(kind=R_GRID) pp1(2), pp2(2), pp3(2) @@ -177,7 +174,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ne_corner => Atm%gridstruct%ne_corner nw_corner => Atm%gridstruct%nw_corner - if ( Atm%flagstruct%do_schmidt .and. abs(Atm%flagstruct%stretch_fac-1.) > 1.E-5 ) then + if ( (Atm%flagstruct%do_schmidt .or. Atm%flagstruct%do_cube_transform) .and. abs(Atm%flagstruct%stretch_fac-1.) > 1.E-5 ) then Atm%gridstruct%stretched_grid = .true. symm_grid = .false. else @@ -194,15 +191,17 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) Atm%ks = 0 elseif ( .not. Atm%flagstruct%hybrid_z ) then ! Initialize (ak,bk) for cold start; overwritten with restart file - call set_eta(npz, Atm%ks, Atm%ptop, Atm%ak, Atm%bk) - if ( is_master() ) then - write(*,*) 'Grid_init', npz, Atm%ks, Atm%ptop - tmp1 = Atm%ak(Atm%ks+1) - do k=Atm%ks+1,npz - tmp1 = max(tmp1, (Atm%ak(k)-Atm%ak(k+1))/max(1.E-9, (Atm%bk(k+1)-Atm%bk(k))) ) - enddo - write(*,*) 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. - if ( tmp1 > 420.E2 ) write(*,*) 'Warning: the chosen setting in set_eta can cause instability' + if (.not. 
Atm%flagstruct%external_eta) then + call set_eta(npz, Atm%ks, Atm%ptop, Atm%ak, Atm%bk, Atm%flagstruct%npz_type) + if ( is_master() ) then + write(*,*) 'Grid_init', npz, Atm%ks, Atm%ptop + tmp1 = Atm%ak(Atm%ks+1) + do k=Atm%ks+1,npz + tmp1 = max(tmp1, (Atm%ak(k)-Atm%ak(k+1))/max(1.E-9, (Atm%bk(k+1)-Atm%bk(k))) ) + enddo + write(*,*) 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. + if ( tmp1 > 420.E2 ) write(*,*) 'Warning: the chosen setting in set_eta can cause instability' + endif endif endif @@ -221,7 +220,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ne_corner = .false. nw_corner = .false. - if (grid_type < 3 .and. .not. Atm%neststruct%nested) then + if (grid_type < 3 .and. .not. Atm%gridstruct%bounded_domain) then if ( is==1 .and. js==1 ) sw_corner = .true. if ( (ie+1)==npx .and. js==1 ) se_corner = .true. if ( (ie+1)==npx .and. (je+1)==npy ) ne_corner = .true. @@ -235,7 +234,8 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) endif if (grid_type < 3) then - if ( .not. Atm%neststruct%nested ) then +!xxx if ( .not. Atm%neststruct%nested ) then + if ( .not. Atm%gridstruct%bounded_domain ) then call fill_corners(grid(:,:,1), npx, npy, FILL=XDir, BGRID=.true.) call fill_corners(grid(:,:,2), npx, npy, FILL=XDir, BGRID=.true.) end if @@ -250,7 +250,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) call get_center_vect( npx, npy, grid3, ec1, ec2, Atm%bd ) ! Fill arbitrary values in the non-existing corner regions: - if (.not. Atm%neststruct%nested) then + if (.not. Atm%gridstruct%bounded_domain) then do k=1,3 call fill_ghost(ec1(k,:,:), npx, npy, big_number, Atm%bd) call fill_ghost(ec2(k,:,:), npx, npy, big_number, Atm%bd) @@ -261,14 +261,14 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) do j=jsd,jed do i=isd+1,ied if ( ( (i<1 .and. j<1 ) .or. (i>npx .and. j<1 ) .or. & - (i>npx .and. j>(npy-1)) .or. (i<1 .and. j>(npy-1)) ) .and. .not. Atm%neststruct%nested) then + (i>npx .and. j>(npy-1)) .or. (i<1 .and. j>(npy-1)) ) .and. .not. Atm%gridstruct%bounded_domain) then ew(1:3,i,j,1:2) = 0. else call mid_pt_cart( grid(i,j,1:2), grid(i,j+1,1:2), pp) - if (i==1 .and. .not. Atm%neststruct%nested) then + if (i==1 .and. .not. Atm%gridstruct%bounded_domain) then call latlon2xyz( agrid(i,j,1:2), p1) call vect_cross(p2, pp, p1) - elseif(i==npx .and. .not. Atm%neststruct%nested) then + elseif(i==npx .and. .not. Atm%gridstruct%bounded_domain) then call latlon2xyz( agrid(i-1,j,1:2), p1) call vect_cross(p2, p1, pp) else @@ -289,17 +289,17 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) do j=jsd+1,jed do i=isd,ied if ( ( (i<1 .and. j<1 ) .or. (i>(npx-1) .and. j<1 ) .or. & - (i>(npx-1) .and. j>npy) .or. (i<1 .and. j>npy) ) .and. .not. Atm%neststruct%nested) then + (i>(npx-1) .and. j>npy) .or. (i<1 .and. j>npy) ) .and. .not. Atm%gridstruct%bounded_domain) then es(1:3,i,j,1:2) = 0. else call mid_pt_cart(grid(i,j,1:2), grid(i+1,j,1:2), pp) - if (j==1 .and. .not. Atm%neststruct%nested) then + if (j==1 .and. .not. Atm%gridstruct%bounded_domain) then call latlon2xyz( agrid(i,j,1:2), p1) call vect_cross(p2, pp, p1) - elseif (j==npy .and. .not. Atm%neststruct%nested) then + elseif (j==npy .and. .not. 
Atm%gridstruct%bounded_domain) then call latlon2xyz( agrid(i,j-1,1:2), p1) call vect_cross(p2, p1, pp) - else + else call latlon2xyz( agrid(i,j ,1:2), p1) call latlon2xyz( agrid(i,j-1,1:2), p3) call vect_cross(p2, p3, p1) @@ -332,11 +332,11 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! NW corner: cos_sg(i,j,9) = -cos_angle( grid3(1,i,j+1), grid3(1,i,j), grid3(1,i+1,j+1) ) ! Mid-points by averaging: -!!! cos_sg(i,j,1) = 0.5*( cos_sg(i,j,6) + cos_sg(i,j,9) ) -!!! cos_sg(i,j,2) = 0.5*( cos_sg(i,j,6) + cos_sg(i,j,7) ) -!!! cos_sg(i,j,3) = 0.5*( cos_sg(i,j,7) + cos_sg(i,j,8) ) -!!! cos_sg(i,j,4) = 0.5*( cos_sg(i,j,8) + cos_sg(i,j,9) ) -!!!!! cos_sg(i,j,5) = 0.25*(cos_sg(i,j,6)+cos_sg(i,j,7)+cos_sg(i,j,8)+cos_sg(i,j,9)) +!!! cos_sg(i,j,1) = 0.5*( cos_sg(i,j,6) + cos_sg(i,j,9) ) +!!! cos_sg(i,j,2) = 0.5*( cos_sg(i,j,6) + cos_sg(i,j,7) ) +!!! cos_sg(i,j,3) = 0.5*( cos_sg(i,j,7) + cos_sg(i,j,8) ) +!!! cos_sg(i,j,4) = 0.5*( cos_sg(i,j,8) + cos_sg(i,j,9) ) +!!!!! cos_sg(i,j,5) = 0.25*(cos_sg(i,j,6)+cos_sg(i,j,7)+cos_sg(i,j,8)+cos_sg(i,j,9)) ! No averaging ----- call latlon2xyz(agrid(i,j,1:2), p3) ! righ-hand system consistent with grid3 call mid_pt3_cart(grid3(1,i,j), grid3(1,i,j+1), p1) @@ -364,33 +364,34 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! ------------------------------- ! For transport operation ! ------------------------------- - if (.not. Atm%neststruct%nested) then +!xxx if (.not. Atm%neststruct%nested) then + if (.not. Atm%gridstruct%bounded_domain) then if ( sw_corner ) then do i=-2,0 - sin_sg(0,i,3) = sin_sg(i,1,2) - sin_sg(i,0,4) = sin_sg(1,i,1) + sin_sg(0,i,3) = sin_sg(i,1,2) + sin_sg(i,0,4) = sin_sg(1,i,1) enddo endif if ( nw_corner ) then do i=npy,npy+2 - sin_sg(0,i,3) = sin_sg(npy-i,npy-1,4) + sin_sg(0,i,3) = sin_sg(npy-i,npy-1,4) enddo do i=-2,0 - sin_sg(i,npy,2) = sin_sg(1,npx+i,1) + sin_sg(i,npy,2) = sin_sg(1,npx+i,1) enddo endif if ( se_corner ) then do j=-2,0 - sin_sg(npx,j,1) = sin_sg(npx-j,1,2) + sin_sg(npx,j,1) = sin_sg(npx-j,1,2) enddo do i=npx,npx+2 - sin_sg(i,0,4) = sin_sg(npx-1,npx-i,3) + sin_sg(i,0,4) = sin_sg(npx-1,npx-i,3) enddo endif if ( ne_corner ) then do i=npy,npy+2 - sin_sg(npx,i,1) = sin_sg(i,npy-1,4) - sin_sg(i,npy,2) = sin_sg(npx-1,i,3) + sin_sg(npx,i,1) = sin_sg(i,npy-1,4) + sin_sg(i,npy,2) = sin_sg(npx-1,i,3) enddo endif endif @@ -432,7 +433,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ew(1,:,:,1)=1. ew(2,:,:,1)=0. ew(3,:,:,1)=0. - + ew(1,:,:,2)=0. ew(2,:,:,2)=1. ew(3,:,:,2)=0. @@ -440,7 +441,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) es(1,:,:,1)=1. es(2,:,:,1)=0. es(3,:,:,1)=0. - + es(1,:,:,2)=0. es(2,:,:,2)=1. es(3,:,:,2)=0. @@ -462,9 +463,9 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) do j=js,je+1 do i=is,ie+1 ! unit vect in X-dir: ee1 - if (i==1 .and. .not. Atm%neststruct%nested) then + if (i==1 .and. .not. Atm%gridstruct%bounded_domain) then call vect_cross(pp, grid3(1,i, j), grid3(1,i+1,j)) - elseif(i==npx .and. .not. Atm%neststruct%nested) then + elseif(i==npx .and. .not. Atm%gridstruct%bounded_domain) then call vect_cross(pp, grid3(1,i-1,j), grid3(1,i, j)) else call vect_cross(pp, grid3(1,i-1,j), grid3(1,i+1,j)) @@ -473,9 +474,9 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) call normalize_vect( ee1(1:3,i,j) ) ! unit vect in Y-dir: ee2 - if (j==1 .and. .not. Atm%neststruct%nested) then + if (j==1 .and. .not. 
Atm%gridstruct%bounded_domain) then call vect_cross(pp, grid3(1:3,i,j ), grid3(1:3,i,j+1)) - elseif(j==npy .and. .not. Atm%neststruct%nested) then + elseif(j==npy .and. .not. Atm%gridstruct%bounded_domain) then call vect_cross(pp, grid3(1:3,i,j-1), grid3(1:3,i,j )) else call vect_cross(pp, grid3(1:3,i,j-1), grid3(1:3,i,j+1)) @@ -516,7 +517,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) rsin_v(i,j) = 1. / max(tiny_number, sina_v(i,j)**2) enddo enddo - + do j=jsd,jed do i=isd,ied cosa_s(i,j) = cos_sg(i,j,5) @@ -525,7 +526,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) enddo enddo ! Force the model to fail if incorrect corner values are to be used: - if (.not. Atm%neststruct%nested) then + if (.not. Atm%gridstruct%bounded_domain) then call fill_ghost(cosa_s, npx, npy, big_number, Atm%bd) end if !------------------------------------ @@ -533,8 +534,8 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) !------------------------------------ do j=js,je+1 do i=is,ie+1 - if ( i==npx .and. j==npy .and. .not. Atm%neststruct%nested) then - else if ( ( i==1 .or. i==npx .or. j==1 .or. j==npy ) .and. .not. Atm%neststruct%nested ) then + if ( i==npx .and. j==npy .and. .not. Atm%gridstruct%bounded_domain) then + else if ( ( i==1 .or. i==npx .or. j==1 .or. j==npy ) .and. .not. Atm%gridstruct%bounded_domain ) then rsina(i,j) = big_number else ! rsina(i,j) = 1. / sina(i,j)**2 @@ -545,7 +546,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) do j=jsd,jed do i=is,ie+1 - if ( (i==1 .or. i==npx) .and. .not. Atm%neststruct%nested ) then + if ( (i==1 .or. i==npx) .and. .not. Atm%gridstruct%bounded_domain ) then ! rsin_u(i,j) = 1. / sina_u(i,j) rsin_u(i,j) = 1. / sign(max(tiny_number,abs(sina_u(i,j))), sina_u(i,j)) endif @@ -554,16 +555,18 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) do j=js,je+1 do i=isd,ied - if ( (j==1 .or. j==npy) .and. .not. Atm%neststruct%nested ) then + if ( (j==1 .or. j==npy) .and. .not. Atm%gridstruct%bounded_domain ) then ! rsin_v(i,j) = 1. / sina_v(i,j) rsin_v(i,j) = 1. / sign(max(tiny_number,abs(sina_v(i,j))), sina_v(i,j)) endif enddo enddo - !EXPLANATION HERE: calling fill_ghost overwrites **SOME** of the sin_sg values along the outward-facing edge of a tile in the corners, which is incorrect. What we will do is call fill_ghost and then fill in the appropriate values + !EXPLANATION HERE: calling fill_ghost overwrites **SOME** of the sin_sg + !values along the outward-facing edge of a tile in the corners, which is incorrect. + !What we will do is call fill_ghost and then fill in the appropriate values - if (.not. Atm%neststruct%nested) then + if (.not. Atm%gridstruct%bounded_domain) then do k=1,9 call fill_ghost(sin_sg(:,:,k), npx, npy, tiny_number, Atm%bd) ! this will cause NAN if used call fill_ghost(cos_sg(:,:,k), npx, npy, big_number, Atm%bd) @@ -575,28 +578,28 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! ------------------------------- if ( sw_corner ) then do i=0,-2,-1 - sin_sg(0,i,3) = sin_sg(i,1,2) - sin_sg(i,0,4) = sin_sg(1,i,1) - cos_sg(0,i,3) = cos_sg(i,1,2) - cos_sg(i,0,4) = cos_sg(1,i,1) + sin_sg(0,i,3) = sin_sg(i,1,2) + sin_sg(i,0,4) = sin_sg(1,i,1) + cos_sg(0,i,3) = cos_sg(i,1,2) + cos_sg(i,0,4) = cos_sg(1,i,1) !!! cos_sg(0,i,7) = cos_sg(i,1,6) !!! cos_sg(0,i,8) = cos_sg(i,1,7) !!! cos_sg(i,0,8) = cos_sg(1,i,9) !!! cos_sg(i,0,9) = cos_sg(1,i,6) enddo !!! 
cos_sg(0,0,8) = 0.5*(cos_sg(0,1,7)+cos_sg(1,0,9)) - + endif if ( nw_corner ) then do i=npy,npy+2 - sin_sg(0,i,3) = sin_sg(npy-i,npy-1,4) - cos_sg(0,i,3) = cos_sg(npy-i,npy-1,4) + sin_sg(0,i,3) = sin_sg(npy-i,npy-1,4) + cos_sg(0,i,3) = cos_sg(npy-i,npy-1,4) !!! cos_sg(0,i,7) = cos_sg(npy-i,npy-1,8) !!! cos_sg(0,i,8) = cos_sg(npy-i,npy-1,9) enddo do i=0,-2,-1 - sin_sg(i,npy,2) = sin_sg(1,npy-i,1) - cos_sg(i,npy,2) = cos_sg(1,npy-i,1) + sin_sg(i,npy,2) = sin_sg(1,npy-i,1) + cos_sg(i,npy,2) = cos_sg(1,npy-i,1) !!! cos_sg(i,npy,6) = cos_sg(1,npy-i,9) !!! cos_sg(i,npy,7) = cos_sg(1,npy-i,6) enddo @@ -604,16 +607,16 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) endif if ( se_corner ) then do j=0,-2,-1 - sin_sg(npx,j,1) = sin_sg(npx-j,1,2) - cos_sg(npx,j,1) = cos_sg(npx-j,1,2) -!!! cos_sg(npx,j,6) = cos_sg(npx-j,1,7) -!!! cos_sg(npx,j,9) = cos_sg(npx-j,1,6) + sin_sg(npx,j,1) = sin_sg(npx-j,1,2) + cos_sg(npx,j,1) = cos_sg(npx-j,1,2) +!!! cos_sg(npx,j,6) = cos_sg(npx-j,1,7) +!!! cos_sg(npx,j,9) = cos_sg(npx-j,1,6) enddo do i=npx,npx+2 - sin_sg(i,0,4) = sin_sg(npx-1,npx-i,3) - cos_sg(i,0,4) = cos_sg(npx-1,npx-i,3) -!!! cos_sg(i,0,9) = cos_sg(npx-1,npx-i,8) -!!! cos_sg(i,0,8) = cos_sg(npx-1,npx-i,7) + sin_sg(i,0,4) = sin_sg(npx-1,npx-i,3) + cos_sg(i,0,4) = cos_sg(npx-1,npx-i,3) +!!! cos_sg(i,0,9) = cos_sg(npx-1,npx-i,8) +!!! cos_sg(i,0,8) = cos_sg(npx-1,npx-i,7) enddo !!! cos_sg(npx,0,9) = 0.5*(cos_sg(npx,1,6)+cos_sg(npx-1,0,8)) endif @@ -629,7 +632,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) !!! cos_sg(npx+i,npy,7) = cos_sg(npx-1,npy+i,8) end do !!! cos_sg(npx,npy,6) = 0.5*(cos_sg(npx-1,npy,7)+cos_sg(npx,npy-1,9)) - endif + endif else sina = 1. @@ -638,9 +641,9 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) rsin2 = 1. sina_u = 1. sina_v = 1. - cosa_u = 0. - cosa_v = 0. - cosa_s = 0. + cosa_u = 0. + cosa_v = 0. + cosa_s = 0. rsin_u = 1. rsin_v = 1. endif @@ -652,16 +655,16 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! Make normal vect at face edges after consines are computed: !------------------------------------------------------------- ! for old d2a2c_vect routines - if (.not. Atm%neststruct%nested) then + if (.not. 
Atm%gridstruct%bounded_domain) then do j=js-1,je+1 if ( is==1 ) then i=1 - call vect_cross(ew(1,i,j,1), grid3(1,i,j+1), grid3(1,i,j)) + call vect_cross(ew(1,i,j,1), grid3(1,i,j+1), grid3(1,i,j)) call normalize_vect( ew(1,i,j,1) ) endif if ( (ie+1)==npx ) then i=npx - call vect_cross(ew(1,i,j,1), grid3(1,i,j+1), grid3(1,i,j)) + call vect_cross(ew(1,i,j,1), grid3(1,i,j+1), grid3(1,i,j)) call normalize_vect( ew(1,i,j,1) ) endif enddo @@ -669,14 +672,14 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) if ( js==1 ) then j=1 do i=is-1,ie+1 - call vect_cross(es(1,i,j,2), grid3(1,i,j),grid3(1,i+1,j)) + call vect_cross(es(1,i,j,2), grid3(1,i,j),grid3(1,i+1,j)) call normalize_vect( es(1,i,j,2) ) enddo endif if ( (je+1)==npy ) then j=npy do i=is-1,ie+1 - call vect_cross(es(1,i,j,2), grid3(1,i,j),grid3(1,i+1,j)) + call vect_cross(es(1,i,j,2), grid3(1,i,j),grid3(1,i+1,j)) call normalize_vect( es(1,i,j,2) ) enddo endif @@ -693,7 +696,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) enddo do j=js,je do i=is,ie+1 - call vect_cross(en2(1:3,i,j), grid3(1,i,j+1), grid3(1,i,j)) + call vect_cross(en2(1:3,i,j), grid3(1,i,j+1), grid3(1,i,j)) call normalize_vect( en2(1:3,i,j) ) enddo enddo @@ -701,9 +704,9 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! Make unit vectors for the coordinate extension: !------------------------------------------------------------- endif - + do j=jsd,jed+1 - if ((j==1 .OR. j==npy) .and. .not. Atm%neststruct%nested) then + if ((j==1 .OR. j==npy) .and. .not. Atm%gridstruct%bounded_domain) then do i=isd,ied divg_u(i,j) = 0.5*(sin_sg(i,j,2)+sin_sg(i,j-1,4))*dyc(i,j)/dx(i,j) del6_u(i,j) = 0.5*(sin_sg(i,j,2)+sin_sg(i,j-1,4))*dx(i,j)/dyc(i,j) @@ -720,11 +723,11 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) divg_v(i,j) = sina_u(i,j)*dxc(i,j)/dy(i,j) del6_v(i,j) = sina_u(i,j)*dy(i,j)/dxc(i,j) enddo - if (is == 1 .and. .not. Atm%neststruct%nested) then + if (is == 1 .and. .not. Atm%gridstruct%bounded_domain) then divg_v(is,j) = 0.5*(sin_sg(1,j,1)+sin_sg(0,j,3))*dxc(is,j)/dy(is,j) del6_v(is,j) = 0.5*(sin_sg(1,j,1)+sin_sg(0,j,3))*dy(is,j)/dxc(is,j) endif - if (ie+1 == npx .and. .not. Atm%neststruct%nested) then + if (ie+1 == npx .and. .not. Atm%gridstruct%bounded_domain) then divg_v(ie+1,j) = 0.5*(sin_sg(npx,j,1)+sin_sg(npx-1,j,3))*dxc(ie+1,j)/dy(ie+1,j) del6_v(ie+1,j) = 0.5*(sin_sg(npx,j,1)+sin_sg(npx-1,j,3))*dy(ie+1,j)/dxc(ie+1,j) endif @@ -733,7 +736,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! Initialize cubed_sphere to lat-lon transformation: call init_cubed_to_latlon( Atm%gridstruct, Atm%flagstruct%hydrostatic, agrid, grid_type, c2l_order, Atm%bd ) - call global_mx(area, ng, Atm%gridstruct%da_min, Atm%gridstruct%da_max, Atm%bd) + call global_mx(area, Atm%ng, Atm%gridstruct%da_min, Atm%gridstruct%da_max, Atm%bd) if( is_master() ) write(*,*) 'da_max/da_min=', Atm%gridstruct%da_max/Atm%gridstruct%da_min call global_mx_c(area_c(is:ie,js:je), is, ie, js, je, Atm%gridstruct%da_min_c, Atm%gridstruct%da_max_c) @@ -744,7 +747,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) ! Initialization for interpolation at face edges !------------------------------------------------ ! A->B scalar: - if (grid_type < 3 .and. .not. Atm%neststruct%nested) then + if (grid_type < 3 .and. .not. 
Atm%gridstruct%bounded_domain ) then call mpp_update_domains(divg_v, divg_u, Atm%domain, flags=SCALAR_PAIR, & gridtype=CGRID_NE_PARAM, complete=.true.) call mpp_update_domains(del6_v, del6_u, Atm%domain, flags=SCALAR_PAIR, & @@ -753,7 +756,7 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) Atm%gridstruct%edge_e, non_ortho, grid, agrid, npx, npy, Atm%bd) call efactor_a2c_v(Atm%gridstruct%edge_vect_s, Atm%gridstruct%edge_vect_n, & Atm%gridstruct%edge_vect_w, Atm%gridstruct%edge_vect_e, & - non_ortho, grid, agrid, npx, npy, Atm%neststruct%nested, Atm%bd) + non_ortho, grid, agrid, npx, npy, Atm%gridstruct%bounded_domain, Atm%bd) ! call extend_cube_s(non_ortho, grid, agrid, npx, npy, .false., Atm%neststruct%nested) ! call van2d_init(grid, agrid, npx, npy) else @@ -842,9 +845,9 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) end subroutine grid_utils_init - + subroutine grid_utils_end - + ! deallocate sst_ncep (if allocated) #ifndef DYCORE_SOLO if (allocated(sst_ncep)) deallocate( sst_ncep ) @@ -856,7 +859,7 @@ subroutine direct_transform(c, i1, i2, j1, j2, lon_p, lat_p, n, lon, lat) ! ! This is a direct transformation of the standard (symmetrical) cubic grid ! to a locally enhanced high-res grid on the sphere; it is an application -! of the Schmidt transformation at the south pole followed by a +! of the Schmidt transformation at the south pole followed by a ! pole_shift_to_target (rotation) operation ! real(kind=R_GRID), intent(in):: c ! Stretching factor @@ -886,13 +889,13 @@ subroutine direct_transform(c, i1, i2, j1, j2, lon_p, lat_p, n, lon, lat) do j=j1,j2 do i=i1,i2 if ( abs(c2m1) > 1.d-7 ) then - sin_lat = sin(lat(i,j)) + sin_lat = sin(lat(i,j)) lat_t = asin( (c2m1+c2p1*sin_lat)/(c2p1+c2m1*sin_lat) ) else ! no stretching lat_t = lat(i,j) endif - sin_lat = sin(lat_t) - cos_lat = cos(lat_t) + sin_lat = sin(lat_t) + cos_lat = cos(lat_t) sin_o = -(sin_p*sin_lat + cos_p*cos_lat*cos(lon(i,j))) if ( (1.-abs(sin_o)) < 1.d-7 ) then ! poles lon(i,j) = 0.d0 @@ -913,11 +916,75 @@ subroutine direct_transform(c, i1, i2, j1, j2, lon_p, lat_p, n, lon, lat) end subroutine direct_transform + subroutine cube_transform(c, i1, i2, j1, j2, lon_p, lat_p, n, lon, lat) +! +! This is a direct transformation of the standard (symmetrical) cubic grid +! to a locally enhanced high-res grid on the sphere; it is an application +! of the Schmidt transformation at the **north** pole followed by a +! pole_shift_to_target (rotation) operation +! + real(kind=R_GRID), intent(in):: c ! Stretching factor + real(kind=R_GRID), intent(in):: lon_p, lat_p ! center location of the target face, radian + integer, intent(in):: n ! grid face number + integer, intent(in):: i1, i2, j1, j2 +! 0 <= lon <= 2*pi ; -pi/2 <= lat <= pi/2 + real(kind=R_GRID), intent(inout), dimension(i1:i2,j1:j2):: lon, lat +! + real(f_p):: lat_t, sin_p, cos_p, sin_lat, cos_lat, sin_o, p2, two_pi + real(f_p):: c2p1, c2m1 + integer:: i, j + + p2 = 0.5d0*pi + two_pi = 2.d0*pi + + if( is_master() .and. n==1 ) then + write(*,*) n, 'Cube transformation (revised Schmidt): stretching factor=', c, ' center=', lon_p, lat_p + endif + + c2p1 = 1.d0 + c*c + c2m1 = 1.d0 - c*c + + sin_p = sin(lat_p) + cos_p = cos(lat_p) + + !Try rotating pole around before doing the regular rotation?? + + do j=j1,j2 + do i=i1,i2 + if ( abs(c2m1) > 1.d-7 ) then + sin_lat = sin(lat(i,j)) + lat_t = asin( (c2m1+c2p1*sin_lat)/(c2p1+c2m1*sin_lat) ) + else ! 
no stretching + lat_t = lat(i,j) + endif + sin_lat = sin(lat_t) + cos_lat = cos(lat_t) + lon(i,j) = lon(i,j) + pi ! rotate around first to get final orientation correct + sin_o = -(sin_p*sin_lat + cos_p*cos_lat*cos(lon(i,j))) + if ( (1.-abs(sin_o)) < 1.d-7 ) then ! poles + lon(i,j) = 0.d0 + lat(i,j) = sign( p2, sin_o ) + else + lat(i,j) = asin( sin_o ) + lon(i,j) = lon_p + atan2( -cos_lat*sin(lon(i,j)), & + -sin_lat*cos_p+cos_lat*sin_p*cos(lon(i,j))) + if ( lon(i,j) < 0.d0 ) then + lon(i,j) = lon(i,j) + two_pi + elseif( lon(i,j) >= two_pi ) then + lon(i,j) = lon(i,j) - two_pi + endif + endif + enddo + enddo + + end subroutine cube_transform + + real function inner_prod(v1, v2) real(kind=R_GRID),intent(in):: v1(3), v2(3) real (f_p) :: vp1(3), vp2(3), prod16 integer k - + do k=1,3 vp1(k) = real(v1(k),kind=f_p) vp2(k) = real(v2(k),kind=f_p) @@ -928,7 +995,7 @@ real function inner_prod(v1, v2) end function inner_prod - subroutine efactor_a2c_v(edge_vect_s, edge_vect_n, edge_vect_w, edge_vect_e, non_ortho, grid, agrid, npx, npy, nested, bd) + subroutine efactor_a2c_v(edge_vect_s, edge_vect_n, edge_vect_w, edge_vect_e, non_ortho, grid, agrid, npx, npy, bounded_domain, bd) ! ! Initialization of interpolation factors at face edges ! for interpolating vectors from A to C grid @@ -936,7 +1003,7 @@ subroutine efactor_a2c_v(edge_vect_s, edge_vect_n, edge_vect_w, edge_vect_e, non type(fv_grid_bounds_type), intent(IN) :: bd real(kind=R_GRID), intent(INOUT), dimension(bd%isd:bd%ied) :: edge_vect_s, edge_vect_n real(kind=R_GRID), intent(INOUT), dimension(bd%jsd:bd%jed) :: edge_vect_w, edge_vect_e - logical, intent(in):: non_ortho, nested + logical, intent(in):: non_ortho, bounded_domain real(kind=R_GRID), intent(in):: grid(bd%isd:bd%ied+1,bd%jsd:bd%jed+1,2) real(kind=R_GRID), intent(in):: agrid(bd%isd:bd%ied ,bd%jsd:bd%jed ,2) integer, intent(in):: npx, npy @@ -946,7 +1013,7 @@ subroutine efactor_a2c_v(edge_vect_s, edge_vect_n, edge_vect_w, edge_vect_e, non real(kind=R_GRID) d1, d2 integer i, j integer im2, jm2 - + integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -971,7 +1038,7 @@ subroutine efactor_a2c_v(edge_vect_s, edge_vect_n, edge_vect_w, edge_vect_e, non edge_vect_w = big_number edge_vect_e = big_number - if ( npx /= npy .and. .not. nested) call mpp_error(FATAL, 'efactor_a2c_v: npx /= npy') + if ( npx /= npy .and. .not. (bounded_domain)) call mpp_error(FATAL, 'efactor_a2c_v: npx /= npy') if ( (npx/2)*2 == npx ) call mpp_error(FATAL, 'efactor_a2c_v: npx/npy is not an odd number') im2 = (npx-1)/2 @@ -1146,7 +1213,7 @@ subroutine edge_factors(edge_s, edge_n, edge_w, edge_e, non_ortho, grid, agrid, edge_n = big_number edge_w = big_number edge_e = big_number - + ! west edge: !---------------------------------------------------------- ! p_west(j) = (1.-edge_w(j)) * p(j) + edge_w(j) * p(j-1) @@ -1246,11 +1313,11 @@ subroutine gnomonic_ed(im, lamda, theta) !----------------------------------------------------- ! Equal distance along the 4 edges of the cubed sphere !----------------------------------------------------- -! Properties: +! Properties: ! * defined by intersections of great circles ! * max(dx,dy; global) / min(dx,dy; global) = sqrt(2) = 1.4142 ! * Max(aspect ratio) = 1.06089 -! * the N-S coordinate curves are const longitude on the 4 faces with equator +! * the N-S coordinate curves are const longitude on the 4 faces with equator ! For C2000: (dx_min, dx_max) = (3.921, 5.545) in km unit ! 
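cube_transform above applies the same Schmidt mapping in latitude as direct_transform, lat_t = asin(((1-c**2) + (1+c**2)*sin(lat)) / ((1+c**2) + (1-c**2)*sin(lat))), before the pole rotation. Below is a standalone sketch of just that mapping, showing how a stretch factor c > 1 draws grid latitudes toward the refinement pole; the factor c = 3 and the sample latitudes are illustrative.

! Sketch of the Schmidt latitude mapping used by direct_transform and
! cube_transform (pole rotation omitted; constants are illustrative).
program schmidt_sketch
  implicit none
  integer, parameter :: n = 5
  double precision, parameter :: pi = 3.14159265358979324d0
  double precision :: c, c2p1, c2m1, lat(n), lat_t, sin_lat
  integer :: i

  c    = 3.d0
  c2p1 = 1.d0 + c*c
  c2m1 = 1.d0 - c*c
  lat  = (/ -60.d0, -30.d0, 0.d0, 30.d0, 60.d0 /) * pi/180.d0

  do i = 1, n
     sin_lat = sin(lat(i))
     lat_t   = asin( (c2m1 + c2p1*sin_lat)/(c2p1 + c2m1*sin_lat) )
     write(*,'(a,f7.1,a,f7.1)') ' lat ', lat(i)*180.d0/pi, &
                                ' deg  maps to ', lat_t*180.d0/pi
  enddo
end program schmidt_sketch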
This is the grid of choice for global cloud resolving @@ -1264,7 +1331,7 @@ subroutine gnomonic_ed(im, lamda, theta) real(f_p):: rsq3, alpha, delx, dely integer i, j, k - rsq3 = 1.d0/sqrt(3.d0) + rsq3 = 1.d0/sqrt(3.d0) alpha = asin( rsq3 ) ! Ranges: @@ -1334,16 +1401,16 @@ subroutine gnomonic_ed(im, lamda, theta) if ( is_master() ) then p1(1) = lamda(1,1); p1(2) = theta(1,1) p2(1) = lamda(2,1); p2(2) = theta(2,1) - write(*,*) 'Gird distance at face edge (km)=',great_circle_dist( p1, p2, radius ) ! earth radius is assumed + write(*,*) 'Grid distance at face edge (m)=',great_circle_dist( p1, p2, radius ) ! earth radius is assumed endif end subroutine gnomonic_ed subroutine gnomonic_ed_limited(im, in, nghost, lL, lR, uL, uR, lamda, theta) - + !This routine creates a limited-area equidistant gnomonic grid with !corners given by lL (lower-left), lR (lower-right), uL (upper-left), - !and uR (upper-right) with im by in cells. lamda and theta are the + !and uR (upper-right) with im by in cells. lamda and theta are the !latitude-longitude coordinates of the corners of the cells. !This formulation assumes the coordinates given are on the @@ -1362,8 +1429,8 @@ subroutine gnomonic_ed_limited(im, in, nghost, lL, lR, uL, uR, lamda, theta) real(kind=R_GRID) p1(2), p2(2) real(f_p):: rsq3, alpha, delx, dely integer i, j, k, irefl - - rsq3 = 1.d0/sqrt(3.d0) + + rsq3 = 1.d0/sqrt(3.d0) alpha = asin( rsq3 ) lamda(1,1) = lL(1); theta(1,1) = lL(2) @@ -1407,7 +1474,7 @@ subroutine gnomonic_ed_limited(im, in, nghost, lL, lR, uL, uR, lamda, theta) end do !Get cartesian coordinates and project onto the cube face with x = -rsq3 - + i=1 do j=1-nghost,in+1+nghost call latlon2xyz2(lamda(i,j), theta(i,j), pp(1,i,j)) @@ -1444,7 +1511,7 @@ subroutine gnomonic_ed_limited(im, in, nghost, lL, lR, uL, uR, lamda, theta) lamda(1-nghost:im+1+nghost,1-nghost:in+1+nghost), & theta(1-nghost:im+1+nghost,1-nghost:in+1+nghost)) !call cart_to_latlon( (im+1)*(in+1), pp(:,1:im+1,1:in+1), lamda(1:im+1,1:in+1), theta(1:im+1,1:in+1)) - + ! Compute great-circle-distance "resolution" along the face edge: if ( is_master() ) then p1(1) = lamda(1,1); p1(2) = theta(1,1) @@ -1474,7 +1541,7 @@ subroutine gnomonic_angl(im, lamda, theta) dp = 0.5d0*pi/real(im,kind=R_GRID) - rsq3 = 1.d0/sqrt(3.d0) + rsq3 = 1.d0/sqrt(3.d0) do k=1,im+1 do j=1,im+1 p(1,j,k) =-rsq3 ! constant @@ -1500,9 +1567,9 @@ subroutine gnomonic_dist(im, lamda, theta) ! Face-2 - rsq3 = 1.d0/sqrt(3.d0) + rsq3 = 1.d0/sqrt(3.d0) xf = -rsq3 - y0 = rsq3; dy = -2.d0*rsq3/im + y0 = rsq3; dy = -2.d0*rsq3/im z0 = -rsq3; dz = 2.d0*rsq3/im do k=1,im+1 @@ -1535,7 +1602,7 @@ subroutine symm_ed(im, lamda, theta) ip = im + 2 - i avg = 0.5d0*(lamda(i,j)-lamda(ip,j)) lamda(i, j) = avg + pi - lamda(ip,j) = pi - avg + lamda(ip,j) = pi - avg avg = 0.5d0*(theta(i,j)+theta(ip,j)) theta(i, j) = avg theta(ip,j) = avg @@ -1599,7 +1666,7 @@ end subroutine latlon2xyz subroutine mirror_xyz(p1, p2, p0, p) -! Given the "mirror" as defined by p1(x1, y1, z1), p2(x2, y2, z2), and center +! Given the "mirror" as defined by p1(x1, y1, z1), p2(x2, y2, z2), and center ! of the sphere, compute the mirror image of p0(x0, y0, z0) as p(x, y, z) !------------------------------------------------------------------------------- @@ -1607,7 +1674,7 @@ subroutine mirror_xyz(p1, p2, p0, p) ! ! p(k) = p0(k) - 2 * [p0(k) .dot. NB(k)] * NB(k) ! -! where +! where ! NB(k) = p1(k) .cross. p2(k) ---- direction of NB is imaterial ! 
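mirror_xyz below implements the reflection p = p0 - 2*(p0 . nb)*nb across the plane spanned by p1, p2, and the center of the sphere. A self-contained sketch of that formula with arbitrary sample points; it also checks that the reflection preserves vector length, so points on the unit sphere stay on it.

! Sketch of the reflection used in mirror_xyz: reflect p0 across the plane
! through the origin spanned by p1 and p2 (sample points are arbitrary).
program mirror_sketch
  implicit none
  double precision :: p1(3), p2(3), p0(3), p(3), nb(3), pdot
  integer :: k

  p1 = (/ 1.d0, 0.d0, 0.d0 /)
  p2 = (/ 0.d0, 1.d0, 0.d0 /)        ! plane is z = 0; nb points along z
  p0 = (/ 0.6d0, 0.0d0, 0.8d0 /)     ! unit vector off the plane

  ! unit normal to the mirror plane (its sign is immaterial)
  nb(1) = p1(2)*p2(3) - p1(3)*p2(2)
  nb(2) = p1(3)*p2(1) - p1(1)*p2(3)
  nb(3) = p1(1)*p2(2) - p1(2)*p2(1)
  nb = nb / sqrt(sum(nb**2))

  pdot = sum(p0*nb)
  do k = 1, 3
     p(k) = p0(k) - 2.d0*pdot*nb(k)
  enddo

  write(*,*) 'image point:', p                ! (0.6, 0.0, -0.8)
  write(*,*) '|p0|, |p|  :', sqrt(sum(p0**2)), sqrt(sum(p**2))
end program mirror_sketch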
the normal unit vector to the "mirror" plane !------------------------------------------------------------------------------- @@ -1631,12 +1698,12 @@ subroutine mirror_xyz(p1, p2, p0, p) p(k) = p0(k) - 2.d0*pdot*nb(k) enddo - end subroutine mirror_xyz + end subroutine mirror_xyz subroutine mirror_latlon(lon1, lat1, lon2, lat2, lon0, lat0, lon3, lat3) ! -! Given the "mirror" as defined by (lon1, lat1), (lon2, lat2), and center +! Given the "mirror" as defined by (lon1, lat1), (lon2, lat2), and center ! of the sphere, compute the mirror image of (lon0, lat0) as (lon3, lat3) real(kind=R_GRID), intent(in):: lon1, lat1, lon2, lat2, lon0, lat0 @@ -1697,7 +1764,7 @@ subroutine cart_to_latlon(np, q, xs, ys) if ( lon < 0.) lon = real(2.,kind=f_p)*pi + lon ! RIGHT_HAND system: lat = asin(p(3)) - + xs(i) = lon ys(i) = lat ! q Normalized: @@ -1816,7 +1883,7 @@ subroutine normalize_vect(e) integer k pdot = e(1)**2 + e(2)**2 + e(3)**2 - pdot = sqrt( pdot ) + pdot = sqrt( pdot ) do k=1,3 e(k) = e(k) / pdot @@ -1867,7 +1934,7 @@ subroutine spherical_linear_interpolation(beta, p1, p2, pb) real(kind=R_GRID):: pm(2) real(kind=R_GRID):: e1(3), e2(3), eb(3) real(kind=R_GRID):: dd, alpha, omg - + if ( abs(p1(1) - p2(1)) < 1.d-8 .and. abs(p1(2) - p2(2)) < 1.d-8) then call mpp_error(WARNING, 'spherical_linear_interpolation was passed two colocated points.') pb = p1 @@ -1878,13 +1945,13 @@ subroutine spherical_linear_interpolation(beta, p1, p2, pb) call latlon2xyz(p2, e2) dd = sqrt( e1(1)**2 + e1(2)**2 + e1(3)**2 ) - + e1(1) = e1(1) / dd e1(2) = e1(2) / dd e1(3) = e1(3) / dd dd = sqrt( e2(1)**2 + e2(2)**2 + e2(3)**2 ) - + e2(1) = e2(1) / dd e2(2) = e2(2) / dd e2(3) = e2(3) / dd @@ -1972,7 +2039,7 @@ end subroutine mid_pt_cart real function great_circle_dist( q1, q2, radius ) real(kind=R_GRID), intent(IN) :: q1(2), q2(2) real(kind=R_GRID), intent(IN), optional :: radius - + real (f_p):: p1(2), p2(2) real (f_p):: beta integer n @@ -1999,7 +2066,7 @@ function great_circle_dist_cart(v1, v2, radius) ! date: July 2006 ! ! version: 0.1 ! ! ! - ! calculate normalized great circle distance between v1 and v2 ! + ! calculate normalized great circle distance between v1 and v2 ! !------------------------------------------------------------------! real(kind=R_GRID) :: great_circle_dist_cart real(kind=R_GRID), dimension(3), intent(in) :: v1, v2 @@ -2008,7 +2075,7 @@ function great_circle_dist_cart(v1, v2, radius) norm = (v1(1)*v1(1)+v1(2)*v1(2)+v1(3)*v1(3)) & *(v2(1)*v2(1)+v2(2)*v2(2)+v2(3)*v2(3)) - + !if (norm <= 0.) print*, 'negative norm: ', norm, v1, v2 great_circle_dist_cart=(v1(1)*v2(1)+v1(2)*v2(2)+v1(3)*v2(3)) & @@ -2109,7 +2176,7 @@ subroutine check_local(x1,x2,local) dx(:)=x1(:)-x2(:) dist=dx(1)*dx(1)+dx(2)*dx(2)+dx(3)*dx(3) - + dx(:)=x1(:)-x_inter(:) dist1=dx(1)*dx(1)+dx(2)*dx(2)+dx(3)*dx(3) dx(:)=x2(:)-x_inter(:) @@ -2120,7 +2187,7 @@ subroutine check_local(x1,x2,local) else local=.false. endif - + end subroutine check_local !------------------------------------------------------------------! end subroutine intersect @@ -2150,7 +2217,7 @@ subroutine intersect_cross(a1,a2,b1,b2,radius,x_inter,local_a,local_b) ! vector v1, which is the cross product of any two vectors lying ! in the plane; here, we use position vectors, which are unit ! vectors lying in the plane and rooted at the center of the - ! sphere. + ! sphere. !The intersection of two great circles is where the the ! intersection of the planes, a line, itself intersects the ! sphere. 
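The intersect_cross construction described above reduces to three cross products: the normals of the two great-circle planes, then the line where those planes meet, normalized onto the sphere. A minimal sketch, using the equator and a meridian as an illustrative pair of arcs (the nearest-point selection and locality tests of the full routine are omitted):

! Sketch of the cross-product construction in intersect_cross: the two
! intersections of two great circles are +/- (na x nb)/|na x nb|.
program intersect_sketch
  implicit none
  double precision :: a1(3), a2(3), b1(3), b2(3)
  double precision :: na(3), nb(3), x(3)

  a1 = (/ 1.d0, 0.d0, 0.d0 /)   ! two points on the equator
  a2 = (/ 0.d0, 1.d0, 0.d0 /)
  b1 = (/ 1.d0, 0.d0, 0.d0 /)   ! two points on the Greenwich meridian
  b2 = (/ 0.d0, 0.d0, 1.d0 /)

  na = cross(a1, a2)            ! normal of the equatorial plane
  nb = cross(b1, b2)            ! normal of the meridian plane
  x  = cross(na, nb)            ! line of intersection of the two planes
  x  = x / sqrt(sum(x**2))      ! project onto the unit sphere

  write(*,*) 'intersection:', x          ! (1,0,0); the other is -x
contains
  function cross(u, v) result(w)
    double precision, intent(in) :: u(3), v(3)
    double precision :: w(3)
    w(1) = u(2)*v(3) - u(3)*v(2)
    w(2) = u(3)*v(1) - u(1)*v(3)
    w(3) = u(1)*v(2) - u(2)*v(1)
  end function cross
end program intersect_sketch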
Since the planes are defined by perpendicular vectors @@ -2168,7 +2235,7 @@ subroutine intersect_cross(a1,a2,b1,b2,radius,x_inter,local_a,local_b) !Normalize x_inter = x_inter/sqrt(x_inter(1)**2 + x_inter(2)**2 + x_inter(3)**2) - ! check if intersection is between pairs of points on sphere + ! check if intersection is between pairs of points on sphere call get_nearest() call check_local(a1,a2,local_a) call check_local(b1,b2,local_b) @@ -2197,7 +2264,7 @@ subroutine check_local(x1,x2,local) dx(:)=x1(:)-x2(:) dist=dx(1)*dx(1)+dx(2)*dx(2)+dx(3)*dx(3) - + dx(:)=x1(:)-x_inter(:) dist1=dx(1)*dx(1)+dx(2)*dx(2)+dx(3)*dx(3) dx(:)=x2(:)-x_inter(:) @@ -2208,7 +2275,7 @@ subroutine check_local(x1,x2,local) else local=.false. endif - + end subroutine check_local !------------------------------------------------------------------! end subroutine intersect_cross @@ -2315,8 +2382,8 @@ subroutine init_cubed_to_latlon( gridstruct, hydrostatic, agrid, grid_type, ord, end subroutine init_cubed_to_latlon - subroutine cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, km, mode, grid_type, domain, nested, c2l_ord, bd) - type(fv_grid_bounds_type), intent(IN) :: bd + subroutine cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, km, mode, grid_type, domain, bounded_domain, c2l_ord, bd) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in) :: km, npx, npy, grid_type, c2l_ord integer, intent(in) :: mode ! update if present type(fv_grid_type), intent(IN) :: gridstruct @@ -2325,18 +2392,18 @@ subroutine cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, km, mode, grid_ty real, intent(out):: ua(bd%isd:bd%ied, bd%jsd:bd%jed,km) real, intent(out):: va(bd%isd:bd%ied, bd%jsd:bd%jed,km) type(domain2d), intent(INOUT) :: domain - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain if ( c2l_ord == 2 ) then call c2l_ord2(u, v, ua, va, gridstruct, km, grid_type, bd, .false.) else - call c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, nested, mode, bd) + call c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, bounded_domain, mode, bd) endif end subroutine cubed_to_latlon - subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, nested, mode, bd) + subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, bounded_domain, mode, bd) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in) :: km, npx, npy, grid_type @@ -2347,8 +2414,8 @@ subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, n real, intent(out):: ua(bd%isd:bd%ied, bd%jsd:bd%jed,km) real, intent(out):: va(bd%isd:bd%ied, bd%jsd:bd%jed,km) type(domain2d), intent(INOUT) :: domain - logical, intent(IN) :: nested -! Local + logical, intent(IN) :: bounded_domain +! Local ! 
4-pt Lagrange interpolation real :: a1 = 0.5625 real :: a2 = -0.0625 @@ -2374,12 +2441,12 @@ subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, n call timing_off('COMM_TOTAL') endif -!$OMP parallel do default(none) shared(is,ie,js,je,km,npx,npy,grid_type,nested,c2,c1, & +!$OMP parallel do default(none) shared(is,ie,js,je,km,npx,npy,grid_type,bounded_domain,c2,c1, & !$OMP u,v,gridstruct,ua,va,a1,a2) & !$OMP private(utmp, vtmp, wu, wv) do k=1,km if ( grid_type < 4 ) then - if (nested) then + if (bounded_domain) then do j=max(1,js),min(npy-1,je) do i=max(1,is),min(npx-1,ie) utmp(i,j) = c2*(u(i,j-1,k)+u(i,j+2,k)) + c1*(u(i,j,k)+u(i,j+1,k)) @@ -2455,7 +2522,7 @@ subroutine c2l_ord4(u, v, ua, va, gridstruct, npx, npy, km, grid_type, domain, n enddo endif - endif !nested + endif !bounded_domain !Transform local a-grid winds into latitude-longitude coordinates do j=js,je @@ -2487,7 +2554,7 @@ subroutine c2l_ord2(u, v, ua, va, gridstruct, km, grid_type, bd, do_halo) real, intent(out):: ua(bd%isd:bd%ied, bd%jsd:bd%jed,km) real, intent(out):: va(bd%isd:bd%ied, bd%jsd:bd%jed,km) !-------------------------------------------------------------- -! Local +! Local real wu(bd%is-1:bd%ie+1, bd%js-1:bd%je+2) real wv(bd%is-1:bd%ie+2, bd%js-1:bd%je+1) real u1(bd%is-1:bd%ie+1), v1(bd%is-1:bd%ie+1) @@ -2595,12 +2662,12 @@ subroutine expand_cell(q1, q2, q3, q4, a1, a2, a3, a4, fac) ec(k) = ec(k) / dd ! cell center position enddo -! Perform the "extrapolation" in 3D (x-y-z) +! Perform the "extrapolation" in 3D (x-y-z) do k=1,3 - qq1(k) = ec(k) + fac*(p1(k)-ec(k)) - qq2(k) = ec(k) + fac*(p2(k)-ec(k)) - qq3(k) = ec(k) + fac*(p3(k)-ec(k)) - qq4(k) = ec(k) + fac*(p4(k)-ec(k)) + qq1(k) = ec(k) + fac*(p1(k)-ec(k)) + qq2(k) = ec(k) + fac*(p2(k)-ec(k)) + qq3(k) = ec(k) + fac*(p3(k)-ec(k)) + qq4(k) = ec(k) + fac*(p4(k)-ec(k)) enddo !-------------------------------------------------------- @@ -2768,7 +2835,7 @@ end function dist2side_latlon real(kind=R_GRID) function spherical_angle(p1, p2, p3) - + ! p3 ! / ! / @@ -2795,13 +2862,13 @@ real(kind=R_GRID) function spherical_angle(p1, p2, p3) ! Page 41, Silverman's book on Vector Algebra; spherical trigonmetry !------------------------------------------------------------------- ! Vector P: - px = e1(2)*e2(3) - e1(3)*e2(2) - py = e1(3)*e2(1) - e1(1)*e2(3) - pz = e1(1)*e2(2) - e1(2)*e2(1) + px = e1(2)*e2(3) - e1(3)*e2(2) + py = e1(3)*e2(1) - e1(1)*e2(3) + pz = e1(1)*e2(2) - e1(2)*e2(1) ! Vector Q: - qx = e1(2)*e3(3) - e1(3)*e3(2) - qy = e1(3)*e3(1) - e1(1)*e3(3) - qz = e1(1)*e3(2) - e1(2)*e3(1) + qx = e1(2)*e3(3) - e1(3)*e3(2) + qy = e1(3)*e3(1) - e1(1)*e3(3) + qz = e1(1)*e3(2) - e1(2)*e3(1) ddd = (px*px+py*py+pz*pz)*(qx*qx+qy*qy+qz*qz) @@ -2815,7 +2882,7 @@ real(kind=R_GRID) function spherical_angle(p1, p2, p3) if (ddd < 0.d0) then angle = 4.d0*atan(1.0d0) !should be pi else - angle = 0.d0 + angle = 0.d0 end if else angle = acos( ddd ) @@ -2830,9 +2897,9 @@ end function spherical_angle real(kind=R_GRID) function cos_angle(p1, p2, p3) ! As spherical_angle, but returns the cos(angle) ! p3 -! ^ -! | -! | +! ^ +! | +! | ! p1 ---> p2 ! real(kind=R_GRID), intent(in):: p1(3), p2(3), p3(3) @@ -2853,19 +2920,19 @@ real(kind=R_GRID) function cos_angle(p1, p2, p3) ! Page 41, Silverman's book on Vector Algebra; spherical trigonmetry !------------------------------------------------------------------- ! 
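The coefficients a1 = 0.5625 and a2 = -0.0625 at the top of c2l_ord4 are the 4-point Lagrange weights (-1, 9, 9, -1)/16 for interpolating to the midpoint of a uniform stencil; they recover cubic variation exactly. A quick standalone check (the sample cubic is arbitrary):

! Sketch of the 4-point Lagrange midpoint interpolation used in c2l_ord4:
! on a uniform stencil the weights are (-1, 9, 9, -1)/16, so a cubic is
! interpolated to the midpoint with no error.
program lagrange4_sketch
  implicit none
  real, parameter :: a1 = 0.5625, a2 = -0.0625
  real :: x(4), f(4), fmid
  integer :: i

  x = (/ -1.5, -0.5, 0.5, 1.5 /)       ! four samples around the midpoint 0
  do i = 1, 4
     f(i) = 2. + 3.*x(i) - x(i)**2 + 0.5*x(i)**3   ! an arbitrary cubic
  enddo

  fmid = a2*(f(1)+f(4)) + a1*(f(2)+f(3))
  write(*,*) 'interpolated midpoint:', fmid   ! equals the cubic at x=0, i.e. 2.0
end program lagrange4_sketch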
Vector P:= e1 X e2 - px = e1(2)*e2(3) - e1(3)*e2(2) - py = e1(3)*e2(1) - e1(1)*e2(3) - pz = e1(1)*e2(2) - e1(2)*e2(1) + px = e1(2)*e2(3) - e1(3)*e2(2) + py = e1(3)*e2(1) - e1(1)*e2(3) + pz = e1(1)*e2(2) - e1(2)*e2(1) ! Vector Q: e1 X e3 - qx = e1(2)*e3(3) - e1(3)*e3(2) - qy = e1(3)*e3(1) - e1(1)*e3(3) - qz = e1(1)*e3(2) - e1(2)*e3(1) + qx = e1(2)*e3(3) - e1(3)*e3(2) + qy = e1(3)*e3(1) - e1(1)*e3(3) + qz = e1(1)*e3(2) - e1(2)*e3(1) ! ddd = sqrt[ (P*P) (Q*Q) ] ddd = sqrt( (px**2+py**2+pz**2)*(qx**2+qy**2+qz**2) ) if ( ddd > 0.d0 ) then - angle = (px*qx+py*qy+pz*qz) / ddd + angle = (px*qx+py*qy+pz*qz) / ddd else angle = 1.d0 endif @@ -2876,7 +2943,7 @@ end function cos_angle real function g_sum(domain, p, ifirst, ilast, jfirst, jlast, ngc, area, mode, reproduce) -! Fast version of globalsum +! Fast version of globalsum integer, intent(IN) :: ifirst, ilast integer, intent(IN) :: jfirst, jlast, ngc integer, intent(IN) :: mode ! if ==1 divided by area @@ -2888,14 +2955,14 @@ real function g_sum(domain, p, ifirst, ilast, jfirst, jlast, ngc, area, mode, re real gsum logical, SAVE :: g_sum_initialized = .false. real(kind=R_GRID), SAVE :: global_area - real :: tmp(ifirst:ilast,jfirst:jlast) - + real :: tmp(ifirst:ilast,jfirst:jlast) + if ( .not. g_sum_initialized ) then global_area = mpp_global_sum(domain, area, flags=BITWISE_EFP_SUM) if ( is_master() ) write(*,*) 'Global Area=',global_area g_sum_initialized = .true. end if - + !------------------------- ! FMS global sum algorithm: !------------------------- @@ -2935,7 +3002,7 @@ real function global_qsum(p, ifirst, ilast, jfirst, jlast) real, intent(IN) :: p(ifirst:ilast,jfirst:jlast) ! field to be summed integer :: i,j real gsum - + gsum = 0. do j=jfirst,jlast do i=ifirst,ilast @@ -3018,7 +3085,7 @@ subroutine fill_ghost_r4(q, npx, npy, value, bd) ied = bd%ied jsd = bd%jsd jed = bd%jed - + do j=jsd,jed do i=isd,ied if ( (i<1 .and. j<1) ) then @@ -3058,7 +3125,7 @@ subroutine fill_ghost_r8(q, npx, npy, value, bd) ied = bd%ied jsd = bd%jsd jed = bd%jed - + do j=jsd,jed do i=isd,ied if ( (i<1 .and. j<1) ) then @@ -3094,12 +3161,13 @@ subroutine make_eta_level(km, pe, area, kks, ak, bk, ptop, domain, bd) real, allocatable:: pem(:,:) real(kind=4) :: p4 integer k, i, j - integer :: is, ie, js, je + integer :: is, ie, js, je, ng is = bd%is ie = bd%ie js = bd%js je = bd%je + ng = bd%ng allocate ( pem(is:ie,js:je) ) @@ -3119,7 +3187,7 @@ subroutine make_eta_level(km, pe, area, kks, ak, bk, ptop, domain, bd) ptop = ph(1) do j=js-1,je+1 do i=is-1,ie+1 - pe(i,1,j) = ptop + pe(i,1,j) = ptop enddo enddo @@ -3155,7 +3223,7 @@ subroutine invert_matrix(n, a, x) real(kind=R_GRID), intent (out), dimension (n,n):: x ! inverted maxtrix real(kind=R_GRID), dimension (n,n) :: b integer indx(n) - + do i = 1, n do j = 1, n b(i,j) = 0.0 @@ -3165,9 +3233,9 @@ subroutine invert_matrix(n, a, x) do i = 1, n b(i,i) = 1.0 end do - + call elgs (a,n,indx) - + do i = 1, n-1 do j = i+1, n do k = 1, n @@ -3175,7 +3243,7 @@ subroutine invert_matrix(n, a, x) end do end do end do - + do i = 1, n x(n,i) = b(indx(n),i)/a(indx(n),n) do j = n-1, 1, -1 @@ -3188,7 +3256,7 @@ subroutine invert_matrix(n, a, x) end do end subroutine invert_matrix - + subroutine elgs (a,n,indx) @@ -3197,7 +3265,7 @@ subroutine elgs (a,n,indx) ! a(n,n) is the original matrix in the input and transformed matrix ! plus the pivoting element ratios below the diagonal in the output. 
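invert_matrix builds the inverse column by column from the elimination that elgs performs. The sketch below does the same job in compact form, as a naive Gauss-Jordan sweep with partial pivoting (it does not store the multiplier ratios the way elgs does), and checks A*A_inv against the identity; the 3x3 matrix is arbitrary.

! Compact sketch of matrix inversion by elimination, the task that
! invert_matrix/elgs perform (here as Gauss-Jordan on an augmented matrix).
program invert_sketch
  implicit none
  integer, parameter :: n = 3
  double precision :: a(n,n), b(n,n), aug(n,2*n), tmp(2*n)
  integer :: i, j, k, p

  a = reshape( (/ 4.d0, 1.d0, 0.d0,   &
                  1.d0, 3.d0, 1.d0,   &
                  0.d0, 1.d0, 2.d0 /), (/n,n/) )

  aug = 0.d0
  aug(:,1:n) = a
  do i = 1, n
     aug(i,n+i) = 1.d0                        ! append the identity
  enddo

  do k = 1, n
     p = k + maxloc(abs(aug(k:n,k)), 1) - 1   ! partial pivoting
     tmp = aug(k,:); aug(k,:) = aug(p,:); aug(p,:) = tmp
     aug(k,:) = aug(k,:)/aug(k,k)             ! scale the pivot row
     do i = 1, n
        if (i /= k) aug(i,:) = aug(i,:) - aug(i,k)*aug(k,:)
     enddo
  enddo
  b = aug(:,n+1:2*n)                          ! the inverse

  do i = 1, n
     write(*,'(3f10.5)') (sum(a(i,:)*b(:,j)), j=1,n)   ! rows of A*B ~ I
  enddo
end program invert_sketch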
!------------------------------------------------------------------ - + integer, intent (in) :: n integer :: i,j,k,itmp integer, intent (out), dimension (n) :: indx @@ -3205,7 +3273,7 @@ subroutine elgs (a,n,indx) ! real(kind=R_GRID) :: c1, pie, pi1, pj real(kind=R_GRID), dimension (n) :: c - + do i = 1, n indx(i) = i end do @@ -3251,7 +3319,7 @@ subroutine elgs (a,n,indx) end do end do end do - + end subroutine elgs subroutine get_latlon_vector(pp, elon, elat) @@ -3270,8 +3338,8 @@ subroutine get_latlon_vector(pp, elon, elat) end subroutine get_latlon_vector - - + + subroutine project_sphere_v( np, f, e ) !--------------------------------- @@ -3291,6 +3359,337 @@ subroutine project_sphere_v( np, f, e ) end subroutine project_sphere_v + subroutine update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) + +! Purpose; Transform wind tendencies on A grid to D grid for the final update + + integer, intent(in):: is, ie, js, je + integer, intent(in):: isd, ied, jsd, jed + integer, intent(IN) :: npx,npy, npz + real, intent(in):: dt + real, intent(inout):: u(isd:ied, jsd:jed+1,npz) + real, intent(inout):: v(isd:ied+1,jsd:jed ,npz) + real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt + type(fv_grid_type), intent(IN), target :: gridstruct + type(domain2d), intent(INOUT) :: domain + +! local: + real v3(is-1:ie+1,js-1:je+1,3) + real ue(is-1:ie+1,js:je+1,3) ! 3D winds at edges + real ve(is:ie+1,js-1:je+1, 3) ! 3D winds at edges + real, dimension(is:ie):: ut1, ut2, ut3 + real, dimension(js:je):: vt1, vt2, vt3 + real dt5, gratio + integer i, j, k, m, im2, jm2 + + real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat + real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew + real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n + + es => gridstruct%es + ew => gridstruct%ew + vlon => gridstruct%vlon + vlat => gridstruct%vlat + + edge_vect_w => gridstruct%edge_vect_w + edge_vect_e => gridstruct%edge_vect_e + edge_vect_s => gridstruct%edge_vect_s + edge_vect_n => gridstruct%edge_vect_n + + dt5 = 0.5 * dt + im2 = (npx-1)/2 + jm2 = (npy-1)/2 + +!$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, & +!$OMP vlon,vlat,jm2,edge_vect_w,npx,edge_vect_e,im2, & +!$OMP edge_vect_s,npy,edge_vect_n,es,ew) & +!$OMP private(ut1, ut2, ut3, vt1, vt2, vt3, ue, ve, v3) + do k=1, npz + + if ( gridstruct%grid_type > 3 ) then ! Local & one tile configurations + + do j=js,je+1 + do i=is,ie + u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k)) + enddo + enddo + do j=js,je + do i=is,ie+1 + v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k)) + enddo + enddo + + else +! Compute 3D wind tendency on A grid + do j=js-1,je+1 + do i=is-1,ie+1 + v3(i,j,1) = u_dt(i,j,k)*vlon(i,j,1) + v_dt(i,j,k)*vlat(i,j,1) + v3(i,j,2) = u_dt(i,j,k)*vlon(i,j,2) + v_dt(i,j,k)*vlat(i,j,2) + v3(i,j,3) = u_dt(i,j,k)*vlon(i,j,3) + v_dt(i,j,k)*vlat(i,j,3) + enddo + enddo + +! Interpolate to cell edges + do j=js,je+1 + do i=is-1,ie+1 + ue(i,j,1) = v3(i,j-1,1) + v3(i,j,1) + ue(i,j,2) = v3(i,j-1,2) + v3(i,j,2) + ue(i,j,3) = v3(i,j-1,3) + v3(i,j,3) + enddo + enddo + + do j=js-1,je+1 + do i=is,ie+1 + ve(i,j,1) = v3(i-1,j,1) + v3(i,j,1) + ve(i,j,2) = v3(i-1,j,2) + v3(i,j,2) + ve(i,j,3) = v3(i-1,j,3) + v3(i,j,3) + enddo + enddo + +! --- E_W edges (for v-wind): + if ( is==1 .and. .not. 
gridstruct%bounded_domain ) then + i = 1 + do j=js,je + if ( j>jm2 ) then + vt1(j) = edge_vect_w(j)*ve(i,j-1,1)+(1.-edge_vect_w(j))*ve(i,j,1) + vt2(j) = edge_vect_w(j)*ve(i,j-1,2)+(1.-edge_vect_w(j))*ve(i,j,2) + vt3(j) = edge_vect_w(j)*ve(i,j-1,3)+(1.-edge_vect_w(j))*ve(i,j,3) + else + vt1(j) = edge_vect_w(j)*ve(i,j+1,1)+(1.-edge_vect_w(j))*ve(i,j,1) + vt2(j) = edge_vect_w(j)*ve(i,j+1,2)+(1.-edge_vect_w(j))*ve(i,j,2) + vt3(j) = edge_vect_w(j)*ve(i,j+1,3)+(1.-edge_vect_w(j))*ve(i,j,3) + endif + enddo + do j=js,je + ve(i,j,1) = vt1(j) + ve(i,j,2) = vt2(j) + ve(i,j,3) = vt3(j) + enddo + endif + if ( (ie+1)==npx .and. .not. gridstruct%bounded_domain ) then + i = npx + do j=js,je + if ( j>jm2 ) then + vt1(j) = edge_vect_e(j)*ve(i,j-1,1)+(1.-edge_vect_e(j))*ve(i,j,1) + vt2(j) = edge_vect_e(j)*ve(i,j-1,2)+(1.-edge_vect_e(j))*ve(i,j,2) + vt3(j) = edge_vect_e(j)*ve(i,j-1,3)+(1.-edge_vect_e(j))*ve(i,j,3) + else + vt1(j) = edge_vect_e(j)*ve(i,j+1,1)+(1.-edge_vect_e(j))*ve(i,j,1) + vt2(j) = edge_vect_e(j)*ve(i,j+1,2)+(1.-edge_vect_e(j))*ve(i,j,2) + vt3(j) = edge_vect_e(j)*ve(i,j+1,3)+(1.-edge_vect_e(j))*ve(i,j,3) + endif + enddo + do j=js,je + ve(i,j,1) = vt1(j) + ve(i,j,2) = vt2(j) + ve(i,j,3) = vt3(j) + enddo + endif +! N-S edges (for u-wind): + if ( js==1 .and. .not. gridstruct%bounded_domain) then + j = 1 + do i=is,ie + if ( i>im2 ) then + ut1(i) = edge_vect_s(i)*ue(i-1,j,1)+(1.-edge_vect_s(i))*ue(i,j,1) + ut2(i) = edge_vect_s(i)*ue(i-1,j,2)+(1.-edge_vect_s(i))*ue(i,j,2) + ut3(i) = edge_vect_s(i)*ue(i-1,j,3)+(1.-edge_vect_s(i))*ue(i,j,3) + else + ut1(i) = edge_vect_s(i)*ue(i+1,j,1)+(1.-edge_vect_s(i))*ue(i,j,1) + ut2(i) = edge_vect_s(i)*ue(i+1,j,2)+(1.-edge_vect_s(i))*ue(i,j,2) + ut3(i) = edge_vect_s(i)*ue(i+1,j,3)+(1.-edge_vect_s(i))*ue(i,j,3) + endif + enddo + do i=is,ie + ue(i,j,1) = ut1(i) + ue(i,j,2) = ut2(i) + ue(i,j,3) = ut3(i) + enddo + endif + if ( (je+1)==npy .and. .not. gridstruct%bounded_domain) then + j = npy + do i=is,ie + if ( i>im2 ) then + ut1(i) = edge_vect_n(i)*ue(i-1,j,1)+(1.-edge_vect_n(i))*ue(i,j,1) + ut2(i) = edge_vect_n(i)*ue(i-1,j,2)+(1.-edge_vect_n(i))*ue(i,j,2) + ut3(i) = edge_vect_n(i)*ue(i-1,j,3)+(1.-edge_vect_n(i))*ue(i,j,3) + else + ut1(i) = edge_vect_n(i)*ue(i+1,j,1)+(1.-edge_vect_n(i))*ue(i,j,1) + ut2(i) = edge_vect_n(i)*ue(i+1,j,2)+(1.-edge_vect_n(i))*ue(i,j,2) + ut3(i) = edge_vect_n(i)*ue(i+1,j,3)+(1.-edge_vect_n(i))*ue(i,j,3) + endif + enddo + do i=is,ie + ue(i,j,1) = ut1(i) + ue(i,j,2) = ut2(i) + ue(i,j,3) = ut3(i) + enddo + endif + do j=js,je+1 + do i=is,ie + u(i,j,k) = u(i,j,k) + dt5*( ue(i,j,1)*es(1,i,j,1) + & + ue(i,j,2)*es(2,i,j,1) + & + ue(i,j,3)*es(3,i,j,1) ) + enddo + enddo + do j=js,je + do i=is,ie+1 + v(i,j,k) = v(i,j,k) + dt5*( ve(i,j,1)*ew(1,i,j,2) + & + ve(i,j,2)*ew(2,i,j,2) + & + ve(i,j,3)*ew(3,i,j,2) ) + enddo + enddo +! Update: + endif ! end grid_type + + enddo ! k-loop + + end subroutine update_dwinds_phys + + + subroutine update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) + +! Purpose; Transform wind tendencies on A grid to D grid for the final update + + integer, intent(in):: is, ie, js, je + integer, intent(in):: isd, ied, jsd, jed + real, intent(in):: dt + real, intent(inout):: u(isd:ied, jsd:jed+1,npz) + real, intent(inout):: v(isd:ied+1,jsd:jed ,npz) + real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt + type(fv_grid_type), intent(IN), target :: gridstruct + integer, intent(IN) :: npx,npy, npz + type(domain2d), intent(INOUT) :: domain + +! 
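Away from tile edges, both update routines place the physics wind increments on the D grid by averaging the two neighboring A-grid (cell-center) tendencies onto each face: u(i,j) = u(i,j) + 0.5*dt*(u_dt(i,j-1) + u_dt(i,j)). A toy sketch of that interior update; the grid size and tendency field are made up.

! Sketch of the interior A-to-D update in update_dwinds_phys: each D-grid
! u face takes the mean of the two adjacent cell-center tendencies.
program a2d_update_sketch
  implicit none
  integer, parameter :: nx = 4, ny = 4
  real :: u_dt(nx,ny)      ! A-grid tendency at cell centers
  real :: u(nx,ny+1)       ! D-grid u on south/north cell faces
  real :: dt, dt5
  integer :: i, j

  dt  = 1800.
  dt5 = 0.5*dt
  u   = 10.
  do j = 1, ny
     do i = 1, nx
        u_dt(i,j) = 1.e-4*real(j)       ! tendency varying in y
     enddo
  enddo

  do j = 2, ny                          ! interior faces only
     do i = 1, nx
        u(i,j) = u(i,j) + dt5*(u_dt(i,j-1) + u_dt(i,j))
     enddo
  enddo
  write(*,*) 'updated interior faces:', u(1,2:ny)
end program a2d_update_sketch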
local: + real ut(isd:ied,jsd:jed) + real:: dt5, gratio + integer i, j, k + + real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat + real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew + real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n + real, pointer, dimension(:,:) :: z11, z12, z21, z22, dya, dxa + + es => gridstruct%es + ew => gridstruct%ew + vlon => gridstruct%vlon + vlat => gridstruct%vlat + + edge_vect_w => gridstruct%edge_vect_w + edge_vect_e => gridstruct%edge_vect_e + edge_vect_s => gridstruct%edge_vect_s + edge_vect_n => gridstruct%edge_vect_n + + z11 => gridstruct%z11 + z21 => gridstruct%z21 + z12 => gridstruct%z12 + z22 => gridstruct%z22 + + dxa => gridstruct%dxa + dya => gridstruct%dya + +! Transform wind tendency on A grid to local "co-variant" components: + +!$OMP parallel do default(none) shared(is,ie,js,je,npz,z11,u_dt,z12,v_dt,z21,z22) & +!$OMP private(ut) + do k=1,npz + do j=js,je + do i=is,ie + ut(i,j) = z11(i,j)*u_dt(i,j,k) + z12(i,j)*v_dt(i,j,k) + v_dt(i,j,k) = z21(i,j)*u_dt(i,j,k) + z22(i,j)*v_dt(i,j,k) + u_dt(i,j,k) = ut(i,j) + enddo + enddo + enddo +! (u_dt,v_dt) are now on local coordinate system + call timing_on('COMM_TOTAL') + call mpp_update_domains(u_dt, v_dt, domain, gridtype=AGRID_PARAM) + call timing_off('COMM_TOTAL') + + dt5 = 0.5 * dt + +!$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, & +!$OMP dya,npy,dxa,npx) & +!$OMP private(gratio) + do k=1, npz + + if ( gridstruct%grid_type > 3 .or. gridstruct%bounded_domain) then ! Local & one tile configurations + + do j=js,je+1 + do i=is,ie + u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k)) + enddo + enddo + do j=js,je + do i=is,ie+1 + v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k)) + enddo + enddo + + else + +!-------- +! u-wind +!-------- +! Edges: + if ( js==1 ) then + do i=is,ie + gratio = dya(i,2) / dya(i,1) + u(i,1,k) = u(i,1,k) + dt5*((2.+gratio)*(u_dt(i,0,k)+u_dt(i,1,k)) & + -(u_dt(i,-1,k)+u_dt(i,2,k)))/(1.+gratio) + enddo + endif + +! Interior + do j=max(2,js),min(npy-1,je+1) + do i=is,ie + u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k)+u_dt(i,j,k)) + enddo + enddo + + if ( (je+1)==npy ) then + do i=is,ie + gratio = dya(i,npy-2) / dya(i,npy-1) + u(i,npy,k) = u(i,npy,k) + dt5*((2.+gratio)*(u_dt(i,npy-1,k)+u_dt(i,npy,k)) & + -(u_dt(i,npy-2,k)+u_dt(i,npy+1,k)))/(1.+gratio) + enddo + endif + +!-------- +! v-wind +!-------- +! West Edges: + if ( is==1 ) then + do j=js,je + gratio = dxa(2,j) / dxa(1,j) + v(1,j,k) = v(1,j,k) + dt5*((2.+gratio)*(v_dt(0,j,k)+v_dt(1,j,k)) & + -(v_dt(-1,j,k)+v_dt(2,j,k)))/(1.+gratio) + enddo + endif + +! Interior + do j=js,je + do i=max(2,is),min(npx-1,ie+1) + v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k)+v_dt(i,j,k)) + enddo + enddo + +! East Edges: + if ( (ie+1)==npx ) then + do j=js,je + gratio = dxa(npx-2,j) / dxa(npx-1,j) + v(npx,j,k) = v(npx,j,k) + dt5*((2.+gratio)*(v_dt(npx-1,j,k)+v_dt(npx,j,k)) & + -(v_dt(npx-2,j,k)+v_dt(npx+1,j,k)))/(1.+gratio) + enddo + endif + + endif ! end grid_type + + enddo ! 
k-loop + + end subroutine update2d_dwinds_phys + + #ifdef TO_DO_MQ subroutine init_mq(phis, gridstruct, npx, npy, is, ie, js, je, ng) integer, intent(in):: npx, npy, is, ie, js, je, ng diff --git a/model/fv_mapz.F90 b/model/fv_mapz.F90 index 0b750dda3..168b1dcd0 100644 --- a/model/fv_mapz.F90 +++ b/model/fv_mapz.F90 @@ -26,17 +26,17 @@ module fv_mapz_mod use constants_mod, only: radius, pi=>pi_8, rvgas, rdgas, grav, hlv, hlf, cp_air, cp_vapor use tracer_manager_mod,only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS - use fv_grid_utils_mod, only: g_sum, ptop_min + use fv_grid_utils_mod, only: g_sum, ptop_min, cubed_to_latlon, update_dwinds_phys use fv_fill_mod, only: fillz use mpp_domains_mod, only: mpp_update_domains, domain2d - use mpp_mod, only: FATAL, mpp_error, get_unit, mpp_root_pe, mpp_pe - use fv_arrays_mod, only: fv_grid_type + use mpp_mod, only: NOTE, mpp_error, get_unit, mpp_root_pe, mpp_pe + use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type, R_GRID use fv_timing_mod, only: timing_on, timing_off - use fv_mp_mod, only: is_master + use fv_mp_mod, only: is_master, mp_reduce_min, mp_reduce_max use fv_cmp_mod, only: qs_init, fv_sat_adj implicit none - real, parameter:: consv_min= 0.001 ! below which no correction applies + real, parameter:: consv_min = 0.001 ! below which no correction applies real, parameter:: t_min= 184. ! below which applies stricter constraint real, parameter:: r3 = 1./3., r23 = 2./3., r12 = 1./12. real, parameter:: cv_vap = 3.*rvgas ! 1384.5 @@ -48,28 +48,32 @@ module fv_mapz_mod real, parameter:: cp_vap = cp_vapor ! 1846. real, parameter:: tice = 273.16 + real, parameter :: w_max = 60. + real, parameter :: w_min = -30. + logical, parameter :: w_limiter = .false. ! doesn't work so well?? + real(kind=4) :: E_Flux = 0. private public compute_total_energy, Lagrangian_to_Eulerian, moist_cv, moist_cp, & - rst_remap, mappm, E_Flux - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' + rst_remap, mappm, E_Flux, remap_2d contains subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & - mdt, pdt, km, is,ie,js,je, isd,ied,jsd,jed, & + mdt, pdt, npx, npy, km, is,ie,js,je, isd,ied,jsd,jed, & nq, nwat, sphum, q_con, u, v, w, delz, pt, q, hs, r_vir, cp, & akap, cappa, kord_mt, kord_wz, kord_tr, kord_tm, peln, te0_2d, & ng, ua, va, omga, te, ws, fill, reproduce_sum, out_dt, dtdt, & ptop, ak, bk, pfull, gridstruct, domain, do_sat_adj, & - hydrostatic, hybrid_z, do_omega, adiabatic, do_adiabatic_init) + hydrostatic, hybrid_z, do_omega, adiabatic, do_adiabatic_init, & + c2l_ord, bd, fv_debug, & + moist_phys) logical, intent(in):: last_step + logical, intent(in):: fv_debug real, intent(in):: mdt ! remap time step real, intent(in):: pdt ! phys time step + integer, intent(in):: npx, npy integer, intent(in):: km integer, intent(in):: nq ! number of tracers (including h2o) integer, intent(in):: nwat @@ -81,6 +85,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & integer, intent(in):: kord_wz ! Mapping order/option for w integer, intent(in):: kord_tr(nq) ! Mapping order for tracers integer, intent(in):: kord_tm ! Mapping order for thermodynamics + integer, intent(in):: c2l_ord real, intent(in):: consv ! 
factor for TE conservation real, intent(in):: r_vir @@ -100,6 +105,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real, intent(in):: pfull(km) type(fv_grid_type), intent(IN), target :: gridstruct type(domain2d), intent(INOUT) :: domain + type(fv_grid_bounds_type), intent(IN) :: bd ! !INPUT/OUTPUT real, intent(inout):: pk(is:ie,js:je,km+1) ! pe to the kappa @@ -112,12 +118,14 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real, intent(inout):: u(isd:ied ,jsd:jed+1,km) ! u-wind (m/s) real, intent(inout):: v(isd:ied+1,jsd:jed ,km) ! v-wind (m/s) real, intent(inout):: w(isd: ,jsd: ,1:) ! vertical velocity (m/s) - real, intent(inout):: pt(isd:ied ,jsd:jed ,km) ! cp*virtual potential temperature + real, intent(inout):: pt(isd:ied ,jsd:jed ,km) ! cp*virtual potential temperature ! as input; output: temperature - real, intent(inout), dimension(isd:,jsd:,1:)::delz, q_con, cappa + real, intent(inout), dimension(isd:,jsd:,1:)::q_con, cappa + real, intent(inout), dimension(is:,js:,1:)::delz logical, intent(in):: hydrostatic logical, intent(in):: hybrid_z logical, intent(in):: out_dt + logical, intent(in):: moist_phys real, intent(inout):: ua(isd:ied,jsd:jed,km) ! u-wind (m/s) on physics grid real, intent(inout):: va(isd:ied,jsd:jed,km) ! v-wind (m/s) on physics grid @@ -127,21 +135,27 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & real, intent(out):: pkz(is:ie,js:je,km) ! layer-mean pk for converting t to pt real, intent(out):: te(isd:ied,jsd:jed,km) + ! !DESCRIPTION: ! ! !REVISION HISTORY: ! SJL 03.11.04: Initial version for partial remapping ! !----------------------------------------------------------------------- + real, allocatable, dimension(:,:,:) :: dp0, u0, v0 + real, allocatable, dimension(:,:,:) :: u_dt, v_dt real, dimension(is:ie,js:je):: te_2d, zsum0, zsum1, dpln - real, dimension(is:ie,km) :: q2, dp2 + real, dimension(is:ie,km) :: q2, dp2, t0, w2 real, dimension(is:ie,km+1):: pe1, pe2, pk1, pk2, pn2, phis + real, dimension(isd:ied,jsd:jed,km):: pe4 real, dimension(is:ie+1,km+1):: pe0, pe3 - real, dimension(is:ie):: gz, cvm, qv - real rcp, rg, tmp, tpe, rrg, bkh, dtmp, k1k + real, dimension(is:ie):: gsize, gz, cvm, qv + + real rcp, rg, rrg, bkh, dtmp, k1k logical:: fast_mp_consv - integer:: i,j,k + integer:: i,j,k integer:: nt, liq_wat, ice_wat, rainwat, snowwat, cld_amt, graupel, iq, n, kmp, kp, k_next + integer:: ccn_cm3 k1k = rdgas/cv_air ! akap / (1.-akap) = rg/Cv=0.4 rg = rdgas @@ -154,8 +168,9 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') graupel = get_tracer_index (MODEL_ATMOS, 'graupel') cld_amt = get_tracer_index (MODEL_ATMOS, 'cld_amt') + ccn_cm3 = get_tracer_index (MODEL_ATMOS, 'ccn_cm3') - if ( do_sat_adj ) then + if ( do_adiabatic_init .or. do_sat_adj ) then fast_mp_consv = (.not.do_adiabatic_init) .and. 
consv>consv_min do k=1,km kmp = k @@ -169,9 +184,9 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & !$OMP graupel,q_con,sphum,cappa,r_vir,rcp,k1k,delp, & !$OMP delz,akap,pkz,te,u,v,ps, gridstruct, last_step, & !$OMP ak,bk,nq,isd,ied,jsd,jed,kord_tr,fill, adiabatic, & -!$OMP hs,w,ws,kord_wz,do_omega,omga,rrg,kord_mt,ua) & +!$OMP hs,w,ws,kord_wz,do_omega,omga,rrg,kord_mt,pe4) & !$OMP private(qv,gz,cvm,kp,k_next,bkh,dp2, & -!$OMP pe0,pe1,pe2,pe3,pk1,pk2,pn2,phis,q2) +!$OMP pe0,pe1,pe2,pe3,pk1,pk2,pn2,phis,q2,w2) do 1000 j=js,je+1 do k=1,km+1 @@ -199,15 +214,6 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & ! Transform "density pt" to "density temp" do k=1,km #ifdef MOIST_CAPPA - if ( nwat==2 ) then - do i=is,ie - qv(i) = max(0., q(i,j,k,sphum)) - q_con(i,j,k) = max(0., q(i,j,k,liq_wat)) - cvm(i) = (1.-qv(i))*cv_air + qv(i)*cv_vap - cappa(i,j,k) = rdgas / ( rdgas + cvm(i)/(1.+r_vir*qv(i)) ) - pt(i,j,k) = pt(i,j,k)*exp(cappa(i,j,k)/(1.-cappa(i,j,k))*log(rrg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) - enddo - else call moist_cv(is,ie,isd,ied,jsd,jed, km, j, k, nwat, sphum, liq_wat, rainwat, & ice_wat, snowwat, graupel, q, gz, cvm) do i=is,ie @@ -215,7 +221,6 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & cappa(i,j,k) = rdgas / ( rdgas + cvm(i)/(1.+r_vir*q(i,j,k,sphum)) ) pt(i,j,k) = pt(i,j,k)*exp(cappa(i,j,k)/(1.-cappa(i,j,k))*log(rrg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) enddo - endif #else do i=is,ie pt(i,j,k) = pt(i,j,k)*exp(k1k*log(rrg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) @@ -299,7 +304,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & if ( kord_tm<0 ) then !---------------------------------- -! Map t using logp +! Map t using logp !---------------------------------- call map_scalar(km, peln(is,1,j), pt, gz, & km, pn2, pt, & @@ -338,14 +343,15 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & km, pe2, w, & is, ie, j, isd, ied, jsd, jed, -2, kord_wz) ! Remap delz for hybrid sigma-p coordinate - call map1_ppm (km, pe1, delz, gz, & + call map1_ppm (km, pe1, delz, gz, & ! works km, pe2, delz, & - is, ie, j, isd, ied, jsd, jed, 1, abs(kord_tm)) + is, ie, j, is, ie, js, je, 1, abs(kord_tm)) do k=1,km do i=is,ie delz(i,j,k) = -delz(i,j,k)*dp2(i,k) enddo enddo + endif !---------- @@ -391,15 +397,6 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & ! Note: pt at this stage is T_v or T_m do k=1,km #ifdef MOIST_CAPPA - if ( nwat==2 ) then - do i=is,ie - qv(i) = max(0., q(i,j,k,sphum)) - q_con(i,j,k) = max(0., q(i,j,k,liq_wat)) - cvm(i) = (1.-qv(i))*cv_air + qv(i)*cv_vap - cappa(i,j,k) = rdgas / ( rdgas + cvm(i)/(1.+r_vir*qv(i)) ) - pkz(i,j,k) = exp(cappa(i,j,k)*log(rrg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) - enddo - else call moist_cv(is,ie,isd,ied,jsd,jed, km, j, k, nwat, sphum, liq_wat, rainwat, & ice_wat, snowwat, graupel, q, gz, cvm) do i=is,ie @@ -407,7 +404,6 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & cappa(i,j,k) = rdgas / ( rdgas + cvm(i)/(1.+r_vir*q(i,j,k,sphum)) ) pkz(i,j,k) = exp(cappa(i,j,k)*log(rrg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) enddo - endif ! 
nwat test #else if ( kord_tm < 0 ) then do i=is,ie @@ -502,27 +498,30 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & do k=1,km do i=is,ie - ua(i,j,k) = pe2(i,k+1) + pe4(i,j,k) = pe2(i,k+1) enddo enddo 1000 continue -!$OMP parallel default(none) shared(is,ie,js,je,km,kmp,ptop,u,v,pe,ua,isd,ied,jsd,jed,kord_mt, & -!$OMP te_2d,te,delp,hydrostatic,hs,rg,pt,peln, adiabatic, & + +!$OMP parallel default(none) shared(is,ie,js,je,km,kmp,ptop,u,v,pe,ua,va,isd,ied,jsd,jed,kord_mt, & +!$OMP te_2d,te,delp,hydrostatic,hs,rg,pt,peln,adiabatic, & !$OMP cp,delz,nwat,rainwat,liq_wat,ice_wat,snowwat, & !$OMP graupel,q_con,r_vir,sphum,w,pk,pkz,last_step,consv, & !$OMP do_adiabatic_init,zsum1,zsum0,te0_2d,domain, & !$OMP ng,gridstruct,E_Flux,pdt,dtmp,reproduce_sum,q, & !$OMP mdt,cld_amt,cappa,dtdt,out_dt,rrg,akap,do_sat_adj, & -!$OMP fast_mp_consv,kord_tm) & -!$OMP private(pe0,pe1,pe2,pe3,qv,cvm,gz,phis,tpe,tmp, dpln) +!$OMP fast_mp_consv,kord_tm,pe4, & +!$OMP npx,npy,ccn_cm3,u_dt,v_dt, & +!$OMP c2l_ord,bd,dp0,ps) & +!$OMP private(q2,pe0,pe1,pe2,pe3,qv,cvm,gz,gsize,phis,dpln,dp2,t0) !$OMP do do k=2,km do j=js,je do i=is,ie - pe(i,k,j) = ua(i,j,k-1) + pe(i,k,j) = pe4(i,j,k-1) enddo enddo enddo @@ -566,16 +565,8 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & do k=1,km #ifdef MOIST_CAPPA - if ( nwat==2 ) then - do i=is,ie - qv(i) = max(0., q(i,j,k,sphum)) - gz(i) = max(0., q(i,j,k,liq_wat)) - cvm(i) = (1.-qv(i))*cv_air + qv(i)*cv_vap - enddo - else call moist_cv(is,ie,isd,ied,jsd,jed, km, j, k, nwat, sphum, liq_wat, rainwat, & ice_wat, snowwat, graupel, q, gz, cvm) - endif do i=is,ie ! KE using 3D winds: q_con(i,j,k) = gz(i) @@ -613,13 +604,13 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & enddo ! j-loop !$OMP single - tpe = consv*g_sum(domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.) - E_Flux = tpe / (grav*pdt*4.*pi*radius**2) ! unit: W/m**2 + dtmp = consv*g_sum(domain, te_2d, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.) + E_Flux = dtmp / (grav*pdt*4.*pi*radius**2) ! unit: W/m**2 ! Note pdt is "phys" time step if ( hydrostatic ) then - dtmp = tpe / (cp*g_sum(domain, zsum0, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)) + dtmp = dtmp / (cp* g_sum(domain, zsum0, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)) else - dtmp = tpe / (cv_air*g_sum(domain, zsum1, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)) + dtmp = dtmp / (cv_air*g_sum(domain, zsum1, is, ie, js, je, ng, gridstruct%area_64, 0, reproduce=.true.)) endif !$OMP end single @@ -656,9 +647,10 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, & endif ! end last_step check ! Note: pt at this stage is T_v - if ( (.not.do_adiabatic_init) .and. do_sat_adj ) then -! if ( do_sat_adj ) then +! if ( (.not.do_adiabatic_init) .and. do_sat_adj ) then + if (do_adiabatic_init .or. 
do_sat_adj) then
          call timing_on('sat_adj2')
+
!$OMP do
       do k=kmp,km
          do j=js,je
@@ -670,8 +662,8 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, &
                        te(isd,jsd,k), q(isd,jsd,k,sphum), q(isd,jsd,k,liq_wat), &
                        q(isd,jsd,k,ice_wat), q(isd,jsd,k,rainwat), &
                        q(isd,jsd,k,snowwat), q(isd,jsd,k,graupel), &
-                       dpln, delz(isd:,jsd:,k), pt(isd,jsd,k), delp(isd,jsd,k), q_con(isd:,jsd:,k), &
-                       cappa(isd:,jsd:,k), gridstruct%area_64, dtdt(is,js,k), out_dt, last_step, cld_amt>0, q(isd,jsd,k,cld_amt))
+                       dpln, delz(is:ie,js:je,k), pt(isd,jsd,k), delp(isd,jsd,k), q_con(isd:,jsd:,k), &
+                       cappa(isd:,jsd:,k), gridstruct%area_64, dtdt(is,js,k), out_dt, last_step, cld_amt>0, q(isd,jsd,k,cld_amt))
          if ( .not. hydrostatic ) then
             do j=js,je
                do i=is,ie
@@ -695,12 +687,13 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, &
             enddo
          enddo
       endif
+
       call timing_off('sat_adj2')
    endif ! do_sat_adj
-
    if ( last_step ) then
! Output temperature if last_step
+!!! if ( is_master() ) write(*,*) 'dtmp=', dtmp, nwat
!$OMP do
      do k=1,km
         do j=js,je
@@ -708,7 +701,7 @@ subroutine Lagrangian_to_Eulerian(last_step, consv, ps, pe, delp, pkz, pk, &
            if ( nwat==2 ) then
               do i=is,ie
                  gz(i) = max(0., q(i,j,k,liq_wat))
-                 qv(i) = max(0., q(i,j,k,sphum))
+                 qv(i) = max(0., q(i,j,k,sphum))
                  pt(i,j,k) = (pt(i,j,k)+dtmp*pkz(i,j,k)) / ((1.+r_vir*qv(i))*(1.-gz(i)))
               enddo
            elseif ( nwat==6 ) then
@@ -767,7 +760,7 @@ subroutine compute_total_energy(is, ie, js, je, isd, ied, jsd, jed, km, &
    real, intent(inout):: u(isd:ied, jsd:jed+1,km)
    real, intent(inout):: v(isd:ied+1,jsd:jed, km)
    real, intent(in):: w(isd:,jsd:,1:) ! vertical velocity (m/s)
-   real, intent(in):: delz(isd:,jsd:,1:)
+   real, intent(in):: delz(is:,js:,1:)
    real, intent(in):: hs(isd:ied,jsd:jed) ! surface geopotential
    real, intent(in):: pe(is-1:ie+1,km+1,js-1:je+1) ! pressure at layer edges
    real, intent(in):: peln(is:ie,km+1,js:je) ! log(pe)
@@ -952,9 +945,9 @@ subroutine remap_z(km, pe1, q1, kn, pe2, q2, i1, i2, iv, kord)
   integer, intent(in) :: kn ! Target vertical dimension
   integer, intent(in) :: iv
-  real, intent(in) :: pe1(i1:i2,km+1) ! height at layer edges
+  real, intent(in) :: pe1(i1:i2,km+1)  ! height at layer edges
                                        ! (from model top to bottom surface)
-  real, intent(in) :: pe2(i1:i2,kn+1) ! hieght at layer edges
+  real, intent(in) :: pe2(i1:i2,kn+1)  ! height at layer edges
                                        ! (from model top to bottom surface)
   real, intent(in) :: q1(i1:i2,km) ! Field input
@@ -1040,10 +1033,10 @@ subroutine map_scalar( km, pe1, q1, qs, &
  integer, intent(in) :: km ! Original vertical dimension
  integer, intent(in) :: kn ! Target vertical dimension
  real, intent(in) :: qs(i1:i2) ! bottom BC
- real, intent(in) :: pe1(i1:i2,km+1) ! pressure at layer edges
+ real, intent(in) :: pe1(i1:i2,km+1)  ! pressure at layer edges
                                       ! (from model top to bottom surface)
                                       ! in the original vertical coordinate
- real, intent(in) :: pe2(i1:i2,kn+1) ! pressure at layer edges
+ real, intent(in) :: pe2(i1:i2,kn+1)  ! pressure at layer edges
                                       ! (from model top to bottom surface)
                                       ! in the new vertical coordinate
  real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input
@@ -1134,10 +1127,10 @@ subroutine map1_ppm( km, pe1, q1, qs, &
  integer, intent(in) :: km ! Original vertical dimension
  integer, intent(in) :: kn ! Target vertical dimension
  real, intent(in) :: qs(i1:i2) ! bottom BC
- real, intent(in) :: pe1(i1:i2,km+1) ! pressure at layer edges
+ real, intent(in) :: pe1(i1:i2,km+1)  ! pressure at layer edges
                                       ! (from model top to bottom surface)
!
in the original vertical coordinate - real, intent(in) :: pe2(i1:i2,kn+1) ! pressure at layer edges + real, intent(in) :: pe2(i1:i2,kn+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input @@ -1221,10 +1214,10 @@ subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & integer, intent(in):: j, nq, i1, i2 integer, intent(in):: isd, ied, jsd, jed integer, intent(in):: kord(nq) - real, intent(in):: pe1(i1:i2,km+1) ! pressure at layer edges + real, intent(in):: pe1(i1:i2,km+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the original vertical coordinate - real, intent(in):: pe2(i1:i2,km+1) ! pressure at layer edges + real, intent(in):: pe2(i1:i2,km+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in):: dp2(i1:i2,km) @@ -1267,7 +1260,7 @@ subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & ! entire new grid is within the original grid pr = (pe2(i,k+1)-pe1(i,l)) / dp1(i,l) fac1 = pr + pl - fac2 = r3*(pr*fac1 + pl*pl) + fac2 = r3*(pr*fac1 + pl*pl) fac1 = 0.5*fac1 do iq=1,nq q2(i,k,iq) = q4(2,i,l,iq) + (q4(4,i,l,iq)+q4(3,i,l,iq)-q4(2,i,l,iq))*fac1 & @@ -1345,10 +1338,10 @@ subroutine map1_q2(km, pe1, q1, & integer, intent(in) :: km ! Original vertical dimension integer, intent(in) :: kn ! Target vertical dimension - real, intent(in) :: pe1(i1:i2,km+1) ! pressure at layer edges + real, intent(in) :: pe1(i1:i2,km+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the original vertical coordinate - real, intent(in) :: pe2(i1:i2,kn+1) ! pressure at layer edges + real, intent(in) :: pe2(i1:i2,kn+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in) :: q1(ibeg:iend,jbeg:jend,km) ! Field input @@ -1432,10 +1425,10 @@ subroutine remap_2d(km, pe1, q1, & integer, intent(in):: kord integer, intent(in):: km ! Original vertical dimension integer, intent(in):: kn ! Target vertical dimension - real, intent(in):: pe1(i1:i2,km+1) ! pressure at layer edges + real, intent(in):: pe1(i1:i2,km+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the original vertical coordinate - real, intent(in):: pe2(i1:i2,kn+1) ! pressure at layer edges + real, intent(in):: pe2(i1:i2,kn+1) ! pressure at layer edges ! (from model top to bottom surface) ! in the new vertical coordinate real, intent(in) :: q1(i1:i2,km) ! Field input @@ -1537,7 +1530,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) real gam(i1:i2,km) real q(i1:i2,km+1) real d4(i1:i2) - real bet, a_bot, grat + real bet, a_bot, grat real pmp_1, lac_1, pmp_2, lac_2 integer i, k, im @@ -1555,7 +1548,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) enddo enddo do i=i1,i2 - grat = delp(i,km-1) / delp(i,km) + grat = delp(i,km-1) / delp(i,km) q(i,km) = (3.*(a4(1,i,km-1)+a4(1,i,km)) - grat*qs(i) - q(i,km-1)) / & (2. + grat + grat - gam(i,km)) q(i,km+1) = qs(i) @@ -1581,7 +1574,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) gam(i,k) = d4(i) / bet enddo enddo - + do i=i1,i2 a_bot = 1. + d4(i)*(d4(i)+1.5) q(i,km+1) = (2.*d4(i)*(d4(i)+1.)*a4(1,i,km)+a4(1,i,km-1)-a_bot*q(i,km)) & @@ -1612,7 +1605,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) !------------------ im = i2 - i1 + 1 -! Apply *large-scale* constraints +! 
Apply *large-scale* constraints do i=i1,i2 q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) @@ -1685,7 +1678,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) do i=i1,i2 a4(2,i,1) = max(0., a4(2,i,1)) enddo - elseif ( iv==-1 ) then + elseif ( iv==-1 ) then do i=i1,i2 if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. enddo @@ -1839,6 +1832,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) endif enddo elseif ( abs(kord)==14 ) then + do i=i1,i2 a4(4,i,k) = 3.*(2.*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) enddo @@ -1885,7 +1879,7 @@ subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) do i=i1,i2 a4(3,i,km) = max(0., a4(3,i,km)) enddo - elseif ( iv .eq. -1 ) then + elseif ( iv .eq. -1 ) then do i=i1,i2 if ( a4(3,i,km)*a4(1,i,km) <= 0. ) a4(3,i,km) = 0. enddo @@ -1915,11 +1909,11 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) real, intent(in) :: delp(i1:i2,km) ! layer pressure thickness real, intent(inout):: a4(4,i1:i2,km) ! Interpolated values !----------------------------------------------------------------------- - logical:: extm(i1:i2,km) + logical:: extm(i1:i2,km) real gam(i1:i2,km) real q(i1:i2,km+1) real d4(i1:i2) - real bet, a_bot, grat + real bet, a_bot, grat real pmp_1, lac_1, pmp_2, lac_2 integer i, k, im @@ -1937,7 +1931,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) enddo enddo do i=i1,i2 - grat = delp(i,km-1) / delp(i,km) + grat = delp(i,km-1) / delp(i,km) q(i,km) = (3.*(a4(1,i,km-1)+a4(1,i,km)) - grat*qs(i) - q(i,km-1)) / & (2. + grat + grat - gam(i,km)) q(i,km+1) = qs(i) @@ -1963,7 +1957,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) gam(i,k) = d4(i) / bet enddo enddo - + do i=i1,i2 a_bot = 1. + d4(i)*(d4(i)+1.5) q(i,km+1) = (2.*d4(i)*(d4(i)+1.)*a4(1,i,km)+a4(1,i,km-1)-a_bot*q(i,km)) & @@ -1994,7 +1988,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) !------------------ im = i2 - i1 + 1 -! Apply *large-scale* constraints +! Apply *large-scale* constraints do i=i1,i2 q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) @@ -2061,7 +2055,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) do i=i1,i2 a4(2,i,1) = max(0., a4(2,i,1)) enddo - elseif ( iv==-1 ) then + elseif ( iv==-1 ) then do i=i1,i2 if ( a4(2,i,1)*a4(1,i,1) <= 0. ) a4(2,i,1) = 0. enddo @@ -2239,7 +2233,7 @@ subroutine cs_profile(qs, a4, delp, km, i1, i2, iv, kord) do i=i1,i2 a4(3,i,km) = max(0., a4(3,i,km)) enddo - elseif ( iv .eq. -1 ) then + elseif ( iv .eq. -1 ) then do i=i1,i2 if ( a4(3,i,km)*a4(1,i,km) <= 0. ) a4(3,i,km) = 0. enddo @@ -2346,7 +2340,7 @@ subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) integer, intent(in):: i2 ! Finishing longitude integer, intent(in):: km ! vertical dimension integer, intent(in):: kord ! Order (or more accurately method no.): - ! + ! real , intent(in):: delp(i1:i2,km) ! layer pressure thickness ! !INPUT/OUTPUT PARAMETERS: @@ -2355,8 +2349,8 @@ subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) ! DESCRIPTION: ! ! Perform the piecewise parabolic reconstruction -! -! !REVISION HISTORY: +! +! !REVISION HISTORY: ! S.-J. Lin revised at GFDL 2007 !----------------------------------------------------------------------- ! local arrays: @@ -2439,7 +2433,7 @@ subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) do i=i1,i2 a4(2,i,1) = max(0., a4(2,i,1)) a4(2,i,2) = max(0., a4(2,i,2)) - enddo + enddo elseif( iv==-1 ) then do i=i1,i2 if ( a4(2,i,1)*a4(1,i,1) <= 0. 
) a4(2,i,1) = 0. @@ -2532,7 +2526,7 @@ subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) ! Method#2 - better h2(i,k) = 2.*(dc(i,k+1)/delp(i,k+1) - dc(i,k-1)/delp(i,k-1)) & / ( delp(i,k)+0.5*(delp(i,k-1)+delp(i,k+1)) ) & - * delp(i,k)**2 + * delp(i,k)**2 ! Method#3 !!! h2(i,k) = dc(i,k+1) - dc(i,k-1) enddo @@ -2684,7 +2678,7 @@ subroutine steepz(i1, i2, km, a4, df2, dm, dq, dp, d4) integer, intent(in) :: km, i1, i2 real , intent(in) :: dp(i1:i2,km) ! grid size real , intent(in) :: dq(i1:i2,km) ! backward diff of q - real , intent(in) :: d4(i1:i2,km) ! backward sum: dp(k)+ dp(k-1) + real , intent(in) :: d4(i1:i2,km) ! backward sum: dp(k)+ dp(k-1) real , intent(in) :: df2(i1:i2,km) ! first guess mismatch real , intent(in) :: dm(i1:i2,km) ! monotonic mismatch ! !INPUT/OUTPUT PARAMETERS: @@ -2772,7 +2766,7 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & real, intent(out):: pt(isd:ied ,jsd:jed ,kn) ! temperature real, intent(out):: q(isd:ied,jsd:jed,kn,1:ntp) real, intent(out):: qdiag(isd:ied,jsd:jed,kn,ntp+1:nq) - real, intent(out):: delz(isd:,jsd:,1:) ! delta-height (m) + real, intent(out):: delz(is:,js:,1:) ! delta-height (m) !----------------------------------------------------------------------- real r_vir, rgrav real ps(isd:ied,jsd:jed) ! surface pressure @@ -2869,7 +2863,7 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & kn, pe2, u(is:ie,j:j,1:kn), & is, ie, -1, kord) - if ( j /= (je+1) ) then + if ( j /= (je+1) ) then !--------------- ! Hybrid sigma-p @@ -2924,7 +2918,7 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & w(i,j,k) = 0. endif enddo - enddo + enddo #endif #ifndef HYDRO_DELZ_REMAP @@ -3006,7 +3000,7 @@ subroutine rst_remap(km, kn, is,ie,js,je, isd,ied,jsd,jed, nq, ntp, & do i=is,ie pt(i,j,k) = pt(i,j,k) / (1.+r_vir*q(i,j,k,1)) enddo - enddo + enddo enddo end subroutine rst_remap @@ -3018,9 +3012,9 @@ subroutine mappm(km, pe1, q1, kn, pe2, q2, i1, i2, iv, kord, ptop) ! IV = 0: constituents ! IV = 1: potential temp ! IV =-1: winds - + ! Mass flux preserving mapping: q1(im,km) -> q2(im,kn) - + ! pe1: pressure at layer edges (from model top to bottom surface) ! in the original vertical coordinate ! pe2: pressure at layer edges (from model top to bottom surface) @@ -3147,7 +3141,7 @@ subroutine moist_cv(is,ie, isd,ied, jsd,jed, km, j, k, nwat, sphum, liq_wat, rai integer, intent(in):: is, ie, isd,ied, jsd,jed, km, nwat, j, k integer, intent(in):: sphum, liq_wat, rainwat, ice_wat, snowwat, graupel real, intent(in), dimension(isd:ied,jsd:jed,km,nwat):: q - real, intent(out), dimension(is:ie):: cvm, qd + real, intent(out), dimension(is:ie):: cvm, qd ! qd is q_con real, intent(in), optional:: t1(is:ie) ! real, parameter:: t_i0 = 15. @@ -3182,28 +3176,36 @@ subroutine moist_cv(is,ie, isd,ied, jsd,jed, km, j, k, nwat, sphum, liq_wat, rai case (3) do i=is,ie qv(i) = q(i,j,k,sphum) - ql(i) = q(i,j,k,liq_wat) + ql(i) = q(i,j,k,liq_wat) qs(i) = q(i,j,k,ice_wat) qd(i) = ql(i) + qs(i) cvm(i) = (1.-(qv(i)+qd(i)))*cv_air + qv(i)*cv_vap + ql(i)*c_liq + qs(i)*c_ice enddo case(4) ! 
K_warm_rain with fake ice - do i=is,ie + do i=is,ie qv(i) = q(i,j,k,sphum) qd(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) cvm(i) = (1.-(qv(i)+qd(i)))*cv_air + qv(i)*cv_vap + qd(i)*c_liq enddo - + case(5) + do i=is,ie + qv(i) = q(i,j,k,sphum) + ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) + qs(i) = q(i,j,k,ice_wat) + q(i,j,k,snowwat) + qd(i) = ql(i) + qs(i) + cvm(i) = (1.-(qv(i)+qd(i)))*cv_air + qv(i)*cv_vap + ql(i)*c_liq + qs(i)*c_ice + enddo case(6) - do i=is,ie + do i=is,ie qv(i) = q(i,j,k,sphum) - ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) + ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) qs(i) = q(i,j,k,ice_wat) + q(i,j,k,snowwat) + q(i,j,k,graupel) qd(i) = ql(i) + qs(i) cvm(i) = (1.-(qv(i)+qd(i)))*cv_air + qv(i)*cv_vap + ql(i)*c_liq + qs(i)*c_ice enddo case default - do i=is,ie + !call mpp_error (NOTE, 'fv_mapz::moist_cv - using default cv_air') + do i=is,ie qd(i) = 0. cvm(i) = cv_air enddo @@ -3253,7 +3255,7 @@ subroutine moist_cp(is,ie, isd,ied, jsd,jed, km, j, k, nwat, sphum, liq_wat, rai case(3) do i=is,ie qv(i) = q(i,j,k,sphum) - ql(i) = q(i,j,k,liq_wat) + ql(i) = q(i,j,k,liq_wat) qs(i) = q(i,j,k,ice_wat) qd(i) = ql(i) + qs(i) cpm(i) = (1.-(qv(i)+qd(i)))*cp_air + qv(i)*cp_vapor + ql(i)*c_liq + qs(i)*c_ice @@ -3264,17 +3266,25 @@ subroutine moist_cp(is,ie, isd,ied, jsd,jed, km, j, k, nwat, sphum, liq_wat, rai qd(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) cpm(i) = (1.-(qv(i)+qd(i)))*cp_air + qv(i)*cp_vapor + qd(i)*c_liq enddo - + case(5) + do i=is,ie + qv(i) = q(i,j,k,sphum) + ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) + qs(i) = q(i,j,k,ice_wat) + q(i,j,k,snowwat) + qd(i) = ql(i) + qs(i) + cpm(i) = (1.-(qv(i)+qd(i)))*cp_air + qv(i)*cp_vapor + ql(i)*c_liq + qs(i)*c_ice + enddo case(6) - do i=is,ie + do i=is,ie qv(i) = q(i,j,k,sphum) - ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) + ql(i) = q(i,j,k,liq_wat) + q(i,j,k,rainwat) qs(i) = q(i,j,k,ice_wat) + q(i,j,k,snowwat) + q(i,j,k,graupel) qd(i) = ql(i) + qs(i) cpm(i) = (1.-(qv(i)+qd(i)))*cp_air + qv(i)*cp_vapor + ql(i)*c_liq + qs(i)*c_ice enddo case default - do i=is,ie + !call mpp_error (NOTE, 'fv_mapz::moist_cp - using default cp_air') + do i=is,ie qd(i) = 0. 
cpm(i) = cp_air enddo diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index cf04cb8ba..dd5d1011b 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -26,24 +26,24 @@ module fv_nesting_mod use tracer_manager_mod, only: get_tracer_index use fv_sg_mod, only: neg_adj3 use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain - use mpp_domains_mod, only: DGRID_NE, mpp_update_domains, domain2D - use fv_restart_mod, only: d2a_setup, d2c_setup - use mpp_mod, only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL + use mpp_domains_mod, only: AGRID, CGRID_NE, DGRID_NE, mpp_update_domains, domain2D + use mpp_mod, only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, FATAL, mpp_pe, WARNING, NOTE use mpp_domains_mod, only: mpp_global_sum, BITWISE_EFP_SUM, BITWISE_EXACT_SUM use boundary_mod, only: update_coarse_grid use boundary_mod, only: nested_grid_BC_send, nested_grid_BC_recv, nested_grid_BC_save_proc - use fv_mp_mod, only: is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec + use boundary_mod, only: nested_grid_BC, nested_grid_BC_apply_intT use fv_arrays_mod, only: fv_grid_type, fv_flags_type, fv_atmos_type, fv_nest_type, fv_diag_type, fv_nest_BC_type_3D - use fv_arrays_mod, only: allocate_fv_nest_BC_type, fv_atmos_type, fv_grid_bounds_type + use fv_arrays_mod, only: allocate_fv_nest_BC_type, fv_atmos_type, fv_grid_bounds_type, deallocate_fv_nest_BC_type use fv_grid_utils_mod, only: ptop_min, g_sum, cubed_to_latlon, f_p use init_hydro_mod, only: p_var use constants_mod, only: grav, pi=>pi_8, radius, hlv, rdgas, cp_air, rvgas, cp_vapor, kappa - use fv_mapz_mod, only: mappm + use fv_mapz_mod, only: mappm, remap_2d use fv_timing_mod, only: timing_on, timing_off use fv_mp_mod, only: is_master - use fv_mp_mod, only: mp_reduce_sum + use fv_mp_mod, only: mp_reduce_sum, global_nest_domain use fv_diagnostics_mod, only: sphum_ll_fix, range_check use sw_core_mod, only: divergence_corner, divergence_corner_nest + use time_manager_mod, only: time_type implicit none logical :: RF_initialized = .false. @@ -55,51 +55,58 @@ module fv_nesting_mod real, allocatable :: dp1_coarse(:,:,:) !For nested grid buffers - !Individual structures are allocated by nested_grid_BC_recv - type(fv_nest_BC_type_3d) :: u_buf, v_buf, uc_buf, vc_buf, delp_buf, delz_buf, pt_buf, pkz_buf, w_buf, divg_buf + !Individual structures are allocated by nested_grid_BC_recv + type(fv_nest_BC_type_3d) :: u_buf, v_buf, uc_buf, vc_buf, delp_buf, delz_buf, pt_buf, w_buf, divg_buf, pe_u_buf,pe_v_buf,pe_b_buf type(fv_nest_BC_type_3d), allocatable:: q_buf(:) -!#ifdef USE_COND real, dimension(:,:,:), allocatable, target :: dum_West, dum_East, dum_North, dum_South -!#endif private -public :: twoway_nesting, setup_nested_grid_BCs - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' +public :: twoway_nesting, setup_nested_grid_BCs, set_physics_BCs contains -!!!! NOTE: Many of the routines here and in boundary.F90 have a lot of -!!!! redundant code, which could be cleaned up and simplified. +!!!!NOTE: Later we can add a flag to see if remap BCs are needed +!!! 
if not we can save some code complexity and cycles by skipping it subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & - u, v, w, pt, delp, delz,q, uc, vc, pkz, & + u, v, w, pt, delp, delz,q, uc, vc, & +#ifdef USE_COND + q_con, & +#ifdef MOIST_CAPPA + cappa, & +#endif +#endif nested, inline_q, make_nh, ng, & gridstruct, flagstruct, neststruct, & nest_timestep, tracer_nest_timestep, & - domain, bd, nwat) + domain, parent_grid, bd, nwat, ak, bk) + - type(fv_grid_bounds_type), intent(IN) :: bd real, intent(IN) :: zvir integer, intent(IN) :: npx, npy, npz integer, intent(IN) :: ncnst, ng, nwat logical, intent(IN) :: inline_q, make_nh,nested + real, intent(IN), dimension(npz) :: ak, bk real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u ! D grid zonal wind (m/s) real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v ! D grid meridional wind (m/s) real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1:) ! W (m/s) real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K) real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal) - real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1:) ! height thickness (m) + real, intent(inout) :: delz(bd%is: ,bd%js: ,1:) ! height thickness (m) real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) ! specific humidity and constituents real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) ! (uc,vc) mostly used as the C grid winds real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) - real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) ! finite-volume mean pk +#ifdef USE_COND + real, intent(inout) :: q_con( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +#ifdef MOIST_CAPPA + real, intent(inout) :: cappa( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +#endif +#endif integer, intent(INOUT) :: nest_timestep, tracer_nest_timestep + type(fv_atmos_type), pointer, intent(IN) :: parent_grid type(fv_grid_type), intent(INOUT) :: gridstruct type(fv_flags_type), intent(INOUT) :: flagstruct @@ -108,33 +115,40 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & real :: divg(bd%isd:bd%ied+1,bd%jsd:bd%jed+1, npz) real :: ua(bd%isd:bd%ied,bd%jsd:bd%jed) real :: va(bd%isd:bd%ied,bd%jsd:bd%jed) + real :: pe_ustag(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz+1) + real :: pe_vstag(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz+1) + real :: pe_bstag(bd%isd:bd%ied+1,bd%jsd:bd%jed+1,npz+1) + real, parameter :: a13 = 1./3. 
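!----------------------------------------------------------------------
! [Editor's sketch] The staggered work arrays pe_ustag/pe_vstag/pe_bstag
! declared above hold interface (edge) pressures on the u, v and corner
! points; the remap-BC path below fills them by accumulating delp downward
! from the model top (ak(1)). A minimal one-column sketch of that
! construction, with illustrative names not taken from this patch:
!!$ subroutine edge_pressure_column(npz, ptop, dp, pe)
!!$   integer, intent(in)  :: npz
!!$   real,    intent(in)  :: ptop      ! pressure at the model top, ak(1)
!!$   real,    intent(in)  :: dp(npz)   ! layer thickness; on a staggered point
!!$                                     ! this is the mean of the neighboring
!!$                                     ! cells' delp, as in the loops below
!!$   real,    intent(out) :: pe(npz+1) ! edge pressures, top to bottom
!!$   integer :: k
!!$   pe(1) = ptop
!!$   do k=1,npz
!!$      pe(k+1) = pe(k) + dp(k)
!!$   enddo
!!$ end subroutine edge_pressure_column
!----------------------------------------------------------------------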
- real :: pkz_coarse( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - integer :: i,j,k,n,p, sphum + integer :: i,j,k,n,p, sphum, npz_coarse, nnest logical :: do_pd - type(fv_nest_BC_type_3d) :: pkz_BC + type(fv_nest_BC_type_3d) :: delp_lag_BC, lag_BC, pe_lag_BC, pe_eul_BC + type(fv_nest_BC_type_3d) :: lag_u_BC, pe_u_lag_BC, pe_u_eul_BC + type(fv_nest_BC_type_3d) :: lag_v_BC, pe_v_lag_BC, pe_v_eul_BC + type(fv_nest_BC_type_3d) :: lag_b_BC, pe_b_lag_BC, pe_b_eul_BC !local pointers logical, pointer :: child_grids(:) - integer :: is, ie, js, je - integer :: isd, ied, jsd, jed + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed - is = bd%is - ie = bd%ie - js = bd%js - je = bd%je - isd = bd%isd - ied = bd%ied - jsd = bd%jsd - jed = bd%jed + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed child_grids => neststruct%child_grids - !IF nested, set up nested grid BCs for time-interpolation - !(actually applying the BCs is done in dyn_core + !(actually applying the BCs is done in dyn_core) + + !For multiple grids: Each grid has ONE parent but potentially MULTIPLE nests nest_timestep = 0 if (.not. inline_q) tracer_nest_timestep = 0 @@ -142,7 +156,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & if (neststruct%nested .and. (.not. (neststruct%first_step) .or. make_nh) ) then do_pd = .true. - call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) + call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) else !On first timestep the t0 BCs are not initialized and may contain garbage do_pd = .false. @@ -154,6 +168,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & call timing_on('COMM_TOTAL') !!! CLEANUP: could we make this a non-blocking operation? !!! Is this needed? it is on the initialization step. + call mpp_update_domains(delp, domain) !This is needed to make sure delp is updated for pe calculations call mpp_update_domains(u, v, & domain, gridtype=DGRID_NE, complete=.true.) call timing_off('COMM_TOTAL') @@ -165,7 +180,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & ua, va, & uc(isd,jsd,k), vc(isd,jsd,k), flagstruct%nord>0, & isd,ied,jsd,jed, is,ie,js,je, npx,npy, & - gridstruct%grid_type, gridstruct%nested, & + gridstruct%grid_type, gridstruct%bounded_domain, & gridstruct%se_corner, gridstruct%sw_corner, & gridstruct%ne_corner, gridstruct%nw_corner, & gridstruct%rsin_u, gridstruct%rsin_v, & @@ -175,117 +190,240 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & else call divergence_corner(u(isd,jsd,k), v(isd,jsd,k), ua, va, divg(isd,jsd,k), gridstruct, flagstruct, bd) endif - end do + end do endif -#ifndef SW_DYNAMICS - if (flagstruct%hydrostatic) then -!$OMP parallel do default(none) shared(npz,is,ie,js,je,pkz,pkz_coarse) - do k=1,npz - do j=js,je - do i=is,ie - pkz_coarse(i,j,k) = pkz(i,j,k) - enddo - enddo - enddo - endif -#endif -!! Nested grid: receive from parent grid + nnest = flagstruct%grid_number - 1 + +!! Nested grid: receive from parent grid (Lagrangian coordinate, npz_coarse) if (neststruct%nested) then + + npz_coarse = neststruct%parent_grid%npz + if (.not. 
allocated(q_buf)) then allocate(q_buf(ncnst)) endif - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - delp_buf) + call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, & + delp_buf, nnest) do n=1,ncnst - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - q_buf(n)) + call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, & + q_buf(n), nnest) enddo #ifndef SW_DYNAMICS - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - pt_buf) - - if (flagstruct%hydrostatic) then - call allocate_fv_nest_BC_type(pkz_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz,ng,0,0,0,.false.) - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - pkz_buf) - else - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - w_buf) - call nested_grid_BC_recv(neststruct%nest_domain, 0, 0, npz, bd, & - delz_buf) + call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, & + pt_buf, nnest) + + if (.not. flagstruct%hydrostatic) then + call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, & + w_buf, nnest) + call nested_grid_BC_recv(global_nest_domain, 0, 0, npz_coarse, bd, & + delz_buf, nnest) endif #endif - call nested_grid_BC_recv(neststruct%nest_domain, 0, 1, npz, bd, & - u_buf) - call nested_grid_BC_recv(neststruct%nest_domain, 0, 1, npz, bd, & - vc_buf) - call nested_grid_BC_recv(neststruct%nest_domain, 1, 0, npz, bd, & - v_buf) - call nested_grid_BC_recv(neststruct%nest_domain, 1, 0, npz, bd, & - uc_buf) - call nested_grid_BC_recv(neststruct%nest_domain, 1, 1, npz, bd, & - divg_buf) + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + call nested_grid_BC_recv(global_nest_domain, npz_coarse+1, bd, & + pe_u_buf, pe_v_buf, nnest, gridtype=DGRID_NE) + call nested_grid_BC_recv(global_nest_domain, 1, 1, npz_coarse+1, bd, & + pe_b_buf, nnest) + endif + + call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, & + u_buf, v_buf, nnest, gridtype=DGRID_NE) + call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, & + uc_buf, vc_buf, nnest, gridtype=CGRID_NE) + call nested_grid_BC_recv(global_nest_domain, 1, 1, npz_coarse, bd, & + divg_buf, nnest) endif -!! Coarse grid: send to child grids +!! Coarse grid: send to child grids (Lagrangian coordinate, npz_coarse) do p=1,size(child_grids) if (child_grids(p)) then - call nested_grid_BC_send(delp, neststruct%nest_domain_all(p), 0, 0) + call nested_grid_BC_send(delp, global_nest_domain, 0, 0, p-1) do n=1,ncnst - call nested_grid_BC_send(q(:,:,:,n), neststruct%nest_domain_all(p), 0, 0) + call nested_grid_BC_send(q(:,:,:,n), global_nest_domain, 0, 0, p-1) enddo #ifndef SW_DYNAMICS - call nested_grid_BC_send(pt, neststruct%nest_domain_all(p), 0, 0) + call nested_grid_BC_send(pt, global_nest_domain, 0, 0, p-1) - if (flagstruct%hydrostatic) then - !Working with PKZ is more complicated since it is only defined on the interior of the grid. - call nested_grid_BC_send(pkz_coarse, neststruct%nest_domain_all(p), 0, 0) - else - call nested_grid_BC_send(w, neststruct%nest_domain_all(p), 0, 0) - call nested_grid_BC_send(delz, neststruct%nest_domain_all(p), 0, 0) - endif + if (.not. 
flagstruct%hydrostatic) then + call nested_grid_BC_send(w, global_nest_domain, 0, 0, p-1) + call nested_grid_BC_send(delz, global_nest_domain, 0, 0, p-1) + endif #endif - call nested_grid_BC_send(u, neststruct%nest_domain_all(p), 0, 1) - call nested_grid_BC_send(vc, neststruct%nest_domain_all(p), 0, 1) - call nested_grid_BC_send(v, neststruct%nest_domain_all(p), 1, 0) - call nested_grid_BC_send(uc, neststruct%nest_domain_all(p), 1, 0) - call nested_grid_BC_send(divg, neststruct%nest_domain_all(p), 1, 1) + + if (neststruct%do_remap_BC(p)) then + + !Compute and send staggered pressure + !u points +!$OMP parallel do default(none) shared(ak,pe_ustag,delp, & +!$OMP is,ie,js,je,npz) + do j=js,je+1 + do i=is,ie + pe_ustag(i,j,1) = ak(1) + enddo + do k=1,npz + do i=is,ie + pe_ustag(i,j,k+1) = pe_ustag(i,j,k) + 0.5*(delp(i,j,k)+delp(i,j-1,k)) + enddo + enddo + enddo + + !v points +!$OMP parallel do default(none) shared(ak,pe_vstag,delp, & +!$OMP is,ie,js,je,npz) + do j=js,je + do i=is,ie+1 + pe_vstag(i,j,1) = ak(1) + enddo + do k=1,npz + do i=is,ie+1 + pe_vstag(i,j,k+1) = pe_vstag(i,j,k) + 0.5*(delp(i,j,k)+delp(i-1,j,k)) + enddo + enddo + enddo + call nested_grid_BC_send(pe_ustag, pe_vstag, global_nest_domain, p-1, gridtype=DGRID_NE) + + !b points +!$OMP parallel do default(none) shared(ak,pe_bstag,delp, & +!$OMP is,ie,js,je,npz) + do j=js,je+1 + do i=is,ie+1 + pe_bstag(i,j,1) = ak(1) + enddo + enddo + !Sets up so 3-point average is automatically done at the corner + if (is == 1 .and. js == 1) then + do k=1,npz + delp(0,0,k) = a13*(delp(1,1,k) + delp(0,1,k) + delp(1,0,k)) + enddo + endif + if (ie == npx-1 .and. js == 1) then + do k=1,npz + delp(npx,0,k) = a13*(delp(npx-1,1,k) + delp(npx,1,k) + delp(npx-1,0,k)) + enddo + endif + if (is == 1 .and. je == npy-1) then + do k=1,npz + delp(0,npy,k) = a13*(delp(1,npy-1,k) + delp(0,npy-1,k) + delp(1,npy,k)) + enddo + endif + if (ie == npx-1 .and. je == npy-1) then + do k=1,npz + delp(npx,npy,k) = a13*(delp(npx-1,npy-1,k) + delp(npx,npy-1,k) + delp(npx-1,npy,k)) + enddo + endif + +!$OMP parallel do default(none) shared(ak,pe_bstag,delp, & +!$OMP is,ie,js,je,npz) + do j=js,je+1 + do k=1,npz + do i=is,ie+1 + pe_bstag(i,j,k+1) = pe_bstag(i,j,k) + & + 0.25*(delp(i,j,k)+delp(i-1,j,k)+delp(i,j-1,k)+delp(i-1,j-1,k)) + enddo + enddo + enddo + call nested_grid_BC_send(pe_bstag, global_nest_domain, 1, 1, p-1) + + endif + + call nested_grid_BC_send(u, v, global_nest_domain, p-1, gridtype=DGRID_NE) + call nested_grid_BC_send(uc, vc, global_nest_domain, p-1, gridtype=CGRID_NE) + call nested_grid_BC_send(divg, global_nest_domain, 1, 1, p-1) endif enddo - + !Nested grid: do computations + ! Lag: coarse grid, npz_coarse, lagrangian coordinate---receive and use save_proc to copy into lag_BCs + ! Eul: nested grid, npz, Eulerian (reference) coordinate + ! Remapping from Lag to Eul if (nested) then - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%delp_BC, delp_buf, pd_in=do_pd) - do n=1,ncnst - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%q_BC(n), q_buf(n), pd_in=do_pd) - enddo + + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + + call allocate_fv_nest_BC_type(delp_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.) + call allocate_fv_nest_BC_type(lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse,ng,0,0,0,.false.) 
+ call allocate_fv_nest_BC_type(pe_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.) + call allocate_fv_nest_BC_type(pe_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.) + + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + delp_lag_BC, delp_buf, pd_in=do_pd) + !The incoming delp is on the coarse grid's lagrangian coordinate. Re-create the reference coordinate + call setup_eul_delp_BC(delp_lag_BC, neststruct%delp_BC, pe_lag_BC, pe_eul_BC, ak, bk, npx, npy, npz, npz_coarse, parent_grid%ptop, bd) + + else + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + neststruct%delp_BC, delp_buf, pd_in=do_pd) + endif + +!!$ do n=1,ncnst +!!$ call nested_grid_BC_save_proc(global_nest_domain, & +!!$ neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & +!!$ lag_BC, q_buf(n), pd_in=do_pd) +!!$ !This remapping appears to have some trouble with rounding error random noise +!!$ call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%q_BC(n), npx, npy, npz, npz_coarse, bd, 0, 0, 0, flagstruct%kord_tr, 'q') +!!$ enddo #ifndef SW_DYNAMICS - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%pt_BC, pt_buf) + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + lag_BC, pt_buf) + !NOTE: need to remap using peln, not pe + call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%pt_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, abs(flagstruct%kord_tm), 'pt', do_log_pe=.true.) + + else + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + neststruct%pt_BC, pt_buf) + endif + + + !For whatever reason moving the calls for q BC remapping here avoids problems with cross-restart reproducibility. + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + do n=1,ncnst + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + lag_BC, q_buf(n), pd_in=do_pd) + call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%q_BC(n), npx, npy, npz, npz_coarse, bd, 0, 0, 0, flagstruct%kord_tr, 'q2') + enddo + else + do n=1,ncnst + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + neststruct%q_BC(n), q_buf(n), pd_in=do_pd) + enddo + endif sphum = get_tracer_index (MODEL_ATMOS, 'sphum') if (flagstruct%hydrostatic) then - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - pkz_BC, pkz_buf) - call setup_pt_BC(neststruct%pt_BC, pkz_BC, neststruct%q_BC(sphum), npx, npy, npz, zvir, bd) + call setup_pt_BC(neststruct%pt_BC, pe_eul_BC, neststruct%q_BC(sphum), npx, npy, npz, zvir, bd) else - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%w_BC, w_buf) - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, & - neststruct%delz_BC, delz_buf) !Need a negative-definite method? 
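!----------------------------------------------------------------------
! [Editor's sketch] The remap_BC calls in this hunk move BC data from the
! coarse grid's Lagrangian levels (edges pe_lag) onto the nest's Eulerian
! reference levels (edges pe_eul); the production path delegates to the
! PPM-based machinery in fv_mapz. Purely for illustration, a first-order
! (piecewise-constant) conservative vertical remap of one column looks
! like this, assuming both edge sets span the same pressure range
! (illustrative names, not part of this patch):
!!$ subroutine remap1_pcm(km, pe1, q1, kn, pe2, q2)
!!$   integer, intent(in)  :: km, kn
!!$   real,    intent(in)  :: pe1(km+1) ! source edges, top to bottom
!!$   real,    intent(in)  :: q1(km)    ! source layer means
!!$   real,    intent(in)  :: pe2(kn+1) ! target edges, same total range
!!$   real,    intent(out) :: q2(kn)    ! target layer means
!!$   integer :: k, l
!!$   real :: ovlp, acc
!!$   do k=1,kn
!!$      acc = 0.
!!$      do l=1,km
!!$         ! mass-weighted overlap of source layer l with target layer k
!!$         ovlp = max(0., min(pe1(l+1),pe2(k+1)) - max(pe1(l),pe2(k)))
!!$         acc = acc + q1(l)*ovlp
!!$      enddo
!!$      q2(k) = acc / (pe2(k+1)-pe2(k))
!!$   enddo
!!$ end subroutine remap1_pcm
!----------------------------------------------------------------------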
- + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + lag_BC, w_buf) + call remap_BC(pe_lag_BC, pe_eul_BC, lag_BC, neststruct%w_BC, npx, npy, npz, npz_coarse, bd, 0, 0, -1, flagstruct%kord_wz, 'w') + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + lag_BC, delz_buf) !Need a negative-definite method? + call remap_delz_BC(pe_lag_BC, pe_eul_BC, delp_lag_BC, lag_BC, neststruct%delp_BC, neststruct%delz_BC, npx, npy, npz, npz_coarse, bd, 0, 0, 1, flagstruct%kord_wz) + + else + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + neststruct%w_BC, w_buf) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_h, neststruct%wt_h, 0, 0, npx, npy, npz_coarse, bd, & + neststruct%delz_BC, delz_buf) !Need a negative-definite method? + endif + call setup_pt_NH_BC(neststruct%pt_BC, neststruct%delp_BC, neststruct%delz_BC, & neststruct%q_BC(sphum), neststruct%q_BC, ncnst, & #ifdef USE_COND @@ -296,32 +434,157 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & #endif npx, npy, npz, zvir, bd) endif + +#endif + + !!!NOTE: The following require remapping on STAGGERED grids, which requires additional pressure data + + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + + + call allocate_fv_nest_BC_type(pe_u_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,1,.false.) + call allocate_fv_nest_BC_type(pe_u_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,0,1,.false.) + call allocate_fv_nest_BC_type(lag_u_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,0,1,.false.) + call allocate_fv_nest_BC_type(pe_v_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,0,.false.) + call allocate_fv_nest_BC_type(pe_v_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,0,.false.) + call allocate_fv_nest_BC_type(lag_v_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,0,.false.) + call allocate_fv_nest_BC_type(pe_b_lag_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,1,1,.false.) + call allocate_fv_nest_BC_type(pe_b_eul_BC,is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1 ,ng,0,1,1,.false.) + call allocate_fv_nest_BC_type(lag_b_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse ,ng,0,1,1,.false.) 
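!----------------------------------------------------------------------
! [Editor's sketch] The pe_*_eul_BC arrays allocated above hold the edge
! pressures of the Eulerian reference coordinate on the staggered BC
! points, which setup_eul_pe_BC (called below) builds from the hybrid
! sigma-p coefficients. For a single column the standard hybrid
! definition is pe(k) = ak(k) + bk(k)*ps; a minimal sketch with
! illustrative names:
!!$ subroutine eul_edge_pressures(npz, ak, bk, ps, pe)
!!$   integer, intent(in)  :: npz
!!$   real,    intent(in)  :: ak(npz+1), bk(npz+1) ! hybrid coefficients
!!$   real,    intent(in)  :: ps                   ! surface pressure (Pa)
!!$   real,    intent(out) :: pe(npz+1)            ! edge pressures, top to bottom
!!$   integer :: k
!!$   do k=1,npz+1
!!$      pe(k) = ak(k) + bk(k)*ps
!!$   enddo
!!$ end subroutine eul_edge_pressures
!----------------------------------------------------------------------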
+ + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse+1, bd, & + pe_u_lag_BC, pe_u_buf) + call setup_eul_pe_BC(pe_u_lag_BC, pe_u_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 0, 1, bd) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse+1, bd, & + pe_v_lag_BC, pe_v_buf) + call setup_eul_pe_BC(pe_v_lag_BC, pe_v_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 1, 0, bd) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse+1, bd, & + pe_b_lag_BC, pe_b_buf) + call setup_eul_pe_BC(pe_b_lag_BC, pe_b_eul_BC, ak, bk, npx, npy, npz, npz_coarse, 1, 1, bd) + + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, & + lag_u_BC, u_buf) + call remap_BC(pe_u_lag_BC, pe_u_eul_BC, lag_u_BC, neststruct%u_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'u') + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, & + lag_u_BC, vc_buf) + call remap_BC(pe_u_lag_BC, pe_u_eul_BC, lag_u_BC, neststruct%vc_BC, npx, npy, npz, npz_coarse, bd, 0, 1, -1, flagstruct%kord_mt, 'vc') + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, & + lag_v_BC, v_buf) + call remap_BC(pe_v_lag_BC, pe_v_eul_BC, lag_v_BC, neststruct%v_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'v') + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, & + lag_v_BC, uc_buf) + call remap_BC(pe_v_lag_BC, pe_v_eul_BC, lag_v_BC, neststruct%uc_BC, npx, npy, npz, npz_coarse, bd, 1, 0, -1, flagstruct%kord_mt, 'uc') + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, & + lag_b_BC, divg_buf) + call remap_BC(pe_b_lag_BC, pe_b_eul_BC, lag_b_BC, neststruct%divg_BC, npx, npy, npz, npz_coarse, bd, 1, 1, -1, flagstruct%kord_mt, 'divg') + + call deallocate_fv_nest_BC_type(delp_lag_BC) + call deallocate_fv_nest_BC_type(lag_BC) + call deallocate_fv_nest_BC_type(pe_lag_BC) + call deallocate_fv_nest_BC_type(pe_eul_BC) + + call deallocate_fv_nest_BC_type(pe_u_lag_BC) + call deallocate_fv_nest_BC_type(pe_u_eul_BC) + call deallocate_fv_nest_BC_type(lag_u_BC) + call deallocate_fv_nest_BC_type(pe_v_lag_BC) + call deallocate_fv_nest_BC_type(pe_v_eul_BC) + call deallocate_fv_nest_BC_type(lag_v_BC) + call deallocate_fv_nest_BC_type(pe_b_lag_BC) + call deallocate_fv_nest_BC_type(pe_b_eul_BC) + call deallocate_fv_nest_BC_type(lag_b_BC) + + else + + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, & + neststruct%u_BC, u_buf) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz_coarse, bd, & + neststruct%vc_BC, vc_buf) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, & + neststruct%v_BC, v_buf) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz_coarse, bd, & + neststruct%uc_BC, uc_buf) + call nested_grid_BC_save_proc(global_nest_domain, & + neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz_coarse, bd, & + neststruct%divg_BC, divg_buf) + + endif + + !Correct halo 
values have now been set up for BCs; we can go ahead and apply them too + call nested_grid_BC_apply_intT(delp, & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%delp_BC, bctype=neststruct%nestbctype ) + do n=1,ncnst + call nested_grid_BC_apply_intT(q(:,:,:,n), & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%q_BC(n), bctype=neststruct%nestbctype ) + enddo +#ifndef SW_DYNAMICS + call nested_grid_BC_apply_intT(pt, & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%pt_BC, bctype=neststruct%nestbctype ) + if (.not. flagstruct%hydrostatic) then + call nested_grid_BC_apply_intT(w, & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%w_BC, bctype=neststruct%nestbctype ) + !Removed halo from delz --- BCs now directly applied in nh_BC --- lmh june 2018 +!!$ call nested_grid_BC_apply_intT(delz, & +!!$ 0, 0, npx, npy, npz, bd, 1., 1., & +!!$ neststruct%delz_BC, bctype=neststruct%nestbctype ) + endif +#ifdef USE_COND + call nested_grid_BC_apply_intT(q_con, & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%q_con_BC, bctype=neststruct%nestbctype ) +#ifdef MOIST_CAPPA + call nested_grid_BC_apply_intT(cappa, & + 0, 0, npx, npy, npz, bd, 1., 1., & + neststruct%cappa_BC, bctype=neststruct%nestbctype ) #endif - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, & - neststruct%u_BC, u_buf) - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, & - neststruct%vc_BC, vc_buf) - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, & - neststruct%v_BC, v_buf) - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, & - neststruct%uc_BC, uc_buf) - - call nested_grid_BC_save_proc(neststruct%nest_domain, & - neststruct%ind_b, neststruct%wt_b, 1, 1, npx, npy, npz, bd, & - neststruct%divg_BC, divg_buf) +#endif +#endif + call nested_grid_BC_apply_intT(u, & + 0, 1, npx, npy, npz, bd, 1., 1., & + neststruct%u_BC, bctype=neststruct%nestbctype ) + call nested_grid_BC_apply_intT(vc, & + 0, 1, npx, npy, npz, bd, 1., 1., & + neststruct%vc_BC, bctype=neststruct%nestbctype ) + call nested_grid_BC_apply_intT(v, & + 1, 0, npx, npy, npz, bd, 1., 1., & + neststruct%v_BC, bctype=neststruct%nestbctype ) + call nested_grid_BC_apply_intT(uc, & + 1, 0, npx, npy, npz, bd, 1., 1., & + neststruct%uc_BC, bctype=neststruct%nestbctype ) + !!!NOTE: Divg not available here but not needed + !!! until dyn_core anyway. +!!$ call nested_grid_BC_apply_intT(divg, & +!!$ 1, 1, npx, npy, npz, bd, 1., 1., & +!!$ neststruct%divg_BC, bctype=neststruct%nestbctype ) + + !Update domains needed for Rayleigh damping + if (.not. flagstruct%hydrostatic) call mpp_update_domains(w, domain) + call mpp_update_domains(u, v, domain, gridtype=DGRID_NE, complete=.true.) + endif if (neststruct%first_step) then if (neststruct%nested) call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) neststruct%first_step = .false. - if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false. + if (.not. flagstruct%hydrostatic) flagstruct%make_nh= .false. else if (flagstruct%make_nh) then if (neststruct%nested) call set_NH_BCs_t0(neststruct) - flagstruct%make_nh= .false. + flagstruct%make_nh= .false. endif !Unnecessary? 
@@ -329,7 +592,7 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & !!$ neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1 !!$ neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1 !!$ neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1 -!!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1 +!!$ neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1 !!$ neststruct%divg_BC%initialized = .true. !!$ endif @@ -338,17 +601,110 @@ subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & end subroutine setup_nested_grid_BCs - subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) + subroutine set_physics_BCs(ps, u_dt, v_dt, flagstruct, gridstruct, neststruct, npx, npy, npz, ng, ak, bk, bd) type(fv_grid_bounds_type), intent(IN) :: bd - type(fv_nest_BC_type_3d), intent(IN), target :: pkz_BC, sphum_BC - type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC + type(fv_flags_type), intent(IN) :: flagstruct + type(fv_nest_type), intent(INOUT), target :: neststruct + type(fv_grid_type) :: gridstruct + integer, intent(IN) :: npx, npy, npz, ng + real, intent(IN), dimension(npz+1) :: ak, bk + real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: ps + real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz) :: u_dt, v_dt + real, dimension(1,1) :: parent_ps ! dummy variable for nesting + type(fv_nest_BC_type_3d) :: u_dt_buf, v_dt_buf, pe_src_BC, pe_dst_BC!, var_BC + + integer :: n, npz_coarse, nnest + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + real :: dum(1,1,1) + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + nnest = flagstruct%grid_number - 1 + + if (gridstruct%nested) then + + if (neststruct%do_remap_BC(flagstruct%grid_number)) then + + npz_coarse = neststruct%parent_grid%npz + + !Both nested and coarse grids assumed on Eulerian coordinates at this point + !Only need to fetch ps to form pressure levels + !Note also u_dt and v_dt are unstaggered + call nested_grid_BC(ps, parent_ps, global_nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, & + npx, npy, bd, 1, npx-1, 1, npy-1) + call nested_grid_BC_recv(global_nest_domain, npz_coarse, bd, u_dt_buf, v_dt_buf, nnest, gridtype=AGRID) + + call allocate_fv_nest_BC_type(pe_src_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz_coarse+1,ng,0,0,0,.false.) + call allocate_fv_nest_BC_type(pe_dst_BC, is,ie,js,je,isd,ied,jsd,jed,npx,npy,npz+1,ng,0,0,0,.false.) + + call copy_ps_BC(ps, pe_src_BC, npx, npy, npz_coarse, 0, 0, bd) + call setup_eul_pe_BC(pe_src_BC, pe_dst_BC, ak, bk, npx, npy, npz, npz_coarse, 0, 0, bd, & + make_src_in=.true., ak_src=neststruct%parent_grid%ak, bk_src=neststruct%parent_grid%bk) + + !Note that iv=-1 is used for remapping winds, which sets the lower reconstructed values to 0 if + ! there is a 2dx signal. Is this the best for **tendencies** though?? 
Probably not---so iv=1 here + call set_BC_direct( pe_src_BC, pe_dst_BC, u_dt_buf, u_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt) + call set_BC_direct( pe_src_BC, pe_dst_BC, v_dt_buf, v_dt, neststruct, npx, npy, npz, npz_coarse, ng, bd, 0, 0, 1, flagstruct%kord_mt) + + call deallocate_fv_nest_BC_type(pe_src_BC) + call deallocate_fv_nest_BC_type(pe_dst_BC) + + else + call nested_grid_BC(u_dt, v_dt, dum, dum, global_nest_domain, neststruct%ind_h, neststruct%ind_h, & + neststruct%wt_h, neststruct%wt_h, 0, 0, 0, 0, npx, npy, npz, bd, 1, npx-1, 1, npy-1, nnest, gridtype=AGRID) + endif + + endif + do n=1,size(neststruct%child_grids) + if (neststruct%child_grids(n)) then + if (neststruct%do_remap_BC(n)) & + call nested_grid_BC(ps, global_nest_domain, 0, 0, n-1) + call nested_grid_BC_send(u_dt, v_dt, global_nest_domain, n-1, gridtype=AGRID) + endif + enddo + + + end subroutine set_physics_BCs + + subroutine set_BC_direct( pe_src_BC, pe_dst_BC, buf, var, neststruct, npx, npy, npz, npz_coarse, ng, bd, istag, jstag, iv, kord) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_type), intent(INOUT) :: neststruct + integer, intent(IN) :: npx, npy, npz, npz_coarse, ng, istag, jstag, iv, kord + real, intent(INOUT), dimension(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz) :: var + type(fv_nest_BC_type_3d), intent(INOUT) :: buf, pe_src_BC, pe_dst_BC + type(fv_nest_BC_type_3d) :: var_BC + + + call allocate_fv_nest_BC_type(var_BC,bd%is,bd%ie,bd%js,bd%je,bd%isd,bd%ied,bd%jsd,bd%jed,npx,npy,npz_coarse,ng,0,istag,jstag,.false.) + + call nested_grid_BC_save_proc(global_nest_domain, neststruct%ind_h, neststruct%wt_h, istag, jstag, & + npx, npy, npz_coarse, bd, var_BC, buf) + call remap_BC_direct(pe_src_BC, pe_dst_BC, var_BC, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord) + + call deallocate_fv_nest_BC_type(var_BC) + + + end subroutine set_BC_direct + + subroutine setup_pt_BC(pt_BC, pe_eul_BC, sphum_BC, npx, npy, npz, zvir, bd) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(IN) :: pe_eul_BC, sphum_BC + type(fv_nest_BC_type_3d), intent(INOUT) :: pt_BC integer, intent(IN) :: npx, npy, npz real, intent(IN) :: zvir - real, dimension(:,:,:), pointer :: ptBC, pkzBC, sphumBC - - integer :: i,j,k, istart, iend + integer :: istart, iend integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -361,25 +717,12 @@ subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) ied = bd%ied jsd = bd%jsd jed = bd%jed - + if (is == 1) then - ptBC => pt_BC%west_t1 - pkzBC => pkz_BC%west_t1 - sphumBC => sphum_BC%west_t1 -!$OMP parallel do default(none) shared(npz,jsd,jed,isd,ptBC,pkzBC,zvir,sphumBC) - do k=1,npz - do j=jsd,jed - do i=isd,0 - ptBC(i,j,k) = ptBC(i,j,k)/pkzBC(i,j,k)*(1.+zvir*sphumBC(i,j,k)) - end do - end do - end do + call setup_pt_BC_k(pt_BC%west_t1, sphum_BC%west_t1, pe_eul_BC%west_t1, zvir, isd, ied, isd, 0, jsd, jed, npz) end if if (js == 1) then - ptBC => pt_BC%south_t1 - pkzBC => pkz_BC%south_t1 - sphumBC => sphum_BC%south_t1 if (is == 1) then istart = is else @@ -391,37 +734,15 @@ subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) iend = ied end if -!$OMP parallel do default(none) shared(npz,jsd,istart,iend,ptBC,pkzBC,zvir,sphumBC) - do k=1,npz - do j=jsd,0 - do i=istart,iend - ptBC(i,j,k) = ptBC(i,j,k)/pkzBC(i,j,k) * & - (1.+zvir*sphumBC(i,j,k)) - end do - end do - end do + call setup_pt_BC_k(pt_BC%south_t1, sphum_BC%south_t1, pe_eul_BC%south_t1, zvir, isd, ied, istart, iend, jsd, 0, 
npz) end if if (ie == npx-1) then - ptBC => pt_BC%east_t1 - pkzBC => pkz_BC%east_t1 - sphumBC => sphum_BC%east_t1 -!$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,ptBC,pkzBC,zvir,sphumBC) - do k=1,npz - do j=jsd,jed - do i=npx,ied - ptBC(i,j,k) = ptBC(i,j,k)/pkzBC(i,j,k) * & - (1.+zvir*sphumBC(i,j,k)) - end do - end do - end do + call setup_pt_BC_k(pt_BC%east_t1, sphum_BC%east_t1, pe_eul_BC%east_t1, zvir, isd, ied, npx, ied, jsd, jed, npz) end if if (je == npy-1) then - ptBC => pt_BC%north_t1 - pkzBC => pkz_BC%north_t1 - sphumBC => sphum_BC%north_t1 if (is == 1) then istart = is else @@ -433,58 +754,58 @@ subroutine setup_pt_BC(pt_BC, pkz_BC, sphum_BC, npx, npy, npz, zvir, bd) iend = ied end if -!$OMP parallel do default(none) shared(npz,npy,jed,npx,istart,iend,ptBC,pkzBC,zvir,sphumBC) - do k=1,npz - do j=npy,jed - do i=istart,iend - ptBC(i,j,k) = ptBC(i,j,k)/pkzBC(i,j,k) * & - (1.+zvir*sphumBC(i,j,k)) - end do - end do - end do + call setup_pt_BC_k(pt_BC%north_t1, sphum_BC%north_t1, pe_eul_BC%north_t1, zvir, isd, ied, istart, iend, npy, jed, npz) end if - + end subroutine setup_pt_BC - subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & -#ifdef USE_COND - q_con_BC, & -#ifdef MOIST_CAPPA - cappa_BC, & -#endif -#endif - npx, npy, npz, zvir, bd) - type(fv_grid_bounds_type), intent(IN) :: bd - type(fv_nest_BC_type_3d), intent(IN), target :: delp_BC, delz_BC, sphum_BC - type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC - integer, intent(IN) :: nq - type(fv_nest_BC_type_3d), intent(IN), target :: q_BC(nq) -#ifdef USE_COND - type(fv_nest_BC_type_3d), intent(INOUT), target :: q_con_BC -#ifdef MOIST_CAPPA - type(fv_nest_BC_type_3d), intent(INOUT), target :: cappa_BC -#endif -#endif - integer, intent(IN) :: npx, npy, npz - real, intent(IN) :: zvir +!!!! A NOTE ON NOMENCLATURE +!!!! Originally the BC arrays were bounded by isd and ied in the i-direction. +!!!! However these were NOT intended to delineate the dimensions of the data domain +!!!! but instead were of the BC arrays. This is confusing especially in other locations +!!!! where BCs and data arrays are both present. + subroutine setup_pt_BC_k(ptBC, sphumBC, peBC, zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz + real, intent(IN) :: zvir + real, intent(INOUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC + real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC + real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz+1) :: peBC + + integer :: i,j,k + real :: pealn, pebln, rpkz + +!Assumes dry kappa +!$OMP parallel do default(none) shared(peBC,ptBC,zvir,sphumBC, & +!$OMP istart,iend,jstart,jend,npz) & +!$OMP private(pealn,pebln,rpkz) + do k=1,npz + do j=jstart,jend + do i=istart,iend + pealn = log(peBC(i,j,k)) + pebln = log(peBC(i,j,k+1)) + + rpkz = kappa*(pebln - pealn)/(exp(kappa*pebln)-exp(kappa*pealn) ) + + ptBC(i,j,k) = ptBC(i,j,k)*rpkz * & + (1.+zvir*sphumBC(i,j,k)) + enddo + enddo + enddo - real, parameter:: c_liq = 4185.5 ! heat capacity of water at 0C - real, parameter:: c_ice = 1972. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) - real, parameter:: cv_vap = cp_vapor - rvgas ! 
1384.5 + end subroutine setup_pt_BC_k - real, dimension(:,:,:), pointer :: ptBC, sphumBC, qconBC, delpBC, delzBC, cappaBC - real, dimension(:,:,:), pointer :: liq_watBC_west, ice_watBC_west, rainwatBC_west, snowwatBC_west, graupelBC_west - real, dimension(:,:,:), pointer :: liq_watBC_east, ice_watBC_east, rainwatBC_east, snowwatBC_east, graupelBC_east - real, dimension(:,:,:), pointer :: liq_watBC_north, ice_watBC_north, rainwatBC_north, snowwatBC_north, graupelBC_north - real, dimension(:,:,:), pointer :: liq_watBC_south, ice_watBC_south, rainwatBC_south, snowwatBC_south, graupelBC_south + subroutine setup_eul_delp_BC(delp_lag_BC, delp_eul_BC, pe_lag_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_coarse, ptop_src, bd) - real :: dp1, q_liq, q_sol, q_con = 0., cvm, pkz, rdg, cv_air + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(INOUT), target :: delp_lag_BC + type(fv_nest_BC_type_3d), intent(INOUT), target :: delp_eul_BC, pe_lag_BC, pe_eul_BC + integer, intent(IN) :: npx, npy, npz, npz_coarse + real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst + real, intent(IN) :: ptop_src integer :: i,j,k, istart, iend - integer :: liq_wat, ice_wat, rainwat, snowwat, graupel - real, parameter:: tice = 273.16 ! For GFS Partitioning - real, parameter:: t_i0 = 15. integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -497,77 +818,686 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & ied = bd%ied jsd = bd%jsd jed = bd%jed - - rdg = -rdgas / grav - cv_air = cp_air - rdgas - - liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') - ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') - rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') - snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') - graupel = get_tracer_index (MODEL_ATMOS, 'graupel') if (is == 1) then - if (.not. allocated(dum_West)) then - allocate(dum_West(isd:0,jsd:jed,npz)) -!$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West) - do k=1,npz - do j=jsd,jed - do i=isd,0 - dum_West(i,j,k) = 0. - enddo - enddo - enddo - endif - endif - if (js == 1) then - if (.not. allocated(dum_South)) then - allocate(dum_South(isd:ied,jsd:0,npz)) -!$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South) - do k=1,npz - do j=jsd,0 - do i=isd,ied - dum_South(i,j,k) = 0. - enddo - enddo - enddo - endif - endif + call setup_eul_delp_BC_k(delp_lag_BC%west_t1, delp_eul_BC%west_t1, pe_lag_BC%west_t1, pe_eul_BC%west_t1, & + ptop_src, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed, npz, npz_coarse) + end if + if (ie == npx-1) then - if (.not. allocated(dum_East)) then - allocate(dum_East(npx:ied,jsd:jed,npz)) -!$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East) - do k=1,npz - do j=jsd,jed - do i=npx,ied - dum_East(i,j,k) = 0. - enddo - enddo - enddo - endif - endif - if (je == npy-1) then - if (.not. allocated(dum_North)) then - allocate(dum_North(isd:ied,npy:jed,npz)) -!$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North) - do k=1,npz - do j=npy,jed - do i=isd,ied - dum_North(i,j,k) = 0. 
- enddo - enddo - enddo - endif - endif + call setup_eul_delp_BC_k(delp_lag_BC%east_t1, delp_eul_BC%east_t1, pe_lag_BC%east_t1, pe_eul_BC%east_t1, & + ptop_src, ak_dst, bk_dst, npx, ied, npx, ied, jsd, jed, npz, npz_coarse) + end if - if (liq_wat > 0) then - liq_watBC_west => q_BC(liq_wat)%west_t1 - liq_watBC_east => q_BC(liq_wat)%east_t1 - liq_watBC_north => q_BC(liq_wat)%north_t1 - liq_watBC_south => q_BC(liq_wat)%south_t1 + if (is == 1) then + istart = is else - liq_watBC_west => dum_west - liq_watBC_east => dum_east + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then + call setup_eul_delp_BC_k(delp_lag_BC%south_t1, delp_eul_BC%south_t1, pe_lag_BC%south_t1, pe_eul_BC%south_t1, & + ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, jsd, 0, npz, npz_coarse) + end if + + if (je == npy-1) then + call setup_eul_delp_BC_k(delp_lag_BC%north_t1, delp_eul_BC%north_t1, pe_lag_BC%north_t1, pe_eul_BC%north_t1, & + ptop_src, ak_dst, bk_dst, isd, ied, istart, iend, npy, jed, npz, npz_coarse) + end if + + end subroutine setup_eul_delp_BC + + subroutine setup_eul_delp_BC_k(delplagBC, delpeulBC, pelagBC, peeulBC, ptop_src, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse + real, intent(INOUT) :: delplagBC(isd_BC:ied_BC,jstart:jend,npz_coarse), pelagBC(isd_BC:ied_BC,jstart:jend,npz_coarse+1) + real, intent(INOUT) :: delpeulBC(isd_BC:ied_BC,jstart:jend,npz), peeulBC(isd_BC:ied_BC,jstart:jend,npz+1) + real, intent(IN) :: ptop_src, ak_dst(npz+1), bk_dst(npz+1) + + integer :: i,j,k + + character(len=120) :: errstring + +!!$!!! DEBUG CODE +!!$ write(*, '(A, 7I5)') 'setup_eul_delp_BC_k', mpp_pe(), isd_BC, ied_BC, istart, iend, lbound(pelagBC,1), ubound(pelagBC,1) +!!$!!! END DEBUG CODE + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,pelagBC,ptop_src) + do j=jstart,jend + do i=istart,iend + pelagBC(i,j,1) = ptop_src + enddo + enddo +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_coarse,pelagBC,delplagBC) + do j=jstart,jend + do k=1,npz_coarse + do i=istart,iend + pelagBC(i,j,k+1) = pelagBC(i,j,k) + delplagBC(i,j,k) + end do + end do + end do +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,peeulBC,pelagBC,ak_dst,bk_dst) + do k=1,npz+1 + do j=jstart,jend + do i=istart,iend + peeulBC(i,j,k) = ak_dst(k) + pelagBC(i,j,npz_coarse+1)*bk_dst(k) + enddo + enddo + enddo +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,peeulBC,delpeulBC) + do k=1,npz + do j=jstart,jend + do i=istart,iend + delpeulBC(i,j,k) = peeulBC(i,j,k+1) - peeulBC(i,j,k) + enddo + enddo + enddo + +!!$!!! DEBUG CODE +!!$ !If more than a few percent difference then log the error +!!$ do k=1,npz +!!$ do j=jstart,jend +!!$ do i=istart,iend +!!$ if (delpeulBC(i,j,k) <= 0.) then +!!$ write(errstring,'(3I5, 3(2x, G))'), i, j, k, pelagBC(i,j,k), peeulBC(i,j,k) +!!$ call mpp_error(WARNING, ' Invalid pressure BC at '//errstring) +!!$ else if (abs( peeulBC(i,j,k) - pelagBC(i,j,k)) > 100.0 ) then +!!$ write(errstring,'(3I5, 3(2x, G))'), i, j, k, pelagBC(i,j,k), peeulBC(i,j,k) +!!$ call mpp_error(WARNING, ' Remap BC: pressure deviation at '//errstring) +!!$ endif +!!$ enddo +!!$ enddo +!!$ enddo +!!$!!! 
END DEBUG CODE + + end subroutine setup_eul_delp_BC_k + + subroutine copy_ps_BC(ps, pe_BC, npx, npy, npz, istag, jstag, bd) + + integer, intent(IN) :: npx, npy, npz, istag, jstag + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(IN) :: ps(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag) + type(fv_nest_BC_type_3d), intent(INOUT) :: pe_BC + + integer :: i,j,k, istart, iend + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + if (is == 1) then +!$OMP parallel do default(none) shared(isd,jsd,jed,jstag,npz,pe_BC,ps) + do j=jsd,jed+jstag + do i=isd,0 + pe_BC%west_t1(i,j,npz+1) = ps(i,j) + enddo + enddo + end if + + if (ie == npx-1) then +!$OMP parallel do default(none) shared(npx,ied,istag,jsd,jed,jstag,npz,pe_BC,ps) + do j=jsd,jed+jstag + do i=npx+istag,ied+istag + pe_BC%east_t1(i,j,npz+1) = ps(i,j) + enddo + enddo + end if + + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then +!$OMP parallel do default(none) shared(isd,ied,istag,jsd,npz,pe_BC,ps) + do j=jsd,0 + do i=isd,ied+istag + pe_BC%south_t1(i,j,npz+1) = ps(i,j) + enddo + enddo + end if + + if (je == npy-1) then +!$OMP parallel do default(none) shared(isd,ied,istag,npy,jed,jstag,npz,pe_BC,ps) + do j=npy+jstag,jed+jstag + do i=isd,ied+istag + pe_BC%north_t1(i,j,npz+1) = ps(i,j) + enddo + enddo + end if + + end subroutine copy_ps_BC + +!In this routine, the pe_*_BC arrays should already have PS filled in on the npz+1 level + subroutine setup_eul_pe_BC(pe_src_BC, pe_eul_BC, ak_dst, bk_dst, npx, npy, npz, npz_src, istag, jstag, bd, make_src_in, ak_src, bk_src) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_src_BC, pe_eul_BC + integer, intent(IN) :: npx, npy, npz, npz_src, istag, jstag + real, intent(IN), dimension(npz+1) :: ak_dst, bk_dst + logical, intent(IN), OPTIONAL :: make_src_in + real, intent(IN), OPTIONAL :: ak_src(npz_src), bk_src(npz_src) + + logical :: make_src + + integer :: i,j,k, istart, iend + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + make_src = .false. 
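+ !What follows rebuilds the Eulerian interface pressures from the surface
+ !pressure already stored at level npz_src+1 of pe_src_BC (see copy_ps_BC),
+ !using the hybrid sigma-pressure definition pe(k) = ak(k) + bk(k)*ps.
+ !A minimal per-column sketch, with illustrative names pe_col and ps_col
+ !(not variables in this routine):
+!!$ do k=1,npz+1
+!!$ pe_col(k) = ak_dst(k) + bk_dst(k)*ps_col ! interfaces increase monotonically with k
+!!$ enddo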
+ if (present(make_src_in)) make_src = make_src_in + + if (is == 1) then + call setup_eul_pe_BC_k(pe_src_BC%west_t1, pe_eul_BC%west_t1, ak_dst, bk_dst, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_src, & + make_src, ak_src, bk_src) + end if + + if (ie == npx-1) then + call setup_eul_pe_BC_k(pe_src_BC%east_t1, pe_eul_BC%east_t1, ak_dst, bk_dst, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_src, & + make_src, ak_src, bk_src) + end if + + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then + call setup_eul_pe_BC_k(pe_src_BC%south_t1, pe_eul_BC%south_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_src, & + make_src, ak_src, bk_src) + end if + + if (je == npy-1) then + call setup_eul_pe_BC_k(pe_src_BC%north_t1, pe_eul_BC%north_t1, ak_dst, bk_dst, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_src, & + make_src, ak_src, bk_src) + end if + + end subroutine setup_eul_pe_BC + + subroutine setup_eul_pe_BC_k(pesrcBC, peeulBC, ak_dst, bk_dst, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src, make_src, ak_src, bk_src) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_src + real, intent(INOUT) :: pesrcBC(isd_BC:ied_BC,jstart:jend,npz_src+1) + real, intent(INOUT) :: peeulBC(isd_BC:ied_BC,jstart:jend,npz+1) + real, intent(IN) :: ak_dst(npz+1), bk_dst(npz+1) + logical, intent(IN) :: make_src + real, intent(IN) :: ak_src(npz_src+1), bk_src(npz_src+1) + + integer :: i,j,k + + character(len=120) :: errstring + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_src,peeulBC,ak_dst,pesrcBC,bk_dst) + do k=1,npz+1 + do j=jstart,jend + do i=istart,iend + peeulBC(i,j,k) = ak_dst(k) + pesrcBC(i,j,npz_src+1)*bk_dst(k) + enddo + enddo + enddo + + if (make_src) then +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,pesrcBC,ak_src,bk_src) + do k=1,npz_src+1 + do j=jstart,jend + do i=istart,iend + pesrcBC(i,j,k) = ak_src(k) + pesrcBC(i,j,npz_src+1)*bk_src(k) + enddo + enddo + enddo + endif + + + end subroutine setup_eul_pe_BC_k + + subroutine remap_BC(pe_lag_BC, pe_eul_BC, var_lag_BC, var_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, varname, do_log_pe) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC + type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC, var_eul_BC + integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord + character(len=*), intent(IN) :: varname + logical, intent(IN), OPTIONAL :: do_log_pe + + logical :: log_pe = .false. 
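+ !Fortran note: because log_pe is initialized in its declaration it carries
+ !the implicit SAVE attribute, so it is set to .false. only on the first call;
+ !if one call passes do_log_pe=.true. and a later call omits the optional
+ !argument, the .true. value would persist into that later call.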
+
+ integer :: i,j,k, istart, iend
+
+ integer :: is, ie, js, je
+ integer :: isd, ied, jsd, jed
+
+ is = bd%is
+ ie = bd%ie
+ js = bd%js
+ je = bd%je
+ isd = bd%isd
+ ied = bd%ied
+ jsd = bd%jsd
+ jed = bd%jed
+
+ if (present(do_log_pe)) log_pe = do_log_pe
+
+ if (is == 1) then
+ call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, var_lag_BC%west_t1, var_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
+ end if
+
+ if (ie == npx-1) then
+ call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, var_lag_BC%east_t1, var_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
+ end if
+
+ if (is == 1) then
+ istart = is
+ else
+ istart = isd
+ end if
+ if (ie == npx-1) then
+ iend = ie
+ else
+ iend = ied
+ end if
+
+ if (js == 1) then
+ call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, var_lag_BC%south_t1, var_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe)
+ end if
+
+ if (je == npy-1) then
+ call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, var_lag_BC%north_t1, var_eul_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe)
+ end if
+
+ end subroutine remap_BC
+
+ subroutine remap_BC_direct(pe_lag_BC, pe_eul_BC, var_lag_BC, var, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord, do_log_pe)
+
+ type(fv_grid_bounds_type), intent(IN) :: bd
+ integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord
+ type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, var_lag_BC
+ type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC
+ real, intent(INOUT) :: var(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz)
+ logical, intent(IN), OPTIONAL :: do_log_pe
+
+ logical :: log_pe = .false.
+
+ integer :: i,j,k, istart, iend
+
+ integer :: is, ie, js, je
+ integer :: isd, ied, jsd, jed
+
+ is = bd%is
+ ie = bd%ie
+ js = bd%js
+ je = bd%je
+ isd = bd%isd
+ ied = bd%ied
+ jsd = bd%jsd
+ jed = bd%jed
+
+ if (present(do_log_pe)) log_pe = do_log_pe
+
+ if (is == 1) then
+ !I was unable to figure out how to do pass-by-memory referencing on parts of the 3D var array,
+ ! so instead I am doing an inefficient copy and copy-back. 
--- lmh 14jun17 + call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, var_lag_BC%west_t1, var(isd:0,jsd:jed+jstag,:), isd, 0, isd, 0, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe) + end if + + if (ie == npx-1) then + call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, var_lag_BC%east_t1, var(npx+istag:ied+istag,jsd:jed+jstag,:), npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz, npz_coarse, iv, kord, log_pe) + end if + + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then + call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, var_lag_BC%south_t1, var(isd:ied+istag,jsd:0,:), isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, iv, kord, log_pe) + end if + + if (je == npy-1) then + call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, var_lag_BC%north_t1, var(isd:ied+istag,npy+jstag:jed+jstag,:), isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe) + end if + + end subroutine remap_BC_direct + + subroutine remap_BC_k(pe_lagBC, pe_eulBC, var_lagBC, var_eulBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord, log_pe) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz, npz_coarse, iv, kord + logical, intent(IN) :: log_pe + real, intent(INOUT) :: pe_lagBC(isd_BC:ied_BC,jstart:jend,npz_coarse+1), var_lagBC(isd_BC:ied_BC,jstart:jend,npz_coarse) + real, intent(INOUT) :: pe_eulBC(isd_BC:ied_BC,jstart:jend,npz+1), var_eulBC(isd_BC:ied_BC,jstart:jend,npz) + + integer :: i, j, k + real peln_lag(istart:iend,npz_coarse+1) + real peln_eul(istart:iend,npz+1) + character(120) :: errstring + + if (log_pe) then + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord) & +!$OMP private(peln_lag,peln_eul) + do j=jstart,jend + + do k=1,npz_coarse+1 + do i=istart,iend +!!$!!! DEBUG CODE +!!$ if (pe_lagBC(i,j,k) <= 0.) then +!!$ write(errstring,'(3I5, 2x, G)'), i, j, k, pe_lagBC(i,j,k) +!!$ call mpp_error(WARNING, ' Remap BC: invalid pressure at at '//errstring) +!!$ endif +!!$!!! END DEBUG CODE + peln_lag(i,k) = log(pe_lagBC(i,j,k)) + enddo + enddo + + do k=1,npz+1 + do i=istart,iend +!!$!!! DEBUG CODE +!!$ if (pe_lagBC(i,j,k) <= 0.) then +!!$ write(errstring,'(3I5, 2x, G)'), i, j, k, pe_lagBC(i,j,k) +!!$ call mpp_error(WARNING, ' Remap BC: invalid pressure at at '//errstring) +!!$ endif +!!$!!! END DEBUG CODE + peln_eul(i,k) = log(pe_eulBC(i,j,k)) + enddo + enddo + + call mappm(npz_coarse, peln_lag, var_lagBC(istart:iend,j:j,:), & + npz, peln_eul, var_eulBC(istart:iend,j:j,:), & + istart, iend, iv, kord, pe_eulBC(istart,j,1)) + + enddo + + else + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,npz_coarse,pe_lagBC,pe_eulBC,var_lagBC,var_eulBC,iv,kord) + do j=jstart,jend + + call mappm(npz_coarse, pe_lagBC(istart:iend,j:j,:), var_lagBC(istart:iend,j:j,:), & + npz, pe_eulBC(istart:iend,j:j,:), var_eulBC(istart:iend,j:j,:), & + istart, iend, iv, kord, pe_eulBC(istart,j,1)) + !!! NEED A FILLQ/FILLZ CALL HERE?? 
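+ !For reference: mappm remaps a j-row of columns from the npz_coarse source
+ !layers to the npz target layers defined by the two sets of interface
+ !pressures, preserving layer means; the log_pe branch above passes log(pe)
+ !instead of pe, which treats the stretched upper levels more evenly.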
+ + enddo + endif + + end subroutine remap_BC_k + + subroutine remap_delz_BC(pe_lag_BC, pe_eul_BC, delp_lag_BC, delz_lag_BC, delp_eul_BC, delz_eul_BC, npx, npy, npz, npz_coarse, bd, istag, jstag, iv, kord) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_lag_BC, delp_lag_BC, delz_lag_BC + type(fv_nest_BC_type_3d), intent(INOUT), target :: pe_eul_BC, delp_eul_BC, delz_eul_BC + integer, intent(IN) :: npx, npy, npz, npz_coarse, istag, jstag, iv, kord + + integer :: i,j,k, istart, iend + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + if (is == 1) then + call compute_specific_volume_BC_k(delp_lag_BC%west_t1, delz_lag_BC%west_t1, isd, 0, isd, 0, jsd, jed, npz_coarse) + call remap_BC_k(pe_lag_BC%west_t1, pe_eul_BC%west_t1, delz_lag_BC%west_t1, delz_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed+jstag, & + npz, npz_coarse, iv, kord, log_pe=.false.) + call compute_delz_BC_k(delp_eul_BC%west_t1, delz_eul_BC%west_t1, isd, 0, isd, 0, jsd, jed, npz) + end if + + if (ie == npx-1) then + call compute_specific_volume_BC_k(delp_lag_BC%east_t1, delz_lag_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz_coarse) + call remap_BC_k(pe_lag_BC%east_t1, pe_eul_BC%east_t1, delz_lag_BC%east_t1, delz_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, & + npz, npz_coarse, iv, kord, log_pe=.false.) + call compute_delz_BC_k(delp_eul_BC%east_t1, delz_eul_BC%east_t1, npx+istag, ied+istag, npx+istag, ied+istag, jsd, jed+jstag, npz) + end if + + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if + + if (js == 1) then + call compute_specific_volume_BC_k(delp_lag_BC%south_t1, delz_lag_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz_coarse) + call remap_BC_k(pe_lag_BC%south_t1, pe_eul_BC%south_t1, delz_lag_BC%south_t1, delz_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz, npz_coarse, & + iv, kord, log_pe=.false.) + call compute_delz_BC_k(delp_eul_BC%south_t1, delz_eul_BC%south_t1, isd, ied+istag, istart, iend+istag, jsd, 0, npz) + end if + + if (je == npy-1) then + call compute_specific_volume_BC_k(delp_lag_BC%north_t1, delz_lag_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz_coarse) + call remap_BC_k(pe_lag_BC%north_t1, pe_eul_BC%north_t1, delz_lag_BC%north_t1, delz_eul_BC%north_t1, & + isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz, npz_coarse, iv, kord, log_pe=.false.) + call compute_delz_BC_k(delp_eul_BC%north_t1, delz_eul_BC%north_t1, isd, ied+istag, istart, iend+istag, npy+jstag, jed+jstag, npz) + end if + + end subroutine remap_delz_BC + + subroutine compute_specific_volume_BC_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz + real, intent(IN) :: delpBC(isd_BC:ied_BC,jstart:jend,npz) + real, intent(INOUT) :: delzBC(isd_BC:ied_BC,jstart:jend,npz) + + character(len=120) :: errstring + integer :: i,j,k + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC) + do k=1,npz + do j=jstart,jend + do i=istart,iend + delzBC(i,j,k) = -delzBC(i,j,k)/delpBC(i,j,k) +!!$!!! DEBUG CODE +!!$ if (delzBC(i,j,k) <= 0. 
) then +!!$ write(errstring,'(3I5, 2(2x, G))'), i, j, k, delzBC(i,j,k), delpBC(i,j,k) +!!$ call mpp_error(WARNING, ' Remap BC (sfc volume): invalid delz at '//errstring) +!!$ endif +!!$!!! END DEBUG CODE + end do + end do + end do + + end subroutine compute_specific_volume_BC_k + + subroutine compute_delz_BC_k(delpBC, delzBC, isd_BC, ied_BC, istart, iend, jstart, jend, npz) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz + real, intent(IN) :: delpBC(isd_BC:ied_BC,jstart:jend,npz) + real, intent(INOUT) :: delzBC(isd_BC:ied_BC,jstart:jend,npz) + + character(len=120) :: errstring + integer :: i,j,k + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,delzBC,delpBC) + do k=1,npz + do j=jstart,jend + do i=istart,iend + delzBC(i,j,k) = -delzBC(i,j,k)*delpBC(i,j,k) +!!$!!! DEBUG CODE +!!$ if (delzBC(i,j,k) >=0. ) then +!!$ write(errstring,'(3I5, 2(2x, G))'), i, j, k, delzBC(i,j,k), delpBC(i,j,k) +!!$ call mpp_error(WARNING, ' Remap BC (compute delz): invalid delz at '//errstring) +!!$ endif +!!$!!! END DEBUG CODE + end do + end do + end do + + end subroutine compute_delz_BC_k + + + subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & +#ifdef USE_COND + q_con_BC, & +#ifdef MOIST_CAPPA + cappa_BC, & +#endif +#endif + npx, npy, npz, zvir, bd) + + type(fv_grid_bounds_type), intent(IN) :: bd + type(fv_nest_BC_type_3d), intent(IN), target :: delp_BC, delz_BC, sphum_BC + type(fv_nest_BC_type_3d), intent(INOUT), target :: pt_BC + integer, intent(IN) :: nq + type(fv_nest_BC_type_3d), intent(IN), target :: q_BC(nq) +#ifdef USE_COND + type(fv_nest_BC_type_3d), intent(INOUT), target :: q_con_BC +#ifdef MOIST_CAPPA + type(fv_nest_BC_type_3d), intent(INOUT), target :: cappa_BC +#endif +#endif + integer, intent(IN) :: npx, npy, npz + real, intent(IN) :: zvir + + real, parameter:: c_liq = 4185.5 ! heat capacity of water at 0C + real, parameter:: c_ice = 1972. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) + real, parameter:: cv_vap = cp_vapor - rvgas ! 1384.5 + + real, dimension(:,:,:), pointer :: liq_watBC_west, ice_watBC_west, rainwatBC_west, snowwatBC_west, graupelBC_west + real, dimension(:,:,:), pointer :: liq_watBC_east, ice_watBC_east, rainwatBC_east, snowwatBC_east, graupelBC_east + real, dimension(:,:,:), pointer :: liq_watBC_north, ice_watBC_north, rainwatBC_north, snowwatBC_north, graupelBC_north + real, dimension(:,:,:), pointer :: liq_watBC_south, ice_watBC_south, rainwatBC_south, snowwatBC_south, graupelBC_south + + real :: dp1, q_liq, q_sol, q_con = 0., cvm, pkz, rdg, cv_air + + integer :: i,j,k, istart, iend + integer :: liq_wat, ice_wat, rainwat, snowwat, graupel + real, parameter:: tice = 273.16 ! For GFS Partitioning + real, parameter:: t_i0 = 15. + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + rdg = -rdgas / grav + cv_air = cp_air - rdgas + + liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') + ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') + rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') + snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') + graupel = get_tracer_index (MODEL_ATMOS, 'graupel') + + if (is == 1) then + if (.not. allocated(dum_West)) then + allocate(dum_West(isd:0,jsd:jed,npz)) +!$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West) + do k=1,npz + do j=jsd,jed + do i=isd,0 + dum_West(i,j,k) = 0. 
+ enddo + enddo + enddo + endif + endif + if (js == 1) then + if (.not. allocated(dum_South)) then + allocate(dum_South(isd:ied,jsd:0,npz)) +!$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South) + do k=1,npz + do j=jsd,0 + do i=isd,ied + dum_South(i,j,k) = 0. + enddo + enddo + enddo + endif + endif + if (ie == npx-1) then + if (.not. allocated(dum_East)) then + allocate(dum_East(npx:ied,jsd:jed,npz)) +!$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East) + do k=1,npz + do j=jsd,jed + do i=npx,ied + dum_East(i,j,k) = 0. + enddo + enddo + enddo + endif + endif + if (je == npy-1) then + if (.not. allocated(dum_North)) then + allocate(dum_North(isd:ied,npy:jed,npz)) +!$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North) + do k=1,npz + do j=npy,jed + do i=isd,ied + dum_North(i,j,k) = 0. + enddo + enddo + enddo + endif + endif + + if (liq_wat > 0) then + liq_watBC_west => q_BC(liq_wat)%west_t1 + liq_watBC_east => q_BC(liq_wat)%east_t1 + liq_watBC_north => q_BC(liq_wat)%north_t1 + liq_watBC_south => q_BC(liq_wat)%south_t1 + else + liq_watBC_west => dum_west + liq_watBC_east => dum_east liq_watBC_north => dum_north liq_watBC_south => dum_south endif @@ -606,78 +1536,31 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & endif if (graupel > 0) then graupelBC_west => q_BC(graupel)%west_t1 - graupelBC_east => q_BC(graupel)%east_t1 - graupelBC_north => q_BC(graupel)%north_t1 - graupelBC_south => q_BC(graupel)%south_t1 - else - graupelBC_west => dum_west - graupelBC_east => dum_east - graupelBC_north => dum_north - graupelBC_south => dum_south - endif - - if (is == 1) then - ptBC => pt_BC%west_t1 - sphumBC => sphum_BC%west_t1 -#ifdef USE_COND - qconBC => q_con_BC%west_t1 -#ifdef MOIST_CAPPA - cappaBC => cappa_BC%west_t1 -#endif -#endif - delpBC => delp_BC%west_t1 - delzBC => delz_BC%west_t1 - -!$OMP parallel do default(none) shared(npz,jsd,jed,isd,zvir,sphumBC,liq_watBC_west,rainwatBC_west,ice_watBC_west,snowwatBC_west,graupelBC_west,qconBC,cappaBC, & -!$OMP rdg,cv_air,delpBC,delzBC,ptBC) & -!$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz) - do k=1,npz - do j=jsd,jed - do i=isd,0 - dp1 = zvir*sphumBC(i,j,k) -#ifdef USE_COND -#ifdef GFS_PHYS - q_con = liq_watBC_west(i,j,k) - q_sol = q_con*max(min((tice-ptBC(i,j,k))/t_i0,1.),0.) 
- q_liq = q_con - q_sol -#else - q_liq = liq_watBC_west(i,j,k) + rainwatBC_west(i,j,k) - q_sol = ice_watBC_west(i,j,k) + snowwatBC_west(i,j,k) + graupelBC_west(i,j,k) - q_con = q_liq + q_sol -#endif - qconBC(i,j,k) = q_con -#ifdef MOIST_CAPPA - cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice - cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) - pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) -#endif - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)*(1.-q_con)/pkz -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)/delzBC(i,j,k))) - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)/pkz -#endif - end do - end do - end do - end if + graupelBC_east => q_BC(graupel)%east_t1 + graupelBC_north => q_BC(graupel)%north_t1 + graupelBC_south => q_BC(graupel)%south_t1 + else + graupelBC_west => dum_west + graupelBC_east => dum_east + graupelBC_north => dum_north + graupelBC_south => dum_south + endif + if (is == 1) then - if (js == 1) then - ptBC => pt_BC%south_t1 - sphumBC => sphum_BC%south_t1 + call setup_pt_NH_BC_k(pt_BC%west_t1, sphum_BC%west_t1, delp_BC%west_t1, delz_BC%west_t1, & + liq_watBC_west, rainwatBC_west, ice_watBC_west, snowwatBC_west, graupelBC_west, & #ifdef USE_COND - qconBC => q_con_BC%south_t1 + q_con_BC%west_t1, & #ifdef MOIST_CAPPA - cappaBC => cappa_BC%south_t1 + cappa_BC%west_t1, & #endif #endif - delpBC => delp_BC%south_t1 - delzBC => delz_BC%south_t1 + zvir, isd, 0, isd, 0, jsd, jed, npz) + end if + + + if (js == 1) then if (is == 1) then istart = is else @@ -689,108 +1572,32 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & iend = ied end if -!$OMP parallel do default(none) shared(npz,jsd,istart,iend,zvir,sphumBC, & -!$OMP liq_watBC_south,rainwatBC_south,ice_watBC_south,& -!$OMP snowwatBC_south,graupelBC_south,qconBC,cappaBC, & -!$OMP rdg,cv_air,delpBC,delzBC,ptBC) & -!$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz) - do k=1,npz - do j=jsd,0 - do i=istart,iend - dp1 = zvir*sphumBC(i,j,k) + call setup_pt_NH_BC_k(pt_BC%south_t1, sphum_BC%south_t1, delp_BC%south_t1, delz_BC%south_t1, & + liq_watBC_south, rainwatBC_south, ice_watBC_south, snowwatBC_south, graupelBC_south, & #ifdef USE_COND -#ifdef GFS_PHYS - q_con = liq_watBC_south(i,j,k) - q_sol = q_con*max(min((tice-ptBC(i,j,k))/t_i0,1.),0.) 
- q_liq = q_con - q_sol -#else - q_liq = liq_watBC_south(i,j,k) + rainwatBC_south(i,j,k) - q_sol = ice_watBC_south(i,j,k) + snowwatBC_south(i,j,k) + graupelBC_south(i,j,k) - q_con = q_liq + q_sol -#endif - qconBC(i,j,k) = q_con + q_con_BC%south_t1, & #ifdef MOIST_CAPPA - cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice - cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) - pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + cappa_BC%south_t1, & #endif - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)*(1.-q_con)/pkz -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)/delzBC(i,j,k))) - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)/pkz #endif - end do - end do - end do + zvir, isd, ied, istart, iend, jsd, 0, npz) end if if (ie == npx-1) then - ptBC => pt_BC%east_t1 - sphumBC => sphum_BC%east_t1 -#ifdef USE_COND - qconBC => q_con_BC%east_t1 -#ifdef MOIST_CAPPA - cappaBC => cappa_BC%east_t1 -#endif -#endif - delpBC => delp_BC%east_t1 - delzBC => delz_BC%east_t1 -!$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,zvir,sphumBC, & -!$OMP liq_watBC_east,rainwatBC_east,ice_watBC_east,snowwatBC_east,graupelBC_east,qconBC,cappaBC, & -!$OMP rdg,cv_air,delpBC,delzBC,ptBC) & -!$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz) - do k=1,npz - do j=jsd,jed - do i=npx,ied - dp1 = zvir*sphumBC(i,j,k) + + call setup_pt_NH_BC_k(pt_BC%east_t1, sphum_BC%east_t1, delp_BC%east_t1, delz_BC%east_t1, & + liq_watBC_east, rainwatBC_east, ice_watBC_east, snowwatBC_east, graupelBC_east, & #ifdef USE_COND -#ifdef GFS_PHYS - q_con = liq_watBC_east(i,j,k) - q_sol = q_con*max(min((tice-ptBC(i,j,k))/t_i0,1.),0.) 
- q_liq = q_con - q_sol -#else - q_liq = liq_watBC_east(i,j,k) + rainwatBC_east(i,j,k) - q_sol = ice_watBC_east(i,j,k) + snowwatBC_east(i,j,k) + graupelBC_east(i,j,k) - q_con = q_liq + q_sol -#endif - qconBC(i,j,k) = q_con + q_con_BC%east_t1, & #ifdef MOIST_CAPPA - cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice - cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) - pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + cappa_BC%east_t1, & #endif - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)*(1.-q_con)/pkz -#else - pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)/delzBC(i,j,k))) - ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)/pkz #endif - end do - end do - end do + zvir, npx, ied, npx, ied, jsd, jed, npz) end if if (je == npy-1) then - ptBC => pt_BC%north_t1 - sphumBC => sphum_BC%north_t1 -#ifdef USE_COND - qconBC => q_con_BC%north_t1 -#ifdef MOIST_CAPPA - cappaBC => cappa_BC%north_t1 -#endif -#endif - delpBC => delp_BC%north_t1 - delzBC => delz_BC%north_t1 if (is == 1) then istart = is else @@ -802,30 +1609,81 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & iend = ied end if -!$OMP parallel do default(none) shared(npz,npy,jed,istart,iend,zvir, & -!$OMP sphumBC,liq_watBC_north,rainwatBC_north,ice_watBC_north,snowwatBC_north,graupelBC_north,qconBC,cappaBC, & -!$OMP rdg,cv_air,delpBC,delzBC,ptBC) & -!$OMP private(dp1,q_con,q_liq,q_sol,cvm,pkz) - do k=1,npz - do j=npy,jed - do i=istart,iend + call setup_pt_NH_BC_k(pt_BC%north_t1, sphum_BC%north_t1, delp_BC%north_t1, delz_BC%north_t1, & + liq_watBC_north, rainwatBC_north, ice_watBC_north, snowwatBC_north, graupelBC_north, & +#ifdef USE_COND + q_con_BC%north_t1, & +#ifdef MOIST_CAPPA + cappa_BC%north_t1, & +#endif +#endif + zvir, isd, ied, istart, iend, npy, jed, npz) + end if + + end subroutine setup_pt_NH_BC + + + subroutine setup_pt_NH_BC_k(ptBC,sphumBC,delpBC,delzBC, & + liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC, & +#ifdef USE_COND + q_conBC, & +#ifdef MOIST_CAPPA + cappaBC, & +#endif +#endif + zvir, isd_BC, ied_BC, istart, iend, jstart, jend, npz) + + integer, intent(IN) :: isd_BC, ied_BC, istart, iend, jstart, jend, npz + real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: ptBC + real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: sphumBC, delpBC, delzBC + real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC +#ifdef USE_COND + real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: q_conBC +#ifdef MOIST_CAPPA + real, intent(OUT), dimension(isd_BC:ied_BC,jstart:jend,npz) :: cappaBC +#endif +#endif + real, intent(IN) :: zvir + + integer :: i,j,k + real :: dp1, q_con, q_sol, q_liq, cvm, pkz, rdg, cv_air + + real, parameter:: c_liq = 4185.5 ! heat capacity of water at 0C + real, parameter:: c_ice = 1972. ! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice) + real, parameter:: cv_vap = cp_vapor - rvgas ! 1384.5 + real, parameter:: tice = 273.16 ! For GFS Partitioning + real, parameter:: t_i0 = 15. + + rdg = -rdgas / grav + cv_air = cp_air - rdgas + +!!$!!! DEBUG CODE +!!$ write(*, '(A, 7I5)') 'setup_pt_NH_BC_k', mpp_pe(), isd, ied, istart, iend, lbound(ptBC,1), ubound(ptBC,1) +!!$!!! 
END DEBUG CODE + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz,zvir,ptBC,sphumBC,delpBC,delzBC,liq_watBC,rainwatBC,ice_watBC,snowwatBC,graupelBC, & +#ifdef USE_COND +!$OMP q_conBC, & +#ifdef MOIST_CAPPA +!$OMP cappaBC, & +#endif +#endif +!$OMP rdg, cv_air) & +!$OMP private(dp1,q_liq,q_sol,q_con,cvm,pkz) + do k=1,npz + do j=jstart,jend + do i=istart,iend dp1 = zvir*sphumBC(i,j,k) #ifdef USE_COND -#ifdef GFS_PHYS - q_con = liq_watBC_north(i,j,k) - q_sol = q_con*max(min((tice-ptBC(i,j,k))/t_i0,1.),0.) - q_liq = q_con - q_sol -#else - q_liq = liq_watBC_north(i,j,k) + rainwatBC_north(i,j,k) - q_sol = ice_watBC_north(i,j,k) + snowwatBC_north(i,j,k) + graupelBC_north(i,j,k) + q_liq = liq_watBC(i,j,k) + rainwatBC(i,j,k) + q_sol = ice_watBC(i,j,k) + snowwatBC(i,j,k) + graupelBC(i,j,k) q_con = q_liq + q_sol -#endif - qconBC(i,j,k) = q_con + q_conBC(i,j,k) = q_con #ifdef MOIST_CAPPA cvm = (1.-(sphumBC(i,j,k)+q_con))*cv_air+sphumBC(i,j,k)*cv_vap+q_liq*c_liq+q_sol*c_ice cappaBC(i,j,k) = rdgas/(rdgas + cvm/(1.+dp1)) pkz = exp( cappaBC(i,j,k)*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & - (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) + (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) #else pkz = exp( kappa*log(rdg*delpBC(i,j,k)*ptBC(i,j,k) * & (1.+dp1)*(1.-q_con)/delzBC(i,j,k))) @@ -836,15 +1694,11 @@ subroutine setup_pt_NH_BC(pt_BC, delp_BC, delz_BC, sphum_BC, q_BC, nq, & (1.+dp1)/delzBC(i,j,k))) ptBC(i,j,k) = ptBC(i,j,k)*(1.+dp1)/pkz #endif - end do - end do - end do - end if - - - - end subroutine setup_pt_NH_BC + end do + end do + end do + end subroutine setup_pt_NH_BC_k subroutine set_NH_BCs_t0(neststruct) @@ -866,75 +1720,489 @@ end subroutine set_NH_BCs_t0 subroutine set_BCs_t0(ncnst, hydrostatic, neststruct) - integer, intent(IN) :: ncnst - logical, intent(IN) :: hydrostatic - type(fv_nest_type), intent(INOUT) :: neststruct + integer, intent(IN) :: ncnst + logical, intent(IN) :: hydrostatic + type(fv_nest_type), intent(INOUT) :: neststruct + + integer :: n + + neststruct%delp_BC%east_t0 = neststruct%delp_BC%east_t1 + neststruct%delp_BC%west_t0 = neststruct%delp_BC%west_t1 + neststruct%delp_BC%north_t0 = neststruct%delp_BC%north_t1 + neststruct%delp_BC%south_t0 = neststruct%delp_BC%south_t1 + do n=1,ncnst + neststruct%q_BC(n)%east_t0 = neststruct%q_BC(n)%east_t1 + neststruct%q_BC(n)%west_t0 = neststruct%q_BC(n)%west_t1 + neststruct%q_BC(n)%north_t0 = neststruct%q_BC(n)%north_t1 + neststruct%q_BC(n)%south_t0 = neststruct%q_BC(n)%south_t1 + enddo +#ifndef SW_DYNAMICS + neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1 + neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1 + neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1 + neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1 + neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1 + neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1 + neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1 + neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1 + +#ifdef USE_COND + neststruct%q_con_BC%east_t0 = neststruct%q_con_BC%east_t1 + neststruct%q_con_BC%west_t0 = neststruct%q_con_BC%west_t1 + neststruct%q_con_BC%north_t0 = neststruct%q_con_BC%north_t1 + neststruct%q_con_BC%south_t0 = neststruct%q_con_BC%south_t1 +#ifdef MOIST_CAPPA + neststruct%cappa_BC%east_t0 = neststruct%cappa_BC%east_t1 + neststruct%cappa_BC%west_t0 = neststruct%cappa_BC%west_t1 + neststruct%cappa_BC%north_t0 = neststruct%cappa_BC%north_t1 + neststruct%cappa_BC%south_t0 = neststruct%cappa_BC%south_t1 +#endif +#endif + + if (.not. 
hydrostatic) then + call set_NH_BCs_t0(neststruct) + endif +#endif + neststruct%u_BC%east_t0 = neststruct%u_BC%east_t1 + neststruct%u_BC%west_t0 = neststruct%u_BC%west_t1 + neststruct%u_BC%north_t0 = neststruct%u_BC%north_t1 + neststruct%u_BC%south_t0 = neststruct%u_BC%south_t1 + neststruct%v_BC%east_t0 = neststruct%v_BC%east_t1 + neststruct%v_BC%west_t0 = neststruct%v_BC%west_t1 + neststruct%v_BC%north_t0 = neststruct%v_BC%north_t1 + neststruct%v_BC%south_t0 = neststruct%v_BC%south_t1 + + + neststruct%vc_BC%east_t0 = neststruct%vc_BC%east_t1 + neststruct%vc_BC%west_t0 = neststruct%vc_BC%west_t1 + neststruct%vc_BC%north_t0 = neststruct%vc_BC%north_t1 + neststruct%vc_BC%south_t0 = neststruct%vc_BC%south_t1 + neststruct%uc_BC%east_t0 = neststruct%uc_BC%east_t1 + neststruct%uc_BC%west_t0 = neststruct%uc_BC%west_t1 + neststruct%uc_BC%north_t0 = neststruct%uc_BC%north_t1 + neststruct%uc_BC%south_t0 = neststruct%uc_BC%south_t1 + + neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1 + neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1 + neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1 + neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1 + + end subroutine set_BCs_t0 + + subroutine d2c_setup(u, v, & + ua, va, & + uc, vc, dord4, & + isd,ied,jsd,jed, is,ie,js,je, npx,npy, & + grid_type, bounded_domain, & + se_corner, sw_corner, ne_corner, nw_corner, & + rsin_u,rsin_v,cosa_s,rsin2 ) + + logical, intent(in):: dord4 + real, intent(in) :: u(isd:ied,jsd:jed+1) + real, intent(in) :: v(isd:ied+1,jsd:jed) + real, intent(out), dimension(isd:ied ,jsd:jed ):: ua + real, intent(out), dimension(isd:ied ,jsd:jed ):: va + real, intent(out), dimension(isd:ied+1,jsd:jed ):: uc + real, intent(out), dimension(isd:ied ,jsd:jed+1):: vc + integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type + logical, intent(in) :: bounded_domain, se_corner, sw_corner, ne_corner, nw_corner + real, intent(in) :: rsin_u(isd:ied+1,jsd:jed) + real, intent(in) :: rsin_v(isd:ied,jsd:jed+1) + real, intent(in) :: cosa_s(isd:ied,jsd:jed) + real, intent(in) :: rsin2(isd:ied,jsd:jed) + +! Local + real, dimension(isd:ied,jsd:jed):: utmp, vtmp + real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28. + real, parameter:: a1 = 0.5625 + real, parameter:: a2 = -0.0625 + real, parameter:: c1 = -2./14. + real, parameter:: c2 = 11./14. + real, parameter:: c3 = 5./14. + integer npt, i, j, ifirst, ilast, id + + if ( dord4) then + id = 1 + else + id = 0 + endif + + + if (grid_type < 3 .and. .not. bounded_domain) then + npt = 4 + else + npt = -2 + endif + + if ( bounded_domain) then + + do j=jsd+1,jed-1 + do i=isd,ied + utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1)) + enddo + enddo + do i=isd,ied + j = jsd + utmp(i,j) = 0.5*(u(i,j)+u(i,j+1)) + j = jed + utmp(i,j) = 0.5*(u(i,j)+u(i,j+1)) + end do + + do j=jsd,jed + do i=isd+1,ied-1 + vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j)) + enddo + i = isd + vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) + i = ied + vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) + enddo + + do j=jsd,jed + do i=isd,ied + ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j) + va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j) + enddo + enddo + + else + + !---------- + ! Interior: + !---------- + utmp = 0. + vtmp = 0. 
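+ !The interior averaging below is standard 4-point, 4th-order midpoint
+ !interpolation; a1 = 0.5625 = 9/16 and a2 = -0.0625 = -1/16 so that, e.g.,
+!!$ utmp(i,j) = ( -u(i,j-1) + 9.*(u(i,j)+u(i,j+1)) - u(i,j+2) )/16.
+ !which is what the a1/a2 form expands to in each direction.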
+
+
+ do j=max(npt,js-1),min(npy-npt,je+1)
+ do i=max(npt,isd),min(npx-npt,ied)
+ utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
+ enddo
+ enddo
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=max(npt,is-1),min(npx-npt,ie+1)
+ vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
+ enddo
+ enddo
+
+ !----------
+ ! edges:
+ !----------
+ if (grid_type < 3) then
+
+ if ( js==1 .or. jsd<npt) then
+ do j=jsd,npt-1
+ do i=isd,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( (je+1)==npy .or. jed>=(npy-npt)) then
+ do j=npy-npt+1,jed
+ do i=isd,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( is==1 .or. isd<npt) then
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=isd,npt-1
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( (ie+1)==npx .or. ied>=(npx-npt)) then
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=npx-npt+1,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ endif
+ do j=js-1-id,je+1+id
+ do i=is-1-id,ie+1+id
+ ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
+ va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
+ enddo
+ enddo
+
+ end if
+
+! A -> C
+!--------------
+! Fix the edges
+!--------------
+! Xdir:
+ if( sw_corner ) then
+ do i=-2,0
+ utmp(i,0) = -vtmp(0,1-i)
+ enddo
+ endif
+ if( se_corner ) then
+ do i=0,2
+ utmp(npx+i,0) = vtmp(npx,i+1)
+ enddo
+ endif
+ if( ne_corner ) then
+ do i=0,2
+ utmp(npx+i,npy) = -vtmp(npx,je-i)
+ enddo
+ endif
+ if( nw_corner ) then
+ do i=-2,0
+ utmp(i,npy) = vtmp(0,je+i)
+ enddo
+ endif
+
+ if (grid_type < 3 .and. .not. bounded_domain) then
+ ifirst = max(3, is-1)
+ ilast = min(npx-2,ie+2)
+ else
+ ifirst = is-1
+ ilast = ie+2
+ endif
+!---------------------------------------------
+! 4th order interpolation for interior points:
+!---------------------------------------------
+ do j=js-1,je+1
+ do i=ifirst,ilast
+ uc(i,j) = a1*(utmp(i-1,j)+utmp(i,j))+a2*(utmp(i-2,j)+utmp(i+1,j))
+ enddo
+ enddo
+
+ if (grid_type < 3) then
+! Xdir:
+ if( is==1 .and. .not. bounded_domain ) then
+ do j=js-1,je+1
+ uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j)
+ uc(1,j) = ( t14*(utmp( 0,j)+utmp(1,j)) &
+ + t12*(utmp(-1,j)+utmp(2,j)) &
+ + t15*(utmp(-2,j)+utmp(3,j)) )*rsin_u(1,j)
+ uc(2,j) = c1*utmp(3,j) + c2*utmp(2,j) + c3*utmp(1,j)
+ enddo
+ endif
+
+ if( (ie+1)==npx .and. .not. bounded_domain ) then
+ do j=js-1,je+1
+ uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j)
+ uc(npx,j) = (t14*(utmp(npx-1,j)+utmp(npx,j))+ &
+ t12*(utmp(npx-2,j)+utmp(npx+1,j)) &
+ + t15*(utmp(npx-3,j)+utmp(npx+2,j)))*rsin_u(npx,j)
+ uc(npx+1,j) = c3*utmp(npx,j)+c2*utmp(npx+1,j)+c1*utmp(npx+2,j)
+ enddo
+ endif
+
+ endif
+
+!------
+! Ydir:
+!------
+ if( sw_corner ) then
+ do j=-2,0
+ vtmp(0,j) = -utmp(1-j,0)
+ enddo
+ endif
+ if( nw_corner ) then
+ do j=0,2
+ vtmp(0,npy+j) = utmp(j+1,npy)
+ enddo
+ endif
+ if( se_corner ) then
+ do j=-2,0
+ vtmp(npx,j) = utmp(ie+j,0)
+ enddo
+ endif
+ if( ne_corner ) then
+ do j=0,2
+ vtmp(npx,npy+j) = -utmp(ie-j,npy)
+ enddo
+ endif
+
+ if (grid_type < 3) then
+
+ do j=js-1,je+2
+ if ( j==1 .and. .not. bounded_domain) then
+ do i=is-1,ie+1
+ vc(i,1) = (t14*(vtmp(i, 0)+vtmp(i,1)) &
+ + t12*(vtmp(i,-1)+vtmp(i,2)) &
+ + t15*(vtmp(i,-2)+vtmp(i,3)))*rsin_v(i,1)
+ enddo
+ elseif ( (j==0 .or. j==(npy-1)) .and. .not. bounded_domain) then
+ do i=is-1,ie+1
+ vc(i,j) = c1*vtmp(i,j-2) + c2*vtmp(i,j-1) + c3*vtmp(i,j)
+ enddo
+ elseif ( (j==2 .or. j==(npy+1)) .and. .not. bounded_domain) then
+ do i=is-1,ie+1
+ vc(i,j) = c1*vtmp(i,j+1) + c2*vtmp(i,j) + c3*vtmp(i,j-1)
+ enddo
+ elseif ( j==npy .and. .not. 
bounded_domain) then
+ do i=is-1,ie+1
+ vc(i,npy) = (t14*(vtmp(i,npy-1)+vtmp(i,npy)) &
+ + t12*(vtmp(i,npy-2)+vtmp(i,npy+1)) &
+ + t15*(vtmp(i,npy-3)+vtmp(i,npy+2)))*rsin_v(i,npy)
+ enddo
+ else
+! 4th order interpolation for interior points:
+ do i=is-1,ie+1
+ vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
+ enddo
+ endif
+ enddo
+ else
+! 4th order interpolation:
+ do j=js-1,je+2
+ do i=is-1,ie+1
+ vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
+ enddo
+ enddo
+ endif
+
+ end subroutine d2c_setup
+
+ subroutine d2a_setup(u, v, ua, va, dord4, &
+ isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
+ grid_type, bounded_domain, &
+ cosa_s,rsin2 )
+
+ logical, intent(in):: dord4
+ real, intent(in) :: u(isd:ied,jsd:jed+1)
+ real, intent(in) :: v(isd:ied+1,jsd:jed)
+ real, intent(out), dimension(isd:ied ,jsd:jed ):: ua
+ real, intent(out), dimension(isd:ied ,jsd:jed ):: va
+ integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type
+ real, intent(in) :: cosa_s(isd:ied,jsd:jed)
+ real, intent(in) :: rsin2(isd:ied,jsd:jed)
+ logical, intent(in) :: bounded_domain
+
+! Local
+ real, dimension(isd:ied,jsd:jed):: utmp, vtmp
+ real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28.
+ real, parameter:: a1 = 0.5625
+ real, parameter:: a2 = -0.0625
+ real, parameter:: c1 = -2./14.
+ real, parameter:: c2 = 11./14.
+ real, parameter:: c3 = 5./14.
+ integer npt, i, j, ifirst, ilast, id
+
+ if ( dord4) then
+ id = 1
+ else
+ id = 0
+ endif
+
+
+ if (grid_type < 3 .and. .not. bounded_domain) then
+ npt = 4
+ else
+ npt = -2
+ endif
+
+ if ( bounded_domain) then
+
+ do j=jsd+1,jed-1
+ do i=isd,ied
+ utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
+ enddo
+ enddo
+ do i=isd,ied
+ j = jsd
+ utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
+ j = jed
+ utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
+ end do
+
+ do j=jsd,jed
+ do i=isd+1,ied-1
+ vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
+ enddo
+ i = isd
+ vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
+ i = ied
+ vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
+ enddo
+
+ else
+
+ !----------
+ ! Interior:
+ !----------
+
+ do j=max(npt,js-1),min(npy-npt,je+1)
+ do i=max(npt,isd),min(npx-npt,ied)
+ utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
+ enddo
+ enddo
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=max(npt,is-1),min(npx-npt,ie+1)
+ vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
+ enddo
+ enddo
- integer :: n
+ !----------
+ ! edges:
+ !----------
+ if (grid_type < 3) then
+
+ if ( js==1 .or. jsd<npt) then
+ do j=jsd,npt-1
+ do i=isd,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( (je+1)==npy .or. jed>=(npy-npt)) then
+ do j=npy-npt+1,jed
+ do i=isd,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( is==1 .or. 
isd<npt) then
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=isd,npt-1
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif
+
+ if ( (ie+1)==npx .or. ied>=(npx-npt)) then
+ do j=max(npt,jsd),min(npy-npt,jed)
+ do i=npx-npt+1,ied
+ utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
+ vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
+ enddo
+ enddo
+ endif

- neststruct%delp_BC%east_t0 = neststruct%delp_BC%east_t1
- neststruct%delp_BC%west_t0 = neststruct%delp_BC%west_t1
- neststruct%delp_BC%north_t0 = neststruct%delp_BC%north_t1
- neststruct%delp_BC%south_t0 = neststruct%delp_BC%south_t1
- do n=1,ncnst
- neststruct%q_BC(n)%east_t0 = neststruct%q_BC(n)%east_t1
- neststruct%q_BC(n)%west_t0 = neststruct%q_BC(n)%west_t1
- neststruct%q_BC(n)%north_t0 = neststruct%q_BC(n)%north_t1
- neststruct%q_BC(n)%south_t0 = neststruct%q_BC(n)%south_t1
- enddo
-#ifndef SW_DYNAMICS
- neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
- neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
- neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
- neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1
- neststruct%pt_BC%east_t0 = neststruct%pt_BC%east_t1
- neststruct%pt_BC%west_t0 = neststruct%pt_BC%west_t1
- neststruct%pt_BC%north_t0 = neststruct%pt_BC%north_t1
- neststruct%pt_BC%south_t0 = neststruct%pt_BC%south_t1

+ endif

-#ifdef USE_COND
- neststruct%q_con_BC%east_t0 = neststruct%q_con_BC%east_t1
- neststruct%q_con_BC%west_t0 = neststruct%q_con_BC%west_t1
- neststruct%q_con_BC%north_t0 = neststruct%q_con_BC%north_t1
- neststruct%q_con_BC%south_t0 = neststruct%q_con_BC%south_t1
-#ifdef MOIST_CAPPA
- neststruct%cappa_BC%east_t0 = neststruct%cappa_BC%east_t1
- neststruct%cappa_BC%west_t0 = neststruct%cappa_BC%west_t1
- neststruct%cappa_BC%north_t0 = neststruct%cappa_BC%north_t1
- neststruct%cappa_BC%south_t0 = neststruct%cappa_BC%south_t1
-#endif
-#endif

+ end if

- if (.not. hydrostatic) then
- call set_NH_BCs_t0(neststruct)
- endif
-#endif
- neststruct%u_BC%east_t0 = neststruct%u_BC%east_t1
- neststruct%u_BC%west_t0 = neststruct%u_BC%west_t1
- neststruct%u_BC%north_t0 = neststruct%u_BC%north_t1
- neststruct%u_BC%south_t0 = neststruct%u_BC%south_t1
- neststruct%v_BC%east_t0 = neststruct%v_BC%east_t1
- neststruct%v_BC%west_t0 = neststruct%v_BC%west_t1
- neststruct%v_BC%north_t0 = neststruct%v_BC%north_t1
- neststruct%v_BC%south_t0 = neststruct%v_BC%south_t1
- neststruct%vc_BC%east_t0 = neststruct%vc_BC%east_t1
- neststruct%vc_BC%west_t0 = neststruct%vc_BC%west_t1
- neststruct%vc_BC%north_t0 = neststruct%vc_BC%north_t1
- neststruct%vc_BC%south_t0 = neststruct%vc_BC%south_t1
- neststruct%uc_BC%east_t0 = neststruct%uc_BC%east_t1
- neststruct%uc_BC%west_t0 = neststruct%uc_BC%west_t1
- neststruct%uc_BC%north_t0 = neststruct%uc_BC%north_t1
- neststruct%uc_BC%south_t0 = neststruct%uc_BC%south_t1

+ do j=js-1-id,je+1+id
+ do i=is-1-id,ie+1+id
+ ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
+ va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
+ enddo
+ enddo

- neststruct%divg_BC%east_t0 = neststruct%divg_BC%east_t1
- neststruct%divg_BC%west_t0 = neststruct%divg_BC%west_t1
- neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
- neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1

+end subroutine d2a_setup

- end subroutine set_BCs_t0

!! nestupdate types

@@ -959,88 +2227,96 @@ end subroutine set_BCs_t0
!! unless flux nested grid BCs are specified, or if a quantity is
!! not updated at all. This ability has not been implemented. 
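+!! As a schematic illustration of the conserving options (the names below are
+!! not variables in this module): a mass-conserving two-way update replaces a
+!! coarse-grid cell value with the area-weighted mean of the fine-grid cells
+!! covering it,
+!!$ var_coarse = sum(var_nest*area_nest)/sum(area_nest)
+!! whereas the non-conserving updates interpolate or average without
+!! re-weighting by area.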
-subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir) +subroutine twoway_nesting(Atm, ngrids, grids_on_this_pe, zvir, Time, this_grid) type(fv_atmos_type), intent(INOUT) :: Atm(ngrids) - integer, intent(IN) :: ngrids + integer, intent(IN) :: ngrids, this_grid logical, intent(IN) :: grids_on_this_pe(ngrids) real, intent(IN) :: zvir + type(time_type), intent(IN) :: Time integer :: n, p, sphum - + if (ngrids > 1) then +! Re-compute pressures on each grid + + call p_var(Atm(this_grid)%npz, Atm(this_grid)%bd%is, Atm(this_grid)%bd%ie, Atm(this_grid)%bd%js, Atm(this_grid)%bd%je, & + Atm(this_grid)%ptop, ptop_min, Atm(this_grid)%delp, Atm(this_grid)%delz, Atm(this_grid)%pt, & + Atm(this_grid)%ps, Atm(this_grid)%pe, Atm(this_grid)%peln, Atm(this_grid)%pk, Atm(this_grid)%pkz, kappa, & + Atm(this_grid)%q, Atm(this_grid)%ng, Atm(this_grid)%flagstruct%ncnst, Atm(this_grid)%gridstruct%area_64, 0., & + .false., .false., & + Atm(this_grid)%flagstruct%moist_phys, Atm(this_grid)%flagstruct%hydrostatic, & + Atm(this_grid)%flagstruct%nwat, Atm(this_grid)%domain, Atm(this_grid)%flagstruct%adiabatic, .false.) + do n=ngrids,2,-1 !loop backwards to allow information to propagate from finest to coarsest grids - !two-way updating + !two-way updating if (Atm(n)%neststruct%twowaynest ) then - if (grids_on_this_pe(n) .or. grids_on_this_pe(Atm(n)%parent_grid%grid_number)) then + !if (grids_on_this_pe(n) .or. grids_on_this_pe(Atm(n)%parent_grid%grid_number)) then + if (n==this_grid .or. Atm(n)%parent_grid%grid_number==this_grid) then sphum = get_tracer_index (MODEL_ATMOS, 'sphum') call twoway_nest_update(Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, zvir, & - Atm(n)%ncnst, sphum, Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%omga, & - Atm(n)%pt, Atm(n)%delp, Atm(n)%q, Atm(n)%uc, Atm(n)%vc, & - Atm(n)%pkz, Atm(n)%delz, Atm(n)%ps, Atm(n)%ptop, & - Atm(n)%gridstruct, Atm(n)%flagstruct, Atm(n)%neststruct, Atm(n)%parent_grid, Atm(N)%bd, .false.) + Atm(n)%ncnst, sphum, Atm(n)%u, Atm(n)%v, Atm(n)%w, & + Atm(n)%pt, Atm(n)%delp, Atm(n)%q, & + Atm(n)%pe, Atm(n)%pkz, Atm(n)%delz, Atm(n)%ps, Atm(n)%ptop, Atm(n)%ak, Atm(n)%bk, & + Atm(n)%gridstruct, Atm(n)%flagstruct, Atm(n)%neststruct, Atm(n)%domain, & + Atm(n)%parent_grid, Atm(N)%bd, n, .false.) endif endif end do !NOTE: these routines need to be used with any grid which has been updated to, not just the coarsest grid. - do n=1,ngrids - if (Atm(n)%neststruct%parent_of_twoway .and. grids_on_this_pe(n)) then - call after_twoway_nest_update( Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%ng, Atm(n)%ncnst, & - Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, & - Atm(n)%pt, Atm(n)%delp, Atm(n)%q, & - Atm(n)%ps, Atm(n)%pe, Atm(n)%pk, Atm(n)%peln, Atm(n)%pkz, & - Atm(n)%phis, Atm(n)%ua, Atm(n)%va, & - Atm(n)%ptop, Atm(n)%gridstruct, Atm(n)%flagstruct, & - Atm(n)%domain, Atm(n)%bd) - endif - enddo + if (Atm(this_grid)%neststruct%parent_of_twoway .and. grids_on_this_pe(n)) then + call after_twoway_nest_update( Atm(this_grid)%npx, Atm(this_grid)%npy, Atm(this_grid)%npz, & + Atm(this_grid)%ng, Atm(this_grid)%ncnst, & + Atm(this_grid)%u, Atm(this_grid)%v, Atm(this_grid)%w, Atm(this_grid)%delz, & + Atm(this_grid)%pt, Atm(this_grid)%delp, Atm(this_grid)%q, & + Atm(this_grid)%ps, Atm(this_grid)%pe, Atm(this_grid)%pk, Atm(this_grid)%peln, Atm(this_grid)%pkz, & + Atm(this_grid)%phis, Atm(this_grid)%ua, Atm(this_grid)%va, & + Atm(this_grid)%ptop, Atm(this_grid)%gridstruct, Atm(this_grid)%flagstruct, & + Atm(this_grid)%domain, Atm(this_grid)%bd, Time) + endif endif ! 
ngrids > 1 - - - end subroutine twoway_nesting !!!CLEANUP: this routine assumes that the PARENT GRID has pt = (regular) temperature, !!!not potential temperature; which may cause problems when updating if this is not the case. subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & - u, v, w, omga, pt, delp, q, & - uc, vc, pkz, delz, ps, ptop, & + u, v, w, pt, delp, q, & + pe, pkz, delz, ps, ptop, ak, bk, & gridstruct, flagstruct, neststruct, & - parent_grid, bd, conv_theta_in) + domain, parent_grid, bd, grid_number, conv_theta_in) - real, intent(IN) :: zvir, ptop + real, intent(IN) :: zvir, ptop, ak(npz+1), bk(npz+1) integer, intent(IN) :: npx, npy, npz - integer, intent(IN) :: ncnst, sphum + integer, intent(IN) :: ncnst, sphum, grid_number logical, intent(IN), OPTIONAL :: conv_theta_in type(fv_grid_bounds_type), intent(IN) :: bd real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) :: u ! D grid zonal wind (m/s) real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) :: v ! D grid meridional wind (m/s) real, intent(inout) :: w( bd%isd: ,bd%jsd: ,1: ) ! W (m/s) - real, intent(inout) :: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! Vertical pressure velocity (pa/s) real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K) real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal) real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) ! specific humidity and constituents - real, intent(inout) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) ! (uc,vc) C grid winds - real, intent(inout) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) - real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) ! finite-volume mean pk - real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: ) ! delta-height (m); non-hydrostatic only - real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed) ! Surface pressure (pascal) + real, intent(inout) :: pe (bd%is-1:bd%ie+1,npz+1,bd%js-1:bd%je+1) ! finite-volume interface p ! NOTE TRANSPOSITION NEEDED + real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) ! finite-volume mean p^kappa + real, intent(inout) :: delz(bd%is: ,bd%js: ,1: ) ! delta-height (m); non-hydrostatic only + real, intent(inout) :: ps (bd%isd:bd%ied ,bd%jsd:bd%jed) ! Surface pressure (pascal) type(fv_grid_type), intent(INOUT) :: gridstruct type(fv_flags_type), intent(INOUT) :: flagstruct type(fv_nest_type), intent(INOUT) :: neststruct + type(domain2d), intent(INOUT) :: domain - type(fv_atmos_type), intent(INOUT) :: parent_grid + type(fv_atmos_type), pointer, intent(IN) :: parent_grid real, allocatable :: t_nest(:,:,:), ps0(:,:) integer :: i,j,k,n @@ -1051,14 +2327,18 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & logical :: used, conv_theta=.true. real :: qdp( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - real, allocatable :: qdp_coarse(:,:,:) + real, allocatable, dimension(:,:,:) :: qdp_coarse + real, allocatable, dimension(:,:,:) :: var_src + real, allocatable, dimension(:,:,:) :: pt_src, w_src, u_src, v_src real(kind=f_p), allocatable :: q_diff(:,:,:) - real :: L_sum_b(npz), L_sum_a(npz) - + real :: L_sum_b(npz), L_sum_a(npz), blend_wt(parent_grid%npz) + real :: pfull, ph1, ph2, rfcut, sgcut + integer :: upoff integer :: is, ie, js, je integer :: isd, ied, jsd, jed integer :: isu, ieu, jsu, jeu + logical, SAVE :: first_timestep = .true. 
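!-----------------------------------------------------------------------
! An illustrative sketch (with made-up ak/bk values) of the
! blending-weight profile computed just below. Each layer's mean
! pressure is the logarithmic mean of its interface pressures,
! pfull = (ph2-ph1)/log(ph2/ph1); layers above the cutoff keep the
! parent-grid state (weight 0) and layers below take the full update
! weight. The real code derives its cutoff from rf_cutoff, n_sponge,
! and the nested grid's ptop on both grids; the single cutoff here is
! a simplifying assumption.
!-----------------------------------------------------------------------
program blend_wt_sketch
   implicit none
   integer, parameter :: npz = 5
   real, parameter :: p_ref = 1.e5, rfcut = 100.e2, update_blend = 1.0
   real :: ak(npz+1), bk(npz+1), blend_wt(npz)
   real :: ph1, ph2, pfull
   integer :: k

   ak = (/ 300., 3000., 10000., 20000., 10000., 0. /)   ! Pa (illustrative)
   bk = (/ 0.,   0.,    0.,     0.,     0.4,    1. /)

   ph2 = ak(1)
   do k = 1, npz
      ph1 = ph2
      ph2 = ak(k+1) + bk(k+1)*p_ref
      pfull = (ph2 - ph1)/log(ph2/ph1)     ! logarithmic-mean layer pressure
      if (pfull <= rfcut .or. k <= 2) then
         blend_wt(k) = 0.                  ! keep parent values near the top
      else
         blend_wt(k) = update_blend
      endif
      print *, k, pfull, blend_wt(k)
   enddo
end program blend_wt_sketch
!-----------------------------------------------------------------------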
is = bd%is ie = bd%ie @@ -1086,155 +2366,176 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & call mpp_get_compute_domain( parent_grid%domain, & isc_p, iec_p, jsc_p, jec_p ) + ph2 = parent_grid%ak(1) + rfcut = max(flagstruct%rf_cutoff, parent_grid%flagstruct%rf_cutoff) + sgcut = ak(flagstruct%n_sponge+1) + bk(flagstruct%n_sponge+1)*flagstruct%p_ref + sgcut = max(sgcut, parent_grid%ak(parent_grid%flagstruct%n_sponge+1) + parent_grid%bk(parent_grid%flagstruct%n_sponge+1)*parent_grid%flagstruct%p_ref) + rfcut = max(rfcut, sgcut) + do k=1,parent_grid%npz + ph1 = ph2 + ph2 = parent_grid%ak(k+1) + parent_grid%bk(k+1)*parent_grid%flagstruct%p_ref + pfull = (ph2 - ph1) / log(ph2/ph1) + !if above nested-grid ptop or top two nested-grid levels do not remap + if ( pfull <= ak(3) .or. k <= 2 ) then + blend_wt(k) = 0. + !Partial blend of nested-grid's Rayleigh damping region + !ALSO do not blend n_sponge areas?? + elseif (pfull <= rfcut) then + blend_wt(k) = 0. + !blend_wt(k) = neststruct%update_blend*cos(0.5*pi*log(rfcut/pfull)/log(rfcut/ptop))**2 + else + blend_wt(k) = neststruct%update_blend + endif + enddo - !delp/ps - - if (neststruct%nestupdate < 3) then - - call update_coarse_grid(parent_grid%delp, delp, neststruct%nest_domain,& - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, & - neststruct%parent_proc, neststruct%child_proc, parent_grid) - - call mpp_sync!self - -#ifdef SW_DYNAMICS - if (neststruct%parent_proc) then - do j=jsd_p,jed_p - do i=isd_p,ied_p - - parent_grid%ps(i,j) = & - parent_grid%delp(i,j,1)/grav - - end do - end do - endif -#endif + if (neststruct%parent_proc .and. is_master() .and. first_timestep) then + print*, ' TWO-WAY BLENDING WEIGHTS' + ph2 = parent_grid%ak(1) + do k=1,parent_grid%npz + ph1 = ph2 + ph2 = parent_grid%ak(k+1) + parent_grid%bk(k+1)*parent_grid%flagstruct%p_ref + pfull = (ph2 - ph1) / log(ph2/ph1) + print*, k, pfull, blend_wt(k) + enddo + first_timestep = .false. + endif - end if - !if (neststruct%nestupdate /= 3 .and. neststruct%nestbctype /= 3) then + !!! RENORMALIZATION UPDATE OPTION if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 7 .and. neststruct%nestupdate /= 8) then - allocate(qdp_coarse(isd_p:ied_p,jsd_p:jed_p,npz)) - if (parent_grid%flagstruct%nwat > 0) then - allocate(q_diff(isd_p:ied_p,jsd_p:jed_p,npz)) - q_diff = 0. - endif - - do n=1,parent_grid%flagstruct%nwat - - qdp_coarse = 0. - if (neststruct%child_proc) then - do k=1,npz - do j=jsd,jed - do i=isd,ied - qdp(i,j,k) = q(i,j,k,n)*delp(i,j,k) - enddo - enddo - enddo - else - qdp = 0. - endif - - if (neststruct%parent_proc) then - !Add up ONLY region being replaced by nested grid - do k=1,npz - do j=jsu,jeu - do i=isu,ieu - qdp_coarse(i,j,k) = parent_grid%q(i,j,k,n)*parent_grid%delp(i,j,k) - enddo - enddo - enddo - call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & - parent_grid%bd, npz, L_sum_b) - else - qdp_coarse = 0. 
- endif - if (neststruct%parent_proc) then - if (n <= parent_grid%flagstruct%nwat) then - do k=1,npz - do j=jsu,jeu - do i=isu,ieu - q_diff(i,j,k) = q_diff(i,j,k) - qdp_coarse(i,j,k) - enddo - enddo - enddo - endif - endif - - call update_coarse_grid(qdp_coarse, qdp, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) - - call mpp_sync!self - - if (neststruct%parent_proc) then - call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & - parent_grid%bd, npz, L_sum_a) - do k=1,npz - if (L_sum_a(k) > 0.) then - fix = L_sum_b(k)/L_sum_a(k) - do j=jsu,jeu - do i=isu,ieu - !Normalization mass fixer - parent_grid%q(i,j,k,n) = qdp_coarse(i,j,k)*fix - enddo - enddo - endif - enddo - if (n == 1) sphum_ll_fix = 1. - fix - endif - if (neststruct%parent_proc) then - if (n <= parent_grid%flagstruct%nwat) then - do k=1,npz - do j=jsu,jeu - do i=isu,ieu - q_diff(i,j,k) = q_diff(i,j,k) + parent_grid%q(i,j,k,n) - enddo - enddo - enddo - endif - endif - - end do - - if (neststruct%parent_proc) then - if (parent_grid%flagstruct%nwat > 0) then - do k=1,npz - do j=jsu,jeu - do i=isu,ieu - parent_grid%delp(i,j,k) = parent_grid%delp(i,j,k) + q_diff(i,j,k) - enddo - enddo - enddo - endif - - do n=1,parent_grid%flagstruct%nwat - do k=1,npz - do j=jsu,jeu - do i=isu,ieu - parent_grid%q(i,j,k,n) = parent_grid%q(i,j,k,n)/parent_grid%delp(i,j,k) - enddo - enddo - enddo - enddo - endif - - deallocate(qdp_coarse) - if (allocated(q_diff)) deallocate(q_diff) +!!$ allocate(qdp_coarse(isd_p:ied_p,jsd_p:jed_p,npz)) +!!$ if (parent_grid%flagstruct%nwat > 0) then +!!$ allocate(q_diff(isd_p:ied_p,jsd_p:jed_p,npz)) +!!$ q_diff = 0. +!!$ endif +!!$ +!!$ do n=1,parent_grid%flagstruct%nwat +!!$ +!!$ qdp_coarse = 0. +!!$ if (neststruct%child_proc) then +!!$ do k=1,npz +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ qdp(i,j,k) = q(i,j,k,n)*delp(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ else +!!$ qdp = 0. +!!$ endif +!!$ +!!$ if (neststruct%parent_proc) then +!!$ !Add up ONLY region being replaced by nested grid +!!$ do k=1,npz +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ qdp_coarse(i,j,k) = parent_grid%q(i,j,k,n)*parent_grid%delp(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & +!!$ parent_grid%bd, npz, L_sum_b) +!!$ else +!!$ qdp_coarse = 0. 
+!!$ endif +!!$ if (neststruct%parent_proc) then +!!$ if (n <= parent_grid%flagstruct%nwat) then +!!$ do k=1,npz +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ q_diff(i,j,k) = q_diff(i,j,k) - qdp_coarse(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ endif +!!$ endif +!!$ +!!$ call mpp_update_domains(qdp, domain) +!!$ call update_coarse_grid(var_src, qdp, global_nest_domain, & +!!$ gridstruct%dx, gridstruct%dy, gridstruct%area, & +!!$ bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & +!!$ neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & +!!$ npx, npy, npz, 0, 0, & +!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, & +!!$ neststruct%parent_proc, neststruct%child_proc, parent_grid) +!!$ if (neststruct%parent_proc) call remap_up_k(ps0, parent_grid%ps, & +!!$ ak, bk, parent_grid%ak, parent_grid%bk, var_src, qdp_coarse, & +!!$ parent_grid%bd, neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & +!!$ 0, 0, npz, parent_grid%npz, 0, parent_grid%flagstruct%kord_tr, blend_wt, log_pe=.false.) +!!$ +!!$ call mpp_sync!self +!!$ +!!$ if (neststruct%parent_proc) then +!!$ call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & +!!$ parent_grid%bd, npz, L_sum_a) +!!$ do k=1,npz +!!$ if (L_sum_a(k) > 0.) then +!!$ fix = L_sum_b(k)/L_sum_a(k) +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ !Normalization mass fixer +!!$ parent_grid%q(i,j,k,n) = qdp_coarse(i,j,k)*fix +!!$ enddo +!!$ enddo +!!$ endif +!!$ enddo +!!$ if (n == 1) sphum_ll_fix = 1. - fix +!!$ endif +!!$ if (neststruct%parent_proc) then +!!$ if (n <= parent_grid%flagstruct%nwat) then +!!$ do k=1,npz +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ q_diff(i,j,k) = q_diff(i,j,k) + parent_grid%q(i,j,k,n) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ endif +!!$ endif +!!$ +!!$ end do +!!$ +!!$ if (neststruct%parent_proc) then +!!$ if (parent_grid%flagstruct%nwat > 0) then +!!$ do k=1,npz +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ parent_grid%delp(i,j,k) = parent_grid%delp(i,j,k) + q_diff(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ do n=1,parent_grid%flagstruct%nwat +!!$ do k=1,npz +!!$ do j=jsu,jeu +!!$ do i=isu,ieu +!!$ parent_grid%q(i,j,k,n) = parent_grid%q(i,j,k,n)/parent_grid%delp(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ deallocate(qdp_coarse) +!!$ if (allocated(q_diff)) deallocate(q_diff) endif + !!! END RENORMALIZATION UPDATE #ifndef SW_DYNAMICS if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 8) then + if (neststruct%child_proc) then + call mpp_update_domains(ps, domain, complete=.true.) + if (.not. flagstruct%hydrostatic) call mpp_update_domains(w, domain) + ! if (neststruct%child_proc) call mpp_update_domains(delz, domain) + call mpp_update_domains(u, v, domain, gridtype=DGRID_NE) + endif + allocate(pt_src(isd_p:ied_p,jsd_p:jed_p,npz)) + pt_src = -999. + if (conv_theta) then if (neststruct%child_proc) then @@ -1250,68 +2551,74 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & enddo enddo enddo - deallocate(t_nest) + call mpp_update_domains(t_nest, domain, complete=.true.) 
endif - call update_coarse_grid(parent_grid%pt, & - t_nest, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) + call update_coarse_grid(pt_src, & + t_nest, global_nest_domain, & + gridstruct%dx, gridstruct%dy, gridstruct%area, & + bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & + npx, npy, npz, 0, 0, & + neststruct%refinement, neststruct%nestupdate, upoff, 0, & + neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + if (neststruct%child_proc) deallocate(t_nest) else + if (neststruct%child_proc) call mpp_update_domains(pt, domain, complete=.true.) - call update_coarse_grid(parent_grid%pt, & - pt, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + call update_coarse_grid(pt_src, & + pt, global_nest_domain, & + gridstruct%dx, gridstruct%dy, gridstruct%area, & + bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, npz, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) + neststruct%refinement, neststruct%nestupdate, upoff, 0, & + neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) endif !conv_theta call mpp_sync!self - if (.not. flagstruct%hydrostatic) then - call update_coarse_grid(parent_grid%w, w, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) - !Updating for delz not yet implemented; may be problematic -!!$ call update_coarse_grid(parent_grid%delz, delz, neststruct%nest_domain, & -!!$ neststruct%ind_update_h, & -!!$ isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, & -!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc) + if (.not. flagstruct%hydrostatic) then + allocate(w_src(isd_p:ied_p,jsd_p:jed_p,npz)) + w_src = -999. + call update_coarse_grid(w_src, w, global_nest_domain, & + gridstruct%dx, gridstruct%dy, gridstruct%area, & + bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & + npx, npy, npz, 0, 0, & + neststruct%refinement, neststruct%nestupdate, upoff, 0, & + neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) call mpp_sync!self + !Updating for delz not yet implemented; + ! may need to think very carefully how one would do this!!! + ! consider updating specific volume instead? 
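!-----------------------------------------------------------------------
! A hypothetical single-column sketch of the "specific volume"
! alternative suggested in the comment above; it is NOT implemented in
! this patch. Instead of remapping the layer depth delz directly, one
! could remap spvol = delz/delp (height per unit pressure thickness)
! and rebuild delz from the updated delp, keeping the two fields
! mutually consistent. Values are illustrative only.
!-----------------------------------------------------------------------
program spvol_sketch
   implicit none
   integer, parameter :: npz = 3
   real :: delp(npz), delz(npz), spvol(npz)
   integer :: k

   delp = (/ 200.e2, 300.e2, 500.e2 /)     ! Pa
   delz = (/ -2500., -3000., -4000. /)     ! m (negative by FV3 convention)

   spvol = delz/delp                       ! quantity one might remap/blend
   ! ... remap or blend spvol here instead of delz ...
   delp(2) = 320.e2                        ! pretend delp was updated
   do k = 1, npz
      delz(k) = spvol(k)*delp(k)           ! recover a consistent delz
   enddo
   print *, 'updated delz:', delz
end program spvol_sketch
!-----------------------------------------------------------------------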
+!!$ call update_coarse_grid(parent_grid%delz, delz, global_nest_domain, & +!!$ bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, & +!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc) + end if - + end if !Neststruct%nestupdate /= 3 #endif - call update_coarse_grid(parent_grid%u, u, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 0, 1, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) - - call update_coarse_grid(parent_grid%v, v, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & - neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & - npx, npy, npz, 1, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) - - call mpp_sync!self + allocate(u_src(isd_p:ied_p, jsd_p:jed_p+1,npz)) + allocate(v_src(isd_p:ied_p+1,jsd_p:jed_p,npz)) + u_src = -999. + v_src = -999. + call update_coarse_grid(u_src, v_src, u, v, global_nest_domain, & + gridstruct%dx, gridstruct%dy, gridstruct%area, & + bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & + npx, npy, npz, 0, 1, 1, 0, & + neststruct%refinement, neststruct%nestupdate, upoff, 0, & + neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1, gridtype=DGRID_NE) + call mpp_sync() #ifndef SW_DYNAMICS if (neststruct%nestupdate >= 5 .and. npz > 4) then @@ -1325,10 +2632,9 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & if (neststruct%parent_proc) then parent_grid%ps = parent_grid%ptop -!This loop appears to cause problems with OMP -!$OMP parallel do default(none) shared(npz,jsd_p,jed_p,isd_p,ied_p,parent_grid) +!$OMP parallel do default(none) shared(jsd_p,jed_p,isd_p,ied_p,parent_grid) do j=jsd_p,jed_p - do k=1,npz + do k=1,parent_grid%npz do i=isd_p,ied_p parent_grid%ps(i,j) = parent_grid%ps(i,j) + & parent_grid%delp(i,j,k) @@ -1352,26 +2658,25 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & end do endif - call update_coarse_grid(ps0, ps, neststruct%nest_domain, & - neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + call update_coarse_grid(ps0, ps, global_nest_domain, & + gridstruct%dx, gridstruct%dy, gridstruct%area, & + bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid) + neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) !!! The mpp version of update_coarse_grid does not return a consistent value of ps !!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This !!! update_domains call takes care of the problem. - if (neststruct%parent_proc) then - call mpp_update_domains(parent_grid%ps, parent_grid%domain, complete=.false.) - call mpp_update_domains(ps0, parent_grid%domain, complete=.true.) 
- endif - + if (neststruct%parent_proc) then + call mpp_update_domains(parent_grid%ps, parent_grid%domain, complete=.false.) + call mpp_update_domains(ps0, parent_grid%domain, complete=.true.) + endif call mpp_sync!self - if (parent_grid%tile == neststruct%parent_tile) then + if (parent_grid%global_tile == neststruct%parent_tile) then if (neststruct%parent_proc) then @@ -1380,8 +2685,8 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & !idealized simulations with a background uniform theta) since near the top !boundary theta is exponential, which is hard to accurately interpolate with a spline if (.not. parent_grid%flagstruct%remap_t) then -!$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum) - do k=1,npz +!$OMP parallel do default(none) shared(jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum) + do k=1,parent_grid%npz do j=jsc_p,jec_p do i=isc_p,iec_p parent_grid%pt(i,j,k) = & @@ -1391,17 +2696,29 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & end do end do end if - call update_remap_tqw(npz, parent_grid%ak, parent_grid%bk, & - parent_grid%ps, parent_grid%delp, & +!!$!!!! DEBUG CODE +!!$ do k=1,parent_grid%npz +!!$ write(mpp_pe()+3000,*) 'k = ', k, parent_grid%ak(k), parent_grid%bk(k) +!!$ enddo +!!$ write(mpp_pe()+3000,*) +!!$ do k=1,npz +!!$ write(mpp_pe()+3000,*) 'k = ', k, ak(k), bk(k) +!!$ enddo +!!$!!!! END DEBUG CODE + + call update_remap_tqw(parent_grid%npz, parent_grid%ak, parent_grid%bk, & + parent_grid%ps, & parent_grid%pt, parent_grid%q, parent_grid%w, & parent_grid%flagstruct%hydrostatic, & - npz, ps0, zvir, parent_grid%ptop, ncnst, & + npz, ps0, ak, bk, pt_src, w_src, & + zvir, parent_grid%ptop, ncnst, & parent_grid%flagstruct%kord_tm, parent_grid%flagstruct%kord_tr, & parent_grid%flagstruct%kord_wz, & - isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, .false. ) !neststruct%nestupdate < 7) + isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, .false., & + neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, blend_wt) !neststruct%nestupdate < 7) if (.not. 
parent_grid%flagstruct%remap_t) then -!$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum) - do k=1,npz +!$OMP parallel do default(none) shared(jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum) + do k=1,parent_grid%npz do j=jsc_p,jec_p do i=isc_p,iec_p parent_grid%pt(i,j,k) = & @@ -1412,11 +2729,12 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & end do end if - call update_remap_uv(npz, parent_grid%ak, parent_grid%bk, & - parent_grid%ps, & - parent_grid%u, & - parent_grid%v, npz, ps0, parent_grid%flagstruct%kord_mt, & - isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop) + call update_remap_uv(parent_grid%npz, parent_grid%ak, parent_grid%bk, & + parent_grid%ps, parent_grid%u, parent_grid%v, & + npz, ak, bk, ps0, u_src, v_src, & + parent_grid%flagstruct%kord_mt, & + isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop, & + neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, blend_wt) endif !neststruct%parent_proc @@ -1428,6 +2746,14 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & #endif + + + deallocate(pt_src) + deallocate(w_src) + deallocate(u_src) + deallocate(v_src) + + end subroutine twoway_nest_update subroutine level_sum(q, area, domain, bd, npz, L_sum) @@ -1436,9 +2762,9 @@ subroutine level_sum(q, area, domain, bd, npz, L_sum) type(fv_grid_bounds_type), intent(IN) :: bd real, intent(in) :: area( bd%isd:bd%ied ,bd%jsd:bd%jed) real, intent(in) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - real, intent(OUT) :: L_sum( npz ) + real, intent(OUT) :: L_sum( npz ) type(domain2d), intent(IN) :: domain - + integer :: i, j, k, n real :: qA!(bd%is:bd%ie, bd%js:bd%je) @@ -1458,12 +2784,145 @@ subroutine level_sum(q, area, domain, bd, npz, L_sum) end subroutine level_sum +![ij]start and [ij]end should already take staggering into account +!!! CHECK ARRAY BOUNDS!! +!! Make sure data is in the correct place. + subroutine remap_up_k(ps_src, ps_dst, ak_src, bk_src, ak_dst, bk_dst, var_src, var_dst, & + bd, istart, iend, jstart, jend, istag, jstag, npz_src, npz_dst, iv, kord, blend_wt, log_pe) + + !Note here that pe is TRANSPOSED to make loops faster + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: istart, iend, jstart, jend, npz_dst, npz_src, iv, kord, istag, jstag + logical, intent(IN) :: log_pe + real, intent(INOUT) :: ps_src(bd%isd:bd%ied,bd%jsd:bd%jed), var_src(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz_src) + real, intent(INOUT) :: ps_dst(bd%isd:bd%ied,bd%jsd:bd%jed), var_dst(bd%isd:bd%ied+istag,bd%jsd:bd%jed+jstag,npz_dst) + real, intent(IN) :: blend_wt(npz_dst), ak_src(npz_src+1), bk_src(npz_src+1), ak_dst(npz_dst+1), bk_dst(npz_dst+1) + + integer :: i, j, k + real pe_src(istart:iend,npz_src+1) + real pe_dst(istart:iend,npz_dst+1) + real peln_src(istart:iend,npz_src+1) + real peln_dst(istart:iend,npz_dst+1) + character(120) :: errstring + real var_dst_unblend(istart:iend,npz_dst) + real bw1, bw2 + + if (iend < istart) return + if (jend < jstart) return + +!!$!!!! DEBUG CODE +!!$ write(debug_unit,*) bd%isd,bd%ied,bd%jsd,bd%jed +!!$ write(debug_unit,*) istart,iend,jstart,jend,istag,jstag +!!$ write(debug_unit,*) +!!$!!! 
END DEBUG CODE + + + !Compute Eulerian pressures + !NOTE: assumes that istag + jstag <= 1 + if (istag > 0) then +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst) + do j=jstart,jend + do k=1,npz_src+1 + do i=istart,iend + pe_src(i,k) = ak_src(k) + 0.5*(ps_src(i,j)+ps_src(i-1,j))*bk_src(k) + enddo + enddo + do k=1,npz_dst+1 + do i=istart,iend + pe_dst(i,k) = ak_dst(k) + 0.5*(ps_dst(i,j)+ps_dst(i-1,j))*bk_dst(k) + enddo + enddo + enddo + elseif (jstag > 0) then +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst) + do j=jstart,jend + do k=1,npz_src+1 + do i=istart,iend + pe_src(i,k) = ak_src(k) + 0.5*(ps_src(i,j)+ps_src(i,j-1))*bk_src(k) + enddo + enddo + do k=1,npz_dst+1 + do i=istart,iend + pe_dst(i,k) = ak_dst(k) + 0.5*(ps_dst(i,j)+ps_dst(i,j-1))*bk_dst(k) + enddo + enddo + enddo + else +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,ak_src,ps_src,bk_src,pe_dst,ak_dst,ps_dst,bk_dst) + do j=jstart,jend + do k=1,npz_src+1 + do i=istart,iend + pe_src(i,k) = ak_src(k) + ps_src(i,j)*bk_src(k) + enddo + enddo + do k=1,npz_dst+1 + do i=istart,iend + pe_dst(i,k) = ak_dst(k) + ps_dst(i,j)*bk_dst(k) + enddo + enddo + enddo + endif + + if (log_pe) then + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,pe_dst,var_src,var_dst,iv,kord,blend_wt) & +!$OMP private(peln_src,peln_dst,bw1,bw2,var_dst_unblend) + do j=jstart,jend + + do k=1,npz_src+1 + do i=istart,iend + peln_src(i,k) = log(pe_src(i,k)) + enddo + enddo + + do k=1,npz_dst+1 + do i=istart,iend + peln_dst(i,k) = log(pe_dst(i,k)) + enddo + enddo + + !remap_2d seems to have some bugs when doing logp remapping + call mappm(npz_src, peln_src, var_src(istart:iend,j:j,:), & + npz_dst, peln_dst, var_dst_unblend, & + istart, iend, iv, kord, peln_dst(istart,1)) + + do k=1,npz_dst + bw1 = blend_wt(k) + bw2 = 1. - bw1 + do i=istart,iend + var_dst(i,j,k) = var_dst(i,j,k)*bw2 + var_dst_unblend(i,k)*bw1 + enddo + enddo + enddo + + else + +!$OMP parallel do default(none) shared(istart,iend,jstart,jend,npz_src,npz_dst,pe_src,pe_dst,var_src,var_dst,iv,kord,blend_wt) & +!$OMP private(bw1,bw2,var_dst_unblend) + do j=jstart,jend + + call mappm(npz_src, pe_src, var_src(istart:iend,j:j,:), & + npz_dst, pe_dst, var_dst_unblend, & + istart, iend, iv, kord, pe_dst(istart,1)) + + do k=1,npz_dst + bw1 = blend_wt(k) + bw2 = 1. - bw1 + do i=istart,iend + var_dst(i,j,k) = var_dst(i,j,k)*bw2 + var_dst_unblend(i,k)*bw1 + enddo + enddo + enddo + + endif + + end subroutine remap_up_k subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & u, v, w, delz, pt, delp, q, & ps, pe, pk, peln, pkz, phis, ua, va, & ptop, gridstruct, flagstruct, & - domain, bd) + domain, bd, Time) type(fv_grid_bounds_type), intent(IN) :: bd real, intent(IN) :: ptop @@ -1477,10 +2936,10 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K) real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal) real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) ! specific humidity and constituents - real, intent(inout) :: delz(bd%isd: ,bd%jsd: ,1: ) ! delta-height (m); non-hydrostatic only + real, intent(inout) :: delz(bd%is: ,bd%js: ,1: ) ! 
delta-height (m); non-hydrostatic only !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -1489,7 +2948,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & real, intent(inout) :: pk (bd%is:bd%ie,bd%js:bd%je, npz+1) ! pe**cappa real, intent(inout) :: peln(bd%is:bd%ie,npz+1,bd%js:bd%je) ! ln(pe) real, intent(inout) :: pkz (bd%is:bd%ie,bd%js:bd%je,npz) ! finite-volume mean pk - + !----------------------------------------------------------------------- ! Others: !----------------------------------------------------------------------- @@ -1499,12 +2958,13 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & type(fv_grid_type), intent(IN) :: gridstruct type(fv_flags_type), intent(IN) :: flagstruct type(domain2d), intent(INOUT) :: domain + type(time_type), intent(IN) :: Time logical :: bad_range integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + is = bd%is ie = bd%ie js = bd%js @@ -1517,7 +2977,7 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & call cubed_to_latlon(u, v, ua, va, & gridstruct, npx, npy, npz, & 1, gridstruct%grid_type, domain, & - gridstruct%nested, flagstruct%c2l_ord, bd) + gridstruct%bounded_domain, flagstruct%c2l_ord, bd) #ifndef SW_DYNAMICS @@ -1534,16 +2994,16 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & q, ng, flagstruct%ncnst, gridstruct%area_64, 0., & .false., .false., & !mountain argument not used flagstruct%moist_phys, flagstruct%hydrostatic, & - flagstruct%nwat, domain, .false.) + flagstruct%nwat, domain, flagstruct%adiabatic, .false.) #endif if (flagstruct%range_warn) then - call range_check('TA update', pt, is, ie, js, je, ng, npz, gridstruct%agrid, 130., 350., bad_range) - call range_check('UA update', ua, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 250., bad_range) - call range_check('VA update', va, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 220., bad_range) + call range_check('TA update', pt, is, ie, js, je, ng, npz, gridstruct%agrid, 130., 350., bad_range, Time) + call range_check('UA update', ua, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 250., bad_range, Time) + call range_check('VA update', va, is, ie, js, je, ng, npz, gridstruct%agrid, -220., 220., bad_range, Time) if (.not. flagstruct%hydrostatic) then - call range_check('W update', w, is, ie, js, je, ng, npz, gridstruct%agrid, -50., 100., bad_range) + call range_check('W update', w, is, ie, js, je, ng, npz, gridstruct%agrid, -50., 100., bad_range, Time) endif endif @@ -1551,21 +3011,25 @@ subroutine after_twoway_nest_update(npx, npy, npz, ng, ncnst, & end subroutine after_twoway_nest_update - !Routines for remapping (interpolated) nested-grid data to the coarse-grid's vertical coordinate. - !This does not yet do anything for the tracers - subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & - kmd, ps0, zvir, ptop, nq, kord_tm, kord_tr, kord_wz, & - is, ie, js, je, isd, ied, jsd, jed, do_q) + !Routines for remapping (interpolated) nestedp-grid data to the coarse-grid's vertical coordinate. 
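!-----------------------------------------------------------------------
! A single-column sketch of what the remap-and-blend routines below do
! conceptually: rebuild interface pressures from ak + bk*ps on both
! coordinates, remap the nested-grid column onto the parent levels,
! then combine with the parent column using blend_wt. The real code
! uses the high-order mappm; the piecewise-constant overlap integral
! below is a first-order stand-in, and all values are illustrative
! assumptions.
!-----------------------------------------------------------------------
program remap_blend_sketch
   implicit none
   integer, parameter :: kmd = 4, npz = 3
   real :: ak_src(kmd+1), bk_src(kmd+1), ak_dst(npz+1), bk_dst(npz+1)
   real :: pe0(kmd+1), pe1(npz+1), t_src(kmd), t_dst(npz), blend_wt(npz)
   real :: ps, acc, ov
   integer :: ks, kd

   ak_src = (/ 100., 5000., 20000., 50000., 0. /)
   bk_src = (/ 0.,   0.,    0.,     0.,     1. /)
   ak_dst = (/ 100., 30000., 60000., 0. /)
   bk_dst = (/ 0.,   0.,     0.,     1. /)
   ps = 1.e5
   t_src = (/ 220., 240., 265., 285. /)    ! nested-grid column
   t_dst = (/ 230., 262., 284. /)          ! parent-grid column
   blend_wt = (/ 0., 1., 1. /)             ! keep parent values near the top

   pe0 = ak_src + bk_src*ps                ! source interface pressures
   pe1 = ak_dst + bk_dst*ps                ! destination interface pressures

   do kd = 1, npz                          ! first-order conservative remap
      acc = 0.
      do ks = 1, kmd
         ov = max(0., min(pe1(kd+1), pe0(ks+1)) - max(pe1(kd), pe0(ks)))
         acc = acc + ov*t_src(ks)
      enddo
      acc = acc/(pe1(kd+1) - pe1(kd))
      t_dst(kd) = blend_wt(kd)*acc + (1.-blend_wt(kd))*t_dst(kd)
   enddo
   print *, 't_dst after remap/blend:', t_dst
end program remap_blend_sketch
!-----------------------------------------------------------------------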
+ + subroutine update_remap_tqw( npz, ak_dst, bk_dst, ps_dst, t_dst, q_dst, w_dst, & + hydrostatic, & + kmd, ps_src, ak_src, bk_src, t_src, w_src, & + zvir, ptop, nq, kord_tm, kord_tr, kord_wz, & + is, ie, js, je, isd, ied, jsd, jed, do_q, & + istart, iend, jstart, jend, blend_wt) integer, intent(in):: npz, kmd, nq, kord_tm, kord_tr, kord_wz real, intent(in):: zvir, ptop - real, intent(in):: ak(npz+1), bk(npz+1) - real, intent(in), dimension(isd:ied,jsd:jed):: ps0 - real, intent(in), dimension(isd:ied,jsd:jed):: ps - real, intent(in), dimension(isd:ied,jsd:jed,npz):: delp - real, intent(inout), dimension(isd:ied,jsd:jed,npz):: t, w - real, intent(inout), dimension(isd:ied,jsd:jed,npz,nq):: q - integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed + real, intent(in):: ak_src(kmd+1), bk_src(kmd+1) + real, intent(in):: ak_dst(npz+1), bk_dst(npz+1), blend_wt(npz) + real, intent(in), dimension(isd:ied,jsd:jed):: ps_src + real, intent(in), dimension(isd:ied,jsd:jed):: ps_dst + real, intent(inout), dimension(isd:ied,jsd:jed,npz):: t_dst, w_dst + real, intent(inout), dimension(isd:ied,jsd:jed,npz,nq):: q_dst + real, intent(in), dimension(isd:ied,jsd:jed,kmd):: t_src, w_src + integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed, istart, iend, jstart, jend logical, intent(in) :: hydrostatic, do_q ! local: real, dimension(is:ie,kmd):: tp, qp @@ -1573,67 +3037,80 @@ subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & real, dimension(is:ie,npz):: qn1 real, dimension(is:ie,npz+1):: pe1, pn1 integer i,j,k,iq + real :: wt1, wt2 + + if (do_q) call mpp_error(FATAL, ' update_remap_tqw: q remapping not yet supported') + + !This line to check if the update region is correctly defined or not is + ! IMPORTANT. Sometimes one or the other pair of limits will give a + ! non-empty loop, even though no data was transferred! This is why + ! I was having so much trouble getting the remap-update to work --- lmh 11jul17 + if (istart > iend .or. 
jstart > jend) return -!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps0,q,npz,ptop,do_q,& -!$OMP t,w,ps,nq,hydrostatic,kord_tm,kord_tr,kord_wz) & -!$OMP private(pe0,pn0,pe1,pn1,qp,tp,qn1) - do 5000 j=js,je +!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,q_dst,npz,ptop,do_q,& +!$OMP t_dst,w_dst,t_src,w_src,ak_src,bk_src,ps_src,nq,hydrostatic,kord_tm,kord_tr,kord_wz,istart,iend,jstart,jend,blend_wt) & +!$OMP private(pe0,pn0,pe1,pn1,qp,tp,qn1,wt1,wt2) + do 5000 j=jstart,jend do k=1,kmd+1 - do i=is,ie - pe0(i,k) = ak(k) + bk(k)*ps0(i,j) + do i=istart,iend + pe0(i,k) = ak_src(k) + bk_src(k)*ps_src(i,j) pn0(i,k) = log(pe0(i,k)) enddo - enddo - do k=1,kmd+1 - do i=is,ie - pe1(i,k) = ak(k) + bk(k)*ps(i,j) + enddo + do k=1,npz+1 + do i=istart,iend + pe1(i,k) = ak_dst(k) + bk_dst(k)*ps_dst(i,j) pn1(i,k) = log(pe1(i,k)) enddo - enddo + enddo if (do_q) then do iq=1,nq do k=1,kmd - do i=is,ie - qp(i,k) = q(i,j,k,iq) + do i=istart,iend + qp(i,k) = q_dst(i,j,k,iq) enddo enddo - call mappm(kmd, pe0, qp, npz, pe1, qn1, is,ie, 0, kord_tr, ptop) + call mappm(kmd, pe0, qp, npz, pe1, qn1, is,ie, 0, kord_tr, ptop) !not sure about indices do k=1,npz - do i=is,ie - q(i,j,k,iq) = qn1(i,k) + do i=istart,iend + q_dst(i,j,k,iq) = qn1(i,k) enddo enddo enddo endif do k=1,kmd - do i=is,ie - tp(i,k) = t(i,j,k) + do i=istart,iend + tp(i,k) = t_src(i,j,k) enddo enddo !Remap T using logp - call mappm(kmd, pn0, tp, npz, pn1, qn1, is,ie, 1, abs(kord_tm), ptop) - + call mappm(kmd, pn0(istart:iend,:), tp(istart:iend,:), npz, pn1(istart:iend,:), qn1(istart:iend,:), istart,iend, 1, abs(kord_tm), ptop) + do k=1,npz - do i=is,ie - t(i,j,k) = qn1(i,k) + wt1 = blend_wt(k) + wt2 = 1. - wt1 + do i=istart,iend + t_dst(i,j,k) = qn1(i,k)*wt1 + t_dst(i,j,k)*wt2 enddo enddo if (.not. hydrostatic) then do k=1,kmd - do i=is,ie - tp(i,k) = w(i,j,k) + do i=istart,iend + tp(i,k) = w_src(i,j,k) enddo enddo !Remap w using p !Using iv == -1 instead of -2 - call mappm(kmd, pe0, tp, npz, pe1, qn1, is,ie, -1, kord_wz, ptop) + call mappm(kmd, pe0(istart:iend,:), tp(istart:iend,:), npz, pe1(istart:iend,:), qn1(istart:iend,:), istart,iend, -1, kord_wz, ptop) do k=1,npz - do i=is,ie - w(i,j,k) = qn1(i,k) + wt1 = blend_wt(k) + wt2 = 1. - wt1 + do i=istart,iend + w_dst(i,j,k) = qn1(i,k)*wt1 + w_dst(i,j,k)*wt2 enddo enddo endif @@ -1643,18 +3120,26 @@ subroutine update_remap_tqw( npz, ak, bk, ps, delp, t, q, w, hydrostatic, & end subroutine update_remap_tqw !remap_uv as-is remaps only a-grid velocities. A new routine has been written to handle staggered grids. 
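!-----------------------------------------------------------------------
! The routine below rebuilds interface pressures at staggered wind
! points by averaging the two cell-center surface pressures that
! straddle each point, e.g. for u on the D grid:
!    pe0(i,k) = ak(k) + bk(k)*0.5*(ps(i,j) + ps(i,j-1))
! A minimal sketch for one u point follows; the ak/bk/ps values are
! illustrative assumptions.
!-----------------------------------------------------------------------
program stagger_pe_sketch
   implicit none
   integer, parameter :: km = 3
   real :: ak(km+1), bk(km+1), pe(km+1)
   real :: ps_south, ps_north        ! cell-center ps on either side of u
   integer :: k

   ak = (/ 100., 20000., 40000., 0. /)
   bk = (/ 0.,   0.,     0.3,    1. /)
   ps_south = 1.000e5
   ps_north = 0.996e5

   do k = 1, km+1
      pe(k) = ak(k) + bk(k)*0.5*(ps_south + ps_north)
   enddo
   print *, 'interface p at u point:', pe
end program stagger_pe_sketch
!-----------------------------------------------------------------------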
- subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & - is, ie, js, je, isd, ied, jsd, jed, ptop) + subroutine update_remap_uv(npz, ak_dst, bk_dst, ps_dst, u_dst, v_dst, & + kmd, ak_src, bk_src, ps_src, u_src, v_src, & + kord_mt, & + is, ie, js, je, isd, ied, jsd, jed, ptop, & + istart, iend, jstart, jend, blend_wt) integer, intent(in):: npz - real, intent(in):: ak(npz+1), bk(npz+1) - real, intent(in):: ps(isd:ied,jsd:jed) - real, intent(inout), dimension(isd:ied,jsd:jed+1,npz):: u - real, intent(inout), dimension(isd:ied+1,jsd:jed,npz):: v + real, intent(in):: ak_dst(npz+1), bk_dst(npz+1), blend_wt(npz) + real, intent(in):: ps_dst(isd:ied,jsd:jed) + real, intent(inout), dimension(isd:ied,jsd:jed+1,npz):: u_dst + real, intent(inout), dimension(isd:ied+1,jsd:jed,npz):: v_dst + integer, intent(in):: kmd + real, intent(in):: ak_src(kmd+1), bk_src(kmd+1) + real, intent(in):: ps_src(isd:ied,jsd:jed) + real, intent(inout), dimension(isd:ied,jsd:jed+1,kmd):: u_src + real, intent(inout), dimension(isd:ied+1,jsd:jed,kmd):: v_src ! - integer, intent(in):: kmd, kord_mt + integer, intent(in):: kord_mt real, intent(IN) :: ptop - real, intent(in):: ps0(isd:ied,jsd:jed) integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed + integer, intent(IN) :: istart, iend, jstart, jend ! ! local: real, dimension(is:ie+1,kmd+1):: pe0 @@ -1662,27 +3147,33 @@ subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & real, dimension(is:ie+1,kmd):: qt real, dimension(is:ie+1,npz):: qn1 integer i,j,k + real :: wt1, wt2 + + !This line to check if the update region is correctly defined or not is + ! IMPORTANT. Sometimes one or the other pair of limits will give a + ! non-empty loop, even though no data was transferred! + if (istart > iend .or. jstart > jend) return !------ ! map u !------ -!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,u,ptop,kord_mt) & -!$OMP private(pe0,pe1,qt,qn1) - do j=js,je+1 +!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,u_dst,v_dst,npz,ak_src,bk_src,ps_src,u_src,v_src,ptop,kord_mt,istart,iend,jstart,jend,blend_wt) & +!$OMP private(pe0,pe1,qt,qn1,wt1,wt2) + do j=jstart,jend+1 !------ ! Data !------ do k=1,kmd+1 - do i=is,ie - pe0(i,k) = ak(k) + bk(k)*0.5*(ps0(i,j)+ps0(i,j-1)) + do i=istart,iend + pe0(i,k) = ak_src(k) + bk_src(k)*0.5*(ps_src(i,j)+ps_src(i,j-1)) enddo enddo !------ ! Model !------ - do k=1,kmd+1 - do i=is,ie - pe1(i,k) = ak(k) + bk(k)*0.5*(ps(i,j)+ps(i,j-1)) + do k=1,npz+1 + do i=istart,iend + pe1(i,k) = ak_dst(k) + bk_dst(k)*0.5*(ps_dst(i,j)+ps_dst(i,j-1)) enddo enddo !------ @@ -1690,15 +3181,17 @@ subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & !------ qt = 0. do k=1,kmd - do i=is,ie - qt(i,k) = u(i,j,k) + do i=istart,iend + qt(i,k) = u_src(i,j,k) enddo enddo qn1 = 0. - call mappm(kmd, pe0(is:ie,:), qt(is:ie,:), npz, pe1(is:ie,:), qn1(is:ie,:), is,ie, -1, kord_mt, ptop) + call mappm(kmd, pe0(istart:iend,:), qt(istart:iend,:), npz, pe1(istart:iend,:), qn1(istart:iend,:), istart,iend, -1, kord_mt, ptop) do k=1,npz - do i=is,ie - u(i,j,k) = qn1(i,k) + wt1 = blend_wt(k) + wt2 = 1. - wt1 + do i=istart,iend + u_dst(i,j,k) = qn1(i,k)*wt1 + u_dst(i,j,k)*wt2 enddo enddo @@ -1707,23 +3200,23 @@ subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & !------ ! 
map v !------ -!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,v,ptop) & -!$OMP private(pe0,pe1,qt,qn1) - do j=js,je +!$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak_dst,bk_dst,ps_dst,u_dst,v_dst,ak_src,bk_src,ps_src,npz,u_src,v_src,ptop,istart,iend,jstart,jend,blend_wt) & +!$OMP private(pe0,pe1,qt,qn1,wt1,wt2) + do j=jstart,jend !------ ! Data !------ do k=1,kmd+1 - do i=is,ie+1 - pe0(i,k) = ak(k) + bk(k)*0.5*(ps0(i,j)+ps0(i-1,j)) + do i=istart,iend+1 + pe0(i,k) = ak_src(k) + bk_src(k)*0.5*(ps_src(i,j)+ps_src(i-1,j)) enddo enddo !------ ! Model !------ - do k=1,kmd+1 - do i=is,ie+1 - pe1(i,k) = ak(k) + bk(k)*0.5*(ps(i,j)+ps(i-1,j)) + do k=1,npz+1 + do i=istart,iend+1 + pe1(i,k) = ak_dst(k) + bk_dst(k)*0.5*(ps_dst(i,j)+ps_dst(i-1,j)) enddo enddo !------ @@ -1731,15 +3224,17 @@ subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & !------ qt = 0. do k=1,kmd - do i=is,ie+1 - qt(i,k) = v(i,j,k) + do i=istart,iend+1 + qt(i,k) = v_src(i,j,k) enddo enddo qn1 = 0. - call mappm(kmd, pe0(is:ie+1,:), qt(is:ie+1,:), npz, pe1(is:ie+1,:), qn1(is:ie+1,:), is,ie+1, -1, 8, ptop) + call mappm(kmd, pe0(istart:iend+1,:), qt(istart:iend+1,:), npz, pe1(istart:iend+1,:), qn1(istart:iend+1,:), istart,iend+1, -1, 8, ptop) do k=1,npz - do i=is,ie+1 - v(i,j,k) = qn1(i,k) + wt1 = blend_wt(k) + wt2 = 1. - wt1 + do i=istart,iend+1 + v_dst(i,j,k) = qn1(i,k)*wt1 + v_dst(i,j,k)*wt2 !Does this kill OMP??? enddo enddo end do @@ -1747,4 +3242,5 @@ subroutine update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, & end subroutine update_remap_uv + end module fv_nesting_mod diff --git a/model/fv_regional_bc.F90 b/model/fv_regional_bc.F90 new file mode 100644 index 000000000..7bdd6eab9 --- /dev/null +++ b/model/fv_regional_bc.F90 @@ -0,0 +1,5727 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** +!!! This code contributed by Tom Black and Jim Abeles at NCEP/EMC !!! 
+ +module fv_regional_mod + + use mpp_domains_mod, only: domain2d + use mpp_domains_mod, only: domain1D, mpp_get_domain_components, & + mpp_get_global_domain, & + mpp_get_data_domain, & + mpp_get_compute_domain, & + NORTH, SOUTH, EAST, WEST, & + CENTER, CORNER, & + mpp_domains_set_stack_size, & + mpp_update_domains, mpp_get_neighbor_pe + use mpp_mod, only: FATAL, input_nml_file, & + mpp_error ,mpp_pe, mpp_sync, & + mpp_npes, mpp_root_pe, mpp_gather, & + mpp_get_current_pelist, NULL_PE + use mpp_io_mod + use tracer_manager_mod,only: get_tracer_index + use field_manager_mod, only: MODEL_ATMOS + use time_manager_mod, only: get_time & + ,operator(-),operator(/) & + ,time_type,time_type_to_real + use constants_mod, only: cp_air, cp_vapor, grav, kappa & + ,pi=>pi_8,rdgas, rvgas + use fv_arrays_mod, only: fv_atmos_type & + ,fv_grid_bounds_type & + ,fv_regional_bc_bounds_type & + ,R_GRID & + ,fv_nest_BC_type_3D & + ,allocate_fv_nest_BC_type + + use fv_diagnostics_mod,only: prt_gb_nh_sh, prt_height + use fv_grid_utils_mod, only: g_sum,mid_pt_sphere,get_unit_vect2 & + ,get_latlon_vector,inner_prod & + ,cell_center2 + use fv_mapz_mod, only: mappm, moist_cp, moist_cv + use fv_mp_mod, only: is_master, mp_reduce_min, mp_reduce_max + use fv_fill_mod, only: fillz + use fv_eta_mod, only: get_eta_level + use fms_mod, only: check_nml_error + use fms_io_mod, only: read_data + use boundary_mod, only: fv_nest_BC_type_3D + + private + + public ak_in, bk_in & + ,bc_hour & + ,bc_time_interval & + ,BC_t0,BC_t1 & + ,begin_regional_restart,exch_uv & + ,ntimesteps_per_bc_update & + ,read_new_bc_data & + ,regional_bc_data & + ,regional_bc_t1_to_t0 & + ,regional_boundary_update & + ,next_time_to_read_bcs & + ,set_regional_BCs & + ,setup_regional_BC & + ,start_regional_cold_start & + ,start_regional_restart & + ,dump_field & + ,current_time_in_seconds & + ,a_step, p_step, k_step, n_step + + integer,parameter :: bc_time_interval=3 & + ,nhalo_data =4 & + ,nhalo_model=3 + + integer, public, parameter :: H_STAGGER = 1 + integer, public, parameter :: U_STAGGER = 2 + integer, public, parameter :: V_STAGGER = 3 + + !These parameters are ONLY used for the dump_field debugging routines + real, parameter :: stretch_factor = 1.5 + real, parameter :: target_lon = -97.5 + real, parameter :: target_lat = 35.5 + integer, parameter :: parent_tile = 6 + integer, parameter :: refine_ratio = 3 + + integer, parameter :: cube_res = 96 + integer, parameter :: istart_nest = 26 + integer, parameter :: jstart_nest = 36 + integer, parameter :: iend_nest = 167 + integer, parameter :: jend_nest = 165 + +! integer, parameter :: cube_res = 768 +! integer, parameter :: istart_nest = 191 +! integer, parameter :: jstart_nest = 327 +! integer, parameter :: iend_nest = 1346 +! integer, parameter :: jend_nest = 1290 + + real :: current_time_in_seconds + integer,save :: ncid,next_time_to_read_bcs,npz,ntracers + integer,save :: liq_water_index,o3mr_index,sphum_index !<-- Locations of tracer vbls in the tracers array + integer,save :: bc_hour, ntimesteps_per_bc_update + + real(kind=R_GRID),dimension(:,:,:),allocatable :: agrid_reg & !<-- Lon/lat of cell centers + ,grid_reg !<-- Lon/lat of cell corners + + real,dimension(:,:),allocatable :: phis_reg !<-- Filtered sfc geopotential + + real,dimension(:),allocatable :: ak_in, bk_in + + logical,save :: north_bc,south_bc,east_bc,west_bc & + ,begin_regional_restart=.true. 
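!-----------------------------------------------------------------------
! An illustrative sketch of how boundary values held at two successive
! BC times (the t0/t1 pairs declared below) can be advanced each model
! timestep: with ntimesteps_per_bc_update steps between BC reads, a
! linear interpolation in time gives the boundary value at step n.
! The names mirror this module, but the scalar example and the linear
! weighting are assumptions for illustration.
!-----------------------------------------------------------------------
program bc_time_interp_sketch
   implicit none
   integer, parameter :: ntimesteps_per_bc_update = 6
   real :: bc_t0, bc_t1, fraction, bc_now
   integer :: n_step

   bc_t0 = 287.1          ! boundary value at the last BC read
   bc_t1 = 289.4          ! boundary value at the next BC read
   do n_step = 1, ntimesteps_per_bc_update
      fraction = real(n_step)/real(ntimesteps_per_bc_update)
      bc_now = bc_t0 + (bc_t1 - bc_t0)*fraction
      print *, n_step, bc_now
   enddo
end program bc_time_interp_sketch
!-----------------------------------------------------------------------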
+ + type fv_regional_BC_variables + real,dimension(:,:,:),allocatable :: delp_BC, divgd_BC, u_BC, v_BC, uc_BC, vc_BC + real,dimension(:,:,:,:),allocatable :: q_BC +#ifndef SW_DYNAMICS + real,dimension(:,:,:),allocatable :: pt_BC, w_BC, delz_BC +#ifdef USE_COND + real,dimension(:,:,:),allocatable :: q_con_BC +#ifdef MOIST_CAPPA + real,dimension(:,:,:),allocatable :: cappa_BC +#endif +#endif +#endif + end type fv_regional_BC_variables + + type fv_domain_sides + type(fv_regional_BC_variables) :: north, south, east, west + end type fv_domain_sides + + type(fv_domain_sides),target,save :: BC_t0, BC_t1 !<-- Boundary values for all BC variables at successive times from the regional BC file + + type(fv_regional_BC_variables),pointer,save :: bc_north_t0 & + ,bc_south_t0 & + ,bc_west_t0 & + ,bc_east_t0 & + ,bc_north_t1 & + ,bc_south_t1 & + ,bc_west_t1 & + ,bc_east_t1 + + type(fv_regional_bc_bounds_type),pointer,save :: regional_bounds + + type(fv_nest_BC_type_3D), public :: delz_regBC ! lmh + integer :: ns = 0 ! lmh + + real,parameter :: tice=273.16 & + ,t_i0=15. + real, parameter :: c_liq = 4185.5 ! gfdl: heat capacity of liquid at 15 deg c + real, parameter :: c_ice = 1972.0 ! gfdl: heat capacity of ice at - 15 deg c + real, parameter :: zvir = rvgas/rdgas - 1. & + ,cv_air = cp_air - rdgas & + ,cv_vap = cp_vapor - rvgas + + real,dimension(:),allocatable :: dum1d, pref + character(len=100) :: grid_data='grid.tile7.halo4.nc' & + ,oro_data ='oro_data.tile7.halo4.nc' + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + real(kind=R_GRID), parameter:: dbl_snan=x'FFF7FFFFFFFFFFFF' + + interface dump_field + module procedure dump_field_3d + module procedure dump_field_2d + end interface dump_field + + integer :: a_step, p_step, k_step, n_step + +contains + +!----------------------------------------------------------------------- +! + subroutine setup_regional_BC(Atm & + ,isd,ied,jsd,jed & + ,npx,npy ) +! +!----------------------------------------------------------------------- +!*** Regional boundary data is obtained from the external BC file. +!----------------------------------------------------------------------- + use netcdf +!----------------------------------------------------------------------- + implicit none +!----------------------------------------------------------------------- +! +!--------------------- +!*** Input variables +!--------------------- +! + integer,intent(in) :: isd,ied,jsd,jed,npx,npy +! + type(fv_atmos_type),target,intent(inout) :: Atm !<-- Atm object for the current domain +! +!-------------------- +!*** Local variables +!-------------------- +! + integer :: i,i_start,i_end,j,j_start,j_end,klev_out +! + real :: ps1 +! +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! +!----------------------------------------------------------------------- +!*** The boundary data is laid out so that the pieces for the north +!*** and south sides span the entire distance from the east side of +!*** of the east halo to the west side of the west halo. Therefore +!*** there the # of cells in the x direction in the north/south BC +!*** data is nx+2*nhalo where nx is the # of cells in the x direction +!*** on the compute domain. 
This means the # of cells spanned in the +!*** west/east side BC data is just ny (the # of cells in the y +!*** direction on the compute domain) and not ny+2*nhalo since the +!*** halo values on the south and north ends of the east/west sides +!*** are already part of the BC data on the north/south sides. +!----------------------------------------------------------------------- +! +! nhalo_model=3 +! +! |----------- nxp-1 -----------| <-- east/west compute points +! |---------- north BC data ----------| +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! --- ooo ---j=1--- ooo --- --- +! | ooo ooo | | +! | ooo |ooo | | +! ooo i=1-->|ooo +! west BC data ooo| |ooo east BC data nyp-1 <-- north/south compute points +! ooo|<--i=isd-nhalo_model ooo +! | ooo| ooo | | +! | ooo ooo | | +! --- ooo ---j=jsd-nhalo_model--- ooo --- --- +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +! |---------- south BC data ----------| +! +!----------------------------------------------------------------------- +! + north_bc=.false. + south_bc=.false. + east_bc =.false. + west_bc =.false. +! +!----------------------------------------------------------------------- +!*** Which side(s) of the domain does this task lie on if any? +!----------------------------------------------------------------------- +! + if(jsd<0)then + north_bc=.true. + endif + + if(jed>npy-1)then + south_bc=.true. + endif + + if(isd<0)then + east_bc=.true. + endif + + if(ied>npx-1)then + west_bc=.true. + endif +! + if(.not.(north_bc.or.south_bc.or.east_bc.or.west_bc))then + return !<-- This task is not on the domain boundary so exit. + endif +! +! +!----------------------------------------------------------------------- +! + ntracers=Atm%ncnst !<-- # of advected tracers + npz=Atm%npz !<-- # of layers in vertical configuration of integration + klev_out=npz +! + regional_bounds=>Atm%regional_bc_bounds +! +!----------------------------------------------------------------------- +!*** Compute the index limits within the boundary region on each +!*** side of the domain for both scalars and winds. Since the +!*** domain does not move then the computations need to be done +!*** only once. Likewise find and save the locations of the +!*** available tracers in the tracers array. +!----------------------------------------------------------------------- +! + call compute_regional_bc_indices(Atm%regional_bc_bounds) +! + liq_water_index=get_tracer_index(MODEL_ATMOS, 'liq_wat') + o3mr_index =get_tracer_index(MODEL_ATMOS, 'o3mr') + sphum_index =get_tracer_index(MODEL_ATMOS, 'sphum') +! +!----------------------------------------------------------------------- +!*** Allocate the objects that will hold the boundary variables +!*** at the two time levels surrounding each piece of the regional +!*** domain's integration. Data is read from the BC files into +!*** time level t1 while t0 holds the data from the preceding +!*** BC file. +!----------------------------------------------------------------------- +!*** Point pointers at each side's boundary data for both time levels. +!*** Those are needed when the actual update of boundary points is +!*** executed. +!----------------------------------------------------------------------- +! 
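!-----------------------------------------------------------------------
! A tiny sketch of the extent arithmetic described in the diagram
! above: the north/south BC strips span the full x extent including
! both halos (nx + 2*nhalo cells), while the east/west strips span
! only the ny compute cells, since their corner halos already belong
! to the north/south strips. The nx/ny values are assumed.
!-----------------------------------------------------------------------
program bc_extent_sketch
   implicit none
   integer, parameter :: nhalo_model = 3
   integer :: nx, ny, n_cells_ns, n_cells_ew

   nx = 96                             ! compute cells in x
   ny = 96                             ! compute cells in y
   n_cells_ns = nx + 2*nhalo_model     ! north/south strip width
   n_cells_ew = ny                     ! east/west strip height
   print *, 'north/south strip:', n_cells_ns, '  east/west strip:', n_cells_ew
end program bc_extent_sketch
!-----------------------------------------------------------------------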
+ if(north_bc)then + call allocate_regional_BC_arrays('north' & + ,north_bc,south_bc & + ,east_bc,west_bc & + ,Atm%regional_bc_bounds%is_north & + ,Atm%regional_bc_bounds%ie_north & + ,Atm%regional_bc_bounds%js_north & + ,Atm%regional_bc_bounds%je_north & + ,Atm%regional_bc_bounds%is_north_uvs & + ,Atm%regional_bc_bounds%ie_north_uvs & + ,Atm%regional_bc_bounds%js_north_uvs & + ,Atm%regional_bc_bounds%je_north_uvs & + ,Atm%regional_bc_bounds%is_north_uvw & + ,Atm%regional_bc_bounds%ie_north_uvw & + ,Atm%regional_bc_bounds%js_north_uvw & + ,Atm%regional_bc_bounds%je_north_uvw & + ,klev_out & + ,ntracers & + ,BC_t1%north ) +! + call allocate_regional_BC_arrays('north' & + ,north_bc,south_bc & + ,east_bc,west_bc & + ,Atm%regional_bc_bounds%is_north & + ,Atm%regional_bc_bounds%ie_north & + ,Atm%regional_bc_bounds%js_north & + ,Atm%regional_bc_bounds%je_north & + ,Atm%regional_bc_bounds%is_north_uvs & + ,Atm%regional_bc_bounds%ie_north_uvs & + ,Atm%regional_bc_bounds%js_north_uvs & + ,Atm%regional_bc_bounds%je_north_uvs & + ,Atm%regional_bc_bounds%is_north_uvw & + ,Atm%regional_bc_bounds%ie_north_uvw & + ,Atm%regional_bc_bounds%js_north_uvw & + ,Atm%regional_bc_bounds%je_north_uvw & + ,klev_out & + ,ntracers & + ,BC_t0%north ) +! + bc_north_t0=>BC_t0%north + bc_north_t1=>BC_t1%north +! + endif + + if(south_bc)then + call allocate_regional_BC_arrays('south' & + ,north_bc,south_bc & + ,east_bc,west_bc & + ,Atm%regional_bc_bounds%is_south & + ,Atm%regional_bc_bounds%ie_south & + ,Atm%regional_bc_bounds%js_south & + ,Atm%regional_bc_bounds%je_south & + ,Atm%regional_bc_bounds%is_south_uvs & + ,Atm%regional_bc_bounds%ie_south_uvs & + ,Atm%regional_bc_bounds%js_south_uvs & + ,Atm%regional_bc_bounds%je_south_uvs & + ,Atm%regional_bc_bounds%is_south_uvw & + ,Atm%regional_bc_bounds%ie_south_uvw & + ,Atm%regional_bc_bounds%js_south_uvw & + ,Atm%regional_bc_bounds%je_south_uvw & + ,klev_out & + ,ntracers & + ,BC_t1%south ) +! + call allocate_regional_BC_arrays('south' & + ,north_bc,south_bc & + ,east_bc,west_bc & + ,Atm%regional_bc_bounds%is_south & + ,Atm%regional_bc_bounds%ie_south & + ,Atm%regional_bc_bounds%js_south & + ,Atm%regional_bc_bounds%je_south & + ,Atm%regional_bc_bounds%is_south_uvs & + ,Atm%regional_bc_bounds%ie_south_uvs & + ,Atm%regional_bc_bounds%js_south_uvs & + ,Atm%regional_bc_bounds%je_south_uvs & + ,Atm%regional_bc_bounds%is_south_uvw & + ,Atm%regional_bc_bounds%ie_south_uvw & + ,Atm%regional_bc_bounds%js_south_uvw & + ,Atm%regional_bc_bounds%je_south_uvw & + ,klev_out & + ,ntracers & + ,BC_t0%south ) +! + bc_south_t0=>BC_t0%south + bc_south_t1=>BC_t1%south +! + endif +! + if(east_bc)then + call allocate_regional_BC_arrays('east ' & + ,north_bc,south_bc & + ,east_bc,west_bc & + ,Atm%regional_bc_bounds%is_east & + ,Atm%regional_bc_bounds%ie_east & + ,Atm%regional_bc_bounds%js_east & + ,Atm%regional_bc_bounds%je_east & + ,Atm%regional_bc_bounds%is_east_uvs & + ,Atm%regional_bc_bounds%ie_east_uvs & + ,Atm%regional_bc_bounds%js_east_uvs & + ,Atm%regional_bc_bounds%je_east_uvs & + ,Atm%regional_bc_bounds%is_east_uvw & + ,Atm%regional_bc_bounds%ie_east_uvw & + ,Atm%regional_bc_bounds%js_east_uvw & + ,Atm%regional_bc_bounds%je_east_uvw & + ,klev_out & + ,ntracers & + ,BC_t1%east ) +! 
+ call allocate_regional_BC_arrays('east ' &
+ ,north_bc,south_bc &
+ ,east_bc,west_bc &
+ ,Atm%regional_bc_bounds%is_east &
+ ,Atm%regional_bc_bounds%ie_east &
+ ,Atm%regional_bc_bounds%js_east &
+ ,Atm%regional_bc_bounds%je_east &
+ ,Atm%regional_bc_bounds%is_east_uvs &
+ ,Atm%regional_bc_bounds%ie_east_uvs &
+ ,Atm%regional_bc_bounds%js_east_uvs &
+ ,Atm%regional_bc_bounds%je_east_uvs &
+ ,Atm%regional_bc_bounds%is_east_uvw &
+ ,Atm%regional_bc_bounds%ie_east_uvw &
+ ,Atm%regional_bc_bounds%js_east_uvw &
+ ,Atm%regional_bc_bounds%je_east_uvw &
+ ,klev_out &
+ ,ntracers &
+ ,BC_t0%east )
+!
+ bc_east_t0=>BC_t0%east
+ bc_east_t1=>BC_t1%east
+!
+ endif
+!
+ if(west_bc)then
+ call allocate_regional_BC_arrays('west ' &
+ ,north_bc,south_bc &
+ ,east_bc,west_bc &
+ ,Atm%regional_bc_bounds%is_west &
+ ,Atm%regional_bc_bounds%ie_west &
+ ,Atm%regional_bc_bounds%js_west &
+ ,Atm%regional_bc_bounds%je_west &
+ ,Atm%regional_bc_bounds%is_west_uvs &
+ ,Atm%regional_bc_bounds%ie_west_uvs &
+ ,Atm%regional_bc_bounds%js_west_uvs &
+ ,Atm%regional_bc_bounds%je_west_uvs &
+ ,Atm%regional_bc_bounds%is_west_uvw &
+ ,Atm%regional_bc_bounds%ie_west_uvw &
+ ,Atm%regional_bc_bounds%js_west_uvw &
+ ,Atm%regional_bc_bounds%je_west_uvw &
+ ,klev_out &
+ ,ntracers &
+ ,BC_t1%west )
+!
+ call allocate_regional_BC_arrays('west ' &
+ ,north_bc,south_bc &
+ ,east_bc,west_bc &
+ ,Atm%regional_bc_bounds%is_west &
+ ,Atm%regional_bc_bounds%ie_west &
+ ,Atm%regional_bc_bounds%js_west &
+ ,Atm%regional_bc_bounds%je_west &
+ ,Atm%regional_bc_bounds%is_west_uvs &
+ ,Atm%regional_bc_bounds%ie_west_uvs &
+ ,Atm%regional_bc_bounds%js_west_uvs &
+ ,Atm%regional_bc_bounds%je_west_uvs &
+ ,Atm%regional_bc_bounds%is_west_uvw &
+ ,Atm%regional_bc_bounds%ie_west_uvw &
+ ,Atm%regional_bc_bounds%js_west_uvw &
+ ,Atm%regional_bc_bounds%je_west_uvw &
+ ,klev_out &
+ ,ntracers &
+ ,BC_t0%west )
+!
+ bc_west_t0=>BC_t0%west
+ bc_west_t1=>BC_t1%west
+!
+ endif
+
+ call allocate_fv_nest_BC_type(delz_regBC,Atm,ns,0,0,.false.)
+!
+!-----------------------------------------------------------------------
+!*** We need regional versions of the arrays for surface elevation,
+!*** latitude/longitude of grid cell corners, and lat/lon of the
+!*** cell centers because those variables are needed one row beyond
+!*** FV3's normal boundary region width of nhalo_model rows.
+!-----------------------------------------------------------------------
+!
+ allocate(phis_reg(isd-1:ied+1,jsd-1:jed+1)) ; phis_reg=real_snan !<-- Sfc elevation of filtered topography.
+!
+ allocate(agrid_reg(isd-1:ied+1,jsd-1:jed+1,2)); agrid_reg=dbl_snan !<-- Center lat/lon of grid cells.
+ allocate(grid_reg(isd-1:ied+2,jsd-1:jed+2,2)) ; grid_reg=dbl_snan !<-- Lon/lat of grid cell corners.
+!
+!-----------------------------------------------------------------------
+!*** From the data holding nhalo_model rows of boundary values,
+!*** read in the lat/lon of the grid cell corners and fill in
+!*** the values at the grid cell centers. The regional mode needs
+!*** the extra row of data.
+!-----------------------------------------------------------------------
+!
+ call read_regional_lon_lat
+!
+!-----------------------------------------------------------------------
+!*** From the data holding nhalo_model rows of filtered topography,
+!*** read in those values. The regional mode needs the extra row
+!*** of data.
+!-----------------------------------------------------------------------
+!
+ call read_regional_filtered_topo
+!
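+!-----------------------------------------------------------------------
+!*** Worked example (assuming nhalo_model=3): a task whose compute
+!*** domain begins at i=1 has isd=-2, so phis_reg and agrid_reg start
+!*** at i=isd-1=-3, one row beyond the normal halo. grid_reg extends
+!*** one index further (ied+2, jed+2) because cell corners outnumber
+!*** cell centers by one in each direction.
+!-----------------------------------------------------------------------
+!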
+!-----------------------------------------------------------------------
+!*** In the init step Atm%phis is given values only in the integration
+!*** domain, but a regional run also needs values in the boundary
+!*** rows. The preceding subroutine call read the same data that
+!*** originally filled Atm%phis, so fill its boundary rows now.
+!-----------------------------------------------------------------------
+!
+ if(north_bc)then
+ i_start=isd
+ i_end =ied
+ j_start=jsd
+ if(.not.Atm%flagstruct%warm_start)then !<-- NOT a restarted run.
+ j_end =jsd+nhalo_model-1
+ else !<-- A restarted run.
+ j_end=jsd+nhalo_model+1
+ endif
+ do j=j_start,j_end
+ do i=i_start,i_end
+ Atm%phis(i,j)=phis_reg(i,j)
+ enddo
+ enddo
+ endif
+!
+ if(south_bc)then
+ i_start=isd
+ i_end =ied
+ j_end =jed
+ if(.not.Atm%flagstruct%warm_start)then !<-- NOT a restarted run.
+ j_start=jed-nhalo_model+1
+ else !<-- A restarted run.
+ j_start=jed-nhalo_model-1
+ endif
+ do j=j_start,j_end
+ do i=i_start,i_end
+ Atm%phis(i,j)=phis_reg(i,j)
+ enddo
+ enddo
+ endif
+ if(east_bc)then
+ i_start=isd
+ j_start=jsd
+ j_end =jed
+ if(.not.Atm%flagstruct%warm_start)then !<-- NOT a restarted run.
+ i_end=isd+nhalo_model-1
+ else !<-- A restarted run.
+ i_end=isd+nhalo_model+1
+ endif
+ do j=j_start,j_end
+ do i=i_start,i_end
+ Atm%phis(i,j)=phis_reg(i,j)
+ enddo
+ enddo
+ endif
+ if(west_bc)then
+ i_end =ied
+ j_start=jsd
+ j_end =jed
+ if(.not.Atm%flagstruct%warm_start)then !<-- NOT a restarted run.
+ i_start=ied-nhalo_model+1
+ else !<-- A restarted run.
+ i_start=ied-nhalo_model-1
+ endif
+ do j=j_start,j_end
+ do i=i_start,i_end
+ Atm%phis(i,j)=phis_reg(i,j)
+ enddo
+ enddo
+ endif
+!
+!-----------------------------------------------------------------------
+!*** When nudging of specific humidity is selected, a reference
+!*** pressure profile is needed. Compute it now.
+!-----------------------------------------------------------------------
+!
+ allocate(pref(npz+1))
+ allocate(dum1d(npz+1))
+!
+ ps1=101325. !<-- Standard sea-level pressure (Pa) used as the reference sfc pressure.
+ pref(npz+1)=ps1
+ call get_eta_level(npz,ps1,pref(1),dum1d,Atm%ak,Atm%bk )
+!
+!-----------------------------------------------------------------------
+
+ contains
+
+!-----------------------------------------------------------------------
+!
+ subroutine compute_regional_bc_indices(regional_bc_bounds)
+!
+!-----------------------------------------------------------------------
+!*** This routine computes the starting and ending indices for
+!*** working arrays of task subdomains that lie on the edges
+!*** of the FV3 regional domain. These arrays will hold boundary
+!*** region values of scalar variables located at the grid cell
+!*** centers and wind components lying on the east/west sides
+!*** and north/south sides of each cell. Note that the width
+!*** of the domain's boundary region (4 rows) is currently
+!*** greater than the fundamental width of the task subdomains'
+!*** halo regions (3 rows). The variables isd,ied,jsd,jed are
+!*** the task subdomain index limits including their halos.
+!*** The diagram in subroutine regional_bc_data helps explain
+!*** why these index limits have the values they do.
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+ type(fv_regional_bc_bounds_type),intent(out) :: regional_bc_bounds
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer, parameter :: invalid_index = -99
+ integer :: halo_diff
+!
+!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! + regional_bc_bounds%is_north = invalid_index + regional_bc_bounds%ie_north = invalid_index + regional_bc_bounds%js_north = invalid_index + regional_bc_bounds%je_north = invalid_index + regional_bc_bounds%is_north_uvs = invalid_index + regional_bc_bounds%ie_north_uvs = invalid_index + regional_bc_bounds%js_north_uvs = invalid_index + regional_bc_bounds%je_north_uvs = invalid_index + regional_bc_bounds%is_north_uvw = invalid_index + regional_bc_bounds%ie_north_uvw = invalid_index + regional_bc_bounds%js_north_uvw = invalid_index + regional_bc_bounds%je_north_uvw = invalid_index + + regional_bc_bounds%is_south = invalid_index + regional_bc_bounds%ie_south = invalid_index + regional_bc_bounds%js_south = invalid_index + regional_bc_bounds%je_south = invalid_index + regional_bc_bounds%is_south_uvs = invalid_index + regional_bc_bounds%ie_south_uvs = invalid_index + regional_bc_bounds%js_south_uvs = invalid_index + regional_bc_bounds%je_south_uvs = invalid_index + regional_bc_bounds%is_south_uvw = invalid_index + regional_bc_bounds%ie_south_uvw = invalid_index + regional_bc_bounds%js_south_uvw = invalid_index + regional_bc_bounds%je_south_uvw = invalid_index + + regional_bc_bounds%is_east = invalid_index + regional_bc_bounds%ie_east = invalid_index + regional_bc_bounds%js_east = invalid_index + regional_bc_bounds%je_east = invalid_index + regional_bc_bounds%is_east_uvs = invalid_index + regional_bc_bounds%ie_east_uvs = invalid_index + regional_bc_bounds%js_east_uvs = invalid_index + regional_bc_bounds%je_east_uvs = invalid_index + regional_bc_bounds%is_east_uvw = invalid_index + regional_bc_bounds%ie_east_uvw = invalid_index + regional_bc_bounds%js_east_uvw = invalid_index + regional_bc_bounds%je_east_uvw = invalid_index + + regional_bc_bounds%is_west = invalid_index + regional_bc_bounds%ie_west = invalid_index + regional_bc_bounds%js_west = invalid_index + regional_bc_bounds%je_west = invalid_index + regional_bc_bounds%is_west_uvs = invalid_index + regional_bc_bounds%ie_west_uvs = invalid_index + regional_bc_bounds%js_west_uvs = invalid_index + regional_bc_bounds%je_west_uvs = invalid_index + regional_bc_bounds%is_west_uvw = invalid_index + regional_bc_bounds%ie_west_uvw = invalid_index + regional_bc_bounds%js_west_uvw = invalid_index + regional_bc_bounds%je_west_uvw = invalid_index +! +!----------------------------------------------------------------------- +!*** Scalar BC indices +!----------------------------------------------------------------------- +!*** These must reach one row beyond nhalo_model since we must +!*** surround the wind points on the cell edges with mass points. +!----------------------------------------------------------------------- +! + halo_diff=nhalo_data-nhalo_model +! +!----------- +!*** North +!----------- +! + if (north_bc) then + regional_bc_bounds%is_north=isd-1 + regional_bc_bounds%ie_north=ied+1 +! + regional_bc_bounds%js_north=jsd-1 + regional_bc_bounds%je_north=0 + endif +! +!----------- +!*** South +!----------- +! + if (south_bc) then + regional_bc_bounds%is_south=isd-1 + regional_bc_bounds%ie_south=ied+1 +! + regional_bc_bounds%js_south=jed-nhalo_model+1 + regional_bc_bounds%je_south=jed+1 + endif +! +!---------- +!*** East +!---------- +! + if (east_bc) then + regional_bc_bounds%is_east=isd-1 + regional_bc_bounds%ie_east=0 +! 
+ regional_bc_bounds%js_east=jsd-1
+ if(north_bc)then
+ regional_bc_bounds%js_east=1
+ endif
+!
+ regional_bc_bounds%je_east=jed+1
+ if(south_bc)then
+ regional_bc_bounds%je_east=jed-nhalo_model
+ endif
+ endif
+!
+!----------
+!*** West
+!----------
+!
+ if (west_bc) then
+ regional_bc_bounds%is_west=ied-nhalo_model+1
+ regional_bc_bounds%ie_west=ied+1
+!
+ regional_bc_bounds%js_west=jsd-1
+ if(north_bc)then
+ regional_bc_bounds%js_west=1
+ endif
+!
+ regional_bc_bounds%je_west=jed+1
+ if(south_bc)then
+ regional_bc_bounds%je_west=jed-nhalo_model
+ endif
+ endif
+!
+!-----------------------------------------------------------------------
+!*** Wind component BC indices
+!-----------------------------------------------------------------------
+!
+!-----------
+!*** North
+!-----------
+!
+ if (north_bc) then
+ regional_bc_bounds%is_north_uvs=isd
+ regional_bc_bounds%ie_north_uvs=ied
+!
+ regional_bc_bounds%js_north_uvs=jsd
+ regional_bc_bounds%je_north_uvs=1
+!
+ regional_bc_bounds%is_north_uvw=isd
+ regional_bc_bounds%ie_north_uvw=ied+1
+!
+ regional_bc_bounds%js_north_uvw=jsd
+ regional_bc_bounds%je_north_uvw=0
+ endif
+!
+!-----------
+!*** South
+!-----------
+!
+ if (south_bc) then
+ regional_bc_bounds%is_south_uvs=isd
+ regional_bc_bounds%ie_south_uvs=ied
+!
+ regional_bc_bounds%js_south_uvs=jed-nhalo_model+1
+ regional_bc_bounds%je_south_uvs=jed+1
+!
+ regional_bc_bounds%is_south_uvw=isd
+ regional_bc_bounds%ie_south_uvw=ied+1
+!
+ regional_bc_bounds%js_south_uvw=jed-nhalo_model+1
+ regional_bc_bounds%je_south_uvw=jed
+ endif
+!
+!----------
+!*** East
+!----------
+!
+ if (east_bc) then
+ regional_bc_bounds%is_east_uvs=isd
+ regional_bc_bounds%ie_east_uvs=0
+!
+ regional_bc_bounds%js_east_uvs=jsd
+ if(north_bc)then
+ regional_bc_bounds%js_east_uvs=1 !<-- north side of cell at j=1 (north bdry contains north side of j=1)
+ endif
+!
+ regional_bc_bounds%je_east_uvs=jed+1
+ if(south_bc)then
+ regional_bc_bounds%je_east_uvs=jed-nhalo_model+1
+ endif
+!
+ regional_bc_bounds%is_east_uvw=isd
+ regional_bc_bounds%ie_east_uvw=0 !<-- east side of cell at i=0
+!
+ regional_bc_bounds%js_east_uvw=jsd
+ if(north_bc)then
+ regional_bc_bounds%js_east_uvw=1
+ endif
+ regional_bc_bounds%je_east_uvw=jed
+ if(south_bc)then
+ regional_bc_bounds%je_east_uvw=jed-nhalo_model
+ endif
+ endif
+!
+!----------
+!*** West
+!----------
+!
+ if (west_bc) then
+ regional_bc_bounds%is_west_uvs=ied-nhalo_model+1
+ regional_bc_bounds%ie_west_uvs=ied
+!
+ regional_bc_bounds%js_west_uvs=jsd
+ if(north_bc)then
+ regional_bc_bounds%js_west_uvs=1
+ endif
+!
+ regional_bc_bounds%je_west_uvs=jed+1
+ if(south_bc)then
+ regional_bc_bounds%je_west_uvs=jed-nhalo_model+1
+ endif
+!
+ regional_bc_bounds%is_west_uvw=ied-nhalo_model+2
+ regional_bc_bounds%ie_west_uvw=ied+1
+!
+ regional_bc_bounds%js_west_uvw=jsd
+ if(north_bc)then
+ regional_bc_bounds%js_west_uvw=1
+ endif
+!
+ regional_bc_bounds%je_west_uvw=jed
+ if(south_bc)then
+ regional_bc_bounds%je_west_uvw=jed-nhalo_model
+ endif
+ endif
+!
+!-----------------------------------------------------------------------
+!
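+!-----------------------------------------------------------------------
+!*** Worked example (assuming nhalo_model=3): a north-boundary task
+!*** whose compute domain starts at j=1 has jsd=-2, so the scalar
+!*** limits above give js_north=jsd-1=-3 and je_north=0, i.e. the
+!*** three halo rows j=-2:0 plus the extra row j=-3 that surrounds
+!*** the wind points with mass points.
+!-----------------------------------------------------------------------
+!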
+ end subroutine compute_regional_bc_indices
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine read_regional_lon_lat
+!
+!-----------------------------------------------------------------------
+!*** Read the longitude/latitude of the grid cell corners from
+!*** the external file holding the additional row of data required
+!*** by the regional domain.
+!-----------------------------------------------------------------------
+ use netcdf
+!-----------------------------------------------------------------------
+ implicit none
+!-----------------------------------------------------------------------
+!
+!--------------------
+!*** Local variables
+!--------------------
+!
+ integer :: i_start_data,istat,j_start_data,n,ncid_grid,var_id
+!
+ character(len=150) :: filename,vname
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+!-----------------------------------------------------------------------
+!*** Open the data file.
+!-----------------------------------------------------------------------
+!
+ filename='INPUT/'//trim(grid_data)
+!
+ call check(nf90_open(filename,nf90_nowrite,ncid_grid)) !<-- Open the netcdf file; get the file ID.
+!
+!-----------------------------------------------------------------------
+!*** The longitude and latitude are on the super grid. We need only
+!*** the points at the corners of the grid cells, which are every
+!*** other point on the super grid.
+!-----------------------------------------------------------------------
+!
+ i_start_data=2*(isd+nhalo_model)-1 !<-- Supergrid i index of the first (i=isd-1) cell corner needed.
+ j_start_data=2*(jsd+nhalo_model)-1 !<-- Supergrid j index of the first (j=jsd-1) cell corner needed.
+!
+!---------------
+!*** Longitude
+!---------------
+!
+ vname='x' !<-- Geographic_longitude (degrees east) in netcdf file
+ call check(nf90_inq_varid(ncid_grid,vname,var_id)) !<-- Get the variable ID.
+ call check(nf90_get_var(ncid_grid,var_id &
+ ,grid_reg(isd-1:ied+2,jsd-1:jed+2,1) & !<-- Longitude of grid cell corners
+ ,start=(/i_start_data,j_start_data/) &
+ ,stride=(/2,2/) ) )
+!
+!--------------
+!*** Latitude
+!--------------
+!
+ vname='y' !<-- Geographic_latitude (degrees north) in netcdf file
+ call check(nf90_inq_varid(ncid_grid,vname,var_id)) !<-- Get the variable ID.
+ call check(nf90_get_var(ncid_grid,var_id &
+ ,grid_reg(isd-1:ied+2,jsd-1:jed+2,2) & !<-- Latitude of grid cell corners
+ ,start=(/i_start_data,j_start_data/) &
+ ,stride=(/2,2/) ) )
+!
+ call check(nf90_close(ncid_grid))
+!
+!-----------------------------------------------------------------------
+!*** Convert from degrees to radians.
+!-----------------------------------------------------------------------
+!
+ do n=1,2
+ do j=jsd-1,jed+2
+ do i=isd-1,ied+2
+ grid_reg(i,j,n)=grid_reg(i,j,n)*pi/180.
+ enddo
+ enddo
+ enddo
+!
+!-----------------------------------------------------------------------
+!*** Compute the longitude/latitude in the cell centers.
+!-----------------------------------------------------------------------
+!
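+!*** (A sketch of the intent, not a definition: cell_center2 presumably
+!*** forms the spherical midpoint of the four corner points, e.g. by
+!*** averaging their Cartesian positions and renormalizing.)
+!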
+ do j=jsd-1,jed+1
+ do i=isd-1,ied+1
+ call cell_center2(grid_reg(i,j, 1:2), grid_reg(i+1,j, 1:2), &
+ grid_reg(i,j+1,1:2), grid_reg(i+1,j+1,1:2), &
+ agrid_reg(i,j,1:2) )
+ enddo
+ enddo
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine read_regional_lon_lat
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine read_regional_filtered_topo
+!
+!-----------------------------------------------------------------------
+!*** Read the filtered topography including the extra outer row.
+!-----------------------------------------------------------------------
+ use netcdf
+!-----------------------------------------------------------------------
+ implicit none
+!-----------------------------------------------------------------------
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: i,i_start_data,istat,j,j_start_data,ncid_oro,var_id
+!
+ character(len=150) :: filename,vname
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+!-----------------------------------------------------------------------
+!*** Open the data file.
+!-----------------------------------------------------------------------
+!
+ filename='INPUT/'//trim(oro_data)
+
+ if (is_master()) then
+ write(*,23421)trim(filename)
+23421 format(' topo filename=',a)
+ endif
+!
+ call check(nf90_open(filename,nf90_nowrite,ncid_oro)) !<-- Open the netcdf file; get the file ID.
+!
+!-----------------------------------------------------------------------
+!*** Read in the data including the extra outer row.
+!-----------------------------------------------------------------------
+!
+ i_start_data=isd+nhalo_model
+ j_start_data=jsd+nhalo_model
+!
+ vname='orog_filt' !<-- Filtered topography (m) in netcdf file
+ call check(nf90_inq_varid(ncid_oro,vname,var_id)) !<-- Get the variable ID.
+ call check(nf90_get_var(ncid_oro,var_id &
+ ,phis_reg(isd-1:ied+1,jsd-1:jed+1) & !<-- Extracted filtered topography (m)
+ ,start=(/i_start_data,j_start_data/)))
+!
+ call check(nf90_close(ncid_oro))
+!
+!-----------------------------------------------------------------------
+!*** We want the geopotential, so multiply the terrain height by g.
+!-----------------------------------------------------------------------
+!
+ do j=jsd-1,jed+1
+ do i=isd-1,ied+1
+ phis_reg(i,j)=phis_reg(i,j)*grav
+ enddo
+ enddo
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+ end subroutine read_regional_filtered_topo
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine setup_regional_BC
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine start_regional_cold_start(Atm, ak, bk, levp &
+ ,is ,ie ,js ,je &
+ ,isd,ied,jsd,jed )
+!
+!----------------------------------------------------------------------- +!*** Prepare the regional run for a cold start. +!----------------------------------------------------------------------- + implicit none +!----------------------------------------------------------------------- +! +!------------------------ +!*** Argument variables +!------------------------ +! + type(fv_atmos_type),intent(inout) :: Atm !<-- Atm object for the current domain +! + integer ,intent(in) :: is ,ie ,js ,je & !<-- Integration limits of task subdomain + ,isd,ied,jsd,jed & !<-- Memory limits of task subdomain + ,levp +! + real,intent(in) :: ak(1:levp+1), bk(1:levp+1) +! +!--------------------- +!*** Local variables +!--------------------- +! + integer :: k +! +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! + call setup_regional_BC(Atm & + ,isd, ied, jsd, jed & + ,Atm%npx, Atm%npy ) +! + bc_hour=0 + call regional_bc_data(Atm, bc_hour & !<-- Fill time level t1 from BC file at 0 hours. + ,is, ie, js, je & + ,isd, ied, jsd, jed & + ,ak, bk ) + call regional_bc_t1_to_t0(BC_t1, BC_t0 & ! + ,Atm%npz & !<-- Move BC t1 data + ,Atm%ncnst & ! to t0. + ,Atm%regional_bc_bounds ) ! +! + bc_hour=bc_hour+bc_time_interval +! + call regional_bc_data(Atm, bc_hour & !<-- Fill time level t1 + ,is, ie, js, je & ! from the 2nd time level + ,isd, ied, jsd, jed & ! in the BC file. + ,ak, bk ) ! +! + allocate (ak_in(1:levp+1)) !<-- Save the input vertical structure for + allocate (bk_in(1:levp+1)) ! remapping BC updates during the forecast. + do k=1,levp+1 + ak_in(k)=ak(k) + bk_in(k)=bk(k) + enddo +! +!----------------------------------------------------------------------- +! + end subroutine start_regional_cold_start +! +!----------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!----------------------------------------------------------------------- +! + subroutine start_regional_restart(Atm & + ,isc,iec,jsc,jec & + ,isd,ied,jsd,jed ) +! +!----------------------------------------------------------------------- +!*** Prepare the regional forecast for a restart. +!----------------------------------------------------------------------- + implicit none +!----------------------------------------------------------------------- +! +!------------------------ +!*** Argument variables +!------------------------ +! + type(fv_atmos_type),intent(inout) :: Atm !<-- Atm object for the current domain +! + integer ,intent(in) :: isc,iec,jsc,jec & !<-- Integration limits of task subdomain + ,isd,ied,jsd,jed !<-- Memory limits of task subdomain +! +!--------------------- +!*** Local variables +!--------------------- +! + integer :: ierr, ios + real, allocatable :: wk2(:,:) +! + logical :: filtered_terrain = .true. + logical :: gfs_dwinds = .true. + integer :: levp = 64 + logical :: checker_tr = .false. + integer :: nt_checker = 0 + namelist /external_ic_nml/ filtered_terrain, levp, gfs_dwinds & + ,checker_tr, nt_checker +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! +!----------------------------------------------------------------------- +!*** Read the number of model layers in the external forecast (=levp). 
+!-----------------------------------------------------------------------
+!
+ read (input_nml_file,external_ic_nml,iostat=ios)
+ ierr = check_nml_error(ios,'external_ic_nml')
+ if(ierr/=0)then
+ write(0,11011)ierr
+11011 format(' start_regional_restart failed to read external_ic_nml ierr=',i3)
+ endif
+!
+!-----------------------------------------------------------------------
+!*** Preliminary setup for the forecast.
+!-----------------------------------------------------------------------
+!
+ call setup_regional_BC(Atm &
+ ,isd, ied, jsd, jed &
+ ,Atm%npx, Atm%npy )
+!
+ allocate (wk2(levp+1,2))
+ allocate (ak_in(levp+1)) !<-- Save the input vertical structure for
+ allocate (bk_in(levp+1)) ! remapping BC updates during the forecast.
+ call read_data('INPUT/gfs_ctrl.nc','vcoord',wk2, no_domain=.TRUE.)
+ ak_in(1:levp+1) = wk2(1:levp+1,1)
+ ak_in(1) = 1.e-9 !<-- Replace the zero at the model top with a tiny nonzero value.
+ bk_in(1:levp+1) = wk2(1:levp+1,2)
+ deallocate(wk2)
+ bc_hour=nint(current_time_in_seconds/3600.)
+!
+!-----------------------------------------------------------------------
+!*** Fill time level t1 from the BC file at the restart time.
+!-----------------------------------------------------------------------
+!
+ call regional_bc_data(Atm, bc_hour &
+ ,isc, iec, jsc, jec &
+ ,isd, ied, jsd, jed &
+ ,ak_in, bk_in )
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine start_regional_restart
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine read_new_bc_data(Atm, Time, Time_step_atmos, p_split &
+ ,isd,ied,jsd,jed )
+!
+!-----------------------------------------------------------------------
+!*** When it is time to read new boundary data from the external files,
+!*** move time level t1 to t0 and then read the new data into t1.
+!-----------------------------------------------------------------------
+ implicit none
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+ type(fv_atmos_type),intent(inout) :: Atm !<-- Atm object for the current domain
+ type(time_type),intent(in) :: Time !<-- Current forecast time
+ type(time_type),intent(in) :: Time_step_atmos !<-- Large (physics) timestep
+!
+ integer,intent(in) :: isd,ied,jsd,jed & !<-- Memory limits of task subdomain
+ ,p_split
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: atmos_time_step, sec
+ real :: dt_atmos
+ type(time_type) :: atmos_time
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+ atmos_time = Time - Atm%Time_init
+ atmos_time_step = atmos_time / Time_step_atmos
+ current_time_in_seconds = time_type_to_real( atmos_time )
+ if (mpp_pe() == 0 .and. Atm%flagstruct%fv_debug) write(*,"('current_time_seconds = ',f9.1)")current_time_in_seconds
+!
+ call get_time (Time_step_atmos, sec)
+ dt_atmos = real(sec)
+!
+ if(atmos_time_step==0.or.Atm%flagstruct%warm_start)then
+ ntimesteps_per_bc_update=nint(Atm%flagstruct%bc_update_interval*3600./(dt_atmos/real(abs(p_split)))) !<-- e.g., a 3-hr interval with dt_atmos=225s and p_split=1 gives 48 steps.
+ endif
+!
+ if(atmos_time_step+1>=ntimesteps_per_bc_update.and.mod(atmos_time_step,ntimesteps_per_bc_update)==0 &
+ .or. &
+ Atm%flagstruct%warm_start.and.begin_regional_restart)then
+!
+ begin_regional_restart=.false.
+ bc_hour=bc_hour+Atm%flagstruct%bc_update_interval
+!
+!-----------------------------------------------------------------------
+!*** Transfer the time level t1 data to t0.
+!-----------------------------------------------------------------------
+!
+ call regional_bc_t1_to_t0(BC_t1, BC_t0 &
+ ,Atm%npz &
+ ,Atm%ncnst &
+ ,Atm%regional_bc_bounds )
+!
+!-----------------------------------------------------------------------
+!*** Fill time level t1 from the BC file containing data from
+!*** the next time level.
+!-----------------------------------------------------------------------
+!
+ call regional_bc_data(Atm, bc_hour &
+ ,Atm%bd%is, Atm%bd%ie &
+ ,Atm%bd%js, Atm%bd%je &
+ ,isd, ied, jsd, jed &
+ ,ak_in, bk_in )
+ endif
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine read_new_bc_data
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine regional_bc_data(Atm,bc_hour &
+ ,is,ie,js,je &
+ ,isd,ied,jsd,jed &
+ ,ak,bk )
+!
+!-----------------------------------------------------------------------
+!*** Regional boundary data is obtained from the external BC file.
+!-----------------------------------------------------------------------
+ use netcdf
+!-----------------------------------------------------------------------
+ implicit none
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+!-----------
+!*** Input
+!-----------
+!
+ integer,intent(in) :: bc_hour !<-- The forecast hour of the BC file to be read.
+!
+ integer,intent(in) :: is,ie,js,je & !<-- Compute limits of task subdomain
+ ,isd,ied,jsd,jed !<-- Halo limits of task subdomain
+!
+ real,dimension(:),intent(in) :: ak,bk
+!
+!-----------------
+!*** Input/output
+!-----------------
+!
+ type(fv_atmos_type),target,intent(inout) :: Atm !<-- Atm object for the current domain
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: dimid,i,j,k,klev_in,klev_out,n,nlev
+!
+ integer :: is_north,is_south,is_east,is_west &
+ ,ie_north,ie_south,ie_east,ie_west &
+ ,js_north,js_south,js_east,js_west &
+ ,je_north,je_south,je_east,je_west
+!
+ integer :: is_u,ie_u,js_u,je_u &
+ ,is_v,ie_v,js_v,je_v
+!
+ integer :: is_input,ie_input,js_input,je_input
+!
+ integer :: i_start,i_end,j_start,j_end
+!
+ real,dimension(:,:,:),allocatable :: ud,vd,uc,vc
+!
+ real,dimension(:,:),allocatable :: ps_reg
+ real,dimension(:,:,:),allocatable :: ps_input,w_input,zh_input
+ real,dimension(:,:,:),allocatable :: u_s_input,v_s_input &
+ ,u_w_input,v_w_input
+ real,dimension(:,:,:,:),allocatable :: tracers_input
+!
+ real(kind=R_GRID), dimension(2):: p1, p2, p3, p4
+ real(kind=R_GRID), dimension(3):: e1, e2, ex, ey
+
+#undef USE_FMS_READ
+#ifdef USE_FMS_READ
+ integer :: isc2, iec2, jsc2, jec2
+ real(kind=R_GRID), allocatable, dimension(:,:) :: tmpx, tmpy
+ integer :: start(4), nread(4)
+ real(kind=R_GRID), allocatable, dimension(:,:,:) :: reg_grid
+ real(kind=R_GRID), allocatable, dimension(:,:,:) :: reg_agrid
+#endif
+!
+ logical,save :: computed_regional_bc_indices=.false.
+!
+ character(len=3) :: int_to_char
+ character(len=6) :: fmt='(i3.3)'
+!
+ character(len=50) :: file_name
+!
+ integer,save :: kount1=0,kount2=0
+ integer :: istart, iend, jstart, jend
+ integer :: npx, npy
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+!-----------------------------------------------------------------------
+!*** Only boundary tasks are needed.
+!-----------------------------------------------------------------------
+!
+ if(.not.(north_bc.or.south_bc.or.east_bc.or.west_bc))then
+ return
+ endif
+!
+!-----------------------------------------------------------------------
+!
+ klev_out=Atm%npz !<-- # of layers in vertical configuration of integration
+!
+!-----------------------------------------------------------------------
+!*** Construct the name of the regional BC file to be read.
+!-----------------------------------------------------------------------
+!
+ write(int_to_char,fmt) bc_hour
+ file_name='INPUT/gfs_bndy.tile7.'//int_to_char//'.nc'
+!
+ if (is_master()) then
+ write(*,22211)trim(file_name)
+22211 format(' regional_bc_data file_name=',a)
+ endif
+!-----------------------------------------------------------------------
+!*** Open the regional BC file.
+!*** Find the # of layers (klev_in) in the BC input.
+!-----------------------------------------------------------------------
+!
+ call check(nf90_open(file_name,nf90_nowrite,ncid)) !<-- Open the netcdf file; get the file ID.
+!
+ call check(nf90_inq_dimid(ncid,'lev',dimid)) !<-- Get the vertical dimension's NetCDF ID.
+ call check(nf90_inquire_dimension(ncid,dimid,len=klev_in)) !<-- Get the vertical dimension's value (klev_in).
+!
+!-----------------------------------------------------------------------
+!*** Allocate the boundary variables and initialize them to garbage.
+!-----------------------------------------------------------------------
+!
+ is_input=is-nhalo_data
+ ie_input=ie+nhalo_data
+ js_input=js-nhalo_data
+ je_input=je+nhalo_data
+!
+ npx = Atm%npx
+ npy = Atm%npy
+!
+ allocate( ps_input(is_input:ie_input,js_input:je_input,1)) ; ps_input=real_snan !<-- Sfc pressure
+ allocate( w_input(is_input:ie_input,js_input:je_input,1:klev_in)) ; w_input=real_snan !<-- Vertical velocity
+ allocate( zh_input(is_input:ie_input,js_input:je_input,1:klev_in+1)) ; zh_input=real_snan !<-- Interface heights
+ allocate(u_s_input(is_input:ie_input,js_input:je_input,1:klev_in)) ; u_s_input=real_snan !<-- D-grid u component
+ allocate(v_s_input(is_input:ie_input,js_input:je_input,1:klev_in)) ; v_s_input=real_snan !<-- C-grid v component
+ allocate(u_w_input(is_input:ie_input,js_input:je_input,1:klev_in)) ; u_w_input=real_snan !<-- C-grid u component
+ allocate(v_w_input(is_input:ie_input,js_input:je_input,1:klev_in)) ; v_w_input=real_snan !<-- D-grid v component
+!
+ allocate(tracers_input(is_input:ie_input,js_input:je_input,klev_in,ntracers)) !; tracers_input=real_snan
+ tracers_input=0. ! Temporary fix: zero so tracers absent from the BC file are not left undefined.
+!
+!-----------------------------------------------------------------------
+!*** Extract each variable from the regional BC file. The final
+!*** argument is the object being filled.
+!-----------------------------------------------------------------------
+!
+!------------------
+!*** Sfc pressure
+!------------------
+!
+ nlev=1
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'ps ' &
+ ,array_3d=ps_input ) !<-- ps is 2D but for simplicity here use a 3rd dim of 1
+!
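+!-----------------------------------------------------------------------
+!*** Note on the read calls: with fmt='(i3.3)', e.g., bc_hour=6 yields
+!*** file_name='INPUT/gfs_bndy.tile7.006.nc'. Variable names passed to
+!*** read_regional_bc_file are blank-padded to a fixed width, 2-D
+!*** fields reuse the 3-D interface (array_3d) with a vertical extent
+!*** of 1, and tracers all go into one 4-D array (array_4d) at the
+!*** slot given by tlev.
+!-----------------------------------------------------------------------
+!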
+!!!!! NOTE !!!!! Tracers not present in the BC file need to be filled with ZEROES.
+!-----------------------
+!*** Specific humidity
+!-----------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'sphum ' &
+ ,array_4d=tracers_input &
+ ,tlev=sphum_index )
+!
+!------------------
+!*** Liquid water
+!------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'liq_wat' &
+ ,array_4d=tracers_input &
+ ,tlev=liq_water_index )
+!
+!-----------
+!*** Ozone
+!-----------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'o3mr ' &
+ ,array_4d=tracers_input &
+ ,tlev=o3mr_index )
+!
+!-----------------------
+!*** Vertical velocity
+!-----------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'w ' &
+ ,array_3d=w_input)
+!
+!-----------------------
+!*** Interface heights
+!-----------------------
+!
+ nlev=klev_in+1
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'zh ' &
+ ,array_3d=zh_input)
+!
+!-----------------------------
+!*** U component south/north
+!-----------------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'u_s ' &
+ ,array_3d=u_s_input)
+!
+!-----------------------------
+!*** V component south/north
+!-----------------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'v_s ' &
+ ,array_3d=v_s_input)
+!
+!---------------------------
+!*** U component east/west
+!---------------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'u_w ' &
+ ,array_3d=u_w_input)
+!
+!---------------------------
+!*** V component east/west
+!---------------------------
+!
+ nlev=klev_in
+ call read_regional_bc_file(is_input,ie_input,js_input,je_input &
+ ,nlev &
+ ,ntracers &
+ ,'v_w ' &
+ ,array_3d=v_w_input)
+!
+!-----------------------------------------------------------------------
+!*** One final array needs to be allocated. It is the sfc pressure
+!*** in the domain's boundary region that is derived from the input
+!*** sfc pressure from the BC files. The derived sfc pressure will
+!*** be needed in the vertical remapping of the wind components to
+!*** the integration levels.
+!-----------------------------------------------------------------------
+!
+ allocate(ps_reg(is_input:ie_input,js_input:je_input)) ; ps_reg=-9999999 ! for now don't set to snan until remap dwinds is changed
+!
+!-----------------------------------------------------------------------
+!*** We have the boundary variables from the BC file on the levels
+!*** of the input data. Remap the scalars (the tracers, including
+!*** ozone, and the vertical velocity) to the FV3 domain levels.
+!*** Scalar remapping must be done on all four sides before remapping
+!*** of the winds since pressures are needed on each side of wind
+!*** points and so for a given wind component those pressures could
+!*** include values from two different boundary side regions.
+!-----------------------------------------------------------------------
+!
+! Definitions in this module greatly differ from those in existing nesting
+! code or elsewhere in FMS. North <--> South, East <--> West, and
+! North and South always span [isd-1 , ied+1] while East and West do not
+! go into the outermost corners (so they span [1, je], always).
+!-----------
+!*** North
+!-----------
+!
+ if(north_bc)then
+!
+ call remap_scalar_nggps_regional_bc(Atm &
+ ,'north' &
+
+ ,isd,ied,jsd,jed & !<-- Atm array indices w/halo
+
+ ,is_input & !<--
+ ,ie_input & ! Input array
+ ,js_input & ! index limits.
+ ,je_input & !<--
+
+ ,klev_in, klev_out &
+ ,ntracers &
+ ,ak, bk &
+
+ ,ps_input & !<--
+ ,tracers_input & ! BC vbls on
+ ,w_input & ! input model levels
+ ,zh_input & !<--
+
+ ,phis_reg & !<-- Filtered topography
+
+ ,ps_reg & !<-- Derived FV3 psfc in regional domain boundary region
+
+ ,BC_t1%north ) !<-- North BC vbls on final integration levels
+
+ if (is == 1) then
+ istart = 1
+ else
+ istart = isd
+ endif
+ if (ie == npx-1) then
+ iend = npx-1
+ else
+ iend = ied
+ endif
+
+ do k=1,npz
+ do j=jsd,0
+ do i=istart,iend
+ delz_regBC%south_t1(i,j,k) = BC_t1%north%delz_BC(i,j,k)
+ delz_regBC%south_t0(i,j,k) = BC_t0%north%delz_BC(i,j,k)
+ enddo
+ enddo
+ enddo
+
+ ! North, south include all corners
+ if (is == 1) then
+ do k=1,npz
+ do j=jsd,0
+ do i=isd,0
+ delz_regBC%west_t1(i,j,k) = BC_t1%north%delz_BC(i,j,k)
+ delz_regBC%west_t0(i,j,k) = BC_t0%north%delz_BC(i,j,k)
+ enddo
+ enddo
+ enddo
+ endif
+
+ if (ie == npx-1) then
+ do k=1,npz
+ do j=jsd,0
+ do i=npx,ied
+ delz_regBC%east_t1(i,j,k) = BC_t1%north%delz_BC(i,j,k)
+ delz_regBC%east_t0(i,j,k) = BC_t0%north%delz_BC(i,j,k)
+ enddo
+ enddo
+ enddo
+ endif
+!
+ endif
+!
+!-----------
+!*** South
+!-----------
+!
+ if(south_bc)then
+!
+ call remap_scalar_nggps_regional_bc(Atm &
+ ,'south' &
+
+ ,isd,ied,jsd,jed & !<-- Atm array indices w/halo
+
+ ,is_input & !<--
+ ,ie_input & ! Input array
+ ,js_input & ! index limits.
+ ,je_input & !<--
+
+ ,klev_in, klev_out &
+ ,ntracers &
+ ,ak, bk &
+
+ ,ps_input & !<--
+ ,tracers_input & ! BC vbls on
+ ,w_input & ! input model levels
+ ,zh_input & !<--
+
+ ,phis_reg & !<-- Filtered topography
+
+ ,ps_reg & !<-- Derived FV3 psfc in regional domain boundary region
+
+ ,BC_t1%south ) !<-- South BC vbls on final integration levels
+!
+
+ if (is == 1) then
+ istart = 1
+ else
+ istart = isd
+ endif
+ if (ie == npx-1) then
+ iend = npx-1
+ else
+ iend = ied
+ endif
+
+ do k=1,npz
+ do j=npy,jed
+ do i=istart,iend
+ delz_regBC%north_t1(i,j,k) = BC_t1%south%delz_BC(i,j,k)
+ delz_regBC%north_t0(i,j,k) = BC_t0%south%delz_BC(i,j,k)
+ enddo
+ enddo
+ enddo
+
+ !
North, south include all corners + if (is == 1) then + do k=1,npz + do j=npy,jed + do i=isd,0 + delz_regBC%west_t1(i,j,k) = BC_t1%south%delz_BC(i,j,k) + delz_regBC%west_t0(i,j,k) = BC_t0%south%delz_BC(i,j,k) + enddo + enddo + enddo + endif + + if (ie == npx-1) then + do k=1,npz + do j=npy,jed + do i=npx,ied + delz_regBC%east_t1(i,j,k) = BC_t1%south%delz_BC(i,j,k) + delz_regBC%east_t0(i,j,k) = BC_t0%south%delz_BC(i,j,k) + enddo + enddo + enddo + endif + endif +! +!---------- +!*** East +!---------- +! + if(east_bc)then +! + call remap_scalar_nggps_regional_bc(Atm & + ,'east ' & + + ,isd,ied,jsd,jed & !<-- Atm array indices w/halo + + ,is_input & !<-- + ,ie_input & ! Input array + ,js_input & ! index limits. + ,je_input & !<-- + + ,klev_in, klev_out & + ,ntracers & + ,ak, bk & + + ,ps_input & !<-- + ,tracers_input & ! BC vbls on + ,w_input & ! input model levels + ,zh_input & !<-- + + ,phis_reg & !<-- Filtered topography + + ,ps_reg & !<-- Derived FV3 psfc in regional domain boundary region + + ,BC_t1%east ) +! + if (js == 1) then + jstart = 1 + else + jstart = jsd + endif + if (je == npy-1) then + jend = je + else + jend = jed + endif + + + do k=1,npz + do j=jstart,jend + do i=isd,0 + delz_regBC%west_t1(i,j,k) = BC_t1%east%delz_BC(i,j,k) + delz_regBC%west_t0(i,j,k) = BC_t0%east%delz_BC(i,j,k) + enddo + enddo + enddo + + endif +! +!---------- +!*** West +!---------- +! + if(west_bc)then +! + call remap_scalar_nggps_regional_bc(Atm & + ,'west ' & + + ,isd,ied,jsd,jed & !<-- Atm array indices w/halo + + ,is_input & !<-- + ,ie_input & ! Input array + ,js_input & ! index limits. + ,je_input & !<-- + + ,klev_in, klev_out & + ,ntracers & + ,ak, bk & + + ,ps_input & !<-- + ,tracers_input & ! BC vbls on + ,w_input & ! input model levels + ,zh_input & !<-- + + ,phis_reg & !<-- Filtered topography + + ,ps_reg & !<-- Derived FV3 psfc in regional domain boundary region + + ,BC_t1%west ) +! + if (js == 1) then + jstart = 1 + else + jstart = jsd + endif + if (je == npy-1) then + jend = je + else + jend = jed + endif + + do k=1,npz + do j=jstart,jend + do i=npx,ied + delz_regBC%east_t1(i,j,k) = BC_t1%west%delz_BC(i,j,k) + delz_regBC%east_t0(i,j,k) = BC_t0%west%delz_BC(i,j,k) + enddo + enddo + enddo + endif +! +!----------------------------------------------------------------------- +!*** Now that we have the pressure throughout the boundary region +!*** including a row beyond the boundary winds we are ready to +!*** finalize those winds. +!----------------------------------------------------------------------- +! +!----------------------------------------------------------------------- +!*** Transform the D-grid wind components on the north side of +!*** the regional domain then remap them from the input levels +!*** to the integration levels. +!----------------------------------------------------------------------- +! +#ifdef USE_FMS_READ + isc2 = 2*(isd-1+nhalo_data)-1 + iec2 = 2*(ied+2+nhalo_data)-1 + jsc2 = 2*(jsd-1+nhalo_data)-1 + jec2 = 2*(jed+2+nhalo_data)-1 + allocate(tmpx(isc2:iec2, jsc2:jec2)) ; tmpx=dbl_snan + allocate(tmpy(isc2:iec2, jsc2:jec2)) ; tmpy=dbl_snan + start = 1; nread = 1 + start(1) = isc2; nread(1) = iec2 - isc2 + 1 + start(2) = jsc2; nread(2) = jec2 - jsc2 + 1 + call read_data("INPUT/grid.tile7.halo4.nc", 'x', tmpx, start, nread, no_domain=.TRUE.) + call read_data("INPUT/grid.tile7.halo4.nc", 'y', tmpy, start, nread, no_domain=.TRUE.) 
+ + allocate(reg_grid(isd-1:ied+2,jsd-1:jed+2,1:2)) ; reg_grid=dbl_snan + do j = jsd-1, jed+2 + do i = isd-1, ied+2 + reg_grid(i,j,1) = tmpx(2*(i+nhalo_data)-1, 2*(j+nhalo_data)-1)*pi/180. + reg_grid(i,j,2) = tmpy(2*(i+nhalo_data)-1, 2*(j+nhalo_data)-1)*pi/180. + if ( reg_grid(i,j,1) /= grid_reg(i,j,1) ) then + write(0,*)' reg_grid(i,j,1) /= grid_reg(i,j,1) ',i,j, reg_grid(i,j,1),grid_reg(i,j,1) + endif + enddo + enddo + + allocate(reg_agrid(isd-1:ied+1,jsd-1:jed+1,1:2)) ; reg_agrid=dbl_snan + do j=jsd-1,jed+1 + do i=isd-1,ied+1 + call cell_center2(reg_grid(i,j, 1:2), reg_grid(i+1,j, 1:2), & + reg_grid(i,j+1,1:2), reg_grid(i+1,j+1,1:2), & + reg_agrid(i,j,1:2) ) + enddo + enddo +#endif +! + if(north_bc)then +! + is_u=Atm%regional_bc_bounds%is_north_uvs + ie_u=Atm%regional_bc_bounds%ie_north_uvs + js_u=Atm%regional_bc_bounds%js_north_uvs + je_u=Atm%regional_bc_bounds%je_north_uvs +! + is_v=Atm%regional_bc_bounds%is_north_uvw + ie_v=Atm%regional_bc_bounds%ie_north_uvw + js_v=Atm%regional_bc_bounds%js_north_uvw + je_v=Atm%regional_bc_bounds%je_north_uvw +! + allocate(ud(is_u:ie_u,js_u:je_u,1:nlev)) ; ud=real_snan + allocate(vd(is_v:ie_v,js_v:je_v,1:nlev)) ; vd=real_snan + allocate(vc(is_u:ie_u,js_u:je_u,1:nlev)) ; vc=real_snan + allocate(uc(is_v:ie_v,js_v:je_v,1:nlev)) ; uc=real_snan +! + do k=1,nlev + do j=js_u,je_u + do i=is_u,ie_u + p1(:) = grid_reg(i, j,1:2) + p2(:) = grid_reg(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + ud(i,j,k) = u_s_input(i,j,k)*inner_prod(e1,ex)+v_s_input(i,j,k)*inner_prod(e1,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e2) !C-grid V-wind unit vector + vc(i,j,k) = u_s_input(i,j,k)*inner_prod(e2,ex)+v_s_input(i,j,k)*inner_prod(e2,ey) + enddo + enddo +! + do j=js_v,je_v + do i=is_v,ie_v + p1(:) = grid_reg(i,j ,1:2) + p2(:) = grid_reg(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + vd(i,j,k) = u_w_input(i,j,k)*inner_prod(e2,ex)+v_w_input(i,j,k)*inner_prod(e2,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e1) !C-grid U-wind unit vector + uc(i,j,k) = u_w_input(i,j,k)*inner_prod(e1,ex)+v_w_input(i,j,k)*inner_prod(e1,ey) + enddo + enddo + enddo +! + call remap_dwinds_regional_bc(Atm & + + ,is_input & !<-- + ,ie_input & ! Index limits for scalars + ,js_input & ! at center of north BC region grid cells. + ,je_input & !<-- + + ,is_u & !<-- + ,ie_u & ! Index limits for u component + ,js_u & ! on north edge of BC region grid cells. + ,je_u & !<-- + + ,is_v & !<-- + ,ie_v & ! Index limits for v component + ,js_v & ! on north edge of BC region grid cells. + ,je_v & !<-- + + ,klev_in, klev_out & !<-- data / model levels + ,ak, bk & + + ,ps_reg & !<-- BC values of sfc pressure + ,ud ,vd & !<-- BC values of D-grid u and v + ,uc ,vc & !<-- BC values of C-grid u and v + ,BC_t1%north ) !<-- North BC vbls on final integration levels + +! + deallocate(ud,vd,uc,vc) +! + endif +! +!----------------------------------------------------------------------- +!*** Transform the D-grid wind components on the south side of +!*** the regional domain then remap them from the input levels +!*** to the integration levels. +!----------------------------------------------------------------------- +! + if(south_bc)then +! 
+ is_u=Atm%regional_bc_bounds%is_south_uvs + ie_u=Atm%regional_bc_bounds%ie_south_uvs + js_u=Atm%regional_bc_bounds%js_south_uvs + je_u=Atm%regional_bc_bounds%je_south_uvs + is_v=Atm%regional_bc_bounds%is_south_uvw + ie_v=Atm%regional_bc_bounds%ie_south_uvw + js_v=Atm%regional_bc_bounds%js_south_uvw + je_v=Atm%regional_bc_bounds%je_south_uvw +! + allocate(ud(is_u:ie_u,js_u:je_u,1:nlev)) ; ud=real_snan + allocate(vd(is_v:ie_v,js_v:je_v,1:nlev)) ; vd=real_snan + allocate(vc(is_u:ie_u,js_u:je_u,1:nlev)) ; vc=real_snan + allocate(uc(is_v:ie_v,js_v:je_v,1:nlev)) ; uc=real_snan +! + do k=1,nlev + do j=js_u,je_u + do i=is_u,ie_u + p1(:) = grid_reg(i, j,1:2) + p2(:) = grid_reg(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + ud(i,j,k) = u_s_input(i,j,k)*inner_prod(e1,ex)+v_s_input(i,j,k)*inner_prod(e1,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e2) !C-grid V-wind unit vector + vc(i,j,k) = u_s_input(i,j,k)*inner_prod(e2,ex)+v_s_input(i,j,k)*inner_prod(e2,ey) + enddo + enddo +! + do j=js_v,je_v + do i=is_v,ie_v + p1(:) = grid_reg(i,j ,1:2) + p2(:) = grid_reg(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + vd(i,j,k) = u_w_input(i,j,k)*inner_prod(e2,ex)+v_w_input(i,j,k)*inner_prod(e2,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e1) !C-grid U-wind unit vector + uc(i,j,k) = u_w_input(i,j,k)*inner_prod(e1,ex)+v_w_input(i,j,k)*inner_prod(e1,ey) + enddo + enddo + enddo +! + call remap_dwinds_regional_bc(Atm & + + ,is_input & !<-- + ,ie_input & ! Index limits for scalars + ,js_input & ! at center of south BC region grid cells. + ,je_input & !<-- + + ,is_u & !<-- + ,ie_u & ! Index limits for u component + ,js_u & ! on north edge of BC region grid cells. + ,je_u & !<-- + + ,is_v & !<-- + ,ie_v & ! Index limits for v component + ,js_v & ! on east edge of BC region grid cells. + ,je_v & !<-- + + ,klev_in, klev_out & !<-- data / model levels + ,ak, bk & + + ,ps_reg & !<-- BC values of sfc pressure + ,ud, vd & !<-- BC values of D-grid u and v + ,uc, vc & !<-- BC values of C-grid u and v + + ,BC_t1%south ) !<-- South BC vbls on final integration levels +! + deallocate(ud,vd,uc,vc) +! + endif +! +!----------------------------------------------------------------------- +!*** Transform the D-grid wind components on the east side of +!*** the regional domain then remap them from the input levels +!*** to the integration levels. +!----------------------------------------------------------------------- +! + if(east_bc)then +! + is_u=Atm%regional_bc_bounds%is_east_uvs + ie_u=Atm%regional_bc_bounds%ie_east_uvs + js_u=Atm%regional_bc_bounds%js_east_uvs + je_u=Atm%regional_bc_bounds%je_east_uvs + is_v=Atm%regional_bc_bounds%is_east_uvw + ie_v=Atm%regional_bc_bounds%ie_east_uvw + js_v=Atm%regional_bc_bounds%js_east_uvw + je_v=Atm%regional_bc_bounds%je_east_uvw +! + allocate(ud(is_u:ie_u,js_u:je_u,1:nlev)) ; ud=real_snan + allocate(vd(is_v:ie_v,js_v:je_v,1:nlev)) ; vd=real_snan + allocate(vc(is_u:ie_u,js_u:je_u,1:nlev)) ; vc=real_snan + allocate(uc(is_v:ie_v,js_v:je_v,1:nlev)) ; uc=real_snan +! + do k=1,nlev + do j=js_u,je_u + do i=is_u,ie_u + p1(:) = grid_reg(i, j,1:2) + p2(:) = grid_reg(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + ud(i,j,k) = u_s_input(i,j,k)*inner_prod(e1,ex)+v_s_input(i,j,k)*inner_prod(e1,ey) + p4(:) = agrid_reg(i,j,1:2) ! 
cell centroid + call get_unit_vect2(p3, p4, e2) !C-grid V-wind unit vector + vc(i,j,k) = u_s_input(i,j,k)*inner_prod(e2,ex)+v_s_input(i,j,k)*inner_prod(e2,ey) + enddo + enddo +! +! + do j=js_v,je_v + do i=is_v,ie_v + p1(:) = grid_reg(i,j ,1:2) + p2(:) = grid_reg(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + vd(i,j,k) = u_w_input(i,j,k)*inner_prod(e2,ex)+v_w_input(i,j,k)*inner_prod(e2,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e1) !C-grid U-wind unit vector + uc(i,j,k) = u_w_input(i,j,k)*inner_prod(e1,ex)+v_w_input(i,j,k)*inner_prod(e1,ey) + enddo + enddo + enddo +! + call remap_dwinds_regional_bc(Atm & + + ,is_input & !<-- + ,ie_input & ! Index limits for scalars + ,js_input & ! at center of east BC region grid cells. + ,je_input & !<-- + + ,is_u & !<-- + ,ie_u & ! Index limits for u component + ,js_u & ! on north edge of BC region grid cells. + ,je_u & !<-- + + ,is_v & !<-- + ,ie_v & ! Index limits for v component + ,js_v & ! on east edge of BC region grid cells. + ,je_v & !<-- + + ,klev_in, klev_out & !<-- data / model levels + ,ak, bk & + + ,ps_reg & !<-- BC values of sfc pressure + ,ud, vd & !<-- BC values of D-grid u and v + ,uc, vc & !<-- BC values of C-grid u and v + + ,BC_t1%east ) !<-- East BC vbls on final integration levels +! + deallocate(ud,vd,uc,vc) +! + endif +! +!----------------------------------------------------------------------- +!*** Transform the D-grid wind components on the west side of +!*** the regional domain then remap them from the input levels +!*** to the integration levels. +!----------------------------------------------------------------------- +! + if(west_bc)then +! + is_u=Atm%regional_bc_bounds%is_west_uvs + ie_u=Atm%regional_bc_bounds%ie_west_uvs + js_u=Atm%regional_bc_bounds%js_west_uvs + je_u=Atm%regional_bc_bounds%je_west_uvs + is_v=Atm%regional_bc_bounds%is_west_uvw + ie_v=Atm%regional_bc_bounds%ie_west_uvw + js_v=Atm%regional_bc_bounds%js_west_uvw + je_v=Atm%regional_bc_bounds%je_west_uvw +! + allocate(ud(is_u:ie_u,js_u:je_u,1:nlev)) ; ud=real_snan + allocate(vd(is_v:ie_v,js_v:je_v,1:nlev)) ; vd=real_snan + allocate(vc(is_u:ie_u,js_u:je_u,1:nlev)) ; vc=real_snan + allocate(uc(is_v:ie_v,js_v:je_v,1:nlev)) ; uc=real_snan +! + do k=1,nlev + do j=js_u,je_u + do i=is_u,ie_u + p1(:) = grid_reg(i, j,1:2) + p2(:) = grid_reg(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + ud(i,j,k) = u_s_input(i,j,k)*inner_prod(e1,ex)+v_s_input(i,j,k)*inner_prod(e1,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e2) !C-grid V-wind unit vector + vc(i,j,k) = u_s_input(i,j,k)*inner_prod(e2,ex)+v_s_input(i,j,k)*inner_prod(e2,ey) + enddo + enddo +! + do j=js_v,je_v + do i=is_v,ie_v + p1(:) = grid_reg(i,j ,1:2) + p2(:) = grid_reg(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + vd(i,j,k) = u_w_input(i,j,k)*inner_prod(e2,ex)+v_w_input(i,j,k)*inner_prod(e2,ey) + p4(:) = agrid_reg(i,j,1:2) ! cell centroid + call get_unit_vect2(p3, p4, e1) !C-grid U-wind unit vector + uc(i,j,k) = u_w_input(i,j,k)*inner_prod(e1,ex)+v_w_input(i,j,k)*inner_prod(e1,ey) + enddo + enddo + enddo +! + call remap_dwinds_regional_bc(Atm & + + ,is_input & !<-- + ,ie_input & ! Index limits for scalars + ,js_input & ! at center of west BC region grid cells. + ,je_input & !<-- + + ,is_u & !<-- + ,ie_u & ! 
Index limits for u component
+ ,js_u & ! on north edge of BC region grid cells.
+ ,je_u & !<--
+
+ ,is_v & !<--
+ ,ie_v & ! Index limits for v component
+ ,js_v & ! on east edge of BC region grid cells.
+ ,je_v & !<--
+
+ ,klev_in, klev_out & !<-- data / model levels
+ ,ak, bk &
+
+ ,ps_reg & !<-- BC values of sfc pressure
+ ,ud, vd & !<-- BC values of D-grid u and v
+ ,uc, vc & !<-- BC values of C-grid u and v
+
+ ,BC_t1%west ) !<-- West BC vbls on final integration levels
+!
+ deallocate(ud,vd,uc,vc)
+!
+ endif
+!
+!-----------------------------------------------------------------------
+!*** Close the boundary file.
+!-----------------------------------------------------------------------
+!
+ call check(nf90_close(ncid))
+!
+!-----------------------------------------------------------------------
+!*** Deallocate working arrays.
+!-----------------------------------------------------------------------
+!
+ if(allocated(ps_input))then
+ deallocate(ps_input)
+ endif
+ if(allocated(zh_input))then
+ deallocate(zh_input)
+ endif
+ if(allocated(w_input))then
+ deallocate(w_input)
+ endif
+ if(allocated(tracers_input))then
+ deallocate(tracers_input)
+ endif
+ if(allocated(u_s_input))then
+ deallocate(u_s_input)
+ endif
+ if(allocated(u_w_input))then
+ deallocate(u_w_input)
+ endif
+ if(allocated(v_s_input))then
+ deallocate(v_s_input)
+ endif
+ if(allocated(v_w_input))then
+ deallocate(v_w_input)
+ endif
+!
+!-----------------------------------------------------------------------
+!*** Fill the remaining boundary arrays starting with the divergence.
+!-----------------------------------------------------------------------
+!
+ call fill_divgd_BC
+!
+!-----------------------------------------------------------------------
+!*** Fill the total condensate in the regional boundary array.
+!-----------------------------------------------------------------------
+!
+ call fill_q_con_BC
+!
+!-----------------------------------------------------------------------
+!*** Fill moist kappa in the regional domain boundary array.
+!-----------------------------------------------------------------------
+!
+#ifdef MOIST_CAPPA
+ call fill_cappa_BC
+#endif
+!
+!-----------------------------------------------------------------------
+!*** Convert the boundary region sensible temperature array to
+!*** FV3's modified virtual potential temperature.
+!-----------------------------------------------------------------------
+!
+ call convert_to_virt_pot_temp(isd,ied,jsd,jed,npz &
+ ,sphum_index,liq_water_index )
+!
+!-----------------------------------------------------------------------
+!*** If nudging of the specific humidity has been selected, nudge
+!*** the boundary values in the same way as is done for the
+!*** interior.
+!-----------------------------------------------------------------------
+!
+ if(Atm%flagstruct%nudge_qv)then
+ call nudge_qv_bc(Atm,isd,ied,jsd,jed)
+ endif
+!
+!-----------------------------------------------------------------------
+
+ contains
+
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+ subroutine fill_divgd_BC
+!
+!-----------------------------------------------------------------------
+!*** For now fill the boundary divergence with zero.
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!--------------------
+!*** Local variables
+!--------------------
+!
+      integer :: i,ie,is,j,je,js,k
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+      if(north_bc)then
+!
+        is_north=lbound(BC_t1%north%divgd_BC,1)
+        ie_north=ubound(BC_t1%north%divgd_BC,1)
+        js_north=lbound(BC_t1%north%divgd_BC,2)
+        je_north=ubound(BC_t1%north%divgd_BC,2)
+!
+        do k=1,klev_out
+          do j=js_north,je_north
+            do i=is_north,ie_north
+              BC_t1%north%divgd_BC(i,j,k)=0.
+            enddo
+          enddo
+        enddo
+!
+      endif
+
+      if(south_bc)then
+!
+        is_south=lbound(BC_t1%south%divgd_BC,1)
+        ie_south=ubound(BC_t1%south%divgd_BC,1)
+        js_south=lbound(BC_t1%south%divgd_BC,2)
+        je_south=ubound(BC_t1%south%divgd_BC,2)
+!
+        do k=1,klev_out
+          do j=js_south,je_south
+            do i=is_south,ie_south
+              BC_t1%south%divgd_BC(i,j,k)=0.
+            enddo
+          enddo
+        enddo
+      endif
+!
+      if(east_bc)then
+!
+        is_east=lbound(BC_t1%east%divgd_BC,1)
+        ie_east=ubound(BC_t1%east%divgd_BC,1)
+        js_east=lbound(BC_t1%east%divgd_BC,2)
+        je_east=ubound(BC_t1%east%divgd_BC,2)
+!
+        do k=1,klev_out
+          do j=js_east,je_east
+            do i=is_east,ie_east
+              BC_t1%east%divgd_BC(i,j,k)=0.
+            enddo
+          enddo
+        enddo
+!
+      endif
+!
+      if(west_bc)then
+!
+        is_west=lbound(BC_t1%west%divgd_BC,1)
+        ie_west=ubound(BC_t1%west%divgd_BC,1)
+        js_west=lbound(BC_t1%west%divgd_BC,2)
+        je_west=ubound(BC_t1%west%divgd_BC,2)
+!
+        do k=1,klev_out
+          do j=js_west,je_west
+            do i=is_west,ie_west
+              BC_t1%west%divgd_BC(i,j,k)=0.
+            enddo
+          enddo
+        enddo
+      endif
+!
+!-----------------------------------------------------------------------
+!
+      end subroutine fill_divgd_BC
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+      subroutine fill_q_con_BC
+!
+!-----------------------------------------------------------------------
+!*** For now fill the total condensate in the boundary region
+!*** with only the liquid water content.
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!--------------------
+!*** Local variables
+!--------------------
+!
+      integer :: i,ie,is,j,je,js,k
+!
+#ifdef USE_COND
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+      if(north_bc)then
+!
+        is_north=lbound(BC_t1%north%q_con_BC,1)
+        ie_north=ubound(BC_t1%north%q_con_BC,1)
+        js_north=lbound(BC_t1%north%q_con_BC,2)
+        je_north=ubound(BC_t1%north%q_con_BC,2)
+!
+        do k=1,klev_out
+          do j=js_north,je_north
+            do i=is_north,ie_north
+              BC_t1%north%q_con_BC(i,j,k)=BC_t1%north%q_BC(i,j,k,liq_water_index)
+            enddo
+          enddo
+        enddo
+!
+      endif
+
+      if(south_bc)then
+!
+        is_south=lbound(BC_t1%south%q_con_BC,1)
+        ie_south=ubound(BC_t1%south%q_con_BC,1)
+        js_south=lbound(BC_t1%south%q_con_BC,2)
+        je_south=ubound(BC_t1%south%q_con_BC,2)
+!
+        do k=1,klev_out
+          do j=js_south,je_south
+            do i=is_south,ie_south
+              BC_t1%south%q_con_BC(i,j,k)=BC_t1%south%q_BC(i,j,k,liq_water_index)
+            enddo
+          enddo
+        enddo
+      endif
+!
+      if(east_bc)then
+!
+        is_east=lbound(BC_t1%east%q_con_BC,1)
+        ie_east=ubound(BC_t1%east%q_con_BC,1)
+        js_east=lbound(BC_t1%east%q_con_BC,2)
+        je_east=ubound(BC_t1%east%q_con_BC,2)
+!
+        do k=1,klev_out
+          do j=js_east,je_east
+            do i=is_east,ie_east
+              BC_t1%east%q_con_BC(i,j,k)=BC_t1%east%q_BC(i,j,k,liq_water_index)
+            enddo
+          enddo
+        enddo
+!
+      endif
+
+      if(west_bc)then
+!
+        is_west=lbound(BC_t1%west%q_con_BC,1)
+        ie_west=ubound(BC_t1%west%q_con_BC,1)
+        js_west=lbound(BC_t1%west%q_con_BC,2)
+        je_west=ubound(BC_t1%west%q_con_BC,2)
+!
+        do k=1,klev_out
+          do j=js_west,je_west
+            do i=is_west,ie_west
+              BC_t1%west%q_con_BC(i,j,k)=BC_t1%west%q_BC(i,j,k,liq_water_index)
+            enddo
+          enddo
+        enddo
+      endif
+!
+!-----------------------------------------------------------------------
+!
+#endif /* USE_COND */
+      end subroutine fill_q_con_BC
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+      subroutine fill_cappa_BC
+!
+!-----------------------------------------------------------------------
+!*** Compute cappa in the regional domain boundary area following
+!*** Zhao-Carr microphysics.
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+      integer :: i1,i2,j1,j2
+!
+      real,dimension(:,:,:),pointer :: cappa,temp,liq_wat,sphum
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+#ifdef MOIST_CAPPA
+      if(north_bc)then
+        i1=lbound(BC_t1%north%cappa_BC,1)
+        i2=ubound(BC_t1%north%cappa_BC,1)
+        j1=lbound(BC_t1%north%cappa_BC,2)
+        j2=ubound(BC_t1%north%cappa_BC,2)
+        cappa  =>BC_t1%north%cappa_BC
+        temp   =>BC_t1%north%pt_BC
+        liq_wat=>BC_t1%north%q_BC(:,:,:,liq_water_index)
+        sphum  =>BC_t1%north%q_BC(:,:,:,sphum_index)
+        call compute_cappa(i1,i2,j1,j2,cappa,temp,liq_wat,sphum)
+      endif
+!
+      if(south_bc)then
+        i1=lbound(BC_t1%south%cappa_BC,1)
+        i2=ubound(BC_t1%south%cappa_BC,1)
+        j1=lbound(BC_t1%south%cappa_BC,2)
+        j2=ubound(BC_t1%south%cappa_BC,2)
+        cappa  =>BC_t1%south%cappa_BC
+        temp   =>BC_t1%south%pt_BC
+        liq_wat=>BC_t1%south%q_BC(:,:,:,liq_water_index)
+        sphum  =>BC_t1%south%q_BC(:,:,:,sphum_index)
+        call compute_cappa(i1,i2,j1,j2,cappa,temp,liq_wat,sphum)
+      endif
+!
+      if(east_bc)then
+        i1=lbound(BC_t1%east%cappa_BC,1)
+        i2=ubound(BC_t1%east%cappa_BC,1)
+        j1=lbound(BC_t1%east%cappa_BC,2)
+        j2=ubound(BC_t1%east%cappa_BC,2)
+        cappa  =>BC_t1%east%cappa_BC
+        temp   =>BC_t1%east%pt_BC
+        liq_wat=>BC_t1%east%q_BC(:,:,:,liq_water_index)
+        sphum  =>BC_t1%east%q_BC(:,:,:,sphum_index)
+        call compute_cappa(i1,i2,j1,j2,cappa,temp,liq_wat,sphum)
+      endif
+!
+      if(west_bc)then
+        i1=lbound(BC_t1%west%cappa_BC,1)
+        i2=ubound(BC_t1%west%cappa_BC,1)
+        j1=lbound(BC_t1%west%cappa_BC,2)
+        j2=ubound(BC_t1%west%cappa_BC,2)
+        cappa  =>BC_t1%west%cappa_BC
+        temp   =>BC_t1%west%pt_BC
+        liq_wat=>BC_t1%west%q_BC(:,:,:,liq_water_index)
+        sphum  =>BC_t1%west%q_BC(:,:,:,sphum_index)
+        call compute_cappa(i1,i2,j1,j2,cappa,temp,liq_wat,sphum)
+      endif
+!
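+!*** (All four sides above delegate to compute_cappa below; only the
+!***  pointer targets for cappa, temperature, and moisture differ.)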
+!-----------------------------------------------------------------------
+!
+#endif /* MOIST_CAPPA */
+      end subroutine fill_cappa_BC
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+      subroutine compute_cappa(i1,i2,j1,j2,cappa,temp,liq_wat,sphum)
+!
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!---------------------
+!*** Input variables
+!---------------------
+!
+      integer,intent(in) :: i1,i2,j1,j2
+!
+      real,dimension(i1:i2,j1:j2,1:npz),intent(in) :: temp,liq_wat,sphum
+!
+!----------------------
+!*** Output variables
+!----------------------
+!
+      real,dimension(i1:i2,j1:j2,1:npz),intent(out) :: cappa
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+      integer :: i,ie,is,j,je,js,k
+!
+      real :: cvm,qd,ql,qs,qv
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+      is=lbound(cappa,1)
+      ie=ubound(cappa,1)
+      js=lbound(cappa,2)
+      je=ubound(cappa,2)
+!
+      do k=1,klev_out
+        do j=js,je
+          do i=is,ie
+            qd=max(0.,liq_wat(i,j,k))
+            if( temp(i,j,k) > tice )then
+              qs=0.
+            elseif( temp(i,j,k) < tice-t_i0 )then
+              qs=qd
+            else
+              qs=qd*(tice-temp(i,j,k))/t_i0
+            endif
+            ql=qd-qs
+            qv=max(0.,sphum(i,j,k))
+            cvm=(1.-(qv+qd))*cv_air + qv*cv_vap + ql*c_liq + qs*c_ice
+!
+            cappa(i,j,k)=rdgas/(rdgas+cvm/(1.+zvir*sphum(i,j,k)))
+!
+          enddo
+        enddo
+      enddo
+!
+!-----------------------------------------------------------------------
+!
+      end subroutine compute_cappa
+!
+!-----------------------------------------------------------------------
+!
+      end subroutine regional_bc_data
+
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+      subroutine read_regional_bc_file(is_input,ie_input &
+                                      ,js_input,je_input &
+                                      ,nlev &
+                                      ,ntracers &
+                                      ,var_name_root &
+                                      ,array_3d &
+                                      ,array_4d &
+                                      ,tlev )
+!-----------------------------------------------------------------------
+!*** Read the boundary data from the external file generated by
+!*** chgres.
+!-----------------------------------------------------------------------
+      use netcdf
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+!----------
+!*** Input
+!----------
+!
+      integer,intent(in) :: is_input,ie_input,js_input,je_input,nlev
+      integer,intent(in) :: ntracers
+!
+      integer,intent(in),optional :: tlev !<-- Position of current tracer among all of them
+!
+      character(len=7),intent(in) :: var_name_root !<-- Root of variable name in the boundary file
+!
+!------------
+!*** Output
+!------------
+!
+      real,dimension(is_input:ie_input,js_input:je_input,1:nlev),intent(out),optional :: array_3d !<-- The input 3-D variable's coverage of task subdomain
+!
+      real,dimension(is_input:ie_input,js_input:je_input,1:nlev,1:ntracers),intent(out),optional :: array_4d !<-- The input 4-D variable's coverage of subdomain
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
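+! (halo, lat, and lon receive the dimension lengths inquired from the
+!  boundary file below.)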
+ integer :: halo,lat,lev,lon +! + integer :: i_count,i_start_array,i_start_data,i_end_array & + ,j_count,j_start_array,j_start_data,j_end_array +! + integer :: dim_id,nctype,ndims,var_id +! + character(len=5) :: dim_name_x & !<-- Dimension names in + ,dim_name_y ! the BC file +! + character(len=20) :: var_name !<-- Variable name in the boundary NetCDF file +! +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! +!----------------------------------------------------------------------- +!*** Set the dimension information for the given side of the domain. +!----------------------------------------------------------------------- +! +!----------------------------------------------------------------------- +!*** First consider the north and south sides of the regional domain. +!*** Take care of the dimensions' names, IDs, and lengths. +!----------------------------------------------------------------------- +! + if(north_bc)then +! + dim_name_x='lon' + if(var_name_root=='u_w'.or.var_name_root=='v_w')then + dim_name_x='lonp' !<-- Wind components on west/east sides of cells + endif +! + call check(nf90_inq_dimid(ncid,dim_name_x,dim_id)) !<-- Obtain the dimension ID of the x coordinate. + call check(nf90_inquire_dimension(ncid,dim_id,len=lon)) !<-- # of points in the x dimension (lon) +! + dim_name_y='halo' + if(var_name_root=='u_s'.or.var_name_root=='v_s')then + dim_name_y='halop' !<-- Wind components on south/north sides of cells + endif +! + call check(nf90_inq_dimid(ncid,dim_name_y,dim_id)) !<-- Obtain the dimension ID of the y coordinate. + call check(nf90_inquire_dimension(ncid,dim_id,len=halo)) !<-- # of points in the y dimension (halo) +! +!----------------------------------------------------------------------- +!*** Construct the variable's name in the NetCDF file and set +!*** the start locations and point counts for the data file and +!*** for the BC arrays being filled. The input array begins +!*** receiving data at (i_start_array,j_start_array), etc. +!*** The read of the data for the given input array begins at +!*** (i_start_data,j_start_data) and encompasses i_count by +!*** j_count datapoints in each direction. +!----------------------------------------------------------------------- +! + var_name=trim(var_name_root)//"_bottom" +! + i_start_array=is_input + i_end_array =ie_input + j_start_array=js_input + if(trim(var_name_root)=='u_s'.or.trim(var_name_root)=='v_s')then + j_end_array=js_input+nhalo_data + else + j_end_array =js_input+nhalo_data-1 + endif +! + i_start_data=i_start_array+nhalo_data !<-- File data begins at 1. + i_count=i_end_array-i_start_array+1 + j_start_data=1 + j_count=j_end_array-j_start_array+1 +! +!----------------------------------------------------------------------- +!*** Fill this task's subset of north boundary data for +!*** this 3-D or 4-D variable. +!----------------------------------------------------------------------- +! + call check(nf90_inq_varid(ncid,var_name,var_id)) !<-- Get this variable's ID. +! + if(present(array_4d))then !<-- 4-D variable + call check(nf90_get_var(ncid,var_id & + ,array_4d(i_start_array:i_end_array & !<-- Fill this task's domain boundary halo. + ,j_start_array:j_end_array & + ,1:nlev, tlev) & + ,start=(/i_start_data,j_start_data,1,tlev/) & !<-- Start reading the data array here. 
+ ,count=(/i_count,j_count,nlev,1/))) !<-- Extent of data to read in each dimension. +! + else !<-- 3-D variable + call check(nf90_get_var(ncid,var_id & + ,array_3d(i_start_array:i_end_array & !<-- Fill this task's domain boundary halo. + ,j_start_array:j_end_array & + ,1:nlev) & + ,start=(/i_start_data,j_start_data,1/) & !<-- Start reading the data array here. + ,count=(/i_count,j_count,nlev/))) !<-- Extent of data to read in each dimension. + endif +! + endif ! north_bc +! + if(south_bc)then +! + dim_name_x='lon' + if(var_name_root=='u_w'.or.var_name_root=='v_w')then + dim_name_x='lonp' !<-- Wind components on west/east sides of cells + endif +! + call check(nf90_inq_dimid(ncid,dim_name_x,dim_id)) !<-- Obtain the dimension ID of the x coordinate. + call check(nf90_inquire_dimension(ncid,dim_id,len=lon)) !<-- # of points in the x dimension (lon) +! + dim_name_y='halo' + if(var_name_root=='u_s'.or.var_name_root=='v_s')then + dim_name_y='halop' !<-- Wind components on south/north sides of cells + endif +! + call check(nf90_inq_dimid(ncid,dim_name_y,dim_id)) !<-- Obtain the dimension ID of the y coordinate. + call check(nf90_inquire_dimension(ncid,dim_id,len=halo)) !<-- # of points in the y dimension (halo) +! +!----------------------------------------------------------------------- +!*** Construct the variable's name in the NetCDF file and set +!*** the start locations and point counts for the data file and +!*** for the BC arrays being filled. The input array begins +!*** receiving data at (i_start_array,j_start_array), etc. +!*** The read of the data for the given input array begins at +!*** (i_start_data,j_start_data) and encompasses i_count by +!*** j_count datapoints in each direction. +!----------------------------------------------------------------------- +! + var_name=trim(var_name_root)//"_top" +! + i_start_array=is_input + i_end_array =ie_input + j_start_array=je_input-nhalo_data+1 + j_end_array =je_input +! + i_start_data=i_start_array+nhalo_data !<-- File data begins at 1. + i_count=i_end_array-i_start_array+1 + j_start_data=1 + j_count=j_end_array-j_start_array+1 +! +!----------------------------------------------------------------------- +!*** Fill this task's subset of south boundary data for +!*** this 3-D or 4-D variable. +!----------------------------------------------------------------------- +! + call check(nf90_inq_varid(ncid,var_name,var_id)) !<-- Get this variable's ID. +! + if(present(array_4d))then !<-- 4-D variable + call check(nf90_get_var(ncid,var_id & + ,array_4d(i_start_array:i_end_array & !<-- Fill this task's domain boundary halo. + ,j_start_array:j_end_array & + ,1:nlev, tlev) & + ,start=(/i_start_data,j_start_data,1,tlev/) & !<-- Start reading the data array here. + ,count=(/i_count,j_count,nlev,1/))) !<-- Extent of data to read in each dimension. +! + else !<-- 3-D variable + call check(nf90_get_var(ncid,var_id & + ,array_3d(i_start_array:i_end_array & !<-- Fill this task's domain boundary halo. + ,j_start_array:j_end_array & + ,1:nlev) & + ,start=(/i_start_data,j_start_data,1/) & !<-- Start reading the data array here. + ,count=(/i_count,j_count,nlev/))) !<-- Extent of data to read in each dimension. + endif +! + endif ! south_bc +! +!----------------------------------------------------------------------- +!*** Now consider the east and west sides of the regional domain. +!*** Take care of the dimensions' names, IDs, and lengths. +!----------------------------------------------------------------------- +! + if(east_bc)then +! 
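+!  (This mirrors the north/south logic above with the x and y roles
+!   exchanged; the '_left'/'_right' suffixes, like '_bottom'/'_top'
+!   above, are the boundary file's own names for the domain sides.)
+!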
+        dim_name_x='halo'
+        if(var_name_root=='u_w'.or.var_name_root=='v_w')then
+          dim_name_x='halop' !<-- Wind components on west/east sides of cells
+        endif
+!
+        call check(nf90_inq_dimid(ncid,dim_name_x,dim_id)) !<-- Obtain the dimension ID of the x coordinate.
+        call check(nf90_inquire_dimension(ncid,dim_id,len=halo)) !<-- # of points in the x dimension (halo)
+!
+        dim_name_y='lat'
+        if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+          dim_name_y='latm' !<-- Wind components on south/north sides of cells
+!
+!-----------------------------------------------------------------------
+!*** Note that latm=lat-1. The y extent of u_s and v_s is 1 less than
+!*** the regular y extent of the west/east sides because the
+!*** north/south pieces of data for those variables already include
+!*** the values at both the south and north ends of the west and east
+!*** sides, which reduces the total number of u_s and v_s values by 1.
+!-----------------------------------------------------------------------
+!
+        endif
+!
+        call check(nf90_inq_dimid(ncid,dim_name_y,dim_id)) !<-- Obtain the dimension ID of the y coordinate.
+        call check(nf90_inquire_dimension(ncid,dim_id,len=lat)) !<-- # of points in the y dimension (lat)
+!
+!-----------------------------------------------------------------------
+!*** Construct the variable's name in the NetCDF file and set
+!*** the start locations and point counts in the data file and
+!*** in the BC arrays being filled.
+!-----------------------------------------------------------------------
+!
+        j_start_array=js_input
+        j_end_array  =je_input
+!
+        var_name=trim(var_name_root)//"_left"
+!
+        i_start_array=is_input
+!
+        if(var_name_root=='u_w'.or.var_name_root=='v_w')then
+          i_end_array=is_input+nhalo_data
+        else
+          i_end_array=is_input+nhalo_data-1
+        endif
+!
+        if(north_bc)then
+          if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+            j_start_array=js_input+nhalo_data+1
+          else
+            j_start_array=js_input+nhalo_data
+          endif
+        endif
+        if(south_bc)then
+          j_end_array =je_input-nhalo_data
+        endif
+!
+        i_start_data=1
+        i_count=i_end_array-i_start_array+1
+        if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+          j_start_data=j_start_array-1
+        else
+          j_start_data=j_start_array
+        endif
+        j_count=j_end_array-j_start_array+1
+!
+!-----------------------------------------------------------------------
+!*** Fill this task's subset of east boundary data.
+!-----------------------------------------------------------------------
+!
+        call check(nf90_inq_varid(ncid,var_name,var_id)) !<-- Get this variable's ID.
+!
+        if(present(array_4d))then !<-- 4-D variable
+          call check(nf90_get_var(ncid,var_id &
+                                 ,array_4d(i_start_array:i_end_array &    !<-- Fill this task's domain boundary halo.
+                                          ,j_start_array:j_end_array &
+                                          ,1:nlev, tlev) &
+                                 ,start=(/i_start_data,j_start_data,1,tlev/) & !<-- Start reading the data array here.
+                                 ,count=(/i_count,j_count,nlev,1/))) !<-- Extent of data to read in each dimension.
+!
+        else !<-- 3-D variable
+          call check(nf90_get_var(ncid,var_id &
+                                 ,array_3d(i_start_array:i_end_array &    !<-- Fill this task's domain boundary halo.
+                                          ,j_start_array:j_end_array &
+                                          ,1:nlev) &
+                                 ,start=(/i_start_data,j_start_data,1/) & !<-- Start reading the data array here.
+                                 ,count=(/i_count,j_count,nlev/))) !<-- Extent of data to read in each dimension.
+        endif
+!
+      endif ! east_bc
+!
+      if(west_bc)then
+!
+        dim_name_x='halo'
+        if(var_name_root=='u_w'.or.var_name_root=='v_w')then
+          dim_name_x='halop' !<-- Wind components on west/east sides of cells
+        endif
+!
+        call check(nf90_inq_dimid(ncid,dim_name_x,dim_id)) !<-- Obtain the dimension ID of the x coordinate.
+        call check(nf90_inquire_dimension(ncid,dim_id,len=halo)) !<-- # of points in the x dimension (halo)
+!
+        dim_name_y='lat'
+        if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+          dim_name_y='latm' !<-- Wind components on south/north sides of cells
+!
+!-----------------------------------------------------------------------
+!*** Note that latm=lat-1. The y extent of u_s and v_s is 1 less than
+!*** the regular y extent of the west/east sides because the
+!*** north/south pieces of data for those variables already include
+!*** the values at both the south and north ends of the west and east
+!*** sides, which reduces the total number of u_s and v_s values by 1.
+!-----------------------------------------------------------------------
+!
+        endif
+!
+        call check(nf90_inq_dimid(ncid,dim_name_y,dim_id)) !<-- Obtain the dimension ID of the y coordinate.
+        call check(nf90_inquire_dimension(ncid,dim_id,len=lat)) !<-- # of points in the y dimension (lat)
+!
+!-----------------------------------------------------------------------
+!*** Construct the variable's name in the NetCDF file and set
+!*** the start locations and point counts in the data file and
+!*** in the BC arrays being filled.
+!-----------------------------------------------------------------------
+!
+        j_start_array=js_input
+        j_end_array  =je_input
+!
+        var_name=trim(var_name_root)//"_right"
+!
+        i_start_array=ie_input-nhalo_data+1
+        i_end_array=ie_input
+!
+        if(north_bc)then
+          if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+            j_start_array=js_input+nhalo_data+1
+          else
+            j_start_array=js_input+nhalo_data
+          endif
+        endif
+        if(south_bc)then
+          j_end_array =je_input-nhalo_data
+        endif
+!
+        i_start_data=1
+        i_count=i_end_array-i_start_array+1
+        if(var_name_root=='u_s'.or.var_name_root=='v_s')then
+          j_start_data=j_start_array-1
+        else
+          j_start_data=j_start_array
+        endif
+        j_count=j_end_array-j_start_array+1
+!
+!-----------------------------------------------------------------------
+!*** Fill this task's subset of west boundary data.
+!-----------------------------------------------------------------------
+!
+        call check(nf90_inq_varid(ncid,var_name,var_id)) !<-- Get this variable's ID.
+!
+        if(present(array_4d))then !<-- 4-D variable
+          call check(nf90_get_var(ncid,var_id &
+                                 ,array_4d(i_start_array:i_end_array &    !<-- Fill this task's domain boundary halo.
+                                          ,j_start_array:j_end_array &
+                                          ,1:nlev, tlev) &
+                                 ,start=(/i_start_data,j_start_data,1,tlev/) & !<-- Start reading the data array here.
+                                 ,count=(/i_count,j_count,nlev,1/))) !<-- Extent of data to read in each dimension.
+!
+        else !<-- 3-D variable
+          call check(nf90_get_var(ncid,var_id &
+                                 ,array_3d(i_start_array:i_end_array &    !<-- Fill this task's domain boundary halo.
+                                          ,j_start_array:j_end_array &
+                                          ,1:nlev) &
+                                 ,start=(/i_start_data,j_start_data,1/) & !<-- Start reading the data array here.
+                                 ,count=(/i_count,j_count,nlev/))) !<-- Extent of data to read in each dimension.
+        endif
+!
+      endif ! west_bc
+!
+!-----------------------------------------------------------------------
+!
+      end subroutine read_regional_bc_file
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+      subroutine check(status)
+      use netcdf
+      integer,intent(in) :: status
+!
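+!*** Abort with the NetCDF library's error string if the call failed.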
+      if(status /= nf90_noerr) then
+        write(0,*)' check netcdf status=',status
+        call mpp_error(FATAL, ' NetCDF error ' // trim(nf90_strerror(status)))
+      endif
+      end subroutine check
+!
+!-----------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!-----------------------------------------------------------------------
+!
+      subroutine allocate_regional_BC_arrays(side &
+                                            ,north_bc,south_bc &
+                                            ,east_bc,west_bc &
+                                            ,is_0,ie_0,js_0,je_0 &
+                                            ,is_sn,ie_sn,js_sn,je_sn &
+                                            ,is_we,ie_we,js_we,je_we &
+                                            ,klev &
+                                            ,ntracers &
+                                            ,BC_side )
+!
+!-----------------------------------------------------------------------
+      implicit none
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+      integer,intent(in) :: klev,ntracers
+!
+      integer,intent(in) :: is_0,ie_0,js_0,je_0 !<-- Start/end BC indices for cell centers
+      integer,intent(in) :: is_sn,ie_sn,js_sn,je_sn !<-- Start/end BC indices for south/north cell edges
+      integer,intent(in) :: is_we,ie_we,js_we,je_we !<-- Start/end BC indices for west/east cell edges
+!
+      character(len=5),intent(in) :: side !<-- Which side are we allocating?
+!
+      logical,intent(in) :: north_bc,south_bc,east_bc,west_bc !<-- Which sides is this task on?
+!
+      type(fv_regional_BC_variables),intent(inout) :: BC_side !<-- inout: intent(out) would auto-deallocate the components and defeat the guard below.
+!
+!---------------------------------------------------------------------
+!*********************************************************************
+!---------------------------------------------------------------------
+!
+      if(allocated(BC_side%delp_BC))then
+        return !<-- The BC arrays are already allocated so exit.
+      endif
+!
+      allocate(BC_side%delp_BC (is_0:ie_0,js_0:je_0,klev)) ; BC_side%delp_BC=real_snan
+!
+      allocate(BC_side%q_BC (is_0:ie_0,js_0:je_0,1:klev,1:ntracers)) ; BC_side%q_BC=real_snan
+!
+#ifndef SW_DYNAMICS
+      allocate(BC_side%pt_BC  (is_0:ie_0,js_0:je_0,klev)) ; BC_side%pt_BC=real_snan
+      allocate(BC_side%w_BC   (is_0:ie_0,js_0:je_0,klev)) ; BC_side%w_BC=real_snan
+      allocate(BC_side%delz_BC(is_0:ie_0,js_0:je_0,klev)) ; BC_side%delz_BC=real_snan
+#ifdef USE_COND
+      allocate(BC_side%q_con_BC(is_0:ie_0,js_0:je_0,klev)) ; BC_side%q_con_BC=real_snan
+#ifdef MOIST_CAPPA
+      allocate(BC_side%cappa_BC(is_0:ie_0,js_0:je_0,klev)) ; BC_side%cappa_BC=real_snan
+#endif
+#endif
+#endif
+!
+!--------------------
+!*** Wind components
+!--------------------
+!
+!** D-grid u, C-grid v
+!
+      allocate(BC_side%u_BC (is_sn:ie_sn, js_sn:je_sn, klev)) ; BC_side%u_BC=real_snan
+      allocate(BC_side%vc_BC(is_sn:ie_sn, js_sn:je_sn, klev)) ; BC_side%vc_BC=real_snan
+!
+!** C-grid u, D-grid v
+!
+      allocate(BC_side%uc_BC(is_we:ie_we, js_we:je_we, klev)) ; BC_side%uc_BC=real_snan
+      allocate(BC_side%v_BC (is_we:ie_we, js_we:je_we, klev)) ; BC_side%v_BC=real_snan
+      allocate(BC_side%divgd_BC(is_we:ie_we,js_sn:je_sn,klev)) ; BC_side%divgd_BC=real_snan
+!
+!---------------------------------------------------------------------
+!
+      end subroutine allocate_regional_BC_arrays
+!
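+!---------------------------------------------------------------------
+!*** A hypothetical usage sketch of the allocator above (illustration
+!*** only, not a call made in this file; the index variables
+!*** is_0,...,je_we are assumed to have been filled for the north
+!*** side from Atm%regional_bc_bounds beforehand):
+!***
+!***   call allocate_regional_BC_arrays('north'                     &
+!***                                   ,north_bc,south_bc           &
+!***                                   ,east_bc,west_bc             &
+!***                                   ,is_0,ie_0,js_0,je_0         &
+!***                                   ,is_sn,ie_sn,js_sn,je_sn     &
+!***                                   ,is_we,ie_we,js_we,je_we     &
+!***                                   ,npz                         & !<-- klev (assumed: # of integration levels)
+!***                                   ,ntracers                    &
+!***                                   ,BC_t1%north )
+!---------------------------------------------------------------------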
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+
+subroutine remap_scalar_nggps_regional_bc(Atm &
+                                         ,side &
+                                         ,isd,ied,jsd,jed &
+                                         ,is_bc,ie_bc,js_bc,je_bc &
+                                         ,km, npz, ncnst, ak0, bk0 &
+                                         ,psc, qa, omga, zh &
+                                         ,phis_reg &
+                                         ,ps &
+                                         ,BC_side )
+
+ type(fv_atmos_type), intent(inout) :: Atm
+ integer, intent(in):: isd,ied,jsd,jed !<-- index limits of the Atm arrays w/halo=nhalo_model
+ integer, intent(in):: is_bc,ie_bc,js_bc,je_bc !<-- index limits of working arrays on boundary task subdomains (halo=nhalo_data)
+ integer, intent(in):: km &    !<-- # of levels in 3-D input variables
+                      ,npz &   !<-- # of levels in final 3-D integration variables
+                      ,ncnst   !<-- # of tracer variables
+ real, intent(in):: ak0(km+1), bk0(km+1)
+ real, intent(in), dimension(is_bc:ie_bc,js_bc:je_bc):: psc
+ real, intent(in), dimension(is_bc:ie_bc,js_bc:je_bc,km):: omga
+ real, intent(in), dimension(is_bc:ie_bc,js_bc:je_bc,km,ncnst):: qa
+ real, intent(in), dimension(is_bc:ie_bc,js_bc:je_bc,km+1):: zh
+!xreal, intent(in), dimension(isd-1:ied+1,jsd-1:jed+1):: phis_reg !<-- Filtered sfc geopotential from preprocessing.
+ real, intent(inout), dimension(isd-1:ied+1,jsd-1:jed+1):: phis_reg !<-- Filtered sfc geopotential from preprocessing.
+ real, intent(out),dimension(is_bc:ie_bc,js_bc:je_bc) :: ps !<-- sfc p in regional domain boundary region
+ character(len=5),intent(in) :: side
+ type(fv_regional_BC_variables),intent(inout) :: BC_side !<-- The BC variables on a domain side at the final integration levels.
+
+! local:
+!
+ real, dimension(:,:),allocatable :: pe0
+ real, dimension(:,:),allocatable :: qn1
+ real, dimension(:,:),allocatable :: dp2
+ real, dimension(:,:),allocatable :: pe1
+ real, dimension(:,:),allocatable :: qp
+!
+ real wk(is_bc:ie_bc,js_bc:je_bc)
+ real, dimension(is_bc:ie_bc,js_bc:je_bc):: phis
+
+!!! High-precision
+ real(kind=R_GRID), dimension(is_bc:ie_bc,npz+1):: pn1
+ real(kind=R_GRID):: gz_fv(npz+1)
+ real(kind=R_GRID), dimension(2*km+1):: gz, pn
+ real(kind=R_GRID), dimension(is_bc:ie_bc,km+1):: pn0
+ real(kind=R_GRID):: pst
+!!! High-precision
+ integer i,ie,is,j,je,js,k,l,m, k2,iq
+ integer sphum, o3mr, liq_wat, ice_wat, rainwat, snowwat, graupel, cld_amt
+!
+!---------------------------------------------------------------------------------
+!
+ sphum   = get_tracer_index(MODEL_ATMOS, 'sphum')
+ liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat')
+ ice_wat = get_tracer_index(MODEL_ATMOS, 'ice_wat')
+ rainwat = get_tracer_index(MODEL_ATMOS, 'rainwat')
+ snowwat = get_tracer_index(MODEL_ATMOS, 'snowwat')
+ graupel = get_tracer_index(MODEL_ATMOS, 'graupel')
+ cld_amt = get_tracer_index(MODEL_ATMOS, 'cld_amt')
+ o3mr    = get_tracer_index(MODEL_ATMOS, 'o3mr')
+
+ k2 = max(10, km/2)
+
+ if (mpp_pe()==1) then
+   print *, 'sphum = ', sphum
+   print *, 'clwmr = ', liq_wat
+   print *, ' o3mr = ', o3mr
+   print *, 'ncnst = ', ncnst
+ endif
+
+ if ( sphum/=1 ) then
+   call mpp_error(FATAL,'SPHUM must be 1st tracer')
+ endif
+!
+!---------------------------------------------------------------------------------
+!*** First compute over the extended boundary regions with halo=nhalo_data.
+!*** This is needed to obtain pressures that will surround the wind points.
+!---------------------------------------------------------------------------------
+!
+ is=is_bc
+ if(side=='west')then
+   is=ie_bc-nhalo_data+1
+ endif
+!
+ ie=ie_bc
+ if(side=='east')then
+   ie=is_bc+nhalo_data-1
+ endif
+!
+ js=js_bc
+ if(side=='south')then
+   js=je_bc-nhalo_data+1
+ endif
+!
+ je=je_bc
+ if(side=='north')then
+   je=js_bc+nhalo_data-1
+ endif
+!
+
+ allocate(pe0(is:ie,km+1))  ; pe0=real_snan
+ allocate(qn1(is:ie,npz))   ; qn1=real_snan
+ allocate(dp2(is:ie,npz))   ; dp2=real_snan
+ allocate(pe1(is:ie,npz+1)) ; pe1=real_snan
+ allocate(qp (is:ie,km))    ; qp=real_snan
+!
+!---------------------------------------------------------------------------------
+ jloop1: do j=js,je
+!---------------------------------------------------------------------------------
+!
+   do k=1,km+1
+     do i=is,ie
+       pe0(i,k) = ak0(k) + bk0(k)*psc(i,j)
+       pn0(i,k) = log(pe0(i,k))
+     enddo
+   enddo
+
+   do i=is,ie
+     do k=1,km+1
+       pn(k) = pn0(i,k)
+       gz(k) = zh(i,j,k)*grav
+     enddo
+! Use log-p for interpolation/extrapolation
+! mirror image method:
+     do k=km+2, km+k2
+       l = 2*(km+1) - k
+       gz(k) = 2.*gz(km+1) - gz(l)
+       pn(k) = 2.*pn(km+1) - pn(l)
+     enddo
+
+     do k=km+k2-1, 2, -1
+       if( phis_reg(i,j).le.gz(k) .and. phis_reg(i,j).ge.gz(k+1) ) then
+         pst = pn(k) + (pn(k+1)-pn(k))*(gz(k)-phis_reg(i,j))/(gz(k)-gz(k+1))
+         go to 123
+       endif
+     enddo
+ 123 ps(i,j) = exp(pst)
+
+   enddo ! i-loop
+
+!---------------------------------------------------------------------------------
+ enddo jloop1
+!---------------------------------------------------------------------------------
+
+!---------------------------------------------------------------------------------
+!*** Transfer values from the expanded boundary array for sfc pressure into
+!*** the Atm object.
+!---------------------------------------------------------------------------------
+!
+ is=lbound(Atm%ps,1)
+ ie=ubound(Atm%ps,1)
+ js=lbound(Atm%ps,2)
+ je=ubound(Atm%ps,2)
+!
+ do j=js,je
+   do i=is,ie
+     Atm%ps(i,j)=ps(i,j)
+   enddo
+ enddo
+!
+!---------------------------------------------------------------------------------
+!*** Now compute over the normal boundary regions with halo=nhalo_model.
+!*** Use the dimensions of one of the permanent BC variables in Atm
+!*** as the loop limits so any side of the domain can be addressed.
+!---------------------------------------------------------------------------------
+!
+ is=lbound(BC_side%delp_BC,1)
+ ie=ubound(BC_side%delp_BC,1)
+ js=lbound(BC_side%delp_BC,2)
+ je=ubound(BC_side%delp_BC,2)
+!
+!---------------------------------------------------------------------------------
+ jloop2: do j=js,je
+!---------------------------------------------------------------------------------
+   do k=1,km+1
+     do i=is,ie
+       pe0(i,k) = ak0(k) + bk0(k)*psc(i,j)
+       pn0(i,k) = log(pe0(i,k))
+     enddo
+   enddo
+!
+   do i=is,ie
+     pe1(i,1) = Atm%ak(1)
+     pn1(i,1) = log(pe1(i,1))
+   enddo
+   do k=2,npz+1
+     do i=is,ie
+       pe1(i,k) = Atm%ak(k) + Atm%bk(k)*ps(i,j)
+       pn1(i,k) = log(pe1(i,k))
+     enddo
+   enddo
+
+! * Compute delp
+   do k=1,npz
+     do i=is,ie
+       dp2(i,k) = pe1(i,k+1) - pe1(i,k)
+       BC_side%delp_BC(i,j,k) = dp2(i,k)
+     enddo
+   enddo
+
+! Need to set unassigned tracers to 0??
+! map sphum, o3mr, liq_wat tracers
+   do iq=1,ncnst
+     do k=1,km
+       do i=is,ie
+         qp(i,k) = qa(i,j,k,iq)
+       enddo
+     enddo
+
+     call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, 0, 8, Atm%ptop)
+
+     if ( iq==sphum ) then
+       call fillq(ie-is+1, npz, 1, qn1, dp2)
+     else
+       call fillz(ie-is+1, npz, 1, qn1, dp2)
+     endif
+! The HiRAM step of blending model sphum with NCEP data is obsolete because nggps is always cold starting...
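+! (mappm above remapped this tracer from the km input layers bounded by
+!  pe0 onto the npz model layers bounded by pe1; fillq/fillz then filled
+!  any negative values before the result is stored below.)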
+     do k=1,npz
+       do i=is,ie
+         BC_side%q_BC(i,j,k,iq) = qn1(i,k)
+       enddo
+     enddo
+   enddo
+
+!---------------------------------------------------
+! Retrieve temperature using GFS geopotential height
+!---------------------------------------------------
+!
+   i_loop: do i=is,ie
+!
+! Make sure FV3 top is lower than GFS; cannot do extrapolation above the top at this point
+     if ( pn1(i,1) .lt. pn0(i,1) ) then
+       call mpp_error(FATAL,'FV3 top higher than NCEP/GFS')
+     endif
+
+     do k=1,km+1
+       pn(k) = pn0(i,k)
+       gz(k) = zh(i,j,k)*grav
+     enddo
+!-------------------------------------------------
+     do k=km+2, km+k2
+       l = 2*(km+1) - k
+       gz(k) = 2.*gz(km+1) - gz(l)
+       pn(k) = 2.*pn(km+1) - pn(l)
+     enddo
+!-------------------------------------------------
+
+     gz_fv(npz+1) = phis_reg(i,j)
+
+     m = 1
+
+     do k=1,npz
+! Searching using FV3 log(pe): pn1
+#ifdef USE_ISOTHERMO
+       do l=m,km
+         if ( (pn1(i,k).le.pn(l+1)) .and. (pn1(i,k).ge.pn(l)) ) then
+           gz_fv(k) = gz(l) + (gz(l+1)-gz(l))*(pn1(i,k)-pn(l))/(pn(l+1)-pn(l))
+           goto 555
+         elseif ( pn1(i,k) .gt. pn(km+1) ) then
+! Isothermal under ground; linear in log-p extrapolation
+           gz_fv(k) = gz(km+1) + (gz_fv(npz+1)-gz(km+1))*(pn1(i,k)-pn(km+1))/(pn1(i,npz+1)-pn(km+1))
+           goto 555
+         endif
+       enddo
+#else
+       do l=m,km+k2-1
+         if ( (pn1(i,k).le.pn(l+1)) .and. (pn1(i,k).ge.pn(l)) ) then
+           gz_fv(k) = gz(l) + (gz(l+1)-gz(l))*(pn1(i,k)-pn(l))/(pn(l+1)-pn(l))
+           goto 555
+         endif
+       enddo
+#endif
+555    m = l
+     enddo
+
+!xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+!xxx DO WE NEED Atm%peln to have values in the boundary region?
+!xxx FOR NOW COMMENT IT OUT.
+!xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+!xxx do k=1,npz+1
+!xxx   Atm%peln(i,k,j) = pn1(i,k)
+!xxx enddo
+
+! Compute true temperature using hydrostatic balance
+     do k=1,npz
+       BC_side%pt_BC(i,j,k) = (gz_fv(k)-gz_fv(k+1))/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*BC_side%q_BC(i,j,k,sphum)) )
+     enddo
+
+     if ( .not. Atm%flagstruct%hydrostatic ) then
+       do k=1,npz
+         BC_side%delz_BC(i,j,k) = (gz_fv(k+1) - gz_fv(k)) / grav
+       enddo
+     endif
+
+   enddo i_loop
+
+!-----------------------------------------------------------------------
+! separate cloud water and cloud ice
+! From Jan-Huey Chen's HiRAM code
+!-----------------------------------------------------------------------
+
+   if ( Atm%flagstruct%nwat .eq. 6 ) then
+     do k=1,npz
+       do i=is,ie
+         qn1(i,k) = BC_side%q_BC(i,j,k,liq_wat)
+         BC_side%q_BC(i,j,k,rainwat) = 0.
+         BC_side%q_BC(i,j,k,snowwat) = 0.
+         BC_side%q_BC(i,j,k,graupel) = 0.
+         if (cld_amt .gt. 0) BC_side%q_BC(i,j,k,cld_amt) = 0.
+         if ( BC_side%pt_BC(i,j,k) > 273.16 ) then ! > 0C all liq_wat
+           BC_side%q_BC(i,j,k,liq_wat) = qn1(i,k)
+           BC_side%q_BC(i,j,k,ice_wat) = 0.
+#ifdef ORIG_CLOUDS_PART
+         else if ( BC_side%pt_BC(i,j,k) < 258.16 ) then ! < -15C all ice_wat
+           BC_side%q_BC(i,j,k,liq_wat) = 0.
+           BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k)
+         else ! between -15~0C: linear interpolation
+           BC_side%q_BC(i,j,k,liq_wat) = qn1(i,k)*((BC_side%pt_BC(i,j,k)-258.16)/15.)
+           BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k) - BC_side%q_BC(i,j,k,liq_wat)
+         endif
+#else
+         else if ( BC_side%pt_BC(i,j,k) < 233.16 ) then ! < -40C all ice_wat
+           BC_side%q_BC(i,j,k,liq_wat) = 0.
+           BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k)
+         else
+           if ( k.eq.1 ) then ! between [-40,0]: linear interpolation
+             BC_side%q_BC(i,j,k,liq_wat) = qn1(i,k)*((BC_side%pt_BC(i,j,k)-233.16)/40.)
+             BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k) - BC_side%q_BC(i,j,k,liq_wat)
+           else
+             if (BC_side%pt_BC(i,j,k)<258.16 .and. BC_side%q_BC(i,j,k-1,ice_wat)>1.e-5 ) then
+               BC_side%q_BC(i,j,k,liq_wat) = 0.
+               BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k)
+             else ! between [-40,0]: linear interpolation
+               BC_side%q_BC(i,j,k,liq_wat) = qn1(i,k)*((BC_side%pt_BC(i,j,k)-233.16)/40.)
+               BC_side%q_BC(i,j,k,ice_wat) = qn1(i,k) - BC_side%q_BC(i,j,k,liq_wat)
+             endif
+           endif
+         endif
+#endif
+         call mp_auto_conversion(BC_side%q_BC(i,j,k,liq_wat), BC_side%q_BC(i,j,k,rainwat), &
+                                 BC_side%q_BC(i,j,k,ice_wat), BC_side%q_BC(i,j,k,snowwat) )
+       enddo
+     enddo
+   endif
+
+!-------------------------------------------------------------
+! map omega
+!-------------------------------------------------------------
+   if ( .not. Atm%flagstruct%hydrostatic ) then
+     do k=1,km
+       do i=is,ie
+         qp(i,k) = omga(i,j,k)
+       enddo
+     enddo
+     call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, -1, 4, Atm%ptop)
+     do k=1,npz
+       do i=is,ie
+         BC_side%w_BC(i,j,k) = qn1(i,k)/BC_side%delp_BC(i,j,k)*BC_side%delz_BC(i,j,k)
+       enddo
+     enddo
+   endif
+
+ enddo jloop2
+
+! Add some diagnostics:
+!xxxcall p_maxmin('PS_model (mb)', Atm%ps(is:ie,js:je), is, ie, js, je, 1, 0.01)
+!xxxcall p_maxmin('PT_model', Atm%pt(is:ie,js:je,1:npz), is, ie, js, je, npz, 1.)
+ do j=js,je
+   do i=is,ie
+     wk(i,j) = phis_reg(i,j)/grav - zh(i,j,km+1)
+   enddo
+ enddo
+!xxxcall pmaxmn('ZS_diff (m)', wk, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain)
+
+ do j=js,je
+   do i=is,ie
+     wk(i,j) = ps(i,j) - psc(i,j)
+   enddo
+ enddo
+!xxxcall pmaxmn('PS_diff (mb)', wk, is, ie, js, je, 1, 0.01, Atm%gridstruct%area_64, Atm%domain)
+ deallocate (pe0,qn1,dp2,pe1,qp)
+ if (is_master()) write(*,*) 'done remap_scalar_nggps_regional_bc'
+!---------------------------------------------------------------------
+
+ end subroutine remap_scalar_nggps_regional_bc
+
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+
+ subroutine remap_dwinds_regional_bc(Atm &
+                                    ,is_input,ie_input &
+                                    ,js_input,je_input &
+                                    ,is_u,ie_u,js_u,je_u &
+                                    ,is_v,ie_v,js_v,je_v &
+                                    ,km, npz &
+                                    ,ak0, bk0 &
+                                    ,psc, ud, vd, uc, vc &
+                                    ,BC_side )
+ type(fv_atmos_type), intent(inout) :: Atm
+ integer, intent(in):: is_input, ie_input, js_input, je_input !<-- index limits of the boundary arrays with halo=nhalo_data
+ integer, intent(in):: is_u,ie_u,js_u,je_u !<-- index limits of D-grid u in this boundary region
+ integer, intent(in):: is_v,ie_v,js_v,je_v !<-- index limits of D-grid v in this boundary region
+ integer, intent(in):: km &   !<-- # of levels in 3-D input variables
+                      ,npz    !<-- # of levels in final 3-D integration variables
+ real, intent(in):: ak0(km+1), bk0(km+1)
+
+ real, intent(in) :: psc(is_input:ie_input,js_input:je_input)
+
+ real, intent(in):: ud(is_u:ie_u,js_u:je_u,km)
+ real, intent(in):: vc(is_u:ie_u,js_u:je_u,km)
+ real, intent(in):: vd(is_v:ie_v,js_v:je_v,km)
+ real, intent(in):: uc(is_v:ie_v,js_v:je_v,km)
+ type(fv_regional_BC_variables),intent(inout) :: BC_side !<-- The BC variables on a domain side at the final integration levels.
+! local:
+ real, dimension(:,:),allocatable :: pe0
+ real, dimension(:,:),allocatable :: pe1
+ real, dimension(:,:),allocatable :: qn1_d,qn1_c
+ integer i,j,k
+
+ allocate(pe0  (is_u:ie_u, km+1))  ; pe0=real_snan
+ allocate(pe1  (is_u:ie_u, npz+1)) ; pe1=real_snan
+ allocate(qn1_d(is_u:ie_u, npz))   ; qn1_d=real_snan
+ allocate(qn1_c(is_u:ie_u, npz))   ; qn1_c=real_snan
+
+!----------------------------------------------------------------------------------------------
+ j_loopu: do j=js_u,je_u
+!----------------------------------------------------------------------------------------------
+
+!------
+! map u
+!------
+   do k=1,km+1
+     do i=is_u,ie_u
+       pe0(i,k) = ak0(k) + bk0(k)*0.5*(psc(i,j-1)+psc(i,j))
+     enddo
+   enddo
+   do k=1,npz+1
+     do i=is_u,ie_u
+       pe1(i,k) = Atm%ak(k) + Atm%bk(k)*0.5*(psc(i,j-1)+psc(i,j))
+     enddo
+   enddo
+   call mappm(km, pe0(is_u:ie_u,1:km+1), ud(is_u:ie_u,j,1:km), npz, pe1(is_u:ie_u,1:npz+1), &
+              qn1_d(is_u:ie_u,1:npz), is_u,ie_u, -1, 8, Atm%ptop )
+   call mappm(km, pe0(is_u:ie_u,1:km+1), vc(is_u:ie_u,j,1:km), npz, pe1(is_u:ie_u,1:npz+1), &
+              qn1_c(is_u:ie_u,1:npz), is_u,ie_u, -1, 8, Atm%ptop )
+   do k=1,npz
+     do i=is_u,ie_u
+       BC_side%u_BC(i,j,k)  = qn1_d(i,k)
+       BC_side%vc_BC(i,j,k) = qn1_c(i,k)
+     enddo
+   enddo
+
+ enddo j_loopu
+
+ deallocate(pe0)
+ deallocate(pe1)
+ deallocate(qn1_d)
+ deallocate(qn1_c)
+
+ allocate(pe0  (is_v:ie_v, km+1))  ; pe0=real_snan
+ allocate(pe1  (is_v:ie_v, npz+1)) ; pe1=real_snan
+ allocate(qn1_d(is_v:ie_v, npz))   ; qn1_d=real_snan
+ allocate(qn1_c(is_v:ie_v, npz))   ; qn1_c=real_snan
+
+!----------------------------------------------------------------------------------------------
+ j_loopv: do j=js_v,je_v
+!----------------------------------------------------------------------------------------------
+!
+!------
+! map v
+!------
+
+   do k=1,km+1
+     do i=is_v,ie_v
+       pe0(i,k) = ak0(k) + bk0(k)*0.5*(psc(i-1,j)+psc(i,j))
+     enddo
+   enddo
+   do k=1,npz+1
+     do i=is_v,ie_v
+       pe1(i,k) = Atm%ak(k) + Atm%bk(k)*0.5*(psc(i-1,j)+psc(i,j))
+     enddo
+   enddo
+   call mappm(km, pe0(is_v:ie_v,1:km+1), vd(is_v:ie_v,j,1:km), npz, pe1(is_v:ie_v,1:npz+1), &
+              qn1_d(is_v:ie_v,1:npz), is_v,ie_v, -1, 8, Atm%ptop)
+   call mappm(km, pe0(is_v:ie_v,1:km+1), uc(is_v:ie_v,j,1:km), npz, pe1(is_v:ie_v,1:npz+1), &
+              qn1_c(is_v:ie_v,1:npz), is_v,ie_v, -1, 8, Atm%ptop)
+   do k=1,npz
+     do i=is_v,ie_v
+       BC_side%v_BC(i,j,k)  = qn1_d(i,k)
+       BC_side%uc_BC(i,j,k) = qn1_c(i,k)
+     enddo
+   enddo
+
+ enddo j_loopv
+
+ deallocate(pe0)
+ deallocate(pe1)
+ deallocate(qn1_d)
+ deallocate(qn1_c)
+
+ if (is_master()) write(*,*) 'done remap_dwinds'
+
+ end subroutine remap_dwinds_regional_bc
+
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+
+      subroutine set_regional_BCs(delp,delz,w &
+                                 ,pt,q_con,cappa &
+                                 ,q &
+                                 ,u,v,uc,vc &
+                                 ,bd, nlayers, ntracers &
+                                 ,fcst_time )
+!
+!---------------------------------------------------------------------
+!*** Select the boundary data at the two bracketing time levels
+!*** and apply them to update each prognostic variable's boundary
+!*** region at the appropriate forecast time.
+!---------------------------------------------------------------------
+      implicit none
+!---------------------------------------------------------------------
+!
+!--------------------
+!*** Input variables
+!--------------------
+!
+      integer,intent(in) :: nlayers, ntracers
+!
+      real,intent(in) :: fcst_time !<-- Current forecast time (sec)
+!
+      type(fv_grid_bounds_type),intent(in) :: bd !<-- Task subdomain indices
+!
+!----------------------
+!*** Output variables
+!----------------------
+!
+      real,dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz),intent(out) :: &
+                                                                delp &
+                                                               ,pt
+!
+      real,dimension(bd%isd:,bd%jsd:,1:),intent(out) :: q_con,w
+      real,dimension(bd%is:, bd%js:, 1:),intent(out) :: delz
+!
+      real,dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz,ntracers),intent(out) :: q
+!
+#ifdef MOIST_CAPPA
+      real,dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz),intent(out) :: cappa
+#else
+      real,dimension(bd%isd:bd%isd,bd%jsd:bd%jsd,1),intent(out) :: cappa
+#endif
+!
+      real,dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1,npz),intent(out) :: u,vc
+!
+      real,dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz),intent(out) :: uc,v
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+      real :: fraction_interval
+!
+!---------------------------------------------------------------------
+!*********************************************************************
+!---------------------------------------------------------------------
+!
+!---------------------------------------------------------------------
+!*** The current forecast time is this fraction of the way from
+!*** time level 0 to time level 1.
+!*** (e.g., bc_time_interval=3 hours and fcst_time=5400 s give
+!***  fraction_interval=5400/10800=0.5)
+!---------------------------------------------------------------------
+!
+      fraction_interval=mod(fcst_time,(bc_time_interval*3600.))/(bc_time_interval*3600.)
+!
+!---------------------------------------------------------------------
+!
+      if(north_bc)then !<-- Note: the 'north' BC object fills the low-j (j<js) edge of the task subdomain.
+        call bc_values_into_arrays(BC_t0%north,BC_t1%north &
+                                  ,'north' &  !side
+                                  ,bd%isd &   !i1
+                                  ,bd%ied &   !i2
+                                  ,bd%jsd &   !j1
+                                  ,bd%js-1 &  !j2
+                                  ,bd%isd &   !i1_uvs
+                                  ,bd%ied &   !i2_uvs
+                                  ,bd%jsd &   !j1_uvs
+                                  ,bd%js-1 &  !j2_uvs
+                                  ,bd%isd &   !i1_uvw
+                                  ,bd%ied+1 & !i2_uvw
+                                  ,bd%jsd &   !j1_uvw
+                                  ,bd%js-1)   !j2_uvw
+      endif
+!
+      if(south_bc)then
+        call bc_values_into_arrays(BC_t0%south,BC_t1%south &
+                                  ,'south' &
+                                  ,bd%isd &
+                                  ,bd%ied &
+                                  ,bd%je+1 &
+                                  ,bd%jed &
+                                  ,bd%isd &
+                                  ,bd%ied &
+                                  ,bd%je+2 &
+                                  ,bd%jed+1 &
+                                  ,bd%isd &
+                                  ,bd%ied+1 &
+                                  ,bd%je+1 &
+                                  ,bd%jed )
+      endif
+!
+      if(east_bc)then
+        call bc_values_into_arrays(BC_t0%east,BC_t1%east &
+                                  ,'east ' &
+                                  ,bd%isd &
+                                  ,bd%is-1 &
+                                  ,bd%js &
+                                  ,bd%je &
+                                  ,bd%isd &
+                                  ,bd%is-1 &
+                                  ,bd%js &
+                                  ,bd%je+1 &
+                                  ,bd%isd &
+                                  ,bd%is-1 &
+                                  ,bd%js &
+                                  ,bd%je )
+      endif
+!
+      if(west_bc)then
+        call bc_values_into_arrays(BC_t0%west,BC_t1%west &
+                                  ,'west ' &
+                                  ,bd%ie+1 &
+                                  ,bd%ied &
+                                  ,bd%js &
+                                  ,bd%je &
+                                  ,bd%ie+1 &
+                                  ,bd%ied &
+                                  ,bd%js &
+                                  ,bd%je+1 &
+                                  ,bd%ie+2 &
+                                  ,bd%ied+1 &
+                                  ,bd%js &
+                                  ,bd%je )
+      endif
+!
+!---------------------------------------------------------------------
+
+      contains
+
+!---------------------------------------------------------------------
+!
+      subroutine bc_values_into_arrays(side_t0,side_t1 &
+                                      ,side &
+                                      ,i1,i2,j1,j2 &
+                                      ,i1_uvs,i2_uvs,j1_uvs,j2_uvs &
+                                      ,i1_uvw,i2_uvw,j1_uvw,j2_uvw )
+!
+!---------------------------------------------------------------------
+!*** Apply boundary values to the prognostic arrays at the
+!*** desired time.
+!---------------------------------------------------------------------
+      implicit none
+!---------------------------------------------------------------------
+!
+!---------------------
+!*** Input arguments
+!---------------------
+!
+      type(fv_regional_BC_variables),intent(in) :: side_t0 &
+                                                  ,side_t1
+!
+      character(len=*),intent(in) :: side
+!
+ integer,intent(in) :: i1,i2,j1,j2 & + ,i1_uvs,i2_uvs,j1_uvs,j2_uvs & + ,i1_uvw,i2_uvw,j1_uvw,j2_uvw +! +!--------------------- +!*** Local arguments +!--------------------- +! + integer :: i,ie,j,je,jend,jend_uvs,jend_uvw & + ,jstart,jstart_uvs,jstart_uvw,k,nt,nz +! +!--------------------------------------------------------------------- +!********************************************************************* +!--------------------------------------------------------------------- +! + jstart=j1 + jend =j2 + jstart_uvs=j1_uvs + jend_uvs =j2_uvs + jstart_uvw=j1_uvw + jend_uvw =j2_uvw + if((trim(side)=='east'.or.trim(side)=='west').and..not.north_bc)then + jstart=j1-nhalo_model + jstart_uvs=j1_uvs-nhalo_model + jstart_uvw=j1_uvw-nhalo_model + endif + if((trim(side)=='east'.or.trim(side)=='west').and..not.south_bc)then + jend=j2+nhalo_model + jend_uvs=j2_uvs+nhalo_model + jend_uvw=j2_uvw+nhalo_model + endif +! + do k=1,nlayers + do j=jstart,jend + do i=i1,i2 + delp(i,j,k)=side_t0%delp_BC(i,j,k) & + +(side_t1%delp_BC(i,j,k)-side_t0%delp_BC(i,j,k)) & + *fraction_interval + pt(i,j,k)=side_t0%pt_BC(i,j,k) & + +(side_t1%pt_BC(i,j,k)-side_t0%pt_BC(i,j,k)) & + *fraction_interval +#ifdef MOIST_CAPPA + cappa(i,j,k)=side_t0%cappa_BC(i,j,k) & + +(side_t1%cappa_BC(i,j,k)-side_t0%cappa_BC(i,j,k)) & + *fraction_interval +#endif + enddo + enddo +! + do j=jstart_uvs,jend_uvs + do i=i1_uvs,i2_uvs + u(i,j,k)=side_t0%u_BC(i,j,k) & + +(side_t1%u_BC(i,j,k)-side_t0%u_BC(i,j,k)) & + *fraction_interval + vc(i,j,k)=side_t0%vc_BC(i,j,k) & + +(side_t1%vc_BC(i,j,k)-side_t0%vc_BC(i,j,k)) & + *fraction_interval + enddo + enddo +! + do j=jstart_uvw,jend_uvw + do i=i1_uvw,i2_uvw + v(i,j,k)=side_t0%v_BC(i,j,k) & + +(side_t1%v_BC(i,j,k)-side_t0%v_BC(i,j,k)) & + *fraction_interval + uc(i,j,k)=side_t0%uc_BC(i,j,k) & + +(side_t1%uc_BC(i,j,k)-side_t0%uc_BC(i,j,k)) & + *fraction_interval + enddo + enddo + enddo +! + ie=min(ubound(side_t0%w_BC,1),ubound(w,1)) + je=min(ubound(side_t0%w_BC,2),ubound(w,2)) + nz=ubound(w,3) +! + do k=1,nz + do j=jstart,jend + do i=i1,ie +!!$ delz(i,j,k)=side_t0%delz_BC(i,j,k) & +!!$ +(side_t1%delz_BC(i,j,k)-side_t0%delz_BC(i,j,k)) & +!!$ *fraction_interval +#ifdef USE_COND + q_con(i,j,k)=side_t0%q_con_BC(i,j,k) & + +(side_t1%q_con_BC(i,j,k)-side_t0%q_con_BC(i,j,k)) & + *fraction_interval +#endif + w(i,j,k)=side_t0%w_BC(i,j,k) & + +(side_t1%w_BC(i,j,k)-side_t0%w_BC(i,j,k)) & + *fraction_interval + enddo + enddo + enddo +! + do nt=1,ntracers + do k=1,nz + do j=jstart,jend + do i=i1,i2 + q(i,j,k,nt)=side_t0%q_BC(i,j,k,nt) & + +(side_t1%q_BC(i,j,k,nt)-side_t0%q_BC(i,j,k,nt)) & + *fraction_interval + enddo + enddo + enddo + enddo +! +!--------------------------------------------------------------------- +! + end subroutine bc_values_into_arrays +! +!--------------------------------------------------------------------- +! + end subroutine set_regional_BCs +! +!--------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- + subroutine regional_boundary_update(array & + ,bc_vbl_name & + ,lbnd_x,ubnd_x & + ,lbnd_y,ubnd_y & + ,ubnd_z & + ,is,ie,js,je & + ,isd,ied,jsd,jed & + ,fcst_time & + ,index4 ) +! 
+!--------------------------------------------------------------------- +!*** Select the given variable's boundary data at the two +!*** bracketing time levels and apply them to the updating +!*** of the variable's boundary region at the appropriate +!*** forecast time. +!--------------------------------------------------------------------- + implicit none +!--------------------------------------------------------------------- +! +!-------------------- +!*** Input variables +!-------------------- +! + integer,intent(in) :: lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z !<-- Dimensions of full prognostic array to be updated. +! + integer,intent(in) :: is,ie,js,je & !<-- Compute limits + ,isd,ied,jsd,jed !<-- Memory limits +! + integer,intent(in),optional :: index4 !<-- Index for the 4-D tracer array. +! + real,intent(in) :: fcst_time !<-- Forecast time (sec) at which BC update is applied. +! + character(len=*),intent(in) :: bc_vbl_name !<-- Name of the variable to be updated. +! +!---------------------- +!*** Output variables +!---------------------- +! + real,dimension(lbnd_x:ubnd_x,lbnd_y:ubnd_y,1:ubnd_z) & + ,intent(out) :: array !<-- Update this full array's boundary region. +! +!--------------------- +!*** Local variables +!--------------------- +! + integer :: i1,i2,j1,j2 !<-- Horizontal limits of region updated. + integer :: lbnd1,ubnd1,lbnd2,ubnd2 !<-- Horizontal limits of BC update arrays. + integer :: iq !<-- Tracer index +! + real,dimension(:,:,:),pointer :: bc_t0,bc_t1 !<-- Boundary data at the two bracketing times. +! +!--------------------------------------------------------------------- +!********************************************************************* +!--------------------------------------------------------------------- +! + if(.not.(north_bc.or.south_bc.or.east_bc.or.west_bc))then + return + endif +! + iq=0 + if(present(index4))then + iq=index4 + endif +! +!--------------------------------------------------------------------- +!*** Get the pointers pointing at the boundary arrays holding the +!*** two time levels of the given prognostic array's boundary region +!*** then update the boundary points. +!*** Start with tasks on the north or south side of the domain. +!--------------------------------------------------------------------- +! + if(north_bc)then +! + call retrieve_bc_variable_data(bc_vbl_name & +! ,BC_t0%north,BC_t1%north & + ,bc_north_t0,bc_north_t1 & !<-- Boundary data objects + ,bc_t0,bc_t1 & !<-- Pointer to boundary arrays + ,lbnd1,ubnd1,lbnd2,ubnd2 & !<-- Bounds of the boundary data objects + ,iq ) +!----------------------------------------------------- +!*** Limits of the region to update in the boundary. +!----------------------------------------------------- +! + i1=isd + i2=ied + if(trim(bc_vbl_name)=='uc'.or.trim(bc_vbl_name)=='v'.or.trim(bc_vbl_name)=='divgd')then + i2=ied+1 + endif +! + j1=jsd + j2=js-1 +! + call bc_time_interpolation(array & + ,lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z & + ,bc_t0,bc_t1 & + ,lbnd1,ubnd1,lbnd2,ubnd2 & + ,i1,i2,j1,j2 & + ,fcst_time & + ,bc_time_interval ) +! + endif +! + if(south_bc)then +! + call retrieve_bc_variable_data(bc_vbl_name & +! ,BC_t0%south,BC_t1%south & + ,bc_south_t0,bc_south_t1 & !<-- Boundary data objects + ,bc_t0,bc_t1 & !<-- Pointer to boundary arrays + ,lbnd1,ubnd1,lbnd2,ubnd2 & !<-- Bounds of the boundary data objects + ,iq ) +!----------------------------------------------------- +!*** Limits of the region to update in the boundary. +!----------------------------------------------------- +! 
+ i1=isd + i2=ied + if(trim(bc_vbl_name)=='uc'.or.trim(bc_vbl_name)=='v'.or.trim(bc_vbl_name)=='divgd')then + i2=ied+1 + endif +! + j1=je+1 + j2=jed + if(trim(bc_vbl_name)=='u'.or.trim(bc_vbl_name)=='vc'.or.trim(bc_vbl_name)=='divgd')then + j1=je+2 + j2=jed+1 + endif +! + call bc_time_interpolation(array & + ,lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z & + ,bc_t0,bc_t1 & + ,lbnd1,ubnd1,lbnd2,ubnd2 & + ,i1,i2,j1,j2 & + ,fcst_time & + ,bc_time_interval ) +! + endif +! +!--------------------------------------------------------------------- +!*** Now update the west and east sides of the domain. +!--------------------------------------------------------------------- + if(east_bc)then +! + call retrieve_bc_variable_data(bc_vbl_name & +! ,BC_t0%east,BC_t1%east & + ,bc_east_t0,bc_east_t1 & !<-- Boundary data objects + ,bc_t0,bc_t1 & !<-- Pointer to boundary arrays + ,lbnd1,ubnd1,lbnd2,ubnd2 & !<-- Bounds of the boundary data objects + ,iq ) +!----------------------------------------------------- +!*** Limits of the region to update in the boundary. +!----------------------------------------------------- +! + j1=jsd + j2=jed +! + i1=isd + i2=is-1 +! + if(north_bc)then + j1=js + endif + if(south_bc)then + j2=je + if(trim(bc_vbl_name)=='u'.or.trim(bc_vbl_name)=='vc'.or.trim(bc_vbl_name)=='divgd')then + j2=je+1 + endif + endif +! + call bc_time_interpolation(array & + ,lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z & + ,bc_t0,bc_t1 & + ,lbnd1,ubnd1,lbnd2,ubnd2 & + ,i1,i2,j1,j2 & + ,fcst_time & + ,bc_time_interval ) + endif ! east_bc +! + if(west_bc)then +! + call retrieve_bc_variable_data(bc_vbl_name & +! ,BC_t0%west,BC_t1%west & + ,bc_west_t0,bc_west_t1 & !<-- Boundary data objects + ,bc_t0,bc_t1 & !<-- Pointer to boundary arrays + ,lbnd1,ubnd1,lbnd2,ubnd2 & !<-- Bounds of the boundary data objects + ,iq ) +!----------------------------------------------------- +!*** Limits of the region to update in the boundary. +!----------------------------------------------------- +! + j1=jsd + j2=jed +! + i1=ie+1 + i2=ied + if(trim(bc_vbl_name)=='uc'.or.trim(bc_vbl_name)=='v'.or.trim(bc_vbl_name)=='divgd')then + i1=ie+2 + i2=ied+1 + endif +! + if(north_bc)then + j1=js + endif + if(south_bc)then + j2=je + if(trim(bc_vbl_name)=='u'.or.trim(bc_vbl_name)=='vc'.or.trim(bc_vbl_name)=='divgd')then + j2=je+1 + endif + endif +! + call bc_time_interpolation(array & + ,lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z & + ,bc_t0,bc_t1 & + ,lbnd1,ubnd1,lbnd2,ubnd2 & + ,i1,i2,j1,j2 & + ,fcst_time & + ,bc_time_interval ) + endif ! west_bc +! +!--------------------------------------------------------------------- + + end subroutine regional_boundary_update + +!--------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- + + subroutine retrieve_bc_variable_data(bc_vbl_name & + ,bc_side_t0,bc_side_t1 & + ,bc_t0,bc_t1 & + ,lbnd1,ubnd1,lbnd2,ubnd2 & + ,iq ) + +!--------------------------------------------------------------------- +!*** Select the boundary variable associated with the prognostic +!*** array that needs its boundary region to be updated. +!--------------------------------------------------------------------- + implicit none +!--------------------------------------------------------------------- +! +!--------------------- +!*** Input variables +!--------------------- +! + integer,intent(in) :: iq !<-- Index used by 4-D tracer array. +! + character(len=*),intent(in) :: bc_vbl_name +! 
+ type(fv_regional_BC_variables),pointer :: bc_side_t0,bc_side_t1 !<-- Boundary states for the given domain side. +! +! +!---------------------- +!*** Output variables +!---------------------- +! + integer,intent(out) :: lbnd1,ubnd1,lbnd2,ubnd2 !<-- Horizontal dimensions of boundary array +! + real,dimension(:,:,:),pointer :: bc_t0,bc_t1 !<-- Boundary state values for the desired variable. +! +!--------------------------------------------------------------------- +!********************************************************************* +!--------------------------------------------------------------------- +! + select case (bc_vbl_name) +! + case ('delp') + bc_t0=>bc_side_t0%delp_BC + bc_t1=>bc_side_t1%delp_BC + case ('delz') + bc_t0=>bc_side_t0%delz_BC + bc_t1=>bc_side_t1%delz_BC + case ('pt') + bc_t0=>bc_side_t0%pt_BC + bc_t1=>bc_side_t1%pt_BC + case ('w') + bc_t0=>bc_side_t0%w_BC + bc_t1=>bc_side_t1%w_BC + case ('divgd') + bc_t0=>bc_side_t0%divgd_BC + bc_t1=>bc_side_t1%divgd_BC +#ifdef USE_COND +#ifdef MOIST_CAPPA + case ('cappa') + bc_t0=>bc_side_t0%cappa_BC + bc_t1=>bc_side_t1%cappa_BC +#endif + case ('q_con') + bc_t0=>bc_side_t0%q_con_BC + bc_t1=>bc_side_t1%q_con_BC +#endif + case ('q') + if(iq<1)then + write(0,101) + 101 format(' iq<1 is not a valid index for q_BC array in retrieve_bc_variable_data') + endif + lbnd1=lbound(bc_side_t0%q_BC,1) + lbnd2=lbound(bc_side_t0%q_BC,2) + ubnd1=ubound(bc_side_t0%q_BC,1) + ubnd2=ubound(bc_side_t0%q_BC,2) + bc_t0=>bc_side_t0%q_BC(:,:,:,iq) + bc_t1=>bc_side_t1%q_BC(:,:,:,iq) + case ('u') + bc_t0=>bc_side_t0%u_BC + bc_t1=>bc_side_t1%u_BC + case ('v') + bc_t0=>bc_side_t0%v_BC + bc_t1=>bc_side_t1%v_BC + case ('uc') + bc_t0=>bc_side_t0%uc_BC + bc_t1=>bc_side_t1%uc_BC + case ('vc') + bc_t0=>bc_side_t0%vc_BC + bc_t1=>bc_side_t1%vc_BC +! + end select +! + if(trim(bc_vbl_name)/='q')then + lbnd1=lbound(bc_t0,1) + lbnd2=lbound(bc_t0,2) + ubnd1=ubound(bc_t0,1) + ubnd2=ubound(bc_t0,2) + endif +! +!--------------------------------------------------------------------- +! + end subroutine retrieve_bc_variable_data +! +!--------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- +! + subroutine bc_time_interpolation(array & + ,lbnd_x, ubnd_x & + ,lbnd_y, ubnd_y & + ,ubnd_z & + ,bc_t0, bc_t1 & + ,lbnd1, ubnd1 & + ,lbnd2, ubnd2 & + ,i1,i2,j1,j2 & + ,fcst_time & + ,bc_time_interval ) + +!--------------------------------------------------------------------- +!*** Update the boundary region of the input array at the given +!*** forecast time that is within the interval bracketed by the +!*** two current boundary region states. +!--------------------------------------------------------------------- + implicit none +!--------------------------------------------------------------------- +! +!--------------------- +!*** Input variables +!--------------------- +! + integer,intent(in) :: lbnd_x,ubnd_x,lbnd_y,ubnd_y,ubnd_z !<-- Dimensions of the array to be updated. +! + integer,intent(in) :: lbnd1,ubnd1,lbnd2,ubnd2 !<-- Index limits of the BC arrays. +! + integer,intent(in) :: i1,i2,j1,j2 !<-- Index limits of the updated region. +! + integer,intent(in) :: bc_time_interval !<-- Time (hours) between BC data states +! + real,intent(in) :: fcst_time !<-- Current forecast time (sec) +! + real,dimension(lbnd1:ubnd1,lbnd2:ubnd2,1:ubnd_z) :: bc_t0 & !<-- Interpolate between these + ,bc_t1 ! two boundary region states. +! 
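+!    (bc_t0 holds the boundary state at the start of the current BC
+!     interval and bc_t1 the state at its end, bc_time_interval hours
+!     later; the interpolation below blends them linearly in time.)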
+!---------------------
+!*** Output variables
+!---------------------
+!
+ real,dimension(lbnd_x:ubnd_x,lbnd_y:ubnd_y,1:ubnd_z) &
+ ,intent(inout) :: array !<-- Update only the boundary points in this array.
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: i,j,k
+!
+ real :: fraction_interval
+!
+!---------------------------------------------------------------------
+!*********************************************************************
+!---------------------------------------------------------------------
+!
+!---------------------------------------------------------------------
+!*** The current forecast time is this fraction of the way from
+!*** time level 0 to time level 1.  For example, with 3-hourly BC
+!*** data (bc_time_interval=3) and fcst_time=5400 sec the fraction
+!*** is mod(5400,10800)/10800=0.5, i.e. halfway through the interval.
+!---------------------------------------------------------------------
+!
+ fraction_interval=mod(fcst_time,(bc_time_interval*3600.))/(bc_time_interval*3600.)
+!
+!---------------------------------------------------------------------
+!
+ do k=1,ubnd_z
+ do j=j1,j2
+ do i=i1,i2
+ array(i,j,k)=bc_t0(i,j,k) &
+ +(bc_t1(i,j,k)-bc_t0(i,j,k))*fraction_interval
+ enddo
+ enddo
+ enddo
+!
+!---------------------------------------------------------------------
+
+ end subroutine bc_time_interpolation
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+!
+ subroutine bc_time_interpolation_general(is,ie,js,je &
+ ,is_s,ie_s,js_s,je_s &
+ ,is_w,ie_w,js_w,je_w &
+ ,fraction &
+ ,t0,t1 &
+ ,Atm )
+!
+!---------------------------------------------------------------------
+!*** Execute the linear time interpolation between t0 and t1
+!*** generically for any side of the regional domain's boundary
+!*** region.
+!---------------------------------------------------------------------
+ implicit none
+!---------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+ integer,intent(in) :: is,ie,js,je & !<-- Index limits for centers of grid cells
+ ,is_s,ie_s,js_s,je_s & !<-- Index limits for south/north edges of grid cells
+ ,is_w,ie_w,js_w,je_w !<-- Index limits for west/east edges of grid cells
+!
+ real,intent(in) :: fraction !<-- Current time is this fraction between t0 and t1.
+!
+ type(fv_regional_BC_variables),intent(in) :: t0,t1 !<-- BC variables at time levels t0 and t1.
+!
+ type(fv_atmos_type),intent(inout) :: Atm !<-- The Atm object
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: i,j,k,n,nlayers,ntracers
+!
+!---------------------------------------------------------------------
+!*********************************************************************
+!---------------------------------------------------------------------
+!
+ nlayers =Atm%npz !<-- # of layers in vertical configuration of integration
+ ntracers=Atm%ncnst !<-- # of advected tracers
+!
+!---------------------------------------------------------------------
+!
+ k_loop: do k=1,nlayers
+!
+!---------------------------------------------------------------------
+!
+!-------------
+!*** Scalars
+!-------------
+!
+ do j=js,je
+ do i=is,ie
+!
+ Atm%delp(i,j,k)=t0%delp_BC(i,j,k) & !<-- Update layer pressure thickness.
+ +(t1%delp_BC(i,j,k)-t0%delp_BC(i,j,k)) &
+ *fraction
+!
+#ifndef SW_DYNAMICS
+ Atm%delz(i,j,k)=t0%delz_BC(i,j,k) & !<-- Update layer height thickness.
+ +(t1%delz_BC(i,j,k)-t0%delz_BC(i,j,k)) &
+ *fraction
+!
+ Atm%w(i,j,k)=t0%w_BC(i,j,k) & !<-- Update vertical motion.
+ +(t1%w_BC(i,j,k)-t0%w_BC(i,j,k)) &
+ *fraction
+!
+ Atm%pt(i,j,k)=t0%pt_BC(i,j,k) & !<-- Update thetav.
+ +(t1%pt_BC(i,j,k)-t0%pt_BC(i,j,k)) &
+ *fraction
+#ifdef USE_COND
+ Atm%q_con(i,j,k)=t0%q_con_BC(i,j,k) & !<-- Update water condensate.
+ +(t1%q_con_BC(i,j,k)-t0%q_con_BC(i,j,k)) &
+ *fraction
+#ifdef MOIST_CAPPA
+! Atm%cappa(i,j,k)=t0%cappa_BC(i,j,k) & !<-- Update cappa.
+! +(t1%cappa_BC(i,j,k)-t0%cappa_BC(i,j,k)) &
+! *fraction
+#endif
+#endif
+#endif
+!
+ enddo
+ enddo
+!
+ do n=1,ntracers
+!
+ do j=js,je
+ do i=is,ie
+ Atm%q(i,j,k,n)=t0%q_BC(i,j,k,n) & !<-- Update tracers.
+ +(t1%q_BC(i,j,k,n)-t0%q_BC(i,j,k,n)) &
+ *fraction
+ enddo
+ enddo
+!
+ enddo
+!
+!-----------
+!*** Winds
+!-----------
+!
+ do j=js_s,je_s
+ do i=is_s,ie_s
+ Atm%u(i,j,k)=t0%u_BC(i,j,k) & !<-- Update D-grid u component.
+ +(t1%u_BC(i,j,k)-t0%u_BC(i,j,k)) &
+ *fraction
+ Atm%vc(i,j,k)=t0%vc_BC(i,j,k) & !<-- Update C-grid v component.
+ +(t1%vc_BC(i,j,k)-t0%vc_BC(i,j,k)) &
+ *fraction
+ enddo
+ enddo
+!
+!
+ do j=js_w,je_w
+ do i=is_w,ie_w
+ Atm%v(i,j,k)=t0%v_BC(i,j,k) & !<-- Update D-grid v component.
+ +(t1%v_BC(i,j,k)-t0%v_BC(i,j,k)) &
+ *fraction
+ Atm%uc(i,j,k)=t0%uc_BC(i,j,k) & !<-- Update C-grid u component.
+ +(t1%uc_BC(i,j,k)-t0%uc_BC(i,j,k)) &
+ *fraction
+ enddo
+ enddo
+!
+!---------------------------------------------------------------------
+!
+ enddo k_loop
+!
+!---------------------------------------------------------------------
+!
+ end subroutine bc_time_interpolation_general
+!
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+!
+ subroutine regional_bc_t1_to_t0(BC_t1,BC_t0 &
+ ,nlev,ntracers,bnds )
+!
+!---------------------------------------------------------------------
+!*** BC data has been read into the time level t1 object.  Now
+!*** move the t1 data into the t0 object before refilling t1
+!*** with the next data from the BC file.
+!---------------------------------------------------------------------
+ implicit none
+!---------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+ integer,intent(in) :: nlev & !<-- # of model layers.
+ ,ntracers !<-- # of advected tracers
+!
+ type(fv_regional_bc_bounds_type),intent(in) :: bnds !<-- Index limits for all types of vbls in boundary region
+!
+ type(fv_domain_sides),intent(in) :: BC_t1
+!
+ type(fv_domain_sides),intent(inout) :: BC_t0
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: i,ie,is,j,je,js,k,n
+!
+!---------------------------------------------------------------------
+!*********************************************************************
+!---------------------------------------------------------------------
+!
+!-----------
+!*** North
+!-----------
+!
+ if(north_bc)then
+!
+ is=bnds%is_north !<--
+ ie=bnds%ie_north ! North BC index limits
+ js=bnds%js_north ! for centers of grid cells
+ je=bnds%je_north !<--
+!
+ do k=1,nlev
+ do j=js,je
+ do i=is,ie
+ BC_t0%north%delp_BC(i,j,k)=BC_t1%north%delp_BC(i,j,k)
+ enddo
+ enddo
+ enddo
+!
+ do n=1,ntracers
+ do k=1,nlev
+ do j=js,je
+ do i=is,ie
+ BC_t0%north%q_BC(i,j,k,n)=BC_t1%north%q_BC(i,j,k,n)
+ enddo
+ enddo
+ enddo
+ enddo
+!
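+! (The same t1->t0 copy is repeated below for each remaining north
+!  field and then, with the appropriate index limits, for the south,
+!  east and west sides of the boundary region.)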
+ do k=1,nlev + do j=js,je + do i=is,ie +#ifndef SW_DYNAMICS + BC_t0%north%w_BC(i,j,k) =BC_t1%north%w_BC(i,j,k) + BC_t0%north%pt_BC(i,j,k) =BC_t1%north%pt_BC(i,j,k) + BC_t0%north%delz_BC(i,j,k)=BC_t1%north%delz_BC(i,j,k) +#ifdef USE_COND + BC_t0%north%q_con_BC(i,j,k)=BC_t1%north%q_con_BC(i,j,k) +#ifdef MOIST_CAPPA + BC_t0%north%cappa_BC(i,j,k)=BC_t1%north%cappa_BC(i,j,k) +#endif +#endif +#endif + enddo + enddo + enddo +! + is=bnds%is_north_uvs !<-- + ie=bnds%ie_north_uvs ! North BC index limits + js=bnds%js_north_uvs ! for winds on N/S sides of grid cells. + je=bnds%je_north_uvs !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%north%u_BC(i,j,k) =BC_t1%north%u_BC(i,j,k) + BC_t0%north%vc_BC(i,j,k)=BC_t1%north%vc_BC(i,j,k) + enddo + enddo + enddo +! + is=bnds%is_north_uvw !<-- + ie=bnds%ie_north_uvw ! North BC index limits + js=bnds%js_north_uvw ! for winds on E/W sides of grid cells. + je=bnds%je_north_uvw !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%north%v_BC(i,j,k) =BC_t1%north%v_BC(i,j,k) + BC_t0%north%uc_BC(i,j,k)=BC_t1%north%uc_BC(i,j,k) + enddo + enddo + enddo +! + BC_t0%north%divgd_BC =0. ! TEMPORARY + endif +! +!----------- +!*** South +!----------- +! + if(south_bc)then +! + is=bnds%is_south !<-- + ie=bnds%ie_south ! South BC index limits + js=bnds%js_south ! for centers of grid cells + je=bnds%je_south !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%south%delp_BC(i,j,k)=BC_t1%south%delp_BC(i,j,k) + enddo + enddo + enddo +! + do n=1,ntracers + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%south%q_BC(i,j,k,n)=BC_t1%south%q_BC(i,j,k,n) + enddo + enddo + enddo + enddo +! + do k=1,nlev + do j=js,je + do i=is,ie +#ifndef SW_DYNAMICS + BC_t0%south%w_BC(i,j,k) =BC_t1%south%w_BC(i,j,k) + BC_t0%south%pt_BC(i,j,k) =BC_t1%south%pt_BC(i,j,k) + BC_t0%south%delz_BC(i,j,k)=BC_t1%south%delz_BC(i,j,k) +#ifdef USE_COND + BC_t0%south%q_con_BC(i,j,k)=BC_t1%south%q_con_BC(i,j,k) +#ifdef MOIST_CAPPA + BC_t0%south%cappa_BC(i,j,k)=BC_t1%south%cappa_BC(i,j,k) +#endif +#endif +#endif + enddo + enddo + enddo +! + is=bnds%is_south_uvs !<-- + ie=bnds%ie_south_uvs ! South BC index limits + js=bnds%js_south_uvs ! for winds on N/S sides of grid cells. + je=bnds%je_south_uvs !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%south%u_BC(i,j,k) =BC_t1%south%u_BC(i,j,k) + BC_t0%south%vc_BC(i,j,k)=BC_t1%south%vc_BC(i,j,k) + enddo + enddo + enddo +! + is=bnds%is_south_uvw !<-- + ie=bnds%ie_south_uvw ! South BC index limits + js=bnds%js_south_uvw ! for winds on E/W sides of grid cells. + je=bnds%je_south_uvw !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%south%v_BC(i,j,k) =BC_t1%south%v_BC(i,j,k) + BC_t0%south%uc_BC(i,j,k)=BC_t1%south%uc_BC(i,j,k) + enddo + enddo + enddo +! + BC_t0%south%divgd_BC =0. ! TEMPORARY + endif +! +!---------- +!*** East +!---------- +! + if(east_bc)then +! + is=bnds%is_east !<-- + ie=bnds%ie_east ! East BC index limits + js=bnds%js_east ! for centers of grid cells + je=bnds%je_east !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%east%delp_BC(i,j,k)=BC_t1%east%delp_BC(i,j,k) + enddo + enddo + enddo +! + do n=1,ntracers + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%east%q_BC(i,j,k,n)=BC_t1%east%q_BC(i,j,k,n) + enddo + enddo + enddo + enddo +! 
+ do k=1,nlev + do j=js,je + do i=is,ie +#ifndef SW_DYNAMICS + BC_t0%east%w_BC(i,j,k) =BC_t1%east%w_BC(i,j,k) + BC_t0%east%pt_BC(i,j,k) =BC_t1%east%pt_BC(i,j,k) + BC_t0%east%delz_BC(i,j,k)=BC_t1%east%delz_BC(i,j,k) +#ifdef USE_COND + BC_t0%east%q_con_BC(i,j,k)=BC_t1%east%q_con_BC(i,j,k) +#ifdef MOIST_CAPPA + BC_t0%east%cappa_BC(i,j,k)=BC_t1%east%cappa_BC(i,j,k) +#endif +#endif +#endif + enddo + enddo + enddo +! + is=bnds%is_east_uvs !<-- + ie=bnds%ie_east_uvs ! East BC index limits + js=bnds%js_east_uvs ! for winds on N/S sides of grid cells. + je=bnds%je_east_uvs !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%east%u_BC(i,j,k) =BC_t1%east%u_BC(i,j,k) + BC_t0%east%vc_BC(i,j,k)=BC_t1%east%vc_BC(i,j,k) + enddo + enddo + enddo +! + is=bnds%is_east_uvw !<-- + ie=bnds%ie_east_uvw ! East BC index limits + js=bnds%js_east_uvw ! for winds on E/W sides of grid cells. + je=bnds%je_east_uvw !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%east%v_BC(i,j,k) =BC_t1%east%v_BC(i,j,k) + BC_t0%east%uc_BC(i,j,k)=BC_t1%east%uc_BC(i,j,k) + enddo + enddo + enddo +! + BC_t0%east%divgd_BC =0. ! TEMPORARY + endif +! +!---------- +!*** West +!---------- +! + if(west_bc)then +! + is=bnds%is_west !<-- + ie=bnds%ie_west ! West BC index limits + js=bnds%js_west ! for centers of grid cells + je=bnds%je_west !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%west%delp_BC(i,j,k)=BC_t1%west%delp_BC(i,j,k) + enddo + enddo + enddo +! + do n=1,ntracers + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%west%q_BC(i,j,k,n)=BC_t1%west%q_BC(i,j,k,n) + enddo + enddo + enddo + enddo +! + do k=1,nlev + do j=js,je + do i=is,ie +#ifndef SW_DYNAMICS + BC_t0%west%w_BC(i,j,k) =BC_t1%west%w_BC(i,j,k) + BC_t0%west%pt_BC(i,j,k) =BC_t1%west%pt_BC(i,j,k) + BC_t0%west%delz_BC(i,j,k)=BC_t1%west%delz_BC(i,j,k) +#ifdef USE_COND + BC_t0%west%q_con_BC(i,j,k)=BC_t1%west%q_con_BC(i,j,k) +#ifdef MOIST_CAPPA + BC_t0%west%cappa_BC(i,j,k)=BC_t1%west%cappa_BC(i,j,k) +#endif +#endif +#endif + enddo + enddo + enddo +! + is=bnds%is_west_uvs !<-- + ie=bnds%ie_west_uvs ! West BC index limits + js=bnds%js_west_uvs ! for winds on N/S sides of grid cells. + je=bnds%je_west_uvs !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%west%u_BC(i,j,k) =BC_t1%west%u_BC(i,j,k) + BC_t0%west%vc_BC(i,j,k)=BC_t1%west%vc_BC(i,j,k) + enddo + enddo + enddo +! + is=bnds%is_west_uvw !<-- + ie=bnds%ie_west_uvw ! West BC index limits + js=bnds%js_west_uvw ! for winds on E/W sides of grid cells. + je=bnds%je_west_uvw !<-- +! + do k=1,nlev + do j=js,je + do i=is,ie + BC_t0%west%v_BC(i,j,k) =BC_t1%west%v_BC(i,j,k) + BC_t0%west%uc_BC(i,j,k)=BC_t1%west%uc_BC(i,j,k) + enddo + enddo + enddo +! + BC_t0%west%divgd_BC =0. ! TEMPORARY + endif +! +!--------------------------------------------------------------------- +! + end subroutine regional_bc_t1_to_t0 +! +!--------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- +! + subroutine convert_to_virt_pot_temp(isd,ied,jsd,jed,npz & + ,sphum,liq_wat ) +! +!----------------------------------------------------------------------- +!*** Convert the incoming sensible temperature to virtual potential +!*** temperature. +!----------------------------------------------------------------------- + implicit none +!----------------------------------------------------------------------- +! +!--------------------- +!*** Input arguments +!--------------------- +! 
+ integer,intent(in) :: isd,ied,jsd,jed,npz +! + integer,intent(in) :: liq_wat,sphum +! +!--------------------- +!*** Local variables +!--------------------- +! + integer :: i1,i2,j1,j2 +! + real :: rdg +! + real,dimension(:,:,:),pointer :: cappa,delp,delz,pt,q_con +! + real,dimension(:,:,:,:),pointer :: q +! +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! + if(.not.(north_bc.or.south_bc.or.east_bc.or.west_bc))then + return + endif +! + rdg=-rdgas/grav +! + if(north_bc)then + i1=regional_bounds%is_north + i2=regional_bounds%ie_north + j1=regional_bounds%js_north + j2=regional_bounds%je_north + q =>BC_t1%north%q_BC + delp =>BC_t1%north%delp_BC + delz =>BC_t1%north%delz_BC +#ifdef USE_COND + q_con=>BC_t1%north%q_con_BC +#ifdef MOIST_CAPPA + cappa=>BC_t1%north%cappa_BC +#endif +#endif + pt =>BC_t1%north%pt_BC + call compute_vpt !<-- Compute the virtual potential temperature. + endif +! + if(south_bc)then + i1=regional_bounds%is_south + i2=regional_bounds%ie_south + j1=regional_bounds%js_south + j2=regional_bounds%je_south + q =>BC_t1%south%q_BC + delp =>BC_t1%south%delp_BC + delz =>BC_t1%south%delz_BC +#ifdef USE_COND + q_con=>BC_t1%south%q_con_BC +#ifdef MOIST_CAPPA + cappa=>BC_t1%south%cappa_BC +#endif +#endif + pt =>BC_t1%south%pt_BC + call compute_vpt !<-- Compute the virtual potential temperature. + endif +! + if(east_bc)then + i1=regional_bounds%is_east + i2=regional_bounds%ie_east + j1=regional_bounds%js_east + j2=regional_bounds%je_east + q =>BC_t1%east%q_BC + delp =>BC_t1%east%delp_BC + delz =>BC_t1%east%delz_BC +#ifdef USE_COND + q_con=>BC_t1%east%q_con_BC +#ifdef MOIST_CAPPA + cappa=>BC_t1%east%cappa_BC +#endif +#endif + pt =>BC_t1%east%pt_BC + call compute_vpt !<-- Compute the virtual potential temperature. + endif +! + if(west_bc)then + i1=regional_bounds%is_west + i2=regional_bounds%ie_west + j1=regional_bounds%js_west + j2=regional_bounds%je_west + q =>BC_t1%west%q_BC + delp =>BC_t1%west%delp_BC + delz =>BC_t1%west%delz_BC +#ifdef USE_COND + q_con=>BC_t1%west%q_con_BC +#ifdef MOIST_CAPPA + cappa=>BC_t1%west%cappa_BC +#endif +#endif + pt =>BC_t1%west%pt_BC + call compute_vpt !<-- Compute the virtual potential temperature. + endif +! +!----------------------------------------------------------------------- + + contains + +!----------------------------------------------------------------------- +! + subroutine compute_vpt +! +!----------------------------------------------------------------------- +!*** Compute the virtual potential temperature as done in fv_dynamics. +!----------------------------------------------------------------------- +! +!--------------------- +!*** Local variables +!--------------------- +! + integer :: i,j,k +! + real :: cvm,dp1,pkz +! +!----------------------------------------------------------------------- +!*********************************************************************** +!----------------------------------------------------------------------- +! + do k=1,npz +! 
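+!  (In the loop below, dp1=zvir*sphum gives the moist virtual
+!   temperature Tv=pt*(1.+dp1)*(1.-q_con), with the (1.-q_con) factor
+!   present only under USE_COND.  Because delp=rho*grav*|delz| and
+!   rdg=-rdgas/grav, the argument of the log is the full pressure from
+!   the ideal-gas law, so pkz=p**cappa (or p**kappa), and dividing Tv
+!   by pkz leaves the virtual potential temperature in pt.)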
+ do j=j1,j2
+ do i=i1,i2
+ dp1 = zvir*q(i,j,k,sphum)
+#ifdef USE_COND
+#ifdef MOIST_CAPPA
+ cvm=(1.-q(i,j,k,sphum)+q_con(i,j,k))*cv_air &
+ +q(i,j,k,sphum)*cv_vap+q(i,j,k,liq_wat)*c_liq
+ pkz=exp(cappa(i,j,k)*log(rdg*delp(i,j,k)*pt(i,j,k) &
+ *(1.+dp1)*(1.-q_con(i,j,k))/delz(i,j,k)))
+#else
+ pkz=exp(kappa*log(rdg*delp(i,j,k)*pt(i,j,k) &
+ *(1.+dp1)*(1.-q_con(i,j,k))/delz(i,j,k)))
+#endif
+ pt(i,j,k)=pt(i,j,k)*(1.+dp1)*(1.-q_con(i,j,k))/pkz
+#else
+ pkz=exp(kappa*log(rdg*delp(i,j,k)*pt(i,j,k) &
+ *(1.+dp1)/delz(i,j,k)))
+ pt(i,j,k)=pt(i,j,k)*(1.+dp1)/pkz
+#endif
+ enddo
+ enddo
+!
+ enddo
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine compute_vpt
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine convert_to_virt_pot_temp
+!
+!-----------------------------------------------------------------------
+!
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+!*** The following four subroutines are exact copies from
+!*** external_ic_mod.  That module must USE this module; therefore
+!*** this module cannot USE external_ic_mod to get at those
+!*** subroutines.  The routines may be moved to their own module.
+!---------------------------------------------------------------------
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+!---------------------------------------------------------------------
+ subroutine p_maxmin(qname, q, is, ie, js, je, km, fac)
+ character(len=*), intent(in):: qname
+ integer, intent(in):: is, ie, js, je, km
+ real, intent(in):: q(is:ie, js:je, km)
+ real, intent(in):: fac
+ real qmin, qmax
+ integer i,j,k
+
+ qmin = q(is,js,1)
+ qmax = qmin
+ do k=1,km
+ do j=js,je
+ do i=is,ie
+ if( q(i,j,k) < qmin ) then
+ qmin = q(i,j,k)
+ elseif( q(i,j,k) > qmax ) then
+ qmax = q(i,j,k)
+ endif
+ enddo
+ enddo
+ enddo
+ call mp_reduce_min(qmin)
+ call mp_reduce_max(qmax)
+ if(is_master()) write(6,*) qname, qmax*fac, qmin*fac
+
+ end subroutine p_maxmin
+
+
+ subroutine pmaxmn(qname, q, is, ie, js, je, km, fac, area, domain)
+ character(len=*), intent(in):: qname
+ integer, intent(in):: is, ie, js, je
+ integer, intent(in):: km
+ real, intent(in):: q(is:ie, js:je, km)
+ real, intent(in):: fac
+ real(kind=R_GRID), intent(IN):: area(is-3:ie+3, js-3:je+3)
+ type(domain2d), intent(INOUT) :: domain
+!---local variables
+ real qmin, qmax, gmean
+ integer i,j,k
+
+ qmin = q(is,js,1)
+ qmax = qmin
+ gmean = 0.
+
+ do k=1,km
+ do j=js,je
+ do i=is,ie
+ if( q(i,j,k) < qmin ) then
+ qmin = q(i,j,k)
+ elseif( q(i,j,k) > qmax ) then
+ qmax = q(i,j,k)
+ endif
+ enddo
+ enddo
+ enddo
+
+ call mp_reduce_min(qmin)
+ call mp_reduce_max(qmax)
+
+ gmean = g_sum(domain, q(is,js,km), is, ie, js, je, 3, area, 1, reproduce=.true.)
+ if(is_master()) write(6,*) qname, qmax*fac, qmin*fac, gmean*fac
+
+ end subroutine pmaxmn
+
+
+ subroutine fillq(im, km, nq, q, dp)
+ integer, intent(in):: im ! No. of longitudes
+ integer, intent(in):: km ! No. of levels
+ integer, intent(in):: nq ! Total number of tracers
+ real , intent(in):: dp(im,km) ! pressure thickness
+ real , intent(inout) :: q(im,km,nq) ! tracer mixing ratio
+! !LOCAL VARIABLES:
+ integer i, k, ic, k1
+
+ do ic=1,nq
+! Bottom up:
+ do k=km,2,-1
+ k1 = k-1
+ do i=1,im
+ if( q(i,k,ic) < 0. ) then
+ q(i,k1,ic) = q(i,k1,ic) + q(i,k,ic)*dp(i,k)/dp(i,k1)
+ q(i,k ,ic) = 0.
+ endif
+ enddo
+ enddo
+! Top down:
+ do k=1,km-1
+ k1 = k+1
+ do i=1,im
+ if( q(i,k,ic) < 0. ) then
+ q(i,k1,ic) = q(i,k1,ic) + q(i,k,ic)*dp(i,k)/dp(i,k1)
+ q(i,k ,ic) = 0.
+ endif
+ enddo
+ enddo
+
+ enddo
+
+ end subroutine fillq
+
+ subroutine mp_auto_conversion(ql, qr, qi, qs)
+ real, intent(inout):: ql, qr, qi, qs
+ real, parameter:: qi0_max = 2.0e-3
+ real, parameter:: ql0_max = 2.5e-3
+
+! Convert excess cloud water into rain:
+ if ( ql > ql0_max ) then
+ qr = ql - ql0_max
+ ql = ql0_max
+ endif
+! Convert excess cloud ice into snow:
+ if ( qi > qi0_max ) then
+ qs = qi - qi0_max
+ qi = qi0_max
+ endif
+
+ end subroutine mp_auto_conversion
+
+!-----------------------------------------------------------------------
+!
+ subroutine nudge_qv_bc(Atm,isd,ied,jsd,jed)
+!
+!-----------------------------------------------------------------------
+!*** When nudging of specific humidity is selected, we must also
+!*** nudge the values in the regional boundary region.
+!-----------------------------------------------------------------------
+ implicit none
+!-----------------------------------------------------------------------
+!
+!------------------------
+!*** Argument variables
+!------------------------
+!
+ integer,intent(in) :: isd,ied,jsd,jed !<-- Memory limits of task subdomain
+!
+ type(fv_atmos_type),target,intent(inout) :: Atm !<-- Atm object for the current domain
+!
+!---------------------
+!*** Local variables
+!---------------------
+!
+ integer :: i,i_x,ie,is,j,j_x,je,js,k
+!
+ real, parameter:: q1_h2o = 2.2E-6
+ real, parameter:: q7_h2o = 3.8E-6
+ real, parameter:: q100_h2o = 3.8E-6
+ real, parameter:: q1000_h2o = 3.1E-6
+ real, parameter:: q2000_h2o = 2.8E-6
+ real, parameter:: q3000_h2o = 3.0E-6
+ real, parameter:: wt=2., xt=1./(1.+wt)
+!
+ real :: p00,q00
+!
+ type(fv_regional_bc_bounds_type),pointer :: bnds
+!
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+ bnds=>Atm%regional_bc_bounds
+!
+!-----------
+!*** North
+!-----------
+!
+ if(north_bc)then
+ is=lbound(BC_t1%north%q_BC,1)
+ ie=ubound(BC_t1%north%q_BC,1)
+ js=lbound(BC_t1%north%q_BC,2)
+ je=ubound(BC_t1%north%q_BC,2)
+!
+ i_x=isd !<-- Use column at
+ j_x=jsd ! this location.
+!
+ p00=Atm%ptop !<-- Use layer interface pressures.
+!
+ n_loopk: do k=1,npz
+ if(p00<3000.)then !<-- Apply nudging only if pressure < 30 mb.
+ call get_q00
+ do j=js,je
+ do i=is,ie
+ BC_t1%north%q_BC(i,j,k,sphum_index)= & !<-- Nudge the north boundary sphum at time t1.
+ xt*(BC_t1%north%q_BC(i,j,k,sphum_index)+wt*q00)
+ enddo
+ enddo
+ else
+ exit n_loopk
+ endif
+ p00=p00+BC_t1%north%delp_BC(i_x,j_x,k)
+ enddo n_loopk
+ endif
+!
+!-----------
+!*** South
+!-----------
+!
+ if(south_bc)then
+ is=lbound(BC_t1%south%q_BC,1)
+ ie=ubound(BC_t1%south%q_BC,1)
+ js=lbound(BC_t1%south%q_BC,2)
+ je=ubound(BC_t1%south%q_BC,2)
+!
+ i_x=isd !<-- Use column at
+ j_x=jed ! this location.
+!
+ p00=Atm%ptop !<-- Use layer interface pressures.
+!
+ s_loopk: do k=1,npz
+ if(p00<3000.)then !<-- Apply nudging only if pressure < 30 mb.
+ call get_q00
+ do j=js,je
+ do i=is,ie
+ BC_t1%south%q_BC(i,j,k,sphum_index)= & !<-- Nudge the south boundary sphum at time t1.
+ xt*(BC_t1%south%q_BC(i,j,k,sphum_index)+wt*q00)
+ enddo
+ enddo
+ else
+ exit s_loopk
+ endif
+ p00=p00+BC_t1%south%delp_BC(i_x,j_x,k)
+ enddo s_loopk
+ endif
+!
+!----------
+!*** East
+!----------
+!
+ if(east_bc)then
+ is=lbound(BC_t1%east%q_BC,1)
+ ie=ubound(BC_t1%east%q_BC,1)
+ js=lbound(BC_t1%east%q_BC,2)
+ je=ubound(BC_t1%east%q_BC,2)
+!
+ i_x=isd !<-- Use column at
+ j_x=jsd+nhalo_model ! this location.
+!
+ p00=Atm%ptop !<-- Use layer interface pressures.
+!
+ e_loopk: do k=1,npz
+ if(p00<3000.)then !<-- Apply nudging only if pressure < 30 mb.
+ call get_q00
+ do j=js,je
+ do i=is,ie
+ BC_t1%east%q_BC(i,j,k,sphum_index)= & !<-- Nudge the east boundary sphum at time t1.
+ xt*(BC_t1%east%q_BC(i,j,k,sphum_index)+wt*q00)
+ enddo
+ enddo
+ else
+ exit e_loopk
+ endif
+ p00=p00+BC_t1%east%delp_BC(i_x,j_x,k)
+ enddo e_loopk
+ endif
+!
+!----------
+!*** West
+!----------
+!
+ if(west_bc)then
+ is=lbound(BC_t1%west%q_BC,1)
+ ie=ubound(BC_t1%west%q_BC,1)
+ js=lbound(BC_t1%west%q_BC,2)
+ je=ubound(BC_t1%west%q_BC,2)
+!
+ i_x=ied !<-- Use column at
+ j_x=jsd+nhalo_model ! this location.
+!
+ p00=Atm%ptop !<-- Use layer interface pressures.
+!
+ w_loopk: do k=1,npz
+ if(p00<3000.)then !<-- Apply nudging only if pressure < 30 mb.
+ call get_q00
+ do j=js,je
+ do i=is,ie
+ BC_t1%west%q_BC(i,j,k,sphum_index)= & !<-- Nudge the west boundary sphum at time t1.
+ xt*(BC_t1%west%q_BC(i,j,k,sphum_index)+wt*q00)
+ enddo
+ enddo
+ else
+ exit w_loopk
+ endif
+ p00=p00+BC_t1%west%delp_BC(i_x,j_x,k)
+ enddo w_loopk
+ endif
+!
+!-----------------------------------------------------------------------
+!
+ contains
+!
+!-----------------------------------------------------------------------
+!
+ subroutine get_q00
+!
+!-----------------------------------------------------------------------
+!*** This is an internal subroutine of nudge_qv_bc that computes
+!*** the climatological contribution to the nudging of the input
+!*** specific humidity.
+!-----------------------------------------------------------------------
+!***********************************************************************
+!-----------------------------------------------------------------------
+!
+ if ( p00 < 30.E2 ) then
+ if ( p00 < 1. ) then
+ q00 = q1_h2o
+ elseif ( p00 <= 7. .and. p00 >= 1. ) then
+ q00 = q1_h2o + (q7_h2o-q1_h2o)*log(pref(k)/1.)/log(7.)
+ elseif ( p00 < 100. .and. p00 >= 7. ) then
+ q00 = q7_h2o + (q100_h2o-q7_h2o)*log(pref(k)/7.)/log(100./7.)
+ elseif ( p00 < 1000. .and. p00 >= 100. ) then
+ q00 = q100_h2o + (q1000_h2o-q100_h2o)*log(pref(k)/1.E2)/log(10.)
+ elseif ( p00 < 2000. .and. p00 >= 1000. ) then
+ q00 = q1000_h2o + (q2000_h2o-q1000_h2o)*log(pref(k)/1.E3)/log(2.)
+ else
+ q00 = q2000_h2o + (q3000_h2o-q2000_h2o)*log(pref(k)/2.E3)/log(1.5)
+ endif
+ endif
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine get_q00
+!
+!-----------------------------------------------------------------------
+!
+ end subroutine nudge_qv_bc
+!
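+!-----------------------------------------------------------------------
+! A worked example of the blend applied above: with wt=2. and
+! xt=1./(1.+wt)=1./3., the update q_new=xt*(q+wt*q00) equals
+! (q+2.*q00)/3., i.e. each application relaxes the boundary specific
+! humidity two-thirds of the way toward the climatological value q00.
+!-----------------------------------------------------------------------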
+!----------------------------------------------------------------------- +!----------------------------------------------------------------------- + subroutine dump_field_3d (domain, name, field, isd, ied, jsd, jed, nlev, stag) + + type(domain2d), intent(INOUT) :: domain + character(len=*), intent(IN) :: name + real, dimension(isd:ied,jsd:jed,1:nlev), intent(INOUT) :: field + integer, intent(IN) :: isd, ied, jsd, jed, nlev + integer, intent(IN) :: stag + + integer :: unit + character(len=128) :: fname + type(axistype) :: x, y, z + type(fieldtype) :: f + type(domain1D) :: xdom, ydom + integer :: nz + integer :: is, ie, js, je + integer :: isg, ieg, jsg, jeg, nxg, nyg, npx, npy + integer :: i, j, halo, iext, jext + logical :: is_root_pe + real, allocatable, dimension(:,:,:) :: glob_field + integer, allocatable, dimension(:) :: pelist + character(len=1) :: stagname + integer :: isection_s, isection_e, jsection_s, jsection_e + + write(fname,"(A,A,A,I1.1,A)") "regional_",name,".tile", 7 , ".nc" + write(0,*)'dump_field_3d: file name = |', trim(fname) , '|' + + call mpp_get_domain_components( domain, xdom, ydom ) + call mpp_get_compute_domain( domain, is, ie, js, je ) + call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=npx, ysize=npy, position=CENTER ) + + halo = is - isd + if ( halo /= 3 ) then + write(0,*) 'dusan- halo should be 3 ', halo + endif + + iext = 0 + jext = 0 + stagname = "h"; + if (stag == U_STAGGER) then + jext = 1 + stagname = "u"; + endif + if (stag == V_STAGGER) then + iext = 1 + stagname = "v"; + endif + + nxg = npx + 2*halo + iext + nyg = npy + 2*halo + jext + nz = size(field,dim=3) + + allocate( glob_field(isg-halo:ieg+halo+iext, jsg-halo:jeg+halo+jext, 1:nz) ) + + isection_s = is + isection_e = ie + jsection_s = js + jsection_e = je + + if ( isd < 0 ) isection_s = isd + if ( ied > npx-1 ) isection_e = ied + if ( jsd < 0 ) jsection_s = jsd + if ( jed > npy-1 ) jsection_e = jed + + allocate( pelist(mpp_npes()) ) + call mpp_get_current_pelist(pelist) + + is_root_pe = (mpp_pe()==mpp_root_pe()) + + call mpp_gather(isection_s,isection_e,jsection_s,jsection_e, nz, & + pelist, field(isection_s:isection_e,jsection_s:jsection_e,:), glob_field, is_root_pe, halo, halo) + + call mpp_open( unit, trim(fname), action=MPP_WRONLY, form=MPP_NETCDF, threading=MPP_SINGLE) + + call mpp_write_meta( unit, x, 'grid_xt', 'km', 'X distance', 'X', domain=xdom, data=(/(i*1.0,i=1,nxg)/) ) + call mpp_write_meta( unit, y, 'grid_yt', 'km', 'Y distance', 'Y', domain=ydom, data=(/(j*1.0,j=1,nyg)/) ) + call mpp_write_meta( unit, z, 'lev', 'km', 'Z distance', data=(/(i*1.0,i=1,nz)/) ) + + call mpp_write_meta( unit, f, (/x,y,z/), name, 'unit', name) + call mpp_write_meta( unit, "stretch_factor", rval=stretch_factor ) + call mpp_write_meta( unit, "target_lon", rval=target_lon ) + call mpp_write_meta( unit, "target_lat", rval=target_lat ) + call mpp_write_meta( unit, "cube_res", ival= cube_res) + call mpp_write_meta( unit, "parent_tile", ival=parent_tile ) + call mpp_write_meta( unit, "refine_ratio", ival=refine_ratio ) + call mpp_write_meta( unit, "istart_nest", ival=istart_nest ) + call mpp_write_meta( unit, "jstart_nest", ival=jstart_nest ) + call mpp_write_meta( unit, "iend_nest", ival=iend_nest ) + call mpp_write_meta( unit, "jend_nest", ival=jend_nest ) + call mpp_write_meta( unit, "ihalo_shift", ival=halo ) + call mpp_write_meta( unit, "jhalo_shift", ival=halo ) + call mpp_write_meta( unit, mpp_get_id(f), "hstagger", cval=stagname ) + call mpp_write( unit, x ) + call mpp_write( unit, y ) + 
call mpp_write( unit, z ) + call mpp_write( unit, f, glob_field ) + + call mpp_close( unit ) + + end subroutine dump_field_3d + + subroutine dump_field_2d (domain, name, field, isd, ied, jsd, jed, stag) + + type(domain2d), intent(INOUT) :: domain + character(len=*), intent(IN) :: name + real, dimension(isd:ied,jsd:jed), intent(INOUT) :: field + integer, intent(IN) :: isd, ied, jsd, jed + integer, intent(IN) :: stag + + integer :: unit + character(len=128) :: fname + type(axistype) :: x, y + type(fieldtype) :: f + type(domain1D) :: xdom, ydom + integer :: is, ie, js, je + integer :: isg, ieg, jsg, jeg, nxg, nyg, npx, npy + integer :: i, j, halo, iext, jext + logical :: is_root_pe + real, allocatable, dimension(:,:) :: glob_field + integer, allocatable, dimension(:) :: pelist + character(len=1) :: stagname + integer :: isection_s, isection_e, jsection_s, jsection_e + + write(fname,"(A,A,A,I1.1,A)") "regional_",name,".tile", 7 , ".nc" +! write(0,*)'dump_field_3d: file name = |', trim(fname) , '|' + + call mpp_get_domain_components( domain, xdom, ydom ) + call mpp_get_compute_domain( domain, is, ie, js, je ) + call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg, xsize=npx, ysize=npy, position=CENTER ) + + halo = is - isd + if ( halo /= 3 ) then + write(0,*) 'dusan- halo should be 3 ', halo + endif + + iext = 0 + jext = 0 + stagname = "h"; + if (stag == U_STAGGER) then + jext = 1 + stagname = "u"; + endif + if (stag == V_STAGGER) then + iext = 1 + stagname = "v"; + endif + + nxg = npx + 2*halo + iext + nyg = npy + 2*halo + jext + + allocate( glob_field(isg-halo:ieg+halo+iext, jsg-halo:jeg+halo+jext) ) + + isection_s = is + isection_e = ie + jsection_s = js + jsection_e = je + + if ( isd < 0 ) isection_s = isd + if ( ied > npx-1 ) isection_e = ied + if ( jsd < 0 ) jsection_s = jsd + if ( jed > npy-1 ) jsection_e = jed + + allocate( pelist(mpp_npes()) ) + call mpp_get_current_pelist(pelist) + + is_root_pe = (mpp_pe()==mpp_root_pe()) + + call mpp_gather(isection_s,isection_e,jsection_s,jsection_e, & + pelist, field(isection_s:isection_e,jsection_s:jsection_e), glob_field, is_root_pe, halo, halo) + + call mpp_open( unit, trim(fname), action=MPP_WRONLY, form=MPP_NETCDF, threading=MPP_SINGLE) + + call mpp_write_meta( unit, x, 'grid_xt', 'km', 'X distance', 'X', domain=xdom, data=(/(i*1.0,i=1,nxg)/) ) + call mpp_write_meta( unit, y, 'grid_yt', 'km', 'Y distance', 'Y', domain=ydom, data=(/(j*1.0,j=1,nyg)/) ) + + call mpp_write_meta( unit, f, (/x,y/), name, 'unit', name) + call mpp_write_meta( unit, "stretch_factor", rval=stretch_factor ) + call mpp_write_meta( unit, "target_lon", rval=target_lon ) + call mpp_write_meta( unit, "target_lat", rval=target_lat ) + call mpp_write_meta( unit, "cube_res", ival= cube_res) + call mpp_write_meta( unit, "parent_tile", ival=parent_tile ) + call mpp_write_meta( unit, "refine_ratio", ival=refine_ratio ) + call mpp_write_meta( unit, "istart_nest", ival=istart_nest ) + call mpp_write_meta( unit, "jstart_nest", ival=jstart_nest ) + call mpp_write_meta( unit, "iend_nest", ival=iend_nest ) + call mpp_write_meta( unit, "jend_nest", ival=jend_nest ) + call mpp_write_meta( unit, "ihalo_shift", ival=halo ) + call mpp_write_meta( unit, "jhalo_shift", ival=halo ) + call mpp_write_meta( unit, mpp_get_id(f), "hstagger", cval=stagname ) + call mpp_write( unit, x ) + call mpp_write( unit, y ) + call mpp_write( unit, f, glob_field ) + + call mpp_close( unit ) + + end subroutine dump_field_2d + +!--------------------------------------------------------------------- 
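+!
+! Example usage (illustrative only; any stag value other than
+! U_STAGGER or V_STAGGER, e.g. 0, selects the default cell-center
+! "h" stagger):
+!
+!   call dump_field_3d(domain,'delp',delp,isd,ied,jsd,jed,npz,0)
+!   call dump_field_2d(domain,'phis',phis,isd,ied,jsd,jed,0)
+!
+!---------------------------------------------------------------------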
+!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- + + subroutine exch_uv(domain, bd, npz, u, v) + use mpi + + implicit none + + type(domain2d), intent(inout) :: domain + type(fv_grid_bounds_type), intent(in) :: bd + integer, intent(in) :: npz + real, intent(inout) :: u (bd%isd:bd%ied ,bd%jsd:bd%jed+1,1:npz) + real, intent(inout) :: v (bd%isd:bd%ied+1,bd%jsd:bd%jed ,1:npz) + + integer,parameter :: ibufexch=2500000 + real,dimension(ibufexch) :: buf1,buf2,buf3,buf4 + integer :: ihandle1,ihandle2,ihandle3,ihandle4 + integer,dimension(MPI_STATUS_SIZE) :: istat + integer :: ic, i, j, k, is, ie, js, je + integer :: irecv, isend, ierr + + integer :: mype + integer :: north_pe, south_pe, east_pe, west_pe + + + mype = mpp_pe() + call mpp_get_neighbor_pe( domain, NORTH, north_pe) + call mpp_get_neighbor_pe( domain, SOUTH, south_pe) + call mpp_get_neighbor_pe( domain, WEST, west_pe) + call mpp_get_neighbor_pe( domain, EAST, east_pe) + + ! write(0,*) ' north_pe = ', north_pe + ! write(0,*) ' south_pe = ', south_pe + ! write(0,*) ' west_pe = ', west_pe + ! write(0,*) ' east_pe = ', east_pe + + is=bd%is + ie=bd%ie + js=bd%js + je=bd%je + +! FIXME: MPI_COMM_WORLD + + +! Receive from north + if( north_pe /= NULL_PE )then + call MPI_Irecv(buf1,ibufexch,MPI_REAL,north_pe,north_pe & + ,MPI_COMM_WORLD,ihandle1,irecv) + endif + +! Receive from south + if( south_pe /= NULL_PE )then + call MPI_Irecv(buf2,ibufexch,MPI_REAL,south_pe,south_pe & + ,MPI_COMM_WORLD,ihandle2,irecv) + endif + +! Send to north + if( north_pe /= NULL_PE )then + ic=0 + do k=1,npz + + do j=je-3+1,je-1+1 + do i=is-3,is-1 + ic=ic+1 + buf3(ic)=u(i,j,k) + enddo + do i=ie+1,ie+3 + ic=ic+1 + buf3(ic)=u(i,j,k) + enddo + enddo + + do j=je-2,je + do i=is-3,is-1 + ic=ic+1 + buf3(ic)=v(i,j,k) + enddo + do i=ie+1,ie+3 + ic=ic+1 + buf3(ic)=v(i,j,k) + enddo + enddo + + enddo + call MPI_Issend(buf3,ic,MPI_REAL,north_pe,mype & + ,MPI_COMM_WORLD,ihandle3,isend) + endif + +! Send to south + if( south_pe /= NULL_PE )then + ic=0 + do k=1,npz + + do j=js+2,js+3 + do i=is-3,is-1 + ic=ic+1 + buf4(ic)=u(i,j,k) + enddo + do i=ie+1,ie+3 + ic=ic+1 + buf4(ic)=u(i,j,k) + enddo + enddo + + do j=js+1,js+2 + do i=is-3,is-1 + ic=ic+1 + buf4(ic)=v(i,j,k) + enddo + do i=ie+1,ie+3 + ic=ic+1 + buf4(ic)=v(i,j,k) + enddo + enddo + + enddo + call MPI_Issend(buf4,ic,MPI_REAL,south_pe,mype & + ,MPI_COMM_WORLD,ihandle4,isend) + endif + +! Store from south + if( south_pe /= NULL_PE )then + ic=0 + call MPI_Wait(ihandle2,istat,ierr) + do k=1,npz + + do j=js-3,js-1 + do i=is-3,is-1 + ic=ic+1 + u(i,j,k)=buf2(ic) + enddo + do i=ie+1,ie+3 + ic=ic+1 + u(i,j,k)=buf2(ic) + enddo + enddo + + do j=js-3,js-1 + do i=is-3,is-1 + ic=ic+1 + v(i,j,k)=buf2(ic) + enddo + do i=ie+1,ie+3 + ic=ic+1 + v(i,j,k)=buf2(ic) + enddo + enddo + + enddo + endif + +! 
Store from north + if( north_pe /= NULL_PE )then + ic=0 + call MPI_Wait(ihandle1,istat,ierr) + do k=1,npz + + do j=je+2+1,je+3+1 + do i=is-3,is-1 + ic=ic+1 + u(i,j,k)=buf1(ic) + enddo + do i=ie+1,ie+3 + ic=ic+1 + u(i,j,k)=buf1(ic) + enddo + enddo + + do j=je+2,je+3 + do i=is-3,is-1 + ic=ic+1 + v(i,j,k)=buf1(ic) + enddo + do i=ie+1,ie+3 + ic=ic+1 + v(i,j,k)=buf1(ic) + enddo + enddo + + enddo + endif + + end subroutine exch_uv + +!--------------------------------------------------------------------- +!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +!--------------------------------------------------------------------- + +end module fv_regional_mod + +!--------------------------------------------------------------------- diff --git a/model/fv_sg.F90 b/model/fv_sg.F90 index 7f611ab57..d283d746d 100644 --- a/model/fv_sg.F90 +++ b/model/fv_sg.F90 @@ -26,7 +26,7 @@ module fv_sg_mod use constants_mod, only: rdgas, rvgas, cp_air, cp_vapor, hlv, hlf, kappa, grav use tracer_manager_mod, only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS - use lin_cld_microphys_mod, only: wqs2, wqsat2_moist + use gfdl_cloud_microphys_mod, only: wqs1, wqs2, wqsat2_moist use fv_mp_mod, only: mp_reduce_min, is_master implicit none @@ -59,16 +59,12 @@ module fv_sg_mod real, parameter:: t2_max = 315. real, parameter:: t3_max = 325. real, parameter:: Lv0 = hlv0 - dc_vap*t_ice ! = 3.147782e6 - real, parameter:: Li0 = hlf0 - dc_ice*t_ice ! = -2.431928e5 + real, parameter:: Li0 = hlf0 - dc_ice*t_ice ! = -2.431928e5 real, parameter:: zvir = rvgas/rdgas - 1. ! = 0.607789855 real, allocatable:: table(:),des(:) real:: lv00, d0_vap -!---- version number ----- - character(len=128) :: version = '$Id: fv_sg.F90,v 17.0.2.4.2.3.2.6.2.10.4.1 2014/11/12 03:46:32 Lucas.Harris Exp $' - character(len=128) :: tagname = '$Name: $' - contains @@ -82,25 +78,25 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & integer, intent(in):: isd, ied, jsd, jed integer, intent(in):: tau ! Relaxation time scale real, intent(in):: dt ! model time step - real, intent(in):: pe(is-1:ie+1,km+1,js-1:je+1) + real, intent(in):: pe(is-1:ie+1,km+1,js-1:je+1) real, intent(in):: peln(is :ie, km+1,js :je) real, intent(in):: delp(isd:ied,jsd:jed,km) ! Delta p at each model level - real, intent(in):: delz(isd:,jsd:,1:) ! Delta z at each model level + real, intent(in):: delz(is:,js:,1:) ! Delta z at each model level real, intent(in):: pkz(is:ie,js:je,km) logical, intent(in):: hydrostatic integer, intent(in), optional:: k_bot -! +! real, intent(inout):: ua(isd:ied,jsd:jed,km) real, intent(inout):: va(isd:ied,jsd:jed,km) real, intent(inout):: w(isd:,jsd:,1:) real, intent(inout):: ta(isd:ied,jsd:jed,km) ! Temperature real, intent(inout):: qa(isd:ied,jsd:jed,km,nq) ! 
Specific humidity & tracers - real, intent(inout):: u_dt(isd:ied,jsd:jed,km) - real, intent(inout):: v_dt(isd:ied,jsd:jed,km) - real, intent(inout):: t_dt(is:ie,js:je,km) + real, intent(inout):: u_dt(isd:ied,jsd:jed,km) + real, intent(inout):: v_dt(isd:ied,jsd:jed,km) + real, intent(inout):: t_dt(is:ie,js:je,km) !---------------------------Local variables----------------------------- real, dimension(is:ie,km):: u0, v0, w0, t0, hd, te, gz, tvm, pm, den - real q0(is:ie,km,nq), qcon(is:ie,km) + real q0(is:ie,km,nq), qcon(is:ie,km) real, dimension(is:ie):: gzh, lcp2, icp2, cvm, cpm, qs real ri_ref, ri, pt1, pt2, ratio, tv, cv, tmp, q_liq, q_sol real tv1, tv2, g2, h0, mc, fra, rk, rz, rdt, tvd, tv_surf @@ -166,7 +162,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & !$OMP private(kk,lcp2,icp2,tcp3,dh,dq,den,qs,qsw,dqsdt,qcon,q0, & !$OMP t0,u0,v0,w0,h0,pm,gzh,tvm,tmp,cpm,cvm,q_liq,q_sol, & !$OMP tv,gz,hd,te,ratio,pt1,pt2,tv1,tv2,ri_ref, ri,mc,km1) - do 1000 j=js,je + do 1000 j=js,je do iq=1, nq do k=1,kbot @@ -219,7 +215,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & enddo elseif ( nwat==3 ) then do i=is,ie - q_liq = q0(i,k,liq_wat) + q_liq = q0(i,k,liq_wat) q_sol = q0(i,k,ice_wat) cpm(i) = (1.-(q0(i,k,sphum)+q_liq+q_sol))*cp_air + q0(i,k,sphum)*cp_vapor + q_liq*c_liq + q_sol*c_ice cvm(i) = (1.-(q0(i,k,sphum)+q_liq+q_sol))*cv_air + q0(i,k,sphum)*cv_vap + q_liq*c_liq + q_sol*c_ice @@ -314,7 +310,7 @@ subroutine fv_subgrid_z( isd, ied, jsd, jed, is, ie, js, je, km, nq, dt, & ! top layer unphysically warm ri = 0. elseif ( tv2 0. ) then dq = min(-qv(i,j,k)*dp(i,j,k), qv(i,j,k-1)*dp(i,j,k-1)) - qv(i,j,k-1) = qv(i,j,k-1) - dq/dp(i,j,k-1) - qv(i,j,k ) = qv(i,j,k ) + dq/dp(i,j,k ) + qv(i,j,k-1) = qv(i,j,k-1) - dq/dp(i,j,k-1) + qv(i,j,k ) = qv(i,j,k ) + dq/dp(i,j,k ) endif if( qv(i,j,k) < 0. ) then qv(i,j,k+1) = qv(i,j,k+1) + qv(i,j,k)*dp(i,j,k)/dp(i,j,k+1) @@ -1417,7 +1413,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo enddo enddo - + ! Bottom layer; Borrow from above !$OMP parallel do default(none) shared(is,ie,js,je,kbot,qv,dp) private(dq) do j=js, je @@ -1427,8 +1423,8 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & if ( qv(i,j,kbot)>=0. ) goto 123 if ( qv(i,j,k) > 0. ) then dq = min(-qv(i,j,kbot)*dp(i,j,kbot), qv(i,j,k)*dp(i,j,k)) - qv(i,j,k ) = qv(i,j,k ) - dq/dp(i,j,k) - qv(i,j,kbot) = qv(i,j,kbot) + dq/dp(i,j,kbot) + qv(i,j,k ) = qv(i,j,k ) - dq/dp(i,j,k) + qv(i,j,kbot) = qv(i,j,kbot) + dq/dp(i,j,kbot) endif enddo ! k-loop 123 continue @@ -1436,7 +1432,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo ! i-loop enddo ! j-loop - + if (present(qa)) then !----------------------------------- ! Fix negative cloud fraction @@ -1453,7 +1449,7 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & enddo enddo enddo - + ! Bottom layer; Borrow from above !$OMP parallel do default(none) shared(is,ie,js,je,qa,kbot,dp) & !$OMP private(dq) @@ -1461,8 +1457,8 @@ subroutine neg_adj3(is, ie, js, je, ng, kbot, hydrostatic, & do i=is, ie if( qa(i,j,kbot) < 0. .and. qa(i,j,kbot-1)>0.) then dq = min(-qa(i,j,kbot)*dp(i,j,kbot), qa(i,j,kbot-1)*dp(i,j,kbot-1)) - qa(i,j,kbot-1) = qa(i,j,kbot-1) - dq/dp(i,j,kbot-1) - qa(i,j,kbot ) = qa(i,j,kbot ) + dq/dp(i,j,kbot ) + qa(i,j,kbot-1) = qa(i,j,kbot-1) - dq/dp(i,j,kbot-1) + qa(i,j,kbot ) = qa(i,j,kbot ) + dq/dp(i,j,kbot ) endif ! 
if qa is still < 0 qa(i,j,kbot) = max(0., qa(i,j,kbot)) diff --git a/model/fv_tracer2d.F90 b/model/fv_tracer2d.F90 index e99fc049b..c86eb6c0a 100644 --- a/model/fv_tracer2d.F90 +++ b/model/fv_tracer2d.F90 @@ -21,12 +21,14 @@ module fv_tracer2d_mod use tp_core_mod, only: fv_tp_2d, copy_corners use fv_mp_mod, only: mp_reduce_max - use fv_mp_mod, only: ng, mp_gather, is_master + use fv_mp_mod, only: mp_gather, is_master use fv_mp_mod, only: group_halo_update_type use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update use mpp_domains_mod, only: mpp_update_domains, CGRID_NE, domain2d use fv_timing_mod, only: timing_on, timing_off use boundary_mod, only: nested_grid_BC_apply_intT + use fv_regional_mod, only: regional_boundary_update + use fv_regional_mod, only: current_time_in_seconds use fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_atmos_type, fv_grid_bounds_type use mpp_mod, only: mpp_error, FATAL, mpp_broadcast, mpp_send, mpp_recv, mpp_sum, mpp_max @@ -37,10 +39,6 @@ module fv_tracer2d_mod real, allocatable, dimension(:,:,:) :: nest_fx_west_accum, nest_fx_east_accum, nest_fx_south_accum, nest_fx_north_accum -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains !----------------------------------------------------------------------- @@ -105,10 +103,10 @@ subroutine tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, n rarea => gridstruct%rarea sin_sg => gridstruct%sin_sg - dxa => gridstruct%dxa - dya => gridstruct%dya - dx => gridstruct%dx - dy => gridstruct%dy + dxa => gridstruct%dxa + dya => gridstruct%dya + dx => gridstruct%dx + dy => gridstruct%dy !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & !$OMP sin_sg,cy,yfx,dya,dx,cmax) @@ -329,12 +327,12 @@ subroutine tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, rarea => gridstruct%rarea sin_sg => gridstruct%sin_sg - dxa => gridstruct%dxa - dya => gridstruct%dya - dx => gridstruct%dx - dy => gridstruct%dy + dxa => gridstruct%dxa + dya => gridstruct%dya + dx => gridstruct%dx + dy => gridstruct%dy -!$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & +!$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & !$OMP sin_sg,cy,yfx,dya,dx,cmax,q_split,ksplt) do k=1,npz do j=jsd,jed @@ -513,7 +511,7 @@ end subroutine tracer_2d subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, & nq, hord, q_split, dt, id_divg, q_pack, nord_tr, trdm, & - k_split, neststruct, parent_grid) + k_split, neststruct, parent_grid, n_map) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: npx @@ -521,7 +519,7 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np integer, intent(IN) :: npz integer, intent(IN) :: nq ! number of tracers to be advected integer, intent(IN) :: hord, nord_tr - integer, intent(IN) :: q_split, k_split + integer, intent(IN) :: q_split, k_split, n_map integer, intent(IN) :: id_divg real , intent(IN) :: dt, trdm type(group_halo_update_type), intent(inout) :: q_pack @@ -533,7 +531,7 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz) ! 
Courant Number Y-Dir type(fv_grid_type), intent(IN), target :: gridstruct type(fv_nest_type), intent(INOUT) :: neststruct - type(fv_atmos_type), intent(INOUT) :: parent_grid + type(fv_atmos_type), pointer, intent(IN) :: parent_grid type(domain2d), intent(INOUT) :: domain ! Local Arrays @@ -548,6 +546,7 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np real :: cmax_t real :: c_global real :: frac, rdt + real :: reg_bc_update_time integer :: nsplt, nsplt_parent, msg_split_steps = 1 integer :: i,j,k,it,iq @@ -571,10 +570,10 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np rarea => gridstruct%rarea sin_sg => gridstruct%sin_sg - dxa => gridstruct%dxa - dya => gridstruct%dya - dx => gridstruct%dx - dy => gridstruct%dy + dxa => gridstruct%dxa + dya => gridstruct%dya + dx => gridstruct%dx + dy => gridstruct%dy !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & !$OMP sin_sg,cy,yfx,dya,dx) @@ -684,7 +683,7 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np call complete_group_halo_update(q_pack, domain) call timing_off('COMM_TRACER') call timing_off('COMM_TOTAL') - + if (gridstruct%nested) then do iq=1,nq call nested_grid_BC_apply_intT(q(isd:ied,jsd:jed,:,iq), & @@ -694,6 +693,19 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np enddo endif + if (gridstruct%regional) then + !This is more accurate than the nested BC calculation + ! since it takes into account varying nsplit + reg_bc_update_time=current_time_in_seconds+(real(n_map-1) + real(it-1)/frac)*dt + do iq=1,nq + call regional_boundary_update(q(:,:,:,iq), 'q', & + isd, ied, jsd, jed, npz, & + is, ie, js, je, & + isd, ied, jsd, jed, & + reg_bc_update_time, & + iq ) + enddo + endif !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq, & !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm) & @@ -744,19 +756,6 @@ subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, np call timing_off('COMM_TRACER') call timing_off('COMM_TOTAL') endif - !Apply nested-grid BCs - if ( gridstruct%nested ) then - do iq=1,nq - - - call nested_grid_BC_apply_intT(q(isd:ied,jsd:jed,:,iq), & - 0, 0, npx, npy, npz, bd, & - real(neststruct%tracer_nest_timestep), real(nsplt*k_split), & - neststruct%q_BC(iq), bctype=neststruct%nestbctype ) - - end do - end if - enddo ! 
nsplt diff --git a/model/fv_update_phys.F90 b/model/fv_update_phys.F90 index 614abbf5a..c72520cfe 100644 --- a/model/fv_update_phys.F90 +++ b/model/fv_update_phys.F90 @@ -20,22 +20,22 @@ !*********************************************************************** module fv_update_phys_mod - use constants_mod, only: kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius + use constants_mod, only: kappa, rdgas, rvgas, grav, cp_air, cp_vapor, pi=>pi_8, radius, TFREEZE use field_manager_mod, only: MODEL_ATMOS use mpp_domains_mod, only: mpp_update_domains, domain2d use mpp_parameter_mod, only: AGRID_PARAM=>AGRID use mpp_mod, only: FATAL, mpp_error - use mpp_mod, only: mpp_error, NOTE, WARNING + use mpp_mod, only: mpp_error, NOTE, WARNING, mpp_pe use time_manager_mod, only: time_type use tracer_manager_mod, only: get_tracer_index, adjust_mass, get_tracer_names use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update use fv_mp_mod, only: group_halo_update_type - use fv_arrays_mod, only: fv_flags_type, fv_nest_type, R_GRID + use fv_arrays_mod, only: fv_flags_type, fv_nest_type, R_GRID, phys_diag_type use boundary_mod, only: nested_grid_BC use boundary_mod, only: extrapolation_BC use fv_eta_mod, only: get_eta_level use fv_timing_mod, only: timing_on, timing_off - use fv_diagnostics_mod, only: prt_maxmin + use fv_diagnostics_mod, only: prt_maxmin, range_check use fv_mapz_mod, only: moist_cv, moist_cp #if defined (ATMOS_NUDGE) use atmos_nudge_mod, only: get_atmos_nudge, do_ps @@ -47,18 +47,13 @@ module fv_update_phys_mod use fv_nwp_nudge_mod, only: fv_nwp_nudge #endif use fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_grid_bounds_type - use fv_grid_utils_mod, only: cubed_to_latlon + use fv_grid_utils_mod, only: cubed_to_latlon, update_dwinds_phys, update2d_dwinds_phys + use fv_nesting_mod, only: set_physics_BCs + use sat_vapor_pres_mod, only: tcmin, tcmax implicit none public :: fv_update_phys, del2_phys -#ifdef ROT3 - public :: update_dwinds_phys -#endif - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' real,parameter:: con_cp = cp_air contains @@ -68,11 +63,11 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ak, bk, phis, u_srf, v_srf, ts, delz, hydrostatic, & u_dt, v_dt, t_dt, moist_phys, Time, nudge, & gridstruct, lona, lata, npx, npy, npz, flagstruct, & - neststruct, bd, domain, ptop, q_dt) + neststruct, bd, domain, ptop, phys_diag, q_dt) real, intent(in) :: dt, ptop integer, intent(in):: is, ie, js, je, ng integer, intent(in):: isd, ied, jsd, jed - integer, intent(in):: nq ! tracers modified by physics + integer, intent(in):: nq ! tracers modified by physics ! ncnst is the total nmber of tracers logical, intent(in):: moist_phys logical, intent(in):: hydrostatic @@ -82,7 +77,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, intent(in), dimension(npz+1):: ak, bk real, intent(in) :: phis(isd:ied,jsd:jed) - real, intent(inout):: delz(isd:,jsd:,1:) + real, intent(inout):: delz(is:,js:,1:) ! optional arguments for atmospheric nudging real, intent(in), dimension(isd:ied,jsd:jed), optional :: & @@ -96,6 +91,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt real, intent(inout):: t_dt(is:ie,js:je,npz) real, intent(inout), optional :: q_dt(is:ie,js:je,npz,nq) + type(phys_diag_type), intent(inout) :: phys_diag ! 
Saved Bottom winds for GFDL Physics Interface real, intent(out), dimension(is:ie,js:je):: u_srf, v_srf, ts @@ -111,7 +107,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, real, intent(inout):: qdiag(isd:ied,jsd:jed,npz,nq+1:flagstruct%ncnst) ! diagnostic tracers !----------------------------------------------------------------------- -! Auxilliary pressure arrays: +! Auxilliary pressure arrays: ! The 5 vars below can be re-computed from delp and ptop. !----------------------------------------------------------------------- ! dyn_aux: @@ -125,6 +121,8 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, type(fv_grid_type) :: gridstruct type(fv_nest_type) :: neststruct + real :: q_dt_nudge(is:ie,js:je,npz,nq) + integer, intent(IN) :: npx, npy, npz !*********** @@ -145,14 +143,13 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, type(group_halo_update_type), save :: i_pack(2) integer i, j, k, m, n, nwat integer sphum, liq_wat, ice_wat, cld_amt ! GFDL AM physics - integer rainwat, snowwat, graupel ! Lin Micro-physics + integer rainwat, snowwat, graupel ! GFDL Cloud Microphysics integer w_diff ! w-tracer for PBL diffusion - real:: qstar, dbk, rdg, zvir, p_fac, cv_air, gama_dt + real:: qstar, dbk, rdg, zvir, p_fac, cv_air, gama_dt, tbad + logical :: bad_range - real, dimension(1,1,1) :: parent_u_dt, parent_v_dt ! dummy variables for nesting - -!f1p -!account for change in air molecular weight because of H2O change +!f1p +!account for change in air molecular weight because of H2O change logical, dimension(nq) :: conv_vmr_mmr real :: adj_vmr(is:ie,js:je,npz) character(len=32) :: tracer_units, tracer_name @@ -182,7 +179,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, end if end do end if - + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') @@ -218,13 +215,36 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, call get_eta_level(npz, 1.0E5, pfull, phalf, ak, bk) + if (size(neststruct%child_grids) > 1) then + call set_physics_BCs(ps, u_dt, v_dt, flagstruct, gridstruct, neststruct, npx, npy, npz, ng, ak, bk, bd) + endif + + if (allocated(phys_diag%phys_t_dt)) phys_diag%phys_t_dt = pt(is:ie,js:je,:) + if (present(q_dt)) then + if (allocated(phys_diag%phys_qv_dt)) phys_diag%phys_qv_dt = q(is:ie,js:je,:,sphum) + if (allocated(phys_diag%phys_ql_dt)) then + if (liq_wat < 0) call mpp_error(FATAL, " phys_ql_dt needs at least one liquid water tracer defined") + phys_diag%phys_ql_dt = q(is:ie,js:je,:,liq_wat) + if (rainwat > 0) phys_diag%phys_ql_dt = q(is:ie,js:je,:,rainwat) + phys_diag%phys_ql_dt + endif + if (allocated(phys_diag%phys_qi_dt)) then + if (ice_wat < 0) then + call mpp_error(WARNING, " phys_qi_dt needs at least one ice water tracer defined") + phys_diag%phys_qi_dt = 0. 
+          else
+             phys_diag%phys_qi_dt = q(is:ie,js:je,:,ice_wat)
+             if (snowwat > 0) phys_diag%phys_qi_dt = q(is:ie,js:je,:,snowwat) + phys_diag%phys_qi_dt
+             if (graupel > 0) phys_diag%phys_qi_dt = q(is:ie,js:je,:,graupel) + phys_diag%phys_qi_dt
+          endif
+       endif
+    endif

 !$OMP parallel do default(none) &
 !$OMP shared(is,ie,js,je,npz,flagstruct,pfull,q_dt,sphum,q,qdiag, &
 !$OMP        nq,w_diff,dt,nwat,liq_wat,rainwat,ice_wat,snowwat, &
-!$OMP        graupel,delp,cld_amt,hydrostatic,pt,t_dt,delz,adj_vmr,&
-!$OMP        gama_dt,cv_air,ua,u_dt,va,v_dt,isd,ied,jsd,jed, &
-!$OMP        conv_vmr_mmr) &
-!$OMP private(cvm, qc, qstar, ps_dt, p_fac)
+!$OMP        graupel,delp,cld_amt,hydrostatic,pt,t_dt,delz,adj_vmr,&
+!$OMP        gama_dt,cv_air,ua,u_dt,va,v_dt,isd,ied,jsd,jed, &
+!$OMP        conv_vmr_mmr,pe,ptop,gridstruct,phys_diag) &
+!$OMP private(cvm, qc, qstar, ps_dt, p_fac, tbad)
   do k=1, npz

     if (present(q_dt)) then
@@ -272,7 +292,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,
 ! Update tracers:
 !----------------
       do m=1,nq
-        if( m /= w_diff ) then
+        if( m /= w_diff ) then
           do j=js,je
             do i=is,ie
               q(i,j,k,m) = q(i,j,k,m) + dt*q_dt(i,j,k,m)
@@ -297,12 +317,12 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,
       enddo

 !-----------------------------------------
-! Adjust mass mixing ratio of all tracers
+! Adjust mass mixing ratio of all tracers
 !-----------------------------------------
       if ( nwat /=0 ) then
         do m=1,flagstruct%ncnst
 !-- check to query field_table to determine if tracer needs mass adjustment
-          if( m /= cld_amt .and. m /= w_diff .and. adjust_mass(MODEL_ATMOS,m)) then
+          if( m /= cld_amt .and. m /= w_diff .and. adjust_mass(MODEL_ATMOS,m)) then
             if (m <= nq) then
               q(is:ie,js:je,k,m) = q(is:ie,js:je,k,m) / ps_dt(is:ie,js:je)
               if (conv_vmr_mmr(m)) &
@@ -339,7 +359,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,
         enddo
       enddo
     else
-!NOTE: only works for either no physics or Lin MP
+!NOTE: only works for either no physics or GFDL MP
       if (nwat == 0) then
         do j=js,je
           do i=is,ie
@@ -370,6 +390,43 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,
   enddo ! k-loop

+   if (allocated(phys_diag%phys_t_dt)) phys_diag%phys_t_dt = (pt(is:ie,js:je,:) - phys_diag%phys_t_dt) / dt
+   if (present(q_dt)) then
+      if (allocated(phys_diag%phys_qv_dt)) phys_diag%phys_qv_dt = (q(is:ie,js:je,:,sphum) - phys_diag%phys_qv_dt) / dt
+      if (allocated(phys_diag%phys_ql_dt)) then
+         if (liq_wat < 0) call mpp_error(FATAL, " phys_ql_dt needs at least one liquid water tracer defined")
+         phys_diag%phys_ql_dt = q(is:ie,js:je,:,liq_wat) - phys_diag%phys_ql_dt
+         if (rainwat > 0) phys_diag%phys_ql_dt = q(is:ie,js:je,:,rainwat) + phys_diag%phys_ql_dt
+         phys_diag%phys_ql_dt = phys_diag%phys_ql_dt / dt
+      endif
+      if (allocated(phys_diag%phys_qi_dt)) then
+         if (ice_wat < 0) then
+            call mpp_error(WARNING, " phys_qi_dt needs at least one ice water tracer defined")
+            phys_diag%phys_qi_dt = 0.
+         else
+            phys_diag%phys_qi_dt = q(is:ie,js:je,:,ice_wat) - phys_diag%phys_qi_dt
+            if (snowwat > 0) phys_diag%phys_qi_dt = q(is:ie,js:je,:,snowwat) + phys_diag%phys_qi_dt
+            if (graupel > 0) phys_diag%phys_qi_dt = q(is:ie,js:je,:,graupel) + phys_diag%phys_qi_dt
+            phys_diag%phys_qi_dt = phys_diag%phys_qi_dt / dt
+         endif
+      endif
+   endif
+
+   if ( flagstruct%range_warn ) then
+      call range_check('PT UPDATE', pt, is, ie, js, je, ng, npz, gridstruct%agrid, &
+                        tcmin+TFREEZE, tcmax+TFREEZE, bad_range, Time)
+      if (bad_range) then
+         do k=1,npz
+            do j=js,je
+               do i=is,ie
+                  if (pt(i,j,k) < tcmin+TFREEZE .or. &
pt(i,j,k) > tcmax+TFREEZE) then + write(*,*) 'PT UPDATE: ', t_dt(i,j,k)*dt, i,j,k, gridstruct%agrid(i,j,:) + endif + enddo + enddo + enddo + endif + endif + ! [delp, (ua, va), pt, q] updated. Perform nudging if requested !------- nudging of atmospheric variables toward specified data -------- @@ -383,9 +440,9 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, call get_atmos_nudge ( Time, dt, is, ie, js, je, & npz, ng, ps(is:ie,js:je), ua(is:ie, js:je,:), & va(is:ie,js:je,:), pt(is:ie,js:je,:), & - q(is:ie,js:je,:,:), ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), & + q(is:ie,js:je,:,:), ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), & v_dt(is:ie,js:je,:), t_dt(is:ie,js:je,:), & - q_dt(is:ie,js:je,:,:) ) + q_dt_nudge(is:ie,js:je,:,:) ) !-------------- ! Update delp @@ -412,7 +469,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, pt(is:ie,js:je,:), q(is:ie,js:je,:,sphum:sphum), & ps_dt(is:ie,js:je), u_dt(is:ie,js:je,:), & v_dt(is:ie,js:je,:), t_dt(is:ie,js:je,:), & - q_dt(is:ie,js:je,:,sphum:sphum) ) + q_dt_nudge(is:ie,js:je,:,sphum:sphum) ) !-------------- ! Update delp @@ -441,14 +498,14 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ps(i,j) = pe(i,npz+1,j) enddo enddo - call fv_ada_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, & + call fv_ada_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt_nudge, & zvir, ptop, ak, bk, ts, ps, delp, ua, va, pt, & nwat, q, phis, gridstruct, bd, domain ) #else ! All fields will be updated except winds; wind tendencies added !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ps) do j=js,je - do k=2,npz+1 + do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) enddo @@ -457,22 +514,23 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq, ps(i,j) = pe(i,npz+1,j) enddo enddo - call fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, & + call fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt_nudge, & zvir, ptop, ak, bk, ts, ps, delp, ua, va, pt, & nwat, q, phis, gridstruct, bd, domain ) #endif - endif ! end nudging + + endif ! end nudging if ( .not.flagstruct%dwind_2d ) then call timing_on('COMM_TOTAL') - if ( gridstruct%square_domain ) then + if ( gridstruct%square_domain ) then call start_group_halo_update(i_pack(1), u_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, whalo=1, ehalo=1, shalo=1, nhalo=1, complete=.true.) else call start_group_halo_update(i_pack(1), u_dt, domain, complete=.false.) call start_group_halo_update(i_pack(1), v_dt, domain, complete=.true.) 
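! The grouped halo updates above are posted asynchronously and completed later
! with complete_group_halo_update, so the D-grid wind interpolation can overlap
! communication. A hedged sketch of the post/compute/complete pattern, reusing
! only interfaces this module already imports from fv_mp_mod:
!
!     call start_group_halo_update(i_pack(1), u_dt, domain, complete=.false.)
!     call start_group_halo_update(i_pack(1), v_dt, domain, complete=.true.)
!     ! ... halo-independent work on the compute domain ...
!     call complete_group_halo_update(i_pack(1), domain)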
-      endif
+      endif
       call timing_off('COMM_TOTAL')
    endif

@@ -487,7 +545,7 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,
 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,peln,pk,ps,u_srf,v_srf, &
 !$OMP                                  ua,va,pkz,hydrostatic)
    do j=js,je
-      do k=2,npz+1
+      do k=2,npz+1
          do i=is,ie
             pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1)
             peln(i,k,j) = log( pe(i,k,j) )
@@ -522,34 +580,98 @@ subroutine fv_update_phys ( dt, is, ie, js, je, isd, ied, jsd, jed, ng, nq,

       call complete_group_halo_update(i_pack(1), domain)

-      if (size(neststruct%child_grids) > 1) then
-         if (gridstruct%nested) then
-            call nested_grid_BC(u_dt, parent_u_dt, neststruct%nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
-                 npx, npy, npz, bd, 1, npx-1, 1, npy-1)
-            call nested_grid_BC(v_dt, parent_v_dt, neststruct%nest_domain, neststruct%ind_h, neststruct%wt_h, 0, 0, &
-                 npx, npy, npz, bd, 1, npx-1, 1, npy-1)
-         endif
-         do n=1,size(neststruct%child_grids)
-            if (neststruct%child_grids(n)) then
-               call nested_grid_BC(u_dt, neststruct%nest_domain_all(n), 0, 0)
-               call nested_grid_BC(v_dt, neststruct%nest_domain_all(n), 0, 0)
-            endif
-         enddo
-      endif
-
       call timing_off('COMM_TOTAL')
+!
+! For the regional grid we need to set values for u_dt and v_dt at the edges.
+! Note from Lucas: The physics only operates on the compute domain.
+! One snag is that in fv_update_phys.F90 u_dt and v_dt from the physics need to be interpolated to the D-grids,
+! which requires BCs for u_dt and v_dt. For the nested grid I can simply get the BCs from the coarse grid, but
+! in your case I would recommend just setting the boundary conditions to 0 or to constant values (i.e. the value
+! of the cell closest to the boundary).
+      if (gridstruct%regional) then
+         if (is == 1) then
+            do k=1,npz
+               do j = js,je
+                  u_dt(is-1,j,k) = u_dt(is,j,k)
+                  v_dt(is-1,j,k) = v_dt(is,j,k)
+               enddo
+            enddo
+         endif
+         if (ie == npx) then
+            do k=1,npz
+               do j = js,je
+                  u_dt(ie+1,j,k) = u_dt(ie,j,k)
+                  v_dt(ie+1,j,k) = v_dt(ie,j,k)
+               enddo
+            enddo
+         endif
+         if (js == 1) then
+            do k=1,npz
+               do i = is,ie
+                  u_dt(i,js-1,k) = u_dt(i,js,k)
+                  v_dt(i,js-1,k) = v_dt(i,js,k)
+               enddo
+            enddo
+         endif
+         if (je == npy) then
+            do k=1,npz
+               do i = is,ie
+                  u_dt(i,je+1,k) = u_dt(i,je,k)
+                  v_dt(i,je+1,k) = v_dt(i,je,k)
+               enddo
+            enddo
+         endif
+!
+! corners
+!
+         do k=1,npz
+            if (is == 1 .and. js == 1) then
+               u_dt(is-1,js-1,k) = u_dt(is,js,k)
+               v_dt(is-1,js-1,k) = v_dt(is,js,k)
+            elseif (is == 1 .and. je == npy) then
+               u_dt(is-1,je+1,k) = u_dt(is,je,k)
+               v_dt(is-1,je+1,k) = v_dt(is,je,k)
+            elseif (ie == npx .and. js == 1) then
+               u_dt(ie+1,js-1,k) = u_dt(ie,js,k)
+               v_dt(ie+1,js-1,k) = v_dt(ie,js,k)
+            elseif (ie == npx .and. je == npy) then
+               u_dt(ie+1,je+1,k) = u_dt(ie,je,k)
+               v_dt(ie+1,je+1,k) = v_dt(ie,je,k)
+            endif
+         enddo
+      endif !regional
+!
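! The regional-edge fill above is a zeroth-order extrapolation: each halo cell
! copies its nearest interior neighbour, per the note from Lucas. A standalone
! 1-D sketch of the same idea; the routine name and shapes are illustrative:

subroutine fill_halo_1d(q, n, nh)
   ! Copy the nearest interior value into nh halo cells on each side.
   integer, intent(in)    :: n, nh
   real,    intent(inout) :: q(1-nh:n+nh)
   integer :: i
   do i = 1, nh
      q(1-i) = q(1)      ! west halo <- first interior cell
      q(n+i) = q(n)      ! east halo <- last interior cell
   enddo
end subroutine fill_halo_1d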
call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) endif call timing_off(' Update_dwinds') #ifdef GFS_PHYS call cubed_to_latlon(u, v, ua, va, gridstruct, & - npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%nested, flagstruct%c2l_ord, bd) + npx, npy, npz, 1, gridstruct%grid_type, domain, gridstruct%bounded_domain, flagstruct%c2l_ord, bd) #endif if ( flagstruct%fv_debug ) then call prt_maxmin('PS_a_update', ps, is, ie, js, je, ng, 1, 0.01) endif + if (allocated(phys_diag%phys_u_dt)) then + do k=1,npz + do j=js,je + do i=is,ie + phys_diag%phys_u_dt(i,j,k) = u_dt(i,j,k) + enddo + enddo + enddo + endif + if (allocated(phys_diag%phys_v_dt)) then + do k=1,npz + do j=js,je + do i=is,ie + phys_diag%phys_v_dt(i,j,k) = v_dt(i,j,k) + enddo + enddo + enddo + endif + end subroutine fv_update_phys @@ -563,7 +685,7 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & real, intent(inout):: qdt(is-ngc:ie+ngc,js-ngc:je+ngc,km) type(fv_grid_type), intent(IN), target :: gridstruct type(domain2d), intent(INOUT) :: domain - + real, pointer, dimension(:,:) :: rarea, dx, dy, sina_u, sina_v, rdxc, rdyc real, pointer, dimension(:,:,:) :: sin_sg ! @@ -625,15 +747,15 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & (mask(i,j)+mask(i,j+1))*dy(i,j)*sina_u(i,j)* & (q(i-1,j,k)-q(i,j,k))*rdxc(i,j) enddo - if (is == 1 .and. .not. gridstruct%nested) fx(i,j) = & + if (is == 1 .and. .not. gridstruct%bounded_domain) fx(i,j) = & (mask(is,j)+mask(is,j+1))*dy(is,j)*(q(is-1,j,k)-q(is,j,k))*rdxc(is,j)* & 0.5*(sin_sg(1,j,1) + sin_sg(0,j,3)) - if (ie+1==npx .and. .not. gridstruct%nested) fx(i,j) = & - (mask(ie+1,j)+mask(ie+1,j+1))*dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & + if (ie+1==npx .and. .not. gridstruct%bounded_domain) fx(i,j) = & + (mask(ie+1,j)+mask(ie+1,j+1))*dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & 0.5*(sin_sg(npx,j,1) + sin_sg(npx-1,j,3)) enddo do j=js,je+1 - if ((j == 1 .OR. j == npy) .and. .not. gridstruct%nested) then + if ((j == 1 .OR. j == npy) .and. .not. gridstruct%bounded_domain) then do i=is,ie fy(i,j) = (mask(i,j)+mask(i+1,j))*dx(i,j)*& (q(i,j-1,k)-q(i,j,k))*rdyc(i,j) & @@ -655,335 +777,4 @@ subroutine del2_phys(qdt, delp, gridstruct, cd, npx, npy, km, is, ie, js, je, & end subroutine del2_phys - - subroutine update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) - -! Purpose; Transform wind tendencies on A grid to D grid for the final update - - integer, intent(in):: is, ie, js, je - integer, intent(in):: isd, ied, jsd, jed - integer, intent(IN) :: npx,npy, npz - real, intent(in):: dt - real, intent(inout):: u(isd:ied, jsd:jed+1,npz) - real, intent(inout):: v(isd:ied+1,jsd:jed ,npz) - real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt - type(fv_grid_type), intent(IN), target :: gridstruct - type(domain2d), intent(INOUT) :: domain - -! local: - real v3(is-1:ie+1,js-1:je+1,3) - real ue(is-1:ie+1,js:je+1,3) ! 3D winds at edges - real ve(is:ie+1,js-1:je+1, 3) ! 
3D winds at edges - real, dimension(is:ie):: ut1, ut2, ut3 - real, dimension(js:je):: vt1, vt2, vt3 - real dt5, gratio - integer i, j, k, m, im2, jm2 - - real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat - real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew - real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n - - es => gridstruct%es - ew => gridstruct%ew - vlon => gridstruct%vlon - vlat => gridstruct%vlat - - edge_vect_w => gridstruct%edge_vect_w - edge_vect_e => gridstruct%edge_vect_e - edge_vect_s => gridstruct%edge_vect_s - edge_vect_n => gridstruct%edge_vect_n - - dt5 = 0.5 * dt - im2 = (npx-1)/2 - jm2 = (npy-1)/2 - -!$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, & -!$OMP vlon,vlat,jm2,edge_vect_w,npx,edge_vect_e,im2, & -!$OMP edge_vect_s,npy,edge_vect_n,es,ew) & -!$OMP private(ut1, ut2, ut3, vt1, vt2, vt3, ue, ve, v3) - do k=1, npz - - if ( gridstruct%grid_type > 3 ) then ! Local & one tile configurations - - do j=js,je+1 - do i=is,ie - u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k)) - enddo - enddo - do j=js,je - do i=is,ie+1 - v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k)) - enddo - enddo - - else -! Compute 3D wind tendency on A grid - do j=js-1,je+1 - do i=is-1,ie+1 - v3(i,j,1) = u_dt(i,j,k)*vlon(i,j,1) + v_dt(i,j,k)*vlat(i,j,1) - v3(i,j,2) = u_dt(i,j,k)*vlon(i,j,2) + v_dt(i,j,k)*vlat(i,j,2) - v3(i,j,3) = u_dt(i,j,k)*vlon(i,j,3) + v_dt(i,j,k)*vlat(i,j,3) - enddo - enddo - -! Interpolate to cell edges - do j=js,je+1 - do i=is-1,ie+1 - ue(i,j,1) = v3(i,j-1,1) + v3(i,j,1) - ue(i,j,2) = v3(i,j-1,2) + v3(i,j,2) - ue(i,j,3) = v3(i,j-1,3) + v3(i,j,3) - enddo - enddo - - do j=js-1,je+1 - do i=is,ie+1 - ve(i,j,1) = v3(i-1,j,1) + v3(i,j,1) - ve(i,j,2) = v3(i-1,j,2) + v3(i,j,2) - ve(i,j,3) = v3(i-1,j,3) + v3(i,j,3) - enddo - enddo - -! --- E_W edges (for v-wind): - if ( is==1 .and. .not. gridstruct%nested ) then - i = 1 - do j=js,je - if ( j>jm2 ) then - vt1(j) = edge_vect_w(j)*ve(i,j-1,1)+(1.-edge_vect_w(j))*ve(i,j,1) - vt2(j) = edge_vect_w(j)*ve(i,j-1,2)+(1.-edge_vect_w(j))*ve(i,j,2) - vt3(j) = edge_vect_w(j)*ve(i,j-1,3)+(1.-edge_vect_w(j))*ve(i,j,3) - else - vt1(j) = edge_vect_w(j)*ve(i,j+1,1)+(1.-edge_vect_w(j))*ve(i,j,1) - vt2(j) = edge_vect_w(j)*ve(i,j+1,2)+(1.-edge_vect_w(j))*ve(i,j,2) - vt3(j) = edge_vect_w(j)*ve(i,j+1,3)+(1.-edge_vect_w(j))*ve(i,j,3) - endif - enddo - do j=js,je - ve(i,j,1) = vt1(j) - ve(i,j,2) = vt2(j) - ve(i,j,3) = vt3(j) - enddo - endif - if ( (ie+1)==npx .and. .not. gridstruct%nested ) then - i = npx - do j=js,je - if ( j>jm2 ) then - vt1(j) = edge_vect_e(j)*ve(i,j-1,1)+(1.-edge_vect_e(j))*ve(i,j,1) - vt2(j) = edge_vect_e(j)*ve(i,j-1,2)+(1.-edge_vect_e(j))*ve(i,j,2) - vt3(j) = edge_vect_e(j)*ve(i,j-1,3)+(1.-edge_vect_e(j))*ve(i,j,3) - else - vt1(j) = edge_vect_e(j)*ve(i,j+1,1)+(1.-edge_vect_e(j))*ve(i,j,1) - vt2(j) = edge_vect_e(j)*ve(i,j+1,2)+(1.-edge_vect_e(j))*ve(i,j,2) - vt3(j) = edge_vect_e(j)*ve(i,j+1,3)+(1.-edge_vect_e(j))*ve(i,j,3) - endif - enddo - do j=js,je - ve(i,j,1) = vt1(j) - ve(i,j,2) = vt2(j) - ve(i,j,3) = vt3(j) - enddo - endif -! N-S edges (for u-wind): - if ( js==1 .and. .not. 
gridstruct%nested) then - j = 1 - do i=is,ie - if ( i>im2 ) then - ut1(i) = edge_vect_s(i)*ue(i-1,j,1)+(1.-edge_vect_s(i))*ue(i,j,1) - ut2(i) = edge_vect_s(i)*ue(i-1,j,2)+(1.-edge_vect_s(i))*ue(i,j,2) - ut3(i) = edge_vect_s(i)*ue(i-1,j,3)+(1.-edge_vect_s(i))*ue(i,j,3) - else - ut1(i) = edge_vect_s(i)*ue(i+1,j,1)+(1.-edge_vect_s(i))*ue(i,j,1) - ut2(i) = edge_vect_s(i)*ue(i+1,j,2)+(1.-edge_vect_s(i))*ue(i,j,2) - ut3(i) = edge_vect_s(i)*ue(i+1,j,3)+(1.-edge_vect_s(i))*ue(i,j,3) - endif - enddo - do i=is,ie - ue(i,j,1) = ut1(i) - ue(i,j,2) = ut2(i) - ue(i,j,3) = ut3(i) - enddo - endif - if ( (je+1)==npy .and. .not. gridstruct%nested) then - j = npy - do i=is,ie - if ( i>im2 ) then - ut1(i) = edge_vect_n(i)*ue(i-1,j,1)+(1.-edge_vect_n(i))*ue(i,j,1) - ut2(i) = edge_vect_n(i)*ue(i-1,j,2)+(1.-edge_vect_n(i))*ue(i,j,2) - ut3(i) = edge_vect_n(i)*ue(i-1,j,3)+(1.-edge_vect_n(i))*ue(i,j,3) - else - ut1(i) = edge_vect_n(i)*ue(i+1,j,1)+(1.-edge_vect_n(i))*ue(i,j,1) - ut2(i) = edge_vect_n(i)*ue(i+1,j,2)+(1.-edge_vect_n(i))*ue(i,j,2) - ut3(i) = edge_vect_n(i)*ue(i+1,j,3)+(1.-edge_vect_n(i))*ue(i,j,3) - endif - enddo - do i=is,ie - ue(i,j,1) = ut1(i) - ue(i,j,2) = ut2(i) - ue(i,j,3) = ut3(i) - enddo - endif - do j=js,je+1 - do i=is,ie - u(i,j,k) = u(i,j,k) + dt5*( ue(i,j,1)*es(1,i,j,1) + & - ue(i,j,2)*es(2,i,j,1) + & - ue(i,j,3)*es(3,i,j,1) ) - enddo - enddo - do j=js,je - do i=is,ie+1 - v(i,j,k) = v(i,j,k) + dt5*( ve(i,j,1)*ew(1,i,j,2) + & - ve(i,j,2)*ew(2,i,j,2) + & - ve(i,j,3)*ew(3,i,j,2) ) - enddo - enddo -! Update: - endif ! end grid_type - - enddo ! k-loop - - end subroutine update_dwinds_phys - - - subroutine update2d_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, u_dt, v_dt, u, v, gridstruct, npx, npy, npz, domain) - -! Purpose; Transform wind tendencies on A grid to D grid for the final update - - integer, intent(in):: is, ie, js, je - integer, intent(in):: isd, ied, jsd, jed - real, intent(in):: dt - real, intent(inout):: u(isd:ied, jsd:jed+1,npz) - real, intent(inout):: v(isd:ied+1,jsd:jed ,npz) - real, intent(inout), dimension(isd:ied,jsd:jed,npz):: u_dt, v_dt - type(fv_grid_type), intent(IN), target :: gridstruct - integer, intent(IN) :: npx,npy, npz - type(domain2d), intent(INOUT) :: domain - -! local: - real ut(isd:ied,jsd:jed) - real:: dt5, gratio - integer i, j, k - - real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat - real(kind=R_GRID), pointer, dimension(:,:,:,:) :: es, ew - real(kind=R_GRID), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n - real, pointer, dimension(:,:) :: z11, z12, z21, z22, dya, dxa - - es => gridstruct%es - ew => gridstruct%ew - vlon => gridstruct%vlon - vlat => gridstruct%vlat - - edge_vect_w => gridstruct%edge_vect_w - edge_vect_e => gridstruct%edge_vect_e - edge_vect_s => gridstruct%edge_vect_s - edge_vect_n => gridstruct%edge_vect_n - - z11 => gridstruct%z11 - z21 => gridstruct%z21 - z12 => gridstruct%z12 - z22 => gridstruct%z22 - - dxa => gridstruct%dxa - dya => gridstruct%dya - -! Transform wind tendency on A grid to local "co-variant" components: - -!$OMP parallel do default(none) shared(is,ie,js,je,npz,z11,u_dt,z12,v_dt,z21,z22) & -!$OMP private(ut) - do k=1,npz - do j=js,je - do i=is,ie - ut(i,j) = z11(i,j)*u_dt(i,j,k) + z12(i,j)*v_dt(i,j,k) - v_dt(i,j,k) = z21(i,j)*u_dt(i,j,k) + z22(i,j)*v_dt(i,j,k) - u_dt(i,j,k) = ut(i,j) - enddo - enddo - enddo -! 
(u_dt,v_dt) are now on local coordinate system - call timing_on('COMM_TOTAL') - call mpp_update_domains(u_dt, v_dt, domain, gridtype=AGRID_PARAM) - call timing_off('COMM_TOTAL') - - dt5 = 0.5 * dt - -!$OMP parallel do default(none) shared(is,ie,js,je,npz,gridstruct,u,dt5,u_dt,v,v_dt, & -!$OMP dya,npy,dxa,npx) & -!$OMP private(gratio) - do k=1, npz - - if ( gridstruct%grid_type > 3 .or. gridstruct%nested) then ! Local & one tile configurations - - do j=js,je+1 - do i=is,ie - u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k) + u_dt(i,j,k)) - enddo - enddo - do j=js,je - do i=is,ie+1 - v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k) + v_dt(i,j,k)) - enddo - enddo - - else - -!-------- -! u-wind -!-------- -! Edges: - if ( js==1 ) then - do i=is,ie - gratio = dya(i,2) / dya(i,1) - u(i,1,k) = u(i,1,k) + dt5*((2.+gratio)*(u_dt(i,0,k)+u_dt(i,1,k)) & - -(u_dt(i,-1,k)+u_dt(i,2,k)))/(1.+gratio) - enddo - endif - -! Interior - do j=max(2,js),min(npy-1,je+1) - do i=is,ie - u(i,j,k) = u(i,j,k) + dt5*(u_dt(i,j-1,k)+u_dt(i,j,k)) - enddo - enddo - - if ( (je+1)==npy ) then - do i=is,ie - gratio = dya(i,npy-2) / dya(i,npy-1) - u(i,npy,k) = u(i,npy,k) + dt5*((2.+gratio)*(u_dt(i,npy-1,k)+u_dt(i,npy,k)) & - -(u_dt(i,npy-2,k)+u_dt(i,npy+1,k)))/(1.+gratio) - enddo - endif - -!-------- -! v-wind -!-------- -! West Edges: - if ( is==1 ) then - do j=js,je - gratio = dxa(2,j) / dxa(1,j) - v(1,j,k) = v(1,j,k) + dt5*((2.+gratio)*(v_dt(0,j,k)+v_dt(1,j,k)) & - -(v_dt(-1,j,k)+v_dt(2,j,k)))/(1.+gratio) - enddo - endif - -! Interior - do j=js,je - do i=max(2,is),min(npx-1,ie+1) - v(i,j,k) = v(i,j,k) + dt5*(v_dt(i-1,j,k)+v_dt(i,j,k)) - enddo - enddo - -! East Edges: - if ( (ie+1)==npx ) then - do j=js,je - gratio = dxa(npx-2,j) / dxa(npx-1,j) - v(npx,j,k) = v(npx,j,k) + dt5*((2.+gratio)*(v_dt(npx-1,j,k)+v_dt(npx,j,k)) & - -(v_dt(npx-2,j,k)+v_dt(npx+1,j,k)))/(1.+gratio) - enddo - endif - - endif ! end grid_type - - enddo ! k-loop - - end subroutine update2d_dwinds_phys - end module fv_update_phys_mod diff --git a/model_nh/nh_core.F90 b/model/nh_core.F90 similarity index 97% rename from model_nh/nh_core.F90 rename to model/nh_core.F90 index 26df89c98..9dcd7a302 100644 --- a/model_nh/nh_core.F90 +++ b/model/nh_core.F90 @@ -25,7 +25,7 @@ module nh_core_mod !------------------------------ use constants_mod, only: rdgas, cp_air, grav use tp_core_mod, only: fv_tp_2d - use nh_utils_mod, only: update_dz_c, update_dz_d, nest_halo_nh + use nh_utils_mod, only: update_dz_c, update_dz_d, nh_bc use nh_utils_mod, only: sim_solver, sim1_solver, sim3_solver use nh_utils_mod, only: sim3p0_solver, rim_2d use nh_utils_mod, only: Riem_Solver_c @@ -33,10 +33,10 @@ module nh_core_mod implicit none private - public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nest_halo_nh + public Riem_Solver3, Riem_Solver_c, update_dz_c, update_dz_d, nh_bc real, parameter:: r3 = 1./3. -CONTAINS +CONTAINS subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & isd, ied, jsd, jed, akap, cappa, cp, & @@ -64,7 +64,7 @@ subroutine Riem_Solver3(ms, dt, is, ie, js, je, km, ng, & real, intent(inout):: pe(is-1:ie+1,km+1,js-1:je+1) real, intent(out):: peln(is:ie,km+1,js:je) ! ln(pe) real, intent(out), dimension(isd:ied,jsd:jed,km+1):: ppe - real, intent(out):: delz(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(out):: delz(is:ie,js:je,km) real, intent(out):: pk(is:ie,js:je,km+1) real, intent(out):: pk3(isd:ied,jsd:jed,km+1) ! 
Local: diff --git a/model_nh/nh_utils.F90 b/model/nh_utils.F90 similarity index 77% rename from model_nh/nh_utils.F90 rename to model/nh_utils.F90 index 36ea4e926..2733fde67 100644 --- a/model_nh/nh_utils.F90 +++ b/model/nh_utils.F90 @@ -26,12 +26,12 @@ module nh_utils_mod use constants_mod, only: rdgas, cp_air, grav use tp_core_mod, only: fv_tp_2d use sw_core_mod, only: fill_4corners, del6_vt_flux - use fv_arrays_mod, only: fv_grid_bounds_type, fv_grid_type + use fv_arrays_mod, only: fv_grid_bounds_type, fv_grid_type, fv_nest_BC_type_3d implicit none private - public update_dz_c, update_dz_d, nest_halo_nh + public update_dz_c, update_dz_d, nh_bc public sim_solver, sim1_solver, sim3_solver public sim3p0_solver, rim_2d public Riem_Solver_c @@ -39,7 +39,7 @@ module nh_utils_mod real, parameter:: dz_min = 2. real, parameter:: r3 = 1./3. -CONTAINS +CONTAINS subroutine update_dz_c(is, ie, js, je, km, ng, dt, dp0, zs, area, ut, vt, gz, ws, & npx, npy, sw_corner, se_corner, ne_corner, nw_corner, bd, grid_type) @@ -182,7 +182,7 @@ end subroutine update_dz_c subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, rarea, & - dp0, zs, zh, crx, cry, xfx, yfx, delz, ws, rdt, gridstruct, bd) + dp0, zs, zh, crx, cry, xfx, yfx, ws, rdt, gridstruct, bd) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: is, ie, js, je, ng, km, npx, npy @@ -195,7 +195,6 @@ subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, integer, intent(inout):: ndif(km+1) real, intent(in ) :: zs(is-ng:ie+ng,js-ng:je+ng) real, intent(inout) :: zh(is-ng:ie+ng,js-ng:je+ng,km+1) - real, intent( out) ::delz(is-ng:ie+ng,js-ng:je+ng,km) real, intent(inout), dimension(is:ie+1,js-ng:je+ng,km):: crx, xfx real, intent(inout), dimension(is-ng:ie+ng,js:je+1,km):: cry, yfx real, intent(out) :: ws(is:ie,js:je) @@ -219,7 +218,7 @@ subroutine update_dz_d(ndif, damp, hord, is, ie, js, je, km, ng, npx, npy, area, damp(km+1) = damp(km) ndif(km+1) = ndif(km) - + isd = is - ng; ied = ie + ng jsd = js - ng; jed = je + ng @@ -307,7 +306,7 @@ subroutine Riem_Solver_c(ms, dt, is, ie, js, je, km, ng, & real, intent(in), dimension(is-ng:,js-ng:,1:):: q_con, cappa real, intent(in):: hs(is-ng:ie+ng,js-ng:je+ng) real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: w3 -! OUTPUT PARAMETERS +! OUTPUT PARAMETERS real, intent(inout), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: gz real, intent( out), dimension(is-ng:ie+ng,js-ng:je+ng,km+1):: pef ! Local: @@ -433,7 +432,7 @@ subroutine Riem_Solver3test(ms, dt, is, ie, js, je, km, ng, & real, intent(inout):: pe(is-1:ie+1,km+1,js-1:je+1) real, intent(out):: peln(is:ie,km+1,js:je) ! ln(pe) real, intent(out), dimension(isd:ied,jsd:jed,km+1):: ppe - real, intent(out):: delz(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(out):: delz(is:ie,js:je,km) real, intent(out):: pk(is:ie,js:je,km+1) real, intent(out):: pk3(isd:ied,jsd:jed,km+1) ! Local: @@ -577,7 +576,7 @@ end subroutine Riem_Solver3test subroutine imp_diff_w(j, is, ie, js, je, ng, km, cd, delz, ws, w, w3) integer, intent(in) :: j, is, ie, js, je, km, ng real, intent(in) :: cd - real, intent(in) :: delz(is-ng:ie+ng, km) ! delta-height (m) + real, intent(in) :: delz(is:ie, km) ! delta-height (m) real, intent(in) :: w(is:ie, km) ! vertical vel. (m/s) real, intent(in) :: ws(is:ie) real, intent(out) :: w3(is-ng:ie+ng,js-ng:je+ng,km) @@ -621,7 +620,7 @@ subroutine imp_diff_w(j, is, ie, js, je, ng, km, cd, delz, ws, w, w3) wt(i,km) = (w(i,km) + 2.*ws(i)*cd/delz(i,km)**2 & + a*wt(i,km-1))/(1. 
+ a + (cd+cd)/delz(i,km)**2 + a*gam(i,km)) enddo - + do k=km-1,1,-1 do i=is,ie wt(i,k) = wt(i,k) - gam(i,k+1)*wt(i,k+1) @@ -660,7 +659,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & integer:: i, k, n, ke, kt1, ktop integer:: ks0, ks1 - grg = gama * rgas + grg = gama * rgas rdt = 1. / bdt dt = bdt / real(ms) @@ -697,7 +696,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & dts(k) = -dz(k)/sqrt(grg*pf1(k)/rden) #endif if ( bdt > dts(k) ) then - ks0 = k-1 + ks0 = k-1 goto 222 endif enddo @@ -806,7 +805,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & m_top(ke) = m_top(ke) + z_frac*dm(k) r_top(ke) = r_top(ke) + z_frac*r_hi(k) go to 444 ! next level - endif + endif enddo 444 continue @@ -822,7 +821,7 @@ subroutine RIM_2D(ms, bdt, is, ie, km, rgas, gama, gm2, pe2, & time_left = time_left - dts(k) m_bot(ke) = m_bot(ke) + dm(k) r_bot(ke) = r_bot(ke) + r_lo(k) - else + else z_frac = time_left/dts(k) m_bot(ke) = m_bot(ke) + z_frac* dm(k) r_bot(ke) = r_bot(ke) + z_frac*r_lo(k) @@ -1201,12 +1200,12 @@ subroutine SIM1_solver(dt, is, ie, km, rgas, gama, gm2, cp2, kappa, pe, dm2, do k=1,km do i=is, ie - w1(i,k) = w2(i,k) #ifdef MOIST_CAPPA pe(i,k) = exp(gm2(i,k)*log(-dm2(i,k)/dz2(i,k)*rgas*pt2(i,k))) - pm2(i,k) #else pe(i,k) = exp(gama*log(-dm2(i,k)/dz2(i,k)*rgas*pt2(i,k))) - pm2(i,k) #endif + w1(i,k) = w2(i,k) enddo enddo @@ -1475,7 +1474,7 @@ subroutine edge_scalar(q1, qe, i1, i2, km, id) real, intent(out), dimension(i1:i2,km+1):: qe !----------------------------------------------------------------------- real, parameter:: r2o3 = 2./3. - real, parameter:: r4o3 = 4./3. + real, parameter:: r4o3 = 4./3. real gak(km) real bet integer i, k @@ -1585,7 +1584,7 @@ subroutine edge_profile(q1, q2, q1e, q2e, i1, i2, j1, j2, j, km, dp0, uniform_gr gam(i,k) = gk / bet enddo enddo - + a_bot = 1. + gk*(gk+1.5) xt1 = 2.*gk*(gk+1.) 
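! The bet/gam recurrences above (and in the nonhydrostatic edge-pressure solver
! elsewhere in this file) are the Thomas algorithm: one forward-elimination
! sweep followed by back-substitution. A standalone sketch for a generic
! tridiagonal system a(k)*x(k-1) + b(k)*x(k) + c(k)*x(k+1) = d(k), assuming a
! well-conditioned (e.g. diagonally dominant) matrix; not part of the patch:

subroutine tridiag_sketch(a, b, c, d, x, n)
   integer, intent(in)  :: n
   real,    intent(in)  :: a(n), b(n), c(n), d(n)   ! sub-, main-, super-diagonal, RHS
   real,    intent(out) :: x(n)
   real :: bet, gam(n)
   integer :: k
   bet  = b(1)
   x(1) = d(1)/bet
   do k = 2, n                       ! forward elimination
      gam(k) = c(k-1)/bet
      bet    = b(k) - a(k)*gam(k)
      x(k)   = (d(k) - a(k)*x(k-1))/bet
   enddo
   do k = n-1, 1, -1                 ! back-substitution
      x(k) = x(k) - gam(k+1)*x(k+1)
   enddo
end subroutine tridiag_sketch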
do i=i1,i2 @@ -1625,7 +1624,8 @@ subroutine edge_profile(q1, q2, q1e, q2e, i1, i2, j1, j2, j, km, dp0, uniform_gr end subroutine edge_profile - subroutine nest_halo_nh(ptop, grav, kappa, cp, delp, delz, pt, phis, & +!TODO LMH 25may18: do not need delz defined on full compute domain; pass appropriate BCs instead + subroutine nh_bc(ptop, grav, kappa, cp, delp, delzBC, pt, phis, & #ifdef USE_COND q_con, & #ifdef MOIST_CAPPA @@ -1633,16 +1633,18 @@ subroutine nest_halo_nh(ptop, grav, kappa, cp, delp, delz, pt, phis, & #endif #endif pkc, gz, pk3, & - npx, npy, npz, nested, pkc_pertn, computepk3, fullhalo, bd) + BC_step, BC_split, & + npx, npy, npz, bounded_domain, pkc_pertn, computepk3, fullhalo, bd) - !INPUT: delp, delz, pt + !INPUT: delp, delz (BC), pt !OUTPUT: gz, pkc, pk3 (optional) integer, intent(IN) :: npx, npy, npz - logical, intent(IN) :: pkc_pertn, computepk3, fullhalo, nested - real, intent(IN) :: ptop, kappa, cp, grav + logical, intent(IN) :: pkc_pertn, computepk3, fullhalo, bounded_domain + real, intent(IN) :: ptop, kappa, cp, grav, BC_step, BC_split type(fv_grid_bounds_type), intent(IN) :: bd real, intent(IN) :: phis(bd%isd:bd%ied,bd%jsd:bd%jed) - real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: pt, delp, delz + real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: pt, delp + type(fv_nest_BC_type_3d), intent(IN) :: delzBC #ifdef USE_COND real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: q_con #ifdef MOIST_CAPPA @@ -1652,19 +1654,8 @@ subroutine nest_halo_nh(ptop, grav, kappa, cp, delp, delz, pt, phis, & real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1):: gz, pkc, pk3 integer :: i,j,k - real :: gama !'gamma' - real :: ptk, rgrav, rkap, peln1, rdg - real, dimension(bd%isd:bd%ied, npz+1, bd%jsd:bd%jed ) :: pe, peln -#ifdef USE_COND - real, dimension(bd%isd:bd%ied, npz+1 ) :: peg, pelng -#endif - real, dimension(bd%isd:bd%ied, npz) :: gam, bb, dd, pkz - real, dimension(bd%isd:bd%ied, npz-1) :: g_rat - real, dimension(bd%isd:bd%ied) :: bet - real :: pm - - integer :: ifirst, ilast, jfirst, jlast + integer :: istart, iend integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -1678,485 +1669,248 @@ subroutine nest_halo_nh(ptop, grav, kappa, cp, delp, delz, pt, phis, & jsd = bd%jsd jed = bd%jed - if (.not. nested) return - ifirst = isd - jfirst = jsd - ilast = ied - jlast = jed - - !Remember we want to compute these in the HALO. Note also this routine - !requires an appropriate - - rgrav = 1./grav - gama = 1./(1.-kappa) - ptk = ptop ** kappa - rkap = 1./kappa - peln1 = log(ptop) - rdg = - rdgas * rgrav - - !NOTE: Compiler does NOT like this sort of nested-grid BC code. Is it trying to do some ugly optimization? + if (.not. 
bounded_domain) return if (is == 1) then - do j=jfirst,jlast - - !GZ - do i=ifirst,0 - gz(i,j,npz+1) = phis(i,j) - enddo - do k=npz,1,-1 - do i=ifirst,0 - gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)*grav - enddo - enddo - - !Hydrostatic interface pressure - do i=ifirst,0 - pe(i,1,j) = ptop - peln(i,1,j) = peln1 + call nh_BC_k(ptop, grav, kappa, cp, delp, delzBC%west_t0, delzBC%west_t1, pt, phis, & #ifdef USE_COND - peg(i,1) = ptop - pelng(i,1) = peln1 -#endif - enddo - do k=2,npz+1 - do i=ifirst,0 - pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) -#ifdef USE_COND - peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) - pelng(i,k) = log(peg(i,k)) -#endif - enddo - enddo - - !Perturbation nonhydro layer-mean pressure (NOT to the kappa) - do k=1,npz - do i=ifirst,0 - !Full p + q_con, & #ifdef MOIST_CAPPA - pkz(i,k) = exp(1./(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) -#else - pkz(i,k) = exp(gama*log(-delp(i,j,k)*rgrav/delz(i,j,k)*rdgas*pt(i,j,k))) -#endif - !hydro -#ifdef USE_COND - pm = (peg(i,k+1)-peg(i,k))/(pelng(i,k+1)-pelng(i,k)) -#else - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) + cappa, & #endif - !Remove hydro cell-mean pressure - pkz(i,k) = pkz(i,k) - pm - enddo - enddo - - !pressure solver - do k=1,npz-1 - do i=ifirst,0 - g_rat(i,k) = delp(i,j,k)/delp(i,j,k+1) - bb(i,k) = 2.*(1. + g_rat(i,k)) - dd(i,k) = 3.*(pkz(i,k) + g_rat(i,k)*pkz(i,k+1)) - enddo - enddo - - do i=ifirst,0 - bet(i) = bb(i,1) - pkc(i,j,1) = 0. - pkc(i,j,2) = dd(i,1)/bet(i) - bb(i,npz) = 2. - dd(i,npz) = 3.*pkz(i,npz) - enddo - do k=2,npz - do i=ifirst,0 - gam(i,k) = g_rat(i,k-1)/bet(i) - bet(i) = bb(i,k) - gam(i,k) - pkc(i,j,k+1) = (dd(i,k) - pkc(i,j,k))/bet(i) - enddo - enddo - do k=npz,2,-1 - do i=ifirst,0 - pkc(i,j,k) = pkc(i,j,k) - gam(i,k)*pkc(i,j,k+1) -#ifdef NHNEST_DEBUG - if (abs(pkc(i,j,k)) > 1.e5) then - print*, mpp_pe(), i,j,k, 'PKC: ', pkc(i,j,k) - endif #endif - enddo - enddo - - enddo - - do j=jfirst,jlast - - if (.not. 
pkc_pertn) then - do k=npz+1,1,-1 - do i=ifirst,0 - pkc(i,j,k) = pkc(i,j,k) + pe(i,k,j) - enddo - enddo - endif - - !pk3 if necessary; doesn't require condenstate loading calculation - if (computepk3) then - do i=ifirst,0 - pk3(i,j,1) = ptk - enddo - do k=2,npz+1 - do i=ifirst,0 - pk3(i,j,k) = exp(kappa*log(pe(i,k,j))) - enddo - enddo - endif - - enddo + pkc, gz, pk3, & + BC_step, BC_split, & + pkc_pertn, computepk3, isd, ied, isd, 0, isd, 0, jsd, jed, jsd, jed, npz) endif if (ie == npx-1) then - do j=jfirst,jlast - - !GZ - do i=npx,ilast - gz(i,j,npz+1) = phis(i,j) - enddo - do k=npz,1,-1 - do i=npx,ilast - gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)*grav - enddo - enddo - - !Hydrostatic interface pressure - do i=npx,ilast - pe(i,1,j) = ptop - peln(i,1,j) = peln1 -#ifdef USE_COND - peg(i,1) = ptop - pelng(i,1) = peln1 -#endif - enddo - do k=2,npz+1 - do i=npx,ilast - pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + call nh_BC_k(ptop, grav, kappa, cp, delp, delzBC%east_t0, delzBC%east_t1, pt, phis, & #ifdef USE_COND - peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) - pelng(i,k) = log(peg(i,k)) -#endif - enddo - enddo - - !Perturbation nonhydro layer-mean pressure (NOT to the kappa) - do k=1,npz - do i=npx,ilast - !Full p + q_con, & #ifdef MOIST_CAPPA - pkz(i,k) = exp(1./(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) -#else - pkz(i,k) = exp(gama*log(-delp(i,j,k)*rgrav/delz(i,j,k)*rdgas*pt(i,j,k))) + cappa, & #endif - !hydro -#ifdef USE_COND - pm = (peg(i,k+1)-peg(i,k))/(pelng(i,k+1)-pelng(i,k)) -#else - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) #endif - !Remove hydro cell-mean pressure - pkz(i,k) = pkz(i,k) - pm - enddo - enddo - - !pressure solver - do k=1,npz-1 - do i=npx,ilast - g_rat(i,k) = delp(i,j,k)/delp(i,j,k+1) - bb(i,k) = 2.*(1. + g_rat(i,k)) - dd(i,k) = 3.*(pkz(i,k) + g_rat(i,k)*pkz(i,k+1)) - enddo - enddo - - do i=npx,ilast - bet(i) = bb(i,1) - pkc(i,j,1) = 0. - pkc(i,j,2) = dd(i,1)/bet(i) - bb(i,npz) = 2. - dd(i,npz) = 3.*pkz(i,npz) - enddo - do k=2,npz - do i=npx,ilast - gam(i,k) = g_rat(i,k-1)/bet(i) - bet(i) = bb(i,k) - gam(i,k) - pkc(i,j,k+1) = (dd(i,k) - pkc(i,j,k))/bet(i) - enddo - enddo - do k=npz,2,-1 - do i=npx,ilast - pkc(i,j,k) = pkc(i,j,k) - gam(i,k)*pkc(i,j,k+1) - enddo - enddo + pkc, gz, pk3, & + BC_step, BC_split, & + pkc_pertn, computepk3, isd, ied, npx, ied, npx, ied, jsd, jed, jsd, jed, npz) + endif - enddo + if (is == 1) then + istart = is + else + istart = isd + end if + if (ie == npx-1) then + iend = ie + else + iend = ied + end if - do j=jfirst,jlast + if (js == 1) then - if (.not. 
pkc_pertn) then - do k=npz+1,1,-1 - do i=npx,ilast - pkc(i,j,k) = pkc(i,j,k) + pe(i,k,j) - enddo - enddo - endif + call nh_BC_k(ptop, grav, kappa, cp, delp, delzBC%south_t0, delzBC%south_t1, pt, phis, & +#ifdef USE_COND + q_con, & +#ifdef MOIST_CAPPA + cappa, & +#endif +#endif + pkc, gz, pk3, & + BC_step, BC_split, & + pkc_pertn, computepk3, isd, ied, isd, ied, istart, iend, jsd, jed, jsd, 0, npz) - !pk3 if necessary - if (computepk3) then - do i=npx,ilast - pk3(i,j,1) = ptk - enddo - do k=2,npz+1 - do i=npx,ilast - pk3(i,j,k) = exp(kappa*log(pe(i,k,j))) - enddo - enddo - endif + end if - enddo + if (je == npy-1) then + call nh_BC_k(ptop, grav, kappa, cp, delp, delzBC%north_t0, delzBC%north_t1, pt, phis, & +#ifdef USE_COND + q_con, & +#ifdef MOIST_CAPPA + cappa, & +#endif +#endif + pkc, gz, pk3, & + BC_step, BC_split, & + pkc_pertn, computepk3, isd, ied, isd, ied, istart, iend, jsd, jed, npy, jed, npz) endif - if (js == 1) then - - do j=jfirst,0 - - !GZ - do i=ifirst,ilast - gz(i,j,npz+1) = phis(i,j) - enddo - do k=npz,1,-1 - do i=ifirst,ilast - gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)*grav - enddo - enddo +end subroutine nh_bc - !Hydrostatic interface pressure - do i=ifirst,ilast - pe(i,1,j) = ptop - peln(i,1,j) = peln1 +subroutine nh_BC_k(ptop, grav, kappa, cp, delp, delzBC_t0, delzBC_t1, pt, phis, & #ifdef USE_COND - peg(i,1) = ptop - pelng(i,1) = peln1 + q_con, & +#ifdef MOIST_CAPPA + cappa, & #endif - enddo - do k=2,npz+1 - do i=ifirst,ilast - pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) -#ifdef USE_COND - peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) - pelng(i,k) = log(peg(i,k)) #endif - enddo - enddo + pkc, gz, pk3, & + BC_step, BC_split, & + pkc_pertn, computepk3, isd, ied, isd_BC, ied_BC, istart, iend, jsd, jed, jstart, jend, npz) + + integer, intent(IN) :: isd, ied, isd_BC, ied_BC, istart, iend, jsd, jed, jstart, jend, npz + real, intent(IN), dimension(isd_BC:ied_BC,jstart:jend,npz) :: delzBC_t0, delzBC_t1 + real, intent(IN) :: BC_step, BC_split - !Perturbation nonhydro layer-mean pressure (NOT to the kappa) - do k=1,npz - do i=ifirst,ilast - !Full p + logical, intent(IN) :: pkc_pertn, computepk3 + real, intent(IN) :: ptop, kappa, cp, grav + real, intent(IN) :: phis(isd:ied,jsd:jed) + real, intent(IN), dimension(isd:ied,jsd:jed,npz):: pt, delp +#ifdef USE_COND + real, intent(IN), dimension(isd:ied,jsd:jed,npz):: q_con #ifdef MOIST_CAPPA - pkz(i,k) = exp(1./(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) -#else - pkz(i,k) = exp(gama*log(-delp(i,j,k)*rgrav/delz(i,j,k)*rdgas*pt(i,j,k))) + real, intent(INOUT), dimension(isd:ied,jsd:jed,npz):: cappa #endif - !hydro -#ifdef USE_COND - pm = (peg(i,k+1)-peg(i,k))/(pelng(i,k+1)-pelng(i,k)) -#else - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) #endif - !hydro - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) - !Remove hydro cell-mean pressure - pkz(i,k) = pkz(i,k) - pm - enddo - enddo + real, intent(INOUT), dimension(isd:ied,jsd:jed,npz+1):: gz, pkc, pk3 - !pressure solver - do k=1,npz-1 - do i=ifirst,ilast - g_rat(i,k) = delp(i,j,k)/delp(i,j,k+1) - bb(i,k) = 2.*(1. + g_rat(i,k)) - dd(i,k) = 3.*(pkz(i,k) + g_rat(i,k)*pkz(i,k+1)) - enddo - enddo + integer :: i,j,k + real :: gama !'gamma' + real :: ptk, rgrav, rkap, peln1, rdg, denom - do i=ifirst,ilast - bet(i) = bb(i,1) - pkc(i,j,1) = 0. - pkc(i,j,2) = dd(i,1)/bet(i) - bb(i,npz) = 2. 
- dd(i,npz) = 3.*pkz(i,npz) - enddo - do k=2,npz - do i=ifirst,ilast - gam(i,k) = g_rat(i,k-1)/bet(i) - bet(i) = bb(i,k) - gam(i,k) - pkc(i,j,k+1) = (dd(i,k) - pkc(i,j,k))/bet(i) - enddo - enddo - do k=npz,2,-1 - do i=ifirst,ilast - pkc(i,j,k) = pkc(i,j,k) - gam(i,k)*pkc(i,j,k+1) -#ifdef NHNEST_DEBUG - if (abs(pkc(i,j,k)) > 1.e5) then - print*, mpp_pe(), i,j,k, 'PKC: ', pkc(i,j,k) - endif + real, dimension(istart:iend, npz+1, jstart:jend ) :: pe, peln +#ifdef USE_COND + real, dimension(istart:iend, npz+1 ) :: peg, pelng #endif - enddo - enddo + real, dimension(istart:iend, npz) :: gam, bb, dd, pkz + real, dimension(istart:iend, npz-1) :: g_rat + real, dimension(istart:iend) :: bet + real :: pm, delz_int - enddo - do j=jfirst,0 + real :: pealn, pebln, rpkz - if (.not. pkc_pertn) then - do k=npz+1,1,-1 - do i=ifirst,ilast - pkc(i,j,k) = pkc(i,j,k) + pe(i,k,j) - enddo - enddo - endif + rgrav = 1./grav + gama = 1./(1.-kappa) + ptk = ptop ** kappa + rkap = 1./kappa + peln1 = log(ptop) + rdg = - rdgas * rgrav + denom = 1./BC_split - !pk3 if necessary - if (computepk3) then - do i=ifirst,ilast - pk3(i,j,1) = ptk - enddo - do k=2,npz+1 - do i=ifirst,ilast - pk3(i,j,k) = exp(kappa*log(pe(i,k,j))) - enddo - enddo - endif + do j=jstart,jend + !GZ + do i=istart,iend + gz(i,j,npz+1) = phis(i,j) + enddo + do k=npz,1,-1 + do i=istart,iend + delz_int = (delzBC_t0(i,j,k)*(BC_split-BC_step) + BC_step*delzBC_t1(i,j,k))*denom + gz(i,j,k) = gz(i,j,k+1) - delz_int*grav enddo + enddo - endif - - if (je == npy-1) then - - do j=npy,jlast - - !GZ - do i=ifirst,ilast - gz(i,j,npz+1) = phis(i,j) - enddo - do k=npz,1,-1 - do i=ifirst,ilast - gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)*grav - enddo - enddo - - !Hydrostatic interface pressure - do i=ifirst,ilast - pe(i,1,j) = ptop - peln(i,1,j) = peln1 + !Hydrostatic interface pressure + do i=istart,iend + pe(i,1,j) = ptop + peln(i,1,j) = peln1 #ifdef USE_COND - peg(i,1) = ptop - pelng(i,1) = peln1 + peg(i,1) = ptop + pelng(i,1) = peln1 #endif - enddo - do k=2,npz+1 - do i=ifirst,ilast - pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + enddo + do k=2,npz+1 + do i=istart,iend + pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) + peln(i,k,j) = log(pe(i,k,j)) #ifdef USE_COND - peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) - pelng(i,k) = log(peg(i,k)) + peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1)) + pelng(i,k) = log(peg(i,k)) #endif - enddo - enddo + enddo + enddo + + !Perturbation nonhydro layer-mean pressure (NOT to the kappa) + do k=1,npz + do i=istart,iend + delz_int = (delzBC_t0(i,j,k)*(BC_split-BC_step) + BC_step*delzBC_t1(i,j,k))*denom - !Perturbation nonhydro layer-mean pressure (NOT to the kappa) - do k=1,npz - do i=ifirst,ilast - !Full p + !Full p #ifdef MOIST_CAPPA - pkz(i,k) = exp(1./(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k))) + pkz(i,k) = exp(1./(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz_int*pt(i,j,k))) #else - pkz(i,k) = exp(gama*log(-delp(i,j,k)*rgrav/delz(i,j,k)*rdgas*pt(i,j,k))) + pkz(i,k) = exp(gama*log(-delp(i,j,k)*rgrav/delz_int*rdgas*pt(i,j,k))) #endif - !hydro + !hydro #ifdef USE_COND - pm = (peg(i,k+1)-peg(i,k))/(pelng(i,k+1)-pelng(i,k)) + pm = (peg(i,k+1)-peg(i,k))/(pelng(i,k+1)-pelng(i,k)) #else - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) + pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) #endif - !hydro - pm = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) - !Remove hydro cell-mean pressure - pkz(i,k) = pkz(i,k) - pm - enddo - enddo - - !Reversible interpolation on layer NH pressure perturbation - ! 
to recover lastge NH pressure perturbation - do k=1,npz-1 - do i=ifirst,ilast - g_rat(i,k) = delp(i,j,k)/delp(i,j,k+1) - bb(i,k) = 2.*(1. + g_rat(i,k)) - dd(i,k) = 3.*(pkz(i,k) + g_rat(i,k)*pkz(i,k+1)) - enddo - enddo - - do i=ifirst,ilast - bet(i) = bb(i,1) - pkc(i,j,1) = 0. - pkc(i,j,2) = dd(i,1)/bet(i) - bb(i,npz) = 2. - dd(i,npz) = 3.*pkz(i,npz) - enddo - do k=2,npz - do i=ifirst,ilast - gam(i,k) = g_rat(i,k-1)/bet(i) - bet(i) = bb(i,k) - gam(i,k) - pkc(i,j,k+1) = (dd(i,k) - pkc(i,j,k))/bet(i) - enddo - enddo - do k=npz,2,-1 - do i=ifirst,ilast - pkc(i,j,k) = pkc(i,j,k) - gam(i,k)*pkc(i,j,k+1) - enddo - enddo + !Remove hydro cell-mean pressure + pkz(i,k) = pkz(i,k) - pm + enddo + enddo + !pressure solver + do k=1,npz-1 + do i=istart,iend + g_rat(i,k) = delp(i,j,k)/delp(i,j,k+1) + bb(i,k) = 2.*(1. + g_rat(i,k)) + dd(i,k) = 3.*(pkz(i,k) + g_rat(i,k)*pkz(i,k+1)) + enddo + enddo + do i=istart,iend + bet(i) = bb(i,1) + pkc(i,j,1) = 0. + pkc(i,j,2) = dd(i,1)/bet(i) + bb(i,npz) = 2. + dd(i,npz) = 3.*pkz(i,npz) + enddo + do k=2,npz + do i=istart,iend + gam(i,k) = g_rat(i,k-1)/bet(i) + bet(i) = bb(i,k) - gam(i,k) + pkc(i,j,k+1) = (dd(i,k) - pkc(i,j,k))/bet(i) + enddo + enddo + do k=npz,2,-1 + do i=istart,iend + pkc(i,j,k) = pkc(i,j,k) - gam(i,k)*pkc(i,j,k+1) +#ifdef NHNEST_DEBUG + if (abs(pkc(i,j,k)) > 1.e5) then + print*, mpp_pe(), i,j,k, 'PKC: ', pkc(i,j,k) + endif +#endif enddo + enddo - do j=npy,jlast - if (.not. pkc_pertn) then - do k=npz+1,1,-1 - do i=ifirst,ilast - pkc(i,j,k) = pkc(i,j,k) + pe(i,k,j) - enddo - enddo - endif + enddo - !pk3 if necessary - if (computepk3) then - do i=ifirst,ilast - pk3(i,j,1) = ptk - enddo - do k=2,npz+1 - do i=ifirst,ilast - pk3(i,j,k) = exp(kappa*log(pe(i,k,j))) - enddo - enddo - endif + do j=jstart,jend + if (.not. pkc_pertn) then + do k=npz+1,1,-1 + do i=istart,iend + pkc(i,j,k) = pkc(i,j,k) + pe(i,k,j) + enddo enddo + endif + !pk3 if necessary; doesn't require condenstate loading calculation + if (computepk3) then + do i=istart,iend + pk3(i,j,1) = ptk + enddo + do k=2,npz+1 + do i=istart,iend + pk3(i,j,k) = exp(kappa*log(pe(i,k,j))) + enddo + enddo endif -end subroutine nest_halo_nh + enddo + +end subroutine nh_BC_k + end module nh_utils_mod diff --git a/model/sw_core.F90 b/model/sw_core.F90 index 49f0c305c..99f079ad6 100644 --- a/model/sw_core.F90 +++ b/model/sw_core.F90 @@ -20,7 +20,6 @@ !*********************************************************************** module sw_core_mod - use fv_mp_mod, only: ng use tp_core_mod, only: fv_tp_2d, pert_ppm, copy_corners use fv_mp_mod, only: fill_corners, XDir, YDir use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type, fv_flags_type @@ -32,7 +31,7 @@ module sw_core_mod implicit none - real, parameter:: r3 = 1./3. + real, parameter:: r3 = 1./3. real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28. real, parameter:: s11=11./14., s13=-13./14., s14=4./7., s15=3./14. real, parameter:: near_zero = 1.E-9 ! for KE limiter @@ -69,10 +68,6 @@ module sw_core_mod real, parameter:: b5 = -0.05 -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - private public :: c_sw, d_sw, fill_4corners, del6_vt_flux, divergence_corner, divergence_corner_nest @@ -98,7 +93,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & type(fv_flags_type), intent(IN), target :: flagstruct ! 
Local: - logical:: sw_corner, se_corner, ne_corner, nw_corner + logical:: sw_corner, se_corner, ne_corner, nw_corner real, dimension(bd%is-1:bd%ie+1,bd%js-1:bd%je+1):: vort, ke real, dimension(bd%is-1:bd%ie+2,bd%js-1:bd%je+1):: fx, fx1, fx2 real, dimension(bd%is-1:bd%ie+1,bd%js-1:bd%je+2):: fy, fy1, fy2 @@ -109,7 +104,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & integer :: is, ie, js, je integer :: isd, ied, jsd, jed integer :: npx, npy - logical :: nested + logical :: bounded_domain real, pointer, dimension(:,:,:) :: sin_sg, cos_sg real, pointer, dimension(:,:) :: cosa_u, cosa_v @@ -128,7 +123,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & npx = flagstruct%npx npy = flagstruct%npy - nested = gridstruct%nested + bounded_domain = gridstruct%bounded_domain sin_sg => gridstruct%sin_sg cos_sg => gridstruct%cos_sg @@ -149,10 +144,10 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & iep1 = ie+1; jep1 = je+1 call d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, bd, & - npx, npy, nested, flagstruct%grid_type) + npx, npy, bounded_domain, flagstruct%grid_type) if( nord > 0 ) then - if (nested) then + if (bounded_domain) then call divergence_corner_nest(u, v, ua, va, divg_d, gridstruct, flagstruct, bd) else call divergence_corner(u, v, ua, va, divg_d, gridstruct, flagstruct, bd) @@ -162,7 +157,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & do j=js-1,jep1 do i=is-1,iep1+1 if (ut(i,j) > 0.) then - ut(i,j) = dt2*ut(i,j)*dy(i,j)*sin_sg(i-1,j,3) + ut(i,j) = dt2*ut(i,j)*dy(i,j)*sin_sg(i-1,j,3) else ut(i,j) = dt2*ut(i,j)*dy(i,j)*sin_sg(i,j,1) end if @@ -171,7 +166,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & do j=js-1,je+2 do i=is-1,iep1 if (vt(i,j) > 0.) then - vt(i,j) = dt2*vt(i,j)*dx(i,j)*sin_sg(i,j-1,4) + vt(i,j) = dt2*vt(i,j)*dx(i,j)*sin_sg(i,j-1,4) else vt(i,j) = dt2*vt(i,j)*dx(i,j)*sin_sg(i,j, 2) end if @@ -182,7 +177,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & ! Transport delp: !---------------- ! Xdir: - if (flagstruct%grid_type < 3 .and. .not. nested) call fill2_4corners(delp, pt, 1, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) + if (flagstruct%grid_type < 3 .and. .not. bounded_domain) call fill2_4corners(delp, pt, 1, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) if ( hydrostatic ) then #ifdef SW_DYNAMICS @@ -215,7 +210,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & if (flagstruct%grid_type < 3) & call fill_4corners(w, 1, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) do j=js-1,je+1 - do i=is-1,ie+2 + do i=is-1,ie+2 if ( ut(i,j) > 0. ) then fx1(i,j) = delp(i-1,j) fx(i,j) = pt(i-1,j) @@ -233,10 +228,10 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & endif ! Ydir: - if (flagstruct%grid_type < 3 .and. .not. nested) call fill2_4corners(delp, pt, 2, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) + if (flagstruct%grid_type < 3 .and. .not. bounded_domain) call fill2_4corners(delp, pt, 2, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) if ( hydrostatic ) then do j=js-1,jep1+1 - do i=is-1,iep1 + do i=is-1,iep1 if ( vt(i,j) > 0. 
) then fy1(i,j) = delp(i,j-1) fy(i,j) = pt(i,j-1) @@ -249,7 +244,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & enddo enddo do j=js-1,jep1 - do i=is-1,iep1 + do i=is-1,iep1 delpc(i,j) = delp(i,j) + (fx1(i,j)-fx1(i+1,j)+fy1(i,j)-fy1(i,j+1))*gridstruct%rarea(i,j) #ifdef SW_DYNAMICS ptc(i,j) = pt(i,j) @@ -262,7 +257,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & else if (flagstruct%grid_type < 3) call fill_4corners(w, 2, bd, npx, npy, sw_corner, se_corner, ne_corner, nw_corner) do j=js-1,je+2 - do i=is-1,ie+1 + do i=is-1,ie+1 if ( vt(i,j) > 0. ) then fy1(i,j) = delp(i,j-1) fy(i,j) = pt(i,j-1) @@ -278,7 +273,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & enddo enddo do j=js-1,je+1 - do i=is-1,ie+1 + do i=is-1,ie+1 delpc(i,j) = delp(i,j) + (fx1(i,j)-fx1(i+1,j)+fy1(i,j)-fy1(i,j+1))*gridstruct%rarea(i,j) ptc(i,j) = (pt(i,j)*delp(i,j) + & (fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*gridstruct%rarea(i,j))/delpc(i,j) @@ -292,12 +287,12 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & ! Compute KE: !------------ -!Since uc = u*, i.e. the covariant wind perpendicular to the face edge, if we want to compute kinetic energy we will need the true coordinate-parallel covariant wind, computed through u = uc*sina + v*cosa. +!Since uc = u*, i.e. the covariant wind perpendicular to the face edge, if we want to compute kinetic energy we will need the true coordinate-parallel covariant wind, computed through u = uc*sina + v*cosa. !Use the alpha for the cell KE is being computed in. !!! TO DO: !!! Need separate versions for nesting/single-tile !!! and for cubed-sphere - if (nested .or. flagstruct%grid_type >=3 ) then + if (bounded_domain .or. flagstruct%grid_type >=3 ) then do j=js-1,jep1 do i=is-1,iep1 if ( ua(i,j) > 0. ) then @@ -364,7 +359,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & dt4 = 0.5*dt2 do j=js-1,jep1 do i=is-1,iep1 - ke(i,j) = dt4*(ua(i,j)*ke(i,j) + va(i,j)*vort(i,j)) + ke(i,j) = dt4*(ua(i,j)*ke(i,j) + va(i,j)*vort(i,j)) enddo enddo @@ -414,7 +409,7 @@ subroutine c_sw(delpc, delp, ptc, pt, u,v, w, uc,vc, ua,va, wc, & ! (For the same reason we only divide by sin instead of sin**2 in the interior) !! TO DO: separate versions for nesting/single-tile and cubed-sphere - if (nested .or. flagstruct%grid_type >= 3) then + if (bounded_domain .or. flagstruct%grid_type >= 3) then do j=js,je do i=is,iep1 fy1(i,j) = dt2*(v(i,j)-uc(i,j)*cosa_u(i,j))/sina_u(i,j) @@ -493,7 +488,7 @@ end subroutine c_sw ! d_sw :: D-Grid Shallow Water Routine - + subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & ua, va, divg_d, xflux, yflux, cx, cy, & crx_adv, cry_adv, xfx_adv, yfx_adv, q_con, z_rat, kgb, heat_source, & @@ -534,7 +529,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & type(fv_grid_type), intent(IN), target :: gridstruct type(fv_flags_type), intent(IN), target :: flagstruct ! Local: - logical:: sw_corner, se_corner, ne_corner, nw_corner + logical:: sw_corner, se_corner, ne_corner, nw_corner real :: ut(bd%isd:bd%ied+1,bd%jsd:bd%jed) real :: vt(bd%isd:bd%ied, bd%jsd:bd%jed+1) !--- @@ -550,7 +545,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & real :: fy(bd%is:bd%ie ,bd%js:bd%je+1) ! 1-D Y-direction Fluxes real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed) real :: ra_y(bd%isd:bd%ied,bd%js:bd%je) - real :: gx(bd%is:bd%ie+1,bd%js:bd%je ) + real :: gx(bd%is:bd%ie+1,bd%js:bd%je ) real :: gy(bd%is:bd%ie ,bd%js:bd%je+1) ! 
work Y-dir flux array logical :: fill_c @@ -571,8 +566,8 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & integer :: is, ie, js, je integer :: isd, ied, jsd, jed - integer :: npx, npy - logical :: nested + integer :: npx, npy, ng + logical :: bounded_domain is = bd%is ie = bd%ie @@ -582,42 +577,43 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & ied = bd%ied jsd = bd%jsd jed = bd%jed + ng = bd%ng npx = flagstruct%npx npy = flagstruct%npy - nested = gridstruct%nested - - area => gridstruct%area - rarea => gridstruct%rarea - sin_sg => gridstruct%sin_sg - cosa_u => gridstruct%cosa_u - cosa_v => gridstruct%cosa_v - cosa_s => gridstruct%cosa_s - sina_u => gridstruct%sina_u - sina_v => gridstruct%sina_v - rsin_u => gridstruct%rsin_u - rsin_v => gridstruct%rsin_v - rsina => gridstruct%rsina - f0 => gridstruct%f0 - rsin2 => gridstruct%rsin2 - divg_u => gridstruct%divg_u - divg_v => gridstruct%divg_v - cosa => gridstruct%cosa - dx => gridstruct%dx - dy => gridstruct%dy - dxc => gridstruct%dxc - dyc => gridstruct%dyc - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - rdx => gridstruct%rdx - rdy => gridstruct%rdy + bounded_domain = gridstruct%bounded_domain + + area => gridstruct%area + rarea => gridstruct%rarea + sin_sg => gridstruct%sin_sg + cosa_u => gridstruct%cosa_u + cosa_v => gridstruct%cosa_v + cosa_s => gridstruct%cosa_s + sina_u => gridstruct%sina_u + sina_v => gridstruct%sina_v + rsin_u => gridstruct%rsin_u + rsin_v => gridstruct%rsin_v + rsina => gridstruct%rsina + f0 => gridstruct%f0 + rsin2 => gridstruct%rsin2 + divg_u => gridstruct%divg_u + divg_v => gridstruct%divg_v + cosa => gridstruct%cosa + dx => gridstruct%dx + dy => gridstruct%dy + dxc => gridstruct%dxc + dyc => gridstruct%dyc + rdxa => gridstruct%rdxa + rdya => gridstruct%rdya + rdx => gridstruct%rdx + rdy => gridstruct%rdy sw_corner = gridstruct%sw_corner se_corner = gridstruct%se_corner nw_corner = gridstruct%nw_corner ne_corner = gridstruct%ne_corner -#ifdef SW_DYNAMICS +#ifdef SW_DYNAMICS if ( test_case == 1 ) then do j=jsd,jed do i=is,ie+1 @@ -647,7 +643,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & if ( flagstruct%grid_type < 3 ) then !!! TO DO: separate versions for nesting and for cubed-sphere - if (nested) then + if (bounded_domain) then do j=jsd,jed do i=is-1,ie+2 ut(i,j) = ( uc(i,j) - 0.25 * cosa_u(i,j) * & @@ -681,7 +677,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo endif - if (.not. nested) then + if (.not. bounded_domain) then ! West edge: if ( is==1 ) then do j=jsd,jed @@ -753,10 +749,10 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo endif -! The following code solves a 2x2 system to get the interior parallel-to-edge uc,vc values -! near the corners (ex: for the sw corner ut(2,1) and vt(1,2) are solved for simultaneously). -! It then computes the halo uc, vc values so as to be consistent with the computations on -! the facing panel. +! The following code solves a 2x2 system to get the interior parallel-to-edge uc,vc values +! near the corners (ex: for the sw corner ut(2,1) and vt(1,2) are solved for simultaneously). +! It then computes the halo uc, vc values so as to be consistent with the computations on +! the facing panel. !The system solved is: ! ut(2,1) = uc(2,1) - avg(vt)*cosa_u(2,1) @@ -837,7 +833,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & 0.25*cosa_u(2,npy-1)*(vt(1,npy)+vt(2,npy)+vt(2,npy-1))) ) * damp endif - end if !.not. nested + end if !.not. bounded_domain else ! 
flagstruct%grid_type >= 3 @@ -846,10 +842,10 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & ut(i,j) = uc(i,j) enddo enddo - + do j=js,je+1 do i=isd,ied - vt(i,j) = vc(i,j) + vt(i,j) = vc(i,j) enddo enddo endif ! end grid_type choices @@ -867,7 +863,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo ! Explanation of the following code: -! xfx_adv = dt*ut*dy +! xfx_adv = dt*ut*dy ! crx_adv = dt*ut/dx do j=jsd,jed @@ -890,7 +886,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & yfx_adv(i,j) = dx(i,j)*yfx_adv(i,j)*sin_sg(i,j-1,4) else cry_adv(i,j) = yfx_adv(i,j) * rdya(i,j) - yfx_adv(i,j) = dx(i,j)*yfx_adv(i,j)*sin_sg(i,j,2) + yfx_adv(i,j) = dx(i,j)*yfx_adv(i,j)*sin_sg(i,j,2) endif enddo enddo @@ -919,12 +915,12 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & do i=is,ie+1 cx(i,j) = cx(i,j) + crx_adv(i,j) enddo - enddo + enddo do j=js,je do i=is,ie+1 xflux(i,j) = xflux(i,j) + fx(i,j) enddo - enddo + enddo do j=js,je+1 do i=isd,ied cy(i,j) = cy(i,j) + cry_adv(i,j) @@ -932,7 +928,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & do i=is,ie yflux(i,j) = yflux(i,j) + fy(i,j) enddo - enddo + enddo #ifndef SW_DYNAMICS do j=js,je @@ -983,7 +979,8 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & ! endif call fv_tp_2d(pt, crx_adv,cry_adv, npx, npy, hord_tm, gx, gy, & xfx_adv,yfx_adv, gridstruct, bd, ra_x, ra_y, & - mfx=fx, mfy=fy, mass=delp, nord=nord_t, damp_c=damp_t) + mfx=fx, mfy=fy, mass=delp, nord=nord_v, damp_c=damp_v) +! mfx=fx, mfy=fy, mass=delp, nord=nord_t, damp_c=damp_t) #endif if ( inline_q ) then @@ -1001,7 +998,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo do iq=1,nq call fv_tp_2d(q(isd,jsd,k,iq), crx_adv,cry_adv, npx, npy, hord_tr, gx, gy, & - xfx_adv,yfx_adv, gridstruct, bd, ra_x, ra_y, & + xfx_adv,yfx_adv, gridstruct, bd, ra_x, ra_y, & mfx=fx, mfy=fy, mass=delp, nord=nord_t, damp_c=damp_t) do j=js,je do i=is,ie @@ -1047,7 +1044,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & dt5 = 0.5 *dt dt4 = 0.25*dt - if (nested) then + if (bounded_domain) then is2 = is; ie1 = ie+1 js2 = js; je1 = je+1 else @@ -1055,10 +1052,9 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & js2 = max(2,js); je1 = min(npy-1,je+1) end if -!!! 
TO DO: separate versions for nested and for cubed-sphere if (flagstruct%grid_type < 3) then - if (nested) then + if (bounded_domain) then do j=js2,je1 do i=is2,ie1 vb(i,j) = dt5*(vc(i-1,j)+vc(i,j)-(uc(i,j-1)+uc(i,j))*cosa(i,j))*rsina(i,j) @@ -1092,7 +1088,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo endif endif - + else do j=js,je+1 do i=is,ie+1 @@ -1102,7 +1098,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & endif call ytp_v(is,ie,js,je,isd,ied,jsd,jed, vb, u, v, ub, hord_mt, gridstruct%dy, gridstruct%rdy, & - npx, npy, flagstruct%grid_type, nested) + npx, npy, flagstruct%grid_type, bounded_domain) do j=js,je+1 do i=is,ie+1 @@ -1112,10 +1108,10 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & if (flagstruct%grid_type < 3) then - if (nested) then + if (bounded_domain) then do j=js,je+1 - + do i=is2,ie1 ub(i,j) = dt5*(uc(i,j-1)+uc(i,j)-(vc(i-1,j)+vc(i,j))*cosa(i,j))*rsina(i,j) enddo @@ -1149,7 +1145,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & enddo endif endif - + else do j=js,je+1 do i=is,ie+1 @@ -1159,7 +1155,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & endif call xtp_u(is,ie,js,je, isd,ied,jsd,jed, ub, u, v, vb, hord_mt, gridstruct%dx, gridstruct%rdx, & - npx, npy, flagstruct%grid_type, nested) + npx, npy, flagstruct%grid_type, bounded_domain) do j=js,je+1 do i=is,ie+1 @@ -1170,7 +1166,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & !----------------------------------------- ! Fix KE at the 4 corners of the face: !----------------------------------------- - if (.not. nested) then + if (.not. bounded_domain) then dt6 = dt / 6. if ( sw_corner ) then ke(1,1) = dt6*( (ut(1,1) + ut(1,0)) * u(1,1) + & @@ -1260,7 +1256,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & if ( nord==0 ) then ! area ~ dxb*dyb*sin(alpha) - if (nested) then + if (bounded_domain) then do j=js,je+1 do i=is-1,ie+1 @@ -1307,9 +1303,9 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & vort(1, j) = v(1, j)*dxc(1, j)*sin_sg(1,j,1) end if end if - if ( (ie+1)==npx ) then + if ( (ie+1)==npx ) then if (uc(npx,j) > 0) then - vort(npx,j) = v(npx,j)*dxc(npx,j)* & + vort(npx,j) = v(npx,j)*dxc(npx,j)* & sin_sg(npx-1,j,3) else vort(npx,j) = v(npx,j)*dxc(npx,j)* & @@ -1356,7 +1352,7 @@ subroutine d_sw(delpc, delp, ptc, pt, u, v, w, uc,vc, & fill_c = (nt/=0) .and. (flagstruct%grid_type<3) .and. & ( sw_corner .or. se_corner .or. ne_corner .or. nw_corner ) & - .and. .not. nested + .and. .not. bounded_domain if ( fill_c ) call fill_corners(divg_d, npx, npy, FILL=XDir, BGRID=.true.) 
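! --------------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the patch] The cosa/rsina
! expressions above, and the "2x2 system" solved near the cube corners, all
! come from the same non-orthogonal coordinate transform. With cosa the
! cosine of the angle between the two local coordinate axes, the covariant
! components (uc, vc) and contravariant components (ut, vt) of one vector
! satisfy
!     uc = ut + vt*cosa
!     vc = vt + ut*cosa
! Eliminating one unknown gives the pattern used throughout d_sw, where
! 1/(1 - cosa**2) = 1/sin**2 of the axis angle plays the role of the rsin*
! metric factors. Variable names and values here are illustrative, not the
! model's:
      real :: cosa, rsin2, uc, vc, ut, vt
      cosa = 0.1                       ! hypothetical axis-angle cosine
      uc = 1.0;  vc = 0.5              ! hypothetical covariant input
      rsin2 = 1./(1. - cosa**2)        ! valid while the axes are not parallel
      ut = (uc - vc*cosa)*rsin2        ! contravariant u
      vt = (vc - uc*cosa)*rsin2        ! contravariant v
! (check: ut + vt*cosa recovers uc = 1.0 exactly)
! --------------------------------------------------------------------------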
do j=js-nt,je+1+nt @@ -1558,7 +1554,7 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) real, intent(out):: fx2(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy2(bd%isd:bd%ied,bd%jsd:bd%jed+1) integer i,j, nt, n, i1, i2, j1, j2 - logical :: nested + logical :: bounded_domain #ifdef USE_SG real, pointer, dimension(:,:,:) :: sin_sg @@ -1569,18 +1565,18 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) #ifdef USE_SG sin_sg => gridstruct%sin_sg - rdxc => gridstruct%rdxc - rdyc => gridstruct%rdyc - dx => gridstruct%dx - dy => gridstruct%dy + rdxc => gridstruct%rdxc + rdyc => gridstruct%rdyc + dx => gridstruct%dx + dy => gridstruct%dy #endif - nested = gridstruct%nested + bounded_domain = gridstruct%bounded_domain is = bd%is ie = bd%ie js = bd%js je = bd%je - + i1 = is-1-nord; i2 = ie+1+nord j1 = js-1-nord; j2 = je+1+nord @@ -1590,7 +1586,7 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) enddo enddo - if( nord>0 ) call copy_corners(d2, npx, npy, 1, nested, bd, gridstruct%sw_corner, & + if( nord>0 .and. .not. bounded_domain) call copy_corners(d2, npx, npy, 1, bounded_domain, bd, gridstruct%sw_corner, & gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nord,je+nord do i=is-nord,ie+nord+1 @@ -1602,7 +1598,7 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) enddo enddo - if( nord>0 ) call copy_corners(d2, npx, npy, 2, nested, bd, gridstruct%sw_corner, & + if( nord>0 .and. .not. bounded_domain) call copy_corners(d2, npx, npy, 2, bounded_domain, bd, gridstruct%sw_corner, & gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nord,je+nord+1 do i=is-nord,ie+nord @@ -1623,7 +1619,7 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) enddo enddo - call copy_corners(d2, npx, npy, 1, nested, bd, gridstruct%sw_corner, & + if (.not. bounded_domain) call copy_corners(d2, npx, npy, 1, bounded_domain, bd, gridstruct%sw_corner, & gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nt,je+nt @@ -1636,7 +1632,7 @@ subroutine del6_vt_flux(nord, npx, npy, damp, q, d2, fx2, fy2, gridstruct, bd) enddo enddo - call copy_corners(d2, npx, npy, 2, nested, bd, & + if (.not. 
bounded_domain) call copy_corners(d2, npx, npy, 2, bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nt,je+nt+1 @@ -1673,7 +1669,7 @@ subroutine divergence_corner(u, v, ua, va, divg_d, gridstruct, flagstruct, bd) integer :: is, ie, js, je integer :: npx, npy - logical :: nested + logical :: bounded_domain is = bd%is ie = bd%ie @@ -1682,14 +1678,14 @@ subroutine divergence_corner(u, v, ua, va, divg_d, gridstruct, flagstruct, bd) npx = flagstruct%npx npy = flagstruct%npy - nested = gridstruct%nested + bounded_domain = gridstruct%bounded_domain - sin_sg => gridstruct%sin_sg - cos_sg => gridstruct%cos_sg - dxc => gridstruct%dxc - dyc => gridstruct%dyc + sin_sg => gridstruct%sin_sg + cos_sg => gridstruct%cos_sg + dxc => gridstruct%dxc + dyc => gridstruct%dyc - if (nested) then + if (bounded_domain) then is2 = is; ie1 = ie+1 else is2 = max(2,is); ie1 = min(npx-1,ie+1) @@ -1785,7 +1781,6 @@ subroutine divergence_corner_nest(u, v, ua, va, divg_d, gridstruct, flagstruct, integer :: isd, ied, jsd, jed integer :: npx, npy - logical :: nested isd = bd%isd ied = bd%ied @@ -1794,17 +1789,16 @@ subroutine divergence_corner_nest(u, v, ua, va, divg_d, gridstruct, flagstruct, npx = flagstruct%npx npy = flagstruct%npy - nested = gridstruct%nested rarea_c => gridstruct%rarea_c - sin_sg => gridstruct%sin_sg - cos_sg => gridstruct%cos_sg - cosa_u => gridstruct%cosa_u - cosa_v => gridstruct%cosa_v - sina_u => gridstruct%sina_u - sina_v => gridstruct%sina_v - dxc => gridstruct%dxc - dyc => gridstruct%dyc + sin_sg => gridstruct%sin_sg + cos_sg => gridstruct%cos_sg + cosa_u => gridstruct%cosa_u + cosa_v => gridstruct%cosa_v + sina_u => gridstruct%sina_u + sina_v => gridstruct%sina_v + dxc => gridstruct%dxc + dyc => gridstruct%dyc divg_d = 1.e25 @@ -1896,7 +1890,7 @@ subroutine smag_corner(dt, u, v, ua, va, smag_c, bd, npx, npy, gridstruct, ng) integer :: is, ie, js, je integer :: isd, ied, jsd, jed - + is = bd%is ie = bd%ie js = bd%js @@ -1963,7 +1957,7 @@ subroutine smag_corner(dt, u, v, ua, va, smag_c, bd, npx, npy, gridstruct, ng) end subroutine smag_corner - subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, npy, grid_type, nested) + subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, npy, grid_type, bounded_domain) integer, intent(in):: is,ie,js,je, isd,ied,jsd,jed real, INTENT(IN):: u(isd:ied,jsd:jed+1) @@ -1973,10 +1967,11 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, real, INTENT(IN) :: dx(isd:ied, jsd:jed+1) real, INTENT(IN) :: rdx(isd:ied, jsd:jed+1) integer, INTENT(IN) :: iord, npx, npy, grid_type - logical, INTENT(IN) :: nested + logical, INTENT(IN) :: bounded_domain ! Local real, dimension(is-1:ie+1):: bl, br, b0 logical, dimension(is-1:ie+1):: smt5, smt6 + logical, dimension(is:ie+1):: hi5, hi6 real:: fx0(is:ie+1) real al(is-1:ie+2), dm(is-2:ie+2) real dq(is-3:ie+2) @@ -1987,26 +1982,15 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, integer is3, ie3 integer is2, ie2 - if ( nested .or. grid_type>3 ) then + if ( bounded_domain .or. grid_type>3 ) then is3 = is-1 ; ie3 = ie+1 else is3 = max(3,is-1) ; ie3 = min(npx-3,ie+1) end if - if ( iord==1 ) then - do j=js,je+1 - do i=is,ie+1 - if( c(i,j)>0. ) then - flux(i,j) = u(i-1,j) - else - flux(i,j) = u(i,j) - endif - enddo - enddo - - elseif ( iord < 8 ) then -! Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 + if ( iord < 8 ) then +! 
Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 do j=js,je+1 @@ -2018,7 +2002,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, br(i) = al(i+1) - u(i,j) enddo - if ( (.not.nested) .and. grid_type < 3) then + if ( (.not.bounded_domain) .and. grid_type < 3) then if ( is==1 ) then xt = c3*u(1,j) + c2*u(2,j) + c1*u(3,j) br(1) = xt - u(1,j) @@ -2077,6 +2061,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, enddo elseif ( iord==3 ) then + do i=is-1, ie+1 x0 = abs(b0(i)) x1 = abs(bl(i)-br(i)) @@ -2107,6 +2092,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, enddo elseif ( iord==4 ) then ! more damp than ord5 but less damp than ord6 + do i=is-1, ie+1 x0 = abs(b0(i)) x1 = abs(bl(i)-br(i)) @@ -2131,7 +2117,6 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, endif enddo - else ! iord=5,6,7 if ( iord==5 ) then @@ -2143,6 +2128,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, smt5(i) = abs(3.*b0(i)) < abs(bl(i)-br(i)) enddo endif + !DEC$ VECTOR ALWAYS do i=is,ie+1 if( c(i,j)>0. ) then @@ -2226,8 +2212,8 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, !-------------- ! fix the edges !-------------- -!!! TO DO: separate versions for nested and for cubed-sphere - if ( is==1 .and. .not. nested) then +!!! TO DO: separate versions for bounded_domain and for cubed-sphere + if ( is==1 .and. .not. bounded_domain) then br(2) = al(3) - u(2,j) xt = s15*u(1,j) + s11*u(2,j) - s14*dm(2) bl(2) = xt - u(2,j) @@ -2250,7 +2236,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, call pert_ppm(1, u(2,j), bl(2), br(2), -1) endif - if ( (ie+1)==npx .and. .not. nested) then + if ( (ie+1)==npx .and. .not. bounded_domain) then bl(npx-2) = al(npx-2) - u(npx-2,j) xt = s15*u(npx-1,j) + s11*u(npx-2,j) + s14*dm(npx-2) br(npx-2) = xt - u(npx-2,j) @@ -2288,7 +2274,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, br(i) = min(max(0., pmp, lac), max(al(i+1)-u(i,j), min(0.,pmp, lac))) enddo endif - + do i=is,ie+1 if( c(i,j)>0. ) then cfl = c(i,j)*rdx(i-1,j) @@ -2305,7 +2291,7 @@ subroutine xtp_u(is,ie,js,je,isd,ied,jsd,jed,c, u, v, flux, iord, dx, rdx, npx, end subroutine xtp_u - subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, npy, grid_type, nested) + subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, npy, grid_type, bounded_domain) integer, intent(in):: is,ie,js,je, isd,ied,jsd,jed integer, intent(IN):: jord real, INTENT(IN) :: u(isd:ied,jsd:jed+1) @@ -2315,7 +2301,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, real, INTENT(IN) :: dy(isd:ied+1,jsd:jed) real, INTENT(IN) :: rdy(isd:ied+1,jsd:jed) integer, INTENT(IN) :: npx, npy, grid_type - logical, INTENT(IN) :: nested + logical, INTENT(IN) :: bounded_domain ! Local: logical, dimension(is:ie+1,js-1:je+1):: smt5, smt6 real:: fx0(is:ie+1) @@ -2328,7 +2314,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, real x0, x1, x0R, x0L integer i, j, is1, ie1, js3, je3 - if ( nested .or. grid_type>3 ) then + if ( bounded_domain .or. grid_type>3 ) then js3 = js-1; je3 = je+1 else js3 = max(3,js-1); je3 = min(npy-3,je+1) @@ -2347,7 +2333,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, enddo elseif ( jord<8 ) then -! 
Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 +! Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 do j=js3,je3+1 do i=is,ie+1 @@ -2361,7 +2347,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, enddo enddo - if ( (.not.nested) .and. grid_type < 3) then + if ( (.not.bounded_domain) .and. grid_type < 3) then if( js==1 ) then do i=is,ie+1 bl(i,0) = c1*v(i,-2) + c2*v(i,-1) + c3*v(i,0) - v(i,0) @@ -2448,7 +2434,6 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, smt6(i,j) = 3.*x0 < x1 enddo enddo - do j=js,je+1 do i=is,ie+1 fx0(i) = 0. @@ -2504,10 +2489,8 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, enddo enddo - else ! jord = 5,6,7 ! Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 < ord7 - if ( jord==5 ) then do j=js-1,je+1 do i=is,ie+1 @@ -2563,7 +2546,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, al(i,j) = 0.5*(v(i,j-1)+v(i,j)) + r3*(dm(i,j-1)-dm(i,j)) enddo enddo - + if ( jord==8 ) then do j=js3,je3 do i=is,ie+1 @@ -2575,7 +2558,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, elseif ( jord==9 ) then do j=js3,je3 do i=is,ie+1 - pmp_1 = -2.*dq(i,j) + pmp_1 = -2.*dq(i,j) lac_1 = pmp_1 + 1.5*dq(i,j+1) bl(i,j) = min(max(0., pmp_1, lac_1), max(al(i,j)-v(i,j), min(0., pmp_1, lac_1))) pmp_2 = 2.*dq(i,j-1) @@ -2595,7 +2578,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, br(i,j) = 0. endif elseif( abs(3.*(bl(i,j)+br(i,j))) > abs(bl(i,j)-br(i,j)) ) then - pmp_1 = -2.*dq(i,j) + pmp_1 = -2.*dq(i,j) lac_1 = pmp_1 + 1.5*dq(i,j+1) bl(i,j) = min(max(0., pmp_1, lac_1), max(bl(i,j), min(0., pmp_1, lac_1))) pmp_2 = 2.*dq(i,j-1) @@ -2613,11 +2596,11 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, enddo enddo endif - + !-------------- ! fix the edges !-------------- - if( js==1 .and. .not. nested) then + if( js==1 .and. .not. bounded_domain) then do i=is,ie+1 br(i,2) = al(i,3) - v(i,2) xt = s15*v(i,1) + s11*v(i,2) - s14*dm(i,2) @@ -2657,7 +2640,7 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, j=2 call pert_ppm(ie-is+2, v(is,j), bl(is,j), br(is,j), -1) endif - if( (je+1)==npy .and. .not. nested) then + if( (je+1)==npy .and. .not. bounded_domain) then do i=is,ie+1 bl(i,npy-2) = al(i,npy-2) - v(i,npy-2) xt = s15*v(i,npy-1) + s11*v(i,npy-2) + s14*dm(i,npy-2) @@ -2703,18 +2686,18 @@ subroutine ytp_v(is,ie,js,je,isd,ied,jsd,jed, c, u, v, flux, jord, dy, rdy, npx, al(i,j) = 0.5*(v(i,j-1)+v(i,j)) + r3*(dm(i,j-1)-dm(i,j)) enddo enddo - + do j=js-1,je+1 do i=is,ie+1 pmp = 2.*dq(i,j-1) lac = pmp - 1.5*dq(i,j-2) br(i,j) = min(max(0.,pmp,lac), max(al(i,j+1)-v(i,j), min(0.,pmp,lac))) - pmp = -2.*dq(i,j) + pmp = -2.*dq(i,j) lac = pmp + 1.5*dq(i,j+1) bl(i,j) = min(max(0.,pmp,lac), max(al(i,j)-v(i,j), min(0.,pmp,lac))) enddo enddo - + endif do j=js,je+1 @@ -2736,11 +2719,11 @@ end subroutine ytp_v !There is a limit to how far this routine can fill uc and vc in the ! halo, and so either mpp_update_domains or some sort of boundary -! routine (extrapolation, outflow, interpolation from a nested grid) +! routine (extrapolation, outflow, interpolation from a bounded_domain grid) ! is needed after c_sw is completed if these variables are needed ! 
in the halo subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & - bd, npx, npy, nested, grid_type) + bd, npx, npy, bounded_domain, grid_type) type(fv_grid_bounds_type), intent(IN) :: bd logical, intent(in):: dord4 real, intent(in) :: u(bd%isd:bd%ied,bd%jsd:bd%jed+1) @@ -2749,9 +2732,9 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & real, intent(out), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1):: vc real, intent(out), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed ):: ua, va, ut, vt integer, intent(IN) :: npx, npy, grid_type - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain type(fv_grid_type), intent(IN), target :: gridstruct -! Local +! Local real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: utmp, vtmp integer npt, i, j, ifirst, ilast, id integer :: is, ie, js, je @@ -2771,15 +2754,15 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & jsd = bd%jsd jed = bd%jed - sin_sg => gridstruct%sin_sg - cosa_u => gridstruct%cosa_u - cosa_v => gridstruct%cosa_v - cosa_s => gridstruct%cosa_s - rsin_u => gridstruct%rsin_u - rsin_v => gridstruct%rsin_v - rsin2 => gridstruct%rsin2 - dxa => gridstruct%dxa - dya => gridstruct%dya + sin_sg => gridstruct%sin_sg + cosa_u => gridstruct%cosa_u + cosa_v => gridstruct%cosa_v + cosa_s => gridstruct%cosa_s + rsin_u => gridstruct%rsin_u + rsin_v => gridstruct%rsin_v + rsin2 => gridstruct%rsin2 + dxa => gridstruct%dxa + dya => gridstruct%dya if ( dord4 ) then id = 1 @@ -2787,7 +2770,7 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & id = 0 endif - if (grid_type < 3 .and. .not. nested) then + if (grid_type < 3 .and. .not. bounded_domain) then npt = 4 else npt = -2 @@ -2795,9 +2778,9 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & ! Initialize the non-existing corner regions utmp(:,:) = big_number - vtmp(:,:) = big_number + vtmp(:,:) = big_number - if ( nested) then + if ( bounded_domain) then do j=jsd+1,jed-1 do i=isd,ied @@ -2816,7 +2799,7 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j)) enddo i = isd - vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) + vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) i = ied vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j)) enddo @@ -2920,7 +2903,7 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & enddo endif - if (grid_type < 3 .and. .not. nested) then + if (grid_type < 3 .and. .not. bounded_domain) then ifirst = max(3, is-1) ilast = min(npx-2,ie+2) else @@ -2941,24 +2924,24 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & ! Xdir: if( gridstruct%sw_corner ) then ua(-1,0) = -va(0,2) - ua( 0,0) = -va(0,1) + ua( 0,0) = -va(0,1) endif if( gridstruct%se_corner ) then ua(npx, 0) = va(npx,1) - ua(npx+1,0) = va(npx,2) + ua(npx+1,0) = va(npx,2) endif if( gridstruct%ne_corner ) then ua(npx, npy) = -va(npx,npy-1) - ua(npx+1,npy) = -va(npx,npy-2) + ua(npx+1,npy) = -va(npx,npy-2) endif if( gridstruct%nw_corner ) then ua(-1,npy) = va(0,npy-2) - ua( 0,npy) = va(0,npy-1) + ua( 0,npy) = va(0,npy-1) endif - if( is==1 .and. .not. nested ) then + if( is==1 .and. .not. bounded_domain ) then do j=js-1,je+1 - uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j) + uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j) ut(1,j) = edge_interpolate4(ua(-1:2,j), dxa(-1:2,j)) !Want to use the UPSTREAM value if (ut(1,j) > 0.) 
then @@ -2972,16 +2955,16 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & enddo endif - if( (ie+1)==npx .and. .not. nested ) then + if( (ie+1)==npx .and. .not. bounded_domain ) then do j=js-1,je+1 - uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j) + uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j) ut(npx, j) = edge_interpolate4(ua(npx-2:npx+1,j), dxa(npx-2:npx+1,j)) if (ut(npx,j) > 0.) then uc(npx,j) = ut(npx,j)*sin_sg(npx-1,j,3) else uc(npx,j) = ut(npx,j)*sin_sg(npx,j,1) end if - uc(npx+1,j) = c3*utmp(npx,j) + c2*utmp(npx+1,j) + c1*utmp(npx+2,j) + uc(npx+1,j) = c3*utmp(npx,j) + c2*utmp(npx+1,j) + c1*utmp(npx+2,j) ut(npx-1,j) = (uc(npx-1,j)-v(npx-1,j)*cosa_u(npx-1,j))*rsin_u(npx-1,j) ut(npx+1,j) = (uc(npx+1,j)-v(npx+1,j)*cosa_u(npx+1,j))*rsin_u(npx+1,j) enddo @@ -3032,7 +3015,7 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & if (grid_type < 3) then do j=js-1,je+2 - if ( j==1 .and. .not. nested ) then + if ( j==1 .and. .not. bounded_domain ) then do i=is-1,ie+1 vt(i,j) = edge_interpolate4(va(i,-1:2), dya(i,-1:2)) if (vt(i,j) > 0.) then @@ -3041,17 +3024,17 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & vc(i,j) = vt(i,j)*sin_sg(i,j,2) end if enddo - elseif ( j==0 .or. j==(npy-1) .and. .not. nested ) then + elseif ( j==0 .or. j==(npy-1) .and. .not. bounded_domain ) then do i=is-1,ie+1 vc(i,j) = c1*vtmp(i,j-2) + c2*vtmp(i,j-1) + c3*vtmp(i,j) vt(i,j) = (vc(i,j) - u(i,j)*cosa_v(i,j))*rsin_v(i,j) enddo - elseif ( j==2 .or. j==(npy+1) .and. .not. nested ) then + elseif ( j==2 .or. j==(npy+1) .and. .not. bounded_domain ) then do i=is-1,ie+1 vc(i,j) = c1*vtmp(i,j+1) + c2*vtmp(i,j) + c3*vtmp(i,j-1) vt(i,j) = (vc(i,j) - u(i,j)*cosa_v(i,j))*rsin_v(i,j) enddo - elseif ( j==npy .and. .not. nested ) then + elseif ( j==npy .and. .not. bounded_domain ) then do i=is-1,ie+1 vt(i,j) = edge_interpolate4(va(i,j-2:j+1), dya(i,j-2:j+1)) if (vt(i,j) > 0.) then @@ -3080,7 +3063,7 @@ subroutine d2a2c_vect(u, v, ua, va, uc, vc, ut, vt, dord4, gridstruct, & end subroutine d2a2c_vect - + real function edge_interpolate4(ua, dxa) real, intent(in) :: ua(4) diff --git a/model/tp_core.F90 b/model/tp_core.F90 index e446a33f9..0846ea567 100644 --- a/model/tp_core.F90 +++ b/model/tp_core.F90 @@ -23,7 +23,6 @@ module tp_core_mod ! ! !MODULE: tp_core --- A collection of routines to support FV transport ! - use fv_mp_mod, only: ng use fv_grid_utils_mod, only: big_number use fv_arrays_mod, only: fv_grid_type, fv_grid_bounds_type @@ -70,10 +69,6 @@ module tp_core_mod ! q(i+0.5) = p1*(q(i-1)+q(i)) + p2*(q(i-2)+q(i+1)) ! integer:: is, ie, js, je, isd, ied, jsd, jed -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - ! !EOP !----------------------------------------------------------------------- @@ -132,14 +127,15 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & endif ord_ou = hord - if (.not. gridstruct%nested) call copy_corners(q, npx, npy, 2, gridstruct%nested, bd, & - gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) + if (.not. 
gridstruct%bounded_domain) & + call copy_corners(q, npx, npy, 2, gridstruct%bounded_domain, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) - call yppm(fy2, q, cry, ord_in, isd,ied,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dya, gridstruct%nested, gridstruct%grid_type) + call yppm(fy2, q, cry, ord_in, isd,ied,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dya, gridstruct%bounded_domain, gridstruct%grid_type) do j=js,je+1 do i=isd,ied - fyy(i,j) = yfx(i,j) * fy2(i,j) + fyy(i,j) = yfx(i,j) * fy2(i,j) enddo enddo do j=js,je @@ -148,12 +144,13 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & enddo enddo - call xppm(fx, q_i, crx(is,js), ord_ou, is,ie,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%nested, gridstruct%grid_type) + call xppm(fx, q_i, crx(is,js), ord_ou, is,ie,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%bounded_domain, gridstruct%grid_type) - if (.not. gridstruct%nested) call copy_corners(q, npx, npy, 1, gridstruct%nested, bd, & + if (.not. gridstruct%bounded_domain) & + call copy_corners(q, npx, npy, 1, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) - call xppm(fx2, q, crx, ord_in, is,ie,isd,ied, jsd,jed,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%nested, gridstruct%grid_type) + call xppm(fx2, q, crx, ord_in, is,ie,isd,ied, jsd,jed,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%bounded_domain, gridstruct%grid_type) do j=jsd,jed do i=is,ie+1 @@ -164,7 +161,7 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & enddo enddo - call yppm(fy, q_j, cry, ord_ou, is,ie,isd,ied, js,je,jsd,jed, npx, npy, gridstruct%dya, gridstruct%nested, gridstruct%grid_type) + call yppm(fy, q_j, cry, ord_ou, is,ie,isd,ied, js,je,jsd,jed, npx, npy, gridstruct%dya, gridstruct%bounded_domain, gridstruct%grid_type) !---------------- ! Flux averaging: @@ -216,15 +213,17 @@ end subroutine fv_tp_2d !Weird arguments are because this routine is called in a lot of !places outside of tp_core, sometimes very deeply nested in the call tree. - subroutine copy_corners(q, npx, npy, dir, nested, bd, & + subroutine copy_corners(q, npx, npy, dir, bounded_domain, bd, & sw_corner, se_corner, nw_corner, ne_corner) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy, dir real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed) - logical, intent(IN) :: nested, sw_corner, se_corner, nw_corner, ne_corner - integer i,j + logical, intent(IN) :: bounded_domain, sw_corner, se_corner, nw_corner, ne_corner + integer i,j, ng + + ng = bd%ng - if (nested) return + if (bounded_domain) return if ( dir == 1 ) then ! XDir: @@ -290,10 +289,10 @@ subroutine copy_corners(q, npx, npy, dir, nested, bd, & endif endif - + end subroutine copy_corners - subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, dxa, nested, grid_type) + subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, dxa, bounded_domain, grid_type) integer, INTENT(IN) :: is, ie, isd, ied, jsd, jed integer, INTENT(IN) :: jfirst, jlast ! compute domain integer, INTENT(IN) :: iord @@ -301,7 +300,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, real , INTENT(IN) :: q(isd:ied,jfirst:jlast) real , INTENT(IN) :: c(is:ie+1,jfirst:jlast) ! 
Courant N (like FLUX) real , intent(IN) :: dxa(isd:ied,jsd:jed) - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain integer, intent(IN) :: grid_type ! !OUTPUT PARAMETERS: real , INTENT(OUT) :: flux(is:ie+1,jfirst:jlast) ! Flux @@ -316,7 +315,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, integer:: i, j, ie3, is1, ie1 real:: x0, x1, xt, qtmp, pmp_1, lac_1, pmp_2, lac_2 - if ( .not. nested .and. grid_type<3 ) then + if ( .not. bounded_domain .and. grid_type<3 ) then is1 = max(3,is-1); ie3 = min(npx-2,ie+2) ie1 = min(npx-3,ie+1) else @@ -332,7 +331,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, if ( iord < 8 ) then ! ord = 2: perfectly linear ppm scheme -! Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 +! Diffusivity: ord2 < ord5 < ord3 < ord4 < ord6 do i=is1, ie3 al(i) = p1*(q1(i-1)+q1(i)) + p2*(q1(i-2)+q1(i+1)) @@ -343,7 +342,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, enddo endif - if ( .not.nested .and. grid_type<3 ) then + if ( .not.bounded_domain .and. grid_type<3 ) then if ( is==1 ) then al(0) = c1*q1(-2) + c2*q1(-1) + c3*q1(0) al(1) = 0.5*(((2.*dxa(0,j)+dxa(-1,j))*q1(0)-dxa(0,j)*q1(-1))/(dxa(-1,j)+dxa(0,j)) & @@ -376,10 +375,10 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, xt = c(i,j) if ( xt > 0. ) then qtmp = q1(i-1) - flux(i,j) = qtmp + (1.-xt)*(al(i)-qtmp-xt*(al(i-1)+al(i)-(qtmp+qtmp))) + flux(i,j) = qtmp + (1.-xt)*(al(i)-qtmp-xt*(al(i-1)+al(i)-(qtmp+qtmp))) else qtmp = q1(i) - flux(i,j) = qtmp + (1.+xt)*(al(i)-qtmp+xt*(al(i)+al(i+1)-(qtmp+qtmp))) + flux(i,j) = qtmp + (1.+xt)*(al(i)-qtmp+xt*(al(i)+al(i+1)-(qtmp+qtmp))) endif ! x0 = sign(dim(xt, 0.), 1.) ! x1 = sign(dim(0., xt), 1.) @@ -469,7 +468,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, fx1(i) = (1.+c(i,j))*(bl(i) + c(i,j)*b0(i)) flux(i,j) = q1(i) endif - if (smt5(i-1).or.smt5(i)) flux(i,j) = flux(i,j) + fx1(i) + if (smt5(i-1).or.smt5(i)) flux(i,j) = flux(i,j) + fx1(i) enddo endif goto 666 @@ -526,7 +525,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, ! Positive definite constraint: if(iord==9 .or. iord==13) call pert_ppm(ie1-is1+1, q1(is1), bl(is1), br(is1), 0) - if (.not. nested .and. grid_type<3) then + if (.not. bounded_domain .and. grid_type<3) then if ( is==1 ) then bl(0) = s14*dm(-1) + s11*(q1(-1)-q1(0)) @@ -581,7 +580,7 @@ subroutine xppm(flux, q, c, iord, is,ie,isd,ied, jfirst,jlast,jsd,jed, npx, npy, end subroutine xppm - subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy, dya, nested, grid_type) + subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy, dya, bounded_domain, grid_type) integer, INTENT(IN) :: ifirst,ilast ! Compute domain integer, INTENT(IN) :: isd,ied, js,je,jsd,jed integer, INTENT(IN) :: jord @@ -590,7 +589,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy real , intent(in) :: c(isd:ied,js:je+1 ) ! Courant number real , INTENT(OUT):: flux(ifirst:ilast,js:je+1) ! Flux real , intent(IN) :: dya(isd:ied,jsd:jed) - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain integer, intent(IN) :: grid_type ! Local: real:: dm(ifirst:ilast,js-2:je+2) @@ -602,12 +601,12 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy real:: x0, xt, qtmp, pmp_1, lac_1, pmp_2, lac_2, r1 integer:: i, j, js1, je3, je1 - if ( .not.nested .and. 
grid_type < 3 ) then + if ( .not.bounded_domain .and. grid_type < 3 ) then ! Cubed-sphere: js1 = max(3,js-1); je3 = min(npy-2,je+2) je1 = min(npy-3,je+1) else -! Nested grid OR Doubly periodic domain: +! Bounded_domain grid OR Doubly periodic domain: js1 = js-1; je3 = je+2 je1 = je+1 endif @@ -627,7 +626,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy enddo endif - if ( .not. nested .and. grid_type<3 ) then + if ( .not. bounded_domain .and. grid_type<3 ) then if( js==1 ) then do i=ifirst,ilast al(i,0) = c1*q(i,-2) + c2*q(i,-1) + c3*q(i,0) @@ -683,7 +682,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy bl(i,j) = al(i,j ) - q(i,j) br(i,j) = al(i,j+1) - q(i,j) b0(i,j) = bl(i,j) + br(i,j) - x0 = abs(b0(i,j)) + x0 = abs(b0(i,j)) xt = abs(bl(i,j)-br(i,j)) smt5(i,j) = x0 < xt smt6(i,j) = 3.*x0 < xt @@ -720,7 +719,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy bl(i,j) = al(i,j ) - q(i,j) br(i,j) = al(i,j+1) - q(i,j) b0(i,j) = bl(i,j) + br(i,j) - x0 = abs(b0(i,j)) + x0 = abs(b0(i,j)) xt = abs(bl(i,j)-br(i,j)) smt5(i,j) = x0 < xt smt6(i,j) = 3.*x0 < xt @@ -773,7 +772,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy fx1(i) = (1.+c(i,j))*(bl(i,j) + c(i,j)*b0(i,j)) flux(i,j) = q(i,j) endif - if (smt5(i,j-1).or.smt5(i,j)) flux(i,j) = flux(i,j) + fx1(i) + if (smt5(i,j-1).or.smt5(i,j)) flux(i,j) = flux(i,j) + fx1(i) enddo enddo endif @@ -783,7 +782,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy ! Monotonic constraints: ! ord = 8: PPM with Lin's PPM fast monotone constraint ! ord > 8: PPM with Lin's modification of Huynh 2nd constraint - + do j=js-2,je+2 do i=ifirst,ilast xt = 0.25*(q(i,j+1) - q(i,j-1)) @@ -830,7 +829,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy pmp_2 = dq(i,j-1) lac_2 = pmp_2 - 0.75*dq(i,j-2) br(i,j) = min(max(0.,pmp_2,lac_2), max(br(i,j), min(0.,pmp_2,lac_2))) - pmp_1 = -dq(i,j) + pmp_1 = -dq(i,j) lac_1 = pmp_1 + 0.75*dq(i,j+1) bl(i,j) = min(max(0.,pmp_1,lac_1), max(bl(i,j), min(0.,pmp_1,lac_1))) endif @@ -844,7 +843,7 @@ subroutine yppm(flux, q, c, jord, ifirst,ilast, isd,ied, js,je,jsd,jed, npx, npy enddo endif - if (.not. nested .and. grid_type<3) then + if (.not. bounded_domain .and. grid_type<3) then if( js==1 ) then do i=ifirst,ilast bl(i,0) = s14*dm(i,-1) + s11*(q(i,-1)-q(i,0)) @@ -922,7 +921,7 @@ subroutine mp_ghost_ew(im, jm, km, nq, ifirst, ilast, jfirst, jlast, & ! ! !DESCRIPTION: ! -! Ghost 4d east/west +! Ghost 4d east/west ! ! !REVISION HISTORY: ! 2005.08.22 Putman @@ -1029,7 +1028,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd integer, intent(in):: nord ! del-n integer, intent(in):: is,ie,js,je, npx, npy real, intent(in):: damp - real, intent(in):: q(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng) ! q ghosted on input + real, intent(in):: q(bd%isd:bd%ied, bd%jsd:bd%jed) ! q ghosted on input type(fv_grid_type), intent(IN), target :: gridstruct real, optional, intent(in):: mass(bd%isd:bd%ied, bd%jsd:bd%jed) ! q ghosted on input ! 
diffusive fluxes: @@ -1043,11 +1042,11 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd #ifdef USE_SG real, pointer, dimension(:,:) :: dx, dy, rdxc, rdyc real, pointer, dimension(:,:,:) :: sin_sg - dx => gridstruct%dx - dy => gridstruct%dy - rdxc => gridstruct%rdxc - rdyc => gridstruct%rdyc - sin_sg => gridstruct%sin_sg + dx => gridstruct%dx + dy => gridstruct%dy + rdxc => gridstruct%rdxc + rdyc => gridstruct%rdyc + sin_sg => gridstruct%sin_sg #endif i1 = is-1-nord; i2 = ie+1+nord @@ -1067,7 +1066,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd enddo endif - if( nord>0 ) call copy_corners(d2, npx, npy, 1, gridstruct%nested, bd, & + if( nord>0 ) call copy_corners(d2, npx, npy, 1, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nord,je+nord @@ -1080,7 +1079,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd enddo enddo - if( nord>0 ) call copy_corners(d2, npx, npy, 2, gridstruct%nested, bd, & + if( nord>0 ) call copy_corners(d2, npx, npy, 2, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nord,je+nord+1 do i=is-nord,ie+nord @@ -1108,7 +1107,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd enddo enddo - call copy_corners(d2, npx, npy, 1, gridstruct%nested, bd, & + call copy_corners(d2, npx, npy, 1, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nt,je+nt do i=is-nt,ie+nt+1 @@ -1120,7 +1119,7 @@ subroutine deln_flux(nord,is,ie,js,je, npx, npy, damp, q, fx, fy, gridstruct, bd enddo enddo - call copy_corners(d2, npx, npy, 2, gridstruct%nested, bd, & + call copy_corners(d2, npx, npy, 2, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) do j=js-nt,je+nt+1 do i=is-nt,ie+nt diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index 6e1653d57..de747b7ee 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -19,7 +19,7 @@ !* If not, see . 
!*********************************************************************** #ifdef OVERLOAD_R4 -#define _GET_VAR1 get_var1_real +#define _GET_VAR1 get_var1_real #else #define _GET_VAR1 get_var1_double #endif @@ -32,7 +32,7 @@ module external_ic_mod use fms_mod, only: get_mosaic_tile_file, read_data, error_mesg use fms_io_mod, only: get_tile_string, field_size, free_restart_type use fms_io_mod, only: restart_file_type, register_restart_field - use fms_io_mod, only: save_restart, restore_state, set_filename_appendix + use fms_io_mod, only: save_restart, restore_state, set_filename_appendix, get_global_att_value use mpp_mod, only: mpp_error, FATAL, NOTE, mpp_pe, mpp_root_pe use mpp_mod, only: stdlog, input_nml_file use mpp_parameter_mod, only: AGRID_PARAM=>AGRID @@ -43,18 +43,20 @@ module external_ic_mod use constants_mod, only: pi=>pi_8, omega, grav, kappa, rdgas, rvgas, cp_air use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_grid_bounds_type, R_GRID - use fv_diagnostics_mod,only: prt_maxmin + use fv_diagnostics_mod,only: prt_maxmin, prt_gb_nh_sh, prt_height use fv_grid_utils_mod, only: ptop_min, g_sum,mid_pt_sphere,get_unit_vect2,get_latlon_vector,inner_prod - use fv_io_mod, only: fv_io_read_tracers + use fv_io_mod, only: fv_io_read_tracers use fv_mapz_mod, only: mappm - use fv_mp_mod, only: ng, is_master, fill_corners, YDir, mp_reduce_min, mp_reduce_max + use fv_regional_mod, only: dump_field, H_STAGGER, U_STAGGER, V_STAGGER + use fv_mp_mod, only: is_master, fill_corners, YDir, mp_reduce_min, mp_reduce_max + use fv_regional_mod, only: start_regional_cold_start use fv_surf_map_mod, only: surfdrv, FV3_zs_filter use fv_surf_map_mod, only: sgh_g, oro_g use fv_surf_map_mod, only: del2_cubed_sphere, del4_cubed_sphere use fv_timing_mod, only: timing_on, timing_off use init_hydro_mod, only: p_var use fv_fill_mod, only: fillz - use fv_eta_mod, only: set_eta + use fv_eta_mod, only: set_eta, set_external_eta use sim_nc_mod, only: open_ncfile, close_ncfile, get_ncdim1, get_var1_double, get_var2_real, & get_var3_r4, get_var2_r4, get_var1_real, get_var_att_double use fv_nwp_nudge_mod, only: T_is_Tv @@ -72,18 +74,20 @@ module external_ic_mod real, parameter:: zvir = rvgas/rdgas - 1. real(kind=R_GRID), parameter :: cnst_0p20=0.20d0 real :: deg2rad + character (len = 80) :: source + character(len=27), parameter :: source_fv3gfs = 'FV3GFS GAUSSIAN NEMSIO FILE' - public get_external_ic, get_cubed_sphere_terrain +! version number of this module +! Include variable "version" to be written to log file. +#include -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' + public get_external_ic, get_cubed_sphere_terrain contains subroutine get_external_ic( Atm, fv_domain, cold_start ) - type(fv_atmos_type), intent(inout), target :: Atm(:) + type(fv_atmos_type), intent(inout), target :: Atm type(domain2d), intent(inout) :: fv_domain logical, intent(IN) :: cold_start real:: alpha = 0. 
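! --------------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the patch] The next hunk
! initializes the Coriolis parameter on the corner (fC) and cell-center (f0)
! grids for a rotation axis tilted by angle alpha:
!     f = 2*omega*( sin(lat)*cos(alpha) - cos(lon)*cos(lat)*sin(alpha) )
! which reduces to the familiar f = 2*omega*sin(lat) when alpha = 0. A
! single-point check (all values hypothetical):
      real, parameter :: omega_e = 7.292e-5, pi = 3.14159265358979
      real :: lon, lat, alpha, f
      lon = 0.;  lat = 0.25*pi;  alpha = 0.
      f = 2.*omega_e*( sin(lat)*cos(alpha) - cos(lon)*cos(lat)*sin(alpha) )
      ! alpha = 0 at 45N gives 2*omega_e*sin(lat) ~ 1.03e-4 s^-1
! --------------------------------------------------------------------------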
@@ -94,26 +98,27 @@ subroutine get_external_ic( Atm, fv_domain, cold_start ) real, pointer, dimension(:,:) :: fC, f0 integer :: is, ie, js, je - integer :: isd, ied, jsd, jed + integer :: isd, ied, jsd, jed, ng integer :: sphum, liq_wat, ice_wat, rainwat, snowwat, graupel, o3mr - is = Atm(1)%bd%is - ie = Atm(1)%bd%ie - js = Atm(1)%bd%js - je = Atm(1)%bd%je - isd = Atm(1)%bd%isd - ied = Atm(1)%bd%ied - jsd = Atm(1)%bd%jsd - jed = Atm(1)%bd%jed + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed + ng = Atm%bd%ng - grid => Atm(1)%gridstruct%grid - agrid => Atm(1)%gridstruct%agrid + grid => Atm%gridstruct%grid + agrid => Atm%gridstruct%agrid - fC => Atm(1)%gridstruct%fC - f0 => Atm(1)%gridstruct%f0 + fC => Atm%gridstruct%fC + f0 => Atm%gridstruct%f0 ! * Initialize coriolis param: - + do j=jsd,jed+1 do i=isd,ied+1 fc(i,j) = 2.*omega*( -1.*cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) + & @@ -129,17 +134,19 @@ subroutine get_external_ic( Atm, fv_domain, cold_start ) enddo call mpp_update_domains( f0, fv_domain ) - if ( Atm(1)%gridstruct%cubed_sphere .and. .not. Atm(1)%neststruct%nested) call fill_corners(f0, Atm(1)%npx, Atm(1)%npy, YDir) - + if ( Atm%gridstruct%cubed_sphere .and. (.not. Atm%gridstruct%bounded_domain))then + call fill_corners(f0, Atm%npx, Atm%npy, YDir) + endif + ! Read in cubed_sphere terrain - if ( Atm(1)%flagstruct%mountain ) then + if ( Atm%flagstruct%mountain ) then call get_cubed_sphere_terrain(Atm, fv_domain) else - if (.not. Atm(1)%neststruct%nested) Atm(1)%phis = 0. + if (.not. Atm%neststruct%nested) Atm%phis = 0. !TODO: Not sure about this line --- lmh 30 may 18 endif - + ! Read in the specified external dataset and do all the needed transformation - if ( Atm(1)%flagstruct%ncep_ic ) then + if ( Atm%flagstruct%ncep_ic ) then nq = 1 call timing_on('NCEP_IC') call get_ncep_ic( Atm, fv_domain, nq ) @@ -150,11 +157,11 @@ subroutine get_external_ic( Atm, fv_domain, cold_start ) if(is_master()) write(*,*) 'All tracers except sphum replaced by FV IC' endif #endif - elseif ( Atm(1)%flagstruct%nggps_ic ) then + elseif ( Atm%flagstruct%nggps_ic ) then call timing_on('NGGPS_IC') call get_nggps_ic( Atm, fv_domain ) call timing_off('NGGPS_IC') - elseif ( Atm(1)%flagstruct%ecmwf_ic ) then + elseif ( Atm%flagstruct%ecmwf_ic ) then if( is_master() ) write(*,*) 'Calling get_ecmwf_ic' call timing_on('ECMWF_IC') call get_ecmwf_ic( Atm, fv_domain ) @@ -162,18 +169,18 @@ subroutine get_external_ic( Atm, fv_domain, cold_start ) else ! The following is to read in legacy lat-lon FV core restart file ! is Atm%q defined in all cases? - nq = size(Atm(1)%q,4) + nq = size(Atm%q,4) call get_fv_ic( Atm, fv_domain, nq ) endif - call prt_maxmin('PS', Atm(1)%ps, is, ie, js, je, ng, 1, 0.01) - call prt_maxmin('T', Atm(1)%pt, is, ie, js, je, ng, Atm(1)%npz, 1.) - if (.not.Atm(1)%flagstruct%hydrostatic) call prt_maxmin('W', Atm(1)%w, is, ie, js, je, ng, Atm(1)%npz, 1.) - call prt_maxmin('SPHUM', Atm(1)%q(:,:,:,1), is, ie, js, je, ng, Atm(1)%npz, 1.) - if ( Atm(1)%flagstruct%nggps_ic ) then - call prt_maxmin('TS', Atm(1)%ts, is, ie, js, je, 0, 1, 1.) + call prt_maxmin('PS', Atm%ps, is, ie, js, je, ng, 1, 0.01) + call prt_maxmin('T', Atm%pt, is, ie, js, je, ng, Atm%npz, 1.) + if (.not.Atm%flagstruct%hydrostatic) call prt_maxmin('W', Atm%w, is, ie, js, je, ng, Atm%npz, 1.) + call prt_maxmin('SPHUM', Atm%q(:,:,:,1), is, ie, js, je, ng, Atm%npz, 1.) 
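! --------------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the patch] The prt_maxmin
! calls above log scaled extrema of a field over the compute domain (the 0.01
! factor for 'PS' converts Pa to hPa). A minimal serial analogue is sketched
! below; the real routine also takes the ghost width ng and a level count,
! and reduces across MPI ranks via mp_reduce_min/mp_reduce_max:
      subroutine print_maxmin(name, q, is, ie, js, je, fac)
        character(len=*), intent(in) :: name
        integer, intent(in) :: is, ie, js, je
        real, intent(in) :: q(is:ie,js:je), fac   ! compute-domain slice only
        print '(a,2es12.4)', trim(name)//' min/max:', &
             fac*minval(q), fac*maxval(q)
      end subroutine print_maxmin
! --------------------------------------------------------------------------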
+ if ( Atm%flagstruct%nggps_ic ) then + call prt_maxmin('TS', Atm%ts, is, ie, js, je, 0, 1, 1.) endif - if ( Atm(1)%flagstruct%nggps_ic .or. Atm(1)%flagstruct%ecmwf_ic ) then + if ( Atm%flagstruct%nggps_ic .or. Atm%flagstruct%ecmwf_ic ) then sphum = get_tracer_index(MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') ice_wat = get_tracer_index(MODEL_ATMOS, 'ice_wat') @@ -182,106 +189,100 @@ subroutine get_external_ic( Atm, fv_domain, cold_start ) graupel = get_tracer_index(MODEL_ATMOS, 'graupel') o3mr = get_tracer_index(MODEL_ATMOS, 'o3mr') if ( liq_wat > 0 ) & - call prt_maxmin('liq_wat', Atm(1)%q(:,:,:,liq_wat), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('liq_wat', Atm%q(:,:,:,liq_wat), is, ie, js, je, ng, Atm%npz, 1.) if ( ice_wat > 0 ) & - call prt_maxmin('ice_wat', Atm(1)%q(:,:,:,ice_wat), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('ice_wat', Atm%q(:,:,:,ice_wat), is, ie, js, je, ng, Atm%npz, 1.) if ( rainwat > 0 ) & - call prt_maxmin('rainwat', Atm(1)%q(:,:,:,rainwat), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('rainwat', Atm%q(:,:,:,rainwat), is, ie, js, je, ng, Atm%npz, 1.) if ( snowwat > 0 ) & - call prt_maxmin('snowwat', Atm(1)%q(:,:,:,snowwat), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('snowwat', Atm%q(:,:,:,snowwat), is, ie, js, je, ng, Atm%npz, 1.) if ( graupel > 0 ) & - call prt_maxmin('graupel', Atm(1)%q(:,:,:,graupel), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('graupel', Atm%q(:,:,:,graupel), is, ie, js, je, ng, Atm%npz, 1.) if ( o3mr > 0 ) & - call prt_maxmin('O3MR', Atm(1)%q(:,:,:,o3mr), is, ie, js, je, ng, Atm(1)%npz, 1.) + call prt_maxmin('O3MR', Atm%q(:,:,:,o3mr), is, ie, js, je, ng, Atm%npz, 1.) endif - call p_var(Atm(1)%npz, is, ie, js, je, Atm(1)%ak(1), ptop_min, & - Atm(1)%delp, Atm(1)%delz, Atm(1)%pt, Atm(1)%ps, & - Atm(1)%pe, Atm(1)%peln, Atm(1)%pk, Atm(1)%pkz, & - kappa, Atm(1)%q, ng, Atm(1)%ncnst, Atm(1)%gridstruct%area_64, Atm(1)%flagstruct%dry_mass, & - Atm(1)%flagstruct%adjust_dry_mass, Atm(1)%flagstruct%mountain, Atm(1)%flagstruct%moist_phys, & - Atm(1)%flagstruct%hydrostatic, Atm(1)%flagstruct%nwat, Atm(1)%domain, Atm(1)%flagstruct%make_nh) +!Now in fv_restart +!!$ call p_var(Atm%npz, is, ie, js, je, Atm%ak(1), ptop_min, & +!!$ Atm%delp, Atm%delz, Atm%pt, Atm%ps, & +!!$ Atm%pe, Atm%peln, Atm%pk, Atm%pkz, & +!!$ kappa, Atm%q, ng, Atm%ncnst, Atm%gridstruct%area_64, Atm%flagstruct%dry_mass, & +!!$ Atm%flagstruct%adjust_dry_mass, Atm%flagstruct%mountain, Atm%flagstruct%moist_phys, & +!!$ Atm%flagstruct%hydrostatic, Atm%flagstruct%nwat, Atm%domain, Atm%flagstruct%adiabatic, Atm%flagstruct%make_nh) end subroutine get_external_ic !------------------------------------------------------------------ subroutine get_cubed_sphere_terrain( Atm, fv_domain ) - type(fv_atmos_type), intent(inout), target :: Atm(:) + type(fv_atmos_type), intent(inout), target :: Atm type(domain2d), intent(inout) :: fv_domain - integer :: ntileMe - integer, allocatable :: tile_id(:) + integer :: tile_id(1) character(len=64) :: fname character(len=7) :: gn - integer :: n + integer :: n=1 integer :: jbeg, jend real ftop real, allocatable :: g_dat2(:,:,:) real, allocatable :: pt_coarse(:,:,:) integer isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg,jeg - integer :: is, ie, js, je - integer :: isd, ied, jsd, jed - - is = Atm(1)%bd%is - ie = Atm(1)%bd%ie - js = Atm(1)%bd%js - je = Atm(1)%bd%je - isd = Atm(1)%bd%isd - ied = Atm(1)%bd%ied - jsd = Atm(1)%bd%jsd - jed = Atm(1)%bd%jed - - if (Atm(1)%grid_number > 1) then - 
!write(gn,'(A2, I1)') ".g", Atm(1)%grid_number - write(gn,'(A5, I2.2)') ".nest", Atm(1)%grid_number + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed, ng + + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed + ng = Atm%bd%ng + + if (Atm%grid_number > 1) then + !write(gn,'(A2, I1)') ".g", Atm%grid_number + write(gn,'(A5, I2.2)') ".nest", Atm%grid_number else gn = '' end if - ntileMe = size(Atm(:)) ! This will have to be modified for mult tiles per PE - ! ASSUMED always one at this point - - allocate( tile_id(ntileMe) ) tile_id = mpp_get_tile_id( fv_domain ) - do n=1,ntileMe - call get_tile_string(fname, 'INPUT/fv_core.res'//trim(gn)//'.tile', tile_id(n), '.nc' ) - if (mpp_pe() == mpp_root_pe()) print*, 'external_ic: looking for ', fname + call get_tile_string(fname, 'INPUT/fv_core.res'//trim(gn)//'.tile', tile_id(n), '.nc' ) + if (mpp_pe() == mpp_root_pe()) print*, 'external_ic: looking for ', fname - - if( file_exist(fname) ) then - call read_data(fname, 'phis', Atm(n)%phis(is:ie,js:je), & - domain=fv_domain, tile_count=n) - else - call surfdrv( Atm(n)%npx, Atm(n)%npy, Atm(n)%gridstruct%grid_64, Atm(n)%gridstruct%agrid_64, & - Atm(n)%gridstruct%area_64, Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, & - Atm(n)%gridstruct%dxa, Atm(n)%gridstruct%dya, & - Atm(n)%gridstruct%dxc, Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%sin_sg, & - Atm(n)%phis, Atm(n)%flagstruct%stretch_fac, & - Atm(n)%neststruct%nested, Atm(n)%neststruct%npx_global, Atm(N)%domain, & - Atm(n)%flagstruct%grid_number, Atm(n)%bd ) - call mpp_error(NOTE,'terrain datasets generated using USGS data') - endif - end do + if( file_exist(fname) ) then + call read_data(fname, 'phis', Atm%phis(is:ie,js:je), & + domain=fv_domain, tile_count=n) + else + call surfdrv( Atm%npx, Atm%npy, Atm%gridstruct%grid_64, Atm%gridstruct%agrid_64, & + Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxa, Atm%gridstruct%dya, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & + Atm%phis, Atm%flagstruct%stretch_fac, & + Atm%neststruct%nested, Atm%gridstruct%bounded_domain, & + Atm%neststruct%npx_global, Atm%domain, & + Atm%flagstruct%grid_number, Atm%bd ) + call mpp_error(NOTE,'terrain datasets generated using USGS data') + endif + - !Needed for reproducibility. DON'T REMOVE THIS!! - call mpp_update_domains( Atm(1)%phis, Atm(1)%domain ) - ftop = g_sum(Atm(1)%domain, Atm(1)%phis(is:ie,js:je), is, ie, js, je, ng, Atm(1)%gridstruct%area_64, 1) - - call prt_maxmin('ZS', Atm(1)%phis, is, ie, js, je, ng, 1, 1./grav) + !Needed for reproducibility. DON'T REMOVE THIS!! + call mpp_update_domains( Atm%phis, Atm%domain ) + ftop = g_sum(Atm%domain, Atm%phis(is:ie,js:je), is, ie, js, je, ng, Atm%gridstruct%area_64, 1) + + call prt_maxmin('ZS', Atm%phis, is, ie, js, je, ng, 1, 1./grav) if(is_master()) write(*,*) 'mean terrain height (m)=', ftop/grav - - deallocate( tile_id ) end subroutine get_cubed_sphere_terrain subroutine get_nggps_ic (Atm, fv_domain) -! read in data after it has been preprocessed with +! read in data after it has been preprocessed with ! NCEP/EMC orography maker and global_chgres -! and has been horiztontally interpolated to the +! and has been horiztontally interpolated to the ! current cubed-sphere grid ! !--- variables read in from 'gfs_ctrl.nc' @@ -300,24 +301,23 @@ subroutine get_nggps_ic (Atm, fv_domain) ! U_S - D-grid south face tangential wind component (m/s) ! 
V_S - D-grid south face normal wind component (m/s) ! W - vertical velocity 'omega' (Pa/s) -! Q - prognostic tracer fields (Specific Humidity, +! Q - prognostic tracer fields (Specific Humidity, ! O3 mixing ratio, ! Cloud mixing ratio) -!--- Namelist variables +!--- Namelist variables ! filtered_terrain - use orography maker filtered terrain mapping -! ncep_plevels - use NCEP pressure levels (implies no vertical remapping) - type(fv_atmos_type), intent(inout) :: Atm(:) + type(fv_atmos_type), intent(inout) :: Atm type(domain2d), intent(inout) :: fv_domain ! local: real, dimension(:), allocatable:: ak, bk real, dimension(:,:), allocatable:: wk2, ps, oro_g - real, dimension(:,:,:), allocatable:: ud, vd, u_w, v_w, u_s, v_s, omga + real, dimension(:,:,:), allocatable:: ud, vd, u_w, v_w, u_s, v_s, omga, temp real, dimension(:,:,:), allocatable:: zh(:,:,:) ! 3D height at 65 edges real, dimension(:,:,:,:), allocatable:: q real, dimension(:,:), allocatable :: phis_coarse ! lmh - real rdg, wt, qt, m_fac + real rdg, wt, qt, m_fac, pe1 integer:: n, npx, npy, npz, itoa, nt, ntprog, ntdiag, ntracers, ntrac, iq integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -331,7 +331,6 @@ subroutine get_nggps_ic (Atm, fv_domain) character(len=64) :: fn_oro_ics = 'oro_data.nc' logical :: remap logical :: filtered_terrain = .true. - logical :: ncep_plevels = .false. logical :: gfs_dwinds = .true. integer :: levp = 64 logical :: checker_tr = .false. @@ -339,149 +338,11 @@ subroutine get_nggps_ic (Atm, fv_domain) real(kind=R_GRID), dimension(2):: p1, p2, p3 real(kind=R_GRID), dimension(3):: e1, e2, ex, ey integer:: i,j,k,nts, ks - integer:: liq_wat, ice_wat, rainwat, snowwat, graupel - namelist /external_ic_nml/ filtered_terrain, ncep_plevels, levp, gfs_dwinds, & + integer:: liq_wat, ice_wat, rainwat, snowwat, graupel, tke, ntclamt + namelist /external_ic_nml/ filtered_terrain, levp, gfs_dwinds, & checker_tr, nt_checker -#ifdef GFSL64 - real, dimension(65):: ak_sj, bk_sj - data ak_sj/20.00000, 68.00000, 137.79000, & - 221.95800, 318.26600, 428.43400, & - 554.42400, 698.45700, 863.05803, & - 1051.07995, 1265.75194, 1510.71101, & - 1790.05098, 2108.36604, 2470.78817, & - 2883.03811, 3351.46002, 3883.05187, & - 4485.49315, 5167.14603, 5937.04991, & - 6804.87379, 7780.84698, 8875.64338, & - 9921.40745, 10760.99844, 11417.88354, & - 11911.61193, 12258.61668, 12472.89642, & - 12566.58298, 12550.43517, 12434.26075, & - 12227.27484, 11938.39468, 11576.46910, & - 11150.43640, 10669.41063, 10142.69482, & - 9579.72458, 8989.94947, 8382.67090, & - 7766.85063, 7150.91171, 6542.55077, & - 5948.57894, 5374.81094, 4825.99383, & - 4305.79754, 3816.84622, 3360.78848, & - 2938.39801, 2549.69756, 2194.08449, & - 1870.45732, 1577.34218, 1313.00028, & - 1075.52114, 862.90778, 673.13815, & - 504.22118, 354.22752, 221.32110, & - 103.78014, 0./ - data bk_sj/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00179, 0.00705, 0.01564, & - 0.02749, 0.04251, 0.06064, & - 0.08182, 0.10595, 0.13294, & - 0.16266, 0.19492, 0.22950, & - 0.26615, 0.30455, 0.34435, & - 0.38516, 0.42656, 0.46815, & - 0.50949, 0.55020, 0.58989, & - 0.62825, 0.66498, 0.69987, & - 0.73275, 0.76351, 0.79208, & - 0.81845, 0.84264, 0.86472, & - 0.88478, 0.90290, 0.91923, & - 0.93388, 0.94697, 0.95865, & - 0.96904, 0.97826, 0.98642, & - 0.99363, 1./ -#else -! 
The following L63 setting is the same as NCEP GFS's L64 except the top layer - real, dimension(64):: ak_sj, bk_sj - data ak_sj/64.247, 137.790, 221.958, & - 318.266, 428.434, 554.424, & - 698.457, 863.05803, 1051.07995, & - 1265.75194, 1510.71101, 1790.05098, & - 2108.36604, 2470.78817, 2883.03811, & - 3351.46002, 3883.05187, 4485.49315, & - 5167.14603, 5937.04991, 6804.87379, & - 7780.84698, 8875.64338, 10100.20534, & - 11264.35673, 12190.64366, 12905.42546, & - 13430.87867, 13785.88765, 13986.77987, & - 14047.96335, 13982.46770, 13802.40331, & - 13519.33841, 13144.59486, 12689.45608, & - 12165.28766, 11583.57006, 10955.84778, & - 10293.60402, 9608.08306, 8910.07678, & - 8209.70131, 7516.18560, 6837.69250, & - 6181.19473, 5552.39653, 4955.72632, & - 4394.37629, 3870.38682, 3384.76586, & - 2937.63489, 2528.37666, 2155.78385, & - 1818.20722, 1513.68173, 1240.03585, & - 994.99144, 776.23591, 581.48797, & - 408.53400, 255.26520, 119.70243, 0. / - - data bk_sj/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00201, 0.00792, 0.01755, & - 0.03079, 0.04751, 0.06761, & - 0.09097, 0.11746, 0.14690, & - 0.17911, 0.21382, 0.25076, & - 0.28960, 0.32994, 0.37140, & - 0.41353, 0.45589, 0.49806, & - 0.53961, 0.58015, 0.61935, & - 0.65692, 0.69261, 0.72625, & - 0.75773, 0.78698, 0.81398, & - 0.83876, 0.86138, 0.88192, & - 0.90050, 0.91722, 0.93223, & - 0.94565, 0.95762, 0.96827, & - 0.97771, 0.98608, 0.99347, 1./ -#endif -#ifdef TEMP_GFSPLV - real, dimension(64):: ak_sj, bk_sj - data ak_sj/64.247, 137.79, 221.958, & - 318.266, 428.434, 554.424, & - 698.457, 863.058, 1051.08, & - 1265.752, 1510.711, 1790.051, & - 2108.366, 2470.788, 2883.038, & - 3351.46, 3883.052, 4485.493, & - 5167.146, 5937.05, 6804.874, & - 7777.15, 8832.537, 9936.614, & - 11054.85, 12152.94, 13197.07, & - 14154.32, 14993.07, 15683.49, & - 16197.97, 16511.74, 16611.6, & - 16503.14, 16197.32, 15708.89, & - 15056.34, 14261.43, 13348.67, & - 12344.49, 11276.35, 10171.71, & - 9057.051, 7956.908, 6893.117, & - 5884.206, 4945.029, 4086.614, & - 3316.217, 2637.553, 2051.15, & - 1554.789, 1143.988, 812.489, & - 552.72, 356.223, 214.015, & - 116.899, 55.712, 21.516, & - 5.741, 0.575, 0., 0. / - - data bk_sj/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00003697, 0.00043106, 0.00163591, & - 0.00410671, 0.00829402, 0.01463712, & - 0.02355588, 0.03544162, 0.05064684, & - 0.06947458, 0.09216691, 0.1188122, & - 0.1492688, 0.1832962, 0.2205702, & - 0.2606854, 0.3031641, 0.3474685, & - 0.3930182, 0.4392108, 0.4854433, & - 0.5311348, 0.5757467, 0.6187996, & - 0.659887, 0.6986829, 0.7349452, & - 0.7685147, 0.7993097, 0.8273188, & - 0.8525907, 0.8752236, 0.895355, & - 0.913151, 0.9287973, 0.9424911, & - 0.9544341, 0.9648276, 0.9738676, & - 0.9817423, 0.9886266, 0.9946712, 1./ -#endif + n = 1 !?? 
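! --------------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the patch] The ak/bk
! tables deleted above encode the hybrid sigma-pressure coordinate: layer-edge
! pressures follow p_edge(k) = ak(k) + bk(k)*ps, with bk = 0 in the pure-
! pressure layers aloft and bk = 1 at the surface. With this change the
! hard-wired defaults go away; when external_eta is set, the levels read from
! gfs_ctrl.nc are subset to the model's npz, as done further below. Given
! levp, npz, ak(levp+1), bk(levp+1) and a real ptop:
      integer :: itoa
      itoa = levp - npz + 1            ! drop the topmost levp-npz input layers
      ptop = ak(itoa)                  ! new model-top pressure
      ! thereafter p_edge(k) = ak(itoa+k-1) + bk(itoa+k-1)*ps, k = 1..npz+1
! --------------------------------------------------------------------------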
call mpp_error(NOTE,'Using external_IC::get_nggps_ic which is valid only for data which has been & &horizontally interpolated to the current cubed-sphere grid') @@ -496,19 +357,19 @@ subroutine get_nggps_ic (Atm, fv_domain) #endif unit = stdlog() - call write_version_number ( 'NGGPS_release', 'get_nggps_ic' ) + call write_version_number ( 'EXTERNAL_IC_MOD::get_nggps_ic', version ) write(unit, nml=external_ic_nml) remap = .true. - if (ncep_plevels) then + if (Atm%flagstruct%external_eta) then if (filtered_terrain) then call mpp_error(NOTE,'External_IC::get_nggps_ic - use externally-generated, filtered terrain & - &and NCEP pressure levels (vertical remapping)') + &and NCEP pressure levels (no vertical remapping)') else if (.not. filtered_terrain) then call mpp_error(NOTE,'External_IC::get_nggps_ic - use externally-generated, raw terrain & - &and NCEP pressure levels (vertical remapping)') + &and NCEP pressure levels (no vertical remapping)') endif - else ! (.not.ncep_plevels) + else ! (.not.external_eta) if (filtered_terrain) then call mpp_error(NOTE,'External_IC::get_nggps_ic - use externally-generated, filtered terrain & &and FV3 pressure levels (vertical remapping)') @@ -518,21 +379,21 @@ subroutine get_nggps_ic (Atm, fv_domain) endif endif - is = Atm(1)%bd%is - ie = Atm(1)%bd%ie - js = Atm(1)%bd%js - je = Atm(1)%bd%je - isd = Atm(1)%bd%isd - ied = Atm(1)%bd%ied - jsd = Atm(1)%bd%jsd - jed = Atm(1)%bd%jed - npz = Atm(1)%npz + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed + npz = Atm%npz call get_number_tracers(MODEL_ATMOS, num_tracers=ntracers, num_prog=ntprog) ntdiag = ntracers-ntprog !--- set the 'nestXX' appendix for all files using fms_io - if (Atm(1)%grid_number > 1) then - write(gn,'(A4, I2.2)') "nest", Atm(1)%grid_number + if (Atm%grid_number > 1) then + write(gn,'(A4, I2.2)') "nest", Atm%grid_number else gn = '' end if @@ -549,26 +410,34 @@ subroutine get_nggps_ic (Atm, fv_domain) if (ntrac > ntracers) call mpp_error(FATAL,'==> External_ic::get_nggps_ic: more NGGPS tracers & &than defined in field_table '//trim(fn_gfs_ctl)//' for NGGPS IC') +! + call get_data_source(source,Atm%flagstruct%regional) + if (trim(source) == source_fv3gfs) then + call mpp_error(NOTE, "READING FROM REGRIDDED FV3GFS NEMSIO FILE") + levp = 65 + endif +! !--- read in ak and bk from the gfs control file using fms_io read_data --- allocate (wk2(levp+1,2)) allocate (ak(levp+1)) allocate (bk(levp+1)) + call read_data('INPUT/'//trim(fn_gfs_ctl),'vcoord',wk2, no_domain=.TRUE.) ak(1:levp+1) = wk2(1:levp+1,1) bk(1:levp+1) = wk2(1:levp+1,2) deallocate (wk2) - if (.not. file_exist('INPUT/'//trim(fn_oro_ics), domain=Atm(1)%domain)) then + if (.not. file_exist('INPUT/'//trim(fn_oro_ics), domain=Atm%domain)) then call mpp_error(FATAL,'==> Error in External_ic::get_nggps_ic: tiled file '//trim(fn_oro_ics)//' for NGGPS IC does not exist') endif call mpp_error(NOTE,'==> External_ic::get_nggps_ic: using tiled data file '//trim(fn_oro_ics)//' for NGGPS IC') - if (.not. file_exist('INPUT/'//trim(fn_sfc_ics), domain=Atm(1)%domain)) then + if (.not. file_exist('INPUT/'//trim(fn_sfc_ics), domain=Atm%domain)) then call mpp_error(FATAL,'==> Error in External_ic::get_nggps_ic: tiled file '//trim(fn_sfc_ics)//' for NGGPS IC does not exist') endif call mpp_error(NOTE,'==> External_ic::get_nggps_ic: using tiled data file '//trim(fn_sfc_ics)//' for NGGPS IC') - if (.not. 
file_exist('INPUT/'//trim(fn_gfs_ics), domain=Atm(1)%domain)) then + if (.not. file_exist('INPUT/'//trim(fn_gfs_ics), domain=Atm%domain)) then call mpp_error(FATAL,'==> Error in External_ic::get_nggps_ic: tiled file '//trim(fn_gfs_ics)//' for NGGPS IC does not exist') endif call mpp_error(NOTE,'==> External_ic::get_nggps_ic: using tiled data file '//trim(fn_gfs_ics)//' for NGGPS IC') @@ -581,81 +450,85 @@ subroutine get_nggps_ic (Atm, fv_domain) allocate ( v_w(is:ie+1, js:je, 1:levp) ) allocate ( u_s(is:ie, js:je+1, 1:levp) ) allocate ( v_s(is:ie, js:je+1, 1:levp) ) - - do n = 1,size(Atm(:)) + if (trim(source) == source_fv3gfs) allocate (temp(is:ie,js:je,1:levp)) !!! If a nested grid, save the filled coarse-grid topography for blending - if (Atm(n)%neststruct%nested) then + if (Atm%neststruct%nested) then allocate(phis_coarse(isd:ied,jsd:jed)) do j=jsd,jed do i=isd,ied - phis_coarse(i,j) = Atm(n)%phis(i,j) + phis_coarse(i,j) = Atm%phis(i,j) enddo enddo endif !--- read in surface temperature (k) and land-frac ! surface skin temperature - id_res = register_restart_field (SFC_restart, fn_sfc_ics, 'tsea', Atm(n)%ts, domain=Atm(n)%domain) + id_res = register_restart_field (SFC_restart, fn_sfc_ics, 'tsea', Atm%ts, domain=Atm%domain) ! terrain surface height -- (needs to be transformed into phis = zs*grav) if (filtered_terrain) then - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_filt', Atm(n)%phis, domain=Atm(n)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_filt', Atm%phis, domain=Atm%domain) elseif (.not. filtered_terrain) then - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_raw', Atm(n)%phis, domain=Atm(n)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_raw', Atm%phis, domain=Atm%domain) endif - if ( Atm(n)%flagstruct%full_zs_filter) then + if ( Atm%flagstruct%full_zs_filter) then allocate (oro_g(isd:ied,jsd:jed)) + oro_g = 0. ! land-frac - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'land_frac', oro_g, domain=Atm(n)%domain) - call mpp_update_domains(oro_g, Atm(n)%domain) - if (Atm(n)%neststruct%nested) then - call extrapolation_BC(oro_g, 0, 0, Atm(n)%npx, Atm(n)%npy, Atm(n)%bd, .true.) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'land_frac', oro_g, domain=Atm%domain) + call mpp_update_domains(oro_g, Atm%domain) + if (Atm%neststruct%nested) then + call extrapolation_BC(oro_g, 0, 0, Atm%npx, Atm%npy, Atm%bd, .true.) endif endif - - if ( Atm(n)%flagstruct%fv_land ) then + + if ( Atm%flagstruct%fv_land ) then ! stddev - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'stddev', Atm(n)%sgh, domain=Atm(n)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'stddev', Atm%sgh, domain=Atm%domain) ! land-frac - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'land_frac', Atm(n)%oro, domain=Atm(n)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'land_frac', Atm%oro, domain=Atm%domain) endif - + ! surface pressure (Pa) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ps', ps, domain=Atm(n)%domain) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ps', ps, domain=Atm%domain) ! D-grid west face tangential wind component (m/s) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'u_w', u_w, domain=Atm(n)%domain,position=EAST) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'u_w', u_w, domain=Atm%domain,position=EAST) ! 
D-grid west face normal wind component (m/s) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'v_w', v_w, domain=Atm(n)%domain,position=EAST) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'v_w', v_w, domain=Atm%domain,position=EAST) ! D-grid south face tangential wind component (m/s) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'u_s', u_s, domain=Atm(n)%domain,position=NORTH) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'u_s', u_s, domain=Atm%domain,position=NORTH) ! D-grid south face normal wind component (m/s) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'v_s', v_s, domain=Atm(n)%domain,position=NORTH) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'v_s', v_s, domain=Atm%domain,position=NORTH) ! vertical velocity 'omega' (Pa/s) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'w', omga, domain=Atm(n)%domain) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'w', omga, domain=Atm%domain) ! GFS grid height at edges (including surface height) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ZH', zh, domain=Atm(n)%domain) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ZH', zh, domain=Atm%domain) + ! real temperature (K) + if (trim(source) == source_fv3gfs) id_res = register_restart_field (GFS_restart, fn_gfs_ics, 't', temp, mandatory=.false., & + domain=Atm%domain) ! prognostic tracers do nt = 1, ntracers + q(:,:,:,nt) = -999.99 call get_tracer_names(MODEL_ATMOS, nt, tracer_name) id_res = register_restart_field (GFS_restart, fn_gfs_ics, trim(tracer_name), q(:,:,:,nt), & - mandatory=.false.,domain=Atm(n)%domain) + mandatory=.false.,domain=Atm%domain) enddo ! initialize all tracers to default values prior to being input do nt = 1, ntprog call get_tracer_names(MODEL_ATMOS, nt, tracer_name) ! set all tracers to an initial profile value - call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%q(:,:,:,nt) ) + call set_tracer_profile (MODEL_ATMOS, nt, Atm%q(:,:,:,nt) ) enddo do nt = ntprog+1, ntracers call get_tracer_names(MODEL_ATMOS, nt, tracer_name) ! set all tracers to an initial profile value - call set_tracer_profile (MODEL_ATMOS, nt, Atm(n)%qdiag(:,:,:,nt) ) + call set_tracer_profile (MODEL_ATMOS, nt, Atm%qdiag(:,:,:,nt) ) enddo ! read in the restart @@ -668,28 +541,47 @@ subroutine get_nggps_ic (Atm, fv_domain) call free_restart_type(GFS_restart) ! multiply NCEP ICs terrain 'phis' by gravity to be true geopotential - Atm(n)%phis = Atm(n)%phis*grav - + Atm%phis = Atm%phis*grav + ! set the pressure levels and ptop to be used - if (ncep_plevels) then + ! else eta is set in grid_init + if (Atm%flagstruct%external_eta) then itoa = levp - npz + 1 - Atm(n)%ptop = ak(itoa) - Atm(n)%ak(1:npz+1) = ak(itoa:levp+1) - Atm(n)%bk(1:npz+1) = bk(itoa:levp+1) - else - if ( npz <= 64 ) then - Atm(n)%ak(:) = ak_sj(:) - Atm(n)%bk(:) = bk_sj(:) - Atm(n)%ptop = Atm(n)%ak(1) - else - call set_eta(npz, ks, Atm(n)%ptop, Atm(n)%ak, Atm(n)%bk) - endif + Atm%ptop = ak(itoa) + Atm%ak(1:npz+1) = ak(itoa:levp+1) + Atm%bk(1:npz+1) = bk(itoa:levp+1) + call set_external_eta (Atm%ak, Atm%bk, Atm%ptop, Atm%ks) +!!$ else +!!$ if ( (npz == 63 .or. npz == 64) .and. len(trim(Atm%flagstruct%npz_type)) == 0 ) then +!!$ if (is_master()) print*, 'Using default GFS levels' +!!$ Atm%ak(:) = ak_sj(:) +!!$ Atm%bk(:) = bk_sj(:) +!!$ Atm%ptop = Atm%ak(1) +!!$ else +!!$ call set_eta(npz, ks, Atm%ptop, Atm%ak, Atm%bk, Atm%flagstruct%npz_type) +!!$ endif endif ! 
call vertical remapping algorithms
      if(is_master()) write(*,*) 'GFS ak(1)=', ak(1), ' ak(2)=', ak(2)
      ak(1) = max(1.e-9, ak(1))
 
-      call remap_scalar_nggps(Atm(n), levp, npz, ntracers, ak, bk, ps, q, omga, zh)
+!*** For regional runs read in each of the BC variables from the NetCDF boundary file
+!*** and remap in the vertical from the input levels to the model integration levels.
+!*** Here in the initialization we begin by allocating the regional domain's boundary
+!*** objects. Then we need to read the first two regional BC files so the integration
+!*** can begin interpolating between those two times as the forecast proceeds.
+
+      if (n==1.and.Atm%flagstruct%regional) then !<-- Select the parent regional domain.
+
+        call start_regional_cold_start(Atm, ak, bk, levp, &
+                                       is, ie, js, je, &
+                                       isd, ied, jsd, jed )
+      endif
+
+!
+!*** Remap the variables in the compute domain.
+!
+      call remap_scalar(Atm, levp, npz, ntracers, ak, bk, ps, q, zh, omga, temp)
 
       allocate ( ud(is:ie, js:je+1, 1:levp) )
       allocate ( vd(is:ie+1,js:je, 1:levp) )
 
@@ -699,8 +591,8 @@ subroutine get_nggps_ic (Atm, fv_domain)
       do k=1,levp
          do j=js,je+1
            do i=is,ie
-               p1(:) = Atm(1)%gridstruct%grid(i, j,1:2)
-               p2(:) = Atm(1)%gridstruct%grid(i+1,j,1:2)
+               p1(:) = Atm%gridstruct%grid(i, j,1:2)
+               p2(:) = Atm%gridstruct%grid(i+1,j,1:2)
               call mid_pt_sphere(p1, p2, p3)
               call get_unit_vect2(p1, p2, e1)
               call get_latlon_vector(p3, ex, ey)
       enddo
      enddo
      do j=js,je
         do i=is,ie+1
-            p1(:) = Atm(1)%gridstruct%grid(i,j ,1:2)
-            p2(:) = Atm(1)%gridstruct%grid(i,j+1,1:2)
+            p1(:) = Atm%gridstruct%grid(i,j ,1:2)
+            p2(:) = Atm%gridstruct%grid(i,j+1,1:2)
            call mid_pt_sphere(p1, p2, p3)
            call get_unit_vect2(p1, p2, e2)
            call get_latlon_vector(p3, ex, ey)
@@ -722,125 +614,166 @@ subroutine get_nggps_ic (Atm, fv_domain)
       deallocate ( v_w )
       deallocate ( u_s )
       deallocate ( v_s )
-
-      call remap_dwinds(levp, npz, ak, bk, ps, ud, vd, Atm(n))
+
+      call remap_dwinds(levp, npz, ak, bk, ps, ud, vd, Atm)
 
       deallocate ( ud )
       deallocate ( vd )
-
-      if (Atm(n)%neststruct%nested) then
+
+      if (Atm%neststruct%nested) then
         if (is_master()) write(*,*) 'Blending nested and coarse grid topography'
-        npx = Atm(n)%npx
-        npy = Atm(n)%npy
+        npx = Atm%npx
+        npy = Atm%npy
        do j=jsd,jed
           do i=isd,ied
              wt = max(0.,min(1.,real(5 - min(i,j,npx-i,npy-j,5))/5. ))
-              Atm(n)%phis(i,j) = (1.-wt)*Atm(n)%phis(i,j) + wt*phis_coarse(i,j)
+              Atm%phis(i,j) = (1.-wt)*Atm%phis(i,j) + wt*phis_coarse(i,j)
           enddo
        enddo
     endif
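! [Editor's illustration -- not part of the patch] The two loops above build the
! D-grid winds: for each cell edge, the unit vector e1 (or e2) along the edge and
! the local east/north unit vectors ex/ey at the edge midpoint turn a lat-lon wind
! (u,v) into the edge-parallel component u*inner_prod(e1,ex) + v*inner_prod(e1,ey).
! A self-contained sketch of that projection (all names and sample values are
! illustrative; the real code uses mid_pt_sphere/get_unit_vect2/get_latlon_vector):

      program dgrid_wind_projection
        implicit none
        real :: p1(2), p2(2), pm(2)   ! edge end points and midpoint (lon,lat in radians)
        real :: e1(3), ex(3), ey(3)   ! edge unit vector; local east/north unit vectors
        real :: u_ll, v_ll, u_edge
        p1 = (/ 0.00, 0.00 /)
        p2 = (/ 0.10, 0.05 /)
        pm = 0.5*(p1+p2)              ! crude midpoint, adequate for a short edge
        e1 = unit_diff(xyz(p2), xyz(p1))
        ex = (/ -sin(pm(1)), cos(pm(1)), 0. /)
        ey = (/ -sin(pm(2))*cos(pm(1)), -sin(pm(2))*sin(pm(1)), cos(pm(2)) /)
        u_ll = 10.;  v_ll = 3.        ! lat-lon wind interpolated to the midpoint
        u_edge = u_ll*dot_product(e1,ex) + v_ll*dot_product(e1,ey)
        print *, 'edge-parallel wind component =', u_edge
      contains
        function xyz(p) result(q)     ! (lon,lat) -> unit Cartesian position
          real, intent(in) :: p(2)
          real :: q(3)
          q = (/ cos(p(2))*cos(p(1)), cos(p(2))*sin(p(1)), sin(p(2)) /)
        end function xyz
        function unit_diff(a, b) result(e)
          real, intent(in) :: a(3), b(3)
          real :: e(3)
          e = (a-b)/sqrt(sum((a-b)**2))
        end function unit_diff
      end program dgrid_wind_projection
!!! 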
Perform terrain smoothing, if desired - if ( Atm(n)%flagstruct%full_zs_filter ) then + if ( Atm%flagstruct%full_zs_filter ) then - call mpp_update_domains(Atm(n)%phis, Atm(n)%domain) + call mpp_update_domains(Atm%phis, Atm%domain) - call FV3_zs_filter( Atm(n)%bd, isd, ied, jsd, jed, npx, npy, Atm(n)%neststruct%npx_global, & - Atm(n)%flagstruct%stretch_fac, Atm(n)%neststruct%nested, Atm(n)%domain, & - Atm(n)%gridstruct%area_64, Atm(n)%gridstruct%dxa, Atm(n)%gridstruct%dya, & - Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, Atm(n)%gridstruct%dxc, & - Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%grid_64, Atm(n)%gridstruct%agrid_64, & - Atm(n)%gridstruct%sin_sg, Atm(n)%phis, oro_g) + call FV3_zs_filter( Atm%bd, isd, ied, jsd, jed, npx, npy, Atm%neststruct%npx_global, & + Atm%flagstruct%stretch_fac, Atm%gridstruct%bounded_domain, Atm%domain, & + Atm%gridstruct%area_64, Atm%gridstruct%dxa, Atm%gridstruct%dya, & + Atm%gridstruct%dx, Atm%gridstruct%dy, Atm%gridstruct%dxc, & + Atm%gridstruct%dyc, Atm%gridstruct%grid_64, Atm%gridstruct%agrid_64, & + Atm%gridstruct%sin_sg, Atm%phis, oro_g) deallocate(oro_g) endif - if ( Atm(n)%flagstruct%n_zs_filter > 0 ) then + if ( Atm%flagstruct%n_zs_filter > 0 ) then - if ( Atm(n)%flagstruct%nord_zs_filter == 2 ) then - call del2_cubed_sphere(Atm(n)%npx, Atm(n)%npy, Atm(n)%phis, & - Atm(n)%gridstruct%area_64, Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, & - Atm(n)%gridstruct%dxc, Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%sin_sg, & - Atm(n)%flagstruct%n_zs_filter, cnst_0p20*Atm(n)%gridstruct%da_min, & - .false., oro_g, Atm(n)%neststruct%nested, Atm(n)%domain, Atm(n)%bd) + if ( Atm%flagstruct%nord_zs_filter == 2 ) then + call del2_cubed_sphere(Atm%npx, Atm%npy, Atm%phis, & + Atm%gridstruct%area_64, Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & + Atm%flagstruct%n_zs_filter, cnst_0p20*Atm%gridstruct%da_min, & + .false., oro_g, Atm%gridstruct%bounded_domain, & + Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-2 terrain filter has been applied ', & - Atm(n)%flagstruct%n_zs_filter, ' times' - else if( Atm(n)%flagstruct%nord_zs_filter == 4 ) then - call del4_cubed_sphere(Atm(n)%npx, Atm(n)%npy, Atm(n)%phis, Atm(n)%gridstruct%area_64, & - Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, & - Atm(n)%gridstruct%dxc, Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%sin_sg, & - Atm(n)%flagstruct%n_zs_filter, .false., oro_g, Atm(n)%neststruct%nested, & - Atm(n)%domain, Atm(n)%bd) + Atm%flagstruct%n_zs_filter, ' times' + else if( Atm%flagstruct%nord_zs_filter == 4 ) then + call del4_cubed_sphere(Atm%npx, Atm%npy, Atm%phis, Atm%gridstruct%area_64, & + Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & + Atm%flagstruct%n_zs_filter, .false., oro_g, & + Atm%gridstruct%bounded_domain, & + Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & - Atm(n)%flagstruct%n_zs_filter, ' times' + Atm%flagstruct%n_zs_filter, ' times' endif endif - if ( Atm(n)%neststruct%nested .and. ( Atm(n)%flagstruct%n_zs_filter > 0 .or. Atm(n)%flagstruct%full_zs_filter ) ) then - npx = Atm(n)%npx - npy = Atm(n)%npy + if ( Atm%neststruct%nested .and. ( Atm%flagstruct%n_zs_filter > 0 .or. Atm%flagstruct%full_zs_filter ) ) then + npx = Atm%npx + npy = Atm%npy do j=jsd,jed do i=isd,ied wt = max(0.,min(1.,real(5 - min(i,j,npx-i,npy-j,5))/5. 
)) - Atm(n)%phis(i,j) = (1.-wt)*Atm(n)%phis(i,j) + wt*phis_coarse(i,j) + Atm%phis(i,j) = (1.-wt)*Atm%phis(i,j) + wt*phis_coarse(i,j) enddo enddo deallocate(phis_coarse) endif - call mpp_update_domains( Atm(n)%phis, Atm(n)%domain, complete=.true. ) + call mpp_update_domains( Atm%phis, Atm%domain, complete=.true. ) liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') ice_wat = get_tracer_index(MODEL_ATMOS, 'ice_wat') rainwat = get_tracer_index(MODEL_ATMOS, 'rainwat') snowwat = get_tracer_index(MODEL_ATMOS, 'snowwat') graupel = get_tracer_index(MODEL_ATMOS, 'graupel') -!--- Add cloud condensate from GFS to total MASS -! 20160928: Adjust the mixing ratios consistently... + ntclamt = get_tracer_index(MODEL_ATMOS, 'cld_amt') + if (trim(source) == source_fv3gfs) then do k=1,npz do j=js,je do i=is,ie - wt = Atm(n)%delp(i,j,k) - if ( Atm(n)%flagstruct%nwat .eq. 2 ) then - qt = wt*(1.+Atm(n)%q(i,j,k,liq_wat)) - elseif ( Atm(n)%flagstruct%nwat .eq. 6 ) then - qt = wt*(1. + Atm(n)%q(i,j,k,liq_wat) + & - Atm(n)%q(i,j,k,ice_wat) + & - Atm(n)%q(i,j,k,rainwat) + & - Atm(n)%q(i,j,k,snowwat) + & - Atm(n)%q(i,j,k,graupel)) + wt = Atm%delp(i,j,k) + if ( Atm%flagstruct%nwat == 6 ) then + qt = wt*(1. + Atm%q(i,j,k,liq_wat) + & + Atm%q(i,j,k,ice_wat) + & + Atm%q(i,j,k,rainwat) + & + Atm%q(i,j,k,snowwat) + & + Atm%q(i,j,k,graupel)) + else ! all other values of nwat + qt = wt*(1. + sum(Atm%q(i,j,k,2:Atm%flagstruct%nwat))) endif - m_fac = wt / qt - do iq=1,ntracers - Atm(n)%q(i,j,k,iq) = m_fac * Atm(n)%q(i,j,k,iq) - enddo - Atm(n)%delp(i,j,k) = qt + Atm%delp(i,j,k) = qt + if (ntclamt > 0) Atm%q(i,j,k,ntclamt) = 0.0 ! Moorthi enddo enddo enddo -!--- reset the tracers beyond condensate to a checkerboard pattern + else +!--- Add cloud condensate from GFS to total MASS +! 20160928: Adjust the mixing ratios consistently... + do k=1,npz + do j=js,je + do i=is,ie + wt = Atm%delp(i,j,k) + if ( Atm%flagstruct%nwat == 6 ) then + qt = wt*(1. + Atm%q(i,j,k,liq_wat) + & + Atm%q(i,j,k,ice_wat) + & + Atm%q(i,j,k,rainwat) + & + Atm%q(i,j,k,snowwat) + & + Atm%q(i,j,k,graupel)) + else ! all other values of nwat + qt = wt*(1. + sum(Atm%q(i,j,k,2:Atm%flagstruct%nwat))) + endif + m_fac = wt / qt + do iq=1,ntracers + Atm%q(i,j,k,iq) = m_fac * Atm%q(i,j,k,iq) + enddo + Atm%delp(i,j,k) = qt + if (ntclamt > 0) Atm%q(i,j,k,ntclamt) = 0.0 ! Moorthi + enddo + enddo + + enddo + endif !end trim(source) test + + + tke = get_tracer_index(MODEL_ATMOS, 'sgs_tke') + if (tke > 0) then + do k=1,npz + do j=js,je + do i=is,ie + !pe1 = Atm%ak(k+1) + Atm%bk(k+1)*Atm%ps(i,j) + Atm%q(i,j,k,tke) = 0.02 ! 1.*exp(-(Atm%ps(i,j) - pe1)**2) + enddo + enddo + enddo + endif + +!--- reset the tracers beyond condensate to a checkerboard pattern if (checker_tr) then nts = ntracers - nt_checker+1 call checker_tracers(is,ie, js,je, isd,ied, jsd,jed, nt_checker, & - npz, Atm(n)%q(:,:,:,nts:ntracers), & - Atm(n)%gridstruct%agrid_64(is:ie,js:je,1), & - Atm(n)%gridstruct%agrid_64(is:ie,js:je,2), 9., 9.) + npz, Atm%q(:,:,:,nts:ntracers), & + Atm%gridstruct%agrid_64(is:ie,js:je,1), & + Atm%gridstruct%agrid_64(is:ie,js:je,2), 9., 9.) endif - enddo ! n-loop - Atm(1)%flagstruct%make_nh = .false. + Atm%flagstruct%make_nh = .false. 
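! [Editor's illustration -- not part of the patch] A self-contained sketch of the
! moist-mass adjustment performed above: the input delp excludes condensate mass,
! so each layer's thickness is inflated by (1 + sum of condensate mixing ratios),
! and for non-FV3GFS sources every tracer is then rescaled by m_fac = wt/qt so
! that tracer mass q*delp is unchanged. All numbers below are illustrative.

      program condensate_mass_adjustment
        implicit none
        real :: wt, qt, m_fac
        real :: q(5), q0(5)            ! liq, ice, rain, snow, graupel mixing ratios
        wt = 500.0                     ! layer thickness delp before adjustment (Pa)
        q0 = (/ 2.e-4, 1.e-4, 5.e-5, 3.e-5, 1.e-5 /)
        qt = wt*(1. + sum(q0))         ! delp including condensate mass
        m_fac = wt/qt                  ! rescaling factor applied to the tracers
        q  = m_fac*q0
        print *, 'delp: ', wt, ' -> ', qt
        print *, 'tracer mass before/after:', sum(q0)*wt, sum(q)*qt   ! identical
      end program condensate_mass_adjustment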
deallocate (ak) deallocate (bk) deallocate (ps) deallocate (q ) + if (trim(source) == source_fv3gfs) deallocate (temp) + deallocate (omga) end subroutine get_nggps_ic !------------------------------------------------------------------ !------------------------------------------------------------------ subroutine get_ncep_ic( Atm, fv_domain, nq ) - type(fv_atmos_type), intent(inout) :: Atm(:) + type(fv_atmos_type), intent(inout) :: Atm type(domain2d), intent(inout) :: fv_domain integer, intent(in):: nq ! local: @@ -871,42 +804,56 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) #endif character(len=128) :: fname real(kind=4), allocatable:: wk1(:), wk2(:,:), wk3(:,:,:) - real, allocatable:: tp(:,:,:), qp(:,:,:) - real, allocatable:: ua(:,:,:), va(:,:,:) - real, allocatable:: lat(:), lon(:), ak0(:), bk0(:) - real:: s2c(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je,4) - integer, dimension(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je):: id1, id2, jdc - real psc(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je) - real gzc(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je) - real tmean + real, dimension(:), allocatable:: lat, lon, ak0, bk0 + real, dimension(:,:,:), allocatable:: ud, vd + real, dimension(:,:,:,:), allocatable:: qp + real(kind=4), dimension(:,:), allocatable:: psncep, zsncep, psc + real(kind=4), dimension(:,:,:), allocatable:: uncep, vncep, tncep, zhncep + real(kind=4), dimension(:,:,:,:), allocatable:: qncep + real, dimension(:,:), allocatable:: psc_r8 + real, dimension(:,:,:), allocatable:: pt_c, pt_d, gzc + real:: s2c(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,4) + real:: s2c_c(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je,4) + real:: s2c_d(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1,4) + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: id1, id2, jdc + integer, dimension(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je):: & + id1_c, id2_c, jdc_c + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1):: & + id1_d, id2_d, jdc_d + real :: tmean, utmp, vtmp integer:: i, j, k, im, jm, km, npz, npt integer:: i1, i2, j1, ncid - integer:: jbeg, jend - integer tsize(3) + integer:: jbeg, jend, jn + integer tsize(3) logical:: read_ts = .true. logical:: land_ts = .false. logical:: found integer :: is, ie, js, je integer :: isd, ied, jsd, jed + real(kind=R_GRID), dimension(2):: p1, p2, p3 + real(kind=R_GRID), dimension(3):: e1, e2, ex, ey + integer :: id_res, ntprog, ntracers, ks, iq, nt - is = Atm(1)%bd%is - ie = Atm(1)%bd%ie - js = Atm(1)%bd%js - je = Atm(1)%bd%je - isd = Atm(1)%bd%isd - ied = Atm(1)%bd%ied - jsd = Atm(1)%bd%jsd - jed = Atm(1)%bd%jed + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed deg2rad = pi/180. - npz = Atm(1)%npz + npz = Atm%npz + call get_number_tracers(MODEL_ATMOS, num_tracers=ntracers, num_prog=ntprog) + if(is_master()) write(*,*) 'ntracers = ', ntracers, 'ntprog = ',ntprog ! Zero out all initial tracer fields: ! SJL: 20110716 -! Atm(1)%q = 0. +! Atm%q = 0. - fname = Atm(1)%flagstruct%res_latlon_dynamics + fname = Atm%flagstruct%res_latlon_dynamics if( file_exist(fname) ) then call open_ncfile( fname, ncid ) ! open the file @@ -921,7 +868,7 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) allocate ( lon(im) ) allocate ( lat(jm) ) - + call _GET_VAR1(ncid, 'lon', im, lon ) call _GET_VAR1(ncid, 'lat', jm, lat ) @@ -966,56 +913,107 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) ! 
Initialize lat-lon to Cubed bi-linear interpolation coeff:
      call remap_coef( is, ie, js, je, isd, ied, jsd, jed, &
-                      im, jm, lon, lat, id1, id2, jdc, s2c , Atm(1)%gridstruct%agrid)
+                      im, jm, lon, lat, id1, id2, jdc, s2c , Atm%gridstruct%agrid)
 
! Find bounding latitudes:
      jbeg = jm-1;         jend = 2
      do j=js,je
         do i=is,ie
            j1 = jdc(i,j)
-           jbeg = min(jbeg, j1) 
+           jbeg = min(jbeg, j1)
            jend = max(jend, j1+1)
         enddo
      enddo
 
-! remap surface pressure and height:
+      if(is_master()) write(*,*) 'jbeg, jend = ', jbeg, jend
+! read in surface pressure and height:
+      allocate ( psncep(im,jbeg:jend) )
+      allocate ( zsncep(im,jbeg:jend) )
+
+      call get_var3_r4( ncid, 'PS', 1,im, jbeg,jend, 1,1, psncep )
+      if(is_master()) write(*,*) 'done reading psncep'
+      call get_var3_r4( ncid, 'PHIS', 1,im, jbeg,jend, 1,1, zsncep )
+      zsncep(:,:) = zsncep(:,:)/grav
+      if(is_master()) write(*,*) 'done reading zsncep'
+! read in temperature:
+      allocate ( tncep(1:im,jbeg:jend, 1:km) )
+      call get_var3_r4( ncid, 'T', 1,im, jbeg,jend, 1,km, tncep )
+      if(is_master()) write(*,*) 'done reading tncep'
+! read in specific humidity and cloud water cond:
+      allocate ( wk3(1:im,jbeg:jend, 1:km) )
+      allocate ( qncep(1:im,jbeg:jend, 1:km,2) )
+      call get_var3_r4( ncid, 'Q', 1,im, jbeg,jend, 1,km, wk3 )
+      if(is_master()) write(*,*) 'done reading sphumncep'
+      qncep(:,:,:,1) = wk3(:,:,:)
+      call get_var3_r4( ncid, 'CWAT', 1,im, jbeg,jend, 1,km, wk3 )
+      if(is_master()) write(*,*) 'done reading cwatncep'
+      qncep(:,:,:,2) = wk3(:,:,:)
+      deallocate (wk3)
+
+      if ( T_is_Tv ) then
+         ! The "T" field in NCEP analysis is actually virtual temperature (Larry H. post processing)
+         ! BEFORE 20051201
+         do i=1,im
+            do j=jbeg,jend
+               do k=1,km
+                  tncep(i,j,k) = tncep(i,j,k)/(1.+zvir*qncep(i,j,k,1))
+               enddo
+            enddo
+         enddo
+      endif
+
+!!!! Compute height on edges, zhncep [ use psncep, zsncep, tncep, sphumncep]
+      allocate ( zhncep(1:im,jbeg:jend, km+1) )
+      jn = jend - jbeg + 1
+
+      call compute_zh(im, jn, km, ak0, bk0, psncep, zsncep, tncep, qncep, 2, zhncep )
+      deallocate (zsncep)
+      deallocate (tncep)
 
-      allocate ( wk2(im,jbeg:jend) )
-      call get_var3_r4( ncid, 'PS', 1,im, jbeg,jend, 1,1, wk2 )
+      if(is_master()) write(*,*) 'done compute zhncep'
+
+! convert zhncep, psncep from NCEP grid to cubic grid
+      allocate (psc(is:ie,js:je))
+      allocate (psc_r8(is:ie,js:je))
      do j=js,je
         do i=is,ie
            i1 = id1(i,j)
            i2 = id2(i,j)
            j1 = jdc(i,j)
-           psc(i,j) = s2c(i,j,1)*wk2(i1,j1  ) + s2c(i,j,2)*wk2(i2,j1  ) + &
-                      s2c(i,j,3)*wk2(i2,j1+1) + s2c(i,j,4)*wk2(i1,j1+1)
+           psc(i,j) = s2c(i,j,1)*psncep(i1,j1  ) + s2c(i,j,2)*psncep(i2,j1  ) + &
+                      s2c(i,j,3)*psncep(i2,j1+1) + s2c(i,j,4)*psncep(i1,j1+1)
        enddo
     enddo
+      deallocate ( psncep )
 
-      call get_var3_r4( ncid, 'PHIS', 1,im, jbeg,jend, 1,1, wk2 )
-      do j=js,je
+
+      allocate (gzc(is:ie,js:je,km+1))
+      do k=1,km+1
+       do j=js,je
        do i=is,ie
           i1 = id1(i,j)
           i2 = id2(i,j)
          j1 = jdc(i,j)
-           gzc(i,j) = s2c(i,j,1)*wk2(i1,j1  ) + s2c(i,j,2)*wk2(i2,j1  ) + &
-                      s2c(i,j,3)*wk2(i2,j1+1) + s2c(i,j,4)*wk2(i1,j1+1)
+           gzc(i,j,k) = s2c(i,j,1)*zhncep(i1,j1  ,k) + s2c(i,j,2)*zhncep(i2,j1  ,k) + &
+                        s2c(i,j,3)*zhncep(i2,j1+1,k) + s2c(i,j,4)*zhncep(i1,j1+1,k)
        enddo
+       enddo
      enddo
+      deallocate ( zhncep )
 
-      deallocate ( wk2 )
-      allocate ( wk2(im,jm) )
+      if(is_master()) write(*,*) 'done interpolate psncep/zhncep into cubic grid psc/gzc!'
 
+! read skin temperature; could be used for SST
+      allocate ( wk2(im,jm) )
      if ( read_ts ) then              ! read skin temperature; could be used for SST
-
-         call get_var2_real( ncid, 'TS', im, jm, wk2 )
 
         if ( .not. land_ts ) then
           allocate ( wk1(im) )
 
           do j=1,jm
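! [Editor's illustration -- not part of the patch] The psncep/gzc loops above apply
! the four bilinear weights s2c(i,j,1:4) computed once by remap_coef: each cubed-
! sphere point pulls from the corners (i1,j1), (i2,j1), (i2,j1+1), (i1,j1+1) of the
! lat-lon cell that contains it. A minimal sketch of that stencil on a toy 2x2 cell
! (all names and values are illustrative):

      program bilinear_stencil
        implicit none
        real :: f(2,2), s2c(4), val
        f = reshape( (/ 1.0, 2.0, 3.0, 4.0 /), (/ 2, 2 /) )  ! corner values
        ! weights for a target at fractional position (0.25, 0.75) inside the cell,
        ! ordered like remap_coef: (i1,j1), (i2,j1), (i2,j1+1), (i1,j1+1)
        s2c = (/ 0.75*0.25, 0.25*0.25, 0.25*0.75, 0.75*0.75 /)
        val = s2c(1)*f(1,1) + s2c(2)*f(2,1) + s2c(3)*f(2,2) + s2c(4)*f(1,2)
        print *, 'weights sum to', sum(s2c), '; interpolated value =', val
      end program bilinear_stencil
-! 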
Read NCEP ORO (1; land; 0: ocean; 2: sea_ice) + ! Read NCEP ORO (1; land; 0: ocean; 2: sea_ice) call get_var3_r4( ncid, 'ORO', 1,im, j,j, 1,1, wk1 ) tmean = 0. npt = 0 @@ -1025,9 +1023,9 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) npt = npt + 1 endif enddo -!------------------------------------------------------ -! Replace TS over interior land with zonal mean SST/Ice -!------------------------------------------------------ + !------------------------------------------------------ + ! Replace TS over interior land with zonal mean SST/Ice + !------------------------------------------------------ if ( npt /= 0 ) then tmean= tmean / real(npt) do i=1,im @@ -1058,11 +1056,11 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) i1 = id1(i,j) i2 = id2(i,j) j1 = jdc(i,j) - Atm(1)%ts(i,j) = s2c(i,j,1)*wk2(i1,j1 ) + s2c(i,j,2)*wk2(i2,j1 ) + & + Atm%ts(i,j) = s2c(i,j,1)*wk2(i1,j1 ) + s2c(i,j,2)*wk2(i2,j1 ) + & s2c(i,j,3)*wk2(i2,j1+1) + s2c(i,j,4)*wk2(i1,j1+1) enddo enddo - call prt_maxmin('SST_model', Atm(1)%ts, is, ie, js, je, 0, 1, 1.) + call prt_maxmin('SST_model', Atm%ts, is, ie, js, je, 0, 1, 1.) ! Perform interp to FMS SST format/grid #ifndef DYCORE_SOLO @@ -1076,79 +1074,153 @@ subroutine get_ncep_ic( Atm, fv_domain, nq ) deallocate ( wk2 ) -! Read in temperature: - allocate ( wk3(1:im,jbeg:jend, 1:km) ) - call get_var3_r4( ncid, 'T', 1,im, jbeg,jend, 1,km, wk3 ) - - allocate ( tp(is:ie,js:je,km) ) - do k=1,km - do j=js,je - do i=is,ie - i1 = id1(i,j) - i2 = id2(i,j) - j1 = jdc(i,j) - tp(i,j,k) = s2c(i,j,1)*wk3(i1,j1 ,k) + s2c(i,j,2)*wk3(i2,j1 ,k) + & - s2c(i,j,3)*wk3(i2,j1+1,k) + s2c(i,j,4)*wk3(i1,j1+1,k) - enddo - enddo - enddo - -! Read in tracers: only sphum at this point - call get_var3_r4( ncid, 'Q', 1,im, jbeg,jend, 1,km, wk3 ) - - allocate ( qp(is:ie,js:je,km) ) +! convert qncep from NCEP grid to cubic grid + allocate ( qp(is:ie,js:je,km,2) ) do k=1,km do j=js,je do i=is,ie i1 = id1(i,j) i2 = id2(i,j) j1 = jdc(i,j) - qp(i,j,k) = s2c(i,j,1)*wk3(i1,j1 ,k) + s2c(i,j,2)*wk3(i2,j1 ,k) + & - s2c(i,j,3)*wk3(i2,j1+1,k) + s2c(i,j,4)*wk3(i1,j1+1,k) + qp(i,j,k,1) = s2c(i,j,1)*qncep(i1,j1 ,k,1) + s2c(i,j,2)*qncep(i2,j1 ,k,1) + & + s2c(i,j,3)*qncep(i2,j1+1,k,1) + s2c(i,j,4)*qncep(i1,j1+1,k,1) + qp(i,j,k,2) = s2c(i,j,1)*qncep(i1,j1 ,k,2) + s2c(i,j,2)*qncep(i2,j1 ,k,2) + & + s2c(i,j,3)*qncep(i2,j1+1,k,2) + s2c(i,j,4)*qncep(i1,j1+1,k,2) enddo enddo enddo - call remap_scalar(im, jm, km, npz, nq, nq, ak0, bk0, psc, gzc, tp, qp, Atm(1)) - deallocate ( tp ) + deallocate (qncep) + + psc_r8(:,:) = psc(:,:) + deallocate (psc) + + + call remap_scalar(Atm, km, npz, 2, ak0, bk0, psc_r8, qp, gzc) + call mpp_update_domains(Atm%phis, Atm%domain) + if(is_master()) write(*,*) 'done remap_scalar' deallocate ( qp ) + deallocate ( gzc ) ! Winds: - call get_var3_r4( ncid, 'U', 1,im, jbeg,jend, 1,km, wk3 ) + ! get lat/lon values of pt_c and pt_d from grid data (pt_b) + allocate (pt_c(isd:ied+1,jsd:jed ,2)) + allocate (pt_d(isd:ied ,jsd:jed+1,2)) + allocate (ud(is:ie , js:je+1, km)) + allocate (vd(is:ie+1, js:je , km)) + + call get_staggered_grid( is, ie, js, je, & + isd, ied, jsd, jed, & + Atm%gridstruct%grid, pt_c, pt_d) + + !------ pt_c part ------ + ! Initialize lat-lon to Cubed bi-linear interpolation coeff: + call remap_coef( is, ie+1, js, je, isd, ied+1, jsd, jed, & + im, jm, lon, lat, id1_c, id2_c, jdc_c, s2c_c, pt_c) + + ! 
Find bounding latitudes: + jbeg = jm-1; jend = 2 + do j=js,je + do i=is,ie+1 + j1 = jdc_c(i,j) + jbeg = min(jbeg, j1) + jend = max(jend, j1+1) + enddo + enddo - allocate ( ua(is:ie,js:je,km) ) + ! read in NCEP wind data + allocate ( uncep(1:im,jbeg:jend, 1:km) ) + allocate ( vncep(1:im,jbeg:jend, 1:km) ) + + call get_var3_r4( ncid, 'U', 1,im, jbeg,jend, 1,km, uncep ) + if(is_master()) write(*,*) 'first time done reading Uncep' + call get_var3_r4( ncid, 'V', 1,im, jbeg,jend, 1,km, vncep ) + if(is_master()) write(*,*) 'first time done reading Vncep' + +!$OMP parallel do default(none) shared(is,ie,js,je,km,s2c_c,id1_c,id2_c,jdc_c,uncep,vncep,Atm,vd) & +!$OMP private(i1,i2,j1,p1,p2,p3,e2,ex,ey,utmp,vtmp) do k=1,km do j=js,je - do i=is,ie - i1 = id1(i,j) - i2 = id2(i,j) - j1 = jdc(i,j) - ua(i,j,k) = s2c(i,j,1)*wk3(i1,j1 ,k) + s2c(i,j,2)*wk3(i2,j1 ,k) + & - s2c(i,j,3)*wk3(i2,j1+1,k) + s2c(i,j,4)*wk3(i1,j1+1,k) + do i=is,ie+1 + i1 = id1_c(i,j) + i2 = id2_c(i,j) + j1 = jdc_c(i,j) + p1(:) = Atm%gridstruct%grid(i,j ,1:2) + p2(:) = Atm%gridstruct%grid(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + utmp = s2c_c(i,j,1)*uncep(i1,j1 ,k) + & + s2c_c(i,j,2)*uncep(i2,j1 ,k) + & + s2c_c(i,j,3)*uncep(i2,j1+1,k) + & + s2c_c(i,j,4)*uncep(i1,j1+1,k) + vtmp = s2c_c(i,j,1)*vncep(i1,j1 ,k) + & + s2c_c(i,j,2)*vncep(i2,j1 ,k) + & + s2c_c(i,j,3)*vncep(i2,j1+1,k) + & + s2c_c(i,j,4)*vncep(i1,j1+1,k) + vd(i,j,k) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) enddo enddo enddo - call get_var3_r4( ncid, 'V', 1,im, jbeg,jend, 1,km, wk3 ) - call close_ncfile ( ncid ) + deallocate ( uncep, vncep ) + + !------ pt_d part ------ + ! Initialize lat-lon to Cubed bi-linear interpolation coeff: + call remap_coef( is, ie, js, je+1, isd, ied, jsd, jed+1, & + im, jm, lon, lat, id1_d, id2_d, jdc_d, s2c_d, pt_d) + deallocate ( pt_c, pt_d ) + + ! Find bounding latitudes: + jbeg = jm-1; jend = 2 + do j=js,je+1 + do i=is,ie + j1 = jdc_d(i,j) + jbeg = min(jbeg, j1) + jend = max(jend, j1+1) + enddo + enddo - allocate ( va(is:ie,js:je,km) ) + ! 
read in NCEP wind data
+      allocate ( uncep(1:im,jbeg:jend, 1:km) )
+      allocate ( vncep(1:im,jbeg:jend, 1:km) )
+
+      call get_var3_r4( ncid, 'U', 1,im, jbeg,jend, 1,km, uncep )
+      if(is_master()) write(*,*) 'second time done reading uncep'
+
+      call get_var3_r4( ncid, 'V', 1,im, jbeg,jend, 1,km, vncep )
+      if(is_master()) write(*,*) 'second time done reading vncep'
+
+!$OMP parallel do default(none) shared(is,ie,js,je,km,id1_d,id2_d,jdc_d,s2c_d,uncep,vncep,Atm,ud) &
+!$OMP private(i1,i2,j1,p1,p2,p3,e1,ex,ey,utmp,vtmp)
      do k=1,km
-         do j=js,je
+         do j=js,je+1
            do i=is,ie
-               i1 = id1(i,j)
-               i2 = id2(i,j)
-               j1 = jdc(i,j)
-               va(i,j,k) = s2c(i,j,1)*wk3(i1,j1  ,k) + s2c(i,j,2)*wk3(i2,j1  ,k) + &
-                           s2c(i,j,3)*wk3(i2,j1+1,k) + s2c(i,j,4)*wk3(i1,j1+1,k)
+               i1 = id1_d(i,j)
+               i2 = id2_d(i,j)
+               j1 = jdc_d(i,j)
+               p1(:) = Atm%gridstruct%grid(i, j,1:2)
+               p2(:) = Atm%gridstruct%grid(i+1,j,1:2)
+               call mid_pt_sphere(p1, p2, p3)
+               call get_unit_vect2(p1, p2, e1)
+               call get_latlon_vector(p3, ex, ey)
+               utmp = s2c_d(i,j,1)*uncep(i1,j1  ,k) + &
+                      s2c_d(i,j,2)*uncep(i2,j1  ,k) + &
+                      s2c_d(i,j,3)*uncep(i2,j1+1,k) + &
+                      s2c_d(i,j,4)*uncep(i1,j1+1,k)
+               vtmp = s2c_d(i,j,1)*vncep(i1,j1  ,k) + &
+                      s2c_d(i,j,2)*vncep(i2,j1  ,k) + &
+                      s2c_d(i,j,3)*vncep(i2,j1+1,k) + &
+                      s2c_d(i,j,4)*vncep(i1,j1+1,k)
+               ud(i,j,k) = utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey)
            enddo
         enddo
      enddo
-      deallocate ( wk3 )
 
-      call remap_winds(im, jm, km, npz, ak0, bk0, psc, ua, va, Atm(1))
+      deallocate ( uncep, vncep )
 
-      deallocate ( ua )
-      deallocate ( va )
+      call remap_dwinds(km, npz, ak0, bk0, psc_r8, ud, vd, Atm)
+      deallocate ( ud, vd )
 
+      call close_ncfile ( ncid )
      deallocate ( ak0 )
      deallocate ( bk0 )
@@ -1159,59 +1231,59 @@ end subroutine get_ncep_ic
 !------------------------------------------------------------------
 !------------------------------------------------------------------
  subroutine get_ecmwf_ic( Atm, fv_domain )
-    type(fv_atmos_type), intent(inout) :: Atm(:)
+    type(fv_atmos_type), intent(inout) :: Atm
     type(domain2d),      intent(inout) :: fv_domain
! 
local: real :: ak_ec(138), bk_ec(138) - data ak_ec/ 0.000000, 2.000365, 3.102241, 4.666084, 6.827977, 9.746966, & - 13.605424, 18.608931, 24.985718, 32.985710, 42.879242, 54.955463, & - 69.520576, 86.895882, 107.415741, 131.425507, 159.279404, 191.338562, & - 227.968948, 269.539581, 316.420746, 368.982361, 427.592499, 492.616028, & - 564.413452, 643.339905, 729.744141, 823.967834, 926.344910, 1037.201172, & - 1156.853638, 1285.610352, 1423.770142, 1571.622925, 1729.448975, 1897.519287, & - 2076.095947, 2265.431641, 2465.770508, 2677.348145, 2900.391357, 3135.119385, & - 3381.743652, 3640.468262, 3911.490479, 4194.930664, 4490.817383, 4799.149414, & - 5119.895020, 5452.990723, 5798.344727, 6156.074219, 6526.946777, 6911.870605, & - 7311.869141, 7727.412109, 8159.354004, 8608.525391, 9076.400391, 9562.682617, & - 10065.978516, 10584.631836, 11116.662109, 11660.067383, 12211.547852, 12766.873047, & - 13324.668945, 13881.331055, 14432.139648, 14975.615234, 15508.256836, 16026.115234, & - 16527.322266, 17008.789063, 17467.613281, 17901.621094, 18308.433594, 18685.718750, & - 19031.289063, 19343.511719, 19620.042969, 19859.390625, 20059.931641, 20219.664063, & - 20337.863281, 20412.308594, 20442.078125, 20425.718750, 20361.816406, 20249.511719, & - 20087.085938, 19874.025391, 19608.572266, 19290.226563, 18917.460938, 18489.707031, & - 18006.925781, 17471.839844, 16888.687500, 16262.046875, 15596.695313, 14898.453125, & - 14173.324219, 13427.769531, 12668.257813, 11901.339844, 11133.304688, 10370.175781, & - 9617.515625, 8880.453125, 8163.375000, 7470.343750, 6804.421875, 6168.531250, & - 5564.382813, 4993.796875, 4457.375000, 3955.960938, 3489.234375, 3057.265625, & - 2659.140625, 2294.242188, 1961.500000, 1659.476563, 1387.546875, 1143.250000, & - 926.507813, 734.992188, 568.062500, 424.414063, 302.476563, 202.484375, & + data ak_ec/ 0.000000, 2.000365, 3.102241, 4.666084, 6.827977, 9.746966, & + 13.605424, 18.608931, 24.985718, 32.985710, 42.879242, 54.955463, & + 69.520576, 86.895882, 107.415741, 131.425507, 159.279404, 191.338562, & + 227.968948, 269.539581, 316.420746, 368.982361, 427.592499, 492.616028, & + 564.413452, 643.339905, 729.744141, 823.967834, 926.344910, 1037.201172, & + 1156.853638, 1285.610352, 1423.770142, 1571.622925, 1729.448975, 1897.519287, & + 2076.095947, 2265.431641, 2465.770508, 2677.348145, 2900.391357, 3135.119385, & + 3381.743652, 3640.468262, 3911.490479, 4194.930664, 4490.817383, 4799.149414, & + 5119.895020, 5452.990723, 5798.344727, 6156.074219, 6526.946777, 6911.870605, & + 7311.869141, 7727.412109, 8159.354004, 8608.525391, 9076.400391, 9562.682617, & + 10065.978516, 10584.631836, 11116.662109, 11660.067383, 12211.547852, 12766.873047, & + 13324.668945, 13881.331055, 14432.139648, 14975.615234, 15508.256836, 16026.115234, & + 16527.322266, 17008.789063, 17467.613281, 17901.621094, 18308.433594, 18685.718750, & + 19031.289063, 19343.511719, 19620.042969, 19859.390625, 20059.931641, 20219.664063, & + 20337.863281, 20412.308594, 20442.078125, 20425.718750, 20361.816406, 20249.511719, & + 20087.085938, 19874.025391, 19608.572266, 19290.226563, 18917.460938, 18489.707031, & + 18006.925781, 17471.839844, 16888.687500, 16262.046875, 15596.695313, 14898.453125, & + 14173.324219, 13427.769531, 12668.257813, 11901.339844, 11133.304688, 10370.175781, & + 9617.515625, 8880.453125, 8163.375000, 7470.343750, 6804.421875, 6168.531250, & + 5564.382813, 4993.796875, 4457.375000, 3955.960938, 3489.234375, 3057.265625, & + 2659.140625, 2294.242188, 1961.500000, 1659.476563, 
1387.546875, 1143.250000, & + 926.507813, 734.992188, 568.062500, 424.414063, 302.476563, 202.484375, & 122.101563, 62.781250, 22.835938, 3.757813, 0.000000, 0.000000 / - data bk_ec/ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000007, 0.000024, 0.000059, 0.000112, 0.000199, & - 0.000340, 0.000562, 0.000890, 0.001353, 0.001992, 0.002857, & - 0.003971, 0.005378, 0.007133, 0.009261, 0.011806, 0.014816, & - 0.018318, 0.022355, 0.026964, 0.032176, 0.038026, 0.044548, & - 0.051773, 0.059728, 0.068448, 0.077958, 0.088286, 0.099462, & - 0.111505, 0.124448, 0.138313, 0.153125, 0.168910, 0.185689, & - 0.203491, 0.222333, 0.242244, 0.263242, 0.285354, 0.308598, & - 0.332939, 0.358254, 0.384363, 0.411125, 0.438391, 0.466003, & - 0.493800, 0.521619, 0.549301, 0.576692, 0.603648, 0.630036, & - 0.655736, 0.680643, 0.704669, 0.727739, 0.749797, 0.770798, & - 0.790717, 0.809536, 0.827256, 0.843881, 0.859432, 0.873929, & - 0.887408, 0.899900, 0.911448, 0.922096, 0.931881, 0.940860, & - 0.949064, 0.956550, 0.963352, 0.969513, 0.975078, 0.980072, & + data bk_ec/ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000007, 0.000024, 0.000059, 0.000112, 0.000199, & + 0.000340, 0.000562, 0.000890, 0.001353, 0.001992, 0.002857, & + 0.003971, 0.005378, 0.007133, 0.009261, 0.011806, 0.014816, & + 0.018318, 0.022355, 0.026964, 0.032176, 0.038026, 0.044548, & + 0.051773, 0.059728, 0.068448, 0.077958, 0.088286, 0.099462, & + 0.111505, 0.124448, 0.138313, 0.153125, 0.168910, 0.185689, & + 0.203491, 0.222333, 0.242244, 0.263242, 0.285354, 0.308598, & + 0.332939, 0.358254, 0.384363, 0.411125, 0.438391, 0.466003, & + 0.493800, 0.521619, 0.549301, 0.576692, 0.603648, 0.630036, & + 0.655736, 0.680643, 0.704669, 0.727739, 0.749797, 0.770798, & + 0.790717, 0.809536, 0.827256, 0.843881, 0.859432, 0.873929, & + 0.887408, 0.899900, 0.911448, 0.922096, 0.931881, 0.940860, & + 0.949064, 0.956550, 0.963352, 0.969513, 0.975078, 0.980072, & 0.984542, 0.988500, 0.991984, 0.995003, 0.997630, 1.000000 / -! The following L63 will be used in the model +! The following L63 will be used in the model ! 
The setting is the same as NCEP GFS's L64 except the top layer real, dimension(64):: ak_sj, bk_sj data ak_sj/64.247, 137.790, 221.958, & @@ -1270,22 +1342,20 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) real, allocatable:: psc_r8(:,:), zhc(:,:,:), qc(:,:,:,:) real, allocatable:: lat(:), lon(:), ak0(:), bk0(:) real, allocatable:: pt_c(:,:,:), pt_d(:,:,:) - real:: s2c(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je,4) - real:: s2c_c(Atm(1)%bd%is:Atm(1)%bd%ie+1,Atm(1)%bd%js:Atm(1)%bd%je,4) - real:: s2c_d(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je+1,4) - integer, dimension(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je):: & + real:: s2c(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,4) + real:: s2c_c(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je,4) + real:: s2c_d(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1,4) + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: & id1, id2, jdc - integer, dimension(Atm(1)%bd%is:Atm(1)%bd%ie+1,Atm(1)%bd%js:Atm(1)%bd%je):: & + integer, dimension(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je):: & id1_c, id2_c, jdc_c - integer, dimension(Atm(1)%bd%is:Atm(1)%bd%ie,Atm(1)%bd%js:Atm(1)%bd%je+1):: & + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1):: & id1_d, id2_d, jdc_d real:: utmp, vtmp integer:: i, j, k, n, im, jm, km, npz, npt integer:: i1, i2, j1, ncid integer:: jbeg, jend, jn - integer tsize(3) - logical:: read_ts = .true. - logical:: land_ts = .false. + integer tsize(3) logical:: found integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -1306,20 +1376,20 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) logical :: filtered_terrain = .true. namelist /external_ic_nml/ filtered_terrain - is = Atm(1)%bd%is - ie = Atm(1)%bd%ie - js = Atm(1)%bd%js - je = Atm(1)%bd%je - isd = Atm(1)%bd%isd - ied = Atm(1)%bd%ied - jsd = Atm(1)%bd%jsd - jed = Atm(1)%bd%jed + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed deg2rad = pi/180. - npz = Atm(1)%npz + npz = Atm%npz call get_number_tracers(MODEL_ATMOS, num_tracers=ntracers, num_prog=ntprog) - if(is_master()) write(*,*) 'ntracers = ', ntracers, 'ntprog = ',ntprog + if(is_master()) write(*,*) 'ntracers = ', ntracers, 'ntprog = ',ntprog sphum = get_tracer_index(MODEL_ATMOS, 'sphum') liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') @@ -1332,46 +1402,50 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) if (is_master()) then print *, 'sphum = ', sphum print *, 'liq_wat = ', liq_wat - if ( Atm(1)%flagstruct%nwat .eq. 6 ) then + if ( Atm%flagstruct%nwat .eq. 6 ) then print *, 'rainwat = ', rainwat print *, 'iec_wat = ', ice_wat print *, 'snowwat = ', snowwat - print *, 'graupel = ', graupel + print *, 'graupel = ', graupel endif print *, ' o3mr = ', o3mr endif - + ! Set up model's ak and bk - if ( npz <= 64 ) then - Atm(1)%ak(:) = ak_sj(:) - Atm(1)%bk(:) = bk_sj(:) - Atm(1)%ptop = Atm(1)%ak(1) - else - call set_eta(npz, ks, Atm(1)%ptop, Atm(1)%ak, Atm(1)%bk) + if (Atm%flagstruct%external_eta) then + call set_external_eta (Atm%ak, Atm%bk, Atm%ptop, Atm%ks) endif +!!$ if ( (npz == 64 .or. npz == 63) .and. len(trim(Atm%flagstruct%npz_type)) == 0 ) then +!!$ if (is_master()) print*, 'Using default GFS levels' +!!$ Atm%ak(:) = ak_sj(:) +!!$ Atm%bk(:) = bk_sj(:) +!!$ Atm%ptop = Atm%ak(1) +!!$ else +!!$ call set_eta(npz, ks, Atm%ptop, Atm%ak, Atm%bk, Atm%flagstruct%npz_type) +!!$ endif !! 
Read in model terrain from oro_data.tile?.nc if (filtered_terrain) then - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_filt', Atm(1)%phis, domain=Atm(1)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_filt', Atm%phis, domain=Atm%domain) elseif (.not. filtered_terrain) then - id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_raw', Atm(1)%phis, domain=Atm(1)%domain) + id_res = register_restart_field (ORO_restart, fn_oro_ics, 'orog_raw', Atm%phis, domain=Atm%domain) endif call restore_state (ORO_restart) call free_restart_type(ORO_restart) - Atm(1)%phis = Atm(1)%phis*grav + Atm%phis = Atm%phis*grav if(is_master()) write(*,*) 'done reading model terrain from oro_data.nc' - call mpp_update_domains( Atm(1)%phis, Atm(1)%domain ) + call mpp_update_domains( Atm%phis, Atm%domain ) !! Read in o3mr, ps and zh from GFS_data.tile?.nc allocate (o3mr_gfs(is:ie,js:je,levp_gfs)) allocate (ps_gfs(is:ie,js:je)) allocate (zh_gfs(is:ie,js:je,levp_gfs+1)) - + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'o3mr', o3mr_gfs, & - mandatory=.false.,domain=Atm(1)%domain) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ps', ps_gfs, domain=Atm(1)%domain) - id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ZH', zh_gfs, domain=Atm(1)%domain) + mandatory=.false.,domain=Atm%domain) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ps', ps_gfs, domain=Atm%domain) + id_res = register_restart_field (GFS_restart, fn_gfs_ics, 'ZH', zh_gfs, domain=Atm%domain) call restore_state (GFS_restart) call free_restart_type(GFS_restart) @@ -1384,24 +1458,24 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) ak_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,1) bk_gfs(1:levp_gfs+1) = wk2(1:levp_gfs+1,2) deallocate (wk2) - + if ( bk_gfs(1) < 1.E-9 ) ak_gfs(1) = max(1.e-9, ak_gfs(1)) - + iq = o3mr if(is_master()) write(*,*) 'Reading o3mr from GFS_data.nc:' if(is_master()) write(*,*) 'o3mr =', iq - call remap_scalar_single(Atm(1), levp_gfs, npz, ak_gfs, bk_gfs, ps_gfs, o3mr_gfs, zh_gfs, iq) + call remap_scalar_single(Atm, levp_gfs, npz, ak_gfs, bk_gfs, ps_gfs, o3mr_gfs, zh_gfs, iq) deallocate (ak_gfs, bk_gfs) deallocate (ps_gfs, zh_gfs) deallocate (o3mr_gfs) !! Start to read EC data - fname = Atm(1)%flagstruct%res_latlon_dynamics + fname = Atm%flagstruct%res_latlon_dynamics if( file_exist(fname) ) then call open_ncfile( fname, ncid ) ! open the file - + call get_ncdim1( ncid, 'longitude', tsize(1) ) call get_ncdim1( ncid, 'latitude', tsize(2) ) call get_ncdim1( ncid, 'level', tsize(3) ) @@ -1413,7 +1487,7 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) allocate ( lon(im) ) allocate ( lat(jm) ) - + call _GET_VAR1(ncid, 'longitude', im, lon ) call _GET_VAR1(ncid, 'latitude', jm, lat ) @@ -1449,14 +1523,14 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) ! Initialize lat-lon to Cubed bi-linear interpolation coeff: call remap_coef( is, ie, js, je, isd, ied, jsd, jed, & - im, jm, lon, lat, id1, id2, jdc, s2c , Atm(1)%gridstruct%agrid ) + im, jm, lon, lat, id1, id2, jdc, s2c , Atm%gridstruct%agrid ) ! Find bounding latitudes: jbeg = jm-1; jend = 2 do j=js,je do i=is,ie j1 = jdc(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -1543,8 +1617,10 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) call compute_zh(im, jn, km, ak0, bk0, psec, zsec, tec, qec, 5, zhec ) if(is_master()) write(*,*) 'done compute zhec' + deallocate ( zsec ) + deallocate ( tec ) -! convert zhec, psec, zsec from EC grid to cubic grid +! 
convert zhec, psec from EC grid to cubic grid allocate (psc(is:ie,js:je)) allocate (psc_r8(is:ie,js:je)) @@ -1571,7 +1647,6 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) enddo enddo deallocate ( psec ) - deallocate ( zsec ) allocate (zhc(is:ie,js:je,km+1)) !$OMP parallel do default(none) shared(is,ie,js,je,km,s2c,id1,id2,jdc,zhc,zhec) & @@ -1589,7 +1664,7 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) enddo deallocate ( zhec ) - if(is_master()) write(*,*) 'done interpolate psec/zsec/zhec into cubic grid psc/zhc!' + if(is_master()) write(*,*) 'done interpolate psec/zhec into cubic grid psc/zhc!' ! Read in other tracers from EC data and remap them into cubic sphere grid: allocate ( qc(is:ie,js:je,km,6) ) @@ -1647,9 +1722,10 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) psc_r8(:,:) = psc(:,:) deallocate ( psc ) - call remap_scalar_ec(Atm(1), km, npz, 6, ak0, bk0, psc_r8, qc, wc, zhc ) - if(is_master()) write(*,*) 'done remap_scalar_ec' - + call remap_scalar(Atm, km, npz, 6, ak0, bk0, psc_r8, qc, zhc, wc) + call mpp_update_domains(Atm%phis, Atm%domain) + if(is_master()) write(*,*) 'done remap_scalar' + deallocate ( zhc ) deallocate ( wc ) deallocate ( qc ) @@ -1663,7 +1739,7 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) call get_staggered_grid( is, ie, js, je, & isd, ied, jsd, jed, & - Atm(1)%gridstruct%grid, pt_c, pt_d) + Atm%gridstruct%grid, pt_c, pt_d) !------ pt_c part ------ ! Initialize lat-lon to Cubed bi-linear interpolation coeff: @@ -1717,8 +1793,8 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) i1 = id1_c(i,j) i2 = id2_c(i,j) j1 = jdc_c(i,j) - p1(:) = Atm(1)%gridstruct%grid(i,j ,1:2) - p2(:) = Atm(1)%gridstruct%grid(i,j+1,1:2) + p1(:) = Atm%gridstruct%grid(i,j ,1:2) + p2(:) = Atm%gridstruct%grid(i,j+1,1:2) call mid_pt_sphere(p1, p2, p3) call get_unit_vect2(p1, p2, e2) call get_latlon_vector(p3, ex, ey) @@ -1752,7 +1828,7 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) jend = max(jend, j1+1) enddo enddo - + ! read in EC wind data allocate ( uec(1:im,jbeg:jend, 1:km) ) allocate ( vec(1:im,jbeg:jend, 1:km) ) @@ -1777,8 +1853,8 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) i1 = id1_d(i,j) i2 = id2_d(i,j) j1 = jdc_d(i,j) - p1(:) = Atm(1)%gridstruct%grid(i, j,1:2) - p2(:) = Atm(1)%gridstruct%grid(i+1,j,1:2) + p1(:) = Atm%gridstruct%grid(i, j,1:2) + p2(:) = Atm%gridstruct%grid(i+1,j,1:2) call mid_pt_sphere(p1, p2, p3) call get_unit_vect2(p1, p2, e1) call get_latlon_vector(p3, ex, ey) @@ -1796,7 +1872,7 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) enddo deallocate ( uec, vec ) - call remap_dwinds(km, npz, ak0, bk0, psc_r8, ud, vd, Atm(1)) + call remap_dwinds(km, npz, ak0, bk0, psc_r8, ud, vd, Atm) deallocate ( ud, vd ) #ifndef COND_IFS_IC @@ -1805,21 +1881,21 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) do k=1,npz do j=js,je do i=is,ie - wt = Atm(1)%delp(i,j,k) - if ( Atm(1)%flagstruct%nwat .eq. 2 ) then - qt = wt*(1.+Atm(1)%q(i,j,k,liq_wat)) - elseif ( Atm(1)%flagstruct%nwat .eq. 6 ) then - qt = wt*(1. + Atm(1)%q(i,j,k,liq_wat) + & - Atm(1)%q(i,j,k,ice_wat) + & - Atm(1)%q(i,j,k,rainwat) + & - Atm(1)%q(i,j,k,snowwat) + & - Atm(1)%q(i,j,k,graupel)) + wt = Atm%delp(i,j,k) + if ( Atm%flagstruct%nwat .eq. 2 ) then + qt = wt*(1.+Atm%q(i,j,k,liq_wat)) + elseif ( Atm%flagstruct%nwat .eq. 6 ) then + qt = wt*(1. 
+ Atm%q(i,j,k,liq_wat) + & + Atm%q(i,j,k,ice_wat) + & + Atm%q(i,j,k,rainwat) + & + Atm%q(i,j,k,snowwat) + & + Atm%q(i,j,k,graupel)) endif m_fac = wt / qt do iq=1,ntracers - Atm(1)%q(i,j,k,iq) = m_fac * Atm(1)%q(i,j,k,iq) + Atm%q(i,j,k,iq) = m_fac * Atm%q(i,j,k,iq) enddo - Atm(1)%delp(i,j,k) = qt + Atm%delp(i,j,k) = qt enddo enddo enddo @@ -1830,13 +1906,13 @@ subroutine get_ecmwf_ic( Atm, fv_domain ) deallocate ( psc_r8 ) deallocate ( lat, lon ) - Atm(1)%flagstruct%make_nh = .false. + Atm%flagstruct%make_nh = .false. end subroutine get_ecmwf_ic !------------------------------------------------------------------ !------------------------------------------------------------------ subroutine get_fv_ic( Atm, fv_domain, nq ) - type(fv_atmos_type), intent(inout) :: Atm(:) + type(fv_atmos_type), intent(inout) :: Atm type(domain2d), intent(inout) :: fv_domain integer, intent(in):: nq @@ -1849,17 +1925,17 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) ! integer sphum, liq_wat, ice_wat, cld_amt ! GFDL AM2 physics logical found - npz = Atm(1)%npz + npz = Atm%npz ! Zero out all initial tracer fields: - Atm(1)%q = 0. + Atm%q = 0. ! Read in lat-lon FV core restart file - fname = Atm(1)%flagstruct%res_latlon_dynamics + fname = Atm%flagstruct%res_latlon_dynamics if( file_exist(fname) ) then call field_size(fname, 'T', tsize, field_found=found) - if(is_master()) write(*,*) 'Using lat-lon FV restart:', fname + if(is_master()) write(*,*) 'Using lat-lon FV restart:', fname if ( found ) then im = tsize(1); jm = tsize(2); km = tsize(3) @@ -1877,9 +1953,9 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) enddo do j=1,jm - lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP + lat(j) = -0.5*pi + real(j-1)*pi/real(jm-1) ! SP to NP enddo - + allocate ( ak0(1:km+1) ) allocate ( bk0(1:km+1) ) allocate ( ps0(1:im,1:jm) ) @@ -1910,12 +1986,12 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) endif ! Read in tracers: only AM2 "physics tracers" at this point - fname = Atm(1)%flagstruct%res_latlon_tracers + fname = Atm%flagstruct%res_latlon_tracers if( file_exist(fname) ) then - if(is_master()) write(*,*) 'Using lat-lon tracer restart:', fname + if(is_master()) write(*,*) 'Using lat-lon tracer restart:', fname - allocate ( q0(im,jm,km,Atm(1)%ncnst) ) + allocate ( q0(im,jm,km,Atm%ncnst) ) q0 = 0. do tr_ind = 1, nq @@ -1936,8 +2012,8 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) call d2a3d(u0, v0, ua, va, im, jm, km, lon) - deallocate ( u0 ) - deallocate ( v0 ) + deallocate ( u0 ) + deallocate ( v0 ) if(mpp_pe()==4) call pmaxmin( 'UA', ua, im*jm, km, 1.) if(mpp_pe()==4) call pmaxmin( 'VA', va, im*jm, km, 1.) @@ -1961,24 +2037,24 @@ subroutine get_fv_ic( Atm, fv_domain, nq ) ! Horizontal interpolation to the cubed sphere grid center ! 
remap vertically with terrain adjustment - call remap_xyz( im, 1, jm, jm, km, npz, nq, Atm(1)%ncnst, lon, lat, ak0, bk0, & - ps0, gz0, ua, va, t0, q0, Atm(1) ) - - deallocate ( ak0 ) - deallocate ( bk0 ) - deallocate ( ps0 ) - deallocate ( gz0 ) - deallocate ( t0 ) - deallocate ( q0 ) - deallocate ( dp0 ) - deallocate ( ua ) - deallocate ( va ) - deallocate ( lat ) - deallocate ( lon ) + call remap_xyz( im, 1, jm, jm, km, npz, nq, Atm%ncnst, lon, lat, ak0, bk0, & + ps0, gz0, ua, va, t0, q0, Atm ) - end subroutine get_fv_ic -!------------------------------------------------------------------ -!------------------------------------------------------------------ + deallocate ( ak0 ) + deallocate ( bk0 ) + deallocate ( ps0 ) + deallocate ( gz0 ) + deallocate ( t0 ) + deallocate ( q0 ) + deallocate ( dp0 ) + deallocate ( ua ) + deallocate ( va ) + deallocate ( lat ) + deallocate ( lon ) + + end subroutine get_fv_ic +!------------------------------------------------------------------ +!------------------------------------------------------------------ #ifndef DYCORE_SOLO subroutine ncep2fms(im, jm, lon, lat, wk) @@ -2152,177 +2228,12 @@ subroutine remap_coef( is, ie, js, je, isd, ied, jsd, jed, & end subroutine remap_coef - subroutine remap_scalar(im, jm, km, npz, nq, ncnst, ak0, bk0, psc, gzc, ta, qa, Atm) - type(fv_atmos_type), intent(inout) :: Atm - integer, intent(in):: im, jm, km, npz, nq, ncnst - real, intent(in):: ak0(km+1), bk0(km+1) - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: psc, gzc - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km):: ta - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km,ncnst):: qa -! local: - real, dimension(Atm%bd%is:Atm%bd%ie,km):: tp - real, dimension(Atm%bd%is:Atm%bd%ie,km+1):: pe0, pn0 - real, dimension(Atm%bd%is:Atm%bd%ie,npz):: qn1 - real, dimension(Atm%bd%is:Atm%bd%ie,npz+1):: pe1, pn1 - real(kind=R_GRID), dimension(2*km+1):: gz, pn - real pk0(km+1) - real qp(Atm%bd%is:Atm%bd%ie,km,ncnst) - real p1, p2, alpha, rdg - real(kind=R_GRID):: pst, pt0 - integer i,j,k, k2,l, iq - integer sphum, o3mr, clwmr - integer :: is, ie, js, je - integer :: isd, ied, jsd, jed - - is = Atm%bd%is - ie = Atm%bd%ie - js = Atm%bd%js - je = Atm%bd%je - isd = Atm%bd%isd - ied = Atm%bd%ied - jsd = Atm%bd%jsd - jed = Atm%bd%jed - - k2 = max(10, km/2) - -! nq is always 1 - sphum = get_tracer_index(MODEL_ATMOS, 'sphum') - - if (mpp_pe()==1) then - print *, 'sphum = ', sphum, ' ncnst=', ncnst - print *, 'T_is_Tv = ', T_is_Tv, ' zvir=', zvir, ' kappa=', kappa - endif - - if ( sphum/=1 ) then - call mpp_error(FATAL,'SPHUM must be 1st tracer') - endif - - call prt_maxmin('ZS_FV3', Atm%phis, is, ie, js, je, 3, 1, 1./grav) - call prt_maxmin('ZS_GFS', gzc, is, ie, js, je, 0, 1, 1.) - call prt_maxmin('PS_Data', psc, is, ie, js, je, 0, 1, 0.01) - call prt_maxmin('T_Data', ta, is, ie, js, je, 0, km, 1.) - call prt_maxmin('q_Data', qa(is:ie,js:je,1:km,1), is, ie, js, je, 0, km, 1.) - - do 5000 j=js,je - - do i=is,ie - - do iq=1,ncnst - do k=1,km - qp(i,k,iq) = qa(i,j,k,iq) - enddo - enddo - - if ( T_is_Tv ) then -! The "T" field in NCEP analysis is actually virtual temperature (Larry H. post processing) -! BEFORE 20051201 - do k=1,km - tp(i,k) = ta(i,j,k) - enddo - else - do k=1,km - tp(i,k) = ta(i,j,k)*(1.+zvir*qp(i,k,sphum)) - enddo - endif -! Tracers: - - do k=1,km+1 - pe0(i,k) = ak0(k) + bk0(k)*psc(i,j) - pn0(i,k) = log(pe0(i,k)) - pk0(k) = pe0(i,k)**kappa - enddo -! gzc is height - -! 
Note the following line, gz is actully Z (from Jeff's data). - gz(km+1) = gzc(i,j)*grav - do k=km,1,-1 - gz(k) = gz(k+1) + rdgas*tp(i,k)*(pn0(i,k+1)-pn0(i,k)) - enddo - - do k=1,km+1 - pn(k) = pn0(i,k) - enddo -! Use log-p for interpolation/extrapolation -! mirror image method: - do k=km+2, km+k2 - l = 2*(km+1) - k - gz(k) = 2.*gz(km+1) - gz(l) - pn(k) = 2.*pn(km+1) - pn(l) - enddo - do k=km+k2-1, 2, -1 - if( Atm%phis(i,j).le.gz(k) .and. Atm%phis(i,j).ge.gz(k+1) ) then - pst = pn(k) + (pn(k+1)-pn(k))*(gz(k)-Atm%phis(i,j))/(gz(k)-gz(k+1)) - go to 123 - endif - enddo -123 Atm%ps(i,j) = exp(pst) - enddo ! i-loop - - do i=is,ie - pe1(i,1) = Atm%ak(1) - pn1(i,1) = log(pe1(i,1)) - enddo - do k=2,npz+1 - do i=is,ie - pe1(i,k) = Atm%ak(k) + Atm%bk(k)*Atm%ps(i,j) - pn1(i,k) = log(pe1(i,k)) - enddo - enddo - -! * Compute delp - do k=1,npz - do i=is,ie - Atm%delp(i,j,k) = pe1(i,k+1) - pe1(i,k) - enddo - enddo - -!--------------- -! map shpum, o3mr, clwmr tracers -!---------------- - do iq=1,ncnst - call mappm(km, pe0, qp(is,1,iq), npz, pe1, qn1, is,ie, 0, 11, Atm%ptop) - do k=1,npz - do i=is,ie - Atm%q(i,j,k,iq) = qn1(i,k) - enddo - enddo - enddo - -!------------------------------------------------------------- -! map virtual temperature using geopotential conserving scheme. -!------------------------------------------------------------- - call mappm(km, pn0, tp, npz, pn1, qn1, is,ie, 1, 9, Atm%ptop) - do k=1,npz - do i=is,ie - Atm%pt(i,j,k) = qn1(i,k)/(1.+zvir*Atm%q(i,j,k,sphum)) - enddo - enddo - - if ( .not. Atm%flagstruct%hydrostatic .and. Atm%flagstruct%ncep_ic ) then -! Replace delz with NCEP hydrostatic state - rdg = -rdgas / grav - do k=1,npz - do i=is,ie - atm%delz(i,j,k) = rdg*qn1(i,k)*(pn1(i,k+1)-pn1(i,k)) - enddo - enddo - endif - -5000 continue - - call prt_maxmin('PS_model', Atm%ps(is:ie,js:je), is, ie, js, je, 0, 1, 0.01) - - if (is_master()) write(*,*) 'done remap_scalar' - - end subroutine remap_scalar - - - subroutine remap_scalar_nggps(Atm, km, npz, ncnst, ak0, bk0, psc, qa, omga, zh) + subroutine remap_scalar(Atm, km, npz, ncnst, ak0, bk0, psc, qa, zh, omga, t_in) type(fv_atmos_type), intent(inout) :: Atm integer, intent(in):: km, npz, ncnst real, intent(in):: ak0(km+1), bk0(km+1) real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: psc - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km):: omga + real, intent(in), optional, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km):: omga, t_in real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km,ncnst):: qa real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km+1):: zh ! local: @@ -2357,23 +2268,39 @@ subroutine remap_scalar_nggps(Atm, km, npz, ncnst, ak0, bk0, psc, qa, omga, zh) cld_amt = get_tracer_index(MODEL_ATMOS, 'cld_amt') o3mr = get_tracer_index(MODEL_ATMOS, 'o3mr') - k2 = max(10, km/2) - if (mpp_pe()==1) then - print *, 'sphum = ', sphum - print *, 'clwmr = ', liq_wat - print *, ' o3mr = ', o3mr + print *, 'In remap_scalar:' print *, 'ncnst = ', ncnst + print *, 'nwat = ', Atm%flagstruct%nwat + print *, 'sphum = ', sphum + print *, 'liq_wat = ', liq_wat + if ( Atm%flagstruct%nwat .eq. 
6 ) then
+       print *, 'rainwat = ', rainwat
+       print *, 'ice_wat = ', ice_wat
+       print *, 'snowwat = ', snowwat
+       print *, 'graupel = ', graupel
+    endif
   endif

   if ( sphum/=1 ) then
       call mpp_error(FATAL,'SPHUM must be 1st tracer')
   endif

+  k2 = max(10, km/2)
+
+#ifdef USE_GFS_ZS
+  Atm%phis(is:ie,js:je) = zh(is:ie,js:je,km+1)*grav
+#endif
+
+  if (Atm%flagstruct%ecmwf_ic) then
+     if (cld_amt .gt. 0) Atm%q(is:ie,js:je,1:npz,cld_amt) = 0.
+  endif
+
 !$OMP parallel do default(none) &
-!$OMP shared(sphum,liq_wat,rainwat,ice_wat,snowwat,graupel,&
-!$OMP        cld_amt,ncnst,npz,is,ie,js,je,km,k2,ak0,bk0,psc,zh,omga,qa,Atm,z500) &
+!$OMP shared(sphum,liq_wat,rainwat,ice_wat,snowwat,graupel,source,&
+!$OMP        cld_amt,ncnst,npz,is,ie,js,je,km,k2,ak0,bk0,psc,zh,omga,qa,Atm,z500,t_in) &
 !$OMP private(l,m,pst,pn,gz,pe0,pn0,pe1,pn1,dp2,qp,qn1,gz_fv)
+
   do 5000 j=js,je

    do k=1,km+1
      do i=is,ie
@@ -2436,34 +2363,36 @@ subroutine remap_scalar_nggps(Atm, km, npz, ncnst, ak0, bk0, psc, qa, omga, zh)
       enddo
    enddo

-! map shpum, o3mr, liq_wat tracers
+! map tracers
    do iq=1,ncnst
-      do k=1,km
-         do i=is,ie
-            qp(i,k) = qa(i,j,k,iq)
+      if (floor(qa(is,j,1,iq)) > -999) then ! skip missing scalars
+         do k=1,km
+            do i=is,ie
+               qp(i,k) = qa(i,j,k,iq)
+            enddo
          enddo
-      enddo
-      call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, 0, 8, Atm%ptop)
-      if ( iq==sphum ) then
-         call fillq(ie-is+1, npz, 1, qn1, dp2)
-      else
-         call fillz(ie-is+1, npz, 1, qn1, dp2)
-      endif
-! The HiRam step of blending model sphum with NCEP data is obsolete because nggps is always cold starting...
-      do k=1,npz
-         do i=is,ie
-            Atm%q(i,j,k,iq) = qn1(i,k)
+         call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, 0, 8, Atm%ptop)
+         if ( iq==sphum ) then
+            call fillq(ie-is+1, npz, 1, qn1, dp2)
+         else
+            call fillz(ie-is+1, npz, 1, qn1, dp2)
+         endif
+         ! The HiRam step of blending model sphum with NCEP data is obsolete because nggps is always cold starting...
+         do k=1,npz
+            do i=is,ie
+               Atm%q(i,j,k,iq) = qn1(i,k)
+            enddo
          enddo
-      enddo
+      endif
    enddo

 !---------------------------------------------------
-! Retrive temperature using GFS geopotential height
+! Retrieve temperature using geopotential height from external data
 !---------------------------------------------------
   do i=is,ie
! Make sure FV3 top is lower than GFS; can not do extrapolation above the top at this point
     if ( pn1(i,1) .lt. pn0(i,1) ) then
-         call mpp_error(FATAL,'FV3 top higher than NCEP/GFS')
+         call mpp_error(FATAL,'FV3 top higher than external data')
     endif

      do k=1,km+1
@@ -2496,7 +2425,7 @@ subroutine remap_scalar_nggps(Atm, km, npz, ncnst, ak0, bk0, psc, qa, omga, zh)
          endif
       enddo
 #else
-       do l=m,km+k2
+       do l=m,km+k2-1
        if ( (pn1(i,k).le.pn(l+1)) .and. (pn1(i,k).ge.pn(l)) ) then
            gz_fv(k) = gz(l) + (gz(l+1)-gz(l))*(pn1(i,k)-pn(l))/(pn(l+1)-pn(l))
            goto 555
@@ -2506,10 +2435,33 @@ subroutine remap_scalar_nggps(Atm, km, npz, ncnst, ak0, bk0, psc, qa, omga, zh)
 555    m = l
      enddo

-! Compute true temperature using hydrostatic balance
-      do k=1,npz
-         Atm%pt(i,j,k) = (gz_fv(k)-gz_fv(k+1))/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*Atm%q(i,j,k,sphum)) )
+      do k=1,npz+1
+         Atm%peln(i,k,j) = pn1(i,k)
       enddo
+
+!----------------------------------------------------
+! Compute true temperature using hydrostatic balance
+!----------------------------------------------------
+      if (trim(source) /= source_fv3gfs .or. .not. present(t_in)) then
+         do k=1,npz
+!           qc = 1.-(Atm%q(i,j,k,liq_wat)+Atm%q(i,j,k,rainwat)+Atm%q(i,j,k,ice_wat)+Atm%q(i,j,k,snowwat))
+!           Atm%pt(i,j,k) = (gz_fv(k)-gz_fv(k+1))*qc/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*Atm%q(i,j,k,sphum)) )
+            Atm%pt(i,j,k) = (gz_fv(k)-gz_fv(k+1))/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*Atm%q(i,j,k,sphum)) )
+         enddo
+!------------------------------
+! Remap input T logarithmically in p.
+!------------------------------
+      else
+         do k=1,km
+            qp(i,k) = t_in(i,j,k)
+         enddo
+
+         call mappm(km, log(pe0), qp, npz, log(pe1), qn1, is,ie, 2, 4, Atm%ptop) ! pn0 and pn1 are higher-precision
+                                                                                 ! and cannot be passed to mappm
+         do k=1,npz
+            Atm%pt(i,j,k) = qn1(i,k)
+         enddo
+      endif
     if ( .not. Atm%flagstruct%hydrostatic ) then
       do k=1,npz
          Atm%delz(i,j,k) = (gz_fv(k+1) - gz_fv(k)) / grav
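The retrieval in the hunk above inverts the hypsometric relation layer by layer: once the interface geopotential gz_fv has been interpolated in log-p onto the target grid, the layer-mean virtual temperature follows from Tv = (gz_fv(k) - gz_fv(k+1)) / (rdgas*(pn1(k+1) - pn1(k))), and dividing by (1 + zvir*sphum) reduces it to temperature. A minimal single-column sketch of that step; the constants and the made-up column values below are illustrative only, not the model's interfaces:

    program hydrostatic_t
      implicit none
      integer, parameter :: npz = 3            ! illustrative layer count, k=1 at the top
      real, parameter :: rdgas = 287.04        ! dry-air gas constant (J/kg/K)
      real, parameter :: zvir  = 0.608         ! rvgas/rdgas - 1
      real :: gz(npz+1), pn1(npz+1), sphum(npz), t(npz)
      integer :: k
      gz    = (/ 11.2e4, 5.4e4, 1.9e4, 0.0 /)      ! interface geopotential (m2/s2)
      pn1   = log( (/ 2.e4, 5.e4, 8.e4, 1.e5 /) )  ! log of interface pressure (Pa)
      sphum = (/ 1.e-5, 1.e-3, 8.e-3 /)            ! specific humidity (kg/kg)
      do k = 1, npz
         ! hypsometric equation solved for layer-mean virtual temperature,
         ! then reduced to temperature with the virtual correction
         t(k) = (gz(k) - gz(k+1)) / ( rdgas*(pn1(k+1) - pn1(k))*(1. + zvir*sphum(k)) )
         print '(a,i2,a,f7.2,a)', ' layer', k, ':  T =', t(k), ' K'
      enddo
    end program hydrostatic_t

The same thicknesses run the other way in the delz assignment just above: given temperature, the geopotential difference across a layer fixes its physical depth.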
@@ -2519,320 +2471,96 @@ subroutine remap_scalar_ec(Atm, km, npz, ncnst, ak0, bk0, psc, qa, wc, zh)
        enddo
      endif

   enddo ! i-loop

 !-----------------------------------------------------------------------
-! seperate cloud water and cloud ice
-! From Jan-Huey Chen's HiRAM code
+! separate cloud water and cloud ice from Jan-Huey Chen's HiRAM code
+! only used for NCEP IC and GFDL microphysics
 !-----------------------------------------------------------------------
+   if (trim(source) /= source_fv3gfs) then
+      if ((Atm%flagstruct%nwat .eq. 3 .or. Atm%flagstruct%nwat .eq. 6) .and. &
+           (Atm%flagstruct%ncep_ic .or. Atm%flagstruct%nggps_ic)) then
+         do k=1,npz
+            do i=is,ie

-   if ( Atm%flagstruct%nwat .eq. 6 ) then
-      do k=1,npz
-         do i=is,ie
-            qn1(i,k) = Atm%q(i,j,k,liq_wat)
-            Atm%q(i,j,k,rainwat) = 0.
-            Atm%q(i,j,k,snowwat) = 0.
-            Atm%q(i,j,k,graupel) = 0.
-            if (cld_amt .gt. 0) Atm%q(i,j,k,cld_amt) = 0.
-            if ( Atm%pt(i,j,k) > 273.16 ) then ! > 0C all liq_wat
-               Atm%q(i,j,k,liq_wat) = qn1(i,k)
-               Atm%q(i,j,k,ice_wat) = 0.
+               qn1(i,k) = Atm%q(i,j,k,liq_wat)
+               if (cld_amt .gt. 0) Atm%q(i,j,k,cld_amt) = 0.
+
+               if ( Atm%pt(i,j,k) > 273.16 ) then ! > 0C all liq_wat
+                  Atm%q(i,j,k,liq_wat) = qn1(i,k)
+                  Atm%q(i,j,k,ice_wat) = 0.
 #ifdef ORIG_CLOUDS_PART
-            else if ( Atm%pt(i,j,k) < 258.16 ) then ! < -15C all ice_wat
-               Atm%q(i,j,k,liq_wat) = 0.
-               Atm%q(i,j,k,ice_wat) = qn1(i,k)
-            else ! between -15~0C: linear interpolation
-               Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-258.16)/15.)
-               Atm%q(i,j,k,ice_wat) = qn1(i,k) - Atm%q(i,j,k,liq_wat)
-            endif
-#else
-            else if ( Atm%pt(i,j,k) < 233.16 ) then ! < -40C all ice_wat
-               Atm%q(i,j,k,liq_wat) = 0.
-               Atm%q(i,j,k,ice_wat) = qn1(i,k)
-            else
-               if ( k.eq.1 ) then ! between [-40,0]: linear interpolation
-                  Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-233.16)/40.)
+               else if ( Atm%pt(i,j,k) < 258.16 ) then ! < -15C all ice_wat
+                  Atm%q(i,j,k,liq_wat) = 0.
+                  Atm%q(i,j,k,ice_wat) = qn1(i,k)
+               else ! between -15~0C: linear interpolation
+                  Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-258.16)/15.)
                   Atm%q(i,j,k,ice_wat) = qn1(i,k) - Atm%q(i,j,k,liq_wat)
+               endif
+#else
+               else if ( Atm%pt(i,j,k) < 233.16 ) then ! < -40C all ice_wat
+                  Atm%q(i,j,k,liq_wat) = 0.
+                  Atm%q(i,j,k,ice_wat) = qn1(i,k)
                else
-                  if (Atm%pt(i,j,k)<258.16 .and. Atm%q(i,j,k-1,ice_wat)>1.e-5 ) then
-                     Atm%q(i,j,k,liq_wat) = 0.
-                     Atm%q(i,j,k,ice_wat) = qn1(i,k)
-                  else ! between [-40,0]: linear interpolation
-                     Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-233.16)/40.)
-                     Atm%q(i,j,k,ice_wat) = qn1(i,k) - Atm%q(i,j,k,liq_wat)
-                  endif
+                  if ( k.eq.1 ) then ! between [-40,0]: linear interpolation
+                     Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-233.16)/40.)
+                     Atm%q(i,j,k,ice_wat) = qn1(i,k) - Atm%q(i,j,k,liq_wat)
+                  else
+                     if (Atm%pt(i,j,k)<258.16 .and. Atm%q(i,j,k-1,ice_wat)>1.e-5 ) then
+                        Atm%q(i,j,k,liq_wat) = 0.
+                        Atm%q(i,j,k,ice_wat) = qn1(i,k)
+                     else ! 
between [-40,0]: linear interpolation + Atm%q(i,j,k,liq_wat) = qn1(i,k)*((Atm%pt(i,j,k)-233.16)/40.) + Atm%q(i,j,k,ice_wat) = qn1(i,k) - Atm%q(i,j,k,liq_wat) + endif + endif endif - endif #endif - call mp_auto_conversion(Atm%q(i,j,k,liq_wat), Atm%q(i,j,k,rainwat), & - Atm%q(i,j,k,ice_wat), Atm%q(i,j,k,snowwat) ) + if (Atm%flagstruct%nwat .eq. 6 ) then + Atm%q(i,j,k,rainwat) = 0. + Atm%q(i,j,k,snowwat) = 0. + Atm%q(i,j,k,graupel) = 0. + call mp_auto_conversion(Atm%q(i,j,k,liq_wat), Atm%q(i,j,k,rainwat), & + Atm%q(i,j,k,ice_wat), Atm%q(i,j,k,snowwat) ) + endif + enddo enddo - enddo - endif + endif + endif ! data source /= FV3GFS GAUSSIAN NEMSIO FILE !------------------------------------------------------------- -! map omega +! map omega or w !------- ------------------------------------------------------ - if ( .not. Atm%flagstruct%hydrostatic ) then + if ( (.not. Atm%flagstruct%hydrostatic) .and. (.not. Atm%flagstruct%ncep_ic) ) then do k=1,km do i=is,ie qp(i,k) = omga(i,j,k) enddo enddo call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, -1, 4, Atm%ptop) + if (trim(source) == source_fv3gfs) then do k=1,npz do i=is,ie - atm%w(i,j,k) = qn1(i,k)/atm%delp(i,j,k)*atm%delz(i,j,k) + atm%w(i,j,k) = qn1(i,k) enddo enddo - endif - -5000 continue - -! Add some diagnostics: - call p_maxmin('PS_model (mb)', Atm%ps(is:ie,js:je), is, ie, js, je, 1, 0.01) - call p_maxmin('PT_model', Atm%pt(is:ie,js:je,1:npz), is, ie, js, je, npz, 1.) - do j=js,je - do i=is,ie - wk(i,j) = Atm%phis(i,j)/grav - zh(i,j,km+1) - enddo - enddo - call pmaxmn('ZS_diff (m)', wk, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) - call pmaxmn('Z500 (m)', z500, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) - - do j=js,je - do i=is,ie - wk(i,j) = Atm%ps(i,j) - psc(i,j) - enddo - enddo - call pmaxmn('PS_diff (mb)', wk, is, ie, js, je, 1, 0.01, Atm%gridstruct%area_64, Atm%domain) - - if (is_master()) write(*,*) 'done remap_scalar_nggps' - - end subroutine remap_scalar_nggps - - subroutine remap_scalar_ec(Atm, km, npz, ncnst, ak0, bk0, psc, qa, wc, zh) - type(fv_atmos_type), intent(inout) :: Atm - integer, intent(in):: km, npz, ncnst - real, intent(in):: ak0(km+1), bk0(km+1) - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: psc - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km):: wc - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km,ncnst):: qa - real, intent(in), dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,km+1):: zh -! local: - real, dimension(Atm%bd%is:Atm%bd%ie,km+1):: pe0 - real, dimension(Atm%bd%is:Atm%bd%ie,npz):: qn1, dp2 - real, dimension(Atm%bd%is:Atm%bd%ie,npz+1):: pe1 - real qp(Atm%bd%is:Atm%bd%ie,km) - real wk(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je) -!!! High-precision - real(kind=R_GRID), dimension(Atm%bd%is:Atm%bd%ie,npz+1):: pn1 - real(kind=R_GRID):: gz_fv(npz+1) - real(kind=R_GRID), dimension(2*km+1):: gz, pn - real(kind=R_GRID), dimension(Atm%bd%is:Atm%bd%ie,km+1):: pn0 - real(kind=R_GRID):: pst - real, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: z500 -!!! High-precision - integer i,j,k,l,m,k2, iq - integer sphum, o3mr, liq_wat, ice_wat, rainwat, snowwat, graupel - integer :: is, ie, js, je - - is = Atm%bd%is - ie = Atm%bd%ie - js = Atm%bd%js - je = Atm%bd%je - - sphum = get_tracer_index(MODEL_ATMOS, 'sphum') - liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') - - if ( Atm%flagstruct%nwat .eq. 
6 ) then - ice_wat = get_tracer_index(MODEL_ATMOS, 'ice_wat') - rainwat = get_tracer_index(MODEL_ATMOS, 'rainwat') - snowwat = get_tracer_index(MODEL_ATMOS, 'snowwat') - graupel = get_tracer_index(MODEL_ATMOS, 'graupel') - endif - - k2 = max(10, km/2) - - if (mpp_pe()==1) then - print *, 'In remap_scalar_ec:' - print *, 'ncnst = ', ncnst - print *, 'sphum = ', sphum - print *, 'liq_wat = ', liq_wat - if ( Atm%flagstruct%nwat .eq. 6 ) then - print *, 'rainwat = ', rainwat - print *, 'ice_wat = ', ice_wat - print *, 'snowwat = ', snowwat - print *, 'graupel = ', graupel - endif - endif - -!$OMP parallel do default(none) shared(sphum,ncnst,npz,is,ie,js,je,km,k2,ak0,bk0,psc,zh,qa,wc,Atm,z500) & -!$OMP private(l,m,pst,pn,gz,pe0,pn0,pe1,pn1,dp2,qp,qn1,gz_fv) - do 5000 j=js,je - do k=1,km+1 - do i=is,ie - pe0(i,k) = ak0(k) + bk0(k)*psc(i,j) - pn0(i,k) = log(pe0(i,k)) - enddo - enddo - - do i=is,ie - do k=1,km+1 - pn(k) = pn0(i,k) - gz(k) = zh(i,j,k)*grav - enddo -! Use log-p for interpolation/extrapolation -! mirror image method: - do k=km+2, km+k2 - l = 2*(km+1) - k - gz(k) = 2.*gz(km+1) - gz(l) - pn(k) = 2.*pn(km+1) - pn(l) - enddo - - do k=km+k2-1, 2, -1 - if( Atm%phis(i,j).le.gz(k) .and. Atm%phis(i,j).ge.gz(k+1) ) then - pst = pn(k) + (pn(k+1)-pn(k))*(gz(k)-Atm%phis(i,j))/(gz(k)-gz(k+1)) - go to 123 - endif - enddo -123 Atm%ps(i,j) = exp(pst) - -! ------------------ -! Find 500-mb height -! ------------------ - pst = log(500.e2) - do k=km+k2-1, 2, -1 - if( pst.le.pn(k+1) .and. pst.ge.pn(k) ) then - z500(i,j) = (gz(k+1) + (gz(k)-gz(k+1))*(pn(k+1)-pst)/(pn(k+1)-pn(k)))/grav - go to 125 - endif - enddo -125 continue - - enddo ! i-loop - - do i=is,ie - pe1(i,1) = Atm%ak(1) - pn1(i,1) = log(pe1(i,1)) - enddo - do k=2,npz+1 - do i=is,ie - pe1(i,k) = Atm%ak(k) + Atm%bk(k)*Atm%ps(i,j) - pn1(i,k) = log(pe1(i,k)) - enddo - enddo - -! * Compute delp - do k=1,npz - do i=is,ie - dp2(i,k) = pe1(i,k+1) - pe1(i,k) - Atm%delp(i,j,k) = dp2(i,k) - enddo - enddo - -! map shpum, liq_wat, ice_wat, rainwat, snowwat tracers - do iq=1,ncnst - do k=1,km - do i=is,ie - qp(i,k) = qa(i,j,k,iq) - enddo - enddo - call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, 0, 8, Atm%ptop) - if ( iq==1 ) then - call fillq(ie-is+1, npz, 1, qn1, dp2) - else - call fillz(ie-is+1, npz, 1, qn1, dp2) - endif -! The HiRam step of blending model sphum with NCEP data is obsolete because nggps is always cold starting... - do k=1,npz - do i=is,ie - Atm%q(i,j,k,iq) = qn1(i,k) - enddo - enddo - enddo -!--------------------------------------------------- -! Retrive temperature using EC geopotential height -!--------------------------------------------------- - do i=is,ie -! Make sure FV3 top is lower than GFS; can not do extrapolation above the top at this point - if ( pn1(i,1) .lt. pn0(i,1) ) then - call mpp_error(FATAL,'FV3 top higher than ECMWF') - endif - - do k=1,km+1 - pn(k) = pn0(i,k) - gz(k) = zh(i,j,k)*grav - enddo -!------------------------------------------------- - do k=km+2, km+k2 - l = 2*(km+1) - k - gz(k) = 2.*gz(km+1) - gz(l) - pn(k) = 2.*pn(km+1) - pn(l) - enddo -!------------------------------------------------- - gz_fv(npz+1) = Atm%phis(i,j) - - m = 1 - do k=1,npz -! Searching using FV3 log(pe): pn1 -#ifdef USE_ISOTHERMO - do l=m,km - if ( (pn1(i,k).le.pn(l+1)) .and. (pn1(i,k).ge.pn(l)) ) then - gz_fv(k) = gz(l) + (gz(l+1)-gz(l))*(pn1(i,k)-pn(l))/(pn(l+1)-pn(l)) - goto 555 - elseif ( pn1(i,k) .gt. pn(km+1) ) then -! 
Isothermal under ground; linear in log-p extra-polation - gz_fv(k) = gz(km+1) + (gz_fv(npz+1)-gz(km+1))*(pn1(i,k)-pn(km+1))/(pn1(i,npz+1)-pn(km+1)) - goto 555 - endif - enddo -#else - do l=m,km+k2 - if ( (pn1(i,k).le.pn(l+1)) .and. (pn1(i,k).ge.pn(l)) ) then - gz_fv(k) = gz(l) + (gz(l+1)-gz(l))*(pn1(i,k)-pn(l))/(pn(l+1)-pn(l)) - goto 555 - endif - enddo -#endif -555 m = l - enddo - -! Compute true temperature using hydrostatic balance - do k=1,npz -! qc = 1.-(Atm%q(i,j,k,liq_wat)+Atm%q(i,j,k,rainwat)+Atm%q(i,j,k,ice_wat)+Atm%q(i,j,k,snowwat)) -! Atm%pt(i,j,k) = (gz_fv(k)-gz_fv(k+1))*qc/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*Atm%q(i,j,k,sphum)) ) - Atm%pt(i,j,k) = (gz_fv(k)-gz_fv(k+1))/( rdgas*(pn1(i,k+1)-pn1(i,k))*(1.+zvir*Atm%q(i,j,k,sphum)) ) - enddo - if ( .not. Atm%flagstruct%hydrostatic ) then - do k=1,npz - Atm%delz(i,j,k) = (gz_fv(k+1) - gz_fv(k)) / grav - enddo - endif - - enddo ! i-loop - -!------------------------------------------------------------- -! map omega -!------- ------------------------------------------------------ - if ( .not. Atm%flagstruct%hydrostatic ) then - do k=1,km - do i=is,ie - qp(i,k) = wc(i,j,k) - enddo - enddo - call mappm(km, pe0, qp, npz, pe1, qn1, is,ie, -1, 4, Atm%ptop) + else do k=1,npz do i=is,ie atm%w(i,j,k) = qn1(i,k)/atm%delp(i,j,k)*atm%delz(i,j,k) enddo enddo + endif endif 5000 continue ! Add some diagnostics: + if (.not. Atm%flagstruct%hydrostatic) call p_maxmin('delz_model', Atm%delz, is, ie, js, je, npz, 1.) + call p_maxmin('sphum_model', Atm%q(is:ie,js:je,1:npz,sphum), is, ie, js, je, npz, 1.) + call p_maxmin('liq_wat_model', Atm%q(is:ie,js:je,1:npz,liq_wat), is, ie, js, je, npz, 1.) + if (ice_wat .gt. 0) call p_maxmin('ice_wat_model', Atm%q(is:ie,js:je,1:npz,ice_wat), is, ie, js, je, npz, 1.) call p_maxmin('PS_model (mb)', Atm%ps(is:ie,js:je), is, ie, js, je, 1, 0.01) call p_maxmin('PT_model', Atm%pt(is:ie,js:je,1:npz), is, ie, js, je, npz, 1.) call pmaxmn('ZS_model', Atm%phis(is:ie,js:je)/grav, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) - call pmaxmn('ZS_EC', zh(is:ie,js:je,km+1), is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) + call pmaxmn('ZS_data', zh(is:ie,js:je,km+1), is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) do j=js,je do i=is,ie wk(i,j) = Atm%phis(i,j)/grav - zh(i,j,km+1) @@ -2844,7 +2572,13 @@ subroutine remap_scalar_ec(Atm, km, npz, ncnst, ak0, bk0, psc, qa, wc, zh) enddo enddo call pmaxmn('ZS_diff (m)', wk, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) - call pmaxmn('Z500 (m)', z500, is, ie, js, je, 1, 1., Atm%gridstruct%area_64, Atm%domain) + + if (.not.Atm%gridstruct%bounded_domain) then + call prt_gb_nh_sh('DATA_IC Z500', is,ie, js,je, z500, Atm%gridstruct%area_64(is:ie,js:je), Atm%gridstruct%agrid_64(is:ie,js:je,2)) + if ( .not. 
Atm%flagstruct%hydrostatic ) & + call prt_height('fv3_IC Z500', is,ie, js,je, 3, npz, 500.E2, Atm%phis, Atm%delz, Atm%peln, & + Atm%gridstruct%area_64(is:ie,js:je), Atm%gridstruct%agrid_64(is:ie,js:je,2)) + endif do j=js,je do i=is,ie @@ -2853,7 +2587,9 @@ subroutine remap_scalar_ec(Atm, km, npz, ncnst, ak0, bk0, psc, qa, wc, zh) enddo call pmaxmn('PS_diff (mb)', wk, is, ie, js, je, 1, 0.01, Atm%gridstruct%area_64, Atm%domain) - end subroutine remap_scalar_ec + if (is_master()) write(*,*) 'done remap_scalar' + + end subroutine remap_scalar subroutine remap_scalar_single(Atm, km, npz, ak0, bk0, psc, qa, zh ,iq) type(fv_atmos_type), intent(inout) :: Atm @@ -2917,7 +2653,7 @@ subroutine remap_scalar_single(Atm, km, npz, ak0, bk0, psc, qa, zh ,iq) enddo 123 ps_temp(i,j) = exp(pst) enddo ! i-loop - + do i=is,ie pe1(i,1) = Atm%ak(1) pn1(i,1) = log(pe1(i,1)) @@ -2957,7 +2693,7 @@ subroutine remap_scalar_single(Atm, km, npz, ak0, bk0, psc, qa, zh ,iq) 5000 continue call p_maxmin('o3mr remap', Atm%q(is:ie,js:je,1:npz,iq), is, ie, js, je, npz, 1.) - + deallocate(ps_temp) end subroutine remap_scalar_single @@ -3007,7 +2743,8 @@ subroutine remap_dwinds(km, npz, ak0, bk0, psc, ud, vd, Atm) jsd = Atm%bd%jsd jed = Atm%bd%jed - if (Atm%neststruct%nested) then +!Not sure what this is for + if (Atm%gridstruct%bounded_domain) then do j=jsd,jed do i=isd,ied psd(i,j) = Atm%ps(i,j) @@ -3093,6 +2830,7 @@ subroutine remap_winds(im, jm, km, npz, ak0, bk0, psc, ua, va, Atm) integer :: is, ie, js, je integer :: isd, ied, jsd, jed + integer :: ng is = Atm%bd%is ie = Atm%bd%ie @@ -3102,6 +2840,7 @@ subroutine remap_winds(im, jm, km, npz, ak0, bk0, psc, ua, va, Atm) ied = Atm%bd%ied jsd = Atm%bd%jsd jed = Atm%bd%jed + ng = Atm%bd%ng do 5000 j=js,je @@ -3166,7 +2905,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 real, pointer, dimension(:,:,:) :: agrid ! local: - real, dimension(Atm%bd%isd:Atm%bd%ied,Atm%bd%jsd:Atm%bd%jed,npz):: ut, vt ! winds + real, dimension(Atm%bd%isd:Atm%bd%ied,Atm%bd%jsd:Atm%bd%jed,npz):: ut, vt ! winds real, dimension(Atm%bd%is:Atm%bd%ie,km):: up, vp, tp real, dimension(Atm%bd%is:Atm%bd%ie,km+1):: pe0, pn0 real pt0(km), gz(km+1), pk0(km+1) @@ -3181,7 +2920,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 ! integer sphum, liq_wat, ice_wat, cld_amt integer sphum integer :: is, ie, js, je - integer :: isd, ied, jsd, jed + integer :: isd, ied, jsd, jed, ng is = Atm%bd%is ie = Atm%bd%ie @@ -3191,6 +2930,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 ied = Atm%bd%ied jsd = Atm%bd%jsd jed = Atm%bd%jed + ng = Atm%bd%ng !!NOTE: Only Atm is used in this routine. agrid => Atm%gridstruct%agrid @@ -3201,7 +2941,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 call mpp_error(FATAL,'SPHUM must be 1st tracer') endif - pk0(1) = ak0(1)**kappa + pk0(1) = ak0(1)**kappa do i=1,im-1 rdlon(i) = 1. / (lon(i+1) - lon(i)) @@ -3309,9 +3049,9 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 #else ! * Adjust interpolated ps to model terrain - gz(km+1) = gzc + gz(km+1) = gzc do k=km,1,-1 - gz(k) = gz(k+1) + rdgas*tp(i,k)*(pn0(i,k+1)-pn0(i,k)) + gz(k) = gz(k+1) + rdgas*tp(i,k)*(pn0(i,k+1)-pn0(i,k)) enddo ! 
Only lowest layer potential temp is needed pt0(km) = tp(i,km)/(pk0(km+1)-pk0(km))*(kappa*(pn0(i,km+1)-pn0(i,km))) @@ -3331,7 +3071,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 123 Atm%ps(i,j) = pst**(1./kappa) #endif enddo !i-loop - + ! * Compute delp from ps do i=is,ie @@ -3350,7 +3090,7 @@ subroutine remap_xyz( im, jbeg, jend, jm, km, npz, nq, ncnst, lon, lat, ak0, bk0 Atm%delp(i,j,k) = pe1(i,k+1) - pe1(i,k) enddo enddo - + ! Use kord=9 for winds; kord=11 for tracers !------ ! map u @@ -3456,7 +3196,7 @@ subroutine cubed_a2d( npx, npy, npz, ua, va, u, v, gridstruct, fv_domain, bd ) edge_vect_e => gridstruct%edge_vect_e edge_vect_s => gridstruct%edge_vect_s edge_vect_n => gridstruct%edge_vect_n - + ew => gridstruct%ew es => gridstruct%es @@ -3495,7 +3235,7 @@ subroutine cubed_a2d( npx, npy, npz, ua, va, u, v, gridstruct, fv_domain, bd ) enddo ! --- E_W edges (for v-wind): - if (.not. gridstruct%nested) then + if (.not. gridstruct%bounded_domain) then if ( is==1) then i = 1 do j=js,je @@ -3577,7 +3317,7 @@ subroutine cubed_a2d( npx, npy, npz, ua, va, u, v, gridstruct, fv_domain, bd ) enddo endif - endif ! .not. nested + endif ! .not. bounded_domain do j=js,je+1 do i=is,ie @@ -3593,7 +3333,7 @@ subroutine cubed_a2d( npx, npy, npz, ua, va, u, v, gridstruct, fv_domain, bd ) ve(3,i,j)*ew(3,i,j,2) enddo enddo - + enddo ! k-loop end subroutine cubed_a2d @@ -3821,7 +3561,7 @@ subroutine compute_zh(im, jm, levp, ak0, bk0, ps, zs, t, q, nq, zh ) real, dimension(im,levp+1):: pe0, pn0 ! real:: qc integer:: i,j,k - + !$OMP parallel do default(none) shared(im,jm,levp,ak0,bk0,zs,ps,t,q,zh) & !$OMP private(pe0,pn0) do j = 1, jm @@ -3846,8 +3586,6 @@ subroutine compute_zh(im, jm, levp, ak0, bk0, ps, zs, t, q, nq, zh ) enddo enddo - !if(is_master()) call pmaxmin( 'zh levp+1', zh(:,:,levp+1), im, jm, 1.) - end subroutine compute_zh subroutine get_staggered_grid( is, ie, js, je, isd, ied, jsd, jed, pt_b, pt_c, pt_d) @@ -3879,5 +3617,27 @@ subroutine get_staggered_grid( is, ie, js, je, isd, ied, jsd, jed, pt_b, pt_c, p end subroutine get_staggered_grid + subroutine get_data_source(source,regional) +! +! This routine extracts the data source information if it is present in the datafile. +! + character (len = 80) :: source + integer :: ncids,sourceLength + logical :: lstatus,regional +! +! Use the fms call here so we can actually get the return code value. +! + if (regional) then + lstatus = get_global_att_value('INPUT/gfs_data.nc',"source", source) + else + lstatus = get_global_att_value('INPUT/gfs_data.tile1.nc',"source", source) + endif + if (.not. 
lstatus) then + if (mpp_pe() == 0) write(0,*) 'INPUT source not found ',lstatus,' set source=No Source Attribute' + source='No Source Attribute' + endif + end subroutine get_data_source + + end module external_ic_mod diff --git a/tools/external_sst.F90 b/tools/external_sst.F90 index d9dd496d3..96b531928 100644 --- a/tools/external_sst.F90 +++ b/tools/external_sst.F90 @@ -34,8 +34,4 @@ module external_sst_mod public i_sst, j_sst, sst_ncep, sst_anom, forecast_mode, use_ncep_sst -!---- version number ----- -character(len=128) :: version = '$Id$' -character(len=128) :: tagname = '$Name$' - end module external_sst_mod diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index e18140cd0..68c1621b3 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -21,16 +21,17 @@ module fv_diagnostics_mod use constants_mod, only: grav, rdgas, rvgas, pi=>pi_8, radius, kappa, WTMAIR, WTMCO2, & - omega, hlv, cp_air, cp_vapor - use fms_io_mod, only: set_domain, nullify_domain + omega, hlv, cp_air, cp_vapor, TFREEZE + use fms_mod, only: write_version_number + use fms_io_mod, only: set_domain, nullify_domain, write_version_number use time_manager_mod, only: time_type, get_date, get_time - use mpp_domains_mod, only: domain2d, mpp_update_domains, DGRID_NE + use mpp_domains_mod, only: domain2d, mpp_update_domains, DGRID_NE, NORTH, EAST use diag_manager_mod, only: diag_axis_init, register_diag_field, & - register_static_field, send_data, diag_grid_init - use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, & + register_static_field, send_data, diag_grid_init, & + diag_field_add_attribute + use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_diag_type, fv_grid_bounds_type, & R_GRID - !!! CLEANUP needs rem oval? - use fv_mapz_mod, only: E_Flux, moist_cv + use fv_mapz_mod, only: E_Flux, moist_cv, moist_cp use fv_mp_mod, only: mp_reduce_sum, mp_reduce_min, mp_reduce_max, is_master use fv_eta_mod, only: get_eta_level, gw_1d use fv_grid_utils_mod, only: g_sum @@ -40,16 +41,30 @@ module fv_diagnostics_mod use tracer_manager_mod, only: get_tracer_names, get_number_tracers, get_tracer_index use field_manager_mod, only: MODEL_ATMOS - use mpp_mod, only: mpp_error, FATAL, stdlog, mpp_pe, mpp_root_pe, mpp_sum, mpp_max + use mpp_mod, only: mpp_error, FATAL, stdlog, mpp_pe, mpp_root_pe, mpp_sum, mpp_max, NOTE, input_nml_file + use mpp_io_mod, only: mpp_flush use sat_vapor_pres_mod, only: compute_qs, lookup_es - use fv_arrays_mod, only: max_step + use fv_arrays_mod, only: max_step + use gfdl_cloud_microphys_mod, only: wqs1, qsmith_init + + use column_diagnostics_mod, only: column_diagnostics_init, & + initialize_diagnostic_columns, & + column_diagnostics_header, & + close_column_diagnostics_units + implicit none private + interface range_check + module procedure range_check_3d + module procedure range_check_2d + end interface range_check real, parameter:: missing_value = -1.e10 + real, parameter:: missing_value2 = -1.e3 ! for variables with many missing values + real, parameter:: missing_value3 = 1.e10 ! for variables where we look for smallest values real :: ginv real :: pk0 logical master @@ -63,9 +78,9 @@ module fv_diagnostics_mod logical :: module_is_initialized=.false. logical :: prt_minmax =.false. logical :: m_calendar - integer sphum, liq_wat, ice_wat ! GFDL physics - integer rainwat, snowwat, graupel - integer :: istep + integer sphum, liq_wat, ice_wat, cld_amt ! 
GFDL physics + integer rainwat, snowwat, graupel, o3mr + integer :: istep, mp_top real :: ptop real, parameter :: rad2deg = 180./pi @@ -77,15 +92,50 @@ module fv_diagnostics_mod public :: fv_diag_init, fv_time, fv_diag, prt_mxm, prt_maxmin, range_check!, id_divg, id_te public :: prt_mass, prt_minmax, ppme, fv_diag_init_gn, z_sum, sphum_ll_fix, eqv_pot, qcly0, gn - public :: get_height_given_pressure, interpolate_vertical, rh_calc, get_height_field - - -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' + public :: prt_height, prt_gb_nh_sh, interpolate_vertical, rh_calc, get_height_field + public :: get_height_given_pressure, get_vorticity +#ifdef FEWER_PLEVS + integer, parameter :: nplev = 10 ! 31 ! lmh +#else integer, parameter :: nplev = 31 +#endif integer :: levs(nplev) + integer :: k100, k200, k500 + + integer, parameter :: MAX_DIAG_COLUMN = 100 + logical, allocatable, dimension(:,:) :: do_debug_diag_column + integer, allocatable, dimension(:) :: diag_debug_units, diag_debug_i, diag_debug_j + real, allocatable, dimension(:) :: diag_debug_lon, diag_debug_lat + character(16), dimension(MAX_DIAG_COLUMN) :: diag_debug_names + real, dimension(MAX_DIAG_COLUMN) :: diag_debug_lon_in, diag_debug_lat_in + + logical, allocatable, dimension(:,:) :: do_sonde_diag_column + integer, allocatable, dimension(:) :: diag_sonde_units, diag_sonde_i, diag_sonde_j + real, allocatable, dimension(:) :: diag_sonde_lon, diag_sonde_lat + character(16), dimension(MAX_DIAG_COLUMN) :: diag_sonde_names + real, dimension(MAX_DIAG_COLUMN) :: diag_sonde_lon_in, diag_sonde_lat_in + + logical :: do_diag_debug = .false. + logical :: do_diag_sonde = .false. + logical :: prt_sounding = .false. + integer :: sound_freq = 3 + integer :: num_diag_debug = 0 + integer :: num_diag_sonde = 0 + character(100) :: runname = 'test' + integer :: yr_init, mo_init, dy_init, hr_init, mn_init, sec_init + + real :: vrange(2), vsrange(2), wrange(2), trange(2), slprange(2), rhrange(2) + + + + namelist /fv_diag_column_nml/ do_diag_debug, do_diag_sonde, sound_freq, & + diag_debug_lon_in, diag_debug_lat_in, diag_debug_names, & + diag_sonde_lon_in, diag_sonde_lat_in, diag_sonde_names, runname + +! version number of this module +! Include variable "version" to be written to log file. 
+#include contains @@ -98,7 +148,6 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) real, allocatable :: grid_xt(:), grid_yt(:), grid_xe(:), grid_ye(:), grid_xn(:), grid_yn(:) real, allocatable :: grid_x(:), grid_y(:) - real :: vrange(2), vsrange(2), wrange(2), trange(2), slprange(2), rhrange(2) real, allocatable :: a3(:,:,:) real :: pfull(npz) real :: hyam(npz), hybm(npz) @@ -107,7 +156,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) integer :: id_bk, id_pk, id_area, id_lon, id_lat, id_lont, id_latt, id_phalf, id_pfull integer :: id_hyam, id_hybm integer :: id_plev - integer :: i, j, k, n, ntileMe, id_xt, id_yt, id_x, id_y, id_xe, id_ye, id_xn, id_yn + integer :: i, j, k, m, n, ntileMe, id_xt, id_yt, id_x, id_y, id_xe, id_ye, id_xn, id_yn integer :: isc, iec, jsc, jec logical :: used @@ -120,6 +169,11 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) integer :: ncnst integer :: axe2(3) + character(len=64) :: errmsg + logical :: exists + integer :: nlunit, ios + + call write_version_number ( 'FV_DIAGNOSTICS_MOD', version ) idiag => Atm(1)%idiag @@ -140,6 +194,8 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') graupel = get_tracer_index (MODEL_ATMOS, 'graupel') + o3mr = get_tracer_index (MODEL_ATMOS, 'o3mr') + cld_amt = get_tracer_index (MODEL_ATMOS, 'cld_amt') ! valid range for some fields @@ -153,7 +209,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) #ifdef HIWPP trange = (/ 5., 350. /) ! temperature #else - trange = (/ 100., 350. /) ! temperature + trange = (/ 100., 400. /) ! temperature #endif slprange = (/800., 1200./) ! sea-level-pressure @@ -163,6 +219,15 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) allocate ( idiag%phalf(npz+1) ) call get_eta_level(Atm(1)%npz, p_ref, pfull, idiag%phalf, Atm(1)%ak, Atm(1)%bk, 0.01) + mp_top = 1 + do k=1,npz + if ( pfull(k) > 30.e2 ) then + mp_top = k + exit + endif + enddo + if ( is_master() ) write(*,*) 'mp_top=', mp_top, 'pfull=', pfull(mp_top) + ! allocate(grid_xt(npx-1), grid_yt(npy-1), grid_xe(npx), grid_ye(npy-1), grid_xn(npx-1), grid_yn(npy)) allocate(grid_xt(npx-1), grid_yt(npy-1)) grid_xt = (/ (i, i=1,npx-1) /) @@ -209,9 +274,9 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) ! set_name=trim(field), Domain2=Domain, tile_count=n) id_x = diag_axis_init('grid_x',grid_x,'degrees_E','x','cell corner longitude', & - set_name=trim(field),Domain2=Atm(n)%Domain, tile_count=n) + set_name=trim(field), Domain2=Atm(n)%Domain, tile_count=n, domain_position=EAST) id_y = diag_axis_init('grid_y',grid_y,'degrees_N','y','cell corner latitude', & - set_name=trim(field), Domain2=Atm(n)%Domain, tile_count=n) + set_name=trim(field), Domain2=Atm(n)%Domain, tile_count=n, domain_position=NORTH) ! end do ! deallocate(grid_xt, grid_yt, grid_xe, grid_ye, grid_xn, grid_yn) @@ -263,7 +328,18 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) ! Selected pressure levels ! SJL note: 31 is enough here; if you need more levels you should do it OFF line ! do not add more to prevent the model from slowing down too much. +#ifdef FEWER_PLEVS + levs = (/50,100,200,250,300,500,750,850,925,1000/) ! lmh mini-levs for MJO simulations + k100 = 2 + k200 = 3 + k500 = 6 +#else levs = (/1,2,3,5,7,10,20,30,50,70,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,925,950,975,1000/) + k100 = 11 + k200 = 13 + k500 = 19 +#endif + ! 
id_plev = diag_axis_init('plev', levs(:)*1.0, 'mb', 'z', & 'actual pressure level', direction=-1, set_name="dynamics") @@ -287,9 +363,12 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) 'latitude', 'degrees_N' ) id_area = register_static_field ( trim(field), 'area', axes(1:2), & 'cell area', 'm**2' ) + if (id_area > 0) then + call diag_field_add_attribute (id_area, 'cell_methods', 'area: sum') + endif #ifndef DYNAMICS_ZS idiag%id_zsurf = register_static_field ( trim(field), 'zsurf', axes(1:2), & - 'surface height', 'm' ) + 'surface height', 'm', interp_method='conserve_order1' ) #endif idiag%id_zs = register_static_field ( trim(field), 'zs', axes(1:2), & 'Original Mean Terrain', 'm' ) @@ -397,8 +476,8 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) allocate(idiag%id_tracer_dvmr(ncnst)) allocate(idiag%w_mr(ncnst)) idiag%id_tracer(:) = 0 - idiag%id_tracer_dmmr(:) = 0 - idiag%id_tracer_dvmr(:) = 0 + idiag%id_tracer_dmmr(:) = 0 + idiag%id_tracer_dvmr(:) = 0 idiag%w_mr(:) = 0.E0 allocate(idiag%id_u(nplev)) @@ -420,13 +499,13 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) #ifdef DYNAMICS_ZS idiag%id_zsurf = register_diag_field ( trim(field), 'zsurf', axes(1:2), Time, & - 'surface height', 'm') + 'surface height', 'm', interp_method='conserve_order1') #endif !------------------- ! Surface pressure !------------------- idiag%id_ps = register_diag_field ( trim(field), 'ps', axes(1:2), Time, & - 'surface pressure', 'Pa', missing_value=missing_value ) + 'surface pressure', 'Pa', missing_value=missing_value, range=(/40000.0, 110000.0/)) !------------------- ! Mountain torque @@ -441,6 +520,32 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) idiag%id_amdt = register_diag_field ( trim(field), 'amdt', axes(1:2), Time, & 'angular momentum error', 'kg*m^2/s^2', missing_value=missing_value ) +!------------------- +!! 
3D Tendency terms from physics +!------------------- + if (Atm(n)%flagstruct%write_3d_diags) then + + idiag%id_T_dt_phys = register_diag_field ( trim(field), 'T_dt_phys', axes(1:3), Time, & + 'temperature tendency from physics', 'K/s', missing_value=missing_value ) + if (idiag%id_T_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_t_dt(isc:iec,jsc:jec,npz)) + idiag%id_u_dt_phys = register_diag_field ( trim(field), 'u_dt_phys', axes(1:3), Time, & + 'zonal wind tendency from physics', 'm/s/s', missing_value=missing_value ) + if (idiag%id_u_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_u_dt(isc:iec,jsc:jec,npz)) + idiag%id_v_dt_phys = register_diag_field ( trim(field), 'v_dt_phys', axes(1:3), Time, & + 'meridional wind tendency from physics', 'm/s/s', missing_value=missing_value ) + if (idiag%id_v_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_v_dt(isc:iec,jsc:jec,npz)) + + idiag%id_qv_dt_phys = register_diag_field ( trim(field), 'qv_dt_phys', axes(1:3), Time, & + 'water vapor specific humidity tendency from physics', 'kg/kg/s', missing_value=missing_value ) + if (idiag%id_qv_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_qv_dt(isc:iec,jsc:jec,npz)) + idiag%id_ql_dt_phys = register_diag_field ( trim(field), 'ql_dt_phys', axes(1:3), Time, & + 'total liquid water tendency from physics', 'kg/kg/s', missing_value=missing_value ) + if (idiag%id_ql_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_ql_dt(isc:iec,jsc:jec,npz)) + idiag%id_qi_dt_phys = register_diag_field ( trim(field), 'qi_dt_phys', axes(1:3), Time, & + 'total ice water tendency from physics', 'kg/kg/s', missing_value=missing_value ) + if (idiag%id_qi_dt_phys > 0) allocate (Atm(n)%phys_diag%phys_qi_dt(isc:iec,jsc:jec,npz)) + endif + ! do i=1,nplev write(plev,'(I5)') levs(i) @@ -464,34 +569,38 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) trim(adjustl(plev))//'-mb omega', 'Pa/s', missing_value=missing_value) enddo - idiag%id_u_plev = register_diag_field ( trim(field), 'u_plev', axe2(1:3), Time, & - 'zonal wind', 'm/sec', missing_value=missing_value, range=vrange ) - idiag%id_v_plev = register_diag_field ( trim(field), 'v_plev', axe2(1:3), Time, & - 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange ) - idiag%id_t_plev = register_diag_field ( trim(field), 't_plev', axe2(1:3), Time, & - 'temperature', 'K', missing_value=missing_value, range=trange ) - idiag%id_h_plev = register_diag_field ( trim(field), 'h_plev', axe2(1:3), Time, & - 'height', 'm', missing_value=missing_value ) - idiag%id_q_plev = register_diag_field ( trim(field), 'q_plev', axe2(1:3), Time, & - 'specific humidity', 'kg/kg', missing_value=missing_value ) - idiag%id_omg_plev = register_diag_field ( trim(field), 'omg_plev', axe2(1:3), Time, & - 'omega', 'Pa/s', missing_value=missing_value ) + if (Atm(n)%flagstruct%write_3d_diags) then + idiag%id_u_plev = register_diag_field ( trim(field), 'u_plev', axe2(1:3), Time, & + 'zonal wind', 'm/sec', missing_value=missing_value, range=vrange ) + idiag%id_v_plev = register_diag_field ( trim(field), 'v_plev', axe2(1:3), Time, & + 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange ) + idiag%id_t_plev = register_diag_field ( trim(field), 't_plev', axe2(1:3), Time, & + 'temperature', 'K', missing_value=missing_value, range=trange ) + idiag%id_h_plev = register_diag_field ( trim(field), 'h_plev', axe2(1:3), Time, & + 'height', 'm', missing_value=missing_value ) + idiag%id_q_plev = register_diag_field ( trim(field), 'q_plev', axe2(1:3), Time, & + 'specific humidity', 'kg/kg', 
missing_value=missing_value ) + idiag%id_omg_plev = register_diag_field ( trim(field), 'omg_plev', axe2(1:3), Time, & + 'omega', 'Pa/s', missing_value=missing_value ) + endif + ! flag for calculation of geopotential if ( all(idiag%id_h(minloc(abs(levs-10)))>0) .or. all(idiag%id_h(minloc(abs(levs-50)))>0) .or. & all(idiag%id_h(minloc(abs(levs-100)))>0) .or. all(idiag%id_h(minloc(abs(levs-200)))>0) .or. & all(idiag%id_h(minloc(abs(levs-250)))>0) .or. all(idiag%id_h(minloc(abs(levs-300)))>0) .or. & all(idiag%id_h(minloc(abs(levs-500)))>0) .or. all(idiag%id_h(minloc(abs(levs-700)))>0) .or. & - all(idiag%id_h(minloc(abs(levs-850)))>0) .or. all(idiag%id_h(minloc(abs(levs-1000)))>0) ) then - idiag%id_hght = 1 + all(idiag%id_h(minloc(abs(levs-850)))>0) .or. all(idiag%id_h(minloc(abs(levs-925)))>0) .or. & + all(idiag%id_h(minloc(abs(levs-1000)))>0) ) then + idiag%id_any_hght = 1 else - idiag%id_hght = 0 + idiag%id_any_hght = 0 endif !----------------------------- ! mean temp between 300-500 mb !----------------------------- idiag%id_tm = register_diag_field (trim(field), 'tm', axes(1:2), Time, & - 'mean 300-500 mb temp', 'K', missing_value=missing_value ) + 'mean 300-500 mb temp', 'K', missing_value=missing_value, range=(/140.0,400.0/) ) !------------------- ! Sea-level-pressure @@ -510,7 +619,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) !------------------------------------------ idiag%id_pmaskv2 = register_diag_field(TRIM(field), 'pmaskv2', axes(1:2), Time,& & 'masking pressure at lowest level', 'mb', missing_value=missing_value) - + !------------------- ! Hurricane scales: !------------------- @@ -535,84 +644,173 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) !------------------- ! A grid winds (lat-lon) !------------------- - idiag%id_ua = register_diag_field ( trim(field), 'ucomp', axes(1:3), Time, & - 'zonal wind', 'm/sec', missing_value=missing_value, range=vrange ) - idiag%id_va = register_diag_field ( trim(field), 'vcomp', axes(1:3), Time, & - 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange) - if ( .not. Atm(n)%flagstruct%hydrostatic ) & - idiag%id_w = register_diag_field ( trim(field), 'w', axes(1:3), Time, & + if (Atm(n)%flagstruct%write_3d_diags) then + idiag%id_ua = register_diag_field ( trim(field), 'ucomp', axes(1:3), Time, & + 'zonal wind', 'm/sec', missing_value=missing_value, range=vrange ) + idiag%id_va = register_diag_field ( trim(field), 'vcomp', axes(1:3), Time, & + 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange) + if ( .not. 
Atm(n)%flagstruct%hydrostatic ) &
+         idiag%id_w = register_diag_field ( trim(field), 'w', axes(1:3), Time, &
            'vertical wind', 'm/sec', missing_value=missing_value, range=wrange )
-   idiag%id_pt = register_diag_field ( trim(field), 'temp', axes(1:3), Time, &
-        'temperature', 'K', missing_value=missing_value, range=trange )
-   idiag%id_ppt = register_diag_field ( trim(field), 'ppt', axes(1:3), Time, &
-        'potential temperature perturbation', 'K', missing_value=missing_value )
-   idiag%id_theta_e = register_diag_field ( trim(field), 'theta_e', axes(1:3), Time, &
-        'theta_e', 'K', missing_value=missing_value )
-   idiag%id_omga = register_diag_field ( trim(field), 'omega', axes(1:3), Time, &
-        'omega', 'Pa/s', missing_value=missing_value )
-   idiag%id_divg = register_diag_field ( trim(field), 'divg', axes(1:3), Time, &
-        'mean divergence', '1/s', missing_value=missing_value )
-
-   idiag%id_rh = register_diag_field ( trim(field), 'rh', axes(1:3), Time, &
-        'Relative Humidity', '%', missing_value=missing_value )
-!        'Relative Humidity', '%', missing_value=missing_value, range=rhrange )
+      idiag%id_pt = register_diag_field ( trim(field), 'temp', axes(1:3), Time, &
+           'temperature', 'K', missing_value=missing_value, range=trange )
+      idiag%id_ppt = register_diag_field ( trim(field), 'ppt', axes(1:3), Time, &
+           'potential temperature perturbation', 'K', missing_value=missing_value )
+      idiag%id_theta_e = register_diag_field ( trim(field), 'theta_e', axes(1:3), Time, &
+           'theta_e', 'K', missing_value=missing_value )
+      idiag%id_omga = register_diag_field ( trim(field), 'omega', axes(1:3), Time, &
+           'omega', 'Pa/s', missing_value=missing_value )
+      idiag%id_divg = register_diag_field ( trim(field), 'divg', axes(1:3), Time, &
+           'mean divergence', '1/s', missing_value=missing_value )
+
+      idiag%id_hght3d = register_diag_field( trim(field), 'hght', axes(1:3), Time, &
+           'height', 'm', missing_value=missing_value )
+
+      idiag%id_rh = register_diag_field ( trim(field), 'rh', axes(1:3), Time, &
+           'Relative Humidity', '%', missing_value=missing_value )
+      ! 'Relative Humidity', '%', missing_value=missing_value, range=rhrange )
+      idiag%id_delp = register_diag_field ( trim(field), 'delp', axes(1:3), Time, &
+           'pressure thickness', 'Pa', missing_value=missing_value )
+      if ( .not. Atm(n)%flagstruct%hydrostatic ) &
+           idiag%id_delz = register_diag_field ( trim(field), 'delz', axes(1:3), Time, &
+           'height thickness', 'm', missing_value=missing_value )
+      if( Atm(n)%flagstruct%hydrostatic ) then
+         idiag%id_pfhy = register_diag_field ( trim(field), 'pfhy', axes(1:3), Time, &
+              'hydrostatic pressure', 'Pa', missing_value=missing_value )
+      else
+         idiag%id_pfnh = register_diag_field ( trim(field), 'pfnh', axes(1:3), Time, &
+              'non-hydrostatic pressure', 'Pa', missing_value=missing_value )
+      endif
+      !--------------------
+      ! 3D Condensate
+      !--------------------
+      idiag%id_qn = register_diag_field ( trim(field), 'qn', axes(1:3), Time, &
+           'cloud condensate', 'kg/m/s^2', missing_value=missing_value )
+      idiag%id_qp = register_diag_field ( trim(field), 'qp', axes(1:3), Time, &
+           'precip condensate', 'kg/m/s^2', missing_value=missing_value )
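Every field in this block follows the same FMS diag_manager contract: register_diag_field returns an integer handle that is positive only when the diag_table requests the field, and the model guards both the computation and the send_data call on that handle. A schematic of the pattern; the field name 'example' and the work3d array are placeholders, while field, axes, Time, missing_value and the loop bounds are reused from this routine's scope:

    integer :: id_example
    logical :: used

    ! registration, once at init time; id_example stays <= 0 unless the
    ! diag_table asks for the field
    id_example = register_diag_field ( trim(field), 'example', axes(1:3), Time, &
         'an example 3D field', 'K', missing_value=missing_value )

    ! each diagnostic step: compute and post the field only when requested,
    ! so unrequested diagnostics cost nothing
    if ( id_example > 0 ) then
       used = send_data(id_example, work3d(isc:iec,jsc:jec,1:npz), Time)
    endif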
+      ! fast moist phys tendencies:
+      idiag%id_mdt = register_diag_field ( trim(field), 'mdt', axes(1:3), Time, &
+           'DT/Dt: fast moist phys', 'deg/sec', missing_value=missing_value )
+      idiag%id_qdt = register_diag_field ( trim(field), 'qdt', axes(1:3), Time, &
+           'Dqv/Dt: fast moist phys', 'kg/kg/sec', missing_value=missing_value )
+      idiag%id_dbz = register_diag_field ( trim(field), 'reflectivity', axes(1:3), time, &
+           'Stoelinga simulated reflectivity', 'dBz', missing_value=missing_value)
+
+      !--------------------
+      ! Relative vorticity
+      !--------------------
+      idiag%id_vort = register_diag_field ( trim(field), 'vort', axes(1:3), Time, &
+           'vorticity', '1/s', missing_value=missing_value )
+      !--------------------
+      ! Potential vorticity
+      !--------------------
+      idiag%id_pv = register_diag_field ( trim(field), 'pv', axes(1:3), Time, &
+           'potential vorticity', '1/s', missing_value=missing_value )
+
+      ! -------------------
+      ! Vertical flux correlation terms (good for averages)
+      ! -------------------
+      idiag%id_uw = register_diag_field ( trim(field), 'uw', axes(1:3), Time, &
+           'vertical zonal momentum flux', 'N/m**2', missing_value=missing_value )
+      idiag%id_vw = register_diag_field ( trim(field), 'vw', axes(1:3), Time, &
+           'vertical meridional momentum flux', 'N/m**2', missing_value=missing_value )
+      idiag%id_hw = register_diag_field ( trim(field), 'hw', axes(1:3), Time, &
+           'vertical heat flux', 'W/m**2', missing_value=missing_value )
+      idiag%id_qvw = register_diag_field ( trim(field), 'qvw', axes(1:3), Time, &
+           'vertical water vapor flux', 'kg/m**2/s', missing_value=missing_value )
+      idiag%id_qlw = register_diag_field ( trim(field), 'qlw', axes(1:3), Time, &
+           'vertical liquid water flux', 'kg/m**2/s', missing_value=missing_value )
+      idiag%id_qiw = register_diag_field ( trim(field), 'qiw', axes(1:3), Time, &
+           'vertical ice water flux', 'kg/m**2/s', missing_value=missing_value )
+      idiag%id_o3w = register_diag_field ( trim(field), 'o3w', axes(1:3), Time, &
+           'vertical ozone flux', 'kg/m**2/s', missing_value=missing_value )
+
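The flux-correlation fields above are layer products whose time averages give eddy fluxes; a vertical momentum flux in N/m**2, for instance, is the density-weighted product of the two wind components. A single-column sketch under that assumption, with density diagnosed hydrostatically from delp and delz; the names and the exact weighting are illustrative, not the model's internal computation:

    subroutine momentum_flux_column(npz, grav, u, w, delp, delz, uw)
      implicit none
      integer, intent(in) :: npz
      real, intent(in)  :: grav                 ! gravity (m/s2)
      real, intent(in)  :: u(npz), w(npz)       ! zonal and vertical wind (m/s)
      real, intent(in)  :: delp(npz), delz(npz) ! layer thickness in pressure (Pa) and height (m)
      real, intent(out) :: uw(npz)              ! flux product (N/m**2)
      integer :: k
      real :: rho
      do k = 1, npz
         ! hydrostatic layer density; abs() guards against a height
         ! thickness stored with a negative sign convention
         rho   = delp(k) / ( grav*abs(delz(k)) )
         uw(k) = rho * u(k) * w(k)
      enddo
    end subroutine momentum_flux_column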
+!--------------------
+! 3D flux terms
+!--------------------
+      idiag%id_uq = register_diag_field ( trim(field), 'uq', axes(1:3), Time, &
+           'zonal moisture flux', 'kg/kg*m/sec', missing_value=missing_value )
+      idiag%id_vq = register_diag_field ( trim(field), 'vq', axes(1:3), Time, &
+           'meridional moisture flux', 'kg/kg*m/sec', missing_value=missing_value )
+
+      idiag%id_ut = register_diag_field ( trim(field), 'ut', axes(1:3), Time, &
+           'zonal heat flux', 'K*m/sec', missing_value=missing_value )
+      idiag%id_vt = register_diag_field ( trim(field), 'vt', axes(1:3), Time, &
+           'meridional heat flux', 'K*m/sec', missing_value=missing_value )
+
+      idiag%id_uu = register_diag_field ( trim(field), 'uu', axes(1:3), Time, &
+           'zonal flux of zonal wind', '(m/sec)^2', missing_value=missing_value )
+      idiag%id_uv = register_diag_field ( trim(field), 'uv', axes(1:3), Time, &
+           'zonal flux of meridional wind', '(m/sec)^2', missing_value=missing_value )
+      idiag%id_vv = register_diag_field ( trim(field), 'vv', axes(1:3), Time, &
+           'meridional flux of meridional wind', '(m/sec)^2', missing_value=missing_value )
+
+      if(.not.Atm(n)%flagstruct%hydrostatic) then
+         idiag%id_wq = register_diag_field ( trim(field), 'wq', axes(1:3), Time, &
+              'vertical moisture flux', 'kg/kg*m/sec', missing_value=missing_value )
+         idiag%id_wt = register_diag_field ( trim(field), 'wt', axes(1:3), Time, &
+              'vertical heat flux', 'K*m/sec', missing_value=missing_value )
+         idiag%id_ww = register_diag_field ( trim(field), 'ww', axes(1:3), Time, &
+              'vertical flux of vertical wind', '(m/sec)^2', missing_value=missing_value )
+      endif
+
+!--------------------
+! vertical integral of 3D flux terms
+!--------------------
+      idiag%id_iuq = register_diag_field ( trim(field), 'uq_vi', axes(1:2), Time, &
+           'vertical integral of uq', 'kg/kg*m/sec*Pa', missing_value=missing_value )
+      idiag%id_ivq = register_diag_field ( trim(field), 'vq_vi', axes(1:2), Time, &
+           'vertical integral of vq', 'kg/kg*m/sec*Pa', missing_value=missing_value )
+
+      idiag%id_iut = register_diag_field ( trim(field), 'ut_vi', axes(1:2), Time, &
+           'vertical integral of ut', 'K*m/sec*Pa', missing_value=missing_value )
+      idiag%id_ivt = register_diag_field ( trim(field), 'vt_vi', axes(1:2), Time, &
+           'vertical integral of vt', 'K*m/sec*Pa', missing_value=missing_value )
+
+      idiag%id_iuu = register_diag_field ( trim(field), 'uu_vi', axes(1:2), Time, &
+           'vertical integral of uu', '(m/sec)^2*Pa', missing_value=missing_value )
+      idiag%id_iuv = register_diag_field ( trim(field), 'uv_vi', axes(1:2), Time, &
+           'vertical integral of uv', '(m/sec)^2*Pa', missing_value=missing_value )
+      idiag%id_ivv = register_diag_field ( trim(field), 'vv_vi', axes(1:2), Time, &
+           'vertical integral of vv', '(m/sec)^2*Pa', missing_value=missing_value )
+
+      if(.not.Atm(n)%flagstruct%hydrostatic) then
+         idiag%id_iwq = register_diag_field ( trim(field), 'wq_vi', axes(1:2), Time, &
+              'vertical integral of wq', 'kg/kg*m/sec*Pa', missing_value=missing_value )
+         idiag%id_iwt = register_diag_field ( trim(field), 'wt_vi', axes(1:2), Time, &
+              'vertical integral of wt', 'K*m/sec*Pa', missing_value=missing_value )
+         idiag%id_iuw = register_diag_field ( trim(field), 'uw_vi', axes(1:2), Time, &
+              'vertical integral of uw', '(m/sec)^2*Pa', missing_value=missing_value )
+         idiag%id_ivw = register_diag_field ( trim(field), 'vw_vi', axes(1:2), Time, &
+              'vertical integral of vw', '(m/sec)^2*Pa', missing_value=missing_value )
+         idiag%id_iww = register_diag_field ( trim(field), 'ww_vi', axes(1:2), Time, &
+              'vertical integral of ww', '(m/sec)^2*Pa', missing_value=missing_value )
+      endif
+
+   endif
+
 ! Total energy (only when moist_phys = .T.)
    idiag%id_te = register_diag_field ( trim(field), 'te', axes(1:2), Time, &
         'Total Energy', 'J/kg', missing_value=missing_value )
 ! Total Kinetic energy
    idiag%id_ke = register_diag_field ( trim(field), 'ke', axes(1:2), Time, &
         'Total KE', 'm^2/s^2', missing_value=missing_value )
-   idiag%id_delp = register_diag_field ( trim(field), 'delp', axes(1:3), Time, &
-        'pressure thickness', 'pa', missing_value=missing_value )
-   if ( .not. Atm(n)%flagstruct%hydrostatic ) &
-   idiag%id_delz = register_diag_field ( trim(field), 'delz', axes(1:3), Time, &
-        'height thickness', 'm', missing_value=missing_value )
-   if( Atm(n)%flagstruct%hydrostatic ) then
-      idiag%id_pfhy = register_diag_field ( trim(field), 'pfhy', axes(1:3), Time, &
-           'hydrostatic pressure', 'pa', missing_value=missing_value )
-   else
-      idiag%id_pfnh = register_diag_field ( trim(field), 'pfnh', axes(1:3), Time, &
-           'non-hydrostatic pressure', 'pa', missing_value=missing_value )
-   endif
-   idiag%id_zratio = register_diag_field ( trim(field), 'zratio', axes(1:3), Time, &
-        'nonhydro_ratio', 'n/a', missing_value=missing_value )
    idiag%id_ws = register_diag_field ( trim(field), 'ws', axes(1:2), Time, &
         'Terrain W', 'm/s', missing_value=missing_value )
-!--------------------
-! 3D Condensate
-!--------------------
-   idiag%id_qn = register_diag_field ( trim(field), 'qn', axes(1:3), Time, &
-        'cloud condensate', 'kg/m/s^2', missing_value=missing_value )
-   idiag%id_qp = register_diag_field ( trim(field), 'qp', axes(1:3), Time, &
-        'precip condensate', 'kg/m/s^2', missing_value=missing_value )
-! fast moist phys tendencies:
-   idiag%id_mdt = register_diag_field ( trim(field), 'mdt', axes(1:3), Time, &
-        'DT/Dt: fast moist phys', 'deg/sec', missing_value=missing_value )
-   idiag%id_qdt = register_diag_field ( trim(field), 'qdt', axes(1:3), Time, &
-        'Dqv/Dt: fast moist phys', 'kg/kg/sec', missing_value=missing_value )
-   idiag%id_dbz = register_diag_field ( trim(field), 'reflectivity', axes(1:3), time, &
-        'Stoelinga simulated reflectivity', 'dBz', missing_value=missing_value)
    idiag%id_maxdbz = register_diag_field ( trim(field), 'max_reflectivity', axes(1:2), time, &
         'Stoelinga simulated maximum (composite) reflectivity', 'dBz', missing_value=missing_value)
    idiag%id_basedbz = register_diag_field ( trim(field), 'base_reflectivity', axes(1:2), time, &
         'Stoelinga simulated base (1 km AGL) reflectivity', 'dBz', missing_value=missing_value)
    idiag%id_dbz4km = register_diag_field ( trim(field), '4km_reflectivity', axes(1:2), time, &
         'Stoelinga simulated base reflectivity', 'dBz', missing_value=missing_value)
+   idiag%id_dbztop = register_diag_field ( trim(field), 'echo_top', axes(1:2), time, &
+        'Echo top ( <= 18.5 dBz )', 'm', missing_value=missing_value2)
+   idiag%id_dbz_m10C = register_diag_field ( trim(field), 'm10C_reflectivity', axes(1:2), time, &
+        'Reflectivity at -10C level', 'dBz', missing_value=missing_value)

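The new echo_top field reduces the simulated-reflectivity column to a single height. A straightforward reading of the 18.5-dBz threshold in its long name is a top-down scan that keeps the first layer at or above the threshold; the sketch below does exactly that and is illustrative only, with dbz, z_agl, and the interpolation-free layer pick being assumptions rather than the model's code:

    subroutine echo_top_column(npz, dbz, z_agl, missing, etop)
      implicit none
      integer, intent(in) :: npz
      real, intent(in)  :: dbz(npz)    ! simulated reflectivity (dBz), k=1 at model top
      real, intent(in)  :: z_agl(npz)  ! layer height above ground (m)
      real, intent(in)  :: missing     ! reported when no echo reaches the threshold
      real, intent(out) :: etop
      integer :: k
      etop = missing
      do k = 1, npz                    ! top down: the first hit is the echo top
         if ( dbz(k) >= 18.5 ) then
            etop = z_agl(k)
            exit
         endif
      enddo
    end subroutine echo_top_column

Returning a sentinel such as missing_value2 when nothing in the column reaches the threshold matches how the field is registered above.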
 !--------------------------
-! Extra surface diagnistics:
+! Extra surface diagnostics:
 !--------------------------
 ! Surface (lowest layer) vorticity: for tropical cyclones diag.
   idiag%id_vorts = register_diag_field ( trim(field), 'vorts', axes(1:2), Time, &
@@ -632,9 +830,31 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref)
   idiag%id_tb = register_diag_field ( trim(field), 'tb', axes(1:2), Time, &
        'lowest layer temperature', 'K' )
   idiag%id_ctt = register_diag_field( trim(field), 'ctt', axes(1:2), Time, &
-       'cloud_top temperature', 'K' )
+       'cloud_top temperature', 'K', missing_value=missing_value3 )
   idiag%id_ctp = register_diag_field( trim(field), 'ctp', axes(1:2), Time, &
-       'cloud_top pressure', 'hPa' )
+       'cloud_top pressure', 'hPa' , missing_value=missing_value3 )
+  idiag%id_ctz = register_diag_field( trim(field), 'ctz', axes(1:2), Time, &
+       'cloud_top height', 'm' , missing_value=missing_value2 )
+  idiag%id_cape = register_diag_field( trim(field), 'cape', axes(1:2), Time, &
+       'Convective available potential energy (surface-based)', 'J/kg' , missing_value=missing_value )
+  idiag%id_cin = register_diag_field( trim(field), 'cin', axes(1:2), Time, &
+       'Convective inhibition (surface-based)', 'J/kg' , missing_value=missing_value )
+!--------------------------
+! Vertically integrated tracers for GFDL MP
+!--------------------------
+  idiag%id_intqv = register_diag_field ( trim(field), 'intqv', axes(1:2), Time, &
+       'Vertically Integrated Water Vapor', 'kg/m**2', missing_value=missing_value )
+  idiag%id_intql = register_diag_field ( trim(field), 'intql', axes(1:2), Time, &
+       'Vertically Integrated Cloud Water', 'kg/m**2', missing_value=missing_value )
+  idiag%id_intqi = register_diag_field ( trim(field), 'intqi', axes(1:2), Time, &
+       'Vertically Integrated Cloud Ice', 'kg/m**2', missing_value=missing_value )
+  idiag%id_intqr = register_diag_field ( trim(field), 'intqr', axes(1:2), Time, &
+       'Vertically Integrated Rain', 'kg/m**2', missing_value=missing_value )
+  idiag%id_intqs = register_diag_field ( trim(field), 'intqs', axes(1:2), Time, &
+       'Vertically Integrated Snow', 'kg/m**2', missing_value=missing_value )
+  idiag%id_intqg = register_diag_field ( trim(field), 'intqg', axes(1:2), Time, &
+       'Vertically Integrated Graupel', 'kg/m**2', missing_value=missing_value )
+
 #ifdef HIWPP
   idiag%id_acl = register_diag_field ( trim(field), 'acl', axes(1:2), Time, &
        'Column-averaged Cl mixing ratio', 'kg/kg', missing_value=missing_value )
@@ -650,10 +870,6 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref)

   idiag%id_vort850 = register_diag_field ( trim(field), 'vort850', axes(1:2), Time, &
        '850-mb vorticity', '1/s', missing_value=missing_value )
-  if ( .not. Atm(n)%flagstruct%hydrostatic ) &
-  idiag%id_w200 = register_diag_field ( trim(field), 'w200', axes(1:2), Time, &
-       '200-mb w-wind', 'm/s', missing_value=missing_value )
-
   idiag%id_vort200 = register_diag_field ( trim(field), 'vort200', axes(1:2), Time, &
        '200-mb vorticity', '1/s', missing_value=missing_value )

@@ -673,28 +889,36 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref)
   idiag%id_qn850 = register_diag_field ( trim(field), 'qn850', axes(1:2), Time, &
        '850mb condensate', 'kg/m/s^2', missing_value=missing_value )

-   if( .not. 
Atm(n)%flagstruct%hydrostatic ) & - idiag%id_w500 = register_diag_field ( trim(field), 'w500', axes(1:2), Time, & - '500-mb w-wind', 'm/s', missing_value=missing_value ) idiag%id_vort500 = register_diag_field ( trim(field), 'vort500', axes(1:2), Time, & '500-mb vorticity', '1/s', missing_value=missing_value ) - idiag%id_w700 = register_diag_field ( trim(field), 'w700', axes(1:2), Time, & - '700-mb w-wind', 'm/s', missing_value=missing_value ) - - if( .not. Atm(n)%flagstruct%hydrostatic ) & - idiag%id_w850 = register_diag_field ( trim(field), 'w850', axes(1:2), Time, & - '850-mb w-wind', 'm/s', missing_value=missing_value ) -!-------------------------- -! 5km: -!-------------------------- idiag%id_rain5km = register_diag_field ( trim(field), 'rain5km', axes(1:2), Time, & '5-km AGL liquid water', 'kg/kg', missing_value=missing_value ) +!-------------------------- +! w on height or pressure levels +!-------------------------- if( .not. Atm(n)%flagstruct%hydrostatic ) then + idiag%id_w200 = register_diag_field ( trim(field), 'w200', axes(1:2), Time, & + '200-mb w-wind', 'm/s', missing_value=missing_value ) + idiag%id_w500 = register_diag_field ( trim(field), 'w500', axes(1:2), Time, & + '500-mb w-wind', 'm/s', missing_value=missing_value ) + idiag%id_w700 = register_diag_field ( trim(field), 'w700', axes(1:2), Time, & + '700-mb w-wind', 'm/s', missing_value=missing_value ) + + idiag%id_w850 = register_diag_field ( trim(field), 'w850', axes(1:2), Time, & + '850-mb w-wind', 'm/s', missing_value=missing_value ) idiag%id_w5km = register_diag_field ( trim(field), 'w5km', axes(1:2), Time, & '5-km AGL w-wind', 'm/s', missing_value=missing_value ) idiag%id_w2500m = register_diag_field ( trim(field), 'w2500m', axes(1:2), Time, & '2.5-km AGL w-wind', 'm/s', missing_value=missing_value ) + idiag%id_w1km = register_diag_field ( trim(field), 'w1km', axes(1:2), Time, & + '1-km AGL w-wind', 'm/s', missing_value=missing_value ) + + idiag%id_wmaxup = register_diag_field ( trim(field), 'wmaxup', axes(1:2), Time, & + 'column-maximum updraft', 'm/s', missing_value=missing_value ) + idiag%id_wmaxdn = register_diag_field ( trim(field), 'wmaxdn', axes(1:2), Time, & + 'column-maximum downdraft', 'm/s', missing_value=missing_value ) + endif ! helicity @@ -706,13 +930,18 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) ! '2-5 km vertical comp. of helicity', 'm**2/s**2', missing_value=missing_value ) ! Storm Relative Helicity - idiag%id_srh = register_diag_field ( trim(field), 'srh', axes(1:2), Time, & + idiag%id_srh1 = register_diag_field ( trim(field), 'srh01', axes(1:2), Time, & + '0-1 km Storm Relative Helicity', 'm/s**2', missing_value=missing_value ) + idiag%id_srh3 = register_diag_field ( trim(field), 'srh03', axes(1:2), Time, & '0-3 km Storm Relative Helicity', 'm/s**2', missing_value=missing_value ) + idiag%id_ustm = register_diag_field ( trim(field), 'ustm', axes(1:2), Time, & + 'u Component of Storm Motion', 'm/s', missing_value=missing_value ) + idiag%id_vstm = register_diag_field ( trim(field), 'vstm', axes(1:2), Time, & + 'v Component of Storm Motion', 'm/s', missing_value=missing_value ) + idiag%id_srh25 = register_diag_field ( trim(field), 'srh25', axes(1:2), Time, & '2-5 km Storm Relative Helicity', 'm/s**2', missing_value=missing_value ) - idiag%id_srh01 = register_diag_field ( trim(field), 'srh01', axes(1:2), Time, & - '0-1 km Storm Relative Helicity', 'm/s**2', missing_value=missing_value ) - + if( .not. 
Atm(n)%flagstruct%hydrostatic ) then idiag%id_uh03 = register_diag_field ( trim(field), 'uh03', axes(1:2), Time, & '0-3 km Updraft Helicity', 'm/s**2', missing_value=missing_value ) @@ -723,6 +952,10 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) if( .not. Atm(n)%flagstruct%hydrostatic ) & idiag%id_w100m = register_diag_field ( trim(field), 'w100m', axes(1:2), Time, & '100-m AGL w-wind', 'm/s', missing_value=missing_value ) + idiag%id_u100m = register_diag_field ( trim(field), 'u100m', axes(1:2), Time, & + '100-m AGL u-wind', 'm/s', missing_value=missing_value ) + idiag%id_v100m = register_diag_field ( trim(field), 'v100m', axes(1:2), Time, & + '100-m AGL v-wind', 'm/s', missing_value=missing_value ) !-------------------------- ! relative humidity (physics definition): !-------------------------- @@ -749,6 +982,31 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) idiag%id_rh1000 = register_diag_field ( trim(field), 'rh1000', axes(1:2), Time, & '1000-mb relative humidity', '%', missing_value=missing_value ) !-------------------------- +! Dew Point +!-------------------------- + idiag%id_dp10 = register_diag_field ( trim(field), 'dp10', axes(1:2), Time, & + '10-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp50 = register_diag_field ( trim(field), 'dp50', axes(1:2), Time, & + '50-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp100 = register_diag_field ( trim(field), 'dp100', axes(1:2), Time, & + '100-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp200 = register_diag_field ( trim(field), 'dp200', axes(1:2), Time, & + '200-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp250 = register_diag_field ( trim(field), 'dp250', axes(1:2), Time, & + '250-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp300 = register_diag_field ( trim(field), 'dp300', axes(1:2), Time, & + '300-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp500 = register_diag_field ( trim(field), 'dp500', axes(1:2), Time, & + '500-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp700 = register_diag_field ( trim(field), 'dp700', axes(1:2), Time, & + '700-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp850 = register_diag_field ( trim(field), 'dp850', axes(1:2), Time, & + '850-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp925 = register_diag_field ( trim(field), 'dp925', axes(1:2), Time, & + '925-mb dew point', 'K', missing_value=missing_value ) + idiag%id_dp1000 = register_diag_field ( trim(field), 'dp1000', axes(1:2), Time, & + '1000-mb dew point', 'K', missing_value=missing_value ) +!-------------------------- ! relative humidity (CMIP definition): !-------------------------- idiag%id_rh10_cmip = register_diag_field ( trim(field), 'rh10_cmip', axes(1:2), Time, & @@ -772,73 +1030,216 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) idiag%id_rh1000_cmip = register_diag_field ( trim(field), 'rh1000_cmip', axes(1:2), Time, & '1000-mb relative humidity (CMIP)', '%', missing_value=missing_value ) - do i=1, ncnst -!-------------------- -! Tracer diagnostics: -!-------------------- - call get_tracer_names ( MODEL_ATMOS, i, tname, tlongname, tunits ) - idiag%id_tracer(i) = register_diag_field ( field, trim(tname), & - axes(1:3), Time, trim(tlongname), & - trim(tunits), missing_value=missing_value) - if (master) then - if (idiag%id_tracer(i) > 0) then + if (Atm(n)%flagstruct%write_3d_diags) then + do i=1, ncnst + !-------------------- + ! 
Tracer diagnostics: + !-------------------- + call get_tracer_names ( MODEL_ATMOS, i, tname, tlongname, tunits ) + idiag%id_tracer(i) = register_diag_field ( field, trim(tname), & + axes(1:3), Time, trim(tlongname), & + trim(tunits), missing_value=missing_value) + if (master) then + if (idiag%id_tracer(i) > 0) then unit = stdlog() write(unit,'(a,a,a,a)') & & 'Diagnostics available for tracer ',trim(tname), & ' in module ', trim(field) - end if - endif -!---------------------------------- -! ESM Tracer dmmr/dvmr diagnostics: -! for specific elements only -!---------------------------------- -!---co2 - if (trim(tname).eq.'co2') then - idiag%w_mr(:) = WTMCO2 - idiag%id_tracer_dmmr(i) = register_diag_field ( field, trim(tname)//'_dmmr', & - axes(1:3), Time, trim(tlongname)//" (dry mmr)", & - trim(tunits), missing_value=missing_value) - idiag%id_tracer_dvmr(i) = register_diag_field ( field, trim(tname)//'_dvmr', & - axes(1:3), Time, trim(tlongname)//" (dry vmr)", & - 'mol/mol', missing_value=missing_value) - if (master) then + end if + endif + !---------------------------------- + ! ESM Tracer dmmr/dvmr diagnostics: + ! for specific elements only + !---------------------------------- + !---co2 + if (trim(tname).eq.'co2') then + idiag%w_mr(:) = WTMCO2 + idiag%id_tracer_dmmr(i) = register_diag_field ( field, trim(tname)//'_dmmr', & + axes(1:3), Time, trim(tlongname)//" (dry mmr)", & + trim(tunits), missing_value=missing_value) + idiag%id_tracer_dvmr(i) = register_diag_field ( field, trim(tname)//'_dvmr', & + axes(1:3), Time, trim(tlongname)//" (dry vmr)", & + 'mol/mol', missing_value=missing_value) + if (master) then unit = stdlog() if (idiag%id_tracer_dmmr(i) > 0) then - write(unit,'(a,a,a,a)') 'Diagnostics available for '//trim(tname)//' dry mmr ', & - trim(tname)//'_dmmr', ' in module ', trim(field) + write(unit,'(a,a,a,a)') 'Diagnostics available for '//trim(tname)//' dry mmr ', & + trim(tname)//'_dmmr', ' in module ', trim(field) end if if (idiag%id_tracer_dvmr(i) > 0) then - write(unit,'(a,a,a,a)') 'Diagnostics available for '//trim(tname)//' dry vmr ', & - trim(tname)//'_dvmr', ' in module ', trim(field) + write(unit,'(a,a,a,a)') 'Diagnostics available for '//trim(tname)//' dry vmr ', & + trim(tname)//'_dvmr', ' in module ', trim(field) end if - endif - endif -!---end co2 + endif + endif + !---end co2 - enddo + enddo + endif if ( Atm(1)%flagstruct%consv_am .or. idiag%id_mq > 0 .or. idiag%id_amdt > 0 ) then - allocate ( idiag%zxg(isc:iec,jsc:jec) ) -! Initialize gradient of terrain for mountain torque computation: - call init_mq(Atm(n)%phis, Atm(n)%gridstruct, & - npx, npy, isc, iec, jsc, jec, Atm(n)%ng) + allocate ( idiag%zxg(isc:iec,jsc:jec) ) + ! Initialize gradient of terrain for mountain torque computation: + call init_mq(Atm(n)%phis, Atm(n)%gridstruct, & + npx, npy, isc, iec, jsc, jec, Atm(n)%ng) endif ! 
end do #ifdef TEST_TRACER - call prt_mass(npz, Atm(n)%ncnst, isc, iec, jsc, jec, Atm(n)%ng, max(1,Atm(n)%flagstruct%nwat), & + call prt_mass(npz, Atm(n)%ncnst, isc, iec, jsc, jec, Atm(n)%ng, max(1,Atm(n)%flagstruct%nwat), & Atm(n)%ps, Atm(n)%delp, Atm(n)%q, Atm(n)%gridstruct%area_64, Atm(n)%domain) #else - call prt_mass(npz, Atm(n)%ncnst, isc, iec, jsc, jec, Atm(n)%ng, Atm(n)%flagstruct%nwat, & + call prt_mass(npz, Atm(n)%ncnst, isc, iec, jsc, jec, Atm(n)%ng, Atm(n)%flagstruct%nwat, & Atm(n)%ps, Atm(n)%delp, Atm(n)%q, Atm(n)%gridstruct%area_64, Atm(n)%domain) #endif + + !Set up debug column diagnostics, if desired + !Start by hard-coding one diagnostic column then add options for more later + + diag_debug_names(:) = '' + diag_debug_lon_in(:) = -999. + diag_debug_lat_in(:) = -999. + + !diag_debug_names(1:2) = (/'ORD','Princeton'/) + !diag_debug_lon_in(1:2) = (/272.,285.33/) + !diag_debug_lat_in(1:2) = (/42.,40.36/) + + diag_sonde_names(:) = '' + diag_sonde_lon_in(:) = -999. + diag_sonde_lat_in(:) = -999. + + !diag_sonde_names(1:4) = (/'OUN','MYNN','PIT', 'ORD'/) + !diag_sonde_lon_in(1:4) = (/285.33,282.54,279.78,272./) + !diag_sonde_lat_in(1:4) = (/35.18,25.05,40.53,42./) + + +#ifdef INTERNAL_FILE_NML + read(input_nml_file, nml=fv_diag_column_nml,iostat=ios) +#else + inquire (file=trim(Atm(n)%nml_filename), exist=exists) + if (.not. exists) then + write(errmsg,*) 'fv_diag_column_nml: namelist file ',trim(Atm(n)%nml_filename),' does not exist' + call mpp_error(FATAL, errmsg) + else + open (unit=nlunit, file=Atm(n)%nml_filename, READONLY, status='OLD', iostat=ios) + endif + rewind(nlunit) + read (nlunit, nml=fv_diag_column_nml, iostat=ios) + close (nlunit) +#endif + + call column_diagnostics_init + + if (do_diag_debug) then + + !Determine number of debug columns + do m=1,MAX_DIAG_COLUMN + !if (is_master()) print*, i, diag_debug_names(m), len(trim(diag_debug_names(m))), diag_debug_lon_in(m), diag_debug_lat_in(m) + if (len(trim(diag_debug_names(m))) == 0 .or. diag_debug_lon_in(m) < -180. .or. diag_debug_lat_in(m) < -90.) exit + num_diag_debug = num_diag_debug + 1 + if (diag_debug_lon_in(m) < 0.) diag_debug_lon_in(m) = diag_debug_lon_in(m) + 360. + enddo + + if (num_diag_debug == 0) do_diag_debug = .FALSE. + + endif + + if (do_diag_debug) then + + allocate(do_debug_diag_column(isc:iec,jsc:jec)) + allocate(diag_debug_lon(num_diag_debug)) + allocate(diag_debug_lat(num_diag_debug)) + allocate(diag_debug_i(num_diag_debug)) + allocate(diag_debug_j(num_diag_debug)) + allocate(diag_debug_units(num_diag_debug)) + + + call initialize_diagnostic_columns("DEBUG", num_diag_pts_latlon=num_diag_debug, num_diag_pts_ij=0, & + global_i=(/1/), global_j=(/1/), & + global_lat_latlon=diag_debug_lat_in, global_lon_latlon=diag_debug_lon_in, & + lonb_in=Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,1), latb_in=Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,2), & + do_column_diagnostics=do_debug_diag_column, & + diag_lon=diag_debug_lon, diag_lat=diag_debug_lat, diag_i=diag_debug_i, diag_j=diag_debug_j, diag_units=diag_debug_units) + + do m=1,num_diag_debug + diag_debug_i(m) = diag_debug_i(m) + isc - 1 + diag_debug_j(m) = diag_debug_j(m) + jsc - 1 + + if (diag_debug_i(m) >= isc .and. diag_debug_i(m) <= iec .and. & + diag_debug_j(m) >= jsc .and. 
diag_debug_j(m) <= jec ) then
+            write(*,'(A, 1x, I04, 1x, A, 4F7.2, 2I5)') 'DEBUG POINT: ', mpp_pe(), diag_debug_names(m), diag_debug_lon_in(m), diag_debug_lat_in(m), &
+                 Atm(n)%gridstruct%agrid(diag_debug_i(m), diag_debug_j(m),1)*rad2deg, Atm(n)%gridstruct%agrid(diag_debug_i(m), diag_debug_j(m),2)*rad2deg, &
+                 diag_debug_i(m), diag_debug_j(m)
+         endif
+      enddo
+
+   endif
+
+
+   !Radiosondes
+   if (do_diag_sonde) then
+
+      !Determine number of sonde columns
+      do m=1,MAX_DIAG_COLUMN
+         if (len(trim(diag_sonde_names(m))) == 0 .or. diag_sonde_lon_in(m) < -180. .or. diag_sonde_lat_in(m) < -90.) exit
+         !if (is_master()) print*, i, diag_sonde_names(m), len(trim(diag_sonde_names(m))), diag_sonde_lon_in(m), diag_sonde_lat_in(m)
+         num_diag_sonde = num_diag_sonde + 1
+         if (diag_sonde_lon_in(m) < 0.) diag_sonde_lon_in(m) = diag_sonde_lon_in(m) + 360.
+      enddo
+
+      if (num_diag_sonde == 0) do_diag_sonde = .FALSE.
+
+   endif
+
+   if (do_diag_sonde) then
+
+      allocate(do_sonde_diag_column(isc:iec,jsc:jec))
+      allocate(diag_sonde_lon(num_diag_sonde))
+      allocate(diag_sonde_lat(num_diag_sonde))
+      allocate(diag_sonde_i(num_diag_sonde))
+      allocate(diag_sonde_j(num_diag_sonde))
+      allocate(diag_sonde_units(num_diag_sonde))
+
+      call initialize_diagnostic_columns("Sounding", num_diag_pts_latlon=num_diag_sonde, num_diag_pts_ij=0, &
+           global_i=(/1/), global_j=(/1/), &
+           global_lat_latlon=diag_sonde_lat_in, global_lon_latlon=diag_sonde_lon_in, &
+           lonb_in=Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,1), latb_in=Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,2), &
+           do_column_diagnostics=do_sonde_diag_column, &
+           diag_lon=diag_sonde_lon, diag_lat=diag_sonde_lat, diag_i=diag_sonde_i, diag_j=diag_sonde_j, diag_units=diag_sonde_units)
+
+      do m=1,num_diag_sonde
+         diag_sonde_i(m) = diag_sonde_i(m) + isc - 1
+         diag_sonde_j(m) = diag_sonde_j(m) + jsc - 1
+
+         if (diag_sonde_i(m) >= isc .and. diag_sonde_i(m) <= iec .and. &
+             diag_sonde_j(m) >= jsc .and. diag_sonde_j(m) <= jec ) then
+            write(*,'(A, 1x, I04, 1x, A, 4F7.2, 2I5)') 'SONDE POINT: ', mpp_pe(), diag_sonde_names(m), diag_sonde_lon_in(m), diag_sonde_lat_in(m), &
+                 Atm(n)%gridstruct%agrid(diag_sonde_i(m), diag_sonde_j(m),1)*rad2deg, Atm(n)%gridstruct%agrid(diag_sonde_i(m), diag_sonde_j(m),2)*rad2deg, &
+                 diag_sonde_i(m), diag_sonde_j(m)
+         endif
+      enddo
+
+   endif
+
+   !Model initialization time (not necessarily the time this simulation is started;
+   ! conceivably a restart could be done)
+   if (m_calendar) then
+      call get_date(Atm(n)%Time_init, yr_init, mo_init, dy_init, hr_init, mn_init, sec_init)
+   else
+      call get_time(Atm(n)%Time_init, sec_init, dy_init)
+      yr_init = 0 ; mo_init = 0 ; hr_init = 0 ; mn_init = 0
+   endif
+
    call nullify_domain()  ! Nullify set_domain info

    module_is_initialized=.true.
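+! An illustrative fv_diag_column_nml setup (hypothetical values, echoing the
+! commented-out defaults earlier in this routine; this assumes the
+! diag_debug_* / diag_sonde_* arrays are members of fv_diag_column_nml,
+! whose declaration is not shown in this patch):
+!    &fv_diag_column_nml
+!       diag_debug_names  = 'ORD','Princeton'
+!       diag_debug_lon_in = 272.,285.33
+!       diag_debug_lat_in = 42.,40.36
+!    /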
istep = 0 +#ifndef GFS_PHYS + if(idiag%id_theta_e >0 ) call qsmith_init +#endif end subroutine fv_diag_init @@ -921,7 +1322,8 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) integer :: isd, ied, jsd, jed, npz, itrac integer :: ngc, nwater - real, allocatable :: a2(:,:),a3(:,:,:), wk(:,:,:), wz(:,:,:), ucoor(:,:,:), vcoor(:,:,:) + real, allocatable :: a2(:,:), a3(:,:,:), a4(:,:,:), wk(:,:,:), wz(:,:,:), ucoor(:,:,:), vcoor(:,:,:) + real, allocatable :: ustm(:,:), vstm(:,:) real, allocatable :: slp(:,:), depress(:,:), ws_max(:,:), tc_count(:,:) real, allocatable :: u2(:,:), v2(:,:), x850(:,:), var1(:,:), var2(:,:), var3(:,:) real, allocatable :: dmmr(:,:,:), dvmr(:,:,:) @@ -929,7 +1331,8 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) real:: plevs(nplev), pout(nplev) integer:: idg(nplev), id1(nplev) real :: tot_mq, tmp, sar, slon, slat - real :: t_gb, t_nh, t_sh, t_eq, area_gb, area_nh, area_sh, area_eq + real :: a1d(Atm(1)%npz) +! real :: t_gb, t_nh, t_sh, t_eq, area_gb, area_nh, area_sh, area_eq logical :: do_cs_intp logical :: used logical :: bad_range @@ -937,10 +1340,11 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) character(len=128) :: tname real, parameter:: ws_0 = 16. ! minimum max_wind_speed within the 7x7 search box real, parameter:: ws_1 = 20. - real, parameter:: vort_c0= 2.2e-5 + real, parameter:: vort_c0= 2.2e-5 logical, allocatable :: storm(:,:), cat_crt(:,:) - real :: tmp2, pvsum, e2, einf, qm, mm, maxdbz, allmax, rgrav - integer :: Cl, Cl2 + real :: tmp2, pvsum, e2, einf, qm, mm, maxdbz, allmax, rgrav, cv_vapor + real, allocatable :: cvm(:) + integer :: Cl, Cl2, k1, k2 !!! CLEANUP: does it really make sense to have this routine loop over Atm% anymore? We assume n=1 below anyway @@ -994,6 +1398,12 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) else prt_minmax = mod(hr, print_freq) == 0 .and. mn==0 .and. seconds==0 endif + + if ( sound_freq == 0 .or. .not. do_diag_sonde ) then + prt_sounding = .false. + else + prt_sounding = mod(hr, sound_freq) == 0 .and. mn == 0 .and. seconds == 0 + endif else call get_time (fv_time, seconds, days) if( print_freq == 0 ) then @@ -1004,6 +1414,13 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) else prt_minmax = mod(seconds, 3600*print_freq) == 0 endif + + if ( sound_freq == 0 .or. .not. do_diag_sonde ) then + prt_sounding = .false. 
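+          ! (no calendar: sound_freq is given in hours, so the test below
+          !  compares against 3600*sound_freq seconds)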
+ else + prt_sounding = mod(seconds, 3600*sound_freq) == 0 + endif + endif if(prt_minmax) then @@ -1102,19 +1519,21 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) elseif ( Atm(n)%flagstruct%range_warn ) then call range_check('DELP', Atm(n)%delp, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - 0.01*ptop, 200.E2, bad_range) + 0.01*ptop, 200.E2, bad_range, Time) call range_check('UA', Atm(n)%ua, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - -250., 250., bad_range) + -250., 250., bad_range, Time) call range_check('VA', Atm(n)%va, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - -250., 250., bad_range) + -250., 250., bad_range, Time) #ifndef SW_DYNAMICS call range_check('TA', Atm(n)%pt, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & #ifdef HIWPP - 130., 350., bad_range) !DCMIP ICs have very low temperatures + 130., 350., bad_range, Time) !DCMIP ICs have very low temperatures #else - 150., 350., bad_range) + 150., 350., bad_range, Time) #endif #endif + call range_check('Qv', Atm(n)%q(:,:,:,sphum), isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & + -1.e-8, 1.e20, bad_range, Time) endif @@ -1134,6 +1553,13 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) #endif if(idiag%id_ps > 0) used=send_data(idiag%id_ps, Atm(n)%ps(isc:iec,jsc:jec), Time) + if (idiag%id_qv_dt_phys > 0) used=send_data(idiag%id_qv_dt_phys, Atm(n)%phys_diag%phys_qv_dt(isc:iec,jsc:jec,1:npz), Time) + if (idiag%id_ql_dt_phys > 0) used=send_data(idiag%id_ql_dt_phys, Atm(n)%phys_diag%phys_ql_dt(isc:iec,jsc:jec,1:npz), Time) + if (idiag%id_qi_dt_phys > 0) used=send_data(idiag%id_qi_dt_phys, Atm(n)%phys_diag%phys_qi_dt(isc:iec,jsc:jec,1:npz), Time) + if (idiag%id_t_dt_phys > 0) used=send_data(idiag%id_t_dt_phys, Atm(n)%phys_diag%phys_t_dt(isc:iec,jsc:jec,1:npz), Time) + if (idiag%id_u_dt_phys > 0) used=send_data(idiag%id_u_dt_phys, Atm(n)%phys_diag%phys_u_dt(isc:iec,jsc:jec,1:npz), Time) + if (idiag%id_v_dt_phys > 0) used=send_data(idiag%id_v_dt_phys, Atm(n)%phys_diag%phys_v_dt(isc:iec,jsc:jec,1:npz), Time) + if(idiag%id_c15>0 .or. idiag%id_c25>0 .or. idiag%id_c35>0 .or. idiag%id_c45>0) then call wind_max(isc, iec, jsc, jec ,isd, ied, jsd, jed, Atm(n)%ua(isc:iec,jsc:jec,npz), & Atm(n)%va(isc:iec,jsc:jec,npz), ws_max, Atm(n)%domain) @@ -1163,7 +1589,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) do i=isc,iec if ( storm(i,j) ) & storm(i,j) = (Atm(n)%gridstruct%agrid(i,j,2)>0. .and. wk(i,j,npz)> vort_c0) .or. & - (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. wk(i,j,npz)<-vort_c0) + (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. wk(i,j,npz)<-vort_c0) enddo enddo endif @@ -1183,14 +1609,14 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) call interpolate_vertical(isc, iec, jsc, jec, npz, & 850.e2, Atm(n)%peln, wk, a2) used=send_data(idiag%id_vort850, a2, Time) - if ( idiag%id_x850>0 ) x850(:,:) = a2(:,:) + if ( idiag%id_x850>0 ) x850(:,:) = a2(:,:) if(idiag%id_c15>0) then do j=jsc,jec do i=isc,iec if ( storm(i,j) ) & storm(i,j) = (Atm(n)%gridstruct%agrid(i,j,2)>0. .and. a2(i,j)> vort_c0) .or. & - (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. a2(i,j)<-vort_c0) + (Atm(n)%gridstruct%agrid(i,j,2)<0. .and. a2(i,j)<-vort_c0) enddo enddo endif @@ -1227,6 +1653,84 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif + if ( idiag%id_srh1 > 0 .or. idiag%id_srh3 > 0 .or. idiag%id_srh25 > 0 .or. idiag%id_ustm > 0 .or. 
idiag%id_vstm > 0) then + allocate(ustm(isc:iec,jsc:jec), vstm(isc:iec,jsc:jec)) + + call bunkers_vector(isc, iec, jsc, jec, ngc, npz, zvir, sphum, ustm, vstm, & + Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav) + + if ( idiag%id_ustm > 0 ) then + used = send_data ( idiag%id_ustm, ustm, Time ) + endif + if ( idiag%id_vstm > 0 ) then + used = send_data ( idiag%id_vstm, vstm, Time ) + endif + + if ( idiag%id_srh1 > 0 ) then + call helicity_relative_CAPS(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, ustm, vstm, & + Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 0., 1.e3) + used = send_data ( idiag%id_srh1, a2, Time ) + if(prt_minmax) then + do j=jsc,jec + do i=isc,iec + tmp = rad2deg * Atm(n)%gridstruct%agrid(i,j,1) + tmp2 = rad2deg * Atm(n)%gridstruct%agrid(i,j,2) + if ( tmp2<25. .or. tmp2>50. & + .or. tmp<235. .or. tmp>300. ) then + a2(i,j) = 0. + endif + enddo + enddo + call prt_maxmin('SRH (0-1 km) over CONUS', a2, isc, iec, jsc, jec, 0, 1, 1.) + endif + endif + + if ( idiag%id_srh3 > 0 ) then + call helicity_relative_CAPS(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, ustm, vstm, & + Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 0., 3e3) + used = send_data ( idiag%id_srh3, a2, Time ) + if(prt_minmax) then + do j=jsc,jec + do i=isc,iec + tmp = rad2deg * Atm(n)%gridstruct%agrid(i,j,1) + tmp2 = rad2deg * Atm(n)%gridstruct%agrid(i,j,2) + if ( tmp2<25. .or. tmp2>50. & + .or. tmp<235. .or. tmp>300. ) then + a2(i,j) = 0. + endif + enddo + enddo + call prt_maxmin('SRH (0-3 km) over CONUS', a2, isc, iec, jsc, jec, 0, 1, 1.) + endif + endif + + if ( idiag%id_srh25 > 0 ) then + call helicity_relative_CAPS(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, ustm, vstm, & + Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & + Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 2.e3, 5e3) + used = send_data ( idiag%id_srh25, a2, Time ) + if(prt_minmax) then + do j=jsc,jec + do i=isc,iec + tmp = rad2deg * Atm(n)%gridstruct%agrid(i,j,1) + tmp2 = rad2deg * Atm(n)%gridstruct%agrid(i,j,2) + if ( tmp2<25. .or. tmp2>50. & + .or. tmp<235. .or. tmp>300. ) then + a2(i,j) = 0. + endif + enddo + enddo + call prt_maxmin('SRH (2-5 km) over CONUS', a2, isc, iec, jsc, jec, 0, 1, 1.) + endif + endif + + deallocate(ustm, vstm) + endif + + if ( idiag%id_pv > 0 ) then ! Note: this is expensive computation. call pv_entropy(isc, iec, jsc, jec, ngc, npz, wk, & @@ -1238,39 +1742,33 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif - - if ( idiag%id_srh > 0 ) then - call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & - Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & - Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 0., 3.e3) - used = send_data ( idiag%id_srh, a2, Time ) - if(prt_minmax) then - do j=jsc,jec - do i=isc,iec - tmp = rad2deg * Atm(n)%gridstruct%agrid(i,j,1) - tmp2 = rad2deg * Atm(n)%gridstruct%agrid(i,j,2) - if ( tmp2<25. .or. tmp2>50. & - .or. tmp<235. .or. tmp>300. ) then - a2(i,j) = 0. - endif - enddo - enddo - call prt_maxmin('SRH over CONUS', a2, isc, iec, jsc, jec, 0, 1, 1.) 
- endif - endif - if ( idiag%id_srh25 > 0 ) then - call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & - Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & - Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 2.e3, 5.e3) - used = send_data ( idiag%id_srh25, a2, Time ) - endif - if ( idiag%id_srh01 > 0 ) then - call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & - Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & - Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 0.e3, 1.e3) - used = send_data ( idiag%id_srh01, a2, Time ) - endif +!!$ if ( idiag%id_srh > 0 ) then +!!$ call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & +!!$ Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & +!!$ Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 0., 3.e3) +!!$ used = send_data ( idiag%id_srh, a2, Time ) +!!$ if(prt_minmax) then +!!$ do j=jsc,jec +!!$ do i=isc,iec +!!$ tmp = rad2deg * Atm(n)%gridstruct%agrid(i,j,1) +!!$ tmp2 = rad2deg * Atm(n)%gridstruct%agrid(i,j,2) +!!$ if ( tmp2<25. .or. tmp2>50. & +!!$ .or. tmp<235. .or. tmp>300. ) then +!!$ a2(i,j) = 0. +!!$ endif +!!$ enddo +!!$ enddo +!!$ call prt_maxmin('SRH over CONUS', a2, isc, iec, jsc, jec, 0, 1, 1.) +!!$ endif +!!$ endif + +!!$ if ( idiag%id_srh25 > 0 ) then +!!$ call helicity_relative(isc, iec, jsc, jec, ngc, npz, zvir, sphum, a2, & +!!$ Atm(n)%ua, Atm(n)%va, Atm(n)%delz, Atm(n)%q, & +!!$ Atm(n)%flagstruct%hydrostatic, Atm(n)%pt, Atm(n)%peln, Atm(n)%phis, grav, 2.e3, 5.e3) +!!$ used = send_data ( idiag%id_srh25, a2, Time ) +!!$ endif ! Relative Humidity @@ -1300,7 +1798,10 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) ! rel hum from physics at selected press levels (for IPCC) if (idiag%id_rh50>0 .or. idiag%id_rh100>0 .or. idiag%id_rh200>0 .or. idiag%id_rh250>0 .or. & idiag%id_rh300>0 .or. idiag%id_rh500>0 .or. idiag%id_rh700>0 .or. idiag%id_rh850>0 .or. & - idiag%id_rh925>0 .or. idiag%id_rh1000>0) then + idiag%id_rh925>0 .or. idiag%id_rh1000>0 .or. & + idiag%id_dp50>0 .or. idiag%id_dp100>0 .or. idiag%id_dp200>0 .or. idiag%id_dp250>0 .or. & + idiag%id_dp300>0 .or. idiag%id_dp500>0 .or. idiag%id_dp700>0 .or. idiag%id_dp850>0 .or. & + idiag%id_dp925>0 .or. idiag%id_dp1000>0) then ! compute mean pressure do k=1,npz do j=jsc,jec @@ -1351,6 +1852,68 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) call interpolate_vertical(isc, iec, jsc, jec, npz, 1000.e2, Atm(n)%peln, wk(isc:iec,jsc:jec,:), a2) used=send_data(idiag%id_rh1000, a2, Time) endif + + if (idiag%id_dp50>0 .or. idiag%id_dp100>0 .or. idiag%id_dp200>0 .or. idiag%id_dp250>0 .or. & + idiag%id_dp300>0 .or. idiag%id_dp500>0 .or. idiag%id_dp700>0 .or. idiag%id_dp850>0 .or. & + idiag%id_dp925>0 .or. idiag%id_dp1000>0 ) then + + if (allocated(a3)) deallocate(a3) + allocate(a3(isc:iec,jsc:jec,1:npz)) + !compute dew point (K) + !using formula at https://cals.arizona.edu/azmet/dewpoint.html + do k=1,npz + do j=jsc,jec + do i=isc,iec + tmp = ( log(max(wk(i,j,k)*1.e-2,1.e-2)) + 17.27 * ( Atm(n)%pt(i,j,k) - 273.14 )/ ( -35.84 + Atm(n)%pt(i,j,k)) ) / 17.27 + a3(i,j,k) = 273.14 + 237.3*tmp/ ( 1. 
- tmp )
+            enddo
+         enddo
+      enddo
+
+      if (idiag%id_dp50>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 50.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp50, a2, Time)
+      endif
+      if (idiag%id_dp100>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 100.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp100, a2, Time)
+      endif
+      if (idiag%id_dp200>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp200, a2, Time)
+      endif
+      if (idiag%id_dp250>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 250.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp250, a2, Time)
+      endif
+      if (idiag%id_dp300>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 300.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp300, a2, Time)
+      endif
+      if (idiag%id_dp500>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 500.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp500, a2, Time)
+      endif
+      if (idiag%id_dp700>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 700.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp700, a2, Time)
+      endif
+      if (idiag%id_dp850>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp850, a2, Time)
+      endif
+      if (idiag%id_dp925>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 925.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp925, a2, Time)
+      endif
+      if (idiag%id_dp1000>0) then
+         call interpolate_vertical(isc, iec, jsc, jec, npz, 1000.e2, Atm(n)%peln, a3, a2)
+         used=send_data(idiag%id_dp1000, a2, Time)
+      endif
+      deallocate(a3)
+
+   endif
+
    endif

 ! rel hum (CMIP definition) at selected press levels  (for IPCC)
@@ -1424,7 +1987,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)

-    if( idiag%id_slp>0 .or. idiag%id_tm>0 .or. idiag%id_hght>0 .or. idiag%id_c15>0 ) then
+    if( idiag%id_slp>0 .or. idiag%id_tm>0 .or. idiag%id_any_hght>0 .or. idiag%id_hght3d>0 .or. idiag%id_c15>0 .or. idiag%id_ctz>0 ) then

       allocate ( wz(isc:iec,jsc:jec,npz+1) )
       call get_height_field(isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%delz, &
@@ -1433,11 +1996,20 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
       call prt_mxm('ZTOP',wz(isc:iec,jsc:jec,1), isc, iec, jsc, jec, 0, 1, 1.E-3, Atm(n)%gridstruct%area_64, Atm(n)%domain)
!      call prt_maxmin('ZTOP', wz(isc:iec,jsc:jec,1), isc, iec, jsc, jec, 0, 1, 1.E-3)

+      if (idiag%id_hght3d > 0) then
+         used = send_data(idiag%id_hght3d, 0.5*(wz(isc:iec,jsc:jec,1:npz)+wz(isc:iec,jsc:jec,2:npz+1)), Time)
+      endif
+
       if(idiag%id_slp > 0) then
 ! Compute SLP (pressure at height=0)
          allocate ( slp(isc:iec,jsc:jec) )
          call get_pressure_given_height(isc, iec, jsc, jec, ngc, npz, wz, 1, height(2),   &
                                         Atm(n)%pt(:,:,npz), Atm(n)%peln, slp, 0.01)
+
+         if ( Atm(n)%flagstruct%range_warn ) then
+            call range_check('SLP', slp, isc, iec, jsc, jec, 0, Atm(n)%gridstruct%agrid, &
+                 slprange(1), slprange(2), bad_range, Time)
+         endif
          used = send_data (idiag%id_slp, slp, Time)
          if( prt_minmax ) then
             call prt_maxmin('SLP', slp, isc, iec, jsc, jec, 0, 1, 1.)
@@ -1457,7 +2029,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
       endif

 ! Compute H3000 and/or H500
-      if( idiag%id_tm>0 .or. idiag%id_hght>0 .or. idiag%id_ppt>0) then
+      if( idiag%id_tm>0 .or. idiag%id_any_hght>0 .or. 
idiag%id_ppt>0) then allocate( a3(isc:iec,jsc:jec,nplev) ) @@ -1471,8 +2043,8 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) idg(minloc(abs(levs-500))) = idiag%id_h(minloc(abs(levs-500))) endif - call get_height_given_pressure(isc, iec, jsc, jec, ngc, npz, wz, nplev, idg, plevs, Atm(n)%peln, a3) - ! reset + call get_height_given_pressure(isc, iec, jsc, jec, npz, wz, nplev, idg, plevs, Atm(n)%peln, a3) + ! reset idg(minloc(abs(levs-300))) = idiag%id_h(minloc(abs(levs-300))) idg(minloc(abs(levs-500))) = idiag%id_h(minloc(abs(levs-500))) @@ -1482,49 +2054,21 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if (idiag%id_h_plev>0) then id1(:) = 1 - call get_height_given_pressure(isc, iec, jsc, jec, ngc, npz, wz, nplev, id1, plevs, Atm(n)%peln, a3) + call get_height_given_pressure(isc, iec, jsc, jec, npz, wz, nplev, id1, plevs, Atm(n)%peln, a3) used=send_data(idiag%id_h_plev, a3(isc:iec,jsc:jec,:), Time) endif if( prt_minmax ) then - + if(all(idiag%id_h(minloc(abs(levs-100)))>0)) & - call prt_mxm('Z100',a3(isc:iec,jsc:jec,11),isc,iec,jsc,jec,0,1,1.E-3,Atm(n)%gridstruct%area_64,Atm(n)%domain) + call prt_mxm('Z100',a3(isc:iec,jsc:jec,k100),isc,iec,jsc,jec,0,1,1.E-3,Atm(n)%gridstruct%area_64,Atm(n)%domain) if(all(idiag%id_h(minloc(abs(levs-500)))>0)) then -! call prt_mxm('Z500',a3(isc:iec,jsc:jec,19),isc,iec,jsc,jec,0,1,1.,Atm(n)%gridstruct%area_64,Atm(n)%domain) - if (.not. Atm(n)%neststruct%nested) then - t_eq = 0. ; t_nh = 0.; t_sh = 0.; t_gb = 0. - area_eq = 0.; area_nh = 0.; area_sh = 0.; area_gb = 0. - do j=jsc,jec - do i=isc,iec - slat = Atm(n)%gridstruct%agrid(i,j,2)*rad2deg - area_gb = area_gb + Atm(n)%gridstruct%area(i,j) - t_gb = t_gb + a3(i,j,19)*Atm(n)%gridstruct%area(i,j) - if( (slat>-20. .and. slat<20.) ) then -! Tropics: - area_eq = area_eq + Atm(n)%gridstruct%area(i,j) - t_eq = t_eq + a3(i,j,19)*Atm(n)%gridstruct%area(i,j) - elseif( slat>=20. .and. slat<80. ) then -! NH - area_nh = area_nh + Atm(n)%gridstruct%area(i,j) - t_nh = t_nh + a3(i,j,19)*Atm(n)%gridstruct%area(i,j) - elseif( slat<=-20. .and. slat>-80. ) then -! SH - area_sh = area_sh + Atm(n)%gridstruct%area(i,j) - t_sh = t_sh + a3(i,j,19)*Atm(n)%gridstruct%area(i,j) - endif - enddo - enddo - call mp_reduce_sum(area_gb) - call mp_reduce_sum( t_gb) - call mp_reduce_sum(area_nh) - call mp_reduce_sum( t_nh) - call mp_reduce_sum(area_sh) - call mp_reduce_sum( t_sh) - call mp_reduce_sum(area_eq) - call mp_reduce_sum( t_eq) - if (master) write(*,*) 'Z500 GB_NH_SH_EQ=', t_gb/area_gb, t_nh/area_nh, t_sh/area_sh, t_eq/area_eq + if (Atm(n)%gridstruct%bounded_domain) then + call prt_mxm('Z500',a3(isc:iec,jsc:jec,k500),isc,iec,jsc,jec,0,1,1.,Atm(n)%gridstruct%area_64,Atm(n)%domain) + else + call prt_gb_nh_sh('fv_GFS Z500', isc,iec, jsc,jec, a3(isc,jsc,k500), Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & + Atm(n)%gridstruct%agrid_64(isc:iec,jsc:jec,2)) endif endif @@ -1532,12 +2076,31 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) ! mean virtual temp 300mb to 500mb if( idiag%id_tm>0 ) then + k1 = -1 + k2 = -1 + do k=1,nplev + if (abs(levs(k)-500.) < 1.) then + k2 = k + exit + endif + enddo + do k=1,nplev + if (abs(levs(k)-300.) < 1.) then + k1 = k + exit + endif + enddo + if (k1 <= 0 .or. k2 <= 0) then + call mpp_error(NOTE, "Could not find levs for 300--500 mb mean temperature, setting to -1") + a2 = -1. 
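+         ! (a2 keeps the -1 flag value; the send_data after this if-block
+         !  still posts the field)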
+ else do j=jsc,jec do i=isc,iec - a2(i,j) = grav*(a3(i,j,15)-a3(i,j,19))/(rdgas*(plevs(19)-plevs(15))) + a2(i,j) = grav*(a3(i,j,k2)-a3(i,j,k1))/(rdgas*(plevs(k1)-plevs(k2))) enddo enddo - used = send_data ( idiag%id_tm, a2, Time ) + endif + used = send_data ( idiag%id_tm, a2, Time ) endif if(idiag%id_c15>0 .or. idiag%id_c25>0 .or. idiag%id_c35>0 .or. idiag%id_c45>0) then @@ -1689,7 +2252,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if ( all(idiag%id_t(minloc(abs(levs-100)))>0) .and. prt_minmax ) then call prt_mxm('T100:', a3(isc:iec,jsc:jec,11), isc, iec, jsc, jec, 0, 1, 1., & Atm(n)%gridstruct%area_64, Atm(n)%domain) - if (.not. Atm(n)%neststruct%nested) then + if (.not. Atm(n)%gridstruct%bounded_domain) then tmp = 0. sar = 0. ! Compute mean temp at 100 mb near EQ @@ -1712,9 +2275,9 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif endif if ( all(idiag%id_t(minloc(abs(levs-200)))>0) .and. prt_minmax ) then - call prt_mxm('T200:', a3(isc:iec,jsc:jec,13), isc, iec, jsc, jec, 0, 1, 1., & + call prt_mxm('T200:', a3(isc:iec,jsc:jec,k200), isc, iec, jsc, jec, 0, 1, 1., & Atm(n)%gridstruct%area_64, Atm(n)%domain) - if (.not. Atm(n)%neststruct%nested) then + if (.not. Atm(n)%gridstruct%bounded_domain) then tmp = 0. sar = 0. do j=jsc,jec @@ -1722,7 +2285,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) slat = Atm(n)%gridstruct%agrid(i,j,2)*rad2deg if( (slat>-20 .and. slat<20) ) then sar = sar + Atm(n)%gridstruct%area(i,j) - tmp = tmp + a3(i,j,13)*Atm(n)%gridstruct%area(i,j) + tmp = tmp + a3(i,j,k200)*Atm(n)%gridstruct%area(i,j) endif enddo enddo @@ -1755,7 +2318,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo used = send_data(idiag%id_mq, a2, Time) if( prt_minmax ) then - tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 0) + tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 0) idiag%mtq_sum = idiag%mtq_sum + tot_mq if ( idiag%steps <= max_step ) idiag%mtq(idiag%steps) = tot_mq if(master) write(*,*) 'Total (global) mountain torque (Hadleys)=', tot_mq @@ -1856,13 +2419,13 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) einf = max(einf, abs(a2(i,j) - qcly0)) enddo enddo - if (prt_minmax .and. .not. Atm(n)%neststruct%nested) then + if (prt_minmax .and. .not. Atm(n)%gridstruct%bounded_domain) then call mp_reduce_sum(qm) call mp_reduce_max(einf) call mp_reduce_sum(e2) if (master) then write(*,*) ' TERMINATOR TEST: ' - write(*,*) ' chlorine mass: ', real(qm)/(4.*pi*RADIUS*RADIUS) + write(*,*) ' chlorine mass: ', qm/(4.*pi*RADIUS*RADIUS) write(*,*) ' L2 err: ', sqrt(e2)/sqrt(4.*pi*RADIUS*RADIUS)/qcly0 write(*,*) ' max err: ', einf/qcly0 endif @@ -1930,49 +2493,141 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) used = send_data(idiag%id_lw, a2*ginv, Time) endif -! Cloud top temperature & cloud top press: - if ( (idiag%id_ctt>0 .or. idiag%id_ctp>0).and. Atm(n)%flagstruct%nwat==6) then - allocate ( var1(isc:iec,jsc:jec) ) -!$OMP parallel do default(shared) private(tmp) - do j=jsc,jec - do i=isc,iec - do k=2,npz - tmp = atm(n)%q(i,j,k,liq_wat)+atm(n)%q(i,j,k,rainwat)+atm(n)%q(i,j,k,ice_wat)+ & - atm(n)%q(i,j,k,snowwat)+atm(n)%q(i,j,k,graupel) - if( tmp>5.e-6 ) then - a2(i,j) = Atm(n)%pt(i,j,k) - var1(i,j) = 0.01*Atm(n)%pe(i,k,j) - exit - elseif( k==npz ) then - a2(i,j) = Atm(n)%pt(i,j,k) - var1(i,j) = 0.01*Atm(n)%pe(i,k+1,j) ! 
surface pressure - endif - enddo - enddo - enddo - if ( idiag%id_ctt>0 ) then - used = send_data(idiag%id_ctt, a2, Time) - if(prt_minmax) call prt_maxmin('Cloud_top_T (K)', a2, isc, iec, jsc, jec, 0, 1, 1.) - endif - if ( idiag%id_ctp>0 ) then - used = send_data(idiag%id_ctp, var1, Time) - if(prt_minmax) call prt_maxmin('Cloud_top_P (mb)', var1, isc, iec, jsc, jec, 0, 1, 1.) - endif - deallocate ( var1 ) - endif - -! Condensates: - if ( idiag%id_qn>0 .or. idiag%id_qn200>0 .or. idiag%id_qn500>0 .or. idiag%id_qn850>0 ) then -!$OMP parallel do default(shared) - do k=1,npz - do j=jsc,jec - do i=isc,iec - wk(i,j,k) = 0. - enddo - enddo - enddo - if (liq_wat > 0) then -!$OMP parallel do default(shared) +!-------------------------- +! Vertically integrated tracers for GFDL MP +!-------------------------- + if ( idiag%id_intqv>0 ) then + a2 = 0. + if (sphum > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,sphum)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intqv, a2*ginv, Time) + endif + if ( idiag%id_intql>0 ) then + a2 = 0. + if (liq_wat > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,liq_wat)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intql, a2*ginv, Time) + endif + if ( idiag%id_intqi>0 ) then + a2 = 0. + if (ice_wat > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,ice_wat)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intqi, a2*ginv, Time) + endif + if ( idiag%id_intqr>0 ) then + a2 = 0. + if (rainwat > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,rainwat)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intqr, a2*ginv, Time) + endif + if ( idiag%id_intqs>0 ) then + a2 = 0. + if (snowwat > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,snowwat)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intqs, a2*ginv, Time) + endif + if ( idiag%id_intqg>0 ) then + a2 = 0. + if (graupel > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = a2(i,j) + Atm(n)%q(i,j,k,graupel)*Atm(n)%delp(i,j,k) + enddo + enddo + enddo + endif + used = send_data(idiag%id_intqg, a2*ginv, Time) + endif + +! Cloud top temperature & cloud top press: + if ( (idiag%id_ctt>0 .or. idiag%id_ctp>0 .or. idiag%id_ctz>0).and. Atm(n)%flagstruct%nwat==6) then + allocate ( var1(isc:iec,jsc:jec) ) + allocate ( var2(isc:iec,jsc:jec) ) +!$OMP parallel do default(shared) private(tmp) + do j=jsc,jec + do i=isc,iec + do k=2,npz + tmp = atm(n)%q(i,j,k,liq_wat)+atm(n)%q(i,j,k,rainwat)+atm(n)%q(i,j,k,ice_wat)+ & + atm(n)%q(i,j,k,snowwat)+atm(n)%q(i,j,k,graupel) + if( tmp>5.e-6 ) then + a2(i,j) = Atm(n)%pt(i,j,k) + var1(i,j) = 0.01*Atm(n)%pe(i,k,j) + var2(i,j) = wz(i,j,k) - wz(i,j,npz+1) ! height AGL + exit + elseif( k==npz ) then + a2(i,j) = missing_value3 + var1(i,j) = missing_value3 + var2(i,j) = missing_value2 +!!$ a2(i,j) = Atm(n)%pt(i,j,k) +!!$ var1(i,j) = 0.01*Atm(n)%pe(i,k+1,j) ! surface pressure + endif + enddo + enddo + enddo + if ( idiag%id_ctt>0 ) then + used = send_data(idiag%id_ctt, a2, Time) + if(prt_minmax) call prt_maxmin('Cloud_top_T (K)', a2, isc, iec, jsc, jec, 0, 1, 1.) + endif + if ( idiag%id_ctp>0 ) then + used = send_data(idiag%id_ctp, var1, Time) + if(prt_minmax) call prt_maxmin('Cloud_top_P (mb)', var1, isc, iec, jsc, jec, 0, 1, 1.) 
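+         ! (cloud-top height, held in var2 as height AGL, is posted as 'ctz' below)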
+ endif + deallocate ( var1 ) + if ( idiag%id_ctz>0 ) then + used = send_data(idiag%id_ctz, var2, Time) + if(prt_minmax) call prt_maxmin('Cloud_top_z (m)', var2, isc, iec, jsc, jec, 0, 1, 1.) + endif + deallocate ( var2 ) + endif + +! Condensates: + if ( idiag%id_qn>0 .or. idiag%id_qn200>0 .or. idiag%id_qn500>0 .or. idiag%id_qn850>0 ) then +!$OMP parallel do default(shared) + do k=1,npz + do j=jsc,jec + do i=isc,iec + wk(i,j,k) = 0. + enddo + enddo + enddo + if (liq_wat > 0) then +!$OMP parallel do default(shared) do k=1,npz do j=jsc,jec do i=isc,iec @@ -2071,6 +2726,112 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if(idiag%id_ua > 0) used=send_data(idiag%id_ua, Atm(n)%ua(isc:iec,jsc:jec,:), Time) if(idiag%id_va > 0) used=send_data(idiag%id_va, Atm(n)%va(isc:iec,jsc:jec,:), Time) + if(idiag%id_uw > 0 .or. idiag%id_vw > 0 .or. idiag%id_hw > 0 .or. idiag%id_qvw > 0 .or. & + idiag%id_qlw > 0 .or. idiag%id_qiw > 0 .or. idiag%id_o3w > 0 ) then + allocate( a3(isc:iec,jsc:jec,npz) ) + + do k=1,npz + do j=jsc,jec + do i=isc,iec + wk(i,j,k) = Atm(n)%w(i,j,k)*Atm(n)%delp(i,j,k)*ginv + enddo + enddo + enddo + + if (idiag%id_uw > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = Atm(n)%ua(i,j,k)*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_uw, a3, Time) + endif + if (idiag%id_vw > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = Atm(n)%va(i,j,k)*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_vw, a3, Time) + endif + + if (idiag%id_hw > 0) then + allocate(cvm(isc:iec)) + do k=1,npz + do j=jsc,jec +#ifdef USE_COND + call moist_cv(isc,iec,isd,ied,jsd,jed,npz,j,k,Atm(n)%flagstruct%nwat,sphum,liq_wat,rainwat, & + ice_wat,snowwat,graupel,Atm(n)%q,Atm(n)%q_con(isc:iec,j,k),cvm) + do i=isc,iec + a3(i,j,k) = Atm(n)%pt(i,j,k)*cvm(i)*wk(i,j,k) + enddo +#else + cv_vapor = cp_vapor - rvgas + do i=isc,iec + a3(i,j,k) = Atm(n)%pt(i,j,k)*cv_vapor*wk(i,j,k) + enddo +#endif + enddo + enddo + used = send_data(idiag%id_hw, a3, Time) + deallocate(cvm) + endif + + if (idiag%id_qvw > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = Atm(n)%q(i,j,k,sphum)*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_qvw, a3, Time) + endif + if (idiag%id_qlw > 0) then + if (liq_wat < 0 .or. rainwat < 0) call mpp_error(FATAL, 'qlw does not work without liq_wat and rainwat defined') + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = (Atm(n)%q(i,j,k,liq_wat)+Atm(n)%q(i,j,k,rainwat))*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_qlw, a3, Time) + endif + if (idiag%id_qiw > 0) then + if (ice_wat < 0 .or. snowwat < 0 .or. graupel < 0) then + call mpp_error(FATAL, 'qiw does not work without ice_wat, snowwat, and graupel defined') + endif + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = (Atm(n)%q(i,j,k,ice_wat)+Atm(n)%q(i,j,k,snowwat)+Atm(n)%q(i,j,k,graupel))*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_qiw, a3, Time) + endif + if (idiag%id_o3w > 0) then + if (o3mr < 0) then + call mpp_error(FATAL, 'o3w does not work without o3mr defined') + endif + do k=1,npz + do j=jsc,jec + do i=isc,iec + a3(i,j,k) = Atm(n)%q(i,j,k,o3mr)*wk(i,j,k) + enddo + enddo + enddo + used = send_data(idiag%id_o3w, a3, Time) + endif + + deallocate(a3) + endif + if(idiag%id_ke > 0) then a2(:,:) = 0. do k=1,npz @@ -2080,7 +2841,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo enddo enddo -! Mass weighted KE +! 
Mass weighted KE do j=jsc,jec do i=isc,iec a2(i,j) = 0.5*a2(i,j)/(Atm(n)%ps(i,j)-ptop) @@ -2088,38 +2849,30 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo used=send_data(idiag%id_ke, a2, Time) if(prt_minmax) then - tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 1) - if (master) write(*,*) 'SQRT(2.*KE; m/s)=', sqrt(2.*tot_mq) + tot_mq = g_sum( Atm(n)%domain, a2, isc, iec, jsc, jec, ngc, Atm(n)%gridstruct%area_64, 1) + if (master) write(*,*) 'SQRT(2.*KE; m/s)=', sqrt(2.*tot_mq) endif endif #ifdef GFS_PHYS - if(idiag%id_delp > 0 .or. ((.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0)) then + if(idiag%id_delp > 0 .or. idiag%id_cape > 0 .or. idiag%id_cin > 0 .or. ((.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0)) then do k=1,npz do j=jsc,jec - do i=isc,iec - if ( Atm(n)%flagstruct%nwat .eq. 2) then - wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-Atm(n)%q(i,j,k,liq_wat)) - elseif ( Atm(n)%flagstruct%nwat .eq. 6) then - wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-Atm(n)%q(i,j,k,liq_wat)-& - Atm(n)%q(i,j,k,ice_wat)-& - Atm(n)%q(i,j,k,rainwat)-& - Atm(n)%q(i,j,k,snowwat)-& - Atm(n)%q(i,j,k,graupel)) - endif + do i=isc,iec + wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-sum(Atm(n)%q(i,j,k,2:Atm(n)%flagstruct%nwat))) enddo enddo enddo if (idiag%id_delp > 0) used=send_data(idiag%id_delp, wk, Time) endif - if( (.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0) then + if( ( (.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0) .or. idiag%id_cape > 0 .or. idiag%id_cin > 0) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = -wk(i,j,k)/(Atm(n)%delz(i,j,k)*grav)*rdgas* & - Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) + Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) enddo enddo enddo @@ -2131,7 +2884,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) #else if(idiag%id_delp > 0) used=send_data(idiag%id_delp, Atm(n)%delp(isc:iec,jsc:jec,:), Time) - if( (.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_pfnh > 0) then + if( (.not. Atm(n)%flagstruct%hydrostatic) .and. (idiag%id_pfnh > 0 .or. idiag%id_cape > 0 .or. idiag%id_cin > 0)) then do k=1,npz do j=jsc,jec do i=isc,iec @@ -2144,27 +2897,65 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif #endif - if((.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_delz > 0) then + if( Atm(n)%flagstruct%hydrostatic .and. (idiag%id_pfhy > 0 .or. idiag%id_cape > 0 .or. idiag%id_cin > 0) ) then do k=1,npz do j=jsc,jec do i=isc,iec - wk(i,j,k) = -Atm(n)%delz(i,j,k) + wk(i,j,k) = 0.5 *(Atm(n)%pe(i,k,j)+Atm(n)%pe(i,k+1,j)) enddo enddo enddo - used=send_data(idiag%id_delz, wk, Time) + used=send_data(idiag%id_pfhy, wk, Time) + endif + + if (idiag%id_cape > 0 .or. idiag%id_cin > 0) then + !wk here contains layer-mean pressure + + allocate(var2(isc:iec,jsc:jec)) + allocate(a3(isc:iec,jsc:jec,npz)) + + call eqv_pot(a3, Atm(n)%pt, Atm(n)%delp, Atm(n)%delz, Atm(n)%peln, Atm(n)%pkz, Atm(n)%q(isd,jsd,1,sphum), & + isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys) + +!$OMP parallel do default(shared) + do j=jsc,jec + do i=isc,iec + a2(i,j) = 0. + var2(i,j) = 0. + + call getcape(npz, wk(i,j,:), Atm(n)%pt(i,j,:), -Atm(n)%delz(i,j,:), Atm(n)%q(i,j,:,sphum), a3(i,j,:), a2(i,j), var2(i,j), source_in=1) + enddo + enddo + + if (idiag%id_cape > 0) then + if (prt_minmax) then + call prt_maxmin(' CAPE (J/kg)', a2, isc,iec,jsc,jec, 0, 1, 1.) 
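+            ! (CAPE in a2 and CIN in var2 both come from the single getcape call above)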
+ endif + used=send_data(idiag%id_cape, a2, Time) + endif + if (idiag%id_cin > 0) then + if (prt_minmax) then + call prt_maxmin(' CIN (J/kg)', var2, isc,iec,jsc,jec, 0, 1, 1.) + endif + used=send_data(idiag%id_cin, var2, Time) + endif + + deallocate(var2) + deallocate(a3) + endif - - if( Atm(n)%flagstruct%hydrostatic .and. idiag%id_pfhy > 0 ) then + + + if((.not. Atm(n)%flagstruct%hydrostatic) .and. idiag%id_delz > 0) then do k=1,npz do j=jsc,jec - do i=isc,iec - wk(i,j,k) = 0.5 *(Atm(n)%pe(i,k,j)+Atm(n)%pe(i,k+1,j)) + do i=isc,iec + wk(i,j,k) = -Atm(n)%delz(i,j,k) enddo enddo enddo - used=send_data(idiag%id_pfhy, wk, Time) - endif + used=send_data(idiag%id_delz, wk, Time) + endif ! pressure for masking p-level fields @@ -2190,7 +2981,8 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) used=send_data(idiag%id_pmaskv2, a2, Time) endif - if ( idiag%id_u100m>0 .or. idiag%id_v100m>0 .or. idiag%id_w100m>0 .or. idiag%id_w5km>0 .or. idiag%id_w2500m>0 .or. idiag%id_basedbz .or. idiag%id_dbz4km) then + if ( idiag%id_u100m>0 .or. idiag%id_v100m>0 .or. idiag%id_w100m>0 .or. idiag%id_w5km>0 .or. idiag%id_w2500m>0 & + & .or. idiag%id_w1km>0 .or. idiag%id_basedbz>0 .or. idiag%id_dbz4km>0) then if (.not.allocated(wz)) allocate ( wz(isc:iec,jsc:jec,npz+1) ) if ( Atm(n)%flagstruct%hydrostatic) then rgrav = 1. / grav @@ -2230,17 +3022,22 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) used=send_data(idiag%id_rain5km, a2, Time) if(prt_minmax) call prt_maxmin('rain5km', a2, isc, iec, jsc, jec, 0, 1, 1.) endif - if ( idiag%id_w5km>0 ) then + if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. idiag%id_w5km>0 ) then call interpolate_z(isc, iec, jsc, jec, npz, 5.e3, wz, Atm(n)%w(isc:iec,jsc:jec,:), a2) used=send_data(idiag%id_w5km, a2, Time) if(prt_minmax) call prt_maxmin('W5km', a2, isc, iec, jsc, jec, 0, 1, 1.) endif - if ( idiag%id_w2500m>0 ) then + if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. idiag%id_w2500m>0 ) then call interpolate_z(isc, iec, jsc, jec, npz, 2.5e3, wz, Atm(n)%w(isc:iec,jsc:jec,:), a2) used=send_data(idiag%id_w2500m, a2, Time) if(prt_minmax) call prt_maxmin('W2500m', a2, isc, iec, jsc, jec, 0, 1, 1.) endif - if ( idiag%id_w100m>0 ) then + if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. idiag%id_w1km>0 ) then + call interpolate_z(isc, iec, jsc, jec, npz, 1.e3, wz, Atm(n)%w(isc:iec,jsc:jec,:), a2) + used=send_data(idiag%id_w1km, a2, Time) + if(prt_minmax) call prt_maxmin('W1km', a2, isc, iec, jsc, jec, 0, 1, 1.) + endif + if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. idiag%id_w100m>0 ) then call interpolate_z(isc, iec, jsc, jec, npz, 100., wz, Atm(n)%w(isc:iec,jsc:jec,:), a2) used=send_data(idiag%id_w100m, a2, Time) if(prt_minmax) call prt_maxmin('w100m', a2, isc, iec, jsc, jec, 0, 1, 1.) @@ -2256,30 +3053,61 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) if(prt_minmax) call prt_maxmin('v100m', a2, isc, iec, jsc, jec, 0, 1, 1.) endif - if ( rainwat > 0 .and. (idiag%id_dbz>0 .or. idiag%id_maxdbz>0 .or. idiag%id_basedbz>0 .or. idiag%id_dbz4km)) then + if ( rainwat > 0 .and. (idiag%id_dbz>0 .or. idiag%id_maxdbz>0 .or. idiag%id_basedbz>0 .or. idiag%id_dbz4km>0 & + & .or. idiag%id_dbztop>0 .or. idiag%id_dbz_m10C>0)) then if (.not. allocated(a3)) allocate(a3(isc:iec,jsc:jec,npz)) +! call dbzcalc_smithxue(Atm(n)%q, Atm(n)%pt, Atm(n)%delp, Atm(n)%peln, Atm(n)%delz, & call dbzcalc(Atm(n)%q, Atm(n)%pt, Atm(n)%delp, Atm(n)%peln, Atm(n)%delz, & a3, a2, allmax, Atm(n)%bd, npz, Atm(n)%ncnst, Atm(n)%flagstruct%hydrostatic, & - zvir, .false., .false., .false., .true. ) ! 
Lin MP has constant N_0 intercept
+              zvir, .false., .false., .false., .true. )   ! GFDL MP has constant N_0 intercept
+
+       if (idiag%id_dbz > 0)    used=send_data(idiag%id_dbz, a3, time)
+       if (idiag%id_maxdbz > 0) used=send_data(idiag%id_maxdbz, a2, time)

-       if (idiag%id_dbz > 0) then
-          used=send_data(idiag%id_dbz, a3, time)
-       endif
-       if (idiag%id_maxdbz > 0) then
-          used=send_data(idiag%id_maxdbz, a2, time)
-       endif
        if (idiag%id_basedbz > 0) then
           !interpolate to 1km dbz
-          call interpolate_z(isc, iec, jsc, jec, npz, 1000., wz, a3, a2)
+          call cs_interpolator(isc, iec, jsc, jec, npz, a3, 1000., wz, a2, -20.)
           used=send_data(idiag%id_basedbz, a2, time)
+          if(prt_minmax) call prt_maxmin('Base_dBz', a2, isc, iec, jsc, jec, 0, 1, 1.)
        endif
+
        if (idiag%id_dbz4km > 0) then
           !interpolate to 4km dbz
-          call interpolate_z(isc, iec, jsc, jec, npz, 4000., wz, a3, a2)
+          call cs_interpolator(isc, iec, jsc, jec, npz, a3, 4000., wz, a2, -20.)
           used=send_data(idiag%id_dbz4km, a2, time)
        endif
+       if (idiag%id_dbztop > 0) then
+          do j=jsc,jec
+          do i=isc,iec
+             a2(i,j) = missing_value2
+             do k=2,npz
+                if (wz(i,j,k) >= 25000. ) cycle ! nothing above 25 km
+                if (a3(i,j,k) >= 18.5 ) then
+                   a2(i,j) = wz(i,j,k)
+                   exit
+                endif
+             enddo
+          enddo
+          enddo
+          used=send_data(idiag%id_dbztop, a2, time)
+       endif
+       if (idiag%id_dbz_m10C > 0) then
+          do j=jsc,jec
+          do i=isc,iec
+             a2(i,j) = missing_value
+             do k=npz,1,-1
+                if (wz(i,j,k) >= 25000. ) exit ! nothing above 25 km
+                if (Atm(n)%pt(i,j,k) <= 263.14 ) then
+                   a2(i,j) = a3(i,j,k)
+                   exit
+                endif
+             enddo
+          enddo
+          enddo
+          used=send_data(idiag%id_dbz_m10C, a2, time)
+       endif

        if (prt_minmax) then
           call mpp_max(allmax)
@@ -2288,8 +3116,6 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
           deallocate(a3)
        endif

-    if( allocated(wz) ) deallocate (wz)
-
 !-------------------------------------------------------
 ! Applying cubic-spline as the interpolator for (u,v,T,q)
@@ -2402,6 +3228,12 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
            used=send_data(idiag%id_omg_plev, a3(isc:iec,jsc:jec,:), Time)
         endif

+       if ( idiag%id_x850>0 .and. idiag%id_vort850>0 ) then
+            x850(:,:) = x850(:,:)*a2(:,:)
+            used=send_data(idiag%id_x850, x850, Time)
+            deallocate ( x850 )
+       endif
+
      if( allocated(a3) ) deallocate (a3)
 ! *** End cs_intp

@@ -2444,7 +3276,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
              used=send_data(idiag%id_w850, a2, Time)

          if ( idiag%id_x850>0 .and. idiag%id_vort850>0 ) then
-             x850(:,:) = x850(:,:)*a2(:,:)
+             x850(:,:) = x850(:,:)*a2(:,:)
              used=send_data(idiag%id_x850, x850, Time)
              deallocate ( x850 )
         endif
@@ -2454,54 +3286,75 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
     if ( .not.Atm(n)%flagstruct%hydrostatic .and. idiag%id_w>0 ) then
        used=send_data(idiag%id_w, Atm(n)%w(isc:iec,jsc:jec,:), Time)
     endif
+    if ( .not. Atm(n)%flagstruct%hydrostatic .and. (idiag%id_wmaxup>0 .or. idiag%id_wmaxdn>0) ) then
+       allocate(var2(isc:iec,jsc:jec))
+       do j=jsc,jec
+       do i=isc,iec
+          a2(i,j) = 0.
+          var2(i,j) = 0. 
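+          ! Record the strongest updraft (max w) and downdraft (min w) in the
+          ! column, skipping levels at or above the 400-hPa surface (cycle below).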
+          do k=3,npz
+             if (Atm(n)%pe(i,k,j) <= 400.e2) cycle
+             a2(i,j) = max(a2(i,j),Atm(n)%w(i,j,k))
+             var2(i,j) = min(var2(i,j),Atm(n)%w(i,j,k))
+          enddo
+       enddo
+       enddo
+       if (idiag%id_wmaxup > 0) then
+          used=send_data(idiag%id_wmaxup, a2, Time)
+       endif
+       if (idiag%id_wmaxdn > 0) then
+          used=send_data(idiag%id_wmaxdn, var2, Time)
+       endif
+       deallocate(var2)
+    endif

     if(idiag%id_pt   > 0) used=send_data(idiag%id_pt  , Atm(n)%pt  (isc:iec,jsc:jec,:), Time)
     if(idiag%id_omga > 0) used=send_data(idiag%id_omga, Atm(n)%omga(isc:iec,jsc:jec,:), Time)

     allocate( a3(isc:iec,jsc:jec,npz) )
-    if(idiag%id_theta_e > 0) then
-
-       if ( Atm(n)%flagstruct%adiabatic .and. Atm(n)%flagstruct%kord_tm>0 ) then
-          do k=1,npz
-          do j=jsc,jec
-             do i=isc,iec
-                a3(i,j,k) = Atm(n)%pt(i,j,k)
-             enddo
-          enddo
-          enddo
-       else
-          call eqv_pot(a3, Atm(n)%pt, Atm(n)%delp, Atm(n)%delz, Atm(n)%peln, Atm(n)%pkz, Atm(n)%q(isd,jsd,1,sphum),   &
-                       isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys)
-       endif
+    if(idiag%id_theta_e > 0 ) then

-       if( prt_minmax ) call prt_maxmin('Theta_E', a3, isc, iec, jsc, jec, 0, npz, 1.)
-       used=send_data(idiag%id_theta_e, a3, Time)
-       theta_d = get_tracer_index (MODEL_ATMOS, 'theta_d')
-       if ( theta_d>0 ) then
-!
-          if( prt_minmax ) then
-!   Check level-34 ~ 300 mb
-             a2(:,:) = 0.
+       if ( Atm(n)%flagstruct%adiabatic .and. Atm(n)%flagstruct%kord_tm>0 ) then
           do k=1,npz
           do j=jsc,jec
             do i=isc,iec
-                a2(i,j) = a2(i,j) + Atm(n)%delp(i,j,k)*(Atm(n)%q(i,j,k,theta_d)-a3(i,j,k))**2
+                a3(i,j,k) = Atm(n)%pt(i,j,k)
            enddo
         enddo
         enddo
-             call prt_mxm('PT_SUM', a2, isc, iec, jsc, jec, 0, 1, 1.e-5, Atm(n)%gridstruct%area_64, Atm(n)%domain)
-
-             do k=1,npz
+       else
+          call eqv_pot(a3, Atm(n)%pt, Atm(n)%delp, Atm(n)%delz, Atm(n)%peln, Atm(n)%pkz, Atm(n)%q(isd,jsd,1,sphum),   &
+                       isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys)
+       endif
+       if( prt_minmax ) call prt_maxmin('Theta_E', a3, isc, iec, jsc, jec, 0, npz, 1.)
+       used=send_data(idiag%id_theta_e, a3, Time)
+
+       theta_d = get_tracer_index (MODEL_ATMOS, 'theta_d')
+       if ( theta_d>0 ) then
+          if( prt_minmax ) then
+             ! Check level-34 ~ 300 mb
+             a2(:,:) = 0.
+             do k=1,npz
                do j=jsc,jec
-                do i=isc,iec
-                   a3(i,j,k) = Atm(n)%q(i,j,k,theta_d)/a3(i,j,k) - 1.
-                enddo
+                   do i=isc,iec
+                      a2(i,j) = a2(i,j) + Atm(n)%delp(i,j,k)*(Atm(n)%q(i,j,k,theta_d)-a3(i,j,k))**2
                enddo
-             enddo
-             call prt_maxmin('Theta_Err (%)', a3, isc, iec, jsc, jec, 0, npz, 100.)
-!            if ( master ) write(*,*) 'PK0=', pk0, 'KAPPA=', kappa
-          endif
+             enddo
+             enddo
+             call prt_mxm('PT_SUM', a2, isc, iec, jsc, jec, 0, 1, 1.e-5, Atm(n)%gridstruct%area_64, Atm(n)%domain)
+
+             do k=1,npz
+                do j=jsc,jec
+                   do i=isc,iec
+                      a3(i,j,k) = Atm(n)%q(i,j,k,theta_d)/a3(i,j,k) - 1.
+                   enddo
+                enddo
+             enddo
+             call prt_maxmin('Theta_Err (%)', a3, isc, iec, jsc, jec, 0, npz, 100.)
+             ! if ( master ) write(*,*) 'PK0=', pk0, 'KAPPA=', kappa
+          endif
        endif
+    endif

     if(idiag%id_ppt> 0) then
@@ -2511,8 +3364,20 @@ subroutine fv_diag(Atm, zvir, Time, print_freq)
 #ifdef TEST_GWAVES
     call gw_1d(npz, 1000.E2, Atm(n)%ak, Atm(n)%ak, Atm(n)%ak(1), 10.E3, idiag%pt1)
 #else
-    idiag%pt1 = 0.
+    idiag%pt1 = 0.
 #endif
+    if (.not. Atm(n)%flagstruct%hydrostatic) then
+       do k=1,npz
+          do j=jsc,jec
+             do i=isc,iec
+                wk(i,j,k) = (Atm(n)%pt(i,j,k)*exp(-kappa*log(-Atm(n)%delp(i,j,k)/(Atm(n)%delz(i,j,k)*grav)*rdgas* &
+                            Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)))) - idiag%pt1(k)) * pk0
+!               Atm(n)%pkz(i,j,k) = exp(kappa*log(-Atm(n)%delp(i,j,k)/(Atm(n)%delz(i,j,k)*grav)*rdgas* &
+! 
Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)))) + enddo + enddo + enddo + else do k=1,npz do j=jsc,jec do i=isc,iec @@ -2521,6 +3386,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) enddo enddo enddo + endif used=send_data(idiag%id_ppt, wk, Time) if( prt_minmax ) then @@ -2532,51 +3398,269 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) endif -#ifndef SW_DYNAMICS - do itrac=1, Atm(n)%ncnst - call get_tracer_names (MODEL_ATMOS, itrac, tname) - if (idiag%id_tracer(itrac) > 0 .and. itrac.gt.nq) then - used = send_data (idiag%id_tracer(itrac), Atm(n)%qdiag(isc:iec,jsc:jec,:,itrac), Time ) - else - used = send_data (idiag%id_tracer(itrac), Atm(n)%q(isc:iec,jsc:jec,:,itrac), Time ) - endif - if (itrac .le. nq) then - if( prt_minmax ) call prt_maxmin(trim(tname), Atm(n)%q(:,:,1,itrac), & - isc, iec, jsc, jec, ngc, npz, 1.) - else - if( prt_minmax ) call prt_maxmin(trim(tname), Atm(n)%qdiag(:,:,1,itrac), & - isc, iec, jsc, jec, ngc, npz, 1.) - endif -!------------------------------- -! ESM TRACER diagnostics output: -! jgj: per SJ email (jul 17 2008): q_dry = q_moist/(1-sphum) -! mass mixing ratio: q_dry = mass_tracer/mass_dryair = mass_tracer/(mass_air - mass_water) ~ q_moist/(1-sphum) -! co2_mmr = (wco2/wair) * co2_vmr -! Note: There is a check to ensure tracer number one is sphum +#ifndef SW_DYNAMICS + do itrac=1, Atm(n)%ncnst + call get_tracer_names (MODEL_ATMOS, itrac, tname) + if (idiag%id_tracer(itrac) > 0 .and. itrac.gt.nq) then + used = send_data (idiag%id_tracer(itrac), Atm(n)%qdiag(isc:iec,jsc:jec,:,itrac), Time ) + else + used = send_data (idiag%id_tracer(itrac), Atm(n)%q(isc:iec,jsc:jec,:,itrac), Time ) + endif + if (itrac .le. nq) then + if( prt_minmax ) call prt_maxmin(trim(tname), Atm(n)%q(:,:,1,itrac), & + isc, iec, jsc, jec, ngc, npz, 1.) + else + if( prt_minmax ) call prt_maxmin(trim(tname), Atm(n)%qdiag(:,:,1,itrac), & + isc, iec, jsc, jec, ngc, npz, 1.) + endif +!------------------------------- +! ESM TRACER diagnostics output: +! jgj: per SJ email (jul 17 2008): q_dry = q_moist/(1-sphum) +! mass mixing ratio: q_dry = mass_tracer/mass_dryair = mass_tracer/(mass_air - mass_water) ~ q_moist/(1-sphum) +! co2_mmr = (wco2/wair) * co2_vmr +! Note: There is a check to ensure tracer number one is sphum + + if (idiag%id_tracer_dmmr(itrac) > 0 .or. idiag%id_tracer_dvmr(itrac) > 0) then + if (itrac .gt. nq) then + dmmr(:,:,:) = Atm(n)%qdiag(isc:iec,jsc:jec,1:npz,itrac) & + /(1.0-Atm(n)%q(isc:iec,jsc:jec,1:npz,1)) + else + dmmr(:,:,:) = Atm(n)%q(isc:iec,jsc:jec,1:npz,itrac) & + /(1.0-Atm(n)%q(isc:iec,jsc:jec,1:npz,1)) + endif + dvmr(:,:,:) = dmmr(isc:iec,jsc:jec,1:npz) * WTMAIR/idiag%w_mr(itrac) + used = send_data (idiag%id_tracer_dmmr(itrac), dmmr, Time ) + used = send_data (idiag%id_tracer_dvmr(itrac), dvmr, Time ) + if( prt_minmax ) then + call prt_maxmin(trim(tname)//'_dmmr', dmmr, & + isc, iec, jsc, jec, 0, npz, 1.) + call prt_maxmin(trim(tname)//'_dvmr', dvmr, & + isc, iec, jsc, jec, 0, npz, 1.) + endif + endif + enddo +!---------------------------------- +! compute 3D flux terms +!---------------------------------- + allocate ( a4(isc:iec,jsc:jec,npz) ) + + ! zonal moisture flux + if(idiag%id_uq > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%ua(i,j,k) * Atm(n)%q(i,j,k,sphum) + enddo + enddo + enddo + used=send_data(idiag%id_uq, a4, Time) + if(idiag%id_iuq > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iuq, a2, Time) + endif + endif + ! 
meridional moisture flux + if(idiag%id_vq > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%va(i,j,k) * Atm(n)%q(i,j,k,sphum) + enddo + enddo + enddo + used=send_data(idiag%id_vq, a4, Time) + if(idiag%id_ivq > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_ivq, a2, Time) + endif + endif + + ! zonal heat flux + if(idiag%id_ut > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%ua(i,j,k) * Atm(n)%pt(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_ut, a4, Time) + if(idiag%id_iut > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iut, a2, Time) + endif + endif + ! meridional heat flux + if(idiag%id_vt > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%va(i,j,k) * Atm(n)%pt(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_vt, a4, Time) + if(idiag%id_ivt > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_ivt, a2, Time) + endif + endif + + ! zonal flux of u + if(idiag%id_uu > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%ua(i,j,k) * Atm(n)%ua(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_uu, a4, Time) + if(idiag%id_iuu > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iuu, a2, Time) + endif + endif + ! zonal flux of v + if(idiag%id_uv > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%ua(i,j,k) * Atm(n)%va(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_uv, a4, Time) + if(idiag%id_iuv > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iuv, a2, Time) + endif + endif + ! meridional flux of v + if(idiag%id_vv > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%va(i,j,k) * Atm(n)%va(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_vv, a4, Time) + if(idiag%id_ivv > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_ivv, a2, Time) + endif + endif + +! terms related with vertical wind ( Atm(n)%w ): + if(.not.Atm(n)%flagstruct%hydrostatic) then + ! vertical moisture flux + if(idiag%id_wq > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%w(i,j,k) * Atm(n)%q(i,j,k,sphum) + enddo + enddo + enddo + used=send_data(idiag%id_wq, a4, Time) + if(idiag%id_iwq > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iwq, a2, Time) + endif + endif + ! vertical heat flux + if(idiag%id_wt > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%w(i,j,k) * Atm(n)%pt(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_wt, a4, Time) + if(idiag%id_iwt > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iwt, a2, Time) + endif + endif + ! zonal flux of w + if(idiag%id_uw > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%ua(i,j,k) * Atm(n)%w(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_uw, a4, Time) + if(idiag%id_iuw > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iuw, a2, Time) + endif + endif + ! 
meridional flux of w + if(idiag%id_vw > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%va(i,j,k) * Atm(n)%w(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_vw, a4, Time) + if(idiag%id_ivw > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_ivw, a2, Time) + endif + endif + ! vertical flux of w + if(idiag%id_ww > 0) then + do k=1,npz + do j=jsc,jec + do i=isc,iec + a4(i,j,k) = Atm(n)%w(i,j,k) * Atm(n)%w(i,j,k) + enddo + enddo + enddo + used=send_data(idiag%id_ww, a4, Time) + if(idiag%id_iww > 0) then + call z_sum(isc, iec, jsc, jec, npz, 0, Atm(n)%delp(isc:iec,jsc:jec,1:npz), a4, a2) + used=send_data(idiag%id_iww, a2, Time) + endif + endif + endif + + deallocate ( a4 ) - if (idiag%id_tracer_dmmr(itrac) > 0 .or. idiag%id_tracer_dvmr(itrac) > 0) then - if (itrac .gt. nq) then - dmmr(:,:,:) = Atm(n)%qdiag(isc:iec,jsc:jec,1:npz,itrac) & - /(1.0-Atm(n)%q(isc:iec,jsc:jec,1:npz,1)) - else - dmmr(:,:,:) = Atm(n)%q(isc:iec,jsc:jec,1:npz,itrac) & - /(1.0-Atm(n)%q(isc:iec,jsc:jec,1:npz,1)) - endif - dvmr(:,:,:) = dmmr(isc:iec,jsc:jec,1:npz) * WTMAIR/idiag%w_mr(itrac) - used = send_data (idiag%id_tracer_dmmr(itrac), dmmr, Time ) - used = send_data (idiag%id_tracer_dvmr(itrac), dvmr, Time ) - if( prt_minmax ) then - call prt_maxmin(trim(tname)//'_dmmr', dmmr, & - isc, iec, jsc, jec, 0, npz, 1.) - call prt_maxmin(trim(tname)//'_dvmr', dvmr, & - isc, iec, jsc, jec, 0, npz, 1.) - endif - endif - enddo +! Maximum overlap cloud fraction + if ( .not. Atm(n)%gridstruct%bounded_domain ) then + if ( cld_amt > 0 .and. prt_minmax ) then + a2(:,:) = 0. + do k=1,npz + do j=jsc,jec + do i=isc,iec + a2(i,j) = max(a2(i,j), Atm(n)%q(i,j,k,cld_amt) ) + enddo + enddo + enddo + call prt_gb_nh_sh('Max_cld GB_NH_SH_EQ',isc,iec, jsc,jec, a2, Atm(n)%gridstruct%area_64(isc:iec,jsc:jec), & + Atm(n)%gridstruct%agrid_64(isc:iec,jsc:jec,2)) + endif + endif +#endif + if (do_diag_debug) then + call debug_column(Atm(n)%pt, Atm(n)%delp, Atm(n)%delz, Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%q, & + Atm(n)%npz, Atm(n)%ncnst, sphum, Atm(n)%flagstruct%nwat, Atm(n)%flagstruct%hydrostatic, Atm(n)%bd, Time) + endif -#endif + if (prt_sounding) then + call sounding_column(Atm(n)%pt, Atm(n)%delp, Atm(n)%delz, Atm(n)%u, Atm(n)%v, Atm(n)%q, Atm(n)%peln, Atm(n)%pkz, Atm(n)%phis, & + Atm(n)%npz, Atm(n)%ncnst, sphum, Atm(n)%flagstruct%nwat, Atm(n)%flagstruct%hydrostatic, Atm(n)%flagstruct%moist_phys, & + zvir, Atm(n)%ng, Atm(n)%bd, Time) + endif ! enddo ! end ntileMe do-loop @@ -2668,7 +3752,7 @@ subroutine get_height_field(is, ie, js, je, ng, km, hydrostatic, delz, wz, pt, q real, intent(in):: peln(is:ie,km+1,js:je) real, intent(in):: pt(is-ng:ie+ng,js-ng:je+ng,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) ! 
water vapor
-  real, intent(in):: delz(is-ng:,js-ng:,1:)
+  real, intent(in):: delz(is:,js:,1:)
   real, intent(in):: zvir
   logical, intent(in):: hydrostatic
   real, intent(out):: wz(is:ie,js:je,km+1)
@@ -2700,7 +3784,7 @@ subroutine get_height_field(is, ie, js, je, ng, km, hydrostatic, delz, wz, pt, q

  end subroutine get_height_field

- subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_range)
+ subroutine range_check_3d(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_range, Time)
      character(len=*), intent(in):: qname
      integer, intent(in):: is, ie, js, je
      integer, intent(in):: n_g, km
@@ -2708,11 +3792,13 @@ subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_
      real, intent(in):: pos(is-n_g:ie+n_g, js-n_g:je+n_g,2)
      real, intent(in):: q_low, q_hi
      logical, optional, intent(out):: bad_range
+     type(time_type), optional, intent(IN) :: Time
!
      real qmin, qmax
      integer i,j,k
+     integer year, month, day, hour, minute, second

-     if ( present(bad_range) ) bad_range = .false.
+     if ( present(bad_range) ) bad_range = .false.

      qmin = q(is,js,1)
      qmax = qmin
@@ -2733,8 +3819,13 @@ subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_

      if( qmin<q_low .or. qmax>q_hi ) then
          if(master) write(*,*) 'Range_check Warning:', qname, ' max = ', qmax, ' min = ', qmin
+         if (present(Time)) then
+            call get_date(Time, year, month, day, hour, minute, second)
+            if (master) write(*,999) year, month, day, hour, minute, second
+999 format(' Range violation on: ', I4, '/', I02, '/', I02, ' ', I02, ':', I02, ':', I02)
+         endif
          if ( present(bad_range) ) then
-             bad_range = .true.
+             bad_range = .true.
          endif
      endif

@@ -2745,17 +3836,78 @@ subroutine range_check(qname, q, is, ie, js, je, n_g, km, pos, q_low, q_hi, bad_
          do j=js,je
            do i=is,ie
              if( q(i,j,k)<q_low .or. q(i,j,k)>q_hi ) then
-                 write(*,*) 'Crash_K=',k,'(i,j)=',i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, q(i,j,k)
-                 if ( k/= 1 ) write(*,*) k-1, q(i,j,k-1)
-                 if ( k/=km ) write(*,*) k+1, q(i,j,k+1)
+                 write(*,998) k,i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, qname, q(i,j,k)
+!                write(*,*) 'Warn_K=',k,'(i,j)=',i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, q(i,j,k)
+998 format('Warn_K=',I4,' (i,j)=',2I5,' (lon,lat)=',f7.3,1x,f7.3,1x, A,' =',f10.5)
+997 format(' K=',I4,3x,f10.5)
+                 if ( k/= 1 ) write(*,997) k-1, q(i,j,k-1)
+                 if ( k/=km ) write(*,997) k+1, q(i,j,k+1)
              endif
            enddo
         enddo
      enddo
-    call mpp_error(FATAL,'==> Error from range_check: data out of bound')
+    call mpp_error(NOTE,'==> Error from range_check: data out of bound')
+    endif
+
+ end subroutine range_check_3d
+
+ subroutine range_check_2d(qname, q, is, ie, js, je, n_g, pos, q_low, q_hi, bad_range, Time)
+     character(len=*), intent(in):: qname
+     integer, intent(in):: is, ie, js, je
+     integer, intent(in):: n_g
+     real, intent(in):: q(is-n_g:ie+n_g, js-n_g:je+n_g)
+     real, intent(in):: pos(is-n_g:ie+n_g, js-n_g:je+n_g,2)
+     real, intent(in):: q_low, q_hi
+     logical, optional, intent(out):: bad_range
+     type(time_type), optional, intent(IN) :: Time
+!
+     real qmin, qmax
+     integer i,j
+     integer year, month, day, hour, minute, second
+
+     if ( present(bad_range) ) bad_range = .false.
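+! The scan below mirrors range_check_3d for a single level: find the
+! local extrema, reduce across PEs, and warn (with the model date when
+! Time is present).  A sketch of a typical call, for illustration only
+! (slp, slp_lo, slp_hi and bad are hypothetical caller variables):
+!     call range_check_2d('SLP', slp, isc, iec, jsc, jec, ngc, &
+!                         Atm(n)%gridstruct%agrid, slp_lo, slp_hi, &
+!                         bad_range=bad, Time=fv_time)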
+     qmin = q(is,js)
+     qmax = qmin
+
+     do j=js,je
+        do i=is,ie
+           if( q(i,j) < qmin ) then
+               qmin = q(i,j)
+           elseif( q(i,j) > qmax ) then
+               qmax = q(i,j)
+           endif
+        enddo
+     enddo
+
+     call mp_reduce_min(qmin)
+     call mp_reduce_max(qmax)
+
+     if( qmin<q_low .or. qmax>q_hi ) then
+         if(master) write(*,*) 'Range_check Warning:', qname, ' max = ', qmax, ' min = ', qmin
+         if (present(Time)) then
+            call get_date(Time, year, month, day, hour, minute, second)
+            if (master) write(*,999) year, month, day, hour, minute, second
+999 format(' Range violation on: ', I4, '/', I02, '/', I02, ' ', I02, ':', I02, ':', I02)
+         endif
+         if ( present(bad_range) ) then
+            bad_range = .true.
+         endif
+     endif
+
+     if ( present(bad_range) ) then
+! Print out where the bad value(s) is (are)
+         if ( bad_range .EQV. .false. ) return
+         do j=js,je
+            do i=is,ie
+               if( q(i,j)<q_low .or. q(i,j)>q_hi ) then
+                   write(*,*) 'Warn_(i,j)=',i,j, pos(i,j,1)*rad2deg, pos(i,j,2)*rad2deg, q(i,j)
+               endif
+            enddo
+         enddo
+         call mpp_error(NOTE,'==> Error from range_check: data out of bound')
     endif

- end subroutine range_check
+ end subroutine range_check_2d

 subroutine prt_maxmin(qname, q, is, ie, js, je, n_g, km, fac)
  character(len=*), intent(in):: qname
@@ -2777,7 +3929,7 @@ subroutine prt_maxmin(qname, q, is, ie, js, je, n_g, km, fac)
   do i=is,ie
!     qmin = min(qmin, q(i,j,k))
!     qmax = max(qmax, q(i,j,k))
-      if( q(i,j,k) < qmin ) then
+      if( q(i,j,k) < qmin ) then
          qmin = q(i,j,k)
      elseif( q(i,j,k) > qmax ) then
          qmax = q(i,j,k)
@@ -2833,8 +3985,8 @@ subroutine prt_mxm(qname, q, is, ie, js, je, n_g, km, fac, area, domain)
   call mp_reduce_max(qmax)

! SJL: BUG!!!
-!     gmean = g_sum(domain, q(is,js,km), is, ie, js, je, 3, area, 1)
-     gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1)
+!     gmean = g_sum(domain, q(is,js,km), is, ie, js, je, 3, area, 1)
+     gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1)

  if(master) write(6,*) qname, gn, qmax*fac, qmin*fac, gmean*fac

@@ -2867,14 +4019,14 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain
   graupel = get_tracer_index (MODEL_ATMOS, 'graupel')

   if ( nwat==0 ) then
-      psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1)
+      psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1)
      if( master ) write(*,*) 'Total surface pressure (mb)', trim(gn), ' = ', 0.01*psmo
-      call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,1 ), psqv(is,js))
+      call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,1 ), psqv(is,js))
      return
  endif

  psq(:,:,:) = 0.
- call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,sphum ), psq(is,js,sphum ))
+ call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,sphum ), psq(is,js,sphum ))

  if (liq_wat > 0) &
      call z_sum(is, ie, js, je, km, n_g, delp, q(is-n_g,js-n_g,1,liq_wat), psq(is,js,liq_wat))
@@ -2899,7 +4051,7 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain
     if ( idiag%phalf(k+1) > 75. ) exit
     kstrat = k
  enddo
- call z_sum(is, ie, js, je, kstrat, n_g, delp, q(is-n_g,js-n_g,1,sphum), q_strat(is,js))
+ call z_sum(is, ie, js, je, kstrat, n_g, delp, q(is-n_g,js-n_g,1,sphum), q_strat(is,js))
  psmo = g_sum(domain, q_strat(is,js), is, ie, js, je, n_g, area, 1) * 1.e6 &
       / p_sum(is, ie, js, je, kstrat, n_g, delp, area, domain)
  if(master) write(*,*) 'Mean specific humidity (mg/kg) above 75 mb', trim(gn), '=', psmo
@@ -2909,10 +4061,10 @@ subroutine prt_mass(km, nq, is, ie, js, je, n_g, nwat, ps, delp, q, area, domain

!-------------------
! 
Check global means !------------------- - psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1) + psmo = g_sum(domain, ps(is:ie,js:je), is, ie, js, je, n_g, area, 1) do n=1,nwat - qtot(n) = g_sum(domain, psq(is,js,n), is, ie, js, je, n_g, area, 1) + qtot(n) = g_sum(domain, psq(is,js,n), is, ie, js, je, n_g, area, 1) enddo totw = sum(qtot(1:nwat)) @@ -3042,8 +4194,9 @@ subroutine get_pressure_given_height(is, ie, js, je, ng, km, wz, kd, height, & end subroutine get_pressure_given_height - subroutine get_height_given_pressure(is, ie, js, je, ng, km, wz, kd, id, log_p, peln, a2) - integer, intent(in):: is, ie, js, je, ng, km + + subroutine get_height_given_pressure(is, ie, js, je, km, wz, kd, id, log_p, peln, a2) + integer, intent(in):: is, ie, js, je, km integer, intent(in):: kd ! vertical dimension of the ouput height integer, intent(in):: id(kd) real, intent(in):: log_p(kd) ! must be monotonically increasing with increasing k @@ -3052,34 +4205,140 @@ subroutine get_height_given_pressure(is, ie, js, je, ng, km, wz, kd, id, log_p, real, intent(in):: peln(is:ie,km+1,js:je) real, intent(out):: a2(is:ie,js:je,kd) ! height (m) ! local: - integer n,i,j,k, k1 + real, dimension(2*km+1):: pn, gz + integer n,i,j,k, k1, k2, l -!$OMP parallel do default(none) shared(is,ie,js,je,km,kd,id,log_p,peln,a2,wz) & -!$OMP private(i,j,n,k,k1) + k2 = max(12, km/2+1) + +!$OMP parallel do default(none) shared(k2,is,ie,js,je,km,kd,id,log_p,peln,a2,wz) & +!$OMP private(i,j,n,k,k1,l,pn,gz) do j=js,je do i=is,ie +!--------------- +! Mirror method: +!--------------- + do k=1,km+1 + pn(k) = peln(i,k,j) + gz(k) = wz(i,j,k) + enddo + do k=km+2, km+k2 + l = 2*(km+1) - k + gz(k) = 2.*gz(km+1) - gz(l) + pn(k) = 2.*pn(km+1) - pn(l) + enddo k1 = 1 do 1000 n=1,kd if( id(n)<0 ) goto 1000 - do k=k1,km - if( log_p(n) <= peln(i,k+1,j) .and. log_p(n) >= peln(i,k,j) ) then - a2(i,j,n) = wz(i,j,k) + (wz(i,j,k+1) - wz(i,j,k)) * & - (log_p(n)-peln(i,k,j)) / (peln(i,k+1,j)-peln(i,k,j) ) + do k=k1,km+k2-1 + if( log_p(n) <= pn(k+1) .and. log_p(n) >= pn(k) ) then + a2(i,j,n) = gz(k) + (gz(k+1)-gz(k))*(log_p(n)-pn(k))/(pn(k+1)-pn(k)) k1 = k go to 1000 endif enddo -! a2(i,j,n) = missing_value -! Extrapolation into ground: use lowest 4-layer mean - a2(i,j,n) = wz(i,j,km+1) + (wz(i,j,km+1) - wz(i,j,km-3)) * & - (log_p(n)-peln(i,km+1,j)) / (peln(i,km+1,j)-peln(i,km-3,j) ) - k1 = km 1000 continue enddo enddo end subroutine get_height_given_pressure + subroutine prt_height(qname, is, ie, js, je, ng, km, press, phis, delz, peln, area, lat) + character(len=*), intent(in):: qname + integer, intent(in):: is, ie, js, je, ng, km + real, intent(in):: press + real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) + real, intent(in):: delz(is:,js:,1:) + real(kind=R_GRID), intent(in), dimension(is:ie, js:je):: area, lat +! local: + real:: a2(is:ie,js:je) ! height (m) + real(kind=R_GRID), dimension(2*km+1):: pn, gz + real(kind=R_GRID):: log_p + integer i,j,k, k2, l + + log_p = log(press) + k2 = max(12, km/2+1) + +!$OMP parallel do default(none) shared(k2,is,ie,js,je,km,log_p,peln,phis,delz,a2) & +!$OMP private(i,j,k,l,pn,gz) + do j=js,je + do 1000 i=is,ie +!--------------- +! Mirror method: +!--------------- + do k=1,km+1 + pn(k) = peln(i,k,j) + enddo + gz(km+1) = phis(i,j)/grav + do k=km,1,-1 + gz(k) = gz(k+1) - delz(i,j,k) + enddo + do k=km+2, km+k2 + l = 2*(km+1) - k + gz(k) = 2.*gz(km+1) - gz(l) + pn(k) = 2.*pn(km+1) - pn(l) + enddo + + do k=1,km+k2-1 + if( log_p <= pn(k+1) .and. 
log_p >= pn(k) ) then + a2(i,j) = gz(k) + (gz(k+1)-gz(k))*(log_p-pn(k))/(pn(k+1)-pn(k)) + go to 1000 + endif + enddo +1000 continue + enddo + call prt_gb_nh_sh(qname, is,ie, js,je, a2, area, lat) + + end subroutine prt_height + + subroutine prt_gb_nh_sh(qname, is,ie, js,je, a2, area, lat) + character(len=*), intent(in):: qname + integer, intent(in):: is, ie, js, je + real, intent(in), dimension(is:ie, js:je):: a2 + real(kind=R_GRID), intent(in), dimension(is:ie, js:je):: area, lat +! Local: + real(R_GRID), parameter:: rad2deg = 180./pi + real(R_GRID):: slat + real:: t_eq, t_nh, t_sh, t_gb + real:: area_eq, area_nh, area_sh, area_gb + integer:: i,j + + t_eq = 0. ; t_nh = 0.; t_sh = 0.; t_gb = 0. + area_eq = 0.; area_nh = 0.; area_sh = 0.; area_gb = 0. + do j=js,je + do i=is,ie + slat = lat(i,j)*rad2deg + area_gb = area_gb + area(i,j) + t_gb = t_gb + a2(i,j)*area(i,j) + if( (slat>-20. .and. slat<20.) ) then + area_eq = area_eq + area(i,j) + t_eq = t_eq + a2(i,j)*area(i,j) + elseif( slat>=20. .and. slat<80. ) then + area_nh = area_nh + area(i,j) + t_nh = t_nh + a2(i,j)*area(i,j) + elseif( slat<=-20. .and. slat>-80. ) then + area_sh = area_sh + area(i,j) + t_sh = t_sh + a2(i,j)*area(i,j) + endif + enddo + enddo + call mp_reduce_sum(area_gb) + call mp_reduce_sum( t_gb) + call mp_reduce_sum(area_nh) + call mp_reduce_sum( t_nh) + call mp_reduce_sum(area_sh) + call mp_reduce_sum( t_sh) + call mp_reduce_sum(area_eq) + call mp_reduce_sum( t_eq) + !Bugfix for non-global domains + if (area_gb <= 1.) area_gb = -1.0 + if (area_nh <= 1.) area_nh = -1.0 + if (area_sh <= 1.) area_sh = -1.0 + if (area_eq <= 1.) area_eq = -1.0 + if (is_master()) write(*,*) qname, t_gb/area_gb, t_nh/area_nh, t_sh/area_sh, t_eq/area_eq + + end subroutine prt_gb_nh_sh + subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, iv) ! iv =-1: winds ! iv = 0: positive definite scalars @@ -3099,7 +4358,7 @@ subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, real:: s0, a6 integer:: i,j,k, n, k1 -!$OMP parallel do default(none) shared(iv,id,is,ie,js,je,km,kd,pout,qin,qout,pe,wz) & +!$OMP parallel do default(none) shared(iv,id,is,ie,js,je,km,kd,pout,qin,qout,pe,wz) & !$OMP private(k1,s0,a6,q2,dp,qe) do j=js,je @@ -3130,7 +4389,7 @@ subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, else qout(i,j,n) = qe(i,km+1) endif - else + else do k=k1,km if ( pout(n)>=pe(i,k,j) .and. pout(n) <= pe(i,k+1,j) ) then ! PPM distribution: f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) @@ -3152,52 +4411,54 @@ subroutine cs3_interpolator(is, ie, js, je, km, qin, kd, pout, wz, pe, id, qout, end subroutine cs3_interpolator - subroutine cs_interpolator(is, ie, js, je, km, qin, kd, pout, pe, id, qout, iv) -! This is the old-style linear in log-p interpolation + subroutine cs_interpolator(is, ie, js, je, km, qin, zout, wz, qout, qmin) integer, intent(in):: is, ie, js, je, km - integer, intent(in):: kd ! vertical dimension of the ouput height - integer, intent(in):: id(kd) - integer, optional, intent(in):: iv - real, intent(in):: pout(kd) ! must be monotonically increasing with increasing k - real, intent(in):: pe(is:ie,km+1,js:je) - real, intent(in):: qin(is:ie,js:je,km) - real, intent(out):: qout(is:ie,js:je,kd) + real, intent(in):: zout, qmin + real, intent(in):: qin(is:ie,js:je,km) + real, intent(in):: wz(is:ie,js:je,km+1) + real, intent(out):: qout(is:ie,js:je) ! 
local: - real:: pm(km) - integer i,j,k, n, k1 + real:: qe(is:ie,km+1) + real, dimension(is:ie,km):: q2, dz + real:: s0, a6 + integer:: i,j,k -!$OMP parallel do default(none) shared(id,is,ie,js,je,km,kd,pout,qin,qout,pe) & -!$OMP private(k1,pm) +!$OMP parallel do default(none) shared(qmin,is,ie,js,je,km,zout,qin,qout,wz) & +!$OMP private(s0,a6,q2,dz,qe) do j=js,je - do i=is,ie - do k=1,km -! consider using true log(p) here for non-hydro? - pm(k) = 0.5*(pe(i,k,j)+pe(i,k+1,j)) - enddo - k1 = 1 - do n=1,kd - if ( id(n) < 0 ) go to 500 - if( pout(n) <= pm(1) ) then -! Higher than the top: using constant value - qout(i,j,n) = qin(i,j,1) - elseif ( pout(n) >= pm(km) ) then -! lower than the bottom surface: - qout(i,j,n) = qin(i,j,km) - else - do k=k1,km-1 - if ( pout(n)>=pm(k) .and. pout(n) <= pm(k+1) ) then - qout(i,j,n) = qin(i,j,k) + (qin(i,j,k+1)-qin(i,j,k))*(pout(n)-pm(k))/(pm(k+1)-pm(k)) - k1 = k ! next level - go to 500 - endif - enddo - endif -500 continue - enddo - enddo + do i=is,ie + do k=1,km + dz(i,k) = wz(i,j,k) - wz(i,j,k+1) + q2(i,k) = qin(i,j,k) + enddo + enddo + + call cs_prof(q2, dz, qe, km, is, ie, 1) + + do i=is,ie + if( zout >= wz(i,j,1) ) then +! Higher than the top: + qout(i,j) = qe(i,1) + elseif ( zout <= wz(i,j,km+1) ) then + qout(i,j) = qe(i,km+1) + else + do k=1,km + if ( zout<=wz(i,j,k) .and. zout >= wz(i,j,k+1) ) then +! PPM distribution: f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) + a6 = 3.*(2.*q2(i,k) - (qe(i,k)+qe(i,k+1))) + s0 = (wz(i,j,k)-zout) / dz(i,k) + qout(i,j) = qe(i,k) + s0*(qe(i,k+1)-qe(i,k)+a6*(1.-s0)) + go to 500 + endif + enddo + endif +500 qout(i,j) = max(qmin, qout(i,j)) + enddo enddo +! Send_data here + end subroutine cs_interpolator subroutine cs_prof(q2, delp, q, km, i1, i2, iv) @@ -3228,7 +4489,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) gam(i,k) = d4(i) / bet enddo enddo - + do i=i1,i2 a_bot = 1. + d4(i)*(d4(i)+1.5) q(i,km+1) = (2.*d4(i)*(d4(i)+1.)*q2(i,km)+q2(i,km-1)-a_bot*q(i,km)) & @@ -3241,7 +4502,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) enddo enddo -! Apply *large-scale* constraints +! Apply *large-scale* constraints do i=i1,i2 q(i,2) = min( q(i,2), max(q2(i,1), q2(i,2)) ) q(i,2) = max( q(i,2), min(q2(i,1), q2(i,2)) ) @@ -3278,7 +4539,7 @@ subroutine cs_prof(q2, delp, q, km, i1, i2, iv) q(i,km) = min( q(i,km), max(q2(i,km-1), q2(i,km)) ) q(i,km) = max( q(i,km), min(q2(i,km-1), q2(i,km)) ) enddo - + end subroutine cs_prof @@ -3296,7 +4557,7 @@ subroutine interpolate_vertical(is, ie, js, je, km, plev, peln, a3, a2) logp = log(plev) -!$OMP parallel do default(none) shared(is,ie,js,je,km,peln,logp,a2,a3) & +!$OMP parallel do default(none) shared(is,ie,js,je,km,peln,logp,a2,a3) & !$OMP private(pm) do j=js,je do 1000 i=is,ie @@ -3309,7 +4570,7 @@ subroutine interpolate_vertical(is, ie, js, je, km, plev, peln, a3, a2) a2(i,j) = a3(i,j,1) elseif ( logp >= pm(km) ) then a2(i,j) = a3(i,j,km) - else + else do k=1,km-1 if( logp <= pm(k+1) .and. logp >= pm(k) ) then a2(i,j) = a3(i,j,k) + (a3(i,j,k+1)-a3(i,j,k))*(logp-pm(k))/(pm(k+1)-pm(k)) @@ -3345,7 +4606,7 @@ subroutine interpolate_z(is, ie, js, je, km, zl, hght, a3, a2) a2(i,j) = a3(i,j,1) elseif ( zl <= zm(km) ) then a2(i,j) = a3(i,j,km) - else + else do k=1,km-1 if( zl <= zm(k) .and. 
zl >= zm(k+1) ) then a2(i,j) = a3(i,j,k) + (a3(i,j,k+1)-a3(i,j,k))*(zm(k)-zl)/(zm(k)-zm(k+1)) @@ -3364,10 +4625,10 @@ subroutine helicity_relative(is, ie, js, je, ng, km, zvir, sphum, srh, & integer, intent(in):: is, ie, js, je, ng, km, sphum real, intent(in):: grav, zvir, z_bot, z_top real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, ua, va - real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(in):: delz(is:ie,js:je,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) logical, intent(in):: hydrostatic real, intent(out):: srh(is:ie,js:je) ! unit: (m/s)**2 ! real, parameter:: z_crit = 3.e3 ! lowest 3-km @@ -3420,26 +4681,180 @@ subroutine helicity_relative(is, ie, js, je, ng, km, zvir, sphum, srh, & vc(i) = vc(i) + va(i,j,k)*dz(i) k0 = k else - uc(i) = uc(i) / (zh(i)-dz(i) - zh0(i)) - vc(i) = vc(i) / (zh(i)-dz(i) - zh0(i)) + uc(i) = uc(i) / (zh(i)-dz(i) - zh0(i)) + vc(i) = vc(i) / (zh(i)-dz(i) - zh0(i)) + goto 123 + endif + enddo +123 continue + +! Lowest layer wind shear computed betw top edge and mid-layer + k = k1 + srh(i,j) = 0.5*(va(i,j,k1)-vc(i))*(ua(i,j,k1-1)-ua(i,j,k1)) - & + 0.5*(ua(i,j,k1)-uc(i))*(va(i,j,k1-1)-va(i,j,k1)) + do k=k0, k1-1 + srh(i,j) = srh(i,j) + 0.5*(va(i,j,k)-vc(i))*(ua(i,j,k-1)-ua(i,j,k+1)) - & + 0.5*(ua(i,j,k)-uc(i))*(va(i,j,k-1)-va(i,j,k+1)) + enddo +! endif + enddo ! i-loop + enddo ! j-loop + + end subroutine helicity_relative + + subroutine helicity_relative_CAPS(is, ie, js, je, ng, km, zvir, sphum, srh, uc, vc, & + ua, va, delz, q, hydrostatic, pt, peln, phis, grav, z_bot, z_top) +! !INPUT PARAMETERS: + integer, intent(in):: is, ie, js, je, ng, km, sphum + real, intent(in):: grav, zvir, z_bot, z_top + real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, ua, va + real, intent(in):: delz(is:ie,js:je,km) + real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) + real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) + real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: uc(is:ie,js:je), vc(is:ie,js:je) + logical, intent(in):: hydrostatic + real, intent(out):: srh(is:ie,js:je) ! unit: (m/s)**2 +!--------------------------------------------------------------------------------- +! SRH = 150-299 ... supercells possible with weak tornadoes +! SRH = 300-449 ... very favourable to supercells development and strong tornadoes +! SRH > 450 ... violent tornadoes +!--------------------------------------------------------------------------------- +! if z_crit = 1E3, the threshold for supercells is 100 (m/s)**2 +! Coded by S.-J. Lin for CONUS regional climate simulations +! + real:: rdg + real, dimension(is:ie):: zh, dz, zh0 + integer i, j, k, k0, k1 + logical below + + rdg = rdgas / grav + +!$OMP parallel do default(none) shared(is,ie,js,je,km,hydrostatic,rdg,pt,zvir,sphum, & +!$OMP peln,delz,ua,va,srh,uc,vc,z_bot,z_top) & +!$OMP private(zh,dz,k0,k1,zh0,below) + do j=js,je + + do i=is,ie + srh(i,j) = 0. + zh(i) = 0. + zh0 = 0. + below = .true. + + do k=km,1,-1 + if ( hydrostatic ) then + dz(i) = rdg*pt(i,j,k)*(1.+zvir*q(i,j,k,sphum))*(peln(i,k+1,j)-peln(i,k,j)) + else + dz(i) = -delz(i,j,k) + endif + + zh(i) = zh(i) + dz(i) + if (zh(i) <= z_bot ) continue + if (zh(i) > z_bot .and. below) then + zh0(i) = zh(i) - dz(i) + k1 = k + below = .false. +! Compute mean winds below z_top + elseif ( zh(i) < z_top ) then + k0 = k + else + goto 123 + endif + + enddo +123 continue + +! 
Lowest layer wind shear computed betw top edge and mid-layer + k = k1 + srh(i,j) = 0.5*(va(i,j,k1)-vc(i,j))*(ua(i,j,k1-1)-ua(i,j,k1)) - & + 0.5*(ua(i,j,k1)-uc(i,j))*(va(i,j,k1-1)-va(i,j,k1)) + do k=k0, k1-1 + srh(i,j) = srh(i,j) + 0.5*(va(i,j,k)-vc(i,j))*(ua(i,j,k-1)-ua(i,j,k+1)) - & + 0.5*(ua(i,j,k)-uc(i,j))*(va(i,j,k-1)-va(i,j,k+1)) + enddo + enddo ! i-loop + enddo ! j-loop + + end subroutine helicity_relative_CAPS + + + subroutine bunkers_vector(is, ie, js, je, ng, km, zvir, sphum, uc, vc, & + ua, va, delz, q, hydrostatic, pt, peln, phis, grav) + + integer, intent(in):: is, ie, js, je, ng, km, sphum + real, intent(in):: grav, zvir + real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, ua, va + real, intent(in):: delz(is:ie,js:je,km) + real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) + real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) + real, intent(in):: peln(is:ie,km+1,js:je) + logical, intent(in):: hydrostatic + real, intent(out):: uc(is:ie,js:je), vc(is:ie,js:je) + + real:: rdg + real :: zh, dz, usfc, vsfc, u6km, v6km, umn, vmn + real :: ushr, vshr, shrmag + integer i, j, k + real, parameter :: bunkers_d = 7.5 ! Empirically derived parameter + logical :: has_sfc, has_6km + + rdg = rdgas / grav + +!$OMP parallel do default(none) shared(is,ie,js,je,km,hydrostatic,rdg,pt,zvir,sphum, & +!$OMP peln,delz,ua,va,uc,vc) & +!$OMP private(zh,dz,usfc,vsfc,u6km,v6km,umn,vmn, & +!$OMP ushr,vshr,shrmag) + do j=js,je + do i=is,ie + zh = 0. + usfc = 0. + vsfc = 0. + u6km = 0. + v6km = 0. + umn = 0. + vmn = 0. + + usfc = ua(i,j,km) + vsfc = va(i,j,km) + + do k=km,1,-1 + if ( hydrostatic ) then + dz = rdg*pt(i,j,k)*(1.+zvir*q(i,j,k,sphum))*(peln(i,k+1,j)-peln(i,k,j)) + else + dz = -delz(i,j,k) + endif + zh = zh + dz + + if (zh < 6000) then + u6km = ua(i,j,k) + v6km = va(i,j,k) + + umn = umn + ua(i,j,k)*dz + vmn = vmn + va(i,j,k)*dz + else goto 123 endif + enddo 123 continue -! Lowest layer wind shear computed betw top edge and mid-layer - k = k1 - srh(i,j) = 0.5*(va(i,j,k1)-vc(i))*(ua(i,j,k1-1)-ua(i,j,k1)) - & - 0.5*(ua(i,j,k1)-uc(i))*(va(i,j,k1-1)-va(i,j,k1)) - do k=k0, k1-1 - srh(i,j) = srh(i,j) + 0.5*(va(i,j,k)-vc(i))*(ua(i,j,k-1)-ua(i,j,k+1)) - & - 0.5*(ua(i,j,k)-uc(i))*(va(i,j,k-1)-va(i,j,k+1)) - enddo -! endif + u6km = u6km + (ua(i,j,k) - u6km) / dz * (6000. - (zh - dz)) + v6km = v6km + (va(i,j,k) - v6km) / dz * (6000. - (zh - dz)) + + umn = umn / (zh - dz) + vmn = vmn / (zh - dz) + + ushr = u6km - usfc + vshr = v6km - vsfc + shrmag = sqrt(ushr * ushr + vshr * vshr) + uc(i,j) = umn + bunkers_d * vshr / shrmag + vc(i,j) = vmn - bunkers_d * ushr / shrmag + enddo ! i-loop enddo ! j-loop - end subroutine helicity_relative + end subroutine bunkers_vector + subroutine updraft_helicity(is, ie, js, je, ng, km, zvir, sphum, uh, & w, vort, delz, q, hydrostatic, pt, peln, phis, grav, z_bot, z_top) @@ -3448,10 +4863,10 @@ subroutine updraft_helicity(is, ie, js, je, ng, km, zvir, sphum, uh, & real, intent(in):: grav, zvir, z_bot, z_top real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, w real, intent(in), dimension(is:ie,js:je,km):: vort - real, intent(in):: delz(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(in):: delz(is:ie,js:je,km) real, intent(in):: q(is-ng:ie+ng,js-ng:je+ng,km,*) real, intent(in):: phis(is-ng:ie+ng,js-ng:je+ng) - real, intent(in):: peln(is:ie,km+1,js:je) + real, intent(in):: peln(is:ie,km+1,js:je) logical, intent(in):: hydrostatic real, intent(out):: uh(is:ie,js:je) ! unit: (m/s)**2 ! Coded by S.-J. 
Lin for CONUS regional climate simulations @@ -3491,7 +4906,7 @@ subroutine updraft_helicity(is, ie, js, je, ng, km, zvir, sphum, uh, & elseif ( zh(i) < z_top ) then uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*dz(i) else - uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*(z_top - (zh(i)-dz(i)) ) + uh(i,j) = uh(i,j) + vort(i,j,k)*w(i,j,k)*(z_top - (zh(i)-dz(i)) ) goto 123 endif enddo @@ -3509,10 +4924,10 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) ! !INPUT PARAMETERS: integer, intent(in):: is, ie, js, je, ng, km real, intent(in):: grav - real, intent(in):: pt(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(in):: pkz(is:ie,js:je,km) + real, intent(in):: pt(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(in):: pkz(is:ie,js:je,km) real, intent(in):: delp(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(in):: f_d(is-ng:ie+ng,js-ng:je+ng) + real, intent(in):: f_d(is-ng:ie+ng,js-ng:je+ng) ! vort is relative vorticity as input. Becomes PV on output real, intent(inout):: vort(is:ie,js:je,km) @@ -3526,9 +4941,9 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) ! z-surface is not that different from the hybrid sigma-p coordinate. ! See page 39, Pedlosky 1979: Geophysical Fluid Dynamics ! -! The follwoing simplified form is strictly correct only if vort is computed on +! The follwoing simplified form is strictly correct only if vort is computed on ! constant z surfaces. In addition hydrostatic approximation is made. -! EPV = - GRAV * (vort+f_d) / del(p) * del(pt) / pt +! EPV = - GRAV * (vort+f_d) / del(p) * del(pt) / pt ! where del() is the vertical difference operator. ! ! programmer: S.-J. Lin, shian-jiann.lin@noaa.gov @@ -3551,7 +4966,7 @@ subroutine pv_entropy(is, ie, js, je, ng, km, vort, f_d, pt, pkz, delp, grav) #else ! Compute PT at layer edges. !$OMP parallel do default(none) shared(is,ie,js,je,km,pt,pkz,w3d,delp,te2,te) & -!$OMP private(t2, delp2) +!$OMP private(t2, delp2) do j=js,je do k=1,km do i=is,ie @@ -3699,7 +5114,7 @@ end subroutine ppme !####################################################################### subroutine rh_calc (pfull, t, qv, rh, do_cmip) - + real, intent (in), dimension(:,:) :: pfull, t, qv real, intent (out), dimension(:,:) :: rh real, dimension(size(t,1),size(t,2)) :: esat @@ -3729,6 +5144,88 @@ subroutine rh_calc (pfull, t, qv, rh, do_cmip) end subroutine rh_calc +#ifdef SIMPLIFIED_THETA_E +subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, npz, & + hydrostatic, moist) +! calculate the equvalent potential temperature +! Simplified form coded by SJL + integer, intent(in):: is,ie,js,je,ng,npz + real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,npz):: pt, delp, q + real, intent(in), dimension(is: ,js: ,1: ):: delz + real, intent(in), dimension(is:ie,npz+1,js:je):: peln + real, intent(in):: pkz(is:ie,js:je,npz) + logical, intent(in):: hydrostatic, moist +! Output: + real, dimension(is:ie,js:je,npz), intent(out) :: theta_e !< eqv pot +! local + real, parameter:: tice = 273.16 + real, parameter:: c_liq = 4190. ! heat capacity of water at 0C +#ifdef SIM_NGGPS + real, parameter:: dc_vap = 0. +#else + real, parameter:: dc_vap = cp_vapor - c_liq ! = -2344. isobaric heating/cooling +#endif + real(kind=R_GRID), dimension(is:ie):: pd, rq + real(kind=R_GRID) :: wfac + integer :: i,j,k + + if ( moist ) then + wfac = 1. + else + wfac = 0. 
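+! wfac masks the moisture terms below (rq(i) = max(0., wfac*q(i,j,k))),
+! so the dry case (wfac = 0.) reduces theta_e to the dry potential
+! temperature.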
+ endif + +!$OMP parallel do default(none) shared(pk0,wfac,moist,pkz,is,ie,js,je,npz,pt,q,delp,peln,delz,theta_e,hydrostatic) & +!$OMP private(pd, rq) + do k = 1,npz + do j = js,je + + if ( hydrostatic ) then + do i=is,ie + rq(i) = max(0., wfac*q(i,j,k)) + pd(i) = (1.-rq(i))*delp(i,j,k) / (peln(i,k+1,j) - peln(i,k,j)) + enddo + else +! Dry pressure: p = r R T + do i=is,ie + rq(i) = max(0., wfac*q(i,j,k)) + pd(i) = -rdgas*pt(i,j,k)*(1.-rq(i))*delp(i,j,k)/(grav*delz(i,j,k)) + enddo + endif + + if ( moist ) then + do i=is,ie + rq(i) = max(0., q(i,j,k)) +! rh(i) = max(1.e-12, rq(i)/wqs1(pt(i,j,k),den(i))) ! relative humidity +! theta_e(i,j,k) = exp(rq(i)/cp_air*((hlv+dc_vap*(pt(i,j,k)-tice))/pt(i,j,k) - & +! rvgas*log(rh(i))) + kappa*log(1.e5/pd(i))) * pt(i,j,k) +! Simplified form: (ignoring the RH term) +#ifdef SIM_NGGPS + theta_e(i,j,k) = pt(i,j,k)*exp(kappa*log(1.e5/pd(i))) * & + exp(rq(i)*hlv/(cp_air*pt(i,j,k))) +#else + theta_e(i,j,k) = pt(i,j,k)*exp( rq(i)/(cp_air*pt(i,j,k))*(hlv+dc_vap*(pt(i,j,k)-tice)) & + + kappa*log(1.e5/pd(i)) ) +#endif + enddo + else + if ( hydrostatic ) then + do i=is,ie + theta_e(i,j,k) = pt(i,j,k)*pk0/pkz(i,j,k) + enddo + else + do i=is,ie +! theta_e(i,j,k) = pt(i,j,k)*(1.e5/pd(i))**kappa + theta_e(i,j,k) = pt(i,j,k)*exp( kappa*log(1.e5/pd(i)) ) + enddo + endif + endif + enddo ! j-loop + enddo ! k-loop + +end subroutine eqv_pot + +#else subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, npz, & hydrostatic, moist) ! calculate the equvalent potential temperature @@ -3737,9 +5234,9 @@ subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, np ! Modified by SJL integer, intent(in):: is,ie,js,je,ng,npz real, intent(in), dimension(is-ng:ie+ng,js-ng:je+ng,npz):: pt, delp, q - real, intent(in), dimension(is-ng: ,js-ng: ,1: ):: delz + real, intent(in), dimension(is: ,js: ,1: ):: delz real, intent(in), dimension(is:ie,npz+1,js:je):: peln - real, intent(in):: pkz(is:ie,js:je,npz) + real, intent(in):: pkz(is:ie,js:je,npz) logical, intent(in):: hydrostatic, moist ! Output: real, dimension(is:ie,js:je,npz), intent(out) :: theta_e !< eqv pot @@ -3807,6 +5304,7 @@ subroutine eqv_pot(theta_e, pt, delp, delz, peln, pkz, q, is, ie, js, je, ng, np end subroutine eqv_pot +#endif subroutine nh_total_energy(is, ie, js, je, isd, ied, jsd, jed, km, & w, delz, pt, delp, q, hs, area, domain, & @@ -3818,7 +5316,8 @@ subroutine nh_total_energy(is, ie, js, je, isd, ied, jsd, jed, km, & ! !INPUT PARAMETERS: integer, intent(in):: km, is, ie, js, je, isd, ied, jsd, jed integer, intent(in):: nwat, sphum, liq_wat, rainwat, ice_wat, snowwat, graupel - real, intent(in), dimension(isd:ied,jsd:jed,km):: ua, va, pt, delp, w, delz + real, intent(in), dimension(isd:ied,jsd:jed,km):: ua, va, pt, delp, w + real, intent(in), dimension(is:ie,js:je,km) :: delz real, intent(in), dimension(isd:ied,jsd:jed,km,nwat):: q real, intent(in):: hs(isd:ied,jsd:jed) ! surface geopotential real, intent(in):: area(isd:ied, jsd:jed) @@ -3876,7 +5375,7 @@ subroutine nh_total_energy(is, ie, js, je, isd, ied, jsd, jed, km, & enddo enddo - psm = g_sum(domain, te, is, ie, js, je, 3, area_l, 1) + psm = g_sum(domain, te, is, ie, js, je, 3, area_l, 1) if( master ) write(*,*) 'TE ( Joule/m^2 * E9) =', psm * 1.E-9 end subroutine nh_total_energy @@ -3886,7 +5385,7 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & dbz, maxdbz, allmax, bd, npz, ncnst, & hydrostatic, zvir, in0r, in0s, in0g, iliqskin) - !Code from Mark Stoelinga's dbzcalc.f from the RIP package. 
+ !Code from Mark Stoelinga's dbzcalc.f from the RIP package. !Currently just using values taken directly from that code, which is ! consistent for the MM5 Reisner-2 microphysics. From that file: @@ -3920,16 +5419,18 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & ! ! More information on the derivation of simulated reflectivity in RIP ! can be found in Stoelinga (2005, unpublished write-up). Contact -! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. +! Mark Stoelinga (stoeling@atmos.washington.edu) for a copy. -! 22sep16: Modifying to use the Lin MP parameters. If doing so remember -! that the Lin MP assumes a constant intercept (in0X = .false.) +! 22sep16: Modifying to use the GFDL MP parameters. If doing so remember +! that the GFDL MP assumes a constant intercept (in0X = .false.) ! Ferrier-Aligo has an option for fixed slope (rather than fixed intercept). ! Thompson presumably is an extension of Reisner MP. + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: npz, ncnst - real, intent(IN), dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz) :: pt, delp, delz + real, intent(IN), dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz) :: pt, delp + real, intent(IN), dimension(bd%is:, bd%js:, 1:) :: delz real, intent(IN), dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst) :: q real, intent(IN), dimension(bd%is :bd%ie, npz+1, bd%js:bd%je) :: peln real, intent(OUT), dimension(bd%is :bd%ie, bd%js :bd%je , npz) :: dbz @@ -3939,10 +5440,13 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & real, intent(OUT) :: allmax !Parameters for constant intercepts (in0[rsg] = .false.) - !Using Lin MP values - real, parameter :: rn0_r = 8.e6 ! m^-4 - real, parameter :: rn0_s = 3.e6 ! m^-4 - real, parameter :: rn0_g = 4.e6 ! m^-4 + !Using GFDL MP values + real(kind=R_GRID), parameter:: vconr = 2503.23638966667 + real(kind=R_GRID), parameter:: vcong = 87.2382675 + real(kind=R_GRID), parameter:: vcons = 6.6280504 + real(kind=R_GRID), parameter:: normr = 25132741228.7183 + real(kind=R_GRID), parameter:: normg = 5026548245.74367 + real(kind=R_GRID), parameter:: norms = 942477796.076938 !Constants for variable intercepts !Will need to be changed based on MP scheme @@ -3956,134 +5460,759 @@ subroutine dbzcalc(q, pt, delp, peln, delz, & real, parameter :: ron_delqr0 = 0.25*ron_qr0 real, parameter :: ron_const1r = (ron2-ron_min)*0.5 real, parameter :: ron_const2r = (ron2+ron_min)*0.5 + real, parameter :: rnzs = 3.0e6 ! lin83 !Other constants real, parameter :: gamma_seven = 720. - real, parameter :: koch_correction = 161.3 - !The following values are also used in Lin-Lin MP - real, parameter :: rho_r = 1.0e3 ! LFO83 - real, parameter :: rho_s = 100. ! kg m^-3 - real, parameter :: rho_g = 400. ! kg m^-3 + !The following values are also used in GFDL MP + real, parameter :: rhor = 1.0e3 ! LFO83 + real, parameter :: rhos = 100. ! kg m^-3 + real, parameter :: rhog0 = 400. ! kg m^-3 + real, parameter :: rhog = 500. ! graupel-hail mix +! real, parameter :: rho_g = 900. ! 
hail/frozen rain real, parameter :: alpha = 0.224 - real, parameter :: factor_r = gamma_seven * 1.e18 * (1./(pi*rho_r))**1.75 - real, parameter :: factor_s = koch_correction * 1.e18 * (1./(pi*rho_s))**1.75 & - * (rho_s/rho_r)**2 * alpha - real, parameter :: factor_g = koch_correction * 1.e18 * (1./(pi*rho_g))**1.75 & - * (rho_g/rho_r)**2 * alpha -!!$ real, parameter :: factor_s = gamma_seven * 1.e18 * (1./(pi*rho_s))**1.75 & -!!$ * (rho_s/rho_r)**2 * alpha -!!$ real, parameter :: factor_g = gamma_seven * 1.e18 * (1./(pi*rho_g))**1.75 & -!!$ * (rho_g/rho_r)**2 * alpha + real(kind=R_GRID), parameter :: factor_s = gamma_seven * 1.e18 * (1./(pi*rhos))**1.75 & + * (rhos/rhor)**2 * alpha + real, parameter :: qmin = 1.E-12 real, parameter :: tice = 273.16 - integer :: i,j,k - real :: factorb_s, factorb_g, rhoair - real :: temp_c, pres, sonv, gonv, ronv, z_e - real :: qr1, qs1, qg1 +! Double precision + real(kind=R_GRID), dimension(bd%is:bd%ie) :: rhoair, denfac, z_e + real(kind=R_GRID):: qr1, qs1, qg1, t1, t2, t3, rwat, vtr, vtg, vts + real(kind=R_GRID):: factorb_s, factorb_g + real(kind=R_GRID):: temp_c, pres, sonv, gonv, ronv + integer :: i,j,k integer :: is, ie, js, je is = bd%is ie = bd%ie js = bd%js je = bd%je + if (rainwat < 1) return + dbz(:,:,1:mp_top) = -20. maxdbz(:,:) = -20. !Minimum value - allmax = -20. - - if (rainwat < 1) return + allmax = -20. - do k=1, npz +!$OMP parallel do default(shared) private(rhoair,t1,t2,t3,denfac,vtr,vtg,vts,z_e) + do k=mp_top+1, npz do j=js, je - do i=is, ie - if (hydrostatic) then - rhoair = delp(i,j,k)/( (peln(i,k+1,j)-peln(i,k,j)) * rdgas * pt(i,j,k) * ( 1. + zvir*q(i,j,k,sphum) ) ) + do i=is, ie + rhoair(i) = delp(i,j,k)/( (peln(i,k+1,j)-peln(i,k,j)) * rdgas * pt(i,j,k) * ( 1. + zvir*q(i,j,k,sphum) ) ) + denfac(i) = sqrt(min(10., 1.2/rhoair(i))) + z_e(i) = 0. + enddo else - rhoair = -delp(i,j,k)/(grav*delz(i,j,k)) ! air density + do i=is, ie + rhoair(i) = -delp(i,j,k)/(grav*delz(i,j,k)) ! moist air density + denfac(i) = sqrt(min(10., 1.2/rhoair(i))) + z_e(i) = 0. + enddo endif - - ! Adjust factor for brightband, where snow or graupel particle - ! scatters like liquid water (alpha=1.0) because it is assumed to - ! have a liquid skin. - - !lmh: celkel in dbzcalc.f presumably freezing temperature - if (iliqskin .and. pt(i,j,k) .gt. tice) then - factorb_s=factor_s/alpha - factorb_g=factor_g/alpha - else - factorb_s=factor_s - factorb_g=factor_g + if (rainwat > 0) then + do i=is, ie +! The following form vectorizes better & more consistent with GFDL_MP +! SJL notes: Marshall-Palmer, dBZ = 200*precip**1.6, precip = 3.6e6*t1/rhor*vtr ! [mm/hr] +! GFDL_MP terminal fall speeds are used +! Date modified 20170701 +! Account for excessively high cloud water -> autoconvert (diag only) excess cloud water + t1 = rhoair(i)*max(qmin, q(i,j,k,rainwat)+dim(q(i,j,k,liq_wat), 1.0e-3)) + vtr = max(1.e-3, vconr*denfac(i)*exp(0.2 *log(t1/normr))) + z_e(i) = 200.*exp(1.6*log(3.6e6*t1/rhor*vtr)) + enddo endif - - !Calculate variable intercept parameters if necessary - ! using definitions from Thompson et al - if (in0s) then - temp_c = min(-0.001, pt(i,j,k) - tice) - sonv = min(2.0e8, 2.0e6*exp(-0.12*temp_c)) - else - sonv = rn0_s - end if - - qr1 = max(0., q(i,j,k,rainwat)) if (graupel > 0) then - qg1 = max(0., q(i,j,k,graupel)) - else - qg1 = 0. 
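+! Graupel term: same Marshall-Palmer form as the rain term above, but
+! with the GFDL MP graupel fall speed (vcong, normg) and the
+! graupel-hail mix density rhog: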
+ do i=is, ie + t3 = rhoair(i)*max(qmin, q(i,j,k,graupel)) + vtg = max(1.e-3, vcong*denfac(i)*exp(0.125 *log(t3/normg))) + z_e(i) = z_e(i) + 200.*exp(1.6*log(3.6e6*t3/rhog*vtg)) + enddo endif if (snowwat > 0) then - qs1 = max(0., q(i,j,k,snowwat)) - else - qs1 = 0. + do i=is, ie + t2 = rhoair(i)*max(qmin, q(i,j,k,snowwat)) + ! vts = max(1.e-3, vcons*denfac*exp(0.0625*log(t2/norms))) + z_e(i) = z_e(i) + (factor_s/alpha)*t2*exp(0.75*log(t2/rnzs)) + enddo + endif + do i=is,ie + dbz(i,j,k) = 10.*log10( max(0.01, z_e(i)) ) + enddo + enddo + enddo + +!$OMP parallel do default(shared) + do j=js, je + do k=mp_top+1, npz + do i=is, ie + maxdbz(i,j) = max(dbz(i,j,k), maxdbz(i,j)) + enddo + enddo + enddo + + do j=js, je + do i=is, ie + allmax = max(maxdbz(i,j), allmax) + enddo + enddo + + end subroutine dbzcalc + +!####################################################################### + + subroutine fv_diag_init_gn(Atm) + type(fv_atmos_type), intent(inout), target :: Atm + + if (Atm%grid_Number > 1) then + write(gn,"(A2,I1)") " g", Atm%grid_number + else + gn = "" + end if + + end subroutine fv_diag_init_gn + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + subroutine getcape( nk , p , t , dz, q, the, cape , cin, source_in ) + implicit none + + integer, intent(in) :: nk + real, dimension(nk), intent(in) :: p,t,dz,q,the + real, intent(out) :: cape,cin + integer, intent(IN), OPTIONAL :: source_in + +!----------------------------------------------------------------------- +! +! getcape - a fortran90 subroutine to calculate Convective Available +! Potential Energy (CAPE) from a sounding. +! +! Version 1.02 Last modified: 10 October 2008 +! +! Author: George H. Bryan +! Mesoscale and Microscale Meteorology Division +! National Center for Atmospheric Research +! Boulder, Colorado, USA +! gbryan@ucar.edu +! +! Disclaimer: This code is made available WITHOUT WARRANTY. +! +! References: Bolton (1980, MWR, p. 1046) (constants and definitions) +! Bryan and Fritsch (2004, MWR, p. 2421) (ice processes) +! +!----------------------------------------------------------------------- +! +! Input: nk - number of levels in the sounding (integer) +! +! p - one-dimensional array of pressure (Pa) (real) +! +! t - one-dimensional array of temperature (K) (real) +! +! dz - one-dimensional array of height thicknesses (m) (real) +! +! q - one-dimensional array of specific humidity (kg/kg) (real) +! +! source - source parcel: +! 1 = surface (default) +! 2 = most unstable (max theta-e) +! 3 = mixed-layer (specify ml_depth) +! +! Output: cape - Convective Available Potential Energy (J/kg) (real) +! +! cin - Convective Inhibition (J/kg) (real) +! +!----------------------------------------------------------------------- +! User options: + + real, parameter :: pinc = 10000.0 ! Pressure increment (Pa) + ! (smaller number yields more accurate + ! results,larger number makes code + ! go faster) + + + real, parameter :: ml_depth = 200.0 ! depth (m) of mixed layer + ! for source=3 + + integer, parameter :: adiabat = 1 ! Formulation of moist adiabat: + ! 1 = pseudoadiabatic, liquid only + ! 2 = reversible, liquid only + ! 3 = pseudoadiabatic, with ice + ! 
4 = reversible, with ice + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- +! No need to modify anything below here: +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + integer :: source = 1 + logical :: doit,ice,cloud,not_converged + integer :: k,kmin,n,nloop,i,orec + real, dimension(nk) :: pi,th,thv,z,pt,pb,pc,pn,ptv + + real :: maxthe,parea,narea,lfc + real :: th1,p1,t1,qv1,ql1,qi1,b1,pi1,thv1,qt,dp,frac + real :: th2,p2,t2,qv2,ql2,qi2,b2,pi2,thv2 + real :: thlast,fliq,fice,tbar,qvbar,qlbar,qibar,lhv,lhs,lhf,rm,cpm + real*8 :: avgth,avgqv + +!----------------------------------------------------------------------- + + real, parameter :: g = 9.81 + real, parameter :: p00 = 100000.0 + real, parameter :: cp = 1005.7 + real, parameter :: rd = 287.04 + real, parameter :: rv = 461.5 + real, parameter :: xlv = 2501000.0 + real, parameter :: xls = 2836017.0 + real, parameter :: t0 = 273.15 + real, parameter :: cpv = 1875.0 + real, parameter :: cpl = 4190.0 + real, parameter :: cpi = 2118.636 + real, parameter :: lv1 = xlv+(cpl-cpv)*t0 + real, parameter :: lv2 = cpl-cpv + real, parameter :: ls1 = xls+(cpi-cpv)*t0 + real, parameter :: ls2 = cpi-cpv + + real, parameter :: rp00 = 1.0/p00 + real, parameter :: eps = rd/rv + real, parameter :: reps = rv/rd + real, parameter :: rddcp = rd/cp + real, parameter :: cpdrd = cp/rd + real, parameter :: cpdg = cp/g + + real, parameter :: converge = 0.1 + + integer, parameter :: debug_level = 0 + + if (present(source_in)) source = source_in + +!----------------------------------------------------------------------- + +!---- convert p,t to mks units; get pi,th,thv ----! + + do k=1,nk + pi(k) = (p(k)*rp00)**rddcp + th(k) = t(k)/pi(k) + thv(k) = th(k)*(1.0+reps*q(k))/(1.0+q(k)) + enddo + +!---- get height using the hydrostatic equation ----! + + z(nk) = 0.5*dz(nk) + do k=nk-1,1,-1 + z(k) = z(k+1) + 0.5*(dz(k+1)+dz(k)) + enddo + +!---- find source parcel ----! + + IF(source.eq.1)THEN + ! use surface parcel + kmin = nk + + ELSEIF(source.eq.2)THEN + ! use most unstable parcel (max theta-e) + + IF(p(1).lt.50000.0)THEN + ! first report is above 500 mb ... just use the first level reported + kmin = nk + maxthe = the(nk) + ELSE + ! find max thetae below 500 mb + maxthe = 0.0 + do k=nk,1,-1 + if(p(k).ge.50000.0)then + if( the(nk).gt.maxthe )then + maxthe = the(nk) + kmin = k + endif + endif + enddo + ENDIF + if(debug_level.ge.100) print *,' kmin,maxthe = ',kmin,maxthe + +!!$ ELSEIF(source.eq.3)THEN +!!$ ! use mixed layer +!!$ +!!$ IF( dz(nk).gt.ml_depth )THEN +!!$ ! the second level is above the mixed-layer depth: just use the +!!$ ! lowest level +!!$ +!!$ avgth = th(nk) +!!$ avgqv = q(nk) +!!$ kmin = nk +!!$ +!!$ ELSEIF( z(1).lt.ml_depth )THEN +!!$ ! the top-most level is within the mixed layer: just use the +!!$ ! upper-most level (not +!!$ +!!$ avgth = th(1) +!!$ avgqv = q(1) +!!$ kmin = 1 +!!$ +!!$ ELSE +!!$ ! calculate the mixed-layer properties: +!!$ +!!$ avgth = 0.0 +!!$ avgqv = 0.0 +!!$ k = nk-1 +!!$ if(debug_level.ge.100) print *,' ml_depth = ',ml_depth +!!$ if(debug_level.ge.100) print *,' k,z,th,q:' +!!$ if(debug_level.ge.100) print *,nk,z(nk),th(nk),q(nk) +!!$ +!!$ do while( (z(k).le.ml_depth) .and. 
(k.ge.1) ) +!!$ +!!$ if(debug_level.ge.100) print *,k,z(k),th(k),q(k) +!!$ +!!$ avgth = avgth + dz(k)*th(k) +!!$ avgqv = avgqv + dz(k)*q(k) +!!$ +!!$ k = k - 1 +!!$ +!!$ enddo +!!$ +!!$ th2 = th(k+1)+(th(k)-th(k+1))*(ml_depth-z(k-1))/dz(k) +!!$ qv2 = q(k+1)+( q(k)- q(k+1))*(ml_depth-z(k-1))/dz(k) +!!$ +!!$ if(debug_level.ge.100) print *,999,ml_depth,th2,qv2 +!!$ +!!$ avgth = avgth + 0.5*(ml_depth-z(k-1))*(th2+th(k-1)) +!!$ avgqv = avgqv + 0.5*(ml_depth-z(k-1))*(qv2+q(k-1)) +!!$ +!!$ if(debug_level.ge.100) print *,k,z(k),th(k),q(k) +!!$ +!!$ avgth = avgth/ml_depth +!!$ avgqv = avgqv/ml_depth +!!$ +!!$ kmin = nk +!!$ +!!$ ENDIF +!!$ +!!$ if(debug_level.ge.100) print *,avgth,avgqv + + ELSE + + print * + print *,' Unknown value for source' + print * + print *,' source = ',source + print * + call mpp_error(FATAL, " Unknown CAPE source") + + ENDIF + +!---- define parcel properties at initial location ----! + narea = 0.0 + + if( (source.eq.1).or.(source.eq.2) )then + k = kmin + th2 = th(kmin) + pi2 = pi(kmin) + p2 = p(kmin) + t2 = t(kmin) + thv2 = thv(kmin) + qv2 = q(kmin) + b2 = 0.0 + elseif( source.eq.3 )then + k = kmin + th2 = avgth + qv2 = avgqv + thv2 = th2*(1.0+reps*qv2)/(1.0+qv2) + pi2 = pi(kmin) + p2 = p(kmin) + t2 = th2*pi2 + b2 = g*( thv2-thv(kmin) )/thv(kmin) + endif + + ql2 = 0.0 + qi2 = 0.0 + qt = qv2 + + cape = 0.0 + cin = 0.0 + lfc = 0.0 + + doit = .true. + cloud = .false. + if(adiabat.eq.1.or.adiabat.eq.2)then + ice = .false. + else + ice = .true. + endif + +! the = getthe(p2,t2,t2,qv2) +! if(debug_level.ge.100) print *,' the = ',the + +!---- begin ascent of parcel ----! + + if(debug_level.ge.100)then + print *,' Start loop:' + print *,' p2,th2,qv2 = ',p2,th2,qv2 endif - if (in0g) then - gonv = gon - if ( qg1 > r1) then - gonv = 2.38 * (pi * rho_g / (rhoair*qg1))**0.92 - gonv = max(1.e4, min(gonv,gon)) - end if + do while( doit .and. (k.gt.1) ) + + k = k-1 + b1 = b2 + + dp = p(k)-p(k-1) + + if( dp.lt.pinc )then + nloop = 1 else - gonv = rn0_g - end if - - if (in0r) then - ronv = ron2 - if (qr1 > r1 ) then - ronv = ron_const1r * tanh((ron_qr0-qr1)/ron_delqr0) + ron_const2r - end if + nloop = 1 + int( dp/pinc ) + dp = dp/float(nloop) + endif + + do n=1,nloop + + p1 = p2 + t1 = t2 + pi1 = pi2 + th1 = th2 + qv1 = qv2 + ql1 = ql2 + qi1 = qi2 + thv1 = thv2 + + p2 = p2 - dp + pi2 = (p2*rp00)**rddcp + + thlast = th1 + i = 0 + not_converged = .true. + + do while( not_converged ) + i = i + 1 + t2 = thlast*pi2 + if(ice)then + fliq = max(min((t2-233.15)/(273.15-233.15),1.0),0.0) + fice = 1.0-fliq + else + fliq = 1.0 + fice = 0.0 + endif + qv2 = min( qt , fliq*getqvs(p2,t2) + fice*getqvi(p2,t2) ) + qi2 = max( fice*(qt-qv2) , 0.0 ) + ql2 = max( qt-qv2-qi2 , 0.0 ) + + tbar = 0.5*(t1+t2) + qvbar = 0.5*(qv1+qv2) + qlbar = 0.5*(ql1+ql2) + qibar = 0.5*(qi1+qi2) + + lhv = lv1-lv2*tbar + lhs = ls1-ls2*tbar + lhf = lhs-lhv + + rm=rd+rv*qvbar + cpm=cp+cpv*qvbar+cpl*qlbar+cpi*qibar + th2=th1*exp( lhv*(ql2-ql1)/(cpm*tbar) & + +lhs*(qi2-qi1)/(cpm*tbar) & + +(rm/cpm-rd/cp)*alog(p2/p1) ) + + if(i.gt.90) print *,i,th2,thlast,th2-thlast + if(i.gt.100)then + print *,' getcape() error: lack of convergence, stopping iteration' + not_converged = .false. + endif + if( abs(th2-thlast).gt.converge )then + thlast=thlast+0.3*(th2-thlast) + else + not_converged = .false. + endif + enddo + + ! Latest pressure increment is complete. Calculate some + ! important stuff: + + if( ql2.ge.1.0e-10 ) cloud = .true. + + IF(adiabat.eq.1.or.adiabat.eq.3)THEN + ! 
pseudoadiabat + qt = qv2 + ql2 = 0.0 + qi2 = 0.0 + ELSEIF(adiabat.le.0.or.adiabat.ge.5)THEN + print *,' getcape(): Undefined adiabat' + stop 10000 + ENDIF + + enddo + + thv2 = th2*(1.0+reps*qv2)/(1.0+qv2+ql2+qi2) + b2 = g*( thv2-thv(k) )/thv(k) + +! the = getthe(p2,t2,t2,qv2) + + ! Get contributions to CAPE and CIN: + + if( (b2.ge.0.0) .and. (b1.lt.0.0) )then + ! first trip into positive area + !ps = p(k-1)+(p(k)-p(k-1))*(0.0-b1)/(b2-b1) + frac = b2/(b2-b1) + parea = 0.5*b2*dz(k)*frac + narea = narea-0.5*b1*dz(k)*(1.0-frac) + if(debug_level.ge.200)then + print *,' b1,b2 = ',b1,b2 + !print *,' p1,ps,p2 = ',p(k-1),ps,p(k) + print *,' frac = ',frac + print *,' parea = ',parea + print *,' narea = ',narea + endif + cin = cin + narea + narea = 0.0 + elseif( (b2.lt.0.0) .and. (b1.gt.0.0) )then + ! first trip into neg area + !ps = p(k-1)+(p(k)-p(k-1))*(0.0-b1)/(b2-b1) + frac = b1/(b1-b2) + parea = 0.5*b1*dz(k)*frac + narea = -0.5*b2*dz(k)*(1.0-frac) + if(debug_level.ge.200)then + print *,' b1,b2 = ',b1,b2 + !print *,' p1,ps,p2 = ',p(k-1),ps,p(k) + print *,' frac = ',frac + print *,' parea = ',parea + print *,' narea = ',narea + endif + elseif( b2.lt.0.0 )then + ! still collecting negative buoyancy + parea = 0.0 + narea = narea-0.5*dz(k)*(b1+b2) else - ronv = rn0_r - end if + ! still collecting positive buoyancy + parea = 0.5*dz(k)*(b1+b2) + narea = 0.0 + endif - !Total equivalent reflectivity: mm^6 m^-3 - z_e = factor_r * (rhoair*qr1)**1.75 / ronv**.75 & ! rain - + factorb_s * (rhoair*qs1)**1.75 / sonv**.75 & ! snow - + factorb_g * (rhoair*qg1)**1.75 / gonv**.75 ! graupel - - !Minimum allowed dbz is -20 - z_e = max(z_e,0.01) - dbz(i,j,k) = 10. * log10(z_e) + cape = cape + max(0.0,parea) - maxdbz(i,j) = max(dbz(i,j,k), maxdbz(i,j)) - allmax = max(dbz(i,j,k), allmax) + if(debug_level.ge.200)then + write(6,102) p2,b1,b2,cape,cin,cloud +102 format(5(f13.4),2x,l1) + endif - enddo - enddo - enddo + if( (p(k).le.10000.0).and.(b2.lt.0.0) )then + ! stop if b < 0 and p < 100 mb + doit = .false. + endif - end subroutine dbzcalc + enddo + +!---- All done ----! + + return + end subroutine getcape + +!!$ subroutine divg_diagnostics(divg, ..., idiag, bd, npz,gridstruct%area_64, domain, fv_time)) +!!$ real, INPUT(IN) :: divg(bd%isd:bd%ied,bd%jsd:bd%jed,npz) +!!$ .... 
+!!$ +!!$ if (idiag%id_divg>0) then +!!$ used = send_data(idiag%id_divg, divg, fv_time) +!!$ +!!$ endif +!!$ +!!$ +!!$ if(flagstruct%fv_debug) call prt_mxm('divg', dp1, is, ie, js, je, 0, npz, 1.,gridstruct%area_64, domain) +!!$ end subroutine divg_diagnostics +!!$ +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + real function getqvs(p,t) + implicit none + + real :: p,t,es + + real, parameter :: eps = 287.04/461.5 + + es = 611.2*exp(17.67*(t-273.15)/(t-29.65)) + getqvs = eps*es/(p-es) + + return + end function getqvs + +!----------------------------------------------------------------------- +!ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc +!----------------------------------------------------------------------- + + real function getqvi(p,t) + implicit none + + real :: p,t,es + + real, parameter :: eps = 287.04/461.5 + + es = 611.2*exp(21.8745584*(t-273.15)/(t-7.66)) + getqvi = eps*es/(p-es) + + return + end function getqvi + +!----------------------------------------------------------------------- + + subroutine debug_column(pt, delp, delz, u, v, w, q, npz, ncnst, sphum, nwat, hydrostatic, bd, Time) + + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: npz, ncnst, sphum, nwat + logical, intent(IN) :: hydrostatic + real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz), intent(IN) :: pt, delp, w + real, dimension(bd%is:, bd%js:,1:), intent(IN) :: delz + real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1,npz), intent(IN) :: u + real, dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz), intent(IN) :: v + real, dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst), intent(IN) :: q + + + type(time_type), intent(IN) :: Time + integer :: i,j,k,n,l + real cond + + do n=1,size(diag_debug_i) + + i=diag_debug_i(n) + j=diag_debug_j(n) + + if (i < bd%is .or. i > bd%ie) cycle + if (j < bd%js .or. j > bd%je) cycle + + if (do_debug_diag_column(i,j)) then + call column_diagnostics_header(diag_debug_names(n), diag_debug_units(n), Time, n, & + diag_debug_lon, diag_debug_lat, diag_debug_i, diag_debug_j) + + write(diag_debug_units(n),'(A4, A7, A8, A6, A8, A8, A8, A8, A9)') 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond' + write(diag_debug_units(n),'(A4, A7, A8, A6, A8, A8, A8, A8, A9)') '', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg' + if (hydrostatic) then + call mpp_error(NOTE, 'Hydrostatic debug sounding not yet supported') + else + do k=2*npz/3,npz + cond = 0. + do l=2,nwat + cond = cond + q(i,j,k,l) + enddo + write(diag_debug_units(n),'(I4, F7.2, F8.3, I6, F8.3, F8.3, F8.3, F8.3, F9.5 )') & + k, pt(i,j,k), delp(i,j,k)*0.01, -int(delz(i,j,k)), u(i,j,k), v(i,j,k), w(i,j,k), & + q(i,j,k,sphum)*1000., cond*1000. 
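+! cond is the total condensate mixing ratio at level k (tracers 2..nwat
+! summed above), written in g/kg alongside the state variables.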
+
+!-----------------------------------------------------------------------
+
+    subroutine debug_column(pt, delp, delz, u, v, w, q, npz, ncnst, sphum, nwat, hydrostatic, bd, Time)
+
+      type(fv_grid_bounds_type), intent(IN) :: bd
+      integer, intent(IN) :: npz, ncnst, sphum, nwat
+      logical, intent(IN) :: hydrostatic
+      real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz), intent(IN) :: pt, delp, w
+      real, dimension(bd%is:, bd%js:,1:), intent(IN) :: delz
+      real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1,npz), intent(IN) :: u
+      real, dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz), intent(IN) :: v
+      real, dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst), intent(IN) :: q
+      type(time_type), intent(IN) :: Time
+
+      integer :: i,j,k,n,l
+      real :: cond
+
+      do n=1,size(diag_debug_i)
+
+        i=diag_debug_i(n)
+        j=diag_debug_j(n)
+
+        if (i < bd%is .or. i > bd%ie) cycle
+        if (j < bd%js .or. j > bd%je) cycle
+
+        if (do_debug_diag_column(i,j)) then
+          call column_diagnostics_header(diag_debug_names(n), diag_debug_units(n), Time, n, &
+               diag_debug_lon, diag_debug_lat, diag_debug_i, diag_debug_j)
+
+          write(diag_debug_units(n),'(A4, A7, A8, A6, A8, A8, A8, A8, A9)') 'k', 'T', 'delp', 'delz', 'u', 'v', 'w', 'sphum', 'cond'
+          write(diag_debug_units(n),'(A4, A7, A8, A6, A8, A8, A8, A8, A9)') '', 'K', 'mb', 'm', 'm/s', 'm/s', 'm/s', 'g/kg', 'g/kg'
+          if (hydrostatic) then
+            call mpp_error(NOTE, 'Hydrostatic debug sounding not yet supported')
+          else
+            do k=2*npz/3,npz
+              cond = 0.
+              do l=2,nwat
+                cond = cond + q(i,j,k,l)
+              enddo
+              write(diag_debug_units(n),'(I4, F7.2, F8.3, I6, F8.3, F8.3, F8.3, F8.3, F9.5 )') &
+                   k, pt(i,j,k), delp(i,j,k)*0.01, -int(delz(i,j,k)), u(i,j,k), v(i,j,k), w(i,j,k), &
+                   q(i,j,k,sphum)*1000., cond*1000.
+            enddo
+          endif
+
+          !call mpp_flush(diag_units(n))
+
+        endif
+
+      enddo
+
+    end subroutine debug_column
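+
+! Note on units in debug_column: delp is printed in mb (Pa * 0.01) and the
+! moisture fields in g/kg (kg/kg * 1000.). The nonhydrostatic layer depth
+! delz is stored as a negative number (z decreases as k increases), so
+! -int(delz) prints a positive thickness in m. Only the lowest third of
+! the column, k = 2*npz/3 .. npz, is written out.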
+
+    subroutine sounding_column( pt, delp, delz, u, v, q, peln, pkz, phis, &
+         npz, ncnst, sphum, nwat, hydrostatic, moist_phys, zvir, ng, bd, Time )
+
+      type(fv_grid_bounds_type), intent(IN) :: bd
+      integer, intent(IN) :: npz, ncnst, sphum, nwat, ng
+      real, intent(IN) :: zvir
+      logical, intent(IN) :: hydrostatic, moist_phys
+      real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz), intent(IN) :: pt, delp
+      real, dimension(bd%is:, bd%js:, 1:), intent(IN) :: delz
+      real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1,npz), intent(IN) :: u
+      real, dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz), intent(IN) :: v
+      real, dimension(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst), intent(IN) :: q
+      real, dimension(bd%is:bd%ie,npz+1,bd%js:bd%je), intent(in):: peln
+      real, dimension(bd%is:bd%ie,bd%js:bd%je,npz), intent(in):: pkz
+      real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed), intent(IN) :: phis
+      type(time_type), intent(IN) :: Time
+
+      real :: Tv, pres, hght(npz), dewpt, rh, mixr, tmp, qs(1), wspd, wdir, rpk, theta, thetav
+      real :: thetae(bd%is:bd%ie,bd%js:bd%je,npz)
+
+      real, PARAMETER :: rgrav = 1./grav
+      real, PARAMETER :: rdg = -rdgas*rgrav
+      real, PARAMETER :: sounding_top = 10.e2
+      real, PARAMETER :: ms_to_knot = 1.9438445
+      real, PARAMETER :: p0 = 1000.e2
+
+      integer :: i, j, k, n
+      integer :: yr_v, mo_v, dy_v, hr_v, mn_v, sec_v ! need to get numbers for these
+
+      if (.not. any(do_sonde_diag_column)) return
+      call get_date(Time, yr_v, mo_v, dy_v, hr_v, mn_v, sec_v)
+      call eqv_pot(thetae, pt, delp, delz, peln, pkz, q(bd%isd,bd%jsd,1,sphum), &
+           bd%is, bd%ie, bd%js, bd%je, ng, npz, hydrostatic, moist_phys)
+
+      do n=1,size(diag_sonde_i)
+
+        i=diag_sonde_i(n)
+        j=diag_sonde_j(n)
+
+        if (i < bd%is .or. i > bd%ie) cycle
+        if (j < bd%js .or. j > bd%je) cycle
+
+        if (do_sonde_diag_column(i,j)) then
+          !call column_diagnostics_header(diag_sonde_names(n), diag_sonde_units(n), Time, n, &
+          !     diag_sonde_lon, diag_sonde_lat, diag_sonde_i, diag_sonde_j)
+
+          write(diag_sonde_units(n),600) &
+               trim(diag_sonde_names(n)), yr_v, mo_v, dy_v, hr_v, yr_init, mo_init, dy_init, hr_init, trim(runname)
+600       format(A,'.v', I4, I2.2, I2.2, I2.2, '.i', I4, I2.2, I2.2, I2.2, '.', A, '.dat')
+          write(diag_sonde_units(n),601) trim(diag_sonde_names(n)), yr_v, mo_v, dy_v, hr_v, yr_init, mo_init, dy_init, hr_init, &
+               trim(runname), diag_sonde_lon(n), diag_sonde_lat(n)
+601       format(3x, A16, ' Valid ', I4, I2.2, I2.2, '.', I2.2, 'Z Init ', I4, I2.2, I2.2, '.', I2.2, 'Z \n', A, 2F8.3)
+          write(diag_sonde_units(n),*)
+          write(diag_sonde_units(n),*) '-------------------------------------------------------------------------------'
+          write(diag_sonde_units(n),'(11A7)') 'PRES', 'HGHT', "TEMP", "DWPT", "RELH", "MIXR", "DRCT", "SKNT", "THTA", "THTE", "THTV"
+          write(diag_sonde_units(n),'(11A7)') 'hPa', 'm', 'C', 'C', '%', 'g/kg', 'deg', 'knot', 'K', 'K', 'K'
+          write(diag_sonde_units(n),*) '-------------------------------------------------------------------------------'
+
+          if (hydrostatic) then
+            call mpp_error(NOTE, 'Hydrostatic diagnostic sounding not yet supported')
+          else
+            hght(npz) = phis(i,j)*rgrav - 0.5*delz(i,j,npz)
+            do k=npz-1,1,-1
+              hght(k) = hght(k+1) - 0.5*(delz(i,j,k)+delz(i,j,k+1))
+            enddo
+
+            do k=npz,1,-1
+
+              Tv = pt(i,j,k)*(1.+zvir*q(i,j,k,sphum))
+              pres = delp(i,j,k)/delz(i,j,k)*rdg*Tv
+              !if (pres < sounding_top) cycle
+
+              call qsmith(1, 1, 1, pt(i,j,k:k), &
+                   (/pres/), q(i,j,k:k,sphum), qs)
+
+              mixr = q(i,j,k,sphum)/(1.-sum(q(i,j,k,1:nwat))) ! convert from sphum to mixing ratio
+              rh = q(i,j,k,sphum)/qs(1)
+              tmp = ( log(max(rh,1.e-2))/ 17.27 + ( pt(i,j,k) - 273.14 )/ ( -35.84 + pt(i,j,k)) )
+              dewpt = 237.3* tmp/ ( 1. - tmp ) ! deg C
+              wspd = 0.5*sqrt((u(i,j,k)+u(i,j+1,k))*(u(i,j,k)+u(i,j+1,k)) + (v(i,j,k)+v(i+1,j,k))*(v(i,j,k)+v(i+1,j,k)))*ms_to_knot ! convert to knots
+              if (wspd > 0.01) then
+                !https://www.eol.ucar.edu/content/wind-direction-quick-reference
+                wdir = atan2(u(i,j,k)+u(i,j+1,k),v(i,j,k)+v(i+1,j,k)) * rad2deg
+              else
+                wdir = 0.
+              endif
+              rpk = exp(-kappa*log(pres/p0))
+              theta = pt(i,j,k)*rpk
+              thetav = Tv*rpk
+
+              write(diag_sonde_units(n),'(F7.1, I7, F7.1, F7.1, I7, F7.2, I7, F7.2, F7.1, F7.1, F7.1)') &
+                   pres*1.e-2, int(hght(k)), pt(i,j,k)-TFREEZE, dewpt, int(rh*100.), mixr*1.e3, int(wdir), wspd, theta, thetae(i,j,k), thetav
+            enddo
+          endif
+
+          !call mpp_flush(diag_units(n))
+
+        endif
+
+      enddo
+
+    end subroutine sounding_column
 
-!#######################################################################
-subroutine fv_diag_init_gn(Atm)
-  type(fv_atmos_type), intent(inout), target :: Atm
-
-  if (Atm%grid_Number > 1) then
-     write(gn,"(A2,I1)") " g", Atm%grid_number
-  else
-     gn = ""
-  end if
-
-end subroutine fv_diag_init_gn
 end module fv_diagnostics_mod
diff --git a/tools/fv_eta.F90 b/tools/fv_eta.F90
index dd59e98b0..c7d09be70 100644
--- a/tools/fv_eta.F90
+++ b/tools/fv_eta.F90
@@ -24,17 +24,17 @@ module fv_eta_mod
 use mpp_mod, only: FATAL, mpp_error
 implicit none
 private
- public set_eta, get_eta_level, compute_dz_var, compute_dz_L32, compute_dz_L101, set_hybrid_z, compute_dz, gw_1d, sm1_edge, hybrid_z_dz
-
-!---- version number -----
- character(len=128) :: version = '$Id$'
- character(len=128) :: tagname = '$Name$'
-!
Developer: Shian-Jiann Lin, NOAA/GFDL + public set_eta, set_external_eta, get_eta_level, compute_dz_var, & + compute_dz_L32, compute_dz_L101, set_hybrid_z, compute_dz, & + gw_1d, sm1_edge, hybrid_z_dz contains +!!!NOTE: USE_VAR_ETA not used in SHiELD +!!! This routine will be kept here +!!! for the time being to not disrupt idealized tests #ifdef USE_VAR_ETA - subroutine set_eta(km, ks, ptop, ak, bk) + subroutine set_eta(km, ks, ptop, ak, bk, npz_type) ! This is the easy to use version of the set_eta integer, intent(in):: km ! vertical dimension integer, intent(out):: ks ! number of pure p layers @@ -85,6 +85,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) real, intent(out):: ak(km+1) real, intent(out):: bk(km+1) real, intent(out):: ptop ! model top (Pa) + character(24), intent(IN) :: npz_type real pint, stretch_fac integer k real :: s_rate = -1.0 ! dummy value to not use var_les @@ -153,6 +154,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) bk(k) = b60(k) enddo #else +!!!!!!!!!!! MERGING STOPPED HERE 13 oct 17 !!!!!!!!!!!!!!!!! ptop = 3.e2 ! pint = 250.E2 pint = 300.E2 ! revised for Moist test @@ -206,7 +208,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) stretch_fac = 1.035 ! Hi-top: case (63) ! N = 8, M=4 - ptop = 1. + ptop = 1. ! c360 or c384 stretch_fac = 1.035 case (71) ! N = 9 @@ -231,7 +233,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) call mount_waves(km, ak, bk, ptop, ks, pint) #else if (s_rate > 0.) then - call var_les(km, ak, bk, ptop, ks, pint, s_rate) + call var_les(km, ak, bk, ptop, ks, pint, s_rate) else if ( km > 79 ) then call var_hi2(km, ak, bk, ptop, ks, pint, stretch_fac) @@ -240,7 +242,7 @@ subroutine set_eta(km, ks, ptop, ak, bk) ptop = 500.e2 ks = 0 do k=1,km+1 - bk(k) = real(k-1) / real (km) + bk(k) = real(k-1) / real (km) ak(k) = ptop*(1.-bk(k)) enddo else @@ -256,1236 +258,510 @@ subroutine set_eta(km, ks, ptop, ak, bk) end subroutine set_eta - subroutine mount_waves(km, ak, bk, ptop, ks, pint) - integer, intent(in):: km - real, intent(out):: ak(km+1), bk(km+1) - real, intent(out):: ptop, pint - integer, intent(out):: ks -! Local - real, parameter:: p00 = 1.E5 - real, dimension(km+1):: ze, pe1, peln, eta - real, dimension(km):: dz, dlnp - real ztop, t0, dz0, sum1, tmp1 - real ep, es, alpha, beta, gama, s_fac - integer k, k500 - - pint = 300.e2 -! s_fac = 1.05 -! dz0 = 500. - if ( km <= 60 ) then - s_fac = 1.0 - dz0 = 500. - else - s_fac = 1. - dz0 = 250. - endif - -! Basic parameters for HIWPP mountain waves - t0 = 300. -! ztop = 20.0e3; 500-m resolution in halft of the vertical domain -! ztop = real(km-1)*500. -!----------------------- -! Compute temp ptop based on isothermal atm -! ptop = p00*exp(-grav*ztop/(rdgas*t0)) - -! Lowest half has constant resolution - ze(km+1) = 0. - do k=km, km-19, -1 - ze(k) = ze(k+1) + dz0 - enddo - -! Stretching from 10-km and up: - do k=km-20, 3, -1 - dz0 = s_fac * dz0 - ze(k) = ze(k+1) + dz0 - enddo - ze(2) = ze(3) + sqrt(2.)*dz0 - ze(1) = ze(2) + 2.0*dz0 -! call sm1_edge(1, 1, 1, 1, km, 1, 1, ze, 1) +#else + !This is the version of set_eta used in SHiELD and AM4 + subroutine set_eta(km, ks, ptop, ak, bk, npz_type) -! Given z --> p - do k=1,km - dz(k) = ze(k) - ze(k+1) - dlnp(k) = grav*dz(k) / (rdgas*t0) - enddo +!Level definitions are now in this header file +#include - pe1(km+1) = p00 - peln(km+1) = log(p00) - do k=km,1,-1 - peln(k) = peln(k+1) - dlnp(k) - pe1(k) = exp(peln(k)) - enddo + integer, intent(in):: km ! vertical dimension + integer, intent(out):: ks ! 
number of pure p layers + real, intent(out):: ak(km+1) + real, intent(out):: bk(km+1) + real, intent(out):: ptop ! model top (Pa) + character(24), intent(IN) :: npz_type -! Comnpute new ptop - ptop = pe1(1) + real:: p0=1000.E2 + real:: pc=200.E2 -! Pe(k) = ak(k) + bk(k) * PS -! Locate pint and KS - ks = 0 - do k=2,km - if ( pint < pe1(k)) then - ks = k-1 - exit - endif - enddo + real pt, lnpe, dlnp + real press(km+1), pt1(km) + integer k + integer :: var_fn = 0 - if ( is_master() ) then - write(*,*) 'For (input) PINT=', 0.01*pint, ' KS=', ks, 'pint(computed)=', 0.01*pe1(ks+1) - write(*,*) 'Modified ptop =', ptop, ' ztop=', ze(1)/1000. - do k=1,km - write(*,*) k, 'ze =', ze(k)/1000. - enddo - endif - pint = pe1(ks+1) + real :: pint = 100.E2 + real :: stretch_fac = 1.03 + integer :: auto_routine = 0 -#ifdef NO_UKMO_HB - do k=1,ks+1 - ak(k) = pe1(k) - bk(k) = 0. - enddo - do k=ks+2,km+1 - bk(k) = (pe1(k) - pint) / (pe1(km+1)-pint) ! bk == sigma - ak(k) = pe1(k) - bk(k) * pe1(km+1) - enddo - bk(km+1) = 1. - ak(km+1) = 0. -#else -! Problematic for non-hydrostatic - do k=1,km+1 - eta(k) = pe1(k) / pe1(km+1) - enddo - ep = eta(ks+1) - es = eta(km) -! es = 1. - alpha = (ep**2-2.*ep*es) / (es-ep)**2 - beta = 2.*ep*es**2 / (es-ep)**2 - gama = -(ep*es)**2 / (es-ep)**2 + ptop = 1. -! Pure pressure: - do k=1,ks+1 - ak(k) = eta(k)*1.e5 - bk(k) = 0. - enddo + ! Definition: press(i,j,k) = ak(k) + bk(k) * ps(i,j) - do k=ks+2, km - ak(k) = alpha*eta(k) + beta + gama/eta(k) - ak(k) = ak(k)*1.e5 - enddo - ak(km+1) = 0. + if (trim(npz_type) == 'superC' .or. trim(npz_type) == 'superK') then - do k=ks+2, km - bk(k) = (pe1(k) - ak(k))/pe1(km+1) - enddo - bk(km+1) = 1. -#endif + auto_routine = 1 + select case (km) + case (20) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (24) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (30) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (40) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (50) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (60) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (80) + ptop = 56.e2 + pint = ptop + stretch_fac = 1.03 + case (90) ! super-duper cell + ptop = 40.e2 + stretch_fac = 1.025 + auto_routine = 2 + end select - if ( is_master() ) then - tmp1 = ak(ks+1) - do k=ks+1,km - tmp1 = max(tmp1, (ak(k)-ak(k+1))/max(1.E-5, (bk(k+1)-bk(k))) ) - enddo - write(*,*) 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. - endif + else - end subroutine mount_waves + select case (km) -#else - subroutine set_eta(km, ks, ptop, ak, bk) + case (5,10) ! does this work???? - integer, intent(in):: km ! vertical dimension - integer, intent(out):: ks ! number of pure p layers - real, intent(out):: ak(km+1) - real, intent(out):: bk(km+1) - real, intent(out):: ptop ! model top (Pa) -! local - real a24(25),b24(25) ! GFDL AM2L24 - real a26(27),b26(27) ! Jablonowski & Williamson 26-level - real a32(33),b32(33) - real a32w(33),b32w(33) - real a47(48),b47(48) - real a48(49),b48(49) - real a52(53),b52(53) - real a54(55),b54(55) - real a56(57),b56(57) - real a60(61),b60(61) - real a63(64),b63(64) - real a64(65),b64(65) - real a68(69),b68(69) ! cjg: grid with enhanced PBL resolution - real a96(97),b96(97) ! cjg: grid with enhanced PBL resolution - real a100(101),b100(101) - real a104(105),b104(105) - real a125(126),b125(126) - - real:: p0=1000.E2 - real:: pc=200.E2 - - real pt, pint, lnpe, dlnp - real press(km+1), pt1(km) - integer k + ! 
Equivalent Shallow Water: for modon test + ptop = 500.e2 + ks = 0 + do k=1,km+1 + bk(k) = real(k-1) / real (km) + ak(k) = ptop*(1.-bk(k)) + enddo -! Definition: press(i,j,k) = ak(k) + bk(k) * ps(i,j) - -!----------------------------------------------- -! GFDL AM2-L24: modified by SJL at the model top -!----------------------------------------------- -! data a24 / 100.0000, 1050.0000, 3474.7942, 7505.5556, 12787.2428, & - data a24 / 100.0000, 903.4465, 3474.7942, 7505.5556, 12787.2428, & - 19111.3683, 21854.9274, 22884.1866, 22776.3058, 21716.1604, & - 20073.2963, 18110.5123, 16004.7832, 13877.6253, 11812.5452, & - 9865.8840, 8073.9726, 6458.0834, 5027.9899, 3784.6085, & - 2722.0086, 1828.9752, 1090.2396, 487.4595, 0.0000 / - - data b24 / 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, & - 0.0000000, 0.0435679, 0.1102275, 0.1922249, 0.2817656, & - 0.3694997, 0.4532348, 0.5316253, 0.6038733, 0.6695556, & - 0.7285176, 0.7808017, 0.8265992, 0.8662148, 0.9000406, & - 0.9285364, 0.9522140, 0.9716252, 0.9873523, 1.0000000 / - -! Jablonowski & Williamson 26-level setup - data a26 / 219.4067, 489.5209, 988.2418, 1805.2010, 2983.7240, 4462.3340, & - 6160.5870, 7851.2430, 7731.2710, 7590.1310, 7424.0860, & - 7228.7440, 6998.9330, 6728.5740, 6410.5090, 6036.3220, & - 5596.1110, 5078.2250, 4468.9600, 3752.1910, 2908.9490, & - 2084.739, 1334.443, 708.499, 252.1360, 0.0, 0.0 / - - data b26 / 0.0, 0.0, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,& - 0.0000000, 0.01505309, 0.03276228, 0.05359622, 0.07810627, & - 0.1069411, 0.1408637, 0.1807720, 0.2277220, 0.2829562, & - 0.3479364, 0.4243822, 0.5143168, 0.6201202, 0.7235355, & - 0.8176768, 0.8962153, 0.9534761, 0.9851122, 1.0000000 / - - -! High-resolution troposphere setup -#ifdef OLD_32 -! Revised Apr 14, 2004: PINT = 245.027 mb - data a32/100.00000, 400.00000, 818.60211, & - 1378.88653, 2091.79519, 2983.64084, & - 4121.78960, 5579.22148, 7419.79300, & - 9704.82578, 12496.33710, 15855.26306, & - 19839.62499, 24502.73262, 28177.10152, & - 29525.28447, 29016.34358, 27131.32792, & - 24406.11225, 21326.04907, 18221.18357, & - 15275.14642, 12581.67796, 10181.42843, & - 8081.89816, 6270.86956, 4725.35001, & - 3417.39199, 2317.75459, 1398.09473, & - 632.49506, 0.00000, 0.00000 / - - data b32/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.01711, & - 0.06479, 0.13730, 0.22693, & - 0.32416, 0.42058, 0.51105, & - 0.59325, 0.66628, 0.73011, & - 0.78516, 0.83217, 0.87197, & - 0.90546, 0.93349, 0.95685, & - 0.97624, 0.99223, 1.00000 / -#else -! SJL June 26, 2012 -! pint= 55.7922 - data a32/100.00000, 400.00000, 818.60211, & - 1378.88653, 2091.79519, 2983.64084, & - 4121.78960, 5579.22148, 6907.19063, & - 7735.78639, 8197.66476, 8377.95525, & - 8331.69594, 8094.72213, 7690.85756, & - 7139.01788, 6464.80251, 5712.35727, & - 4940.05347, 4198.60465, 3516.63294, & - 2905.19863, 2366.73733, 1899.19455, & - 1497.78137, 1156.25252, 867.79199, & - 625.59324, 423.21322, 254.76613, & - 115.06646, 0.00000, 0.00000 / - - data b32/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00513, & - 0.01969, 0.04299, 0.07477, & - 0.11508, 0.16408, 0.22198, & - 0.28865, 0.36281, 0.44112, & - 0.51882, 0.59185, 0.65810, & - 0.71694, 0.76843, 0.81293, & - 0.85100, 0.88331, 0.91055, & - 0.93338, 0.95244, 0.96828, & - 0.98142, 0.99223, 1.00000 / -#endif + case (24) -!--------------------- -! Wilson's 32L settings: -!--------------------- -! 
Top changed to 0.01 mb - data a32w/ 1.00, 26.6378, 84.5529, 228.8592, & - 539.9597, 1131.7087, 2141.8082, 3712.0454, & - 5963.5317, 8974.1873, 12764.5388, 17294.5911, & - 20857.7007, 22221.8651, 22892.7202, 22891.1641, & - 22286.0724, 21176.0846, 19673.0671, 17889.0989, & - 15927.5060, 13877.6239, 11812.5474, 9865.8830, & - 8073.9717, 6458.0824, 5027.9893, 3784.6104, & - 2722.0093, 1828.9741, 1090.2397, 487.4575, & - 0.0000 / - - data b32w/ 0.0000, 0.0000, 0.0000, 0.0000, & - 0.0000, 0.0000, 0.0000, 0.0000, & - 0.0000, 0.0000, 0.0000, 0.0000, & - 0.0159, 0.0586, 0.1117, 0.1734, & - 0.2415, 0.3137, 0.3878, 0.4619, & - 0.5344, 0.6039, 0.6696, 0.7285, & - 0.7808, 0.8266, 0.8662, 0.9000, & - 0.9285, 0.9522, 0.9716, 0.9874, & - 1.0000 / - - -#ifdef OLD_L47 -! QBO setting with ptop = 0.1 mb and p_full=0.17 mb; pint ~ 100 mb - data a47/ 10.00000, 24.45365, 48.76776, & - 85.39458, 133.41983, 191.01402, & - 257.94919, 336.63306, 431.52741, & - 548.18995, 692.78825, 872.16512, & - 1094.18467, 1368.11917, 1704.99489, & - 2117.91945, 2622.42986, 3236.88281, & - 3982.89623, 4885.84733, 5975.43260, & - 7286.29500, 8858.72424, 10739.43477, & - 12982.41110, 15649.68745, 18811.37629, & - 22542.71275, 25724.93857, 27314.36781, & - 27498.59474, 26501.79312, 24605.92991, & - 22130.51655, 19381.30274, 16601.56419, & - 13952.53231, 11522.93244, 9350.82303, & - 7443.47723, 5790.77434, 4373.32696, & - 3167.47008, 2148.51663, 1293.15510, & - 581.62429, 0.00000, 0.00000 / - - data b47/ 0.0000, 0.0000, 0.0000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.01188, 0.04650, & - 0.10170, 0.17401, 0.25832, & - 0.34850, 0.43872, 0.52448, & - 0.60307, 0.67328, 0.73492, & - 0.78834, 0.83418, 0.87320, & - 0.90622, 0.93399, 0.95723, & - 0.97650, 0.99223, 1.00000 / -#else -! Oct 23, 2012 -! 
QBO setting with ptop = 0.1 mb, pint ~ 60 mb - data a47/ 10.00000, 24.45365, 48.76776, & - 85.39458, 133.41983, 191.01402, & - 257.94919, 336.63306, 431.52741, & - 548.18995, 692.78825, 872.16512, & - 1094.18467, 1368.11917, 1704.99489, & - 2117.91945, 2622.42986, 3236.88281, & - 3982.89623, 4885.84733, 5975.43260, & - 7019.26669, 7796.15848, 8346.60209, & - 8700.31838, 8878.27554, 8894.27179, & - 8756.46404, 8469.60171, 8038.92687, & - 7475.89006, 6803.68067, 6058.68992, & - 5285.28859, 4526.01565, 3813.00206, & - 3164.95553, 2589.26318, 2085.96929, & - 1651.11596, 1278.81205, 962.38875, & - 695.07046, 470.40784, 282.61654, & - 126.92745, 0.00000, 0.00000 / - data b47/ 0.0000, 0.0000, 0.0000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00267, 0.01063, 0.02393, & - 0.04282, 0.06771, 0.09917, & - 0.13786, 0.18444, 0.23925, & - 0.30193, 0.37100, 0.44379, & - 0.51695, 0.58727, 0.65236, & - 0.71094, 0.76262, 0.80757, & - 0.84626, 0.87930, 0.90731, & - 0.93094, 0.95077, 0.96733, & - 0.98105, 0.99223, 1.00000 / -#endif + ks = 5 + do k=1,km+1 + ak(k) = a24(k) + bk(k) = b24(k) + enddo - data a48/ & - 1.00000, 2.69722, 5.17136, & - 8.89455, 14.24790, 22.07157, & - 33.61283, 50.48096, 74.79993, & - 109.40055, 158.00460, 225.44108, & - 317.89560, 443.19350, 611.11558, & - 833.74392, 1125.83405, 1505.20759, & - 1993.15829, 2614.86254, 3399.78420, & - 4382.06240, 5600.87014, 7100.73115, & - 8931.78242, 11149.97021, 13817.16841, & - 17001.20930, 20775.81856, 23967.33875, & - 25527.64563, 25671.22552, 24609.29622, & - 22640.51220, 20147.13482, 17477.63530, & - 14859.86462, 12414.92533, 10201.44191, & - 8241.50255, 6534.43202, 5066.17865, & - 3815.60705, 2758.60264, 1870.64631, & - 1128.33931, 510.47983, 0.00000, & - 0.00000 / - - data b48/ & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.01253, & - 0.04887, 0.10724, 0.18455, & - 0.27461, 0.36914, 0.46103, & - 0.54623, 0.62305, 0.69099, & - 0.75016, 0.80110, 0.84453, & - 0.88127, 0.91217, 0.93803, & - 0.95958, 0.97747, 0.99223, & - 1.00000 / - -! High PBL resolution with top at 1 mb -! 
SJL modified May 7, 2013 to ptop ~ 100 mb - data a54/100.00000, 254.83931, 729.54278, & - 1602.41121, 2797.50667, 4100.18977, & - 5334.87140, 6455.24153, 7511.80944, & - 8580.26355, 9714.44293, 10938.62253, & - 12080.36051, 12987.13921, 13692.75084, & - 14224.92180, 14606.55444, 14856.69953, & - 14991.32121, 15023.90075, 14965.91493, & - 14827.21612, 14616.33505, 14340.72252, & - 14006.94280, 13620.82849, 13187.60470, & - 12711.98873, 12198.27003, 11650.37451, & - 11071.91608, 10466.23819, 9836.44706, & - 9185.43852, 8515.96231, 7831.01080, & - 7135.14301, 6436.71659, 5749.00215, & - 5087.67188, 4465.67510, 3889.86419, & - 3361.63433, 2879.51065, 2441.02496, & - 2043.41345, 1683.80513, 1359.31122, & - 1067.09135, 804.40101, 568.62625, & - 357.32525, 168.33263, 0.00000, & - 0.00000 / - - data b54/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00180, 0.00694, 0.01510, & - 0.02601, 0.03942, 0.05515, & - 0.07302, 0.09288, 0.11459, & - 0.13803, 0.16307, 0.18960, & - 0.21753, 0.24675, 0.27716, & - 0.30866, 0.34115, 0.37456, & - 0.40879, 0.44375, 0.47935, & - 0.51551, 0.55215, 0.58916, & - 0.62636, 0.66334, 0.69946, & - 0.73395, 0.76622, 0.79594, & - 0.82309, 0.84780, 0.87020, & - 0.89047, 0.90876, 0.92524, & - 0.94006, 0.95336, 0.96529, & - 0.97596, 0.98551, 0.99400, & - 1.00000 / - - -! The 56-L setup - data a56/ 10.00000, 24.97818, 58.01160, & - 115.21466, 199.29210, 309.39897, & - 445.31785, 610.54747, 812.28518, & - 1059.80882, 1363.07092, 1732.09335, & - 2176.91502, 2707.68972, 3334.70962, & - 4068.31964, 4918.76594, 5896.01890, & - 7009.59166, 8268.36324, 9680.41211, & - 11252.86491, 12991.76409, 14901.95764, & - 16987.01313, 19249.15733, 21689.24182, & - 23845.11055, 25330.63353, 26243.52467, & - 26663.84998, 26657.94696, 26281.61371, & - 25583.05256, 24606.03265, 23393.39510, & - 21990.28845, 20445.82122, 18811.93894, & - 17139.59660, 15473.90350, 13850.50167, & - 12294.49060, 10821.62655, 9440.57746, & - 8155.11214, 6965.72496, 5870.70511, & - 4866.83822, 3949.90019, 3115.03562, & - 2357.07879, 1670.87329, 1051.65120, & - 495.51399, 0.00000, 0.00000 / - - data b56 /0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00462, 0.01769, 0.03821, & - 0.06534, 0.09834, 0.13659, & - 0.17947, 0.22637, 0.27660, & - 0.32929, 0.38343, 0.43791, & - 0.49162, 0.54361, 0.59319, & - 0.63989, 0.68348, 0.72391, & - 0.76121, 0.79545, 0.82679, & - 0.85537, 0.88135, 0.90493, & - 0.92626, 0.94552, 0.96286, & - 0.97840, 0.99223, 1.00000 / - - data a60/ 1.7861000000e-01, 1.0805100000e+00, 3.9647100000e+00, & - 9.7516000000e+00, 1.9816580000e+01, 3.6695950000e+01, & - 6.2550570000e+01, 9.9199620000e+01, 1.4792505000e+02, & - 2.0947487000e+02, 2.8422571000e+02, 3.7241721000e+02, & - 4.7437835000e+02, 5.9070236000e+02, 7.2236063000e+02, & - 8.7076746000e+02, 1.0378138800e+03, 1.2258877300e+03, & - 1.4378924600e+03, 1.6772726600e+03, 1.9480506400e+03, & - 2.2548762700e+03, 2.6030909400e+03, 2.9988059200e+03, & - 3.4489952300e+03, 3.9616028900e+03, 4.5456641600e+03, & - 5.2114401700e+03, 5.9705644000e+03, 6.8361981800e+03, & - 7.8231906000e+03, 8.9482351000e+03, 1.0230010660e+04, & - 1.1689289750e+04, 1.3348986860e+04, 1.5234111060e+04, & - 1.7371573230e+04, 1.9789784580e+04, 2.2005564550e+04, & - 
2.3550115120e+04, 2.4468583320e+04, 2.4800548800e+04, & - 2.4582445070e+04, 2.3849999620e+04, 2.2640519740e+04, & - 2.0994737150e+04, 1.8957848730e+04, 1.6579413230e+04, & - 1.4080071030e+04, 1.1753630920e+04, 9.6516996300e+03, & - 7.7938009300e+03, 6.1769062800e+03, 4.7874276000e+03, & - 3.6050497500e+03, 2.6059860700e+03, 1.7668328200e+03, & - 1.0656131200e+03, 4.8226201000e+02, 0.0000000000e+00, & - 0.0000000000e+00 / - - - data b60/ 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 5.0600000000e-03, & - 2.0080000000e-02, 4.4900000000e-02, 7.9360000000e-02, & - 1.2326000000e-01, 1.7634000000e-01, 2.3820000000e-01, & - 3.0827000000e-01, 3.8581000000e-01, 4.6989000000e-01, & - 5.5393000000e-01, 6.2958000000e-01, 6.9642000000e-01, & - 7.5458000000e-01, 8.0463000000e-01, 8.4728000000e-01, & - 8.8335000000e-01, 9.1368000000e-01, 9.3905000000e-01, & - 9.6020000000e-01, 9.7775000000e-01, 9.9223000000e-01, & - 1.0000000000e+00 / - -! This is activated by USE_GFSL63 -! Thfollowing L63 setting is the same as NCEP GFS's L64 except the top -! 3 layers - data a63/64.247, 137.790, 221.958, & - 318.266, 428.434, 554.424, & - 698.457, 863.05803, 1051.07995, & - 1265.75194, 1510.71101, 1790.05098, & - 2108.36604, 2470.78817, 2883.03811, & - 3351.46002, 3883.05187, 4485.49315, & - 5167.14603, 5937.04991, 6804.87379, & - 7780.84698, 8875.64338, 10100.20534, & - 11264.35673, 12190.64366, 12905.42546, & - 13430.87867, 13785.88765, 13986.77987, & - 14047.96335, 13982.46770, 13802.40331, & - 13519.33841, 13144.59486, 12689.45608, & - 12165.28766, 11583.57006, 10955.84778, & - 10293.60402, 9608.08306, 8910.07678, & - 8209.70131, 7516.18560, 6837.69250, & - 6181.19473, 5552.39653, 4955.72632, & - 4394.37629, 3870.38682, 3384.76586, & - 2937.63489, 2528.37666, 2155.78385, & - 1818.20722, 1513.68173, 1240.03585, & - 994.99144, 776.23591, 581.48797, & - 408.53400, 255.26520, 119.70243, 0. 
/ + case (26) - data b63/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00201, 0.00792, 0.01755, & - 0.03079, 0.04751, 0.06761, & - 0.09097, 0.11746, 0.14690, & - 0.17911, 0.21382, 0.25076, & - 0.28960, 0.32994, 0.37140, & - 0.41353, 0.45589, 0.49806, & - 0.53961, 0.58015, 0.61935, & - 0.65692, 0.69261, 0.72625, & - 0.75773, 0.78698, 0.81398, & - 0.83876, 0.86138, 0.88192, & - 0.90050, 0.91722, 0.93223, & - 0.94565, 0.95762, 0.96827, & - 0.97771, 0.98608, 0.99347, 1./ -#ifdef GFSL64 - data a64/20.00000, 68.00000, 137.79000, & - 221.95800, 318.26600, 428.43400, & - 554.42400, 698.45700, 863.05803, & - 1051.07995, 1265.75194, 1510.71101, & - 1790.05098, 2108.36604, 2470.78817, & - 2883.03811, 3351.46002, 3883.05187, & - 4485.49315, 5167.14603, 5937.04991, & - 6804.87379, 7780.84698, 8875.64338, & - 9921.40745, 10760.99844, 11417.88354, & - 11911.61193, 12258.61668, 12472.89642, & - 12566.58298, 12550.43517, 12434.26075, & - 12227.27484, 11938.39468, 11576.46910, & - 11150.43640, 10669.41063, 10142.69482, & - 9579.72458, 8989.94947, 8382.67090, & - 7766.85063, 7150.91171, 6542.55077, & - 5948.57894, 5374.81094, 4825.99383, & - 4305.79754, 3816.84622, 3360.78848, & - 2938.39801, 2549.69756, 2194.08449, & - 1870.45732, 1577.34218, 1313.00028, & - 1075.52114, 862.90778, 673.13815, & - 504.22118, 354.22752, 221.32110, & - 103.78014, 0./ - data b64/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00179, 0.00705, 0.01564, & - 0.02749, 0.04251, 0.06064, & - 0.08182, 0.10595, 0.13294, & - 0.16266, 0.19492, 0.22950, & - 0.26615, 0.30455, 0.34435, & - 0.38516, 0.42656, 0.46815, & - 0.50949, 0.55020, 0.58989, & - 0.62825, 0.66498, 0.69987, & - 0.73275, 0.76351, 0.79208, & - 0.81845, 0.84264, 0.86472, & - 0.88478, 0.90290, 0.91923, & - 0.93388, 0.94697, 0.95865, & - 0.96904, 0.97826, 0.98642, & - 0.99363, 1./ -#else - data a64/1.00000, 3.90000, 8.70000, & - 15.42000, 24.00000, 34.50000, & - 47.00000, 61.50000, 78.60000, & - 99.13500, 124.12789, 154.63770, & - 191.69700, 236.49300, 290.38000, & - 354.91000, 431.82303, 523.09300, & - 630.92800, 757.79000, 906.45000, & - 1079.85000, 1281.00000, 1515.00000, & - 1788.00000, 2105.00000, 2470.00000, & - 2889.00000, 3362.00000, 3890.00000, & - 4475.00000, 5120.00000, 5830.00000, & - 6608.00000, 7461.00000, 8395.00000, & - 9424.46289, 10574.46880, 11864.80270, & - 13312.58890, 14937.03710, 16759.70700, & - 18804.78710, 21099.41210, 23674.03710, & - 26562.82810, 29804.11720, 32627.31640, & - 34245.89840, 34722.28910, 34155.19920, & - 32636.50390, 30241.08200, 27101.44920, & - 23362.20700, 19317.05270, 15446.17090, & - 12197.45210, 9496.39941, 7205.66992, & - 5144.64307, 3240.79346, 1518.62134, & - 0.00000, 0.00000 / - - data b64/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 
0.00000, 0.00000, & - 0.00000, 0.00000, 0.00813, & - 0.03224, 0.07128, 0.12445, & - 0.19063, 0.26929, 0.35799, & - 0.45438, 0.55263, 0.64304, & - 0.71703, 0.77754, 0.82827, & - 0.87352, 0.91502, 0.95235, & - 0.98511, 1.00000 / -#endif -!-->cjg - data a68/1.00000, 2.68881, 5.15524, & - 8.86683, 14.20349, 22.00278, & - 33.50807, 50.32362, 74.56680, & - 109.05958, 157.51214, 224.73844, & - 316.90481, 441.81219, 609.21090, & - 831.14537, 1122.32514, 1500.51628, & - 1986.94617, 2606.71274, 3389.18802, & - 4368.40473, 5583.41379, 7078.60015, & - 8903.94455, 11115.21886, 13774.60566, & - 16936.82070, 20340.47045, 23193.71492, & - 24870.36141, 25444.59363, 25252.57081, & - 24544.26211, 23474.29096, 22230.65331, & - 20918.50731, 19589.96280, 18296.26682, & - 17038.02866, 15866.85655, 14763.18943, & - 13736.83624, 12794.11850, 11930.72442, & - 11137.17217, 10404.78946, 9720.03954, & - 9075.54055, 8466.72650, 7887.12346, & - 7333.90490, 6805.43028, 6297.33773, & - 5805.78227, 5327.94995, 4859.88765, & - 4398.63854, 3942.81761, 3491.08449, & - 3043.04531, 2598.71608, 2157.94527, & - 1720.87444, 1287.52805, 858.02944, & - 432.71276, 8.10905, 0.00000 / - - data b68/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00283, 0.01590, & - 0.04412, 0.08487, 0.13284, & - 0.18470, 0.23828, 0.29120, & - 0.34211, 0.39029, 0.43518, & - 0.47677, 0.51536, 0.55091, & - 0.58331, 0.61263, 0.63917, & - 0.66333, 0.68552, 0.70617, & - 0.72555, 0.74383, 0.76117, & - 0.77765, 0.79335, 0.80838, & - 0.82287, 0.83693, 0.85069, & - 0.86423, 0.87760, 0.89082, & - 0.90392, 0.91689, 0.92973, & - 0.94244, 0.95502, 0.96747, & - 0.97979, 0.99200, 1.00000 / - - data a96/1.00000, 2.35408, 4.51347, & - 7.76300, 12.43530, 19.26365, & - 29.33665, 44.05883, 65.28397, & - 95.48274, 137.90344, 196.76073, & - 277.45330, 386.81095, 533.37018, & - 727.67600, 982.60677, 1313.71685, & - 1739.59104, 2282.20281, 2967.26766, & - 3824.58158, 4888.33404, 6197.38450, & - 7795.49158, 9731.48414, 11969.71024, & - 14502.88894, 17304.52434, 20134.76139, & - 22536.63814, 24252.54459, 25230.65591, & - 25585.72044, 25539.91412, 25178.87141, & - 24644.84493, 23978.98781, 23245.49366, & - 22492.11600, 21709.93990, 20949.64473, & - 20225.94258, 19513.31158, 18829.32485, & - 18192.62250, 17589.39396, 17003.45386, & - 16439.01774, 15903.91204, 15396.39758, & - 14908.02140, 14430.65897, 13967.88643, & - 13524.16667, 13098.30227, 12687.56457, & - 12287.08757, 11894.41553, 11511.54106, & - 11139.22483, 10776.01912, 10419.75711, & - 10067.11881, 9716.63489, 9369.61967, & - 9026.69066, 8687.29884, 8350.04978, & - 8013.20925, 7677.12187, 7343.12994, & - 7011.62844, 6681.98102, 6353.09764, & - 6025.10535, 5699.10089, 5375.54503, & - 5053.63074, 4732.62740, 4413.38037, & - 4096.62775, 3781.79777, 3468.45371, & - 3157.19882, 2848.25306, 2541.19150, & - 2236.21942, 1933.50628, 1632.83741, & - 1334.35954, 1038.16655, 744.22318, & - 452.71094, 194.91899, 0.00000, & - 0.00000 / - - data b96/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00193, & - 0.00974, 0.02538, 0.04876, & - 0.07817, 
0.11081, 0.14514, & - 0.18007, 0.21486, 0.24866, & - 0.28088, 0.31158, 0.34030, & - 0.36701, 0.39210, 0.41554, & - 0.43733, 0.45774, 0.47707, & - 0.49540, 0.51275, 0.52922, & - 0.54495, 0.56007, 0.57459, & - 0.58850, 0.60186, 0.61471, & - 0.62715, 0.63922, 0.65095, & - 0.66235, 0.67348, 0.68438, & - 0.69510, 0.70570, 0.71616, & - 0.72651, 0.73675, 0.74691, & - 0.75700, 0.76704, 0.77701, & - 0.78690, 0.79672, 0.80649, & - 0.81620, 0.82585, 0.83542, & - 0.84492, 0.85437, 0.86375, & - 0.87305, 0.88229, 0.89146, & - 0.90056, 0.90958, 0.91854, & - 0.92742, 0.93623, 0.94497, & - 0.95364, 0.96223, 0.97074, & - 0.97918, 0.98723, 0.99460, & - 1.00000 / -!<--cjg -! -! Ultra high troposphere resolution - data a100/100.00000, 300.00000, 800.00000, & - 1762.35235, 3106.43596, 4225.71874, & - 4946.40525, 5388.77387, 5708.35540, & - 5993.33124, 6277.73673, 6571.49996, & - 6877.05339, 7195.14327, 7526.24920, & - 7870.82981, 8229.35361, 8602.30193, & - 8990.16936, 9393.46399, 9812.70768, & - 10248.43625, 10701.19980, 11171.56286, & - 11660.10476, 12167.41975, 12694.11735, & - 13240.82253, 13808.17600, 14396.83442, & - 15007.47066, 15640.77407, 16297.45067, & - 16978.22343, 17683.83253, 18415.03554, & - 19172.60771, 19957.34218, 20770.05022, & - 21559.14829, 22274.03147, 22916.87519, & - 23489.70456, 23994.40187, 24432.71365, & - 24806.25734, 25116.52754, 25364.90190, & - 25552.64670, 25680.92203, 25750.78675, & - 25763.20311, 25719.04113, 25619.08274, & - 25464.02630, 25254.49482, 24991.06137, & - 24674.32737, 24305.11235, 23884.79781, & - 23415.77059, 22901.76510, 22347.84738, & - 21759.93950, 21144.07284, 20505.73136, & - 19849.54271, 19179.31412, 18498.23400, & - 17809.06809, 17114.28232, 16416.10343, & - 15716.54833, 15017.44246, 14320.43478, & - 13627.01116, 12938.50682, 12256.11762, & - 11580.91062, 10913.83385, 10255.72526, & - 9607.32122, 8969.26427, 8342.11044, & - 7726.33606, 7122.34405, 6530.46991, & - 5950.98721, 5384.11279, 4830.01153, & - 4288.80090, 3760.55514, 3245.30920, & - 2743.06250, 2253.78294, 1777.41285, & - 1313.88054, 863.12371, 425.13088, & - 0.00000, 0.00000 / - - - data b100/0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00000, 0.00000, 0.00000, & - 0.00052, 0.00209, 0.00468, & - 0.00828, 0.01288, 0.01849, & - 0.02508, 0.03266, 0.04121, & - 0.05075, 0.06126, 0.07275, & - 0.08521, 0.09866, 0.11308, & - 0.12850, 0.14490, 0.16230, & - 0.18070, 0.20009, 0.22042, & - 0.24164, 0.26362, 0.28622, & - 0.30926, 0.33258, 0.35605, & - 0.37958, 0.40308, 0.42651, & - 0.44981, 0.47296, 0.49591, & - 0.51862, 0.54109, 0.56327, & - 0.58514, 0.60668, 0.62789, & - 0.64872, 0.66919, 0.68927, & - 0.70895, 0.72822, 0.74709, & - 0.76554, 0.78357, 0.80117, & - 0.81835, 0.83511, 0.85145, & - 0.86736, 0.88286, 0.89794, & - 0.91261, 0.92687, 0.94073, & - 0.95419, 0.96726, 0.97994, & - 0.99223, 1.00000 / - - data a104/ & - 1.8827062944e-01, 7.7977549145e-01, 2.1950593583e+00, & - 4.9874566624e+00, 9.8041418997e+00, 1.7019717163e+01, & - 2.7216579591e+01, 4.0518628401e+01, 5.6749646818e+01, & - 7.5513868331e+01, 9.6315093333e+01, 1.1866706195e+02, & - 1.4216835396e+02, 1.6653733709e+02, 1.9161605772e+02, & - 2.1735580129e+02, 2.4379516604e+02, 2.7103771847e+02, & - 2.9923284173e+02, 
3.2856100952e+02, 3.5922338766e+02, & - 3.9143507908e+02, 4.2542117983e+02, 4.6141487902e+02, & - 4.9965698106e+02, 5.4039638379e+02, 5.8389118154e+02, & - 6.3041016829e+02, 6.8023459505e+02, 7.3366009144e+02, & - 7.9099869949e+02, 8.5258099392e+02, 9.1875827946e+02, & - 9.8990486716e+02, 1.0664204381e+03, 1.1487325074e+03, & - 1.2372990044e+03, 1.3326109855e+03, 1.4351954993e+03, & - 1.5456186222e+03, 1.6644886848e+03, 1.7924597105e+03, & - 1.9302350870e+03, 2.0785714934e+03, 2.2382831070e+03, & - 2.4102461133e+03, 2.5954035462e+03, 2.7947704856e+03, & - 3.0094396408e+03, 3.2405873512e+03, 3.4894800360e+03, & - 3.7574811281e+03, 4.0460585279e+03, 4.3567926151e+03, & - 4.6913848588e+03, 5.0516670674e+03, 5.4396113207e+03, & - 5.8573406270e+03, 6.3071403487e+03, 6.7914704368e+03, & - 7.3129785102e+03, 7.8745138115e+03, 8.4791420557e+03, & - 9.1301611750e+03, 9.8311179338e+03, 1.0585825354e+04, & - 1.1398380836e+04, 1.2273184781e+04, 1.3214959424e+04, & - 1.4228767429e+04, 1.5320029596e+04, 1.6494540743e+04, & - 1.7758482452e+04, 1.9118430825e+04, 2.0422798801e+04, & - 2.1520147587e+04, 2.2416813461e+04, 2.3118184510e+04, & - 2.3628790785e+04, 2.3952411814e+04, 2.4092209011e+04, & - 2.4050892106e+04, 2.3830930156e+04, 2.3434818358e+04, & - 2.2865410898e+04, 2.2126326004e+04, 2.1222420323e+04, & - 2.0160313690e+04, 1.8948920926e+04, 1.7599915822e+04, & - 1.6128019809e+04, 1.4550987232e+04, 1.2889169132e+04, & - 1.1164595563e+04, 9.4227665517e+03, 7.7259097899e+03, & - 6.1538244381e+03, 4.7808126007e+03, 3.5967415552e+03, & - 2.5886394104e+03, 1.7415964865e+03, 1.0393721271e+03, & - 4.6478852032e+02, 7.0308342481e-13, 0.0000000000e+00 / - - - data b104/ & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & - 0.0000000000e+00, 0.0000000000e+00, 1.5648447298e-03, & - 6.2617046389e-03, 1.4104157933e-02, 2.5118187415e-02, & - 3.9340510972e-02, 5.6816335609e-02, 7.7596328431e-02, & - 1.0173255472e-01, 1.2927309709e-01, 1.6025505622e-01, & - 1.9469566981e-01, 2.3258141217e-01, 2.7385520518e-01, & - 3.1840233814e-01, 3.6603639170e-01, 4.1648734767e-01, & - 4.6939496013e-01, 5.2431098738e-01, 5.8071350676e-01, & - 6.3803478105e-01, 6.9495048840e-01, 7.4963750338e-01, & - 
7.9975208897e-01, 8.4315257576e-01, 8.8034012292e-01, & - 9.1184389721e-01, 9.3821231526e-01, 9.6000677644e-01, & - 9.7779792223e-01, 9.9216315122e-01, 1.0000000000e+00 / - -! IFS-like L125(top 12 levels removed from IFSL137) - data a125/ 64., & - 86.895882, 107.415741, 131.425507, 159.279404, 191.338562, & - 227.968948, 269.539581, 316.420746, 368.982361, 427.592499, 492.616028, & - 564.413452, 643.339905, 729.744141, 823.967834, 926.344910, 1037.201172, & - 1156.853638, 1285.610352, 1423.770142, 1571.622925, 1729.448975, 1897.519287, & - 2076.095947, 2265.431641, 2465.770508, 2677.348145, 2900.391357, 3135.119385, & - 3381.743652, 3640.468262, 3911.490479, 4194.930664, 4490.817383, 4799.149414, & - 5119.895020, 5452.990723, 5798.344727, 6156.074219, 6526.946777, 6911.870605, & - 7311.869141, 7727.412109, 8159.354004, 8608.525391, 9076.400391, 9562.682617, & - 10065.978516, 10584.631836, 11116.662109, 11660.067383, 12211.547852, 12766.873047, & - 13324.668945, 13881.331055, 14432.139648, 14975.615234, 15508.256836, 16026.115234, & - 16527.322266, 17008.789063, 17467.613281, 17901.621094, 18308.433594, 18685.718750, & - 19031.289063, 19343.511719, 19620.042969, 19859.390625, 20059.931641, 20219.664063, & - 20337.863281, 20412.308594, 20442.078125, 20425.718750, 20361.816406, 20249.511719, & - 20087.085938, 19874.025391, 19608.572266, 19290.226563, 18917.460938, 18489.707031, & - 18006.925781, 17471.839844, 16888.687500, 16262.046875, 15596.695313, 14898.453125, & - 14173.324219, 13427.769531, 12668.257813, 11901.339844, 11133.304688, 10370.175781, & - 9617.515625, 8880.453125, 8163.375000, 7470.343750, 6804.421875, 6168.531250, & - 5564.382813, 4993.796875, 4457.375000, 3955.960938, 3489.234375, 3057.265625, & - 2659.140625, 2294.242188, 1961.500000, 1659.476563, 1387.546875, 1143.250000, & - 926.507813, 734.992188, 568.062500, 424.414063, 302.476563, 202.484375, & - 122.101563, 62.781250, 22.835938, 3.757813, 0.000000, 0.000000 / - - data b125/ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & - 0.000000, 0.000007, 0.000024, 0.000059, 0.000112, 0.000199, & - 0.000340, 0.000562, 0.000890, 0.001353, 0.001992, 0.002857, & - 0.003971, 0.005378, 0.007133, 0.009261, 0.011806, 0.014816, & - 0.018318, 0.022355, 0.026964, 0.032176, 0.038026, 0.044548, & - 0.051773, 0.059728, 0.068448, 0.077958, 0.088286, 0.099462, & - 0.111505, 0.124448, 0.138313, 0.153125, 0.168910, 0.185689, & - 0.203491, 0.222333, 0.242244, 0.263242, 0.285354, 0.308598, & - 0.332939, 0.358254, 0.384363, 0.411125, 0.438391, 0.466003, & - 0.493800, 0.521619, 0.549301, 0.576692, 0.603648, 0.630036, & - 0.655736, 0.680643, 0.704669, 0.727739, 0.749797, 0.770798, & - 0.790717, 0.809536, 0.827256, 0.843881, 0.859432, 0.873929, & - 0.887408, 0.899900, 0.911448, 0.922096, 0.931881, 0.940860, & - 0.949064, 0.956550, 0.963352, 0.969513, 0.975078, 0.980072, & - 0.984542, 0.988500, 0.991984, 0.995003, 0.997630, 1.000000 / + ks = 7 + do k=1,km+1 + ak(k) = a26(k) + bk(k) = b26(k) + enddo - select case (km) + case (30) ! For Baroclinic Instability Test + ptop = 2.26e2 + pint = 250.E2 + stretch_fac = 1.03 + auto_routine = 1 + + case (31) ! 
N = 4, M=2 + if (trim(npz_type) == 'lowtop') then + ptop = 300. + pint = 100.E2 + stretch_fac = 1.035 + auto_routine = 5 + else + ptop = 100. + stretch_fac = 1.035 + auto_routine = 1 + endif - case (24) + case (32) - ks = 5 - do k=1,km+1 - ak(k) = a24(k) - bk(k) = b24(k) - enddo + if (trim(npz_type) == 'old32') then + ks = 13 ! high-res trop_32 setup + do k=1,km+1 + ak(k) = a32old(k) + bk(k) = b32old(k) + enddo + elseif (trim(npz_type) == 'lowtop') then + ptop = 100. + stretch_fac = 1.035 + auto_routine = 1 + else + ks = 7 + do k=1,km+1 + ak(k) = a32(k) + bk(k) = b32(k) + enddo + endif + !miz + case (33) + ks = 7 + do k=1,km+1 + ak(k) = a33(k) + bk(k) = b33(k) + enddo + !miz + + case (39) ! N = 5 + ptop = 100. + stretch_fac = 1.035 + auto_routine = 1 + + case (40) + ptop = 50.e2 ! For super cell test + pint = 300.E2 + stretch_fac = 1.03 + auto_routine = 1 + + case (41) + ptop = 100. + pint = 100.E2 + stretch_fac = 1.035 + auto_routine = 1 + + case (47) + + if (trim(npz_type) == 'lowtop') then + ptop = 100. + stretch_fac = 1.035 + auto_routine = 1 + else + ! ks = 27 ! high-res trop-strat + ks = 20 ! Oct 23, 2012 + do k=1,km+1 + ak(k) = a47(k) + bk(k) = b47(k) + enddo + endif - case (26) - - ks = 7 - do k=1,km+1 - ak(k) = a26(k) - bk(k) = b26(k) - enddo + case (48) + ks = 28 + do k=1,km+1 + ak(k) = a48(k) + bk(k) = b48(k) + enddo - case (32) -#ifdef OLD_32 - ks = 13 ! high-res trop_32 setup -#else - ks = 7 -#endif + case (49) + ks = 28 do k=1,km+1 - ak(k) = a32(k) - bk(k) = b32(k) + ak(k) = a49(k) + bk(k) = b49(k) enddo - case (47) -! ks = 27 ! high-res trop-strat - ks = 20 ! Oct 23, 2012 - do k=1,km+1 - ak(k) = a47(k) - bk(k) = b47(k) - enddo + case (50) + ! *Very-low top: for idealized super-cell simulation: + ptop = 50.e2 + pint = 250.E2 + stretch_fac = 1.03 + auto_routine = 1 + + case (51) + if (trim(npz_type) == 'lowtop') then + ptop = 100. + stretch_fac = 1.03 + auto_routine = 1 + elseif (trim(npz_type) == 'meso') then + ptop = 20.E2 + pint = 100.E2 + stretch_fac = 1.05 + auto_routine = 1 + elseif (trim(npz_type) == 'meso2') then + ptop = 1.E2 + pint = 100.E2 + stretch_fac = 1.05 + auto_routine = 6 + else + ptop = 100. + pint = 100.E2 + stretch_fac = 1.035 + auto_routine = 1 + endif - case (48) - ks = 28 - do k=1,km+1 - ak(k) = a48(k) - bk(k) = b48(k) - enddo + case (52) - case (52) - ks = 35 ! pint = 223 - do k=1,km+1 - ak(k) = a52(k) - bk(k) = b52(k) - enddo + if (trim(npz_type) == 'rce') then + ptop = 30.e2 ! for special DPM RCE experiments + stretch_fac = 1.03 + auto_routine = 1 + else + ks = 35 ! pint = 223 + do k=1,km+1 + ak(k) = a52(k) + bk(k) = b52(k) + enddo + endif - case (54) - ks = 11 ! pint = 109.4 - do k=1,km+1 + case (54) + ks = 11 ! pint = 109.4 + do k=1,km+1 ak(k) = a54(k) bk(k) = b54(k) - enddo + enddo - case (56) - ks = 26 - do k=1,km+1 + ! Mid-top: + case (55) ! N = 7 + ptop = 10. + pint = 100.E2 + stretch_fac = 1.035 + auto_routine = 1 + + case (56) + ks = 26 + do k=1,km+1 ak(k) = a56(k) bk(k) = b56(k) - enddo + enddo - case (60) - ks = 37 - do k=1,km+1 - ak(k) = a60(k) - bk(k) = b60(k) - enddo + case (60) + if (trim(npz_type) == 'gfs') then + ks = 20 + do k=1,km+1 + ak(k) = a60gfs(k) + bk(k) = b60gfs(k) + enddo + else if (trim(npz_type) == 'BCwave') then + ptop = 3.e2 + ! pint = 250.E2 + pint = 300.E2 ! 
revised for Moist test + stretch_fac = 1.03 + auto_routine = 1 + else if (trim(npz_type) == 'meso') then + + ptop = 40.e2 + pint = 250.E2 + stretch_fac = 1.03 + auto_routine = 1 - case (64) -#ifdef GFSL64 - ks = 23 -#else - ks = 46 -#endif - do k=1,km+1 - ak(k) = a64(k) - bk(k) = b64(k) - enddo -!-->cjg - case (68) - ks = 27 - do k=1,km+1 + else + ks = 37 + do k=1,km+1 + ak(k) = a60(k) + bk(k) = b60(k) + enddo + endif + + case (63) + if (trim(npz_type) == 'meso') then + ks = 11 + do k=1,km+1 + ak(k) = a63meso(k) + bk(k) = b63meso(k) + enddo + elseif (trim(npz_type) == 'hitop') then + ptop = 1. ! high top + pint = 100.E2 + stretch_fac = 1.035 + auto_routine = 1 + else!if (trim(npz_type) == 'gfs') then + !Used for SHiELD + ! GFS L64 equivalent setting + ks = 23 + do k=1,km+1 + ak(k) = a63(k) + bk(k) = b63(k) + enddo + endif + + case (64) + + if (trim(npz_type) == 'gfs') then + ks = 23 + do k=1,km+1 + ak(k) = a64gfs(k) + bk(k) = b64gfs(k) + enddo + + else + + ks = 46 + do k=1,km+1 + ak(k) = a64(k) + bk(k) = b64(k) + enddo + + endif + !-->cjg + case (68) + ks = 27 + do k=1,km+1 ak(k) = a68(k) bk(k) = b68(k) - enddo + enddo - case (96) - ks = 27 - do k=1,km+1 + case (71) ! N = 9 + ptop = 1. + stretch_fac = 1.03 + auto_routine = 1 + case (75) ! HS-SGO test configuration + pint = 100.E2 + ptop = 10.E2 + stretch_fac = 1.035 + auto_routine = 6 + case (79) ! N = 10, M=5 + if (trim(npz_type) == 'gcrm') then + pint = 100.E2 + ptop = 3.E2 + stretch_fac = 1.035 + auto_routine = 6 + else + ptop = 1. + stretch_fac = 1.03 + auto_routine = 1 + endif + case (90) ! super-duper cell + ptop = 40.e2 + stretch_fac = 1.025 + auto_routine = 2 + + ! NGGPS_GFS + case (91) + pint = 100.E2 + ptop = 40. + stretch_fac = 1.029 + auto_routine = 6 + + case (95) + ! Mid-top settings: + pint = 100.E2 + ptop = 20. + stretch_fac = 1.029 + auto_routine = 6 + + case (96) + ks = 27 + do k=1,km+1 ak(k) = a96(k) bk(k) = b96(k) - enddo -!<--cjg + enddo + !<--cjg - case (100) - ks = 38 - do k=1,km+1 + case (100) + ks = 38 + do k=1,km+1 ak(k) = a100(k) bk(k) = b100(k) - enddo + enddo - case (104) - ks = 73 - do k=1,km+1 + case (104) + ks = 73 + do k=1,km+1 ak(k) = a104(k) bk(k) = b104(k) - enddo - -#ifndef TEST_GWAVES - case (10) -!-------------------------------------------------- -! Pure sigma-coordinate with uniform spacing in "z" -!-------------------------------------------------- -! - pt = 2000. ! model top pressure (pascal) -! pt = 100. ! 1 mb - press(1) = pt - press(km+1) = p0 - dlnp = (log(p0) - log(pt)) / real(km) - - lnpe = log(press(km+1)) - do k=km,2,-1 - lnpe = lnpe - dlnp - press(k) = exp(lnpe) enddo -! Search KS - ks = 0 - do k=1,km - if(press(k) >= pc) then - ks = k-1 - goto 123 - endif - enddo -123 continue - - if(ks /= 0) then - do k=1,ks - ak(k) = press(k) - bk(k) = 0. - enddo - endif - - pint = press(ks+1) - do k=ks+1,km - ak(k) = pint*(press(km)-press(k))/(press(km)-pint) - bk(k) = (press(k) - ak(k)) / press(km+1) - enddo - ak(km+1) = 0. - bk(km+1) = 1. - -! do k=2,km -! bk(k) = real(k-1) / real(km) -! ak(k) = pt * ( 1. - bk(k) ) -! enddo -#endif - -! The following 4 selections are better for non-hydrostatic -! Low top: - case (31) - ptop = 300. - pint = 100.E2 - call var_dz(km, ak, bk, ptop, ks, pint, 1.035) -#ifndef TEST_GWAVES - case (41) - ptop = 100. - pint = 100.E2 - call var_hi(km, ak, bk, ptop, ks, pint, 1.035) -#endif - case (51) - ptop = 100. - pint = 100.E2 - call var_hi(km, ak, bk, ptop, ks, pint, 1.035) -! Mid-top: - case (55) - ptop = 10. - pint = 100.E2 -! 
call var_dz(km, ak, bk, ptop, ks, pint, 1.035) - call var_hi(km, ak, bk, ptop, ks, pint, 1.035) -#ifdef USE_GFSL63 -! GFS L64 equivalent setting - case (63) - ks = 23 - ptop = a63(1) - pint = a63(ks+1) - do k=1,km+1 - ak(k) = a63(k) - bk(k) = b63(k) - enddo -#else - case (63) - ptop = 1. ! high top - pint = 100.E2 - call var_hi(km, ak, bk, ptop, ks, pint, 1.035) -#endif -! NGGPS_GFS - case (91) - pint = 100.E2 - ptop = 40. - call var_gfs(km, ak, bk, ptop, ks, pint, 1.029) - case (95) -! Mid-top settings: - pint = 100.E2 - ptop = 20. - call var_gfs(km, ak, bk, ptop, ks, pint, 1.028) - case (127) - ptop = 1. - pint = 75.E2 - call var_gfs(km, ak, bk, ptop, ks, pint, 1.028) -! IFS-like L125 - case (125) - ks = 33 - ptop = a125(1) - pint = a125(ks+1) - do k=1,km+1 + ! IFS-like L125 + case (125) + ks = 33 + ptop = a125(1) + pint = a125(ks+1) + do k=1,km+1 ak(k) = a125(k) bk(k) = b125(k) - enddo - case default + enddo -#ifdef TEST_GWAVES -!-------------------------------------------------- -! Pure sigma-coordinate with uniform spacing in "z" -!-------------------------------------------------- - call gw_1d(km, 1000.E2, ak, bk, ptop, 10.E3, pt1) - ks = 0 - pint = ak(1) -#else + case (127) ! N = 10, M=5 + if (trim(npz_type) == 'hitop') then + ptop = 1. + stretch_fac = 1.03 + auto_routine = 2 + else + ptop = 1. + pint = 75.E2 + stretch_fac = 1.028 + auto_routine = 6 + endif + case (151) + !LES applications + ptop = 75.e2 + pint = 500.E2 + stretch_fac = 1.01 + auto_routine = 3 + + case default + + if(trim(npz_type) == 'hitop') then + ptop = 1. + pint = 100.E2 + elseif(trim(npz_type) == 'midtop') then + ptop = 10. + pint = 100.E2 + elseif(trim(npz_type) == 'lowtop') then + ptop = 1.E2 + pint = 100.E2 + endif + + if (trim(npz_type) == 'gfs') then + auto_routine = 6 + elseif(trim(npz_type) == 'les') then + auto_routine = 3 + elseif(trim(npz_type) == 'mountain_wave') then + auto_routine = 4 + elseif (km > 79) then + auto_routine = 2 + else + auto_routine = 1 + endif -!---------------------------------------------------------------- -! Sigma-coordinate with uniform spacing in sigma and ptop = 1 mb -!---------------------------------------------------------------- - pt = 100. -! One pressure layer - ks = 1 -! pint = pt + 0.5*1.E5/real(km) ! SJL: 20120327 - pint = pt + 1.E5/real(km) - - ak(1) = pt - bk(1) = 0. - ak(2) = pint - bk(2) = 0. - - do k=3,km+1 - bk(k) = real(k-2) / real(km-1) - ak(k) = pint - bk(k)*pint - enddo - ak(km+1) = 0. - bk(km+1) = 1. -#endif end select - ptop = ak(1) - pint = ak(ks+1) + + endif ! superC/superK + + select case (auto_routine) + + case (1) + call var_hi(km, ak, bk, ptop, ks, pint, stretch_fac) + case (2) + call var_hi2(km, ak, bk, ptop, ks, pint, stretch_fac) + case (3) + call var_les(km, ak, bk, ptop, ks, pint, stretch_fac) + case (4) + call mount_waves(km, ak, bk, ptop, ks, pint) + case (5) + call var_dz(km, ak, bk, ptop, ks, pint, stretch_fac) + case (6) + call var_gfs(km, ak, bk, ptop, ks, pint, stretch_fac) + end select + + ptop = ak(1) + pint = ak(ks+1) + + if (is_master()) then + write(*, '(A4, A13, A13, A11)') 'klev', 'ak', 'bk', 'p_ref' + do k=1,km+1 + write(*,'(I4, F13.5, F13.5, F11.2)') k, ak(k), bk(k), 1000.E2*bk(k) + ak(k) + enddo + endif + end subroutine set_eta #endif + + subroutine set_external_eta(ak, bk, ptop, ks) + implicit none + real, intent(in) :: ak(:) + real, intent(in) :: bk(:) + real, intent(out) :: ptop ! model top (Pa) + integer, intent(out) :: ks ! 
number of pure p layers + !--- local variables + integer :: k + real :: eps = 1.d-7 + + ptop = ak(1) + ks = 1 + do k = 1, size(bk(:)) + if (bk(k).lt.eps) ks = k + enddo + !--- change ks to layers from levels + ks = ks - 1 + if (is_master()) write(6,*) ' ptop & ks ', ptop, ks + + end subroutine set_external_eta + + subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) implicit none integer, intent(in):: km @@ -1502,7 +778,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) real ep, es, alpha, beta, gama real, parameter:: akap = 2./7. !---- Tunable parameters: - real:: k_inc = 10 ! # of layers from bottom up to near const dz region + integer:: k_inc = 10 ! # of layers from bottom up to near const dz region real:: s0 = 0.8 ! lowest layer stretch factor !----------------------- real:: s_inc @@ -1524,7 +800,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) enddo s_fac(km-k_inc-1) = 0.5*(s_fac(km-k_inc) + s_rate) - + do k=km-k_inc-2, 5, -1 s_fac(k) = s_rate * s_fac(k+1) enddo @@ -1599,8 +875,8 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -1618,7 +894,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -1626,7 +902,7 @@ subroutine var_les(km, ak, bk, ptop, ks, pint, s_rate) if ( is_master() ) then ! write(*,*) 'KS=', ks, 'PINT (mb)=', pint/100. ! do k=1,km - ! pm(k) = 0.5*(pe1(k)+pe1(k+1))/100. + ! pm(k) = 0.5*(pe1(k)+pe1(k+1))/100. ! write(*,*) k, pm(k), dz(k) ! enddo tmp1 = ak(ks+1) @@ -1672,7 +948,7 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -1773,8 +1049,8 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -1792,7 +1068,7 @@ subroutine var_gfs(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -1836,7 +1112,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -1848,7 +1124,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) enddo s_fac(km-k_inc-1) = 0.5*(s_fac(km-k_inc) + s_rate) - + #ifdef HIWPP do k=km-k_inc-2, 4, -1 s_fac(k) = s_rate * s_fac(k+1) @@ -1949,8 +1225,8 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -1968,7 +1244,7 @@ subroutine var_hi(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2007,7 +1283,7 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. 
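+! For an isothermal column at temperature t0, integrating the hydrostatic
+! relation dp/p = -(grav/(rdgas*t0)) dz gives the total depth from the
+! log-pressure difference alone, ztop = (rdgas*t0/grav)*ln(p00/ptop),
+! which is what the next line evaluates.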
ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2016,13 +1292,13 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 s_fac(km-9) = 0.95 s_fac(km-10) = 0.5*(s_fac(km-9) + s_rate) - + do k=km-11, 8, -1 s_fac(k) = s_rate * s_fac(k+1) enddo @@ -2106,8 +1382,8 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2125,7 +1401,7 @@ subroutine var_hi2(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2166,7 +1442,7 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2175,13 +1451,13 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 s_fac(km-9) = 0.95 s_fac(km-10) = 0.5*(s_fac(km-9) + s_rate) - + do k=km-11, 9, -1 s_fac(k) = min(10.0, s_rate * s_fac(k+1) ) enddo @@ -2267,8 +1543,8 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2286,7 +1562,7 @@ subroutine var_dz(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2327,7 +1603,7 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) peln(1) = log(pe1(1)) pe1(km+1) = p00 peln(km+1) = log(pe1(km+1)) - + t0 = 270. ztop = rdgas/grav*t0*(peln(km+1) - peln(1)) @@ -2430,8 +1706,8 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) eta(k) = pe1(k) / pe1(km+1) enddo - ep = eta(ks+1) - es = eta(km) + ep = eta(ks+1) + es = eta(km) ! es = 1. alpha = (ep**2-2.*ep*es) / (es-ep)**2 beta = 2.*ep*es**2 / (es-ep)**2 @@ -2449,7 +1725,7 @@ subroutine var55_dz(km, ak, bk, ptop, ks, pint, s_rate) enddo ak(km+1) = 0. - do k=ks+2, km + do k=ks+2, km bk(k) = (pe1(k) - ak(k))/pe1(km+1) enddo bk(km+1) = 1. @@ -2508,35 +1784,35 @@ subroutine hybrid_z_dz(km, dz, ztop, s_rate) s_fac(1) = 1.6 *s_fac(2) sum1 = 0. - do k=1,km - sum1 = sum1 + s_fac(k) - enddo - - dz0 = ztop / sum1 - - do k=1,km - dz(k) = s_fac(k) * dz0 - enddo - - ze(km+1) = 0. - do k=km,1,-1 - ze(k) = ze(k+1) + dz(k) - enddo - - ze(1) = ztop - + do k=1,km + sum1 = sum1 + s_fac(k) + enddo + + dz0 = ztop / sum1 + + do k=1,km + dz(k) = s_fac(k) * dz0 + enddo + + ze(km+1) = 0. + do k=km,1,-1 + ze(k) = ze(k+1) + dz(k) + enddo + + ze(1) = ztop + call sm1_edge(1, 1, 1, 1, km, 1, 1, ze, 2) - - do k=1,km - dz(k) = ze(k) - ze(k+1) - enddo - end subroutine hybrid_z_dz + do k=1,km + dz(k) = ze(k) - ze(k+1) + enddo + + end subroutine hybrid_z_dz subroutine get_eta_level(npz, p_s, pf, ph, ak, bk, pscale) - integer, intent(in) :: npz + integer, intent(in) :: npz real, intent(in) :: p_s ! 
unit: pascal real, intent(in) :: ak(npz+1) real, intent(in) :: bk(npz+1) @@ -2545,18 +1821,18 @@ subroutine get_eta_level(npz, p_s, pf, ph, ak, bk, pscale) real, intent(out) :: ph(npz+1) integer k - ph(1) = ak(1) + ph(1) = ak(1) do k=2,npz+1 ph(k) = ak(k) + bk(k)*p_s - enddo - + enddo + if ( present(pscale) ) then do k=1,npz+1 ph(k) = pscale*ph(k) enddo - endif + endif - if( ak(1) > 1.E-8 ) then + if( ak(1) > 1.E-8 ) then pf(1) = (ph(2) - ph(1)) / log(ph(2)/ph(1)) else pf(1) = (ph(2) - ph(1)) * kappa/(kappa+1.) @@ -2581,7 +1857,7 @@ subroutine compute_dz(km, ztop, dz) ! ztop = 30.E3 - dz(1) = ztop / real(km) + dz(1) = ztop / real(km) dz(km) = 0.5*dz(1) do k=2,km-1 @@ -2622,12 +1898,12 @@ subroutine compute_dz_var(km, ztop, dz) s_fac(km-1) = 0.20 s_fac(km-2) = 0.30 s_fac(km-3) = 0.40 - s_fac(km-4) = 0.50 - s_fac(km-5) = 0.60 - s_fac(km-6) = 0.70 - s_fac(km-7) = 0.80 + s_fac(km-4) = 0.50 + s_fac(km-5) = 0.60 + s_fac(km-6) = 0.70 + s_fac(km-7) = 0.80 s_fac(km-8) = 0.90 - s_fac(km-9) = 1. + s_fac(km-9) = 1. do k=km-10, 9, -1 s_fac(k) = s_rate * s_fac(k+1) @@ -2705,7 +1981,7 @@ subroutine compute_dz_L32(km, ztop, dz) ze(2) = dz(1) dz0 = 1.5*dz0 - dz(2) = dz0 + dz(2) = dz0 ze(3) = ze(2) + dz(2) @@ -2813,8 +2089,8 @@ subroutine set_hybrid_z(is, ie, js, je, ng, km, ztop, dz, rgrav, hs, ze, dz3) do j=js,je do i=is,ie - ze(i,j, 1) = ztop - ze(i,j,km+1) = hs(i,j) * rgrav + ze(i,j, 1) = ztop + ze(i,j,km+1) = hs(i,j) * rgrav enddo enddo @@ -2985,7 +2261,7 @@ subroutine gw_1d(km, p0, ak, bk, ptop, ztop, pt1) n2 = 0.0001 endif - s0 = grav*grav / (cp_air*n2) + s0 = grav*grav / (cp_air*n2) ze(km+1) = 0. do k=km,1,-1 @@ -2998,16 +2274,16 @@ subroutine gw_1d(km, p0, ak, bk, ptop, ztop, pt1) pe1(k) = p0*( (1.-s0/t0) + s0/t0*exp(-n2*ze(k)/grav) )**(1./kappa) enddo - ptop = pe1(1) + ptop = pe1(1) ! if ( is_master() ) write(*,*) 'GW_1D: computed model top (pa)=', ptop -! Set up "sigma" coordinate +! Set up "sigma" coordinate ak(1) = pe1(1) bk(1) = 0. do k=2,km bk(k) = (pe1(k) - pe1(1)) / (pe1(km+1)-pe1(1)) ! bk == sigma - ak(k) = pe1(1)*(1.-bk(k)) - enddo + ak(k) = pe1(1)*(1.-bk(k)) + enddo ak(km+1) = 0. bk(km+1) = 1. @@ -3022,6 +2298,140 @@ subroutine gw_1d(km, p0, ak, bk, ptop, ztop, pt1) end subroutine gw_1d + subroutine mount_waves(km, ak, bk, ptop, ks, pint) + integer, intent(in):: km + real, intent(out):: ak(km+1), bk(km+1) + real, intent(out):: ptop, pint + integer, intent(out):: ks +! Local + real, parameter:: p00 = 1.E5 + real, dimension(km+1):: ze, pe1, peln, eta + real, dimension(km):: dz, dlnp + real ztop, t0, dz0, sum1, tmp1 + real ep, es, alpha, beta, gama, s_fac + integer k, k500 + + pint = 300.e2 +! s_fac = 1.05 +! dz0 = 500. + if ( km <= 60 ) then + s_fac = 1.0 + dz0 = 500. + else + s_fac = 1. + dz0 = 250. + endif + +! Basic parameters for HIWPP mountain waves + t0 = 300. +! ztop = 20.0e3; 500-m resolution in halft of the vertical domain +! ztop = real(km-1)*500. +!----------------------- +! Compute temp ptop based on isothermal atm +! ptop = p00*exp(-grav*ztop/(rdgas*t0)) + +! Lowest half has constant resolution + ze(km+1) = 0. + do k=km, km-19, -1 + ze(k) = ze(k+1) + dz0 + enddo + +! Stretching from 10-km and up: + do k=km-20, 3, -1 + dz0 = s_fac * dz0 + ze(k) = ze(k+1) + dz0 + enddo + ze(2) = ze(3) + sqrt(2.)*dz0 + ze(1) = ze(2) + 2.0*dz0 + +! call sm1_edge(1, 1, 1, 1, km, 1, 1, ze, 1) + +! 
Given z --> p + do k=1,km + dz(k) = ze(k) - ze(k+1) + dlnp(k) = grav*dz(k) / (rdgas*t0) + enddo + + pe1(km+1) = p00 + peln(km+1) = log(p00) + do k=km,1,-1 + peln(k) = peln(k+1) - dlnp(k) + pe1(k) = exp(peln(k)) + enddo + +! Comnpute new ptop + ptop = pe1(1) + +! Pe(k) = ak(k) + bk(k) * PS +! Locate pint and KS + ks = 0 + do k=2,km + if ( pint < pe1(k)) then + ks = k-1 + exit + endif + enddo + + if ( is_master() ) then + write(*,*) 'For (input) PINT=', 0.01*pint, ' KS=', ks, 'pint(computed)=', 0.01*pe1(ks+1) + write(*,*) 'Modified ptop =', ptop, ' ztop=', ze(1)/1000. + do k=1,km + write(*,*) k, 'ze =', ze(k)/1000. + enddo + endif + pint = pe1(ks+1) + +#ifdef NO_UKMO_HB + do k=1,ks+1 + ak(k) = pe1(k) + bk(k) = 0. + enddo + + do k=ks+2,km+1 + bk(k) = (pe1(k) - pint) / (pe1(km+1)-pint) ! bk == sigma + ak(k) = pe1(k) - bk(k) * pe1(km+1) + enddo + bk(km+1) = 1. + ak(km+1) = 0. +#else +! Problematic for non-hydrostatic + do k=1,km+1 + eta(k) = pe1(k) / pe1(km+1) + enddo + ep = eta(ks+1) + es = eta(km) +! es = 1. + alpha = (ep**2-2.*ep*es) / (es-ep)**2 + beta = 2.*ep*es**2 / (es-ep)**2 + gama = -(ep*es)**2 / (es-ep)**2 + +! Pure pressure: + do k=1,ks+1 + ak(k) = eta(k)*1.e5 + bk(k) = 0. + enddo + + do k=ks+2, km + ak(k) = alpha*eta(k) + beta + gama/eta(k) + ak(k) = ak(k)*1.e5 + enddo + ak(km+1) = 0. + + do k=ks+2, km + bk(k) = (pe1(k) - ak(k))/pe1(km+1) + enddo + bk(km+1) = 1. +#endif + + if ( is_master() ) then + tmp1 = ak(ks+1) + do k=ks+1,km + tmp1 = max(tmp1, (ak(k)-ak(k+1))/max(1.E-5, (bk(k+1)-bk(k))) ) + enddo + write(*,*) 'Hybrid Sigma-P: minimum allowable surface pressure (hpa)=', tmp1/100. + endif + + end subroutine mount_waves subroutine zflip(q, im, km) @@ -3036,9 +2446,9 @@ subroutine zflip(q, im, km) qtmp = q(i,k) q(i,k) = q(i,km+1-k) q(i,km+1-k) = qtmp - end do - end do - - end subroutine zflip + end do + end do + + end subroutine zflip end module fv_eta_mod diff --git a/tools/fv_eta.h b/tools/fv_eta.h new file mode 100644 index 000000000..f9b07e8b8 --- /dev/null +++ b/tools/fv_eta.h @@ -0,0 +1,999 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** +#ifndef _FV_ETA_ +#define _FV_ETA__ + +! -*-f90-*-* + +! local + real a24(25),b24(25) ! GFDL AM2L24 + real a26(27),b26(27) ! Jablonowski & Williamson 26-level + real a32old(33),b32old(33) + real a32(33),b32(33) + real a32w(33),b32w(33) + real a33(34),b33(34) ! 
miz: grid with enhanced surface-layer resolution + real a47(48),b47(48) + real a48(49),b48(49) + real a49(50),b49(50) + real a52(53),b52(53) + real a54(55),b54(55) + real a56(57),b56(57) + real a60(61),b60(61) + real a60gfs(61),b60gfs(61) + real a63(64),b63(64) + real a63meso(64),b63meso(64) + real a64(65),b64(65) + real a64gfs(65),b64gfs(65) + real a68(69),b68(69) ! cjg: grid with enhanced PBL resolution + real a96(97),b96(97) ! cjg: grid with enhanced PBL resolution + real a100(101),b100(101) + real a104(105),b104(105) + real a125(126),b125(126) + +!----------------------------------------------- +! GFDL AM2-L24: modified by SJL at the model top +!----------------------------------------------- +! data a24 / 100.0000, 1050.0000, 3474.7942, 7505.5556, 12787.2428, & + data a24 / 100.0000, 903.4465, 3474.7942, 7505.5556, 12787.2428, & + 19111.3683, 21854.9274, 22884.1866, 22776.3058, 21716.1604, & + 20073.2963, 18110.5123, 16004.7832, 13877.6253, 11812.5452, & + 9865.8840, 8073.9726, 6458.0834, 5027.9899, 3784.6085, & + 2722.0086, 1828.9752, 1090.2396, 487.4595, 0.0000 / + + data b24 / 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, & + 0.0000000, 0.0435679, 0.1102275, 0.1922249, 0.2817656, & + 0.3694997, 0.4532348, 0.5316253, 0.6038733, 0.6695556, & + 0.7285176, 0.7808017, 0.8265992, 0.8662148, 0.9000406, & + 0.9285364, 0.9522140, 0.9716252, 0.9873523, 1.0000000 / + +! Jablonowski & Williamson 26-level setup + data a26 / 219.4067, 489.5209, 988.2418, 1805.2010, 2983.7240, 4462.3340, & + 6160.5870, 7851.2430, 7731.2710, 7590.1310, 7424.0860, & + 7228.7440, 6998.9330, 6728.5740, 6410.5090, 6036.3220, & + 5596.1110, 5078.2250, 4468.9600, 3752.1910, 2908.9490, & + 2084.739, 1334.443, 708.499, 252.1360, 0.0, 0.0 / + + data b26 / 0.0, 0.0, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,& + 0.0000000, 0.01505309, 0.03276228, 0.05359622, 0.07810627, & + 0.1069411, 0.1408637, 0.1807720, 0.2277220, 0.2829562, & + 0.3479364, 0.4243822, 0.5143168, 0.6201202, 0.7235355, & + 0.8176768, 0.8962153, 0.9534761, 0.9851122, 1.0000000 / + + +! High-resolution troposphere setup +! Revised Apr 14, 2004: PINT = 245.027 mb + data a32old/100.00000, 400.00000, 818.60211, & + 1378.88653, 2091.79519, 2983.64084, & + 4121.78960, 5579.22148, 7419.79300, & + 9704.82578, 12496.33710, 15855.26306, & + 19839.62499, 24502.73262, 28177.10152, & + 29525.28447, 29016.34358, 27131.32792, & + 24406.11225, 21326.04907, 18221.18357, & + 15275.14642, 12581.67796, 10181.42843, & + 8081.89816, 6270.86956, 4725.35001, & + 3417.39199, 2317.75459, 1398.09473, & + 632.49506, 0.00000, 0.00000 / + + data b32old/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.01711, & + 0.06479, 0.13730, 0.22693, & + 0.32416, 0.42058, 0.51105, & + 0.59325, 0.66628, 0.73011, & + 0.78516, 0.83217, 0.87197, & + 0.90546, 0.93349, 0.95685, & + 0.97624, 0.99223, 1.00000 / + +! SJL June 26, 2012 +! 
pint= 55.7922 + data a32/100.00000, 400.00000, 818.60211, & + 1378.88653, 2091.79519, 2983.64084, & + 4121.78960, 5579.22148, 6907.19063, & + 7735.78639, 8197.66476, 8377.95525, & + 8331.69594, 8094.72213, 7690.85756, & + 7139.01788, 6464.80251, 5712.35727, & + 4940.05347, 4198.60465, 3516.63294, & + 2905.19863, 2366.73733, 1899.19455, & + 1497.78137, 1156.25252, 867.79199, & + 625.59324, 423.21322, 254.76613, & + 115.06646, 0.00000, 0.00000 / + + data b32/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00513, & + 0.01969, 0.04299, 0.07477, & + 0.11508, 0.16408, 0.22198, & + 0.28865, 0.36281, 0.44112, & + 0.51882, 0.59185, 0.65810, & + 0.71694, 0.76843, 0.81293, & + 0.85100, 0.88331, 0.91055, & + 0.93338, 0.95244, 0.96828, & + 0.98142, 0.99223, 1.00000 / + +!--------------------- +! Wilson's 32L settings: +!--------------------- +! Top changed to 0.01 mb + data a32w/ 1.00, 26.6378, 84.5529, 228.8592, & + 539.9597, 1131.7087, 2141.8082, 3712.0454, & + 5963.5317, 8974.1873, 12764.5388, 17294.5911, & + 20857.7007, 22221.8651, 22892.7202, 22891.1641, & + 22286.0724, 21176.0846, 19673.0671, 17889.0989, & + 15927.5060, 13877.6239, 11812.5474, 9865.8830, & + 8073.9717, 6458.0824, 5027.9893, 3784.6104, & + 2722.0093, 1828.9741, 1090.2397, 487.4575, & + 0.0000 / + + data b32w/ 0.0000, 0.0000, 0.0000, 0.0000, & + 0.0000, 0.0000, 0.0000, 0.0000, & + 0.0000, 0.0000, 0.0000, 0.0000, & + 0.0159, 0.0586, 0.1117, 0.1734, & + 0.2415, 0.3137, 0.3878, 0.4619, & + 0.5344, 0.6039, 0.6696, 0.7285, & + 0.7808, 0.8266, 0.8662, 0.9000, & + 0.9285, 0.9522, 0.9716, 0.9874, & + 1.0000 / + +!miz + data a33/100.00000, 400.00000, 818.60211, & + 1378.88653, 2091.79519, 2983.64084, & + 4121.78960, 5579.22148, 6907.19063, & + 7735.78639, 8197.66476, 8377.95525, & + 8331.69594, 8094.72213, 7690.85756, & + 7139.01788, 6464.80251, 5712.35727, & + 4940.05347, 4198.60465, 3516.63294, & + 2905.19863, 2366.73733, 1899.19455, & + 1497.78137, 1156.25252, 867.79199, & + 625.59324, 426.21322, 264.76613, & + 145.06646, 60.00000, 15.00000, & + 0.00000 / + + data b33/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00513, & + 0.01969, 0.04299, 0.07477, & + 0.11508, 0.16408, 0.22198, & + 0.28865, 0.36281, 0.44112, & + 0.51882, 0.59185, 0.65810, & + 0.71694, 0.76843, 0.81293, & + 0.85100, 0.88331, 0.91055, & + 0.93331, 0.95214, 0.96750, & + 0.97968, 0.98908, 0.99575, & + 1.00000 / +!miz + +#ifdef OLD_L47 +! 
QBO setting with ptop = 0.1 mb and p_full=0.17 mb; pint ~ 100 mb + data a47/ 10.00000, 24.45365, 48.76776, & + 85.39458, 133.41983, 191.01402, & + 257.94919, 336.63306, 431.52741, & + 548.18995, 692.78825, 872.16512, & + 1094.18467, 1368.11917, 1704.99489, & + 2117.91945, 2622.42986, 3236.88281, & + 3982.89623, 4885.84733, 5975.43260, & + 7286.29500, 8858.72424, 10739.43477, & + 12982.41110, 15649.68745, 18811.37629, & + 22542.71275, 25724.93857, 27314.36781, & + 27498.59474, 26501.79312, 24605.92991, & + 22130.51655, 19381.30274, 16601.56419, & + 13952.53231, 11522.93244, 9350.82303, & + 7443.47723, 5790.77434, 4373.32696, & + 3167.47008, 2148.51663, 1293.15510, & + 581.62429, 0.00000, 0.00000 / + + data b47/ 0.0000, 0.0000, 0.0000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.01188, 0.04650, & + 0.10170, 0.17401, 0.25832, & + 0.34850, 0.43872, 0.52448, & + 0.60307, 0.67328, 0.73492, & + 0.78834, 0.83418, 0.87320, & + 0.90622, 0.93399, 0.95723, & + 0.97650, 0.99223, 1.00000 / +#else +! Oct 23, 2012 +! QBO setting with ptop = 0.1 mb, pint ~ 60 mb + data a47/ 10.00000, 24.45365, 48.76776, & + 85.39458, 133.41983, 191.01402, & + 257.94919, 336.63306, 431.52741, & + 548.18995, 692.78825, 872.16512, & + 1094.18467, 1368.11917, 1704.99489, & + 2117.91945, 2622.42986, 3236.88281, & + 3982.89623, 4885.84733, 5975.43260, & + 7019.26669, 7796.15848, 8346.60209, & + 8700.31838, 8878.27554, 8894.27179, & + 8756.46404, 8469.60171, 8038.92687, & + 7475.89006, 6803.68067, 6058.68992, & + 5285.28859, 4526.01565, 3813.00206, & + 3164.95553, 2589.26318, 2085.96929, & + 1651.11596, 1278.81205, 962.38875, & + 695.07046, 470.40784, 282.61654, & + 126.92745, 0.00000, 0.00000 / + data b47/ 0.0000, 0.0000, 0.0000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00267, 0.01063, 0.02393, & + 0.04282, 0.06771, 0.09917, & + 0.13786, 0.18444, 0.23925, & + 0.30193, 0.37100, 0.44379, & + 0.51695, 0.58727, 0.65236, & + 0.71094, 0.76262, 0.80757, & + 0.84626, 0.87930, 0.90731, & + 0.93094, 0.95077, 0.96733, & + 0.98105, 0.99223, 1.00000 / +#endif + + data a48/ & + 1.00000, 2.69722, 5.17136, & + 8.89455, 14.24790, 22.07157, & + 33.61283, 50.48096, 74.79993, & + 109.40055, 158.00460, 225.44108, & + 317.89560, 443.19350, 611.11558, & + 833.74392, 1125.83405, 1505.20759, & + 1993.15829, 2614.86254, 3399.78420, & + 4382.06240, 5600.87014, 7100.73115, & + 8931.78242, 11149.97021, 13817.16841, & + 17001.20930, 20775.81856, 23967.33875, & + 25527.64563, 25671.22552, 24609.29622, & + 22640.51220, 20147.13482, 17477.63530, & + 14859.86462, 12414.92533, 10201.44191, & + 8241.50255, 6534.43202, 5066.17865, & + 3815.60705, 2758.60264, 1870.64631, & + 1128.33931, 510.47983, 0.00000, & + 0.00000 / + + data b48/ & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.01253, & + 0.04887, 0.10724, 0.18455, & + 0.27461, 0.36914, 0.46103, & + 0.54623, 0.62305, 0.69099, & + 0.75016, 0.80110, 0.84453, & + 0.88127, 0.91217, 0.93803, & + 0.95958, 
0.97747, 0.99223, & + 1.00000 / + + data a49/ & + 1.00000, 2.69722, 5.17136, & + 8.89455, 14.24790, 22.07157, & + 33.61283, 50.48096, 74.79993, & + 109.40055, 158.00460, 225.44108, & + 317.89560, 443.19350, 611.11558, & + 833.74392, 1125.83405, 1505.20759, & + 1993.15829, 2614.86254, 3399.78420, & + 4382.06240, 5600.87014, 7100.73115, & + 8931.78242, 11149.97021, 13817.16841, & + 17001.20930, 20775.81856, 23967.33875, & + 25527.64563, 25671.22552, 24609.29622, & + 22640.51220, 20147.13482, 17477.63530, & + 14859.86462, 12414.92533, 10201.44191, & + 8241.50255, 6534.43202, 5066.178650, & + 3815.60705, 2758.60264, 1880.646310, & + 1169.33931, 618.47983, 225.000000, & + 10.00000, 0.00000 / + + data b49/ & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.01253, & + 0.04887, 0.10724, 0.18455, & + 0.27461, 0.36914, 0.46103, & + 0.54623, 0.62305, 0.69099, & + 0.75016, 0.80110, 0.84453, & + 0.88125, 0.91210, 0.93766, & + 0.95849, 0.97495, 0.98743, & + 0.99580, 1.00000 / + +! High PBL resolution with top at 1 mb +! SJL modified May 7, 2013 to ptop ~ 100 mb + data a54/100.00000, 254.83931, 729.54278, & + 1602.41121, 2797.50667, 4100.18977, & + 5334.87140, 6455.24153, 7511.80944, & + 8580.26355, 9714.44293, 10938.62253, & + 12080.36051, 12987.13921, 13692.75084, & + 14224.92180, 14606.55444, 14856.69953, & + 14991.32121, 15023.90075, 14965.91493, & + 14827.21612, 14616.33505, 14340.72252, & + 14006.94280, 13620.82849, 13187.60470, & + 12711.98873, 12198.27003, 11650.37451, & + 11071.91608, 10466.23819, 9836.44706, & + 9185.43852, 8515.96231, 7831.01080, & + 7135.14301, 6436.71659, 5749.00215, & + 5087.67188, 4465.67510, 3889.86419, & + 3361.63433, 2879.51065, 2441.02496, & + 2043.41345, 1683.80513, 1359.31122, & + 1067.09135, 804.40101, 568.62625, & + 357.32525, 168.33263, 0.00000, & + 0.00000 / + + data b54/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00180, 0.00694, 0.01510, & + 0.02601, 0.03942, 0.05515, & + 0.07302, 0.09288, 0.11459, & + 0.13803, 0.16307, 0.18960, & + 0.21753, 0.24675, 0.27716, & + 0.30866, 0.34115, 0.37456, & + 0.40879, 0.44375, 0.47935, & + 0.51551, 0.55215, 0.58916, & + 0.62636, 0.66334, 0.69946, & + 0.73395, 0.76622, 0.79594, & + 0.82309, 0.84780, 0.87020, & + 0.89047, 0.90876, 0.92524, & + 0.94006, 0.95336, 0.96529, & + 0.97596, 0.98551, 0.99400, & + 1.00000 / + + +! 
The 56-L setup + data a56/ 10.00000, 24.97818, 58.01160, & + 115.21466, 199.29210, 309.39897, & + 445.31785, 610.54747, 812.28518, & + 1059.80882, 1363.07092, 1732.09335, & + 2176.91502, 2707.68972, 3334.70962, & + 4068.31964, 4918.76594, 5896.01890, & + 7009.59166, 8268.36324, 9680.41211, & + 11252.86491, 12991.76409, 14901.95764, & + 16987.01313, 19249.15733, 21689.24182, & + 23845.11055, 25330.63353, 26243.52467, & + 26663.84998, 26657.94696, 26281.61371, & + 25583.05256, 24606.03265, 23393.39510, & + 21990.28845, 20445.82122, 18811.93894, & + 17139.59660, 15473.90350, 13850.50167, & + 12294.49060, 10821.62655, 9440.57746, & + 8155.11214, 6965.72496, 5870.70511, & + 4866.83822, 3949.90019, 3115.03562, & + 2357.07879, 1670.87329, 1051.65120, & + 495.51399, 0.00000, 0.00000 / + + data b56 /0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00462, 0.01769, 0.03821, & + 0.06534, 0.09834, 0.13659, & + 0.17947, 0.22637, 0.27660, & + 0.32929, 0.38343, 0.43791, & + 0.49162, 0.54361, 0.59319, & + 0.63989, 0.68348, 0.72391, & + 0.76121, 0.79545, 0.82679, & + 0.85537, 0.88135, 0.90493, & + 0.92626, 0.94552, 0.96286, & + 0.97840, 0.99223, 1.00000 / + + data a60gfs/300.0000, 430.00000, 558.00000, & + 700.00000, 863.05803, 1051.07995, & + 1265.75194, 1510.71101, 1790.05098, & + 2108.36604, 2470.78817, 2883.03811, & + 3351.46002, 3883.05187, 4485.49315, & + 5167.14603, 5937.04991, 6804.87379, & + 7780.84698, 8875.64338, 10100.20534, & + 11264.35673, 12190.64366, 12905.42546, & + 13430.87867, 13785.88765, 13986.77987, & + 14047.96335, 13982.46770, 13802.40331, & + 13519.33841, 13144.59486, 12689.45608, & + 12165.28766, 11583.57006, 10955.84778, & + 10293.60402, 9608.08306, 8910.07678, & + 8209.70131, 7516.18560, 6837.69250, & + 6181.19473, 5552.39653, 4955.72632, & + 4394.37629, 3870.38682, 3384.76586, & + 2937.63489, 2528.37666, 2155.78385, & + 1818.20722, 1513.68173, 1240.03585, & + 994.99144, 776.23591, 581.48797, & + 408.53400, 255.26520, 119.70243, 0. 
/ + + data b60gfs/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00201, 0.00792, 0.01755, & + 0.03079, 0.04751, 0.06761, & + 0.09097, 0.11746, 0.14690, & + 0.17911, 0.21382, 0.25076, & + 0.28960, 0.32994, 0.37140, & + 0.41353, 0.45589, 0.49806, & + 0.53961, 0.58015, 0.61935, & + 0.65692, 0.69261, 0.72625, & + 0.75773, 0.78698, 0.81398, & + 0.83876, 0.86138, 0.88192, & + 0.90050, 0.91722, 0.93223, & + 0.94565, 0.95762, 0.96827, & + 0.97771, 0.98608, 0.99347, 1./ + + data a60/ 1.7861000000e-01, 1.0805100000e+00, 3.9647100000e+00, & + 9.7516000000e+00, 1.9816580000e+01, 3.6695950000e+01, & + 6.2550570000e+01, 9.9199620000e+01, 1.4792505000e+02, & + 2.0947487000e+02, 2.8422571000e+02, 3.7241721000e+02, & + 4.7437835000e+02, 5.9070236000e+02, 7.2236063000e+02, & + 8.7076746000e+02, 1.0378138800e+03, 1.2258877300e+03, & + 1.4378924600e+03, 1.6772726600e+03, 1.9480506400e+03, & + 2.2548762700e+03, 2.6030909400e+03, 2.9988059200e+03, & + 3.4489952300e+03, 3.9616028900e+03, 4.5456641600e+03, & + 5.2114401700e+03, 5.9705644000e+03, 6.8361981800e+03, & + 7.8231906000e+03, 8.9482351000e+03, 1.0230010660e+04, & + 1.1689289750e+04, 1.3348986860e+04, 1.5234111060e+04, & + 1.7371573230e+04, 1.9789784580e+04, 2.2005564550e+04, & + 2.3550115120e+04, 2.4468583320e+04, 2.4800548800e+04, & + 2.4582445070e+04, 2.3849999620e+04, 2.2640519740e+04, & + 2.0994737150e+04, 1.8957848730e+04, 1.6579413230e+04, & + 1.4080071030e+04, 1.1753630920e+04, 9.6516996300e+03, & + 7.7938009300e+03, 6.1769062800e+03, 4.7874276000e+03, & + 3.6050497500e+03, 2.6059860700e+03, 1.7668328200e+03, & + 1.0656131200e+03, 4.8226201000e+02, 0.0000000000e+00, & + 0.0000000000e+00 / + + + data b60/ 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 5.0600000000e-03, & + 2.0080000000e-02, 4.4900000000e-02, 7.9360000000e-02, & + 1.2326000000e-01, 1.7634000000e-01, 2.3820000000e-01, & + 3.0827000000e-01, 3.8581000000e-01, 4.6989000000e-01, & + 5.5393000000e-01, 6.2958000000e-01, 6.9642000000e-01, & + 7.5458000000e-01, 8.0463000000e-01, 8.4728000000e-01, & + 8.8335000000e-01, 9.1368000000e-01, 9.3905000000e-01, & + 9.6020000000e-01, 9.7775000000e-01, 9.9223000000e-01, & + 1.0000000000e+00 / + +! This is activated by USE_GFSL63 +! Thfollowing L63 setting is the same as NCEP GFS's L64 except the top +! 
3 layers + data a63/64.247, 137.790, 221.958, & + 318.266, 428.434, 554.424, & + 698.457, 863.05803, 1051.07995, & + 1265.75194, 1510.71101, 1790.05098, & + 2108.36604, 2470.78817, 2883.03811, & + 3351.46002, 3883.05187, 4485.49315, & + 5167.14603, 5937.04991, 6804.87379, & + 7780.84698, 8875.64338, 10100.20534, & + 11264.35673, 12190.64366, 12905.42546, & + 13430.87867, 13785.88765, 13986.77987, & + 14047.96335, 13982.46770, 13802.40331, & + 13519.33841, 13144.59486, 12689.45608, & + 12165.28766, 11583.57006, 10955.84778, & + 10293.60402, 9608.08306, 8910.07678, & + 8209.70131, 7516.18560, 6837.69250, & + 6181.19473, 5552.39653, 4955.72632, & + 4394.37629, 3870.38682, 3384.76586, & + 2937.63489, 2528.37666, 2155.78385, & + 1818.20722, 1513.68173, 1240.03585, & + 994.99144, 776.23591, 581.48797, & + 408.53400, 255.26520, 119.70243, 0. / + + data b63/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00201, 0.00792, 0.01755, & + 0.03079, 0.04751, 0.06761, & + 0.09097, 0.11746, 0.14690, & + 0.17911, 0.21382, 0.25076, & + 0.28960, 0.32994, 0.37140, & + 0.41353, 0.45589, 0.49806, & + 0.53961, 0.58015, 0.61935, & + 0.65692, 0.69261, 0.72625, & + 0.75773, 0.78698, 0.81398, & + 0.83876, 0.86138, 0.88192, & + 0.90050, 0.91722, 0.93223, & + 0.94565, 0.95762, 0.96827, & + 0.97771, 0.98608, 0.99347, 1./ + + data a63meso/ 64.247, 234.14925, 444.32075, & + 719.10698, 1077.83197, 1545.21700, & + 2152.6203, 2939.37353, 3954.07197, & + 5255.55443, 6913.13424, 8955.12932, & + 10898.75012, 12137.76737, 12858.09331, & + 13388.26761, 13747.35846, 13951.85268, & + 14016.29356, 13953.82551, 13776.65318, & + 13496.41874, 13124.49605, 12672.19867, & + 12150.90036, 11572.06889, 10947.21741, & + 10287.78472, 9604.96173, 8909.48448, & + 8211.41625, 7519.94125, 6843.19133, & + 6188.11962, 5560.42852, 4964.55636, & + 4403.71643, 3879.97894, 3394.38835, & + 2996.77033, 2730.02573, 2530.11329, & + 2339.36720, 2157.57530, 1984.53745, & + 1820.00086, 1663.72705, 1515.43668, & + 1374.86622, 1241.72259, 1115.72934, & + 996.58895, 884.02079, 777.73138, & + 677.44387, 582.87349, 493.75161, & + 409.80694, 330.78356, 256.42688, & + 186.49670, 120.75560, 58.97959, 0. / + + data b63meso/ 0. , 0. , 0. , & + 0. , 0. , 0. , & + 0. , 0. , 0. , & + 0. , 0. , 0.0005 , & + 0.00298, 0.00885, 0.01845, & + 0.03166, 0.04836, 0.06842, & + 0.09175, 0.1182 , 0.14759, & + 0.17974, 0.21438, 0.25123, & + 0.28997, 0.33022, 0.37157, & + 0.41359, 0.45584, 0.49791, & + 0.53936, 0.57981, 0.61894, & + 0.65645, 0.6921 , 0.72571, & + 0.75717, 0.78642, 0.81343, & + 0.83547, 0.85023, 0.86128, & + 0.8718 , 0.88182, 0.89135, & + 0.9004 , 0.90898, 0.91712, & + 0.92483, 0.93213, 0.93904, & + 0.94556, 0.95172, 0.95754, & + 0.96302, 0.96819, 0.97306, & + 0.97764, 0.98196, 0.98601, & + 0.98983, 0.99341, 0.99678, 1. 
/ + + data a64gfs/20.00000, 68.00000, 137.79000, & + 221.95800, 318.26600, 428.43400, & + 554.42400, 698.45700, 863.05803, & + 1051.07995, 1265.75194, 1510.71101, & + 1790.05098, 2108.36604, 2470.78817, & + 2883.03811, 3351.46002, 3883.05187, & + 4485.49315, 5167.14603, 5937.04991, & + 6804.87379, 7780.84698, 8875.64338, & + 9921.40745, 10760.99844, 11417.88354, & + 11911.61193, 12258.61668, 12472.89642, & + 12566.58298, 12550.43517, 12434.26075, & + 12227.27484, 11938.39468, 11576.46910, & + 11150.43640, 10669.41063, 10142.69482, & + 9579.72458, 8989.94947, 8382.67090, & + 7766.85063, 7150.91171, 6542.55077, & + 5948.57894, 5374.81094, 4825.99383, & + 4305.79754, 3816.84622, 3360.78848, & + 2938.39801, 2549.69756, 2194.08449, & + 1870.45732, 1577.34218, 1313.00028, & + 1075.52114, 862.90778, 673.13815, & + 504.22118, 354.22752, 221.32110, & + 103.78014, 0./ + data b64gfs/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00179, 0.00705, 0.01564, & + 0.02749, 0.04251, 0.06064, & + 0.08182, 0.10595, 0.13294, & + 0.16266, 0.19492, 0.22950, & + 0.26615, 0.30455, 0.34435, & + 0.38516, 0.42656, 0.46815, & + 0.50949, 0.55020, 0.58989, & + 0.62825, 0.66498, 0.69987, & + 0.73275, 0.76351, 0.79208, & + 0.81845, 0.84264, 0.86472, & + 0.88478, 0.90290, 0.91923, & + 0.93388, 0.94697, 0.95865, & + 0.96904, 0.97826, 0.98642, & + 0.99363, 1./ + + data a64/1.00000, 3.90000, 8.70000, & + 15.42000, 24.00000, 34.50000, & + 47.00000, 61.50000, 78.60000, & + 99.13500, 124.12789, 154.63770, & + 191.69700, 236.49300, 290.38000, & + 354.91000, 431.82303, 523.09300, & + 630.92800, 757.79000, 906.45000, & + 1079.85000, 1281.00000, 1515.00000, & + 1788.00000, 2105.00000, 2470.00000, & + 2889.00000, 3362.00000, 3890.00000, & + 4475.00000, 5120.00000, 5830.00000, & + 6608.00000, 7461.00000, 8395.00000, & + 9424.46289, 10574.46880, 11864.80270, & + 13312.58890, 14937.03710, 16759.70700, & + 18804.78710, 21099.41210, 23674.03710, & + 26562.82810, 29804.11720, 32627.31640, & + 34245.89840, 34722.28910, 34155.19920, & + 32636.50390, 30241.08200, 27101.44920, & + 23362.20700, 19317.05270, 15446.17090, & + 12197.45210, 9496.39941, 7205.66992, & + 5144.64307, 3240.79346, 1518.62134, & + 0.00000, 0.00000 / + + data b64/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00813, & + 0.03224, 0.07128, 0.12445, & + 0.19063, 0.26929, 0.35799, & + 0.45438, 0.55263, 0.64304, & + 0.71703, 0.77754, 0.82827, & + 0.87352, 0.91502, 0.95235, & + 0.98511, 1.00000 / + +!-->cjg + data a68/1.00000, 2.68881, 5.15524, & + 8.86683, 14.20349, 22.00278, & + 33.50807, 50.32362, 74.56680, & + 109.05958, 157.51214, 224.73844, & + 316.90481, 441.81219, 609.21090, & + 831.14537, 1122.32514, 1500.51628, & + 1986.94617, 2606.71274, 3389.18802, & + 4368.40473, 5583.41379, 7078.60015, & + 8903.94455, 11115.21886, 13774.60566, & + 16936.82070, 20340.47045, 23193.71492, & + 24870.36141, 25444.59363, 25252.57081, & + 24544.26211, 
23474.29096, 22230.65331, & + 20918.50731, 19589.96280, 18296.26682, & + 17038.02866, 15866.85655, 14763.18943, & + 13736.83624, 12794.11850, 11930.72442, & + 11137.17217, 10404.78946, 9720.03954, & + 9075.54055, 8466.72650, 7887.12346, & + 7333.90490, 6805.43028, 6297.33773, & + 5805.78227, 5327.94995, 4859.88765, & + 4398.63854, 3942.81761, 3491.08449, & + 3043.04531, 2598.71608, 2157.94527, & + 1720.87444, 1287.52805, 858.02944, & + 432.71276, 8.10905, 0.00000 / + + data b68/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00283, 0.01590, & + 0.04412, 0.08487, 0.13284, & + 0.18470, 0.23828, 0.29120, & + 0.34211, 0.39029, 0.43518, & + 0.47677, 0.51536, 0.55091, & + 0.58331, 0.61263, 0.63917, & + 0.66333, 0.68552, 0.70617, & + 0.72555, 0.74383, 0.76117, & + 0.77765, 0.79335, 0.80838, & + 0.82287, 0.83693, 0.85069, & + 0.86423, 0.87760, 0.89082, & + 0.90392, 0.91689, 0.92973, & + 0.94244, 0.95502, 0.96747, & + 0.97979, 0.99200, 1.00000 / + + data a96/1.00000, 2.35408, 4.51347, & + 7.76300, 12.43530, 19.26365, & + 29.33665, 44.05883, 65.28397, & + 95.48274, 137.90344, 196.76073, & + 277.45330, 386.81095, 533.37018, & + 727.67600, 982.60677, 1313.71685, & + 1739.59104, 2282.20281, 2967.26766, & + 3824.58158, 4888.33404, 6197.38450, & + 7795.49158, 9731.48414, 11969.71024, & + 14502.88894, 17304.52434, 20134.76139, & + 22536.63814, 24252.54459, 25230.65591, & + 25585.72044, 25539.91412, 25178.87141, & + 24644.84493, 23978.98781, 23245.49366, & + 22492.11600, 21709.93990, 20949.64473, & + 20225.94258, 19513.31158, 18829.32485, & + 18192.62250, 17589.39396, 17003.45386, & + 16439.01774, 15903.91204, 15396.39758, & + 14908.02140, 14430.65897, 13967.88643, & + 13524.16667, 13098.30227, 12687.56457, & + 12287.08757, 11894.41553, 11511.54106, & + 11139.22483, 10776.01912, 10419.75711, & + 10067.11881, 9716.63489, 9369.61967, & + 9026.69066, 8687.29884, 8350.04978, & + 8013.20925, 7677.12187, 7343.12994, & + 7011.62844, 6681.98102, 6353.09764, & + 6025.10535, 5699.10089, 5375.54503, & + 5053.63074, 4732.62740, 4413.38037, & + 4096.62775, 3781.79777, 3468.45371, & + 3157.19882, 2848.25306, 2541.19150, & + 2236.21942, 1933.50628, 1632.83741, & + 1334.35954, 1038.16655, 744.22318, & + 452.71094, 194.91899, 0.00000, & + 0.00000 / + + data b96/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00193, & + 0.00974, 0.02538, 0.04876, & + 0.07817, 0.11081, 0.14514, & + 0.18007, 0.21486, 0.24866, & + 0.28088, 0.31158, 0.34030, & + 0.36701, 0.39210, 0.41554, & + 0.43733, 0.45774, 0.47707, & + 0.49540, 0.51275, 0.52922, & + 0.54495, 0.56007, 0.57459, & + 0.58850, 0.60186, 0.61471, & + 0.62715, 0.63922, 0.65095, & + 0.66235, 0.67348, 0.68438, & + 0.69510, 0.70570, 0.71616, & + 0.72651, 0.73675, 0.74691, & + 0.75700, 0.76704, 0.77701, & + 0.78690, 0.79672, 0.80649, & + 0.81620, 0.82585, 0.83542, & + 0.84492, 0.85437, 0.86375, & + 0.87305, 0.88229, 0.89146, & + 0.90056, 0.90958, 0.91854, & + 0.92742, 0.93623, 0.94497, & + 0.95364, 0.96223, 0.97074, & + 0.97918, 0.98723, 0.99460, & + 1.00000 / +!<--cjg +! +! 
Ultra high troposphere resolution + data a100/100.00000, 300.00000, 800.00000, & + 1762.35235, 3106.43596, 4225.71874, & + 4946.40525, 5388.77387, 5708.35540, & + 5993.33124, 6277.73673, 6571.49996, & + 6877.05339, 7195.14327, 7526.24920, & + 7870.82981, 8229.35361, 8602.30193, & + 8990.16936, 9393.46399, 9812.70768, & + 10248.43625, 10701.19980, 11171.56286, & + 11660.10476, 12167.41975, 12694.11735, & + 13240.82253, 13808.17600, 14396.83442, & + 15007.47066, 15640.77407, 16297.45067, & + 16978.22343, 17683.83253, 18415.03554, & + 19172.60771, 19957.34218, 20770.05022, & + 21559.14829, 22274.03147, 22916.87519, & + 23489.70456, 23994.40187, 24432.71365, & + 24806.25734, 25116.52754, 25364.90190, & + 25552.64670, 25680.92203, 25750.78675, & + 25763.20311, 25719.04113, 25619.08274, & + 25464.02630, 25254.49482, 24991.06137, & + 24674.32737, 24305.11235, 23884.79781, & + 23415.77059, 22901.76510, 22347.84738, & + 21759.93950, 21144.07284, 20505.73136, & + 19849.54271, 19179.31412, 18498.23400, & + 17809.06809, 17114.28232, 16416.10343, & + 15716.54833, 15017.44246, 14320.43478, & + 13627.01116, 12938.50682, 12256.11762, & + 11580.91062, 10913.83385, 10255.72526, & + 9607.32122, 8969.26427, 8342.11044, & + 7726.33606, 7122.34405, 6530.46991, & + 5950.98721, 5384.11279, 4830.01153, & + 4288.80090, 3760.55514, 3245.30920, & + 2743.06250, 2253.78294, 1777.41285, & + 1313.88054, 863.12371, 425.13088, & + 0.00000, 0.00000 / + + + data b100/0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00000, 0.00000, 0.00000, & + 0.00052, 0.00209, 0.00468, & + 0.00828, 0.01288, 0.01849, & + 0.02508, 0.03266, 0.04121, & + 0.05075, 0.06126, 0.07275, & + 0.08521, 0.09866, 0.11308, & + 0.12850, 0.14490, 0.16230, & + 0.18070, 0.20009, 0.22042, & + 0.24164, 0.26362, 0.28622, & + 0.30926, 0.33258, 0.35605, & + 0.37958, 0.40308, 0.42651, & + 0.44981, 0.47296, 0.49591, & + 0.51862, 0.54109, 0.56327, & + 0.58514, 0.60668, 0.62789, & + 0.64872, 0.66919, 0.68927, & + 0.70895, 0.72822, 0.74709, & + 0.76554, 0.78357, 0.80117, & + 0.81835, 0.83511, 0.85145, & + 0.86736, 0.88286, 0.89794, & + 0.91261, 0.92687, 0.94073, & + 0.95419, 0.96726, 0.97994, & + 0.99223, 1.00000 / + + data a104/ & + 1.8827062944e-01, 7.7977549145e-01, 2.1950593583e+00, & + 4.9874566624e+00, 9.8041418997e+00, 1.7019717163e+01, & + 2.7216579591e+01, 4.0518628401e+01, 5.6749646818e+01, & + 7.5513868331e+01, 9.6315093333e+01, 1.1866706195e+02, & + 1.4216835396e+02, 1.6653733709e+02, 1.9161605772e+02, & + 2.1735580129e+02, 2.4379516604e+02, 2.7103771847e+02, & + 2.9923284173e+02, 3.2856100952e+02, 3.5922338766e+02, & + 3.9143507908e+02, 4.2542117983e+02, 4.6141487902e+02, & + 4.9965698106e+02, 5.4039638379e+02, 5.8389118154e+02, & + 6.3041016829e+02, 6.8023459505e+02, 7.3366009144e+02, & + 7.9099869949e+02, 8.5258099392e+02, 9.1875827946e+02, & + 9.8990486716e+02, 1.0664204381e+03, 1.1487325074e+03, & + 1.2372990044e+03, 1.3326109855e+03, 1.4351954993e+03, & + 1.5456186222e+03, 1.6644886848e+03, 1.7924597105e+03, & + 1.9302350870e+03, 2.0785714934e+03, 2.2382831070e+03, & + 2.4102461133e+03, 2.5954035462e+03, 2.7947704856e+03, & + 3.0094396408e+03, 3.2405873512e+03, 3.4894800360e+03, & + 3.7574811281e+03, 4.0460585279e+03, 
4.3567926151e+03, & + 4.6913848588e+03, 5.0516670674e+03, 5.4396113207e+03, & + 5.8573406270e+03, 6.3071403487e+03, 6.7914704368e+03, & + 7.3129785102e+03, 7.8745138115e+03, 8.4791420557e+03, & + 9.1301611750e+03, 9.8311179338e+03, 1.0585825354e+04, & + 1.1398380836e+04, 1.2273184781e+04, 1.3214959424e+04, & + 1.4228767429e+04, 1.5320029596e+04, 1.6494540743e+04, & + 1.7758482452e+04, 1.9118430825e+04, 2.0422798801e+04, & + 2.1520147587e+04, 2.2416813461e+04, 2.3118184510e+04, & + 2.3628790785e+04, 2.3952411814e+04, 2.4092209011e+04, & + 2.4050892106e+04, 2.3830930156e+04, 2.3434818358e+04, & + 2.2865410898e+04, 2.2126326004e+04, 2.1222420323e+04, & + 2.0160313690e+04, 1.8948920926e+04, 1.7599915822e+04, & + 1.6128019809e+04, 1.4550987232e+04, 1.2889169132e+04, & + 1.1164595563e+04, 9.4227665517e+03, 7.7259097899e+03, & + 6.1538244381e+03, 4.7808126007e+03, 3.5967415552e+03, & + 2.5886394104e+03, 1.7415964865e+03, 1.0393721271e+03, & + 4.6478852032e+02, 7.0308342481e-13, 0.0000000000e+00 / + + + data b104/ & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 0.0000000000e+00, & + 0.0000000000e+00, 0.0000000000e+00, 1.5648447298e-03, & + 6.2617046389e-03, 1.4104157933e-02, 2.5118187415e-02, & + 3.9340510972e-02, 5.6816335609e-02, 7.7596328431e-02, & + 1.0173255472e-01, 1.2927309709e-01, 1.6025505622e-01, & + 1.9469566981e-01, 2.3258141217e-01, 2.7385520518e-01, & + 3.1840233814e-01, 3.6603639170e-01, 4.1648734767e-01, & + 4.6939496013e-01, 5.2431098738e-01, 5.8071350676e-01, & + 6.3803478105e-01, 6.9495048840e-01, 7.4963750338e-01, & + 7.9975208897e-01, 8.4315257576e-01, 8.8034012292e-01, & + 9.1184389721e-01, 9.3821231526e-01, 9.6000677644e-01, & + 9.7779792223e-01, 9.9216315122e-01, 1.0000000000e+00 / + +! 
IFS-like L125(top 12 levels removed from IFSL137) + data a125/ 64., & + 86.895882, 107.415741, 131.425507, 159.279404, 191.338562, & + 227.968948, 269.539581, 316.420746, 368.982361, 427.592499, 492.616028, & + 564.413452, 643.339905, 729.744141, 823.967834, 926.344910, 1037.201172, & + 1156.853638, 1285.610352, 1423.770142, 1571.622925, 1729.448975, 1897.519287, & + 2076.095947, 2265.431641, 2465.770508, 2677.348145, 2900.391357, 3135.119385, & + 3381.743652, 3640.468262, 3911.490479, 4194.930664, 4490.817383, 4799.149414, & + 5119.895020, 5452.990723, 5798.344727, 6156.074219, 6526.946777, 6911.870605, & + 7311.869141, 7727.412109, 8159.354004, 8608.525391, 9076.400391, 9562.682617, & + 10065.978516, 10584.631836, 11116.662109, 11660.067383, 12211.547852, 12766.873047, & + 13324.668945, 13881.331055, 14432.139648, 14975.615234, 15508.256836, 16026.115234, & + 16527.322266, 17008.789063, 17467.613281, 17901.621094, 18308.433594, 18685.718750, & + 19031.289063, 19343.511719, 19620.042969, 19859.390625, 20059.931641, 20219.664063, & + 20337.863281, 20412.308594, 20442.078125, 20425.718750, 20361.816406, 20249.511719, & + 20087.085938, 19874.025391, 19608.572266, 19290.226563, 18917.460938, 18489.707031, & + 18006.925781, 17471.839844, 16888.687500, 16262.046875, 15596.695313, 14898.453125, & + 14173.324219, 13427.769531, 12668.257813, 11901.339844, 11133.304688, 10370.175781, & + 9617.515625, 8880.453125, 8163.375000, 7470.343750, 6804.421875, 6168.531250, & + 5564.382813, 4993.796875, 4457.375000, 3955.960938, 3489.234375, 3057.265625, & + 2659.140625, 2294.242188, 1961.500000, 1659.476563, 1387.546875, 1143.250000, & + 926.507813, 734.992188, 568.062500, 424.414063, 302.476563, 202.484375, & + 122.101563, 62.781250, 22.835938, 3.757813, 0.000000, 0.000000 / + + data b125/ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, & + 0.000000, 0.000007, 0.000024, 0.000059, 0.000112, 0.000199, & + 0.000340, 0.000562, 0.000890, 0.001353, 0.001992, 0.002857, & + 0.003971, 0.005378, 0.007133, 0.009261, 0.011806, 0.014816, & + 0.018318, 0.022355, 0.026964, 0.032176, 0.038026, 0.044548, & + 0.051773, 0.059728, 0.068448, 0.077958, 0.088286, 0.099462, & + 0.111505, 0.124448, 0.138313, 0.153125, 0.168910, 0.185689, & + 0.203491, 0.222333, 0.242244, 0.263242, 0.285354, 0.308598, & + 0.332939, 0.358254, 0.384363, 0.411125, 0.438391, 0.466003, & + 0.493800, 0.521619, 0.549301, 0.576692, 0.603648, 0.630036, & + 0.655736, 0.680643, 0.704669, 0.727739, 0.749797, 0.770798, & + 0.790717, 0.809536, 0.827256, 0.843881, 0.859432, 0.873929, & + 0.887408, 0.899900, 0.911448, 0.922096, 0.931881, 0.940860, & + 0.949064, 0.956550, 0.963352, 0.969513, 0.975078, 0.980072, & + 0.984542, 0.988500, 0.991984, 0.995003, 0.997630, 1.000000 / + + + +#endif _FV_ETA_ diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index 4cd647a42..3eae83358 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -25,11 +25,11 @@ module fv_grid_tools_mod use fv_grid_utils_mod, only: gnomonic_grids, great_circle_dist, & mid_pt_sphere, spherical_angle, & cell_center2, get_area, inner_prod, fill_ghost, & - direct_transform, 
dist2side_latlon, & + direct_transform, cube_transform, dist2side_latlon, & spherical_linear_interpolation, big_number use fv_timing_mod, only: timing_on, timing_off - use fv_mp_mod, only: ng, is_master, fill_corners, XDir, YDir - use fv_mp_mod, only: mp_gather, mp_bcst, mp_reduce_max, mp_stop + use fv_mp_mod, only: is_master, fill_corners, XDir, YDir + use fv_mp_mod, only: mp_gather, mp_bcst, mp_reduce_max, mp_stop, grids_master_procs use sorted_index_mod, only: sorted_inta, sorted_intb use mpp_mod, only: mpp_error, FATAL, get_unit, mpp_chksum, mpp_pe, stdout, & mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_npes, & @@ -40,14 +40,14 @@ module fv_grid_tools_mod mpp_get_data_domain, mpp_get_compute_domain, & mpp_get_global_domain, mpp_global_sum, mpp_global_max, mpp_global_min use mpp_domains_mod, only: domain2d - use mpp_io_mod, only: mpp_get_att_value + use mpp_io_mod, only: mpp_get_att_value - use mpp_parameter_mod, only: AGRID_PARAM=>AGRID, & + use mpp_parameter_mod, only: AGRID_PARAM=>AGRID, & DGRID_NE_PARAM=>DGRID_NE, & CGRID_NE_PARAM=>CGRID_NE, & CGRID_SW_PARAM=>CGRID_SW, & BGRID_NE_PARAM=>BGRID_NE, & - BGRID_SW_PARAM=>BGRID_SW, & + BGRID_SW_PARAM=>BGRID_SW, & SCALAR_PAIR, & CORNER, CENTER, XUPDATE use fms_mod, only: get_mosaic_tile_grid @@ -74,10 +74,6 @@ module fv_grid_tools_mod public :: todeg, missing, init_grid, spherical_to_cartesian - !---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine read_grid(Atm, grid_file, ndims, nregions, ng) @@ -95,9 +91,10 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) character(len=1024) :: attvalue integer :: ntiles, i, j, stdunit integer :: isc2, iec2, jsc2, jec2 - integer :: start(4), nread(4) + integer :: start(4), nread(4) integer :: is, ie, js, je integer :: isd, ied, jsd, jed + integer,save :: halo=3 ! for regional domain external tools is = Atm%bd%is ie = Atm%bd%ie @@ -124,13 +121,13 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) if(field_exist(grid_file, 'atm_mosaic_file')) then call read_data(grid_file, "atm_mosaic_file", atm_mosaic) atm_mosaic = "INPUT/"//trim(atm_mosaic) - else + else atm_mosaic = trim(grid_file) endif call get_mosaic_tile_grid(atm_hgrid, atm_mosaic, Atm%domain) - grid_form = "none" + grid_form = "none" if( get_global_att_value(atm_hgrid, "history", attvalue) ) then if( index(attvalue, "gnomonic_ed") > 0) grid_form = "gnomonic_ed" endif @@ -139,23 +136,29 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) !FIXME: Doesn't work for a nested grid ntiles = get_mosaic_ntiles(atm_mosaic) - if(ntiles .NE. 6) call mpp_error(FATAL, & - 'fv_grid_tools(read_grid): ntiles should be 6 in mosaic file '//trim(atm_mosaic) ) - if(nregions .NE. 6) call mpp_error(FATAL, & - 'fv_grid_tools(read_grid): nregions should be 6 when reading from mosaic file '//trim(grid_file) ) + if( .not. Atm%gridstruct%bounded_domain) then !<-- The regional setup has only 1 tile so do not shutdown in that case. + if(ntiles .NE. 6) call mpp_error(FATAL, & + 'fv_grid_tools(read_grid): ntiles should be 6 in mosaic file '//trim(atm_mosaic) ) + if(nregions .NE. 6) call mpp_error(FATAL, & + 'fv_grid_tools(read_grid): nregions should be 6 when reading from mosaic file '//trim(grid_file) ) + endif call get_var_att_value(atm_hgrid, 'x', 'units', units) !--- get the geographical coordinates of super-grid. 
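! The mosaic "supergrid" carries coordinates at twice the model resolution
! (cell corners, edge midpoints, and centers interleaved), so model cell
! corner (i,j) sits at supergrid index (2*i-1, 2*j-1); hence the doubled
! index bounds computed below. A hedged sketch of the corner lookup performed
! later in this routine (tmpx/tmpy hold supergrid lon/lat in degrees):
!   lon_corner = tmpx(2*i-1, 2*j-1) * pi/180.   ! degrees east -> radians
!   lat_corner = tmpy(2*i-1, 2*j-1) * pi/180.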
isc2 = 2*is-1; iec2 = 2*ie+1 - jsc2 = 2*js-1; jec2 = 2*je+1 + jsc2 = 2*js-1; jec2 = 2*je+1 + if( Atm%gridstruct%bounded_domain ) then + isc2 = 2*(isd+halo)-1; iec2 = 2*(ied+1+halo)-1 ! For the regional domain the cell corner locations must be transferred + jsc2 = 2*(jsd+halo)-1; jec2 = 2*(jed+1+halo)-1 ! from the entire supergrid to the compute grid, including the halo region. + endif allocate(tmpx(isc2:iec2, jsc2:jec2) ) allocate(tmpy(isc2:iec2, jsc2:jec2) ) start = 1; nread = 1 start(1) = isc2; nread(1) = iec2 - isc2 + 1 start(2) = jsc2; nread(2) = jec2 - jsc2 + 1 - call read_data(atm_hgrid, 'x', tmpx, start, nread, no_domain=.TRUE.) - call read_data(atm_hgrid, 'y', tmpy, start, nread, no_domain=.TRUE.) + call read_data(atm_hgrid, 'x', tmpx, start, nread, no_domain=.TRUE.) !<-- tmpx (lon, deg east) is on the supergrid + call read_data(atm_hgrid, 'y', tmpy, start, nread, no_domain=.TRUE.) !<-- tmpy (lat, deg) is on the supergrid !--- geographic grid at cell corner grid(isd: is-1, jsd:js-1,1:ndims)=0. @@ -165,12 +168,25 @@ subroutine read_grid(Atm, grid_file, ndims, nregions, ng) if(len_trim(units) < 6) call mpp_error(FATAL, & "fv_grid_tools_mod(read_grid): the length of units must be no less than 6") if(units(1:6) == 'degree') then + if( .not. Atm%gridstruct%bounded_domain) then do j = js, je+1 do i = is, ie+1 grid(i,j,1) = tmpx(2*i-1,2*j-1)*pi/180. grid(i,j,2) = tmpy(2*i-1,2*j-1)*pi/180. enddo enddo + else +! +!*** In the regional case the halo surrounding the domain was included in the read. +!*** Transfer the compute and halo regions to the compute grid. +! + do j = jsd, jed+1 + do i = isd, ied+1 + grid(i,j,1) = tmpx(2*i+halo+2,2*j+halo+2)*pi/180. + grid(i,j,2) = tmpy(2*i+halo+2,2*j+halo+2)*pi/180. + enddo + enddo + endif else if(units(1:6) == 'radian') then do j = js, je+1 do i = is, ie+1 @@ -195,7 +211,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in) :: ishift, jshift, npes_x, npes_y real(kind=R_GRID), dimension(bd%is:bd%ie+ishift, bd%js:bd%je+jshift ), intent(in) :: data_in - real(kind=R_GRID), dimension(bd%is:bd%ie+jshift, bd%js:bd%je+ishift ), intent(out) :: data_out + real(kind=R_GRID), dimension(bd%is:bd%ie+jshift, bd%js:bd%je+ishift ), intent(out) :: data_out real(kind=R_GRID), dimension(:), allocatable :: send_buffer real(kind=R_GRID), dimension(:), allocatable :: recv_buffer integer, dimension(:), allocatable :: is_recv, ie_recv, js_recv, je_recv, pe_recv @@ -221,7 +237,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai ied = bd%ied jsd = bd%jsd jed = bd%jed - + !--- This routine will be called only for cubic sphere grid. so 6 tiles will be assumed !--- also number of processors on each tile will be the same. ntiles = mpp_get_ntile_count(domain) @@ -232,10 +248,10 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai npes_per_tile = npes/ntiles ! if(npes_x == npes_y) then ! even, simple communication - if(npes_x == npes_y .AND. mod(npx_g-1,npes_x) == 0 ) then ! even, + if(npes_x == npes_y .AND. mod(npx_g-1,npes_x) == 0 ) then ! 
even, msgsize = (ie-is+1+jshift)*(je-js+1+ishift) - pos = mod((mpp_pe()-mpp_root_pe()), npes_x*npes_y) + pos = mod((mpp_pe()-mpp_root_pe()), npes_x*npes_y) start_pe = mpp_pe() - pos ipos = mod(pos, npes_x) jpos = pos/npes_x @@ -265,7 +281,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai enddo enddo - call mpp_sync_self() + call mpp_sync_self() deallocate(send_buffer, recv_buffer) else @@ -284,8 +300,8 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai call mpp_get_pelist(domain, pelist) allocate(isl(0:npes-1), iel(0:npes-1), jsl(0:npes-1), jel(0:npes-1) ) call mpp_get_compute_domains(domain, xbegin=isl, xend=iel, ybegin=jsl, yend=jel) - !--- pre-post receiving - buffer_pos = 0 + !--- pre-post receiving + buffer_pos = 0 nrecv = 0 nsend = 0 recv_buf_size = 0 @@ -303,8 +319,8 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai is2 = isl(p); ie2 = iel(p) + ishift; js2 = jsl(p); je2 = jel(p) + jshift; is0 = max(is1,is2); ie0 = min(ie1,ie2) - js0 = max(js1,js2); je0 = min(je1,je2) - msgsize = 0 + js0 = max(js1,js2); je0 = min(je1,je2) + msgsize = 0 if(ie0 .GE. is0 .AND. je0 .GE. js0) then msgsize = (ie0-is0+1)*(je0-js0+1) recv_buf_size = recv_buf_size + msgsize @@ -366,7 +382,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai js0 = js_recv(p); je0 = je_recv(p) msgsize = (ie0-is0+1)*(je0-js0+1) call mpp_recv(recv_buffer(buffer_pos+1), glen=msgsize, from_pe=pe_recv(p), block=.FALSE. ) - buffer_pos = buffer_pos + msgsize + buffer_pos = buffer_pos + msgsize enddo !--- send the data @@ -384,7 +400,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai enddo enddo call mpp_send(send_buffer(buffer_pos+1), plen=msgsize, to_pe=pe_send(p) ) - buffer_pos = buffer_pos + msgsize + buffer_pos = buffer_pos + msgsize enddo call mpp_sync_self(check=EVENT_RECV) ! To ensure recv is completed. @@ -392,7 +408,7 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai !--- unpack buffer pos = 0 do p = 0, nrecv-1 - is0 = is_recv(p); ie0 = ie_recv(p) + is0 = is_recv(p); ie0 = ie_recv(p) js0 = js_recv(p); je0 = je_recv(p) do i = is0, ie0 @@ -412,10 +428,10 @@ subroutine get_symmetry(data_in, data_out, ishift, jshift, npes_x, npes_y, domai end subroutine get_symmetry - subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, ng) - + subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, ng, tile_coarse) + ! init_grid :: read grid from input file and setup grid descriptors - + !-------------------------------------------------------- type(fv_atmos_type), intent(inout), target :: Atm character(len=80), intent(IN) :: grid_name @@ -424,6 +440,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, integer, intent(IN) :: ndims integer, intent(IN) :: nregions integer, intent(IN) :: ng + integer, intent(IN) :: tile_coarse(:) !-------------------------------------------------------- real(kind=R_GRID) :: xs(npx,npy) real(kind=R_GRID) :: ys(npx,npy) @@ -449,11 +466,11 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, ! 
real(kind=R_GRID) :: grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions) integer :: ios, ip, jp - + integer :: igrid - + integer :: tmplun - character(len=80) :: tmpFile + character(len=80) :: tmpFile real(kind=R_GRID), dimension(Atm%bd%is:Atm%bd%ie) :: sbuffer, nbuffer real(kind=R_GRID), dimension(Atm%bd%js:Atm%bd%je) :: wbuffer, ebuffer @@ -478,6 +495,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, type(domain2d), pointer :: domain integer :: is, ie, js, je integer :: isd, ied, jsd, jed + integer :: istart, iend, jstart, jend is = Atm%bd%is ie = Atm%bd%ie @@ -519,7 +537,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, else if( trim(grid_file) .NE. 'INPUT/grid_spec.nc') then allocate(grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions)) endif - + iinta => Atm%gridstruct%iinta jinta => Atm%gridstruct%jinta iintb => Atm%gridstruct%iintb @@ -537,7 +555,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, have_north_pole => Atm%gridstruct%have_north_pole stretched_grid => Atm%gridstruct%stretched_grid - tile => Atm%tile + tile => Atm%tile_of_mosaic domain => Atm%domain @@ -547,7 +565,12 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, latlon = .false. cubed_sphere = .false. - if ( Atm%flagstruct%do_schmidt .and. abs(atm%flagstruct%stretch_fac-1.) > 1.E-5 ) stretched_grid = .true. + if ( (Atm%flagstruct%do_schmidt .or. Atm%flagstruct%do_cube_transform) .and. abs(atm%flagstruct%stretch_fac-1.) > 1.E-5 ) then + stretched_grid = .true. + if (Atm%flagstruct%do_schmidt .and. Atm%flagstruct%do_cube_transform) then + call mpp_error(FATAL, ' Cannot set both do_schmidt and do_cube_transform to .true.') + endif + endif if (Atm%flagstruct%grid_type>3) then if (Atm%flagstruct%grid_type == 4) then @@ -559,43 +582,45 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, else cubed_sphere = .true. - + if (Atm%neststruct%nested) then + !Read grid if it exists + ! still need to set up call setup_aligned_nest(Atm) else - if(trim(grid_file) == 'INPUT/grid_spec.nc') then - call read_grid(Atm, grid_file, ndims, nregions, ng) - else + if(trim(grid_file) == 'INPUT/grid_spec.nc') then + call read_grid(Atm, grid_file, ndims, nregions, ng) + else - if (Atm%flagstruct%grid_type>=0) call gnomonic_grids(Atm%flagstruct%grid_type, npx-1, xs, ys) + if (Atm%flagstruct%grid_type>=0) call gnomonic_grids(Atm%flagstruct%grid_type, npx-1, xs, ys) - if (is_master()) then + if (is_master()) then - if (Atm%flagstruct%grid_type>=0) then - do j=1,npy + if (Atm%flagstruct%grid_type>=0) then + do j=1,npy do i=1,npx grid_global(i,j,1,1) = xs(i,j) grid_global(i,j,2,1) = ys(i,j) enddo - enddo -! mirror_grid assumes that the tile=1 is centered on equator and greenwich meridian Lon[-pi,pi] - call mirror_grid(grid_global, ng, npx, npy, 2, 6) - do n=1,nregions + enddo +! mirror_grid assumes that the tile=1 is centered on equator and greenwich meridian Lon[-pi,pi] + call mirror_grid(grid_global, ng, npx, npy, 2, 6) + do n=1,nregions do j=1,npy - do i=1,npx + do i=1,npx !--------------------------------- ! Shift the corner away from Japan !--------------------------------- !--------------------- This will result in the corner close to east coast of China ------------------ - if ( .not.Atm%flagstruct%do_schmidt .and. (Atm%flagstruct%shift_fac)>1.E-4 ) & - grid_global(i,j,1,n) = grid_global(i,j,1,n) - pi/Atm%flagstruct%shift_fac + if ( .not. ( Atm%flagstruct%do_schmidt .or. 
Atm%flagstruct%do_cube_transform) .and. (Atm%flagstruct%shift_fac)>1.E-4 ) & + grid_global(i,j,1,n) = grid_global(i,j,1,n) - pi/Atm%flagstruct%shift_fac !---------------------------------------------------------------------------------------------------- - if ( grid_global(i,j,1,n) < 0. ) & - grid_global(i,j,1,n) = grid_global(i,j,1,n) + 2.*pi - if (ABS(grid_global(i,j,1,1)) < 1.d-10) grid_global(i,j,1,1) = 0.0 - if (ABS(grid_global(i,j,2,1)) < 1.d-10) grid_global(i,j,2,1) = 0.0 - enddo - enddo + if ( grid_global(i,j,1,n) < 0. ) & + grid_global(i,j,1,n) = grid_global(i,j,1,n) + 2.*pi + if (ABS(grid_global(i,j,1,1)) < 1.d-10) grid_global(i,j,1,1) = 0.0 + if (ABS(grid_global(i,j,2,1)) < 1.d-10) grid_global(i,j,2,1) = 0.0 + enddo + enddo enddo else call mpp_error(FATAL, "fv_grid_tools: reading of ASCII grid files no longer supported") @@ -622,110 +647,141 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, !------------------------ ! Schmidt transformation: !------------------------ - if ( Atm%flagstruct%do_schmidt ) then - do n=1,nregions - call direct_transform(Atm%flagstruct%stretch_fac, 1, npx, 1, npy, & - Atm%flagstruct%target_lon, Atm%flagstruct%target_lat, & - n, grid_global(1:npx,1:npy,1,n), grid_global(1:npx,1:npy,2,n)) - enddo - endif - endif - call mpp_broadcast(grid_global, size(grid_global), mpp_root_pe()) -!--- copy grid to compute domain - do n=1,ndims - do j=js,je+1 - do i=is,ie+1 - grid(i,j,n) = grid_global(i,j,n,tile) - enddo - enddo - enddo - endif + if ( Atm%flagstruct%do_schmidt ) then + do n=1,nregions + call direct_transform(Atm%flagstruct%stretch_fac, 1, npx, 1, npy, & + Atm%flagstruct%target_lon, Atm%flagstruct%target_lat, & + n, grid_global(1:npx,1:npy,1,n), grid_global(1:npx,1:npy,2,n)) + enddo + elseif (Atm%flagstruct%do_cube_transform) then + do n=1,nregions + call cube_transform(Atm%flagstruct%stretch_fac, 1, npx, 1, npy, & + Atm%flagstruct%target_lon, Atm%flagstruct%target_lat, & + n, grid_global(1:npx,1:npy,1,n), grid_global(1:npx,1:npy,2,n)) + enddo + endif + endif !is master + call mpp_broadcast(grid_global, size(grid_global), mpp_root_pe()) + !--- copy grid to compute domain + do n=1,ndims + do j=js,je+1 + do i=is,ie+1 + grid(i,j,n) = grid_global(i,j,n,tile) + enddo + enddo + enddo + endif !(trim(grid_file) == 'INPUT/grid_spec.nc') ! ! SJL: For phys/exchange grid, etc ! - call mpp_update_domains( grid, Atm%domain, position=CORNER) - if (.not. Atm%neststruct%nested) call fill_corners(grid(:,:,1), npx, npy, FILL=XDir, BGRID=.true.) - if (.not. Atm%neststruct%nested) call fill_corners(grid(:,:,2), npx, npy, FILL=XDir, BGRID=.true.) + call mpp_update_domains( grid, Atm%domain, position=CORNER) + if (.not. (Atm%gridstruct%bounded_domain)) then + call fill_corners(grid(:,:,1), npx, npy, FILL=XDir, BGRID=.true.) + call fill_corners(grid(:,:,2), npx, npy, FILL=XDir, BGRID=.true.) + endif - !--- dx and dy - do j = js, je+1 - do i = is, ie + !--- dx and dy + if( .not. Atm%gridstruct%bounded_domain) then + istart=is + iend=ie + jstart=js + jend=je + else + istart=isd + iend=ied + jstart=jsd + jend=jed + endif + + do j = jstart, jend+1 + do i = istart, iend p1(1) = grid(i ,j,1) p1(2) = grid(i ,j,2) p2(1) = grid(i+1,j,1) p2(2) = grid(i+1,j,2) dx(i,j) = great_circle_dist( p2, p1, radius ) enddo - enddo - if( stretched_grid ) then - do j = js, je - do i = is, ie+1 + enddo + if( stretched_grid .or. 
Atm%gridstruct%bounded_domain ) then + do j = jstart, jend + do i = istart, iend+1 p1(1) = grid(i,j, 1) p1(2) = grid(i,j, 2) p2(1) = grid(i,j+1,1) p2(2) = grid(i,j+1,2) dy(i,j) = great_circle_dist( p2, p1, radius ) enddo - enddo - else - call get_symmetry(dx(is:ie,js:je+1), dy(is:ie+1,js:je), 0, 1, Atm%layout(1), Atm%layout(2), & - Atm%domain, Atm%tile, Atm%gridstruct%npx_g, Atm%bd) - endif + enddo + else + call get_symmetry(dx(is:ie,js:je+1), dy(is:ie+1,js:je), 0, 1, Atm%layout(1), Atm%layout(2), & + Atm%domain, Atm%tile_of_mosaic, Atm%gridstruct%npx_g, Atm%bd) + endif - call mpp_get_boundary( dy, dx, Atm%domain, ebufferx=ebuffer, wbufferx=wbuffer, sbuffery=sbuffer, nbuffery=nbuffer,& - flags=SCALAR_PAIR+XUPDATE, gridtype=CGRID_NE_PARAM) - if(is == 1 .AND. mod(tile,2) .NE. 0) then ! on the west boundary - dy(is, js:je) = wbuffer(js:je) - endif - if(ie == npx-1) then ! on the east boundary - dy(ie+1, js:je) = ebuffer(js:je) - endif + call mpp_get_boundary( dy, dx, Atm%domain, ebufferx=ebuffer, wbufferx=wbuffer, sbuffery=sbuffer, nbuffery=nbuffer,& + flags=SCALAR_PAIR+XUPDATE, gridtype=CGRID_NE_PARAM) + if( .not. Atm%gridstruct%bounded_domain ) then + if(is == 1 .AND. mod(tile,2) .NE. 0) then ! on the west boundary + dy(is, js:je) = wbuffer(js:je) + endif + if(ie == npx-1) then ! on the east boundary + dy(ie+1, js:je) = ebuffer(js:je) + endif + endif + + call mpp_update_domains( dy, dx, Atm%domain, flags=SCALAR_PAIR, & + gridtype=CGRID_NE_PARAM, complete=.true.) + if (cubed_sphere .and. (.not. (Atm%gridstruct%bounded_domain))) then + call fill_corners(dx, dy, npx, npy, DGRID=.true.) + endif - call mpp_update_domains( dy, dx, Atm%domain, flags=SCALAR_PAIR, & - gridtype=CGRID_NE_PARAM, complete=.true.) - if (cubed_sphere .and. .not. Atm%neststruct%nested) call fill_corners(dx, dy, npx, npy, DGRID=.true.) + if( .not. stretched_grid ) & + call sorted_inta(isd, ied, jsd, jed, cubed_sphere, grid, iinta, jinta) - if( .not. stretched_grid ) & - call sorted_inta(isd, ied, jsd, jed, cubed_sphere, grid, iinta, jinta) + agrid(:,:,:) = -1.e25 - agrid(:,:,:) = -1.e25 - - do j=js,je - do i=is,ie - if ( stretched_grid ) then + !--- compute agrid (use same indices as for dx/dy above) + + do j=jstart,jend + do i=istart,iend + if ( stretched_grid ) then call cell_center2(grid(i,j, 1:2), grid(i+1,j, 1:2), & grid(i,j+1,1:2), grid(i+1,j+1,1:2), & agrid(i,j,1:2) ) - else + else call cell_center2(grid(iinta(1,i,j),jinta(1,i,j),1:2), & grid(iinta(2,i,j),jinta(2,i,j),1:2), & grid(iinta(3,i,j),jinta(3,i,j),1:2), & grid(iinta(4,i,j),jinta(4,i,j),1:2), & agrid(i,j,1:2) ) - endif - enddo - enddo + endif + enddo + enddo - call mpp_update_domains( agrid, Atm%domain, position=CENTER, complete=.true. ) - if (.not. Atm%neststruct%nested) call fill_corners(agrid(:,:,1), npx, npy, XDir, AGRID=.true.) - if (.not. Atm%neststruct%nested) call fill_corners(agrid(:,:,2), npx, npy, YDir, AGRID=.true.) + call mpp_update_domains( agrid, Atm%domain, position=CENTER, complete=.true. ) + if (.not. (Atm%gridstruct%bounded_domain)) then + call fill_corners(agrid(:,:,1), npx, npy, XDir, AGRID=.true.) + call fill_corners(agrid(:,:,2), npx, npy, YDir, AGRID=.true.) + endif - do j=jsd,jed - do i=isd,ied - call mid_pt_sphere(grid(i, j,1:2), grid(i, j+1,1:2), p1) - call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p2) - dxa(i,j) = great_circle_dist( p2, p1, radius ) -! 
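! For reference: dxa/dya here are A-grid cell widths, measured between the
! great-circle midpoints of opposite cell edges (via mid_pt_sphere), and so
! differ from dx/dy, which run along the cell edges themselves.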
- call mid_pt_sphere(grid(i,j ,1:2), grid(i+1,j ,1:2), p1) - call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p2) - dya(i,j) = great_circle_dist( p2, p1, radius ) - enddo - enddo + do j=jsd,jed + do i=isd,ied + call mid_pt_sphere(grid(i, j,1:2), grid(i, j+1,1:2), p1) + call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p2) + dxa(i,j) = great_circle_dist( p2, p1, radius ) + ! + call mid_pt_sphere(grid(i,j ,1:2), grid(i+1,j ,1:2), p1) + call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p2) + dya(i,j) = great_circle_dist( p2, p1, radius ) + enddo + enddo ! call mpp_update_domains( dxa, dya, Atm%domain, flags=SCALAR_PAIR, gridtype=AGRID_PARAM) - if (cubed_sphere .and. .not. Atm%neststruct%nested) call fill_corners(dxa, dya, npx, npy, AGRID=.true.) + if (cubed_sphere .and. (.not. (Atm%gridstruct%bounded_domain))) then + call fill_corners(dxa, dya, npx, npy, AGRID=.true.) + endif + + end if !if nested - end if !if nested ! do j=js,je ! do i=is,ie+1 @@ -733,6 +789,9 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, do i=isd+1,ied dxc(i,j) = great_circle_dist(agrid(i,j,:), agrid(i-1,j,:), radius) enddo +!xxxxxx + !Are the following 2 lines appropriate for the regional domain? +!xxxxxx dxc(isd,j) = dxc(isd+1,j) dxc(ied+1,j) = dxc(ied,j) enddo @@ -744,6 +803,9 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, dyc(i,j) = great_circle_dist(agrid(i,j,:), agrid(i,j-1,:), radius) enddo enddo +!xxxxxx + !Are the following 2 lines appropriate for the regional domain? +!xxxxxx do i=isd,ied dyc(i,jsd) = dyc(i,jsd+1) dyc(i,jed+1) = dyc(i,jed) @@ -754,13 +816,13 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, call sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & cubed_sphere, agrid, iintb, jintb) - call grid_area( npx, npy, ndims, nregions, Atm%neststruct%nested, Atm%gridstruct, Atm%domain, Atm%bd ) + call grid_area( npx, npy, ndims, nregions, Atm%gridstruct%bounded_domain, Atm%gridstruct, Atm%domain, Atm%bd ) ! stretched_grid = .false. !---------------------------------- ! Compute area_c, rarea_c, dxc, dyc !---------------------------------- - if ( .not. stretched_grid .and. .not. Atm%neststruct%nested) then + if ( .not. stretched_grid .and. (.not. (Atm%gridstruct%bounded_domain))) then ! For symmetrical grids: if ( is==1 ) then i = 1 @@ -855,18 +917,20 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, p4(1:2) = grid(i,j,1:2) area_c(i,j) = 3.*get_area(p1, p4, p2, p3, radius) endif - endif + endif !----------------- call mpp_update_domains( dxc, dyc, Atm%domain, flags=SCALAR_PAIR, & gridtype=CGRID_NE_PARAM, complete=.true.) - if (cubed_sphere .and. .not. Atm%neststruct%nested) call fill_corners(dxc, dyc, npx, npy, CGRID=.true.) + if (cubed_sphere .and. (.not. (Atm%gridstruct%bounded_domain))) then + call fill_corners(dxc, dyc, npx, npy, CGRID=.true.) + endif call mpp_update_domains( area, Atm%domain, complete=.true. ) !Handling outermost ends for area_c - if (Atm%neststruct%nested) then + if (Atm%gridstruct%bounded_domain) then if (is == 1) then do j=jsd,jed area_c(isd,j) = area_c(isd+1,j) @@ -896,7 +960,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, call mpp_update_domains( area_c, Atm%domain, position=CORNER, complete=.true.) ! Handle corner Area ghosting - if (cubed_sphere .and. .not. Atm%neststruct%nested) then + if (cubed_sphere .and. (.not. 
(Atm%gridstruct%bounded_domain))) then call fill_ghost(area, npx, npy, -big_number, Atm%bd) ! fill in garbage values call fill_corners(area_c, npx, npy, FILL=XDir, BGRID=.true.) endif @@ -948,7 +1012,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, angM = -missing aspN = missing aspM = -missing - if (tile == 1) then + !if (tile == 1) then ! doing a GLOBAL domain search on each grid do j=js, je do i=is, ie if(i>ceiling(npx/2.) .OR. j>ceiling(npy/2.)) cycle @@ -978,7 +1042,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, aspN = MIN(aspN,asp) enddo enddo - endif + !endif call mpp_sum(angAv) call mpp_sum(dxAV) call mpp_sum(aspAV) @@ -999,6 +1063,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, write(*,*) ' REDUCED EARTH: Radius is ', radius, ', omega is ', omega #endif write(*,* ) ' Cubed-Sphere Grid Stats : ', npx,'x',npy,'x',nregions + print*, dxN, dxM, dxAV, dxN, dxM write(*,201) ' Grid Length : min: ', dxN,' max: ', dxM,' avg: ', dxAV, ' min/max: ',dxN/dxM write(*,200) ' Deviation from Orthogonal : min: ',angN,' max: ',angM,' avg: ',angAV write(*,200) ' Aspect Ratio : min: ',aspN,' max: ',aspM,' avg: ',aspAV @@ -1006,8 +1071,26 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, endif endif!if gridtype > 3 + !SEND grid global if any child nests + !Matching receive in setup_aligned_nest + do n=1,size(Atm%neststruct%child_grids) + if (Atm%neststruct%child_grids(n) .and. is_master()) then + !need to get tile_coarse AND determine local number for tile + if (ntiles_g > 1) then ! coarse grid only!! +!!$ !!! DEBUG CODE +!!$ print*, 'SENDING GRID_GLOBAL: ', mpp_pe(), tile_coarse(n), grids_master_procs(n), grid_global(1,npy,:,tile_coarse(n)) +!!$ !!! END DEBUG CODE + call mpp_send(grid_global(:,:,:,tile_coarse(n)), & + size(grid_global)/Atm%flagstruct%ntiles,grids_master_procs(n)) + else + call mpp_send(grid_global(:,:,:,1),size(grid_global),grids_master_procs(n)) + endif + call mpp_sync_self() + endif + enddo + if (Atm%neststruct%nested .or. ANY(Atm%neststruct%child_grids)) then - nullify(grid_global) + nullify(grid_global) else if( trim(grid_file) .NE. 
'INPUT/grid_spec.nc') then deallocate(grid_global) endif @@ -1022,37 +1105,37 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, nullify(sina) nullify(cosa) - nullify(dx) - nullify(dy) - nullify(dxc) - nullify(dyc) - nullify(dxa) - nullify(dya) - nullify(rdx) - nullify(rdy) + nullify(dx) + nullify(dy) + nullify(dxc) + nullify(dyc) + nullify(dxa) + nullify(dya) + nullify(rdx) + nullify(rdy) nullify(rdxc) nullify(rdyc) nullify(rdxa) nullify(rdya) - nullify(e1) - nullify(e2) - - nullify(iinta) - nullify(jinta) - nullify(iintb) - nullify(jintb) - nullify(npx_g) - nullify(npy_g) - nullify(ntiles_g) - nullify(sw_corner) - nullify(se_corner) - nullify(ne_corner) - nullify(nw_corner) - nullify(latlon) - nullify(cubed_sphere) - nullify(have_south_pole) - nullify(have_north_pole) - nullify(stretched_grid) + nullify(e1) + nullify(e2) + + nullify(iinta) + nullify(jinta) + nullify(iintb) + nullify(jintb) + nullify(npx_g) + nullify(npy_g) + nullify(ntiles_g) + nullify(sw_corner) + nullify(se_corner) + nullify(ne_corner) + nullify(nw_corner) + nullify(latlon) + nullify(cubed_sphere) + nullify(have_south_pole) + nullify(have_north_pole) + nullify(stretched_grid) nullify(tile) @@ -1061,7 +1144,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, contains subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy real(kind=R_GRID), intent(IN) :: dx_const, dy_const, deglat @@ -1087,23 +1170,23 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) rdx(:,:) = 1./dx_const dy(:,:) = dy_const rdy(:,:) = 1./dy_const - + dxc(:,:) = dx_const rdxc(:,:) = 1./dx_const dyc(:,:) = dy_const rdyc(:,:) = 1./dy_const - + dxa(:,:) = dx_const rdxa(:,:) = 1./dx_const dya(:,:) = dy_const rdya(:,:) = 1./dy_const - + area(:,:) = dx_const*dy_const rarea(:,:) = 1./(dx_const*dy_const) - + area_c(:,:) = dx_const*dy_const rarea_c(:,:) = 1./(dx_const*dy_const) - + ! The following is a hack to get pass the am2 phys init: do j=max(1,jsd),min(jed,npy) do i=max(1,isd),min(ied,npx) @@ -1114,7 +1197,7 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) agrid(:,:,1) = lon_rad agrid(:,:,2) = lat_rad - + sina(:,:) = 1. cosa(:,:) = 0. @@ -1128,6 +1211,21 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) end subroutine setup_cartesian + !This routine currently does two things: + ! 1) Create the nested grid on-the-fly from the parent + ! 2) Compute the weights and indices for the boundary conditions + ! We should split these into two routines in case we can + ! read the nest from the input mosaic. Then we only need + ! to set up the weights. + ! When creating the nest on-the-fly we need the global parent grid, + ! as we are doing now. For nests crossing a cube edge + ! new code is needed. + ! Creating the indices should be a relatively straightforward procedure + ! since we will always know ioffset and joffset, which are needed + ! to initialize the mpp nesting structure. + ! Computing the weights can be simplified by simply retrieving the + ! BC agrid/grid structures?
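! A sketch of the parent/nest index relation used in the routine below
! (names as in the code; refinement is the nest refinement ratio):
!   jc   = joffset + (j-1)/refinement   ! parent cell containing nest point j
!   jmod = mod(j-1, refinement)         ! position within that parent cell
! e.g. with refinement = 3 and joffset = 10, j = 1,2,3 all give jc = 10
! with jmod = 0,1,2, and j = 4 starts parent cell jc = 11.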
+ subroutine setup_aligned_nest(Atm) type(fv_atmos_type), intent(INOUT), target :: Atm @@ -1135,7 +1233,7 @@ subroutine setup_aligned_nest(Atm) integer :: isd_p, ied_p, jsd_p, jed_p integer :: isg, ieg, jsg, jeg integer :: ic, jc, imod, jmod - + real(kind=R_GRID), allocatable, dimension(:,:,:) :: p_grid_u, p_grid_v, pa_grid, p_grid, c_grid_u, c_grid_v integer :: p_ind(1-ng:npx +ng,1-ng:npy +ng,4) !First two entries along dim 3 are @@ -1148,7 +1246,7 @@ subroutine setup_aligned_nest(Atm) real(kind=R_GRID), dimension(2) :: q1, q2 integer, pointer :: parent_tile, refinement, ioffset, joffset - integer, pointer, dimension(:,:,:) :: ind_h, ind_u, ind_v, ind_update_h + integer, pointer, dimension(:,:,:) :: ind_h, ind_u, ind_v real, pointer, dimension(:,:,:) :: wt_h, wt_u, wt_v integer, pointer, dimension(:,:,:) :: ind_b @@ -1169,16 +1267,14 @@ subroutine setup_aligned_nest(Atm) parent_tile => Atm%neststruct%parent_tile - refinement => Atm%neststruct%refinement - ioffset => Atm%neststruct%ioffset - joffset => Atm%neststruct%joffset + refinement => Atm%neststruct%refinement + ioffset => Atm%neststruct%ioffset + joffset => Atm%neststruct%joffset ind_h => Atm%neststruct%ind_h ind_u => Atm%neststruct%ind_u ind_v => Atm%neststruct%ind_v - ind_update_h => Atm%neststruct%ind_update_h - wt_h => Atm%neststruct%wt_h wt_u => Atm%neststruct%wt_u wt_v => Atm%neststruct%wt_v @@ -1199,21 +1295,31 @@ subroutine setup_aligned_nest(Atm) allocate(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2) ) p_grid = 1.e25 - !Need to RECEIVE grid_global; matching mpp_send of grid_global from parent grid is in fv_control + !Need to RECEIVE parent grid_global; + !matching mpp_send of grid_global from parent grid is in init_grid() if( is_master() ) then - p_ind = -1000000000 call mpp_recv(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2), size(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2)), & Atm%parent_grid%pelist(1)) +!!$ !!!! DEBUG CODE +!!$ print*, 'RECEIVING GRID GLOBAL: ', mpp_pe(), Atm%parent_grid%pelist(1), p_grid(1,jeg+1,:) +!!$ !!!! END DEBUG CODE + + endif + + call mpp_broadcast( p_grid(isg-ng:ieg+ng+1, jsg-ng:jeg+ng+1, :), & + (ieg-isg+2+2*ng)*(jeg-jsg+2+2*ng)*ndims, mpp_root_pe() ) + + !NOTE : Grid now allowed to lie outside of parent !Check that the grid does not lie outside its parent !3aug15: allows halo of nest to lie within halo of coarse grid. - ! NOTE: will this then work with the mpp_update_nest_fine? - if ( joffset + floor( real(1-ng) / real(refinement) ) < 1-ng .or. & - ioffset + floor( real(1-ng) / real(refinement) ) < 1-ng .or. & - joffset + floor( real(npy+ng) / real(refinement) ) > Atm%parent_grid%npy+ng .or. & - ioffset + floor( real(npx+ng) / real(refinement) ) > Atm%parent_grid%npx+ng ) then - call mpp_error(FATAL, 'nested grid lies outside its parent') - end if +!!$ ! NOTE: will this then work with the mpp_update_nest_fine? +!!$ if ( joffset + floor( real(1-ng) / real(refinement) ) < 1-ng .or. & +!!$ ioffset + floor( real(1-ng) / real(refinement) ) < 1-ng .or. & +!!$ joffset + floor( real(npy+ng) / real(refinement) ) > Atm%parent_grid%npy+ng .or. 
& +!!$ ioffset + floor( real(npx+ng) / real(refinement) ) > Atm%parent_grid%npx+ng ) then +!!$ call mpp_error(FATAL, 'nested grid lies outside its parent') +!!$ end if do j=1-ng,npy+ng jc = joffset + (j-1)/refinement !int( real(j-1) / real(refinement) ) @@ -1288,21 +1394,18 @@ subroutine setup_aligned_nest(Atm) end do end do - end if - - call mpp_broadcast(grid_global(1-ng:npx+ng, 1-ng:npy+ng ,:,1), & - ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*ndims, mpp_root_pe() ) - call mpp_broadcast( p_ind(1-ng:npx+ng, 1-ng:npy+ng ,1:4), & - ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*4, mpp_root_pe() ) - call mpp_broadcast( pa_grid( isg:ieg , jsg:jeg , :), & - ((ieg-isg+1))*(jeg-jsg+1)*ndims, mpp_root_pe()) - call mpp_broadcast( p_grid_u( isg:ieg , jsg:jeg+1, :), & - (ieg-isg+1)*(jeg-jsg+2)*ndims, mpp_root_pe()) - call mpp_broadcast( p_grid_v( isg:ieg+1, jsg:jeg , :), & - (ieg-isg+2)*(jeg-jsg+1)*ndims, mpp_root_pe()) - - call mpp_broadcast( p_grid(isg-ng:ieg+ng+1, jsg-ng:jeg+ng+1, :), & - (ieg-isg+2+2*ng)*(jeg-jsg+2+2*ng)*ndims, mpp_root_pe() ) +!!$ !TODO: can we just send around ONE grid and re-calculate +!!$ ! staggered grids from that?? +!!$ call mpp_broadcast(grid_global(1-ng:npx+ng, 1-ng:npy+ng ,:,1), & +!!$ ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*ndims, mpp_root_pe() ) +!!$ call mpp_broadcast( p_ind(1-ng:npx+ng, 1-ng:npy+ng ,1:4), & +!!$ ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*4, mpp_root_pe() ) +!!$ call mpp_broadcast( pa_grid( isg:ieg , jsg:jeg , :), & +!!$ ((ieg-isg+1))*(jeg-jsg+1)*ndims, mpp_root_pe()) +!!$ call mpp_broadcast( p_grid_u( isg:ieg , jsg:jeg+1, :), & +!!$ (ieg-isg+1)*(jeg-jsg+2)*ndims, mpp_root_pe()) +!!$ call mpp_broadcast( p_grid_v( isg:ieg+1, jsg:jeg , :), & +!!$ (ieg-isg+2)*(jeg-jsg+1)*ndims, mpp_root_pe()) do n=1,ndims do j=jsd,jed+1 @@ -1352,17 +1455,15 @@ subroutine setup_aligned_nest(Atm) ind_b(i,j,1) = ic ind_b(i,j,2) = jc - + ind_b(i,j,3) = imod ind_b(i,j,4) = jmod enddo enddo - !In a concurrent simulation, p_ind was passed off to the parent processes above, so they can create ind_update_h - ind_u = -99999999 !New BCs for wind components: - ! For aligned grid segments (mod(j-1,R) == 0) set + ! For aligned grid segments (mod(j-1,R) == 0) set ! identically equal to the coarse-grid value ! Do linear interpolation in the y-dir elsewhere @@ -1476,7 +1577,7 @@ subroutine setup_aligned_nest(Atm) do j=jsd,jed+1 do i=isd,ied+1 - + ic = ind_b(i,j,1) jc = ind_b(i,j,2) @@ -1631,6 +1732,7 @@ subroutine setup_aligned_nest(Atm) if (is_master()) then if (Atm%neststruct%nested) then !Nesting position information + !BUG multiply by 180 not 90.... write(*,*) 'NESTED GRID ', Atm%grid_number ic = p_ind(1,1,1) ; jc = p_ind(1,1,1) write(*,'(A, 2I5, 4F10.4)') 'SW CORNER: ', ic, jc, grid_global(1,1,:,1)*90./pi @@ -1640,8 +1742,8 @@ subroutine setup_aligned_nest(Atm) write(*,'(A, 2I5, 4F10.4)') 'NE CORNER: ', ic, jc, grid_global(npx,npy,:,1)*90./pi ic = p_ind(npx,1,1) ; jc = p_ind(npx,1,1) write(*,'(A, 2I5, 4F10.4)') 'SE CORNER: ', ic, jc, grid_global(npx,1,:,1)*90./pi - else - write(*,*) 'PARENT GRID ', Atm%parent_grid%grid_number, Atm%parent_grid%tile + else + write(*,*) 'PARENT GRID ', Atm%parent_grid%grid_number, Atm%parent_grid%global_tile ic = p_ind(1,1,1) ; jc = p_ind(1,1,1) write(*,'(A, 2I5, 4F10.4)') 'SW CORNER: ', ic, jc, Atm%parent_grid%grid_global(ic,jc,:,parent_tile)*90./pi ic = p_ind(1,npy,1) ; jc = p_ind(1,npy,1) @@ -1678,7 +1780,7 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd lon_start = deglon_start*pi/180. lat_start = deglat_start*pi/180. 
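! The lat-lon grid built below is uniform in both directions (with dl and
! dp the longitude/latitude increments in radians), so corner (i,j) is
! simply (lon_start + (i-1)*dl, lat_start + (j-1)*dp); the zonal edge
! length then follows as dl*radius*cos(lat), shrinking toward the poles.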
- + do j=jsd,jed+1 do i=isd,ied+1 grid(i,j,1) = lon_start + real(i-1)*dl @@ -1715,7 +1817,7 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd rdya(i,j) = 1./dya(i,j) enddo enddo - + do j=jsd,jed+1 do i=isd,ied dx(i,j) = dl*radius*cos(grid(i,j,2)) @@ -1764,20 +1866,20 @@ subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd sina(:,:) = 1. cosa(:,:) = 0. - + e1(1,:,:) = 1. e1(2,:,:) = 0. e1(3,:,:) = 0. - + e2(1,:,:) = 0. e2(2,:,:) = 1. e2(3,:,:) = 0. end subroutine setup_latlon - + end subroutine init_grid - subroutine cartesian_to_spherical(x, y, z, lon, lat, r) + subroutine cartesian_to_spherical(x, y, z, lon, lat, r) real(kind=R_GRID) , intent(IN) :: x, y, z real(kind=R_GRID) , intent(OUT) :: lon, lat, r @@ -1786,7 +1888,7 @@ subroutine cartesian_to_spherical(x, y, z, lon, lat, r) lon = 0. else lon = ATAN2(y,x) ! range: [-pi,pi] - endif + endif #ifdef RIGHT_HAND lat = asin(z/r) @@ -1807,7 +1909,7 @@ subroutine spherical_to_cartesian(lon, lat, r, x, y, z) z = -r * sin(lat) #endif end subroutine spherical_to_cartesian - + !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! @@ -1820,10 +1922,10 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c real(kind=R_GRID) , intent(IN) :: x1in, y1in, z1in real(kind=R_GRID) , intent(INOUT) :: angle ! angle to rotate in radians real(kind=R_GRID) , intent(OUT) :: x2out, y2out, z2out - integer, intent(IN), optional :: degrees ! if present convert angle + integer, intent(IN), optional :: degrees ! if present convert angle ! from degrees to radians integer, intent(IN), optional :: convert ! if present convert input point - ! from spherical to cartesian, rotate, + ! from spherical to cartesian, rotate, ! and convert back real(kind=R_GRID) :: c, s @@ -1845,7 +1947,7 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c s = SIN(angle) SELECT CASE(axis) - + CASE(1) x2 = x1 y2 = c*y1 + s*z1 @@ -1860,7 +1962,7 @@ subroutine rot_3d(axis, x1in, y1in, z1in, angle, x2out, y2out, z2out, degrees, c z2 = z1 CASE DEFAULT write(*,*) "Invalid axis: must be 1 for X, 2 for Y, 3 for Z." - + END SELECT if ( present(convert) ) then @@ -1879,16 +1981,16 @@ end subroutine rot_3d real(kind=R_GRID) function get_area_tri(ndims, p_1, p_2, p_3) & result (myarea) - + ! get_area_tri :: get the surface area of a cell defined as a triangle ! on the sphere. Area is computed as the spherical excess ! [area units are based on the units of radius] - + integer, intent(IN) :: ndims ! 2=lat/lon, 3=xyz - real(kind=R_GRID) , intent(IN) :: p_1(ndims) ! - real(kind=R_GRID) , intent(IN) :: p_2(ndims) ! - real(kind=R_GRID) , intent(IN) :: p_3(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_1(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_2(ndims) ! + real(kind=R_GRID) , intent(IN) :: p_3(ndims) ! real(kind=R_GRID) :: angA, angB, angC @@ -1916,11 +2018,11 @@ end function get_area_tri ! (determined by ndims argument 2=lat/lon, 3=xyz) ! [area is returned in m^2 on Unit sphere] ! 
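! Reminder of the identity behind the area routines above and below
! (Girard's theorem): a spherical triangle with interior angles A, B, C on
! a sphere of radius R has area R**2*(A+B+C-pi); a quadrilateral cell
! analogously uses the four-angle excess A+B+C+D-2*pi.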
- subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) + subroutine grid_area(nx, ny, ndims, nregions, bounded_domain, gridstruct, domain, bd ) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: nx, ny, ndims, nregions - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain type(fv_grid_type), intent(IN), target :: gridstruct type(domain2d), intent(INOUT) :: domain @@ -1937,14 +2039,14 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) integer :: i,j,n, nreg integer :: nh = 0 - real(kind=R_GRID), allocatable :: p_R8(:,:,:) + real(kind=R_GRID), allocatable :: p_R8(:,:,:) real(kind=R_GRID), pointer, dimension(:,:,:) :: grid, agrid integer, pointer, dimension(:,:,:) :: iinta, jinta, iintb, jintb real(kind=R_GRID), pointer, dimension(:,:) :: area, area_c - + integer :: is, ie, js, je - integer :: isd, ied, jsd, jed + integer :: isd, ied, jsd, jed, ng is = bd%is ie = bd%ie @@ -1954,6 +2056,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) ied = bd%ied jsd = bd%jsd jed = bd%jed + ng = bd%ng grid => gridstruct%grid_64 agrid => gridstruct%agrid_64 @@ -1965,7 +2068,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) area => gridstruct%area_64 area_c => gridstruct%area_c_64 - if (nested) nh = ng + if (bounded_domain) nh = ng maxarea = -1.e25 minarea = 1.e25 @@ -1974,7 +2077,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) do j=js-nh,je+nh do i=is-nh,ie+nh do n=1,ndims - if ( gridstruct%stretched_grid .or. nested ) then + if ( gridstruct%stretched_grid .or. bounded_domain ) then p_lL(n) = grid(i ,j ,n) p_uL(n) = grid(i ,j+1,n) p_lR(n) = grid(i+1,j ,n) @@ -2018,7 +2121,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) !!$ deallocate( p_R8 ) !!$ !!$ call mp_reduce_max(maxarea) -!!$ minarea = -minarea +!!$ minarea = -minarea !!$ call mp_reduce_max(minarea) !!$ minarea = -minarea @@ -2030,7 +2133,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) if (is_master()) write(*,209) 'GLOBAL AREA (m*m):', globalarea, ' IDEAL GLOBAL AREA (m*m):', 4.0*pi*radius**2 209 format(A,e21.14,A,e21.14) - if (nested) then + if (bounded_domain) then nh = ng-1 !cannot get rarea_c on boundary directly area_c = 1.e30 end if @@ -2038,7 +2141,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) do j=js-nh,je+nh+1 do i=is-nh,ie+nh+1 do n=1,ndims - if ( gridstruct%stretched_grid .or. nested ) then + if ( gridstruct%stretched_grid .or. bounded_domain ) then p_lL(n) = agrid(i-1,j-1,n) p_lR(n) = agrid(i ,j-1,n) p_uL(n) = agrid(i-1,j ,n) @@ -2056,7 +2159,7 @@ subroutine grid_area(nx, ny, ndims, nregions, nested, gridstruct, domain, bd ) enddo ! Corners: assuming triangular cells - if (gridstruct%cubed_sphere .and. .not. nested) then + if (gridstruct%cubed_sphere .and. .not. bounded_domain) then ! 
SW: i=1 j=1 @@ -2160,9 +2263,9 @@ real(kind=R_GRID) function get_angle(ndims, p1, p2, p3, rad) result (angle) endif end function get_angle - - + + subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) @@ -2186,7 +2289,7 @@ subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) grid_global(i ,npy-(j-1),1,nreg) = SIGN(x1,grid_global(i ,npy-(j-1),1,nreg)) grid_global(npx-(i-1),npy-(j-1),1,nreg) = SIGN(x1,grid_global(npx-(i-1),npy-(j-1),1,nreg)) - y1 = 0.25d0 * (ABS(grid_global(i ,j ,2,nreg)) + & + y1 = 0.25d0 * (ABS(grid_global(i ,j ,2,nreg)) + & ABS(grid_global(npx-(i-1),j ,2,nreg)) + & ABS(grid_global(i ,npy-(j-1),2,nreg)) + & ABS(grid_global(npx-(i-1),npy-(j-1),2,nreg))) @@ -2194,7 +2297,7 @@ subroutine mirror_grid(grid_global,ng,npx,npy,ndims,nregions) grid_global(npx-(i-1),j ,2,nreg) = SIGN(y1,grid_global(npx-(i-1),j ,2,nreg)) grid_global(i ,npy-(j-1),2,nreg) = SIGN(y1,grid_global(i ,npy-(j-1),2,nreg)) grid_global(npx-(i-1),npy-(j-1),2,nreg) = SIGN(y1,grid_global(npx-(i-1),npy-(j-1),2,nreg)) - + ! force dateline/greenwich-meridion consitency if (mod(npx,2) /= 0) then if ( (i==1+(npx-1)/2.0d0) ) then diff --git a/tools/fv_io.F90 b/tools/fv_io.F90 index 7a083d7c5..0d72dff7a 100644 --- a/tools/fv_io.F90 +++ b/tools/fv_io.F90 @@ -38,46 +38,43 @@ module fv_io_mod restart_file_type, register_restart_field, & save_restart, restore_state, & set_domain, nullify_domain, set_filename_appendix, & - get_mosaic_tile_file, get_instance_filename, & + get_mosaic_tile_file, get_instance_filename, & save_restart_border, restore_state_border, free_restart_type, & field_exist use mpp_mod, only: mpp_error, FATAL, NOTE, WARNING, mpp_root_pe, & mpp_sync, mpp_pe, mpp_declare_pelist use mpp_domains_mod, only: domain2d, EAST, WEST, NORTH, CENTER, SOUTH, CORNER, & - mpp_get_compute_domain, mpp_get_data_domain, & + mpp_get_compute_domain, mpp_get_data_domain, & mpp_get_layout, mpp_get_ntile_count, & mpp_get_global_domain use tracer_manager_mod, only: tr_get_tracer_names=>get_tracer_names, & get_tracer_names, get_number_tracers, & set_tracer_profile, & get_tracer_index - use field_manager_mod, only: MODEL_ATMOS + use field_manager_mod, only: MODEL_ATMOS use external_sst_mod, only: sst_ncep, sst_anom, use_ncep_sst use fv_arrays_mod, only: fv_atmos_type, fv_nest_BC_type_3D - use fv_eta_mod, only: set_eta + use fv_eta_mod, only: set_external_eta - use fv_mp_mod, only: ng, mp_gather, is_master + use fv_mp_mod, only: mp_gather, is_master use fms_io_mod, only: set_domain + use fv_treat_da_inc_mod, only: read_da_inc implicit none private public :: fv_io_init, fv_io_exit, fv_io_read_restart, remap_restart, fv_io_write_restart public :: fv_io_read_tracers, fv_io_register_restart, fv_io_register_nudge_restart - public :: fv_io_register_restart_BCs, fv_io_register_restart_BCs_NH + public :: fv_io_register_restart_BCs public :: fv_io_write_BCs, fv_io_read_BCs logical :: module_is_initialized = .FALSE. -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - integer ::grid_xtdimid, grid_ytdimid, haloid, pfullid !For writing BCs integer ::grid_xtstagdimid, grid_ytstagdimid, oneid -contains +contains !##################################################################### ! @@ -110,7 +107,7 @@ end subroutine fv_io_exit ! ! ! - ! Write the fv core restart quantities + ! Write the fv core restart quantities ! 
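! Note on the external_eta branch added below: when flagstruct%external_eta
! is set, the ak/bk restored from fv_core.res.nc are passed to
! set_external_eta so that ptop and ks are rederived from the restart
! coefficients (previously this path went through set_eta).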
subroutine fv_io_read_restart(fv_domain,Atm) type(domain2d), intent(inout) :: fv_domain @@ -128,6 +125,9 @@ subroutine fv_io_read_restart(fv_domain,Atm) ntileMe = size(Atm(:)) ! This will need mods for more than 1 tile per pe call restore_state(Atm(1)%Fv_restart) + if (Atm(1)%flagstruct%external_eta) then + call set_external_eta(Atm(1)%ak, Atm(1)%bk, Atm(1)%ptop, Atm(1)%ks) + endif if ( use_ncep_sst .or. Atm(1)%flagstruct%nudge .or. Atm(1)%flagstruct%ncep_ic ) then call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') @@ -141,7 +141,7 @@ subroutine fv_io_read_restart(fv_domain,Atm) else stile_name = '' endif - + do n = 1, ntileMe call restore_state(Atm(n)%Fv_tile_restart) @@ -260,6 +260,7 @@ subroutine remap_restart(fv_domain,Atm) real, allocatable:: q_r(:,:,:,:), qdiag_r(:,:,:,:) !------------------------------------------------------------------------- integer npz, npz_rst, ng + integer i,j,k npz = Atm(1)%npz ! run time z dimension npz_rst = Atm(1)%flagstruct%npz_rst ! restart z dimension @@ -311,6 +312,10 @@ subroutine remap_restart(fv_domain,Atm) stile_name = '' endif +!!!! A NOTE about file names +!!! file_exist() needs the full relative path, including INPUT/ +!!! But register_restart_field ONLY looks in INPUT/ and so JUST needs the file name!! + ! do n = 1, ntileMe n = 1 fname = 'fv_core.res'//trim(stile_name)//'.nc' @@ -336,8 +341,8 @@ subroutine remap_restart(fv_domain,Atm) domain=fv_domain, tile_count=n) call restore_state(FV_tile_restart_r) call free_restart_type(FV_tile_restart_r) - fname = 'INPUT/fv_srf_wnd.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + fname = 'fv_srf_wnd.res'//trim(stile_name)//'.nc' + if (file_exist('INPUT/'//fname)) then call restore_state(Atm(n)%Rsf_restart) Atm(n)%flagstruct%srf_init = .true. else @@ -347,15 +352,15 @@ subroutine remap_restart(fv_domain,Atm) if ( Atm(n)%flagstruct%fv_land ) then !--- restore data for mg_drag - if it exists - fname = 'INPUT/mg_drag.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + fname = 'mg_drag.res'//trim(stile_name)//'.nc' + if (file_exist('INPUT/'//fname)) then call restore_state(Atm(n)%Mg_restart) else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') endif !--- restore data for fv_land - if it exists - fname = 'INPUT/fv_land.res'//trim(stile_name)//'.nc' - if (file_exist(fname)) then + fname = 'fv_land.res'//trim(stile_name)//'.nc' + if (file_exist('INPUT/'//fname)) then call restore_state(Atm(n)%Lnd_restart) else call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') @@ -363,7 +368,7 @@ subroutine remap_restart(fv_domain,Atm) endif fname = 'fv_tracer.res'//trim(stile_name)//'.nc' - if (file_exist('INPUT'//trim(fname))) then + if (file_exist('INPUT/'//fname)) then do nt = 1, ntprog call get_tracer_names(MODEL_ATMOS, nt, tracer_name) call set_tracer_profile (MODEL_ATMOS, nt, q_r(isc:iec,jsc:jec,:,nt) ) @@ -382,6 +387,19 @@ subroutine remap_restart(fv_domain,Atm) call mpp_error(NOTE,'==> Warning from remap_restart: Expected file '//trim(fname)//' does not exist') endif +! ====== PJP added DA functionality ====== + if (Atm(n)%flagstruct%read_increment) then + ! 
print point in middle of domain for a sanity check + i = (isc + iec)/2 + j = (jsc + jec)/2 + k = npz_rst/2 + if( is_master() ) write(*,*) 'Calling read_da_inc',pt_r(i,j,k) + call read_da_inc(Atm(n), Atm(n)%domain, Atm(n)%bd, npz_rst, ntprog, & + u_r, v_r, q_r, delp_r, pt_r, isc, jsc, iec, jec ) + if( is_master() ) write(*,*) 'Back from read_da_inc',pt_r(i,j,k) + endif +! ====== end PJP added DA functionality ====== + call rst_remap(npz_rst, npz, isc, iec, jsc, jec, isd, ied, jsd, jed, ntracers, ntprog, & delp_r, u_r, v_r, w_r, delz_r, pt_r, q_r, qdiag_r,& Atm(n)%delp, Atm(n)%u, Atm(n)%v, Atm(n)%w, Atm(n)%delz, Atm(n)%pt, Atm(n)%q, & @@ -412,7 +430,7 @@ end subroutine remap_restart ! ! ! - ! register restart nudge field to be written out to restart file. + ! register restart nudge field to be written out to restart file. ! subroutine fv_io_register_nudge_restart(Atm) type(fv_atmos_type), intent(inout) :: Atm(:) @@ -421,11 +439,12 @@ subroutine fv_io_register_nudge_restart(Atm) ! use_ncep_sst may not be initialized at this point? call mpp_error(NOTE, 'READING FROM SST_restart DISABLED') -!!$ if ( use_ncep_sst .or. Atm(1)%nudge .or. Atm(1)%ncep_ic ) then -!!$ fname = 'sst_ncep.res.nc' -!!$ id_restart = register_restart_field(Atm(1)%SST_restart, fname, 'sst_ncep', sst_ncep) -!!$ id_restart = register_restart_field(Atm(1)%SST_restart, fname, 'sst_anom', sst_anom) -!!$ endif + if ( use_ncep_sst .or. Atm(1)%flagstruct%nudge .or. Atm(1)%flagstruct%ncep_ic ) then +! if ( Atm(1)%nudge .or. Atm(1)%ncep_ic ) then + fname = 'sst_ncep.res.nc' + id_restart = register_restart_field(Atm(1)%SST_restart, fname, 'sst_ncep', sst_ncep) + id_restart = register_restart_field(Atm(1)%SST_restart, fname, 'sst_anom', sst_anom) + endif end subroutine fv_io_register_nudge_restart ! NAME="fv_io_register_nudge_restart" @@ -435,7 +454,7 @@ end subroutine fv_io_register_nudge_restart ! ! ! - ! register restart field to be written out to restart file. + ! register restart field to be written out to restart file. ! subroutine fv_io_register_restart(fv_domain,Atm) type(domain2d), intent(inout) :: fv_domain @@ -446,9 +465,9 @@ subroutine fv_io_register_restart(fv_domain,Atm) integer :: id_restart integer :: n, nt, ntracers, ntprog, ntdiag, ntileMe, ntiles - ntileMe = size(Atm(:)) - ntprog = size(Atm(1)%q,4) - ntdiag = size(Atm(1)%qdiag,4) + ntileMe = size(Atm(:)) + ntprog = size(Atm(1)%q,4) + ntdiag = size(Atm(1)%qdiag,4) ntracers = ntprog+ntdiag !--- set the 'nestXX' appendix for all files using fms_io @@ -469,7 +488,7 @@ subroutine fv_io_register_restart(fv_domain,Atm) ! use_ncep_sst may not be initialized at this point? #ifndef DYCORE_SOLO - call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') +! call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') !!$ if ( use_ncep_sst .or. Atm(1)%flagstruct%nudge .or. Atm(1)%flagstruct%ncep_ic ) then !!$ fname = 'sst_ncep'//trim(gn)//'.res.nc' !!$ id_restart = register_restart_field(Atm(1)%SST_restart, fname, 'sst_ncep', sst_ncep) @@ -479,7 +498,7 @@ subroutine fv_io_register_restart(fv_domain,Atm) fname = 'fv_core.res.nc' id_restart = register_restart_field(Atm(1)%Fv_restart, fname, 'ak', Atm(1)%ak(:), no_domain=.true.) - id_restart = register_restart_field(Atm(1)%Fv_restart, fname, 'bk', Atm(1)%bk(:), no_domain=.true.) + id_restart = register_restart_field(Atm(1)%Fv_restart, fname, 'bk', Atm(1)%bk(:), no_domain=.true.)
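! The file-name convention from the note in remap_restart above, as a
! minimal sketch (fname and stile_name as used in this module):
!   fname = 'fv_srf_wnd.res'//trim(stile_name)//'.nc'
!   if (file_exist('INPUT/'//fname)) then  ! file_exist wants the path;
!      call restore_state(...)             ! register_restart_field already
!   endif                                  ! assumes the INPUT/ directory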
do n = 1, ntileMe fname = 'fv_core.res'//trim(stile_name)//'.nc' @@ -504,7 +523,7 @@ subroutine fv_io_register_restart(fv_domain,Atm) id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'phis', Atm(n)%phis, & domain=fv_domain, tile_count=n) - !--- include agrid winds in restarts for use in data assimilation + !--- include agrid winds in restarts for use in data assimilation if (Atm(n)%flagstruct%agrid_vel_rst) then id_restart = register_restart_field(Atm(n)%Fv_tile_restart, fname, 'ua', Atm(n)%ua, & domain=fv_domain, tile_count=n, mandatory=.false.) @@ -527,7 +546,7 @@ subroutine fv_io_register_restart(fv_domain,Atm) ! Optional terrain deviation (sgh) and land fraction (oro) fname = 'mg_drag.res'//trim(stile_name)//'.nc' id_restart = register_restart_field(Atm(n)%Mg_restart, fname, 'ghprime', Atm(n)%sgh, & - domain=fv_domain, tile_count=n) + domain=fv_domain, tile_count=n) fname = 'fv_land.res'//trim(stile_name)//'.nc' id_restart = register_restart_field(Atm(n)%Lnd_restart, fname, 'oro', Atm(n)%oro, & @@ -550,6 +569,10 @@ subroutine fv_io_register_restart(fv_domain,Atm) domain=fv_domain, mandatory=.false., tile_count=n) enddo + if ( Atm(n)%neststruct%nested ) then + call fv_io_register_restart_BCs(Atm(n)) !TODO put into fv_io_register_restart + endif + enddo end subroutine fv_io_register_restart @@ -561,41 +584,33 @@ end subroutine fv_io_register_restart ! ! ! - ! Write the fv core restart quantities + ! Write the fv core restart quantities ! - subroutine fv_io_write_restart(Atm, grids_on_this_pe, timestamp) + subroutine fv_io_write_restart(Atm, timestamp) - type(fv_atmos_type), intent(inout) :: Atm(:) - logical, intent(IN) :: grids_on_this_pe(:) + type(fv_atmos_type), intent(inout) :: Atm character(len=*), optional, intent(in) :: timestamp - integer :: n, ntileMe - ntileMe = size(Atm(:)) ! This will need mods for more than 1 tile per pe +!!$ if ( use_ncep_sst .or. Atm%flagstruct%nudge .or. Atm%flagstruct%ncep_ic ) then +!!$ call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') +!!$ !call save_restart(Atm%SST_restart, timestamp) +!!$ endif - if ( use_ncep_sst .or. Atm(1)%flagstruct%nudge .or. Atm(1)%flagstruct%ncep_ic ) then - call mpp_error(NOTE, 'READING FROM SST_RESTART DISABLED') - !call save_restart(Atm(1)%SST_restart, timestamp) + if ( (use_ncep_sst .or. Atm%flagstruct%nudge) .and. .not. Atm%gridstruct%nested ) then + call save_restart(Atm%SST_restart, timestamp) endif - - do n = 1, ntileMe - if (.not. grids_on_this_pe(n)) cycle - if ( (use_ncep_sst .or. Atm(n)%flagstruct%nudge) .and. .not. 
Atm(n)%gridstruct%nested ) then - call save_restart(Atm(n)%SST_restart, timestamp) - endif - - call save_restart(Atm(n)%Fv_restart, timestamp) - call save_restart(Atm(n)%Fv_tile_restart, timestamp) - call save_restart(Atm(n)%Rsf_restart, timestamp) + call save_restart(Atm%Fv_restart, timestamp) + call save_restart(Atm%Fv_tile_restart, timestamp) + call save_restart(Atm%Rsf_restart, timestamp) - if ( Atm(n)%flagstruct%fv_land ) then - call save_restart(Atm(n)%Mg_restart, timestamp) - call save_restart(Atm(n)%Lnd_restart, timestamp) - endif + if ( Atm%flagstruct%fv_land ) then + call save_restart(Atm%Mg_restart, timestamp) + call save_restart(Atm%Lnd_restart, timestamp) + endif - call save_restart(Atm(n)%Tra_restart, timestamp) + call save_restart(Atm%Tra_restart, timestamp) - end do end subroutine fv_io_write_restart @@ -617,8 +632,8 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & integer, allocatable, dimension(:) :: x2_pelist, y2_pelist logical :: is_root_pe - i_stag = 0 - j_stag = 0 + i_stag = 0 + j_stag = 0 if (present(istag)) i_stag = i_stag if (present(jstag)) j_stag = j_stag call mpp_get_global_domain(Atm%domain, xsize = npx, ysize = npy, position=CORNER ) @@ -662,7 +677,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register west halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_west_t1', & - var_bc%west_t1, & + var_bc%west_t1, & indices, global_size, y2_pelist, & is_root_pe, jshift=y_halo) !register west prognostic halo data @@ -677,7 +692,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register east halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east_t1', & - var_bc%east_t1, & + var_bc%east_t1, & indices, global_size, y1_pelist, & is_root_pe, jshift=y_halo) @@ -711,7 +726,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register south halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_south_t1', & - var_bc%south_t1, & + var_bc%south_t1, & indices, global_size, x2_pelist, & is_root_pe, x_halo=x_halo_ns) !register south prognostic halo data @@ -726,7 +741,7 @@ subroutine register_bcs_2d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register north halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north_t1', & - var_bc%north_t1, & + var_bc%north_t1, & indices, global_size, x1_pelist, & is_root_pe, x_halo=x_halo_ns) @@ -808,7 +823,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register west halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_west_t1', & - var_bc%west_t1, & + var_bc%west_t1, & indices, global_size, y2_pelist, & is_root_pe, jshift=y_halo, mandatory=mandatory) !register west prognostic halo data @@ -823,7 +838,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register east halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_east_t1', & - var_bc%east_t1, & + var_bc%east_t1, & indices, global_size, y1_pelist, & is_root_pe, jshift=y_halo, mandatory=mandatory) @@ -858,7 +873,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register 
south halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_sw, trim(fname_sw), & trim(var_name)//'_south_t1', & - var_bc%south_t1, & + var_bc%south_t1, & indices, global_size, x2_pelist, & is_root_pe, x_halo=x_halo_ns, mandatory=mandatory) !register south prognostic halo data @@ -873,7 +888,7 @@ subroutine register_bcs_3d(Atm, BCfile_ne, BCfile_sw, fname_ne, fname_sw, & !register north halo data in t1 if (present(var_bc)) id_restart = register_restart_field(BCfile_ne, trim(fname_ne), & trim(var_name)//'_north_t1', & - var_bc%north_t1, & + var_bc%north_t1, & indices, global_size, x1_pelist, & is_root_pe, x_halo=x_halo_ns, mandatory=mandatory) @@ -925,12 +940,13 @@ subroutine fv_io_register_restart_BCs(Atm) #ifndef SW_DYNAMICS call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'pt', Atm%pt, Atm%neststruct%pt_BC) - if ((.not.Atm%flagstruct%hydrostatic) .and. (.not.Atm%flagstruct%make_nh)) then - if (is_master()) print*, 'fv_io_register_restart_BCs: REGISTERING NH BCs', Atm%flagstruct%hydrostatic, Atm%flagstruct%make_nh + if ((.not.Atm%flagstruct%hydrostatic)) then + if (is_master()) print*, 'fv_io_register_restart_BCs: REGISTERING NH BCs' call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'w', Atm%w, Atm%neststruct%w_BC, mandatory=.false.) call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & - fname_ne, fname_sw, 'delz', Atm%delz, Atm%neststruct%delz_BC, mandatory=.false.) + fname_ne, fname_sw, 'delz', var_bc=Atm%neststruct%delz_BC, mandatory=.false.) +! fname_ne, fname_sw, 'delz', Atm%delz, Atm%neststruct%delz_BC, mandatory=.false.) endif #ifdef USE_COND call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & @@ -951,36 +967,11 @@ subroutine fv_io_register_restart_BCs(Atm) fname_ne, fname_sw, 'vc', var_bc=Atm%neststruct%vc_BC, jstag=1) call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & fname_ne, fname_sw, 'divg', var_bc=Atm%neststruct%divg_BC, istag=1,jstag=1, mandatory=.false.) 
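! mandatory=.false. on the registrations above lets the restore proceed
! when a field (w, delz, divg) is absent from an older BC restart file;
! since presence can then no longer be assumed at registration time, the
! field_exist-based setting of divg_BC%initialized is dropped below (see
! also the commented attempt in fv_io_read_BCs).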
- Atm%neststruct%divg_BC%initialized = field_exist(fname_ne, 'divg_north_t1', Atm%domain) - return end subroutine fv_io_register_restart_BCs - subroutine fv_io_register_restart_BCs_NH(Atm) - type(fv_atmos_type), intent(inout) :: Atm - - integer :: n - character(len=120) :: tname, fname_ne, fname_sw - - fname_ne = 'fv_BC_ne.res.nc' - fname_sw = 'fv_BC_sw.res.nc' - - call set_domain(Atm%domain) - - if (is_master()) print*, 'fv_io_register_restart_BCs_NH: REGISTERING NH BCs', Atm%flagstruct%hydrostatic, Atm%flagstruct%make_nh -#ifndef SW_DYNAMICS - call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & - fname_ne, fname_sw, 'w', Atm%w, Atm%neststruct%w_BC) - call register_bcs_3d(Atm, Atm%neststruct%BCfile_ne, Atm%neststruct%BCfile_sw, & - fname_ne, fname_sw, 'delz', Atm%delz, Atm%neststruct%delz_BC) -#endif - - return - end subroutine fv_io_register_restart_BCs_NH - - subroutine fv_io_write_BCs(Atm, timestamp) type(fv_atmos_type), intent(inout) :: Atm character(len=*), intent(in), optional :: timestamp @@ -998,6 +989,13 @@ subroutine fv_io_read_BCs(Atm) call restore_state_border(Atm%neststruct%BCfile_ne) call restore_state_border(Atm%neststruct%BCfile_sw) + !These do not work yet + !need to modify register_bcs_?d to get ids for registered variables, and then use query_initialized_id + !Atm%neststruct%divg_BC%initialized = field_exist(fname_ne, 'divg_north_t1', Atm%domain) + !Atm%neststruct%w_BC%initialized = field_exist(fname_ne, 'w_north_t1', Atm%domain) + !Atm%neststruct%delz_BC%initialized = field_exist(fname_ne, 'delz_north_t1', Atm%domain) + !if (is_master()) print*, ' BCs: ', Atm%neststruct%divg_BC%initialized, Atm%neststruct%w_BC%initialized, Atm%neststruct%delz_BC%initialized + return end subroutine fv_io_read_BCs diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index d68351b92..ced71e3f9 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -36,10 +36,10 @@ module fv_mp_mod use mpp_domains_mod, only : GLOBAL_DATA_DOMAIN, BITWISE_EXACT_SUM, BGRID_NE, FOLD_NORTH_EDGE, CGRID_NE use mpp_domains_mod, only : MPP_DOMAIN_TIME, CYCLIC_GLOBAL_DOMAIN, NUPDATE,EUPDATE, XUPDATE, YUPDATE, SCALAR_PAIR use mpp_domains_mod, only : domain1D, domain2D, DomainCommunicator2D, mpp_get_ntile_count - use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size + use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain use mpp_domains_mod, only : mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min use mpp_domains_mod, only : mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain - use mpp_domains_mod, only : mpp_check_field, mpp_define_layout + use mpp_domains_mod, only : mpp_check_field, mpp_define_layout use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_mosaic, mpp_define_io_domain use mpp_domains_mod, only : NORTH, NORTH_EAST, EAST, SOUTH_EAST use mpp_domains_mod, only : SOUTH, SOUTH_WEST, WEST, NORTH_WEST @@ -47,14 +47,11 @@ module fv_mp_mod use mpp_domains_mod, only : mpp_group_update_initialized, mpp_do_group_update use mpp_domains_mod, only : mpp_create_group_update,mpp_reset_group_update_field use mpp_domains_mod, only : group_halo_update_type => mpp_group_update_type + use mpp_domains_mod, only: nest_domain_type use mpp_parameter_mod, only : WUPDATE, EUPDATE, SUPDATE, NUPDATE, XUPDATE, YUPDATE - use fv_arrays_mod, only: fv_atmos_type + use fv_arrays_mod, only: fv_atmos_type, fv_grid_bounds_type use fms_io_mod, only: set_domain use mpp_mod, only : mpp_get_current_pelist, 
mpp_set_current_pelist - use mpp_domains_mod, only : mpp_define_domains - use mpp_domains_mod, only : mpp_define_nest_domains, nest_domain_type - use mpp_domains_mod, only : mpp_get_C2F_index, mpp_update_nest_fine - use mpp_domains_mod, only : mpp_get_F2C_index, mpp_update_nest_coarse use mpp_domains_mod, only : mpp_get_domain_shift use ensemble_manager_mod, only : get_ensemble_id @@ -62,6 +59,7 @@ module fv_mp_mod private integer, parameter:: ng = 3 ! Number of ghost zones required + integer, parameter :: MAX_NNEST=20, MAX_NTILE=50 #include "mpif.h" integer, parameter :: XDir=1 @@ -79,31 +77,31 @@ module fv_mp_mod logical :: master - type(nest_domain_type), allocatable, dimension(:) :: nest_domain integer :: this_pe_grid = 0 - integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads + integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads integer :: npes_this_grid !! CLEANUP: these are currently here for convenience !! Right now calling switch_current_atm sets these to the value on the "current" grid - !! (as well as changing the "current" domain) + !! (as well as changing the "current" domain) integer :: is, ie, js, je integer :: isd, ied, jsd, jed integer :: isc, iec, jsc, jec + integer, allocatable :: grids_master_procs(:) + integer, dimension(MAX_NNEST) :: tile_fine = 0 !Global index of LAST tile in a mosaic + type(nest_domain_type) :: global_nest_domain !ONE structure for ALL levels of nesting + public mp_start, mp_assign_gid, mp_barrier, mp_stop!, npes public domain_decomp, mp_bcst, mp_reduce_max, mp_reduce_sum, mp_gather public mp_reduce_min public fill_corners, XDir, YDir public switch_current_domain, switch_current_Atm, broadcast_domains public is_master, setup_master - !The following variables are declared public by this module for convenience; - !they will need to be switched when domains are switched -!!! CLEANUP: ng is a PARAMETER and is OK to be shared by a use statement - public is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec, ng public start_group_halo_update, complete_group_halo_update - public group_halo_update_type + public group_halo_update_type, grids_master_procs, tile_fine + public global_nest_domain, MAX_NNEST, MAX_NTILE interface start_group_halo_update module procedure start_var_group_update_2d @@ -176,9 +174,6 @@ module fv_mp_mod END INTERFACE integer :: halo_update_type = 1 -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' contains @@ -242,7 +237,7 @@ subroutine setup_master(pelist_local) integer, intent(IN) :: pelist_local(:) if (ANY(gid == pelist_local)) then - + masterproc = pelist_local(1) master = (gid == masterproc) @@ -256,11 +251,11 @@ end subroutine setup_master ! mp_barrier :: Wait for all SPMD processes ! subroutine mp_barrier() - + call MPI_BARRIER(commglobal, ierror) - + end subroutine mp_barrier -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -286,55 +281,39 @@ end subroutine mp_stop ! ! domain_decomp :: Setup domain decomp ! 
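! One reading of the new nesting bookkeeping above: tile_fine(n) holds the
! global index of the last tile belonging to grid n (a cubed sphere,
! tiles 1-6, with a single nest as tile 7 would give
! tile_fine(1:2) = (/6, 7/)), and grids_master_procs(n) is the root PE of
! grid n, matching the grid_global send/receive in fv_grid_tools.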
- subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) + subroutine domain_decomp(npx,npy,nregions,grid_type,nested,layout,io_layout,bd,tile,square_domain,& + npes_per_tile,domain,domain_for_coupler,num_contact,pelist) integer, intent(IN) :: npx,npy,grid_type - integer, intent(INOUT) :: nregions + integer, intent(INOUT) :: nregions, tile logical, intent(IN):: nested - type(fv_atmos_type), intent(INOUT), target :: Atm integer, intent(INOUT) :: layout(2), io_layout(2) integer, allocatable :: pe_start(:), pe_end(:) integer :: nx,ny,n,num_alloc character(len=32) :: type = "unknown" - logical :: is_symmetry + logical :: is_symmetry logical :: debug=.false. integer, allocatable :: tile_id(:) integer i - integer :: npes_x, npes_y + integer :: npes_x, npes_y - integer, pointer :: pelist(:), grid_number, num_contact, npes_per_tile - logical, pointer :: square_domain - type(domain2D), pointer :: domain, domain_for_coupler + integer, intent(INOUT) :: pelist(:) + integer, intent(OUT) :: num_contact, npes_per_tile + logical, intent(OUT) :: square_domain + type(domain2D), intent(OUT) :: domain, domain_for_coupler + type(fv_grid_bounds_type), intent(INOUT) :: bd nx = npx-1 ny = npy-1 - !! Init pointers - pelist => Atm%pelist - grid_number => Atm%grid_number - num_contact => Atm%num_contact - domain => Atm%domain - domain_for_coupler => Atm%domain_for_coupler - npes_per_tile => Atm%npes_per_tile - npes_x = layout(1) npes_y = layout(2) - call mpp_domains_init(MPP_DOMAIN_TIME) - ! call mpp_domains_set_stack_size(10000) - ! call mpp_domains_set_stack_size(900000) - ! call mpp_domains_set_stack_size(1500000) -#ifdef SMALL_PE - call mpp_domains_set_stack_size(6000000) -#else - call mpp_domains_set_stack_size(3000000) -#endif - select case(nregions) case ( 1 ) ! Lat-Lon "cyclic" @@ -351,21 +330,21 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) is_symmetry = .true. call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout ) - if ( npes_x == 0 ) then + if ( npes_x == 0 ) then npes_x = layout(1) endif if ( npes_y == 0 ) then npes_y = layout(2) endif - if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) Atm%gridstruct%square_domain = .true. + if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true. if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y call mp_stop call exit(1) endif - + layout = (/npes_x,npes_y/) case (3) ! Lat-Lon "cyclic" type="Lat-Lon: cyclic" @@ -420,14 +399,14 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) npes_per_tile = npes_x*npes_y call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout ) - if ( npes_x == 0 ) then + if ( npes_x == 0 ) then npes_x = layout(1) endif if ( npes_y == 0 ) then npes_y = layout(2) endif - if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) Atm%gridstruct%square_domain = .true. + if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true. if ( (npx/npes_x < ng) .or. 
(npy/npes_y < ng) ) then write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y @@ -435,7 +414,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) call mp_stop call exit(1) endif - + layout = (/npes_x,npes_y/) case default call mpp_error(FATAL, 'domain_decomp: no such test: '//type) @@ -454,7 +433,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) allocate(tile1(num_alloc), tile2(num_alloc) ) allocate(istart1(num_alloc), iend1(num_alloc), jstart1(num_alloc), jend1(num_alloc) ) allocate(istart2(num_alloc), iend2(num_alloc), jstart2(num_alloc), jend2(num_alloc) ) - + is_symmetry = .true. select case(nregions) case ( 1 ) @@ -576,8 +555,7 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) if( nregions .NE. 1 ) then call mpp_error(FATAL, 'domain_decomp: nregions should be 1 for nested region, contact developer') endif - tile_id(1) = 7 ! currently we assuming the nested tile is nested in one face of cubic sphere grid. - ! we need a more general way to deal with nested grid tile id. + tile_id(1) = 7 ! TODO need update for multiple nests else do n = 1, nregions tile_id(n) = n @@ -604,27 +582,27 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) deallocate(istart2, iend2, jstart2, jend2) !--- find the tile number - Atm%tile = (gid-pelist(1))/npes_per_tile+1 + tile = (gid-pelist(1))/npes_per_tile+1 if (ANY(pelist == gid)) then npes_this_grid = npes_per_tile*nregions - tile = Atm%tile + tile = tile call mpp_get_compute_domain( domain, is, ie, js, je ) call mpp_get_data_domain ( domain, isd, ied, jsd, jed ) - - Atm%bd%is = is - Atm%bd%js = js - Atm%bd%ie = ie - Atm%bd%je = je - - Atm%bd%isd = isd - Atm%bd%jsd = jsd - Atm%bd%ied = ied - Atm%bd%jed = jed - - Atm%bd%isc = is - Atm%bd%jsc = js - Atm%bd%iec = ie - Atm%bd%jec = je + + bd%is = is + bd%js = js + bd%ie = ie + bd%je = je + + bd%isd = isd + bd%jsd = jsd + bd%ied = ied + bd%jed = jed + + bd%isc = is + bd%jsc = js + bd%iec = ie + bd%jec = je if (debug .and. nregions==1) then tile=1 @@ -634,21 +612,21 @@ subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout) endif 200 format(i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ') else - - Atm%bd%is = 0 - Atm%bd%js = 0 - Atm%bd%ie = -1 - Atm%bd%je = -1 - - Atm%bd%isd = 0 - Atm%bd%jsd = 0 - Atm%bd%ied = -1 - Atm%bd%jed = -1 - - Atm%bd%isc = 0 - Atm%bd%jsc = 0 - Atm%bd%iec = -1 - Atm%bd%jec = -1 + + bd%is = 0 + bd%js = 0 + bd%ie = -1 + bd%je = -1 + + bd%isd = 0 + bd%jsd = 0 + bd%ied = -1 + bd%jed = -1 + + bd%isc = 0 + bd%jsc = 0 + bd%iec = -1 + bd%jec = -1 endif @@ -667,18 +645,18 @@ subroutine start_var_group_update_2d(group, array, domain, flags, position, whal logical, optional, intent(in) :: complete real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! (in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. 
Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -690,7 +668,7 @@ subroutine start_var_group_update_2d(group, array, domain, flags, position, whal is_complete = .TRUE. if(present(complete)) is_complete = complete - if(is_complete .and. halo_update_type == 1) then + if(is_complete .and. halo_update_type == 1) then call mpp_start_group_update(group, domain, d_type) endif @@ -708,18 +686,18 @@ subroutine start_var_group_update_3d(group, array, domain, flags, position, whal real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! (in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -748,18 +726,18 @@ subroutine start_var_group_update_4d(group, array, domain, flags, position, whal real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) array - The array which is having its halos points exchanged. ! (in) domain - contains domain information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) position - An optional argument indicating the position. This is ! may be CORNER, but is CENTER by default. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. integer :: dirflag @@ -792,22 +770,22 @@ subroutine start_vector_group_update_2d(group, u_cmpt, v_cmpt, domain, flags, gr real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which ! is having its halos points exchanged. ! (inout) v_cmpt - The nominal meridional (v) component of the vector pair -! which is having its halos points exchanged. +! which is having its halos points exchanged. ! (in) domain - Contains domain decomposition information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! 
(in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE, ! CGRID_NE or DGRID_NE, indicating where the two components of the -! vector are discretized. +! vector are discretized. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -837,22 +815,22 @@ subroutine start_vector_group_update_3d(group, u_cmpt, v_cmpt, domain, flags, gr real :: d_type logical :: is_complete -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! This data will be used in do_group_pass. ! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which ! is having its halos points exchanged. ! (inout) v_cmpt - The nominal meridional (v) component of the vector pair -! which is having its halos points exchanged. +! which is having its halos points exchanged. ! (in) domain - Contains domain decomposition information. ! (in) flags - An optional integer indicating which directions the -! data should be sent. +! data should be sent. ! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE, ! CGRID_NE or DGRID_NE, indicating where the two components of the -! vector are discretized. +! vector are discretized. ! (in) complete - An optional argument indicating whether the halo updates -! should be initiated immediately or wait for second -! pass_..._start call. Omitting complete is the same as +! should be initiated immediately or wait for second +! pass_..._start call. Omitting complete is the same as ! setting complete to .true. if (mpp_group_update_initialized(group)) then @@ -877,8 +855,8 @@ subroutine complete_group_halo_update(group, domain) type(domain2d), intent(inout) :: domain real :: d_type -! Arguments: -! (inout) group - The data type that store information for group update. +! Arguments: +! (inout) group - The data type that store information for group update. ! (in) domain - Contains domain decomposition information. if( halo_update_type == 1 ) then @@ -891,12 +869,14 @@ end subroutine complete_group_halo_update +!Deprecated +subroutine broadcast_domains(Atm,current_pelist,current_npes) -subroutine broadcast_domains(Atm) - type(fv_atmos_type), intent(INOUT) :: Atm(:) + integer, intent(IN) :: current_npes + integer, intent(IN) :: current_pelist(current_npes) - integer :: n, i1, i2, j1, j2, i + integer :: n, i integer :: ens_root_pe, ensemble_id !I think the idea is that each process needs to properly be part of a pelist, @@ -909,20 +889,22 @@ subroutine broadcast_domains(Atm) !Pelist needs to be set to ALL ensemble PEs for broadcast_domain to work call mpp_set_current_pelist((/ (i,i=ens_root_pe,npes-1+ens_root_pe) /)) - do n=1,size(Atm) - call mpp_broadcast_domain(Atm(n)%domain) - call mpp_broadcast_domain(Atm(n)%domain_for_coupler) - end do + do n=1,size(Atm) + call mpp_broadcast_domain(Atm(n)%domain) + call mpp_broadcast_domain(Atm(n)%domain_for_coupler) + end do + call mpp_set_current_pelist(current_pelist) end subroutine broadcast_domains +!deprecated subroutine switch_current_domain(new_domain,new_domain_for_coupler) type(domain2D), intent(in), target :: new_domain, new_domain_for_coupler logical, parameter :: debug = .FALSE. 
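! Deprecation note (a minimal sketch, assuming the caller holds the Atm
! array): rather than switching the module-level bounds here, the supported
! pattern is to set the active domain directly via set_domain from
! fms_io_mod (imported above). The loop below is hypothetical:
!
!   do n = 1, size(Atm)
!     if (ANY(Atm(n)%pelist == gid)) call set_domain(Atm(n)%domain)
!   enddo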
!--- find the tile number - !tile = mpp_pe()/npes_per_tile+1 + !tile = mpp_pe()/npes_per_tile+1 !ntiles = mpp_get_ntile_count(new_domain) call mpp_get_compute_domain( new_domain, is, ie, js, je ) isc = is ; jsc = js @@ -938,6 +920,7 @@ subroutine switch_current_domain(new_domain,new_domain_for_coupler) end subroutine switch_current_domain +!deprecated subroutine switch_current_Atm(new_Atm, switch_domain) type(fv_atmos_type), intent(IN), target :: new_Atm @@ -945,13 +928,16 @@ subroutine switch_current_Atm(new_Atm, switch_domain) logical, parameter :: debug = .false. logical :: swD - if (debug .AND. (gid==masterproc)) print*, 'SWITCHING ATM STRUCTURES', new_Atm%grid_number - if (present(switch_domain)) then - swD = switch_domain - else - swD = .true. - end if - if (swD) call switch_current_domain(new_Atm%domain, new_Atm%domain_for_coupler) + + call mpp_error(FATAL, "switch_current_Atm deprecated. call set_domain instead.") + +!!$ if (debug .AND. (gid==masterproc)) print*, 'SWITCHING ATM STRUCTURES', new_Atm%grid_number +!!$ if (present(switch_domain)) then +!!$ swD = switch_domain +!!$ else +!!$ swD = .true. +!!$ end if +!!$ if (swD) call switch_current_domain(new_Atm%domain, new_Atm%domain_for_coupler) !!$ if (debug .AND. (gid==masterproc)) WRITE(*,'(A, 6I5)') 'NEW GRID DIMENSIONS: ', & !!$ isd, ied, jsd, jed, new_Atm%npx, new_Atm%npy @@ -960,12 +946,12 @@ end subroutine switch_current_Atm !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !! -! +! subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: q integer, intent(IN):: npx,npy - integer, intent(IN):: FILL ! X-Dir or Y-Dir - logical, OPTIONAL, intent(IN) :: AGRID, BGRID + integer, intent(IN):: FILL ! X-Dir or Y-Dir + logical, OPTIONAL, intent(IN) :: AGRID, BGRID integer :: i,j if (present(BGRID)) then @@ -974,7 +960,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -983,7 +969,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner @@ -992,7 +978,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case default do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -1006,7 +992,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. 
(js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner @@ -1015,7 +1001,7 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner @@ -1023,13 +1009,13 @@ subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID) enddo case default do j=1,ng - do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + do i=1,ng + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner enddo - enddo + enddo end select endif endif @@ -1040,12 +1026,12 @@ end subroutine fill_corners_2d_r4 !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !! -! +! subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: q integer, intent(IN):: npx,npy - integer, intent(IN):: FILL ! X-Dir or Y-Dir - logical, OPTIONAL, intent(IN) :: AGRID, BGRID + integer, intent(IN):: FILL ! X-Dir or Y-Dir + logical, OPTIONAL, intent(IN) :: AGRID, BGRID integer :: i,j if (present(BGRID)) then @@ -1054,7 +1040,7 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -1063,7 +1049,7 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner @@ -1072,7 +1058,7 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) case default do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner if ((ie==npx-1) .and. 
(je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner @@ -1086,7 +1072,7 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) case (XDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner @@ -1095,7 +1081,7 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) case (YDir) do j=1,ng do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner @@ -1103,13 +1089,13 @@ subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID) enddo case default do j=1,ng - do i=1,ng - if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner + do i=1,ng + if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner enddo - enddo + enddo end select endif endif @@ -1270,16 +1256,16 @@ subroutine fill_corners_dgrid_r8(x, y, npx, npy, mySign) real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y integer, intent(IN):: npx,npy - real(kind=8), intent(IN) :: mySign + real(kind=8), intent(IN) :: mySign integer :: i,j do j=1,ng do i=1,ng - ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner + ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner - if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mySign*y(1-j ,i ) !SW Corner + if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mySign*y(1-j ,i ) !SW Corner if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mySign*y(npx+j,npy-i) !NE Corner @@ -1287,11 +1273,11 @@ subroutine fill_corners_dgrid_r8(x, y, npx, npy, mySign) enddo do j=1,ng do i=1,ng - ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner + ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner - if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mySign*x(j ,1-i ) !SW Corner + if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mySign*x(j ,1-i ) !SW Corner if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner if ((ie+1==npx) .and. 
(js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mySign*x(npx-j ,npy+i) !NE Corner @@ -1310,16 +1296,16 @@ subroutine fill_corners_dgrid_r4(x, y, npx, npy, mySign) real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y integer, intent(IN):: npx,npy - real(kind=4), intent(IN) :: mySign + real(kind=4), intent(IN) :: mySign integer :: i,j do j=1,ng do i=1,ng - ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner + ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner - if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mySign*y(1-j ,i ) !SW Corner + if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mySign*y(1-j ,i ) !SW Corner if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mySign*y(npx+j,npy-i) !NE Corner @@ -1327,11 +1313,11 @@ subroutine fill_corners_dgrid_r4(x, y, npx, npy, mySign) enddo do j=1,ng do i=1,ng - ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner + ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner - if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mySign*x(j ,1-i ) !SW Corner + if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mySign*x(j ,1-i ) !SW Corner if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mySign*x(npx-j ,npy+i) !NE Corner @@ -1355,7 +1341,7 @@ subroutine fill_corners_cgrid_r4(x, y, npx, npy, mySign) do j=1,ng do i=1,ng - if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner + if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner if ((is == 1) .and. (je+1==npy)) x(1-i ,npy-1+j) = mySign*y(j ,npy+i) !NW Corner if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mySign*y(npx-j ,1-i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner @@ -1363,13 +1349,13 @@ subroutine fill_corners_cgrid_r4(x, y, npx, npy, mySign) enddo do j=1,ng do i=1,ng - if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner + if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mySign*x(1-j ,npy-i) !NW Corner if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mySign*x(npx+j,i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner enddo enddo - + end subroutine fill_corners_cgrid_r4 ! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! @@ -1387,7 +1373,7 @@ subroutine fill_corners_cgrid_r8(x, y, npx, npy, mySign) do j=1,ng do i=1,ng - if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner + if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner if ((is == 1) .and. 
(je+1==npy)) x(1-i ,npy-1+j) = mySign*y(j ,npy+i) !NW Corner if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mySign*y(npx-j ,1-i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner @@ -1395,13 +1381,13 @@ subroutine fill_corners_cgrid_r8(x, y, npx, npy, mySign) enddo do j=1,ng do i=1,ng - if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner + if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mySign*x(1-j ,npy-i) !NW Corner if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mySign*x(npx+j,i ) !SE Corner if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner enddo enddo - + end subroutine fill_corners_cgrid_r8 ! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! @@ -1470,417 +1456,39 @@ end subroutine fill_corners_agrid_r8 ! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- - -!!$!------------------------------------------------------------------------------- -!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -!!$! -!!$! mp_corner_comm :: Point-based MPI communcation routine for Cubed-Sphere -!!$! ghosted corner point on B-Grid -!!$! this routine sends 24 16-byte messages -!!$! -!!$ subroutine mp_corner_comm(q, npx, npy, tile) -!!$ integer, intent(IN) :: npx,npy, tile -!!$ real , intent(INOUT):: q(isd:ied+1,jsd:jed+1) -!!$ -!!$ integer, parameter :: ntiles = 6 -!!$ -!!$ real :: qsend(24) -!!$ real :: send_tag, recv_tag -!!$ integer :: sqest(24), rqest(24) -!!$ integer :: Stats(24*MPI_STATUS_SIZE) -!!$ integer :: nsend, nrecv, nread -!!$ integer :: dest_gid, src_gid -!!$ integer :: n -!!$ -!!$ qsend = 1.e25 -!!$ nsend=0 -!!$ nrecv=0 -!!$ -!!$ if ( mod(tile,2) == 0 ) then -!!$! Even Face LL and UR pairs 6 2-way -!!$ if ( (is==1) .and. (js==1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is,js+1) -!!$ send_tag = 300+tile -!!$ dest_gid = (tile-2)*npes_x*npes_y - 1 -!!$ if (dest_gid < 0) dest_gid=npes+dest_gid -!!$ recv_tag = 100+(tile-2) -!!$ if (tile==2) recv_tag = 100+(ntiles) -!!$ src_gid = (tile-3)*npes_x*npes_y -!!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1 -!!$ if (src_gid < 0) src_gid=npes+src_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$ if ( (ie==npx-1) .and. 
(je==npy-1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie,je+1) -!!$ send_tag = 100+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y -!!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes -!!$ recv_tag = 300+(tile+2) -!!$ if (tile==6) recv_tag = 300+2 -!!$ src_gid = (tile+1)*npes_x*npes_y -!!$ if (src_gid+1 > npes) src_gid=src_gid-npes -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$! wait for comm to complete -!!$ if (npes==6) then -!!$ if (nsend>0) then -!!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ if (nrecv>0) then -!!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ nsend=0 ; nrecv=0 -!!$ endif -!!$ -!!$! Even Face LR 1 pair ; 1 1-way -!!$ if ( (tile==2) .and. (ie==npx-1) .and. (js==1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie,js) -!!$ send_tag = 200+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1 -!!$ recv_tag = 200+(tile+2) -!!$ src_gid = dest_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$ if ( (tile==4) .and. (ie==npx-1) .and. (js==1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie+1,js+1) -!!$ send_tag = 200+tile -!!$ dest_gid = (tile-3)*npes_x*npes_y + npes_x-1 -!!$ recv_tag = 200+(tile-2) -!!$ src_gid = dest_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie,js) -!!$ send_tag = 200+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1 -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ endif -!!$ if ( (tile==6) .and. (ie==npx-1) .and. (js==1) ) then -!!$ recv_tag = 200+(tile-2) -!!$ src_gid = (tile-3)*npes_x*npes_y + npes_x-1 -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ -!!$! 
wait for comm to complete -!!$ if (npes==6) then -!!$ if (nsend>0) then -!!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ if (nrecv>0) then -!!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ nsend=0 ; nrecv=0 -!!$ endif -!!$ -!!$! Send to Odd face LR 3 1-way -!!$ if ( (is==1) .and. (js==1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is+1,js) -!!$ send_tag = 200+tile -!!$ dest_gid = (tile-2)*npes_x*npes_y + npes_x-1 -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ endif -!!$ -!!$! Receive Even Face UL 3 1-way -!!$ if ( (is==1) .and. (je==npy-1) ) then -!!$ recv_tag = 400+(tile-1) -!!$ src_gid = (tile-2)*npes_x*npes_y + npes_x*(npes_y-1) + npes_x-1 -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ -!!$ else -!!$ -!!$! Odd Face LL and UR pairs 6 2-way -!!$ if ( (is==1) .and. (js==1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is+1,js) -!!$ send_tag = 300+tile -!!$ dest_gid = (tile-2)*npes_x*npes_y - 1 -!!$ if (dest_gid < 0) dest_gid=npes+dest_gid -!!$ recv_tag = 100+(tile-2) -!!$ if (tile==1) recv_tag = 100+(ntiles-tile) -!!$ src_gid = (tile-3)*npes_x*npes_y -!!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1 -!!$ if (src_gid < 0) src_gid=npes+src_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$ if ( (ie==npx-1) .and. (je==npy-1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie+1,je) -!!$ send_tag = 100+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y -!!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes -!!$ recv_tag = 300+(tile+2) -!!$ if (tile==5) recv_tag = 300+1 -!!$ src_gid = (tile+1)*npes_x*npes_y -!!$ if (src_gid+1 > npes) src_gid=src_gid-npes -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$! wait for comm to complete -!!$ if (npes==6) then -!!$ if (nsend>0) then -!!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ if (nrecv>0) then -!!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ nsend=0 ; nrecv=0 -!!$ endif -!!$ -!!$! Odd Face UL 1 pair ; 1 1-way -!!$ if ( (tile==1) .and. (is==1) .and. 
(je==npy-1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is,je) -!!$ send_tag = 400+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1) -!!$ recv_tag = 400+(tile+2) -!!$ src_gid = dest_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ endif -!!$ if ( (tile==3) .and. (is==1) .and. (je==npy-1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is+1,je+1) -!!$ send_tag = 400+tile -!!$ dest_gid = npes_x*(npes_y-1) -!!$ recv_tag = 400+(tile-2) -!!$ src_gid = dest_gid -!!$ if (npes>6) then -!!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, & -!!$ dest_gid, send_tag, & -!!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, & -!!$ src_gid, recv_tag, & -!!$ commglobal, Stats, ierror ) -!!$ nsend=nsend-1 -!!$ else -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(is,je) -!!$ send_tag = 400+tile -!!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1) -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ endif -!!$ if ( (tile==5) .and. (is==1) .and. (je==npy-1) ) then -!!$ recv_tag = 400+(tile-2) -!!$ src_gid = (tile-3)*npes_x*npes_y + npes_x*(npes_y-1) -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ -!!$! wait for comm to complete -!!$ if (npes==6) then -!!$ if (nsend>0) then -!!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ if (nrecv>0) then -!!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ nsend=0 ; nrecv=0 -!!$ endif -!!$ -!!$! Send to Even face UL 3 1-way -!!$ if ( (ie==npx-1) .and. (je==npy-1) ) then -!!$ nsend=nsend+1 -!!$ qsend(nsend) = q(ie,je+1) -!!$ send_tag = 400+tile -!!$ dest_gid = tile*npes_x*npes_y + npes_x*(npes_y-1) -!!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, & -!!$ send_tag, commglobal, sqest(nsend), ierror ) -!!$ endif -!!$ -!!$! Receive Odd Face LR 3 1-way -!!$ if ( (ie==npx-1) .and. (js==1) ) then -!!$ recv_tag = 200+(tile+1) -!!$ src_gid = (tile-1)*npes_x*npes_y + npes_x*npes_y -!!$ nrecv=nrecv+1 -!!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, & -!!$ recv_tag, commglobal, rqest(nrecv), ierror ) -!!$ endif -!!$ -!!$ endif -!!$ -!!$! wait for comm to complete -!!$ if (nsend>0) then -!!$ call MPI_WAITALL(nsend, sqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ if (nrecv>0) then -!!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror) -!!$ -!!$ -!!$ -!!$ endif -!!$ -!!$ end subroutine mp_corner_comm -!!$! -!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!!$!------------------------------------------------------------------------------- - !------------------------------------------------------------------------------- ! 
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_gather_4d_r4 :: Call SPMD Gather -! +! +! mp_gather_4d_r4 :: Call SPMD Gather +! subroutine mp_gather_4d_r4(q, i1,i2, j1,j2, idim, jdim, kdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=4), intent(INOUT):: q(idim,jdim,kdim,ldim) - integer :: i,j,k,l,n,icnt + integer :: i,j,k,l,n,icnt integer :: Lsize, Lsize_buf(1) integer :: Gsize integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid) integer :: Ldims(5), Gdims(5*npes_this_grid) real(kind=4), allocatable, dimension(:) :: larr, garr - + Ldims(1) = i1 Ldims(2) = i2 Ldims(3) = j1 Ldims(4) = j2 - Ldims(5) = tile + Ldims(5) = tile do l=1,npes_this_grid cnts(l) = 5 Ldispl(l) = 5*(l-1) - enddo + enddo call mpp_gather(Ldims, Gdims) ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror) - + Lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) ) * kdim do l=1,npes_this_grid cnts(l) = 1 Ldispl(l) = l-1 - enddo + enddo LsizeS(:)=1 Lsize_buf(1) = Lsize call mpp_gather(Lsize_buf, LsizeS) @@ -1937,18 +1545,18 @@ end subroutine mp_gather_4d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_gather_3d_r4 :: Call SPMD Gather +! mp_gather_3d_r4 :: Call SPMD Gather ! subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 integer, intent(IN) :: idim, jdim, ldim real(kind=4), intent(INOUT):: q(idim,jdim,ldim) - integer :: i,j,l,n,icnt + integer :: i,j,l,n,icnt integer :: Lsize, Lsize_buf(1) integer :: Gsize integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid) integer :: Ldims(5), Gdims(5*npes_this_grid) - real(kind=4), allocatable, dimension(:) :: larr, garr + real(kind=4), allocatable, dimension(:) :: larr, garr Ldims(1) = i1 Ldims(2) = i2 @@ -1966,7 +1574,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) do l=1,npes_this_grid cnts(l) = 1 Ldispl(l) = l-1 - enddo + enddo LsizeS(:)=1 Lsize_buf(1) = Lsize call mpp_gather(Lsize_buf, LsizeS) @@ -1976,7 +1584,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) icnt = 1 do j=j1,j2 do i=i1,i2 - larr(icnt) = q(i,j,tile) + larr(icnt) = q(i,j,tile) icnt=icnt+1 enddo enddo @@ -1996,7 +1604,7 @@ subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim) do n=2,npes_this_grid icnt=1 do l=Gdims( (n-1)*5 + 5 ), Gdims( (n-1)*5 + 5 ) - do j=Gdims( (n-1)*5 + 3 ), Gdims( (n-1)*5 + 4 ) + do j=Gdims( (n-1)*5 + 3 ), Gdims( (n-1)*5 + 4 ) do i=Gdims( (n-1)*5 + 1 ), Gdims( (n-1)*5 + 2 ) q(i,j,l) = garr(Ldispl(n)+icnt) icnt=icnt+1 @@ -2016,7 +1624,7 @@ end subroutine mp_gather_3d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_gather_3d_r8 :: Call SPMD Gather +! mp_gather_3d_r8 :: Call SPMD Gather ! subroutine mp_gather_3d_r8(q, i1,i2, j1,j2, idim, jdim, ldim) integer, intent(IN) :: i1,i2, j1,j2 @@ -2096,7 +1704,7 @@ end subroutine mp_gather_3d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_i4 :: Call SPMD broadcast +! mp_bcst_i4 :: Call SPMD broadcast ! 
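! Each mp_bcst_* wrapper below reduces to one MPI_BCAST rooted at masterproc
! over commglobal, with the MPI datatype matched to the Fortran kind and the
! count matched to the buffer size. A hedged sketch of the shared pattern
! (the name count is illustrative; datatype varies per specific routine):
!
!   call MPI_BCAST(q, count, MPI_INTEGER, masterproc, commglobal, ierror)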
subroutine mp_bcst_i4(q) integer, intent(INOUT) :: q @@ -2111,7 +1719,7 @@ end subroutine mp_bcst_i4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_r4 :: Call SPMD broadcast +! mp_bcst_r4 :: Call SPMD broadcast ! subroutine mp_bcst_r4(q) real(kind=4), intent(INOUT) :: q @@ -2126,7 +1734,7 @@ end subroutine mp_bcst_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_r8 :: Call SPMD broadcast +! mp_bcst_r8 :: Call SPMD broadcast ! subroutine mp_bcst_r8(q) real(kind=8), intent(INOUT) :: q @@ -2141,7 +1749,7 @@ end subroutine mp_bcst_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_3d_r4 :: Call SPMD broadcast +! mp_bcst_3d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_3d_r4(q, idim, jdim, kdim) integer, intent(IN) :: idim, jdim, kdim @@ -2157,7 +1765,7 @@ end subroutine mp_bcst_3d_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_3d_r8 :: Call SPMD broadcast +! mp_bcst_3d_r8 :: Call SPMD broadcast ! subroutine mp_bcst_3d_r8(q, idim, jdim, kdim) integer, intent(IN) :: idim, jdim, kdim @@ -2172,33 +1780,33 @@ end subroutine mp_bcst_3d_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_bcst_4d_r4 :: Call SPMD broadcast +! +! mp_bcst_4d_r4 :: Call SPMD broadcast ! subroutine mp_bcst_4d_r4(q, idim, jdim, kdim, ldim) integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=4), intent(INOUT) :: q(idim,jdim,kdim,ldim) call MPI_BCAST(q, idim*jdim*kdim*ldim, MPI_REAL, masterproc, commglobal, ierror) - + end subroutine mp_bcst_4d_r4 -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_bcst_4d_r8 :: Call SPMD broadcast +! +! mp_bcst_4d_r8 :: Call SPMD broadcast ! subroutine mp_bcst_4d_r8(q, idim, jdim, kdim, ldim) integer, intent(IN) :: idim, jdim, kdim, ldim real(kind=8), intent(INOUT) :: q(idim,jdim,kdim,ldim) call MPI_BCAST(q, idim*jdim*kdim*ldim, MPI_DOUBLE_PRECISION, masterproc, commglobal, ierror) - + end subroutine mp_bcst_4d_r8 -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -2237,44 +1845,44 @@ end subroutine mp_bcst_4d_i8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX +! +! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r4_1d(mymax,npts) integer, intent(IN) :: npts real(kind=4), intent(INOUT) :: mymax(npts) - + real(kind=4) :: gmax(npts) - + call MPI_ALLREDUCE( mymax, gmax, npts, MPI_REAL, MPI_MAX, & commglobal, ierror ) - + mymax = gmax - + end subroutine mp_reduce_max_r4_1d -! +! ! 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX +! +! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r8_1d(mymax,npts) integer, intent(IN) :: npts real(kind=8), intent(INOUT) :: mymax(npts) - + real(kind=8) :: gmax(npts) - + call MPI_ALLREDUCE( mymax, gmax, npts, MPI_DOUBLE_PRECISION, MPI_MAX, & commglobal, ierror ) - + mymax = gmax - + end subroutine mp_reduce_max_r8_1d -! +! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- @@ -2282,7 +1890,7 @@ end subroutine mp_reduce_max_r8_1d !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX +! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r4(mymax) real(kind=4), intent(INOUT) :: mymax @@ -2299,7 +1907,7 @@ end subroutine mp_reduce_max_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX +! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_r8(mymax) real(kind=8), intent(INOUT) :: mymax @@ -2343,7 +1951,7 @@ end subroutine mp_reduce_min_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_bcst_4d_i4 :: Call SPMD REDUCE_MAX +! mp_reduce_max_i4 :: Call SPMD REDUCE_MAX ! subroutine mp_reduce_max_i4(mymax) integer, intent(INOUT) :: mymax @@ -2363,7 +1971,7 @@ end subroutine mp_reduce_max_i4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r4(mysum) real(kind=4), intent(INOUT) :: mysum @@ -2383,7 +1991,7 @@ end subroutine mp_reduce_sum_r4 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r8(mysum) real(kind=8), intent(INOUT) :: mysum @@ -2403,7 +2011,7 @@ end subroutine mp_reduce_sum_r8 !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts) integer, intent(in) :: npts @@ -2416,7 +2024,7 @@ subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts) mysum = 0.0 do i=1,npts mysum = mysum + sum1d(i) - enddo + enddo call MPI_ALLREDUCE( mysum, gsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & commglobal, ierror ) @@ -2431,7 +2039,7 @@ end subroutine mp_reduce_sum_r4_1d !------------------------------------------------------------------------------- ! 
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM +! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM ! subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts) integer, intent(in) :: npts @@ -2444,7 +2052,7 @@ subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts) mysum = 0.0 do i=1,npts mysum = mysum + sum1d(i) - enddo + enddo call MPI_ALLREDUCE( mysum, gsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & commglobal, ierror ) diff --git a/tools/fv_nggps_diag.F90 b/tools/fv_nggps_diag.F90 index 03b62f1a0..e9befa306 100644 --- a/tools/fv_nggps_diag.F90 +++ b/tools/fv_nggps_diag.F90 @@ -20,7 +20,7 @@ !*********************************************************************** module fv_nggps_diags_mod -use mpp_mod, only: mpp_pe, mpp_root_pe + use mpp_mod, only: mpp_pe, mpp_root_pe use constants_mod, only: grav, rdgas use fms_io_mod, only: set_domain, nullify_domain use time_manager_mod, only: time_type @@ -35,13 +35,18 @@ module fv_nggps_diags_mod real, parameter:: missing_value = -1.e10 logical master - integer :: id_ua, id_va, id_pt, id_delp, id_pfhy, id_pfnh, id_w, id_delz + integer :: id_ua, id_va, id_pt, id_delp, id_pfhy, id_pfnh, id_w, id_delz integer, allocatable :: id_tracer(:) logical :: module_is_initialized=.false. integer :: sphum, liq_wat, ice_wat ! GFDL physics integer :: rainwat, snowwat, graupel - real :: vrange(2), wrange(2), trange(2) + real :: vrange(2) = (/ -330., 330. /) ! winds + real :: wrange(2) = (/ -100., 100. /) ! vertical wind + real :: trange(2) = (/ 100., 350. /) ! temperature + +! file name + character(len=64) :: field = 'gfs_dyn' ! tracers character(len=128) :: tname @@ -54,15 +59,12 @@ module fv_nggps_diags_mod subroutine fv_nggps_diag_init(Atm, axes, Time) type(fv_atmos_type), intent(inout), target :: Atm(:) - integer, intent(in) :: axes(4) + integer, intent(in) :: axes(4) type(time_type), intent(in) :: Time - character(len=64) :: field integer :: n, ncnst, i - vrange = (/ -330., 330. /) ! winds - wrange = (/ -100., 100. /) ! vertical wind - trange = (/ 100., 350. /) ! temperature + if (module_is_initialized) return n = 1 ncnst = Atm(1)%ncnst @@ -83,8 +85,7 @@ subroutine fv_nggps_diag_init(Atm, axes, Time) allocate(id_tracer(ncnst)) id_tracer(:) = 0 - field= 'gfs_dyn' - + if (Atm(n)%flagstruct%write_3d_diags) then !------------------- ! A grid winds (lat-lon) !------------------- @@ -94,7 +95,7 @@ subroutine fv_nggps_diag_init(Atm, axes, Time) id_va = register_diag_field ( trim(field), 'vcomp', axes(1:3), Time, & 'meridional wind', 'm/sec', missing_value=missing_value, range=vrange) - if( Atm(n)%flagstruct%hydrostatic ) then + if( Atm(n)%flagstruct%hydrostatic ) then id_pfhy = register_diag_field ( trim(field), 'pfhy', axes(1:3), Time, & 'hydrostatic pressure', 'pa', missing_value=missing_value ) else @@ -121,6 +122,9 @@ subroutine fv_nggps_diag_init(Atm, axes, Time) axes(1:3), Time, trim(tlongname), & trim(tunits), missing_value=missing_value) enddo + endif + + module_is_initialized=.true. 
end subroutine fv_nggps_diag_init @@ -149,13 +153,13 @@ subroutine fv_nggps_diag(Atm, zvir, Time) if ( Atm(n)%flagstruct%range_warn ) then call range_check('DELP', Atm(n)%delp, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - 0.01*ptop, 200.E2, bad_range) + 0.01*ptop, 200.E2, bad_range, Time) call range_check('UA', Atm(n)%ua, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - -220., 250., bad_range) + -250., 250., bad_range, Time) call range_check('VA', Atm(n)%va, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - -220., 220., bad_range) + -250., 250., bad_range, Time) call range_check('TA', Atm(n)%pt, isc, iec, jsc, jec, ngc, npz, Atm(n)%gridstruct%agrid, & - 130., 350., bad_range) !DCMIP ICs have very low temperatures + 150., 350., bad_range, Time) !DCMIP ICs have very low temperatures endif !--- A-GRID WINDS @@ -168,7 +172,7 @@ subroutine fv_nggps_diag(Atm, zvir, Time) endif !--- TEMPERATURE - if(id_pt > 0) used=send_data(id_pt , Atm(n)%pt (isc:iec,jsc:jec,:), Time) + if(id_pt > 0) used=send_data(id_pt, Atm(n)%pt(isc:iec,jsc:jec,:), Time) !--- TRACERS do itrac=1, Atm(n)%ncnst @@ -196,7 +200,7 @@ subroutine fv_nggps_diag(Atm, zvir, Time) if( Atm(n)%flagstruct%hydrostatic .and. id_pfhy > 0 ) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = 0.5 *(Atm(n)%pe(i,k,j)+Atm(n)%pe(i,k+1,j)) enddo enddo @@ -209,8 +213,8 @@ subroutine fv_nggps_diag(Atm, zvir, Time) if(id_delp > 0 .or. ((.not. Atm(n)%flagstruct%hydrostatic) .and. id_pfnh > 0)) then do k=1,npz do j=jsc,jec - do i=isc,iec - wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-Atm(n)%q(i,j,k,liq_wat)) + do i=isc,iec + wk(i,j,k) = Atm(n)%delp(i,j,k)*(1.-sum(Atm(n)%q(i,j,k,2:Atm(n)%flagstruct%nwat))) enddo enddo enddo @@ -221,9 +225,9 @@ subroutine fv_nggps_diag(Atm, zvir, Time) if( (.not. Atm(n)%flagstruct%hydrostatic) .and. id_pfnh > 0) then do k=1,npz do j=jsc,jec - do i=isc,iec + do i=isc,iec wk(i,j,k) = -wk(i,j,k)/(Atm(n)%delz(i,j,k)*grav)*rdgas* & - Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) + Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum)) enddo enddo enddo diff --git a/tools/fv_nudge.F90 b/tools/fv_nudge.F90 index ad50c06c8..b1a5c1e88 100644 --- a/tools/fv_nudge.F90 +++ b/tools/fv_nudge.F90 @@ -53,8 +53,10 @@ module fv_nwp_nudge_mod real(kind=R_GRID), parameter :: radius = cnst_radius - character(len=128) :: version = '' - character(len=128) :: tagname = '' +! version number of this module +! Include variable "version" to be written to log file. +#include<file_version.h> + logical :: do_adiabatic_init public fv_nwp_nudge, fv_nwp_nudge_init, fv_nwp_nudge_end, breed_slp_inline, T_is_Tv @@ -72,7 +74,7 @@ module fv_nwp_nudge_mod real :: time_nudge = 0. integer :: time_interval = 6*3600 ! dataset time interval (seconds) ! ---> h1g, enhance the max. analysis data files, 2012-10-22 -! integer, parameter :: nfile_max = 125 +! integer, parameter :: nfile_max = 125 integer, parameter :: nfile_max = 29280 ! maximum: 20-year analysis data, 4*366*20=29280 ! <--- h1g, 2012-10-22 integer :: nfile @@ -94,20 +96,20 @@ module fv_nwp_nudge_mod ! ---> h1g, add the list of input NCEP analysis data files, 2012-10-22 character(len=128):: input_fname_list ="" ! a file lists the input NCEP analysis data character(len=128):: analysis_file_first ="" ! the first NCEP analysis file to be used for nudging, - ! 
by default, the first file in the "input_fname_list" + character(len=128):: analysis_file_last="" ! the last NCEP analysis file to be used for nudging ! by default, the last file in the "input_fname_list" - real :: P_relax = 30.E2 ! from P_relax upwards, nudging is reduced linearly + real :: P_relax = 30.E2 ! from P_relax upwards, nudging is reduced linearly ! proportional to pfull/P_relax - real :: P_norelax = 0.0 ! from P_norelax upwards, no nudging + real :: P_norelax = 0.0 ! from P_norelax upwards, no nudging ! <--- h1g, 2012-10-22 character(len=128):: file_names(nfile_max) character(len=128):: track_file_name integer :: nfile_total = 0 ! =5 for 1-day (if datasets are 6-hr apart) - real :: p_wvp = 100.E2 ! cutoff level for specific humidity nudging + real :: p_wvp = 100.E2 ! cutoff level for specific humidity nudging integer :: kord_data = 8 real :: mask_fac = 0.25 ! [0,1] 0: no mask; 1: full strength @@ -119,8 +121,8 @@ module fv_nwp_nudge_mod logical :: conserve_mom = .true. logical :: conserve_hgt = .true. logical :: tc_mask = .false. - logical :: strong_mask = .false. - logical :: ibtrack = .true. + logical :: strong_mask = .false. + logical :: ibtrack = .true. logical :: nudge_debug = .false. logical :: do_ps_bias = .false. logical :: nudge_ps = .false. @@ -138,24 +140,24 @@ module fv_nwp_nudge_mod real :: tau_ps = 21600. ! 1-day real :: tau_q = 86400. ! 1-day real :: tau_winds = 21600. ! 6-hr - real :: tau_virt = 43200. + real :: tau_virt = 43200. real :: tau_hght = 43200. real :: q_min = 1.E-8 integer :: jbeg, jend - integer :: nf_uv = 0 - integer :: nf_ps = 0 - integer :: nf_t = 2 - integer :: nf_ht = 1 + integer :: nf_uv = 0 + integer :: nf_ps = 0 + integer :: nf_t = 2 + integer :: nf_ht = 1 ! starting layer (top layer is sponge layer and is skipped) - integer :: kstart = 2 + integer :: kstart = 2 ! skip "kbot" layers - integer :: kbot_winds = 0 - integer :: kbot_t = 0 - integer :: kbot_q = 0 + integer :: kbot_winds = 0 + integer :: kbot_t = 0 + integer :: kbot_q = 0 logical :: analysis_time !-- Tropical cyclones -------------------------------------------------------------------- @@ -166,7 +168,7 @@ module fv_nwp_nudge_mod real :: grid_size = 28.E3 real :: tau_vt_slp = 1200. real :: tau_vt_wind = 1200. - real :: tau_vt_rad = 4.0 + real :: tau_vt_rad = 4.0 real :: pt_lim = 0.2 real :: slp_env = 101010. ! storm environment pressure (pa) @@ -181,7 +183,7 @@ module fv_nwp_nudge_mod real :: r_inc = 25.E3 real, parameter:: del_r = 50.E3 real:: elapsed_time = 0.0 - real:: nudged_time = 1.E12 ! seconds + real:: nudged_time = 1.E12 ! seconds ! usage example: set to 43200. to do inline vortex breeding ! for only the first 12 hours ! 
In addition, specify only 3 analysis files (12 hours) @@ -214,10 +216,10 @@ module fv_nwp_nudge_mod kbot_t, kbot_q, p_wvp, time_varying, time_interval, use_pt_inc, pt_lim, & tau_vt_rad, r_lo, r_hi, use_high_top, add_bg_wind, conserve_mom, conserve_hgt, & min_nobs, min_mslp, nudged_time, r_fac, r_min, r_inc, ibtrack, track_file_name, file_names, & - input_fname_list, analysis_file_first, analysis_file_last, P_relax, P_norelax !h1g, add 3 namelist variables, 2012-20-22 + input_fname_list, analysis_file_first, analysis_file_last, P_relax, P_norelax !h1g, add 3 namelist variables, 2012-20-22 contains - + subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt, zvir, ptop, & ak, bk, ts, ps, delp, ua, va, pt, nwat, q, phis, gridstruct, & @@ -270,14 +272,14 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt real, pointer, dimension(:,:) :: sina_u, sina_v real, pointer, dimension(:,:,:) :: sin_sg real(kind=R_GRID), pointer, dimension(:,:,:) :: vlon, vlat - + real, pointer, dimension(:,:) :: dx, dy, rdxc, rdyc real(kind=R_GRID), pointer :: da_min - logical, pointer :: nested, sw_corner, se_corner, nw_corner, ne_corner + logical, pointer :: bounded_domain, sw_corner, se_corner, nw_corner, ne_corner - if ( .not. module_is_initialized ) then + if ( .not. module_is_initialized ) then call mpp_error(FATAL,'==> Error from fv_nwp_nudge: module not initialized') endif agrid => gridstruct%agrid_64 @@ -297,12 +299,11 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt da_min => gridstruct%da_min - nested => gridstruct%nested sw_corner => gridstruct%sw_corner se_corner => gridstruct%se_corner nw_corner => gridstruct%nw_corner ne_corner => gridstruct%ne_corner - + if ( no_obs ) then #ifndef DYCORE_SOLO forecast_mode = .true. @@ -328,7 +329,7 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt do k=1,npz press(k) = 0.5*(ak(k) + ak(k+1)) + 0.5*(bk(k)+bk(k+1))*1.E5 if ( press(k) < P_relax ) then - profile(k) = max(0.01, press(k)/P_relax) + profile(k) = max(0.01, press(k)/P_relax) endif ! above P_norelax, no nudging. added by h1g @@ -341,17 +342,17 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt !$OMP parallel do default(none) shared(npz,press,prof_t) do k=1,npz if ( press(k) < 10.E2 ) then - prof_t(k) = max(0.01, press(k)/10.E2) + prof_t(k) = max(0.01, press(k)/10.E2) endif enddo prof_t(1) = 0. - + ! Water vapor: prof_q(:) = 1. !$OMP parallel do default(none) shared(npz,press,prof_q) do k=1,npz if ( press(k) < 300.E2 ) then - prof_q(k) = max(0., press(k)/300.E2) + prof_q(k) = max(0., press(k)/300.E2) endif enddo prof_q(1) = 0. @@ -363,7 +364,7 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt ptmp = ak(k+1) + bk(k+1)*1.E5 if ( ptmp > p_trop ) then k_trop = k - exit + exit endif enddo endif @@ -413,7 +414,7 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt do j=js,je do i=is,ie - if ( abs(ps(i,j)-ps_obs(i,j)) > 2.e2 ) then + if ( abs(ps(i,j)-ps_obs(i,j)) > 2.e2 ) then ps_fac(i,j) = 2.e2 / abs(ps(i,j)-ps_obs(i,j)) else ps_fac(i,j) = 1. @@ -423,7 +424,7 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt if( analysis_time ) then !------------------------------------------- -! Compute RMSE, bias, and correlation of SLP +! 
Compute RMSE, bias, and correlation of SLP !------------------------------------------- do j=js,je do i=is,ie @@ -452,7 +453,7 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt endif enddo enddo - + call rmse_bias(m_err, rms, bias, area) call corr(slp_m, slp_n, co, area) @@ -659,8 +660,8 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt nullify(area) nullify(rarea) - nullify(vlon) - nullify(vlat) + nullify(vlon) + nullify(vlat) nullify(sina_u) nullify(sina_v) nullify(sin_sg) @@ -672,7 +673,6 @@ subroutine fv_nwp_nudge ( Time, dt, npx, npy, npz, ps_dt, u_dt, v_dt, t_dt, q_dt nullify(da_min) - nullify(nested) nullify(sw_corner) nullify(se_corner) nullify(nw_corner) @@ -790,7 +790,7 @@ subroutine ps_nudging(dt, factor, npz, ak, bk, ps_obs, mask, tm, ps, phis, delp, ps(i,j) = ak(1) enddo enddo - + rdt = dt / (tau_ps/factor + dt) do k=1,npz dbk = rdt*(bk(k+1) - bk(k)) @@ -862,14 +862,14 @@ subroutine ps_bias_correction ( ps_dt, is, ie, js, je, isd, ied, jsd, jed, area if(master .and. nudge_debug) write(*,*) 'Significant PS bias=', -bias endif - if ( bias > 0. ) then + if ( bias > 0. ) then psum = 0. do j=js,je do i=is,ie if ( ps_dt(i,j) > 0. ) then psum = psum + area(i,j) endif - enddo + enddo enddo call mp_reduce_sum( psum ) @@ -880,7 +880,7 @@ subroutine ps_bias_correction ( ps_dt, is, ie, js, je, isd, ied, jsd, jed, area if ( ps_dt(i,j) > 0.0 ) then ps_dt(i,j) = max(0.0, ps_dt(i,j) - bias) endif - enddo + enddo enddo else psum = 0. @@ -889,18 +889,18 @@ subroutine ps_bias_correction ( ps_dt, is, ie, js, je, isd, ied, jsd, jed, area if ( ps_dt(i,j) < 0. ) then psum = psum + area(i,j) endif - enddo + enddo enddo call mp_reduce_sum( psum ) - bias = bias * total_area / psum + bias = bias * total_area / psum do j=js,je do i=is,ie if ( ps_dt(i,j) < 0.0 ) then ps_dt(i,j) = min(0.0, ps_dt(i,j) - bias) endif - enddo + enddo enddo endif @@ -1050,12 +1050,12 @@ subroutine get_obs(Time, dt, zvir, ak, bk, ps, ts, ps_obs, delp, pt, nwat, q, u_ ps_obs(:,:) = alpha*ps_dat(:,:,1) + beta*ps_dat(:,:,2) !--------------------------------- -!*** nudge & update ps & delp here +!*** nudge & update ps & delp here !--------------------------------- if (nudge_ps) then allocate ( wt(is:ie,js:je,km) ) - wt(:,:,:) = alpha*t_dat(:,:,:,1) + beta*t_dat(:,:,:,2) + wt(:,:,:) = alpha*t_dat(:,:,:,1) + beta*t_dat(:,:,:,2) ! 
Needs gz3 for ps_nudging call get_int_hght(npz, ak, bk, ps(is:ie,js:je), delp, ps_obs(is:ie,js:je), wt) do j=js,je @@ -1063,7 +1063,7 @@ subroutine get_obs(Time, dt, zvir, ak, bk, ps, ts, ps_obs, delp, pt, nwat, q, u_ tm(i,j) = wt(i,j,km) enddo enddo - deallocate ( wt ) + deallocate ( wt ) allocate ( uu(isd:ied,jsd:jed,npz) ) allocate ( vv(isd:ied,jsd:jed,npz) ) @@ -1073,13 +1073,13 @@ subroutine get_obs(Time, dt, zvir, ak, bk, ps, ts, ps_obs, delp, pt, nwat, q, u_ do k=1,npz do j=js,je do i=is,ie - u_dt(i,j,k) = u_dt(i,j,k) + (uu(i,j,k) - ua(i,j,k)) / dt - v_dt(i,j,k) = v_dt(i,j,k) + (vv(i,j,k) - va(i,j,k)) / dt + u_dt(i,j,k) = u_dt(i,j,k) + (uu(i,j,k) - ua(i,j,k)) / dt + v_dt(i,j,k) = v_dt(i,j,k) + (vv(i,j,k) - va(i,j,k)) / dt enddo enddo enddo - deallocate (uu ) - deallocate (vv ) + deallocate (uu ) + deallocate (vv ) endif allocate ( ut(is:ie,js:je,npz) ) @@ -1112,8 +1112,8 @@ subroutine get_obs(Time, dt, zvir, ak, bk, ps, ts, ps_obs, delp, pt, nwat, q, u_ t_obs(:,:,:) = t_obs(:,:,:) + beta*ut(:,:,:) q_obs(:,:,:) = q_obs(:,:,:) + beta*vt(:,:,:) - deallocate ( ut ) - deallocate ( vt ) + deallocate ( ut ) + deallocate ( vt ) end subroutine get_obs @@ -1122,7 +1122,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct character(len=17) :: mod_name = 'fv_nudge' type(time_type), intent(in):: time integer, intent(in):: axes(4) - integer, intent(in):: npz ! vertical dimension + integer, intent(in):: npz ! vertical dimension real, intent(in):: zvir type(fv_grid_bounds_type), intent(IN) :: bd real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: phis @@ -1149,7 +1149,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct ie = bd%ie js = bd%js je = bd%je - + isd = bd%isd ied = bd%ied jsd = bd%jsd @@ -1157,7 +1157,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct agrid => gridstruct%agrid - + master = is_master() do_adiabatic_init = .false. deg2rad = pi/180. @@ -1185,7 +1185,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct end do 10 call close_file ( unit ) end if - call write_version_number (version, tagname) + call write_version_number ( 'FV_NUDGE_MOD', version ) if ( master ) then f_unit=stdlog() write( f_unit, nml = fv_nwp_nudge_nml ) @@ -1200,23 +1200,23 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct do while ( io .eq. 0 ) read ( input_fname_unit, '(a)', iostat = io, end = 101 ) fname_tmp - if( trim(fname_tmp) .ne. "" ) then ! escape any empty record + if( trim(fname_tmp) .ne. "" ) then ! escape any empty record if ( trim(fname_tmp) == trim(analysis_file_last) ) then nt = nt + 1 file_names(nt) = 'INPUT/'//trim(fname_tmp) if(master .and. nudge_debug) write(*,*) 'From NCEP file list, last file: ', nt, file_names(nt) nt = 0 goto 101 ! read last analysis data and then close file - endif ! trim(fname_tmp) == trim(analysis_file_last) + endif ! trim(fname_tmp) == trim(analysis_file_last) if ( trim(analysis_file_first) == "" ) then nt = nt + 1 - file_names(nt) = 'INPUT/'//trim(fname_tmp) + file_names(nt) = 'INPUT/'//trim(fname_tmp) if(master .and. nudge_debug) then - if( nt .eq. 1 ) then - write(*,*) 'From NCEP file list, first file: ', nt, file_names(nt),trim(analysis_file_first) + if( nt .eq. 1 ) then + write(*,*) 'From NCEP file list, first file: ', nt, file_names(nt),trim(analysis_file_first) else - write(*,*) 'From NCEP file list: ', nt, file_names(nt) + write(*,*) 'From NCEP file list: ', nt, file_names(nt) endif ! 
nt .eq. 1 endif ! master .and. nudge_debug else @@ -1224,15 +1224,15 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct nt = nt + 1 file_names(nt) = 'INPUT/'//trim(fname_tmp) if(master .and. nudge_debug) then - if( nt .eq. 1 ) then - write(*,*) 'From NCEP file list, first file: ', nt, file_names(nt),trim(analysis_file_first) + if( nt .eq. 1 ) then + write(*,*) 'From NCEP file list, first file: ', nt, file_names(nt),trim(analysis_file_first) else - write(*,*) 'From NCEP file list: ', nt, file_names(nt) + write(*,*) 'From NCEP file list: ', nt, file_names(nt) endif ! nt .eq. 1 endif ! master .and. nudge_debug - endif ! trim(fname_tmp) == trim(analysis_file_first) .or. nt > 0 - endif ! trim(analysis_file_first) == "" - endif ! trim(fname_tmp) .ne. "" + endif ! trim(fname_tmp) == trim(analysis_file_first) .or. nt > 0 + endif ! trim(analysis_file_first) == "" + endif ! trim(fname_tmp) .ne. "" end do ! io .eq. 0 101 close( input_fname_unit ) endif @@ -1283,7 +1283,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct do j=1,jm lat(j) = lat(j) * deg2rad enddo - + allocate ( ak0(km+1) ) allocate ( bk0(km+1) ) @@ -1295,7 +1295,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct ! Note: definition of NCEP hybrid is p(k) = a(k)*1.E5 + b(k)*ps ak0(:) = ak0(:) * 1.E5 -! Limiter to prevent NAN at top during remapping +! Limiter to prevent NAN at top during remapping if ( bk0(1) < 1.E-9 ) ak0(1) = max(1.e-9, ak0(1)) if ( master ) then @@ -1318,7 +1318,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct do j=js,je do i=is,ie j1 = jdc(i,j) - jbeg = min(jbeg, j1) + jbeg = min(jbeg, j1) jend = max(jend, j1+1) enddo enddo @@ -1341,7 +1341,7 @@ subroutine fv_nwp_nudge_init(time, axes, npz, zvir, ak, bk, ts, phis, gridstruct module_is_initialized = .true. - + nullify(agrid) end subroutine fv_nwp_nudge_init @@ -1369,12 +1369,12 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) if( .not. file_exist(fname) ) then - call mpp_error(FATAL,'==> Error from get_ncep_analysis: file not found') + call mpp_error(FATAL,'==> Error from get_ncep_analysis: file not found: '//fname) else call open_ncfile( fname, ncid ) ! open the file - if(master) write(*,*) 'Reading NCEP anlysis file:', fname + if(master) write(*,*) 'Reading NCEP anlysis file:', fname endif - + if ( read_ts ) then ! read skin temperature; could be used for SST allocate ( wk1(im,jm) ) @@ -1384,7 +1384,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) if ( .not. land_ts ) then allocate ( wk0(im,jm) ) ! Read NCEP ORO (1; land; 0: ocean; 2: sea_ice) - + ! ---> h1g, read either 'ORO' or 'LAND', 2016-08-10 status = nf_inq_varid (ncid, 'ORO', var3id) if (status .eq. NF_NOERR) then @@ -1393,12 +1393,12 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) else !there is no 'ORO' status = nf_inq_varid (ncid, 'LAND', var3id) if (status .eq. NF_NOERR) then - call get_var3_r4( ncid, 'LAND', 1,im, 1,jm, 1,1, wk0 ) + call get_var3_r4( ncid, 'LAND', 1,im, 1,jm, 1,1, wk0 ) else - call mpp_error(FATAL,'Neither ORO nor LAND exists in re-analysis data') + call mpp_error(FATAL,'Neither ORO nor LAND exists in re-analysis data') endif - endif -! <--- h1g, 2016-08-10 + endif +! <--- h1g, 2016-08-10 do j=1,jm tmean = 0. 
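The get_ncep_analysis changes above make the missing-file abort self-describing and read the land mask from either 'ORO' or 'LAND': probe for the first name with nf_inq_varid, and only if that fails try the second before giving up. A minimal standalone sketch of the same probe against the f77 NetCDF interface (the input path is hypothetical; any NetCDF file carrying one of the two variables will do):

    program probe_var_sketch
      implicit none
      include 'netcdf.inc'
      integer :: status, ncid, varid

      status = nf_open('INPUT/analysis.nc', NF_NOWRITE, ncid)   ! hypothetical path
      if (status .ne. NF_NOERR) stop 'cannot open file'

      status = nf_inq_varid(ncid, 'ORO', varid)                 ! try 'ORO' first
      if (status .ne. NF_NOERR) then
         status = nf_inq_varid(ncid, 'LAND', varid)             ! fall back to 'LAND'
         if (status .ne. NF_NOERR) stop 'Neither ORO nor LAND exists'
      endif
      write(*,*) 'land/sea field found, varid =', varid
      status = nf_close(ncid)
    end program probe_var_sketch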
@@ -1410,7 +1410,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) endif enddo !------------------------------------------------------- -! Replace TS over interior land with zonal mean SST/Ice +! Replace TS over interior land with zonal mean SST/Ice !------------------------------------------------------- if ( npt /= 0 ) then tmean= tmean / real(npt) @@ -1434,7 +1434,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) enddo endif enddo - deallocate ( wk0 ) + deallocate ( wk0 ) endif ! land_ts do j=js,je @@ -1454,7 +1454,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) if(master) call pmaxmin( 'SST_ncep', sst_ncep, i_sst, j_sst, 1.) ! if(nfile/=1 .and. master) call pmaxmin( 'SST_anom', sst_anom, i_sst, j_sst, 1.) #endif - deallocate ( wk1 ) + deallocate ( wk1 ) if (master) write(*,*) 'Done processing NCEP SST' endif ! read_ts @@ -1488,10 +1488,10 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) call get_var3_r4( ncid, 'PHI', 1,im, jbeg,jend, 1,1, wk2 ) wk2 = wk2 * grav ! convert unit from geopotential meter (m) to geopotential height (m2/s2) else - call mpp_error(FATAL,'Neither PHIS nor PHI exists in re-analysis data') + call mpp_error(FATAL,'Neither PHIS nor PHI exists in re-analysis data') endif - endif -! <--- h1g, 2016-08-10 + endif +! <--- h1g, 2016-08-10 do j=js,je @@ -1587,7 +1587,7 @@ subroutine get_ncep_analysis ( ps, u, v, t, q, zvir, ts, nfile, fname, bd ) ! endif - deallocate ( wk3 ) + deallocate ( wk3 ) ! nfile = nfile + 1 @@ -1694,8 +1694,8 @@ subroutine ncep2fms( sst ) ! lon: 0.5, 1.5, ..., 359.5 ! lat: -89.5, -88.5, ... , 88.5, 89.5 - delx = 360./real(i_sst) - dely = 180./real(j_sst) + delx = 360./real(i_sst) + dely = 180./real(j_sst) jt = 1 do 5000 j=1,j_sst @@ -1774,7 +1774,7 @@ subroutine get_int_hght(npz, ak, bk, ps, delp, ps0, tv) do i=is,ie pn0(i,k) = log( ak0(k) + bk0(k)*ps0(i,j) ) enddo - enddo + enddo do i=is,ie gz3(i,j,km+1) = gz0(i,j) ! Data Surface geopotential enddo @@ -1818,7 +1818,7 @@ subroutine remap_tq( npz, ak, bk, ps, delp, t, q, & pe0(i,k) = ak0(k) + bk0(k)*ps0(i,j) pn0(i,k) = log(pe0(i,k)) enddo - enddo + enddo !------ ! Model !------ @@ -1965,11 +1965,11 @@ subroutine fv_nwp_nudge_end deallocate ( ak0 ) deallocate ( bk0 ) - deallocate ( lat ) - deallocate ( lon ) + deallocate ( lat ) + deallocate ( lon ) - deallocate ( gz3 ) - deallocate ( gz0 ) + deallocate ( gz3 ) + deallocate ( gz0 ) end subroutine fv_nwp_nudge_end @@ -2004,7 +2004,7 @@ subroutine get_tc_mask(time, mask, agrid) do j=js, je do i=is, ie dist = great_circle_dist(pos, agrid(i,j,1:2), radius) - if( dist < 6.*r_vor ) then + if( dist < 6.*r_vor ) then mask(i,j) = mask(i,j) * ( 1. - mask_fac*exp(-(0.5*dist/r_vor)**2)*min(1.,(slp_env-slp_o)/10.E2) ) endif enddo ! i-loop @@ -2038,7 +2038,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del real, intent(inout):: pk(is:ie,js:je, npz+1) ! pe**kappa real, intent(inout):: pe(is-1:ie+1, npz+1,js-1:je+1) ! edge pressure (pascal) - real, intent(inout):: pkz(is:ie,js:je,npz) + real, intent(inout):: pkz(is:ie,js:je,npz) real, intent(out):: peln(is:ie,npz+1,js:je) ! ln(pe) type(fv_grid_type), target :: gridstruct @@ -2078,7 +2078,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del ! 
Advance (local) time call get_date(fv_time, year, month, day, hour, minute, second) if ( year /= year_track_data ) then - if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' + if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' return endif time = fv_time ! fv_time is the time at past time step (set in fv_diag) @@ -2194,7 +2194,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del else ! Lower top for vrotex breeding if ( slp_o > 1000.E2 ) then - pbtop = 900.E2 + pbtop = 900.E2 else pbtop = max(500.E2, 900.E2-5.*(1000.E2-slp_o)) ! mp48 endif @@ -2228,10 +2228,10 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del a_sum = 0. do j=js, je do i=is, ie - if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<250.*grav ) then + if( dist(i,j)<(r_vor+del_r) .and. dist(i,j)>r_vor .and. phis(i,j)<250.*grav ) then p_count = p_count + 1. - p_sum = p_sum + slp(i,j)*area(i,j) - a_sum = a_sum + area(i,j) + p_sum = p_sum + slp(i,j)*area(i,j) + a_sum = a_sum + area(i,j) endif enddo enddo @@ -2303,7 +2303,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del p_hi = p_env - (p_env-slp_o) * exp( -r_hi*f1**2 ) ! upper bound p_lo = p_env - (p_env-slp_o) * exp( -r_lo*f1**2 ) ! lower bound - if ( ps(i,j) > p_hi .and. tm(i,j) < tm_max ) then + if ( ps(i,j) > p_hi .and. tm(i,j) < tm_max ) then ! do nothing if lowest layer is too hot ! Under-development: relx = relx0*exp( -tau_vt_rad*f1**2 ) @@ -2320,7 +2320,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del delps = relx*(slp(i,j) - p_lo) ! Note: slp is used here else goto 400 ! do nothing; proceed to next storm - endif + endif #ifdef SIM_TEST pbreed = ak(1) @@ -2362,7 +2362,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del #endif endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop @@ -2376,7 +2376,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del do j=js, je do i=is, ie if( dist(i,j)r2 ) then - p_sum = p_sum + area(i,j) + p_sum = p_sum + area(i,j) endif enddo enddo @@ -2488,7 +2488,7 @@ subroutine breed_slp_inline(nstep, dt, npz, ak, bk, phis, pe, pk, peln, pkz, del nullify(agrid) nullify(area) - + end subroutine breed_slp_inline @@ -2531,17 +2531,17 @@ subroutine breed_srf_w10(time, dt, npz, ak, bk, ps, phis, slp, delp, u, v, grids real(kind=R_GRID), pointer, dimension(:,:,:) :: agrid, vlon, vlat dx => gridstruct%dx - dy => gridstruct%dy - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - a11 => gridstruct%a11 - a21 => gridstruct%a21 - a12 => gridstruct%a12 - a22 => gridstruct%a22 - area => gridstruct%area + dy => gridstruct%dy + rdxa => gridstruct%rdxa + rdya => gridstruct%rdya + a11 => gridstruct%a11 + a21 => gridstruct%a21 + a12 => gridstruct%a12 + a22 => gridstruct%a22 + area => gridstruct%area agrid => gridstruct%agrid_64 - vlon => gridstruct%vlon - vlat => gridstruct%vlat + vlon => gridstruct%vlon + vlat => gridstruct%vlat if ( nstorms==0 ) then @@ -2598,7 +2598,7 @@ subroutine breed_srf_w10(time, dt, npz, ak, bk, ps, phis, slp, delp, u, v, grids time_tc(1,n), pos(1), pos(2), w10_o, slp_o, r_vor, p_env) if ( slp_o<90000. .or. slp_o>slp_env .or. abs(pos(2))*rad2deg>35. ) goto 3000 ! next storm - + do j=js, je do i=is, ie @@ -2653,7 +2653,7 @@ subroutine breed_srf_w10(time, dt, npz, ak, bk, ps, phis, slp, delp, u, v, grids if( r_max<0. 
) call mpp_error(FATAL,'==> Error in r_max') ! --------------------------------------------------- -! Determine surface wind speed and radius for nudging +! Determine surface wind speed and radius for nudging ! --------------------------------------------------- ! Compute surface roughness z0 from w10, based on Eq (4) & (5) from Moon et al. 2007 @@ -2743,7 +2743,7 @@ subroutine breed_srf_w10(time, dt, npz, ak, bk, ps, phis, slp, delp, u, v, grids us(i,j) = relx*(ut-us(i,j)) vs(i,j) = relx*(vt-vs(i,j)) endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop #else @@ -2764,7 +2764,7 @@ subroutine breed_srf_w10(time, dt, npz, ak, bk, ps, phis, slp, delp, u, v, grids us(i,j) = relx*(ut-us(i,j)) vs(i,j) = relx*(vt-vs(i,j)) endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop #endif @@ -2848,7 +2848,7 @@ subroutine breed_srf_winds(time, dt, npz, u_obs, v_obs, ak, bk, ps, phis, delp, time_tc(1,n), pos(1), pos(2), w10_o, slp_o, r_vor, p_env) if ( slp_o<90000. .or. slp_o>slp_env .or. abs(pos(2))*rad2deg>35. ) goto 3000 ! next storm - + do j=js, je do i=is, ie @@ -2904,7 +2904,7 @@ subroutine breed_srf_winds(time, dt, npz, u_obs, v_obs, ak, bk, ps, phis, delp, if( r_max<0. ) call mpp_error(FATAL,'==> Error in r_max') ! --------------------------------------------------- -! Determine surface wind speed and radius for nudging +! Determine surface wind speed and radius for nudging ! --------------------------------------------------- ! Compute surface roughness z0 from w10, based on Eq (4) & (5) from Moon et al. 2007 @@ -2999,7 +2999,7 @@ subroutine breed_srf_winds(time, dt, npz, u_obs, v_obs, ak, bk, ps, phis, delp, ua(i,j,k) = ua(i,j,k) + relx*(ut-ua(i,j,k)) va(i,j,k) = va(i,j,k) + relx*(vt-va(i,j,k)) endif -400 continue +400 continue enddo ! end i-loop enddo ! end j-loop @@ -3045,12 +3045,12 @@ subroutine get_slp_obs(time, nobs, lon_obs, lat_obs, w10, mslp, slp_out, r_out, real(KIND=4), intent(in):: w10(nobs) ! observed 10-m widn speed real(KIND=4), intent(in):: mslp(nobs) ! observed SLP in pa real(KIND=4), intent(in):: slp_out(nobs) ! slp at r_out - real(KIND=4), intent(in):: r_out(nobs) ! + real(KIND=4), intent(in):: r_out(nobs) ! real(KIND=4), intent(in):: time_obs(nobs) real, optional, intent(in):: stime real, optional, intent(out):: fact ! Output - real(kind=R_GRID), intent(out):: x_o , y_o ! position of the storm center + real(kind=R_GRID), intent(out):: x_o , y_o ! position of the storm center real, intent(out):: w10_o ! 10-m wind speed real, intent(out):: slp_o ! Observed sea-level-pressure (pa) real, intent(out):: r_vor, p_vor @@ -3076,7 +3076,7 @@ subroutine get_slp_obs(time, nobs, lon_obs, lat_obs, w10, mslp, slp_out, r_out, call get_date(time, year, month, day, hour, minute, second) if ( year /= year_track_data ) then - if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' + if (master) write(*,*) 'Warning: The year in storm track data is not the same as model year' return endif @@ -3177,7 +3177,7 @@ subroutine slp_obs_init call mpp_error(FATAL,'==> Error in reading best track data') endif - do while ( ts_name=='start' ) + do while ( ts_name=='start' ) nstorms = nstorms + 1 nobs_tc(nstorms) = nobs ! observation count for this storm @@ -3227,7 +3227,7 @@ subroutine slp_obs_init y_obs(nobs,nstorms) = lat_deg * deg2rad if ( GMT == 'GMT' ) then ! 
Transfrom x from (-180 , 180) to (0, 360) then to radian - if ( lon_deg < 0 ) then + if ( lon_deg < 0 ) then x_obs(nobs,nstorms) = (360.+lon_deg) * deg2rad else x_obs(nobs,nstorms) = (360.-lon_deg) * deg2rad @@ -3243,7 +3243,7 @@ subroutine slp_obs_init close(unit) - if(master) then + if(master) then write(*,*) 'TC vortex breeding: total storms=', nstorms if ( nstorms/=0 ) then do n=1,nstorms @@ -3272,7 +3272,7 @@ real function calday(year, month, day, hour, minute, sec) if( month /= 1 ) then do m=1, month-1 - if( m==2 .and. leap_year(year) ) then + if( m==2 .and. leap_year(year) ) then ds = ds + 29 else ds = ds + days(m) @@ -3300,7 +3300,7 @@ logical function leap_year(ny) ! ! No leap years prior to 0000 ! - parameter ( ny00 = 0000 ) ! The threshold for starting leap-year + parameter ( ny00 = 0000 ) ! The threshold for starting leap-year if( ny >= ny00 ) then if( mod(ny,100) == 0. .and. mod(ny,400) == 0. ) then @@ -3404,7 +3404,7 @@ end subroutine del2_uv subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) ! This routine is for filtering the physics tendency integer, intent(in):: kmd - integer, intent(in):: nmax ! must be no greater than 3 + integer, intent(in):: nmax ! must be no greater than 3 real, intent(in):: cd ! cd = K * da_min; 0 < K < 0.25 type(fv_grid_bounds_type), intent(IN) :: bd real, intent(inout):: qdt(is:ie,js:je,kmd) @@ -3420,12 +3420,12 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) real, pointer, dimension(:,:) :: rarea, area real, pointer, dimension(:,:) :: sina_u, sina_v real, pointer, dimension(:,:,:) :: sin_sg - + real, pointer, dimension(:,:) :: dx, dy, rdxc, rdyc real(kind=R_GRID), pointer :: da_min - logical, pointer :: nested, sw_corner, se_corner, nw_corner, ne_corner + logical, pointer :: bounded_domain, sw_corner, se_corner, nw_corner, ne_corner area => gridstruct%area rarea => gridstruct%rarea @@ -3441,12 +3441,12 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) da_min => gridstruct%da_min - nested => gridstruct%nested + bounded_domain => gridstruct%bounded_domain sw_corner => gridstruct%sw_corner se_corner => gridstruct%se_corner nw_corner => gridstruct%nw_corner ne_corner => gridstruct%ne_corner - + ntimes = min(3, nmax) damp = cd * da_min @@ -3467,13 +3467,13 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) nt = ntimes - n -!$OMP parallel do default(none) shared(is,ie,js,je,kmd,nt,dy,q,isd,jsd,npx,npy,nested, & +!$OMP parallel do default(none) shared(is,ie,js,je,kmd,nt,dy,q,isd,jsd,npx,npy,bounded_domain, & !$OMP bd,sw_corner,se_corner,nw_corner,ne_corner, & !$OMP sina_u,rdxc,sin_sg,dx,rdyc,sina_v,qdt,damp,rarea) & !$OMP private(fx, fy) do k=1,kmd - if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, nested, bd, & + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, bounded_domain, bd, & sw_corner, se_corner, nw_corner, ne_corner) do j=js-nt,je+nt do i=is-nt,ie+1+nt @@ -3481,11 +3481,11 @@ subroutine del2_scalar(qdt, cd, kmd, nmax, bd, npx, npy, gridstruct, domain) enddo if (is == 1) fx(i,j) = dy(is,j)*(q(is-1,j,k)-q(is,j,k))*rdxc(is,j)* & 0.5*(sin_sg(1,j,1) + sin_sg(0,j,3)) - if (ie+1 == npx) fx(i,j) = dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & + if (ie+1 == npx) fx(i,j) = dy(ie+1,j)*(q(ie,j,k)-q(ie+1,j,k))*rdxc(ie+1,j)* & 0.5*(sin_sg(npx,j,1) + sin_sg(npx-1,j,3)) enddo - if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, nested, bd, & + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, bounded_domain, bd, & 
sw_corner, se_corner, nw_corner, ne_corner) do j=js-nt,je+1+nt if (j == 1 .OR. j == npy) then @@ -3559,7 +3559,7 @@ subroutine corr(a, b, co, area) call std(a, m_a, std_a, area) call std(b, m_b, std_b, area) -! Compute correlation: +! Compute correlation: co = 0. do j=js,je do i=is,ie @@ -3587,7 +3587,7 @@ subroutine std(a, mean, stdv, area) enddo enddo call mp_reduce_sum(mean) - mean = mean / total_area + mean = mean / total_area stdv = 0. do j=js,je diff --git a/tools/fv_restart.F90 b/tools/fv_restart.F90 index 0fda414e7..4fd8a9e2d 100644 --- a/tools/fv_restart.F90 +++ b/tools/fv_restart.F90 @@ -33,19 +33,21 @@ module fv_restart_mod use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_bounds_type, R_GRID use fv_io_mod, only: fv_io_init, fv_io_read_restart, fv_io_write_restart, & remap_restart, fv_io_register_restart, fv_io_register_nudge_restart, & - fv_io_register_restart_BCs, fv_io_register_restart_BCs_NH, fv_io_write_BCs, fv_io_read_BCs + fv_io_register_restart_BCs, fv_io_write_BCs, fv_io_read_BCs use fv_grid_utils_mod, only: ptop_min, fill_ghost, g_sum, & make_eta_level, cubed_to_latlon, great_circle_dist use fv_diagnostics_mod, only: prt_maxmin use init_hydro_mod, only: p_var use mpp_domains_mod, only: mpp_update_domains, domain2d, DGRID_NE - use mpp_mod, only: mpp_chksum, stdout, mpp_error, FATAL, NOTE, get_unit, mpp_sum - use test_cases_mod, only: test_case, alpha, init_case, init_double_periodic, init_latlon - use fv_mp_mod, only: is_master, switch_current_Atm, mp_reduce_min, mp_reduce_max + use mpp_mod, only: mpp_chksum, stdout, mpp_error, FATAL, NOTE + use mpp_mod, only: get_unit, mpp_sum, mpp_broadcast + use mpp_mod, only: mpp_get_current_pelist, mpp_npes, mpp_set_current_pelist + use test_cases_mod, only: alpha, init_case, init_double_periodic!, init_latlon + use fv_mp_mod, only: is_master, mp_reduce_min, mp_reduce_max, corners_YDir => YDir, fill_corners, tile_fine, global_nest_domain use fv_surf_map_mod, only: sgh_g, oro_g use tracer_manager_mod, only: get_tracer_names use field_manager_mod, only: MODEL_ATMOS - use external_ic_mod, only: get_external_ic, get_cubed_sphere_terrain + use external_ic_mod, only: get_external_ic use fv_eta_mod, only: compute_dz_var, compute_dz_L32, set_hybrid_z use fv_surf_map_mod, only: del2_cubed_sphere, del4_cubed_sphere use boundary_mod, only: fill_nested_grid, nested_grid_BC, update_coarse_grid @@ -57,22 +59,18 @@ module fv_restart_mod use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST, mpp_get_C2F_index, WEST, SOUTH use mpp_domains_mod, only: mpp_global_field use fms_mod, only: file_exist + use fv_treat_da_inc_mod, only: read_da_inc implicit none private - public :: fv_restart_init, fv_restart_end, fv_restart, fv_write_restart, setup_nested_boundary_halo - public :: d2c_setup, d2a_setup + public :: fv_restart_init, fv_restart_end, fv_restart, fv_write_restart real(kind=R_GRID), parameter :: cnst_0p20=0.20d0 !--- private data type logical :: module_is_initialized = .FALSE. -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - -contains +contains !##################################################################### ! @@ -95,389 +93,428 @@ end subroutine fv_restart_init ! The fv core restart facility ! ! 
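The key interface change in fv_restart.F90 follows below: the per-PE logical mask grids_on_this_pe(:) is dropped in favor of a single integer this_grid, so each PE initializes only the grid it owns and the per-grid loops simply cycle past the others, while data that other grids still need (ptop, ak, bk, smoothed topography) is passed by explicit mpp_broadcast calls across pelists. A minimal control-flow sketch of that pattern (toy values; not code from the patch):

    program this_grid_sketch
      implicit none
      integer, parameter :: ntileMe = 3   ! number of grids (toy value)
      integer :: n, this_grid

      this_grid = 2                       ! the one grid this PE owns

      do n = 1, ntileMe
         if (n /= this_grid) cycle        ! replaces the old "if (.not. grids_on_this_pe(n))" tests
         write(*,*) 'initializing grid', n
      enddo
    end program this_grid_sketch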
- subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_type, grids_on_this_pe) + subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_type, this_grid) type(domain2d), intent(inout) :: fv_domain type(fv_atmos_type), intent(inout) :: Atm(:) real, intent(in) :: dt_atmos integer, intent(out) :: seconds integer, intent(out) :: days logical, intent(inout) :: cold_start - integer, intent(in) :: grid_type - logical, intent(INOUT) :: grids_on_this_pe(:) - + integer, intent(in) :: grid_type, this_grid integer :: i, j, k, n, ntileMe, nt, iq - integer :: isc, iec, jsc, jec, npz, npz_rst, ncnst, ntprog, ntdiag - integer :: isd, ied, jsd, jed + integer :: isc, iec, jsc, jec, ncnst, ntprog, ntdiag + integer :: isd, ied, jsd, jed, npz integer isd_p, ied_p, jsd_p, jed_p, isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg,jeg, npx_p, npy_p real, allocatable :: g_dat(:,:,:) integer :: unit real, allocatable :: dz1(:) - real rgrav, f00, ztop, pertn + real rgrav, f00, ztop, pertn, ph logical :: hybrid - logical :: cold_start_grids(size(Atm)) character(len=128):: tname, errstring, fname, tracer_name character(len=120):: fname_ne, fname_sw character(len=3) :: gn - integer :: npts + integer :: npts, sphum + integer, allocatable :: pelist(:), smoothed_topo(:) real :: sumpertn + real :: zvir + + logical :: do_read_restart = .false. + logical :: do_read_restart_bc = .false. + integer, allocatable :: ideal_test_case(:), new_nest_topo(:) rgrav = 1. / grav if(.not.module_is_initialized) call mpp_error(FATAL, 'You must call fv_restart_init.') ntileMe = size(Atm(:)) + allocate(smoothed_topo(ntileme)) + smoothed_topo(:) = 0 + allocate(ideal_test_case(ntileme)) + ideal_test_case(:) = 0 + allocate(new_nest_topo(ntileme)) + new_nest_topo(:) = 0 - cold_start_grids(:) = cold_start do n = 1, ntileMe - if (is_master()) then - print*, 'FV_RESTART: ', n, cold_start_grids(n) - endif + isd = Atm(n)%bd%isd + ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd + jed = Atm(n)%bd%jed + isc = Atm(n)%bd%isc + iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc + jec = Atm(n)%bd%jec + ncnst = Atm(n)%ncnst + if( is_master() ) write(*,*) 'in fv_restart ncnst=', ncnst + npz = Atm(n)%npz + ntprog = size(Atm(n)%q,4) + ntdiag = size(Atm(n)%qdiag,4) +!!$ if (is_master()) then +!!$ print*, 'FV_RESTART: ', n, cold_start_grids(n) +!!$ endif + + !1. sort out restart, external_ic, and cold-start (idealized) if (Atm(n)%neststruct%nested) then - write(fname,'(A, I2.2, A)') 'INPUT/fv_core.res.nest', Atm(n)%grid_number, '.nc' + write(fname, '(A, I2.2, A)') 'INPUT/fv_core.res.nest', Atm(n)%grid_number, '.nc' write(fname_ne,'(A, I2.2, A)') 'INPUT/fv_BC_ne.res.nest', Atm(n)%grid_number, '.nc' write(fname_sw,'(A, I2.2, A)') 'INPUT/fv_BC_sw.res.nest', Atm(n)%grid_number, '.nc' - if (Atm(n)%flagstruct%external_ic) then - if (is_master()) print*, 'External IC set on grid', Atm(n)%grid_number, ', re-initializing grid' - cold_start_grids(n) = .true. - Atm(n)%flagstruct%warm_start = .false. !resetting warm_start flag to avoid FATAL error below - else - if (is_master()) print*, 'Searching for nested grid restart file ', trim(fname) - cold_start_grids(n) = .not. file_exist(fname, Atm(n)%domain) - Atm(n)%flagstruct%warm_start = file_exist(fname, Atm(n)%domain)!resetting warm_start flag to avoid FATAL error below + if (is_master()) print*, 'Searching for nested grid BC files ', trim(fname_ne), ' ', trim (fname_sw) + do_read_restart = file_exist(fname, Atm(n)%domain) + do_read_restart_bc = file_exist(fname_ne, Atm(n)%domain) .and. 
file_exist(fname_sw, Atm(n)%domain) + if (is_master()) then + print*, 'FV_RESTART: ', n, do_read_restart, do_read_restart_bc + if (.not. do_read_restart_bc) write(*,*) 'BC files not found, re-generating nested grid boundary conditions' endif + Atm(N)%neststruct%first_step = .not. do_read_restart_bc + else + fname='INPUT/fv_core.res.nc' + do_read_restart = file_exist('INPUT/fv_core.res.nc') .or. file_exist('INPUT/fv_core.res.tile1.nc') + if (is_master()) print*, 'FV_RESTART: ', n, do_read_restart, do_read_restart_bc endif - if (.not. grids_on_this_pe(n)) then - - !Even if this grid is not on this PE, if it has child grids we must send - !along the data that is needed. - !This is a VERY complicated bit of code that attempts to follow the entire decision tree - ! of the initialization without doing anything. This could very much be cleaned up. + !2. Register restarts + !--- call fv_io_register_restart to register restart field to be written out in fv_io_write_restart + if ( n==this_grid ) call fv_io_register_restart(Atm(n)%domain,Atm(n:n)) + !if (Atm(n)%neststruct%nested) call fv_io_register_restart_BCs(Atm(n)) !TODO put into fv_io_register_restart - if (Atm(n)%neststruct%nested) then - if (cold_start_grids(n)) then - if (Atm(n)%parent_grid%flagstruct%n_zs_filter > 0) call fill_nested_grid_topo_halo(Atm(n), .false.) - if (Atm(n)%flagstruct%nggps_ic) then - call fill_nested_grid_topo(Atm(n), .false.) - call fill_nested_grid_topo_halo(Atm(n), .false.) - call nested_grid_BC(Atm(n)%ps, Atm(n)%parent_grid%ps, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.false.) - call setup_nested_boundary_halo(Atm(n),.false.) - else - call fill_nested_grid_topo(Atm(n), .false.) - call setup_nested_boundary_halo(Atm(n),.false.) - if ( Atm(n)%flagstruct%external_ic .and. grid_type < 4 ) call fill_nested_grid_data(Atm(n:n), .false.) - endif - else - if (is_master()) print*, 'Searching for nested grid BC files ', trim(fname_ne), ' ', trim (fname_sw) - - !!!! PROBLEM: file_exist doesn't know to look for fv_BC_ne.res.nest02.nc instead of fv_BC_ne.res.nc on coarse grid - if (file_exist(fname_ne, Atm(n)%domain) .and. file_exist(fname_sw, Atm(n)%domain)) then - else - if ( is_master() ) write(*,*) 'BC files not found, re-generating nested grid boundary conditions' - call fill_nested_grid_topo_halo(Atm(n), .false.) - call setup_nested_boundary_halo(Atm(n), .false.) - Atm(N)%neststruct%first_step = .true. - endif - end if - - if (.not. Atm(n)%flagstruct%hydrostatic .and. Atm(n)%flagstruct%make_nh .and. & - (.not. Atm(n)%flagstruct%nggps_ic .and. .not. Atm(n)%flagstruct%ecmwf_ic) ) then - call nested_grid_BC(Atm(n)%delz, Atm(n)%parent_grid%delz, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.false.) - call nested_grid_BC(Atm(n)%w, Atm(n)%parent_grid%w, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.false.) - endif + !3preN. Topography BCs for nest, including setup for blending + if (Atm(n)%neststruct%nested) then + if (.not. 
allocated(pelist)) then + allocate(pelist(0:mpp_npes()-1)) + call mpp_get_current_pelist(pelist) endif + call mpp_set_current_pelist() !global + call mpp_broadcast(Atm(n)%flagstruct%external_ic,Atm(n)%pelist(1)) + call mpp_sync() + call mpp_set_current_pelist(pelist) + if ( ( smoothed_topo(Atm(n)%parent_grid%grid_number) > 0 .or. & + .not. do_read_restart_bc .or. & + Atm(n)%flagstruct%external_ic ) ) then + new_nest_topo(n) = 1 + if (n==this_grid) then + + call fill_nested_grid_topo(Atm(n), n==this_grid) + call fill_nested_grid_topo_halo(Atm(n), n==this_grid) !TODO can we combine these? + call nested_grid_BC(Atm(n)%ps, Atm(n)%parent_grid%ps, global_nest_domain, & + Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & + Atm(n)%npx, Atm(n)%npy, Atm(n)%bd, 1, Atm(n)%npx-1, 1, Atm(n)%npy-1) - cycle + elseif (this_grid==Atm(n)%parent_grid%grid_number) then !this_grid is grid n's parent - endif - !This call still appears to be necessary to get isd, etc. correct - call switch_current_Atm(Atm(n)) + call fill_nested_grid_topo(Atm(n), n==this_grid) + call fill_nested_grid_topo_halo(Atm(n), n==this_grid) !TODO can we combine these? + !call mpp_get_data_domain( Atm(n)%parent_grid%domain, isd, ied, jsd, jed) + call nested_grid_BC(Atm(n)%parent_grid%ps, global_nest_domain, 0, 0, n-1) + !Atm(n)%ps, Atm(n)%parent_grid%ps, global_nest_domain, & + !Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & + !Atm(n)%npx, Atm(n)%npy, Atm(n)%bd, isd, ied, jsd, jed, proc_in=n==this_grid) - npz = Atm(1)%npz - npz_rst = Atm(1)%flagstruct%npz_rst - - !--- call fv_io_register_restart to register restart field to be written out in fv_io_write_restart - call fv_io_register_restart(Atm(n)%domain,Atm(n:n)) - if (Atm(n)%neststruct%nested) call fv_io_register_restart_BCs(Atm(n)) - if( .not.cold_start_grids(n) .and. (.not. Atm(n)%flagstruct%external_ic) ) then - - - if ( npz_rst /= 0 .and. npz_rst /= npz ) then -! Remap vertically the prognostic variables for the chosen vertical resolution - if( is_master() ) then - write(*,*) ' ' - write(*,*) '***** Important Note from FV core ********************' - write(*,*) 'Remapping dynamic IC from', npz_rst, 'levels to ', npz,'levels' - write(*,*) '***** End Note from FV core **************************' - write(*,*) ' ' endif - call remap_restart( Atm(n)%domain, Atm(n:n) ) - if( is_master() ) write(*,*) 'Done remapping dynamical IC' - else - if( is_master() ) write(*,*) 'Warm starting, calling fv_io_restart' - call fv_io_read_restart(Atm(n)%domain,Atm(n:n)) - endif - endif -!--------------------------------------------------------------------------------------------- -! Read, interpolate (latlon to cubed), then remap vertically with terrain adjustment if needed -!--------------------------------------------------------------------------------------------- - if (Atm(n)%neststruct%nested) then - if (cold_start_grids(n)) call fill_nested_grid_topo(Atm(n), .true.) - !if (cold_start_grids(n) .and. .not. Atm(n)%flagstruct%nggps_ic) call fill_nested_grid_topo(Atm(n), .true.) - if (cold_start_grids(n)) then - if (Atm(n)%parent_grid%flagstruct%n_zs_filter > 0 .or. Atm(n)%flagstruct%nggps_ic) call fill_nested_grid_topo_halo(Atm(n), .true.) - end if - if (Atm(n)%flagstruct%external_ic .and. Atm(n)%flagstruct%nggps_ic) then - !Fill nested grid halo with ps - call nested_grid_BC(Atm(n)%ps, Atm(n)%parent_grid%ps, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy,Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.true.) 
+ endif endif - endif - if ( Atm(n)%flagstruct%external_ic ) then - if( is_master() ) write(*,*) 'Calling get_external_ic' - call get_external_ic(Atm(n:n), Atm(n)%domain, cold_start_grids(n)) - if( is_master() ) write(*,*) 'IC generated from the specified external source' - endif - seconds = 0; days = 0 ! Restart needs to be modified to record seconds and days. + !This call still appears to be necessary to get isd, etc. correct + !call switch_current_Atm(Atm(n)) !TODO should NOT be necessary now that we manually set isd, etc. -! Notes by Jeff D. - ! This logic doesn't work very well. - ! Shouldn't have read for all tiles then loop over tiles + !--- call fv_io_register_restart to register restart field to be written out in fv_io_write_restart + !if (n==this_grid) call fv_io_register_restart(Atm(n)%domain,Atm(n:n)) + !if (Atm(n)%neststruct%nested) call fv_io_register_restart_BCs(Atm(n)) !TODO put into fv_io_register_restart - isd = Atm(n)%bd%isd - ied = Atm(n)%bd%ied - jsd = Atm(n)%bd%jsd - jed = Atm(n)%bd%jed - ncnst = Atm(n)%ncnst - if( is_master() ) write(*,*) 'in fv_restart ncnst=', ncnst - isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec; jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + if (n==this_grid) then - ! Init model data - if(.not.cold_start_grids(n))then - Atm(N)%neststruct%first_step = .false. - if (Atm(n)%neststruct%nested) then - if ( npz_rst /= 0 .and. npz_rst /= npz ) then - call setup_nested_boundary_halo(Atm(n)) + !3. External_ic + if (Atm(n)%flagstruct%external_ic) then + if( is_master() ) write(*,*) 'Calling get_external_ic' + call get_external_ic(Atm(n), Atm(n)%domain, .not. do_read_restart) + if( is_master() ) write(*,*) 'IC generated from the specified external source' + + !4. Restart + elseif (do_read_restart) then + + if ( Atm(n)%flagstruct%npz_rst /= 0 .and. Atm(n)%flagstruct%npz_rst /= Atm(n)%npz ) then + !Remap vertically the prognostic variables for the chosen vertical resolution + if( is_master() ) then + write(*,*) ' ' + write(*,*) '***** Important Note from FV core ********************' + write(*,*) 'Remapping dynamic IC from', Atm(n)%flagstruct%npz_rst, 'levels to ', Atm(n)%npz,'levels' + write(*,*) '***** End Note from FV core **************************' + write(*,*) ' ' + endif + call remap_restart( Atm(n)%domain, Atm(n:n) ) + if( is_master() ) write(*,*) 'Done remapping dynamical IC' else - !If BC file is found, then read them in. Otherwise we need to initialize the BCs. - if (is_master()) print*, 'Searching for nested grid BC files ', trim(fname_ne), ' ', trim (fname_sw) - if (file_exist(fname_ne, Atm(n)%domain) .and. file_exist(fname_sw, Atm(n)%domain)) then - call fv_io_read_BCs(Atm(n)) - else - if ( is_master() ) write(*,*) 'BC files not found, re-generating nested grid boundary conditions' - call fill_nested_grid_topo_halo(Atm(n), .true.) - call setup_nested_boundary_halo(Atm(n), .true.) - Atm(N)%neststruct%first_step = .true. + if( is_master() ) write(*,*) 'Warm starting, calling fv_io_restart' + call fv_io_read_restart(Atm(n)%domain,Atm(n:n)) + !====== PJP added DA functionality ====== + if (Atm(n)%flagstruct%read_increment) then + ! 
print point in middle of domain for a sanity check + i = (Atm(n)%bd%isc + Atm(n)%bd%iec)/2 + j = (Atm(n)%bd%jsc + Atm(n)%bd%jec)/2 + k = Atm(n)%npz/2 + if( is_master() ) write(*,*) 'Calling read_da_inc',Atm(n)%pt(i,j,k) + call read_da_inc(Atm(n), Atm(n)%domain, Atm(n)%bd, Atm(n)%npz, Atm(n)%ncnst, & + Atm(n)%u, Atm(n)%v, Atm(n)%q, Atm(n)%delp, Atm(n)%pt, isd, jsd, ied, jed) + if( is_master() ) write(*,*) 'Back from read_da_inc',Atm(n)%pt(i,j,k) + endif + !====== end PJP added DA functionality====== + endif + + seconds = 0; days = 0 ! Restart needs to be modified to record seconds and days. + + if (Atm(n)%neststruct%nested) then + if ( Atm(n)%flagstruct%npz_rst /= 0 .and. Atm(n)%flagstruct%npz_rst /= npz ) then + call mpp_error(FATAL, "Remap-restart not implemented for nests.") + endif + if (do_read_restart_BC) call fv_io_read_BCs(Atm(n)) + call mpp_update_domains(Atm(n)%u, Atm(n)%v, Atm(n)%domain, gridtype=DGRID_NE, complete=.true.) + endif + + if ( Atm(n)%flagstruct%mountain ) then + ! !!! Additional terrain filter -- should not be called repeatedly !!! + if ( Atm(n)%flagstruct%n_zs_filter > 0 ) then + if ( Atm(n)%flagstruct%nord_zs_filter == 2 ) then + !!! TODO: move this block into its own routine or CLEAN UP these subroutine calls + call del2_cubed_sphere(Atm(n)%npx, Atm(n)%npy, Atm(n)%phis, & + Atm(n)%gridstruct%area_64, Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, & + Atm(n)%gridstruct%dxc, Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%sin_sg, & + Atm(n)%flagstruct%n_zs_filter, cnst_0p20*Atm(n)%gridstruct%da_min, & + .false., oro_g, Atm(n)%gridstruct%bounded_domain, Atm(n)%domain, Atm(n)%bd) + if ( is_master() ) write(*,*) 'Warning !!!
del-2 terrain filter has been applied ', & + Atm(n)%flagstruct%n_zs_filter, ' times' + else if( Atm(n)%flagstruct%nord_zs_filter == 4 ) then + call del4_cubed_sphere(Atm(n)%npx, Atm(n)%npy, Atm(n)%phis, Atm(n)%gridstruct%area_64, & + Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, & + Atm(n)%gridstruct%dxc, Atm(n)%gridstruct%dyc, Atm(n)%gridstruct%sin_sg, & + Atm(n)%flagstruct%n_zs_filter, .false., oro_g, Atm(n)%gridstruct%bounded_domain, & + Atm(n)%domain, Atm(n)%bd) + if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & + Atm(n)%flagstruct%n_zs_filter, ' times' + endif + endif + call mpp_update_domains( Atm(n)%phis, Atm(n)%domain, complete=.true. ) + else + Atm(n)%phis = 0. + if( is_master() ) write(*,*) 'phis set to zero' + endif !mountain - call mpp_update_domains( Atm(n)%phis, Atm(n)%domain, complete=.true. ) - else - Atm(n)%phis = 0. - if( is_master() ) write(*,*) 'phis set to zero' - endif !mountain -#ifdef SW_DYNAMICS - Atm(n)%pt(:,:,:)=1. -#else - if ( .not.Atm(n)%flagstruct%hybrid_z ) then - if(Atm(n)%ptop/=Atm(n)%ak(1)) call mpp_error(FATAL,'FV restart: ptop not equal Atm(n)%ak(1)') - else - Atm(n)%ptop = Atm(n)%ak(1); Atm(n)%ks = 0 - endif - call p_var(npz, isc, iec, jsc, jec, Atm(n)%ptop, ptop_min, & - Atm(n)%delp, Atm(n)%delz, Atm(n)%pt, Atm(n)%ps, Atm(n)%pe, Atm(n)%peln, & - Atm(n)%pk, Atm(n)%pkz, kappa, Atm(n)%q, Atm(n)%ng, & - ncnst, Atm(n)%gridstruct%area_64, Atm(n)%flagstruct%dry_mass, & - Atm(n)%flagstruct%adjust_dry_mass, Atm(n)%flagstruct%mountain, & - Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & - Atm(n)%flagstruct%nwat, Atm(n)%domain, Atm(n)%flagstruct%make_nh) + !5. Idealized test case + else -#endif - if ( grid_type < 7 .and. grid_type /= 4 ) then -! Fill big values in the non-existing corner regions: -! call fill_ghost(Atm(n)%phis, Atm(n)%npx, Atm(n)%npy, big_number) - do j=jsd,jed+1 - do i=isd,ied+1 - Atm(n)%gridstruct%fc(i,j) = 2.*omega*( -cos(Atm(n)%gridstruct%grid(i,j,1))*cos(Atm(n)%gridstruct%grid(i,j,2))*sin(alpha) + & - sin(Atm(n)%gridstruct%grid(i,j,2))*cos(alpha) ) - enddo - enddo - do j=jsd,jed - do i=isd,ied - Atm(n)%gridstruct%f0(i,j) = 2.*omega*( -cos(Atm(n)%gridstruct%agrid(i,j,1))*cos(Atm(n)%gridstruct%agrid(i,j,2))*sin(alpha) + & - sin(Atm(n)%gridstruct%agrid(i,j,2))*cos(alpha) ) - enddo - enddo - else - f00 = 2.*omega*sin(Atm(n)%flagstruct%deglat/180.*pi) - do j=jsd,jed+1 - do i=isd,ied+1 - Atm(n)%gridstruct%fc(i,j) = f00 - enddo - enddo - do j=jsd,jed - do i=isd,ied - Atm(n)%gridstruct%f0(i,j) = f00 - enddo - enddo - endif - else - if ( Atm(n)%flagstruct%warm_start ) then - call mpp_error(FATAL, 'FV restart files not found; set warm_start = .F. if cold_start is desired.') - endif -! Cold start - if ( Atm(n)%flagstruct%make_hybrid_z ) then - hybrid = .false. - else - hybrid = Atm(n)%flagstruct%hybrid_z - endif - if (grid_type < 4) then - if ( .not. 
Atm(n)%flagstruct%external_ic ) then - call init_case(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt,Atm(n)%delp,Atm(n)%q, & - Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & - Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & - Atm(n)%ak, Atm(n)%bk, Atm(n)%gridstruct, Atm(n)%flagstruct,& - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ng, & - ncnst, Atm(n)%flagstruct%nwat, & - Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, & - Atm(n)%flagstruct%dry_mass, & - Atm(n)%flagstruct%mountain, & - Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & - hybrid, Atm(n)%delz, Atm(n)%ze0, & - Atm(n)%flagstruct%adiabatic, Atm(n)%ks, Atm(n)%neststruct%npx_global, & - Atm(n)%ptop, Atm(n)%domain, Atm(n)%tile, Atm(n)%bd) - endif - elseif (grid_type == 4) then - call init_double_periodic(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt, & - Atm(n)%delp,Atm(n)%q,Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, & - Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & - Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & - Atm(n)%ak, Atm(n)%bk, & - Atm(n)%gridstruct, Atm(n)%flagstruct, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ng, & - ncnst, Atm(n)%flagstruct%nwat, & - Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, & - Atm(n)%flagstruct%dry_mass, Atm(n)%flagstruct%mountain, & - Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & - hybrid, Atm(n)%delz, Atm(n)%ze0, Atm(n)%ks, Atm(n)%ptop, & - Atm(n)%domain, Atm(n)%tile, Atm(n)%bd) - if( is_master() ) write(*,*) 'Doubly Periodic IC generated' - elseif (grid_type == 5 .or. grid_type == 6) then - call init_latlon(Atm(n)%u,Atm(n)%v,Atm(n)%pt,Atm(n)%delp,Atm(n)%q,& - Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, & - Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & - Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & - Atm(n)%ak, Atm(n)%bk, Atm(n)%gridstruct, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ng, ncnst, & - Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, & - Atm(n)%flagstruct%dry_mass, & - Atm(n)%flagstruct%mountain, & - Atm(n)%flagstruct%moist_phys, hybrid, Atm(n)%delz, & - Atm(n)%ze0, Atm(n)%domain, Atm(n)%tile) - endif + ideal_test_case(n) = 1 + + if ( Atm(n)%flagstruct%make_hybrid_z ) then + hybrid = .false. + else + hybrid = Atm(n)%flagstruct%hybrid_z + endif + if (grid_type < 4) then + if ( .not. 
Atm(n)%flagstruct%external_ic ) then + call init_case(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt,Atm(n)%delp,Atm(n)%q, & + Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & + Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & + Atm(n)%ak, Atm(n)%bk, Atm(n)%gridstruct, Atm(n)%flagstruct,& + Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ng, & + ncnst, Atm(n)%flagstruct%nwat, & + Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, & + Atm(n)%flagstruct%dry_mass, & + Atm(n)%flagstruct%mountain, & + Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & + hybrid, Atm(n)%delz, Atm(n)%ze0, & + Atm(n)%flagstruct%adiabatic, Atm(n)%ks, Atm(n)%neststruct%npx_global, & + Atm(n)%ptop, Atm(n)%domain, Atm(n)%tile_of_mosaic, Atm(n)%bd) + endif + elseif (grid_type == 4) then + call init_double_periodic(Atm(n)%u,Atm(n)%v,Atm(n)%w,Atm(n)%pt, & + Atm(n)%delp,Atm(n)%q,Atm(n)%phis, Atm(n)%ps,Atm(n)%pe, & + Atm(n)%peln,Atm(n)%pk,Atm(n)%pkz, & + Atm(n)%uc,Atm(n)%vc, Atm(n)%ua,Atm(n)%va, & + Atm(n)%ak, Atm(n)%bk, & + Atm(n)%gridstruct, Atm(n)%flagstruct, & + Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%ng, & + ncnst, Atm(n)%flagstruct%nwat, & + Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, & + Atm(n)%flagstruct%dry_mass, Atm(n)%flagstruct%mountain, & + Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & + hybrid, Atm(n)%delz, Atm(n)%ze0, Atm(n)%ks, Atm(n)%ptop, & + Atm(n)%domain, Atm(n)%tile_of_mosaic, Atm(n)%bd) + if( is_master() ) write(*,*) 'Doubly Periodic IC generated' + elseif (grid_type == 5 .or. grid_type == 6) then + call mpp_error(FATAL, "Idealized test cases for grid_type == 5,6 (global lat-lon) grid not supported") + endif - !Turn this off on the nested grid if you are just interpolating topography from the coarse grid! - if ( Atm(n)%flagstruct%fv_land ) then - do j=jsc,jec - do i=isc,iec - Atm(n)%sgh(i,j) = sgh_g(i,j) - Atm(n)%oro(i,j) = oro_g(i,j) + !Turn this off on the nested grid if you are just interpolating topography from the coarse grid! + !These parameters are needed in LM3/LM4, and are communicated through restart files + if ( Atm(n)%flagstruct%fv_land ) then + do j=jsc,jec + do i=isc,iec + Atm(n)%sgh(i,j) = sgh_g(i,j) + Atm(n)%oro(i,j) = oro_g(i,j) + enddo enddo - enddo - endif + endif + endif !external_ic vs. restart vs. idealized - !Set up nested grids + + endif !n==this_grid + + + !!!! NOT NEEDED?? !Currently even though we do fill in the nested-grid IC from ! init_case or external_ic we appear to overwrite it using ! coarse-grid data - !if (Atm(n)%neststruct%nested) then - ! Only fill nested-grid data if external_ic is called for the cubed-sphere grid +!!$ if (Atm(n)%neststruct%nested) then +!!$ if (.not. Atm(n)%flagstruct%external_ic .and. .not. Atm(n)%flagstruct%nggps_ic .and. grid_type < 4 ) then +!!$ call fill_nested_grid_data(Atm(n:n)) +!!$ endif +!!$ end if + +! endif !end cold_start check + + !5n. Nesting setup (part I) + + !Broadcast data for nesting + if (ntileMe > 1) then + if (.not. allocated(pelist)) then + allocate(pelist(0:mpp_npes()-1)) + call mpp_get_current_pelist(pelist) + endif + + call mpp_set_current_pelist()!global + !for remap BCs + call mpp_broadcast(Atm(n)%ptop,Atm(n)%pelist(1)) + call mpp_broadcast(Atm(n)%ak,Atm(n)%npz+1,Atm(n)%pelist(1)) + call mpp_broadcast(Atm(n)%bk,Atm(n)%npz+1,Atm(n)%pelist(1)) + !smoothed_topo + call mpp_broadcast(smoothed_topo(n),Atm(n)%pelist(1)) + + call mpp_sync() + call mpp_set_current_pelist(pelist) + + if (Atm(n)%neststruct%nested) then - call setup_nested_boundary_halo(Atm(n), .true.) 
- if (Atm(n)%flagstruct%external_ic .and. .not. Atm(n)%flagstruct%nggps_ic .and. grid_type < 4 ) then - call fill_nested_grid_data(Atm(n:n)) + Atm(n)%neststruct%do_remap_BC(ntileMe) = .false. + + if (Atm(n)%npz /= Atm(n)%parent_grid%npz) then + Atm(n)%neststruct%do_remap_BC(n) = .true. + else + do k=1,Atm(n)%npz+1 + if (Atm(n)%ak(k) /= Atm(n)%parent_grid%ak(k)) then + Atm(n)%neststruct%do_remap_BC(n) = .true. + exit + endif + if (Atm(n)%bk(k) /= Atm(n)%parent_grid%bk(k)) then + Atm(n)%neststruct%do_remap_BC(n) = .true. + exit + endif + enddo endif - end if - endif !end cold_start check + Atm(n)%parent_grid%neststruct%do_remap_BC(n) = Atm(n)%neststruct%do_remap_BC(n) + if (is_master() .and. n==this_grid) then + if (Atm(n)%neststruct%do_remap_BC(n)) then + print*, ' Remapping BCs ENABLED on grid', n + else + print*, ' Remapping BCs DISABLED (not necessary) on grid', n + endif + write(*,'(A, I3, A, F8.2, A)') ' Nested grid ', n, ', ptop = ', Atm(n)%ak(1), ' Pa' + write(*,'(A, I3, A, F8.2, A)') ' Parent grid ', n, ', ptop = ', Atm(n)%parent_grid%ak(1), ' Pa' + if (Atm(n)%ak(1) < Atm(n)%parent_Grid%ak(1)) then + print*, ' WARNING nested grid top above parent grid top. May have problems with remapping BCs.' + endif + endif + endif - if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. Atm(n)%flagstruct%make_nh .and. Atm(n)%neststruct%nested) then - call nested_grid_BC(Atm(n)%delz, Atm(n)%parent_grid%delz, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.true.) - call nested_grid_BC(Atm(n)%w, Atm(n)%parent_grid%w, Atm(n)%neststruct%nest_domain, & - Atm(n)%neststruct%ind_h, Atm(n)%neststruct%wt_h, 0, 0, & - Atm(n)%npx, Atm(n)%npy, npz, Atm(n)%bd, isg, ieg, jsg, jeg, proc_in=.true.) - call fv_io_register_restart_BCs_NH(Atm(n)) !needed to register nested-grid BCs not registered earlier endif - end do + end do !break cycling loop to finish nesting setup do n = ntileMe,1,-1 - if (Atm(n)%neststruct%nested .and. Atm(n)%flagstruct%external_ic .and. & - Atm(n)%flagstruct%grid_type < 4 .and. cold_start_grids(n)) then - call fill_nested_grid_data_end(Atm(n), grids_on_this_pe(n)) + if (new_nest_topo(n)) then + call twoway_topo_update(Atm(n), n==this_grid) endif end do + !6. Data Setup do n = 1, ntileMe - if (.not. grids_on_this_pe(n)) cycle + + if (n/=this_grid) cycle isd = Atm(n)%bd%isd ied = Atm(n)%bd%ied jsd = Atm(n)%bd%jsd jed = Atm(n)%bd%jed + isc = Atm(n)%bd%isc + iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc + jec = Atm(n)%bd%jec ncnst = Atm(n)%ncnst + if( is_master() ) write(*,*) 'in fv_restart ncnst=', ncnst + npz = Atm(n)%npz ntprog = size(Atm(n)%q,4) ntdiag = size(Atm(n)%qdiag,4) - isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec; jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + + + if (.not. ideal_test_case(n)) then +#ifdef SW_DYNAMICS + Atm(n)%pt(:,:,:)=1. 
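! In the non-SW branch below, p_var makes every pressure diagnostic
! consistent with delp before the run starts. The core hydrostatic
! relationships it enforces, as a standalone single-column sketch
! (kappa = rdgas/cp_air; names illustrative, not part of this patch):
!!$ subroutine pressure_vars(npz, ptop, kappa, delp, pe, peln, pk, pkz)
!!$    integer, intent(in) :: npz
!!$    real, intent(in)  :: ptop, kappa, delp(npz)
!!$    real, intent(out) :: pe(npz+1), peln(npz+1), pk(npz+1), pkz(npz)
!!$    integer :: k
!!$    pe(1) = ptop
!!$    do k=1,npz
!!$       pe(k+1) = pe(k) + delp(k)   ! interface pressure from layer thickness
!!$    enddo
!!$    peln(:) = log(pe(:))
!!$    pk(:)   = pe(:)**kappa
!!$    do k=1,npz
!!$       ! layer-mean p**kappa, consistent with the hydrostatic solver
!!$       pkz(k) = (pk(k+1)-pk(k)) / (kappa*(peln(k+1)-peln(k)))
!!$    enddo
!!$ end subroutine pressure_vars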
+#else + if ( .not.Atm(n)%flagstruct%hybrid_z ) then + if(Atm(n)%ptop/=Atm(n)%ak(1)) call mpp_error(FATAL,'FV restart: ptop not equal Atm(n)%ak(1)') + else + Atm(n)%ptop = Atm(n)%ak(1); Atm(n)%ks = 0 + endif + call p_var(npz, isc, iec, jsc, jec, Atm(n)%ptop, ptop_min, & + Atm(n)%delp, Atm(n)%delz, Atm(n)%pt, Atm(n)%ps, Atm(n)%pe, Atm(n)%peln, & + Atm(n)%pk, Atm(n)%pkz, kappa, Atm(n)%q, Atm(n)%ng, & + ncnst, Atm(n)%gridstruct%area_64, Atm(n)%flagstruct%dry_mass, & + Atm(n)%flagstruct%adjust_dry_mass, Atm(n)%flagstruct%mountain, & + Atm(n)%flagstruct%moist_phys, Atm(n)%flagstruct%hydrostatic, & + Atm(n)%flagstruct%nwat, Atm(n)%domain, Atm(1)%flagstruct%adiabatic, Atm(n)%flagstruct%make_nh) +#endif + if ( grid_type < 7 .and. grid_type /= 4 ) then + ! Fill big values in the non-existing corner regions: + ! call fill_ghost(Atm(n)%phis, Atm(n)%npx, Atm(n)%npy, big_number) + do j=jsd,jed+1 + do i=isd,ied+1 + Atm(n)%gridstruct%fc(i,j) = 2.*omega*( -cos(Atm(n)%gridstruct%grid(i,j,1))*cos(Atm(n)%gridstruct%grid(i,j,2))*sin(alpha) + & + sin(Atm(n)%gridstruct%grid(i,j,2))*cos(alpha) ) + enddo + enddo + do j=jsd,jed + do i=isd,ied + Atm(n)%gridstruct%f0(i,j) = 2.*omega*( -cos(Atm(n)%gridstruct%agrid(i,j,1))*cos(Atm(n)%gridstruct%agrid(i,j,2))*sin(alpha) + & + sin(Atm(n)%gridstruct%agrid(i,j,2))*cos(alpha) ) + enddo + enddo + else + f00 = 2.*omega*sin(Atm(n)%flagstruct%deglat/180.*pi) + do j=jsd,jed+1 + do i=isd,ied+1 + Atm(n)%gridstruct%fc(i,j) = f00 + enddo + enddo + do j=jsd,jed + do i=isd,ied + Atm(n)%gridstruct%f0(i,j) = f00 + enddo + enddo + endif + call mpp_update_domains( Atm(n)%gridstruct%f0, Atm(n)%domain ) + if ( Atm(n)%gridstruct%cubed_sphere .and. (.not. Atm(n)%gridstruct%bounded_domain))then + call fill_corners(Atm(n)%gridstruct%f0, Atm(n)%npx, Atm(n)%npy, Corners_YDir) + endif + endif !--------------------------------------------------------------------------------------------- @@ -524,6 +561,17 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ call mpp_error(NOTE, errstring) endif + if (Atm(n)%flagstruct%fv_sg_adj > 0 .and. Atm(n)%flagstruct%sg_cutoff > 0) then + !Choose n_sponge from first reference level above sg_cutoff + do k=1,npz + ph = Atm(n)%ak(k+1) + Atm(n)%bk(k+1)*Atm(n)%flagstruct%p_ref + if (ph > Atm(n)%flagstruct%sg_cutoff) exit + enddo + Atm(n)%flagstruct%n_sponge = min(k,npz) + write(errstring,'(A, I3, A)') ' Override n_sponge: applying 2dz filter to ', k , ' levels' + call mpp_error(NOTE, errstring) + endif + if (Atm(n)%grid_number > 1) then write(gn,'(A2, I1)') " g", Atm(n)%grid_number else @@ -531,6 +579,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ end if unit = stdout() + !!!NOTE: Checksums not yet working in stand-alone regional model!! write(unit,*) write(unit,*) 'fv_restart u ', trim(gn),' = ', mpp_chksum(Atm(n)%u(isc:iec,jsc:jec,:)) write(unit,*) 'fv_restart v ', trim(gn),' = ', mpp_chksum(Atm(n)%v(isc:iec,jsc:jec,:)) @@ -551,6 +600,7 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ call get_tracer_names(MODEL_ATMOS, iq, tracer_name) write(unit,*) 'fv_restart '//trim(tracer_name)//' = ', mpp_chksum(Atm(n)%q(isc:iec,jsc:jec,:,iq)) enddo + !--------------- ! Check Min/Max: !--------------- @@ -571,12 +621,17 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if ( (.not.Atm(n)%flagstruct%hydrostatic) .and. Atm(n)%flagstruct%make_nh ) then call mpp_error(NOTE, " Initializing w to 0") Atm(n)%w = 0. 
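! When make_nh is set, delz below is diagnosed from hydrostatic balance
! using the *virtual* temperature, so a moist initial state gets the
! correct (negative, since height decreases with k) layer thickness:
!    delz = (rdgas/grav) * Tv * (ln pe(k) - ln pe(k+1)),  Tv = T*(1 + zvir*q)
! A worked single-layer sketch (constants rounded; names illustrative,
! not part of this patch):
!!$ program delz_demo
!!$    implicit none
!!$    real, parameter :: rdgas=287.04, rvgas=461.50, grav=9.80665
!!$    real :: zvir, tv, delz
!!$    real :: t=280., q=0.01, pe_top=85000., pe_bot=90000.
!!$    zvir = rvgas/rdgas - 1.            ! ~0.608
!!$    tv   = t*(1. + zvir*q)             ! virtual temperature
!!$    delz = (rdgas/grav)*tv*(log(pe_top) - log(pe_bot))
!!$    print *, 'layer thickness (m):', delz   ! ~ -471 m
!!$ end program delz_demo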
+ sphum = get_tracer_index (MODEL_ATMOS, 'sphum') if ( .not.Atm(n)%flagstruct%hybrid_z ) then - call mpp_error(NOTE, " Initializing delz from hydrostatic state") + if (Atm(n)%flagstruct%adiabatic .or. sphum < 0) then + zvir = 0. + else + zvir = rvgas/rdgas - 1. + endif do k=1,npz do j=jsc,jec do i=isc,iec - Atm(n)%delz(i,j,k) = (rdgas*rgrav)*Atm(n)%pt(i,j,k)*(Atm(n)%peln(i,k,j)-Atm(n)%peln(i,k+1,j)) + Atm(n)%delz(i,j,k) = (rdgas*rgrav)*Atm(n)%pt(i,j,k)*(1.+zvir*Atm(n)%q(i,j,k,sphum))*(Atm(n)%peln(i,k,j)-Atm(n)%peln(i,k+1,j)) enddo enddo enddo @@ -594,9 +649,9 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ if ( .not. Atm(n)%flagstruct%srf_init ) then call cubed_to_latlon(Atm(n)%u, Atm(n)%v, Atm(n)%ua, Atm(n)%va, & Atm(n)%gridstruct, & - Atm(n)%npx, Atm(n)%npy, npz, 1, & + Atm(n)%npx, Atm(n)%npy, npz, 1, & Atm(n)%gridstruct%grid_type, Atm(n)%domain, & - Atm(n)%gridstruct%nested, Atm(n)%flagstruct%c2l_ord, Atm(n)%bd) + Atm(n)%gridstruct%bounded_domain, Atm(n)%flagstruct%c2l_ord, Atm(n)%bd) do j=jsc,jec do i=isc,iec Atm(n)%u_srf(i,j) = Atm(n)%ua(i,j,npz) @@ -611,172 +666,30 @@ subroutine fv_restart(fv_domain, Atm, dt_atmos, seconds, days, cold_start, grid_ end subroutine fv_restart ! NAME="fv_restart" - subroutine setup_nested_boundary_halo(Atm, proc_in) - - !This routine is now taking the "easy way out" with regards - ! to pt (virtual potential temperature), q_con, and cappa; - ! their halo values are now set up when the BCs are set up - ! in fv_dynamics - - type(fv_atmos_type), intent(INOUT) :: Atm - logical, INTENT(IN), OPTIONAL :: proc_in - real, allocatable :: g_dat(:,:,:), g_dat2(:,:,:) - real, allocatable :: pt_coarse(:,:,:) - integer i,j,k,nq, sphum, ncnst, istart, iend, npz, nwat - integer isc, iec, jsc, jec, isd, ied, jsd, jed, is, ie, js, je - integer isd_p, ied_p, jsd_p, jed_p, isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg,jeg, npx_p, npy_p - real zvir - logical process - integer :: liq_wat, ice_wat, rainwat, snowwat, graupel - real :: qv, dp1, q_liq, q_sol, q_con, cvm, cappa, dp, pt, dz, pkz, rdg - - if (PRESENT(proc_in)) then - process = proc_in - else - process = .true. 
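! The u_srf/v_srf initialization above relies on cubed_to_latlon, whose
! first step averages the staggered D-grid winds to A-grid cell centers
! (the job the d2a_setup/d2c_setup helpers removed further down used to do
! inline). To 2nd order, and ignoring the cubed-sphere metric corrections
! (cosa_s, rsin2), that averaging reduces to the sketch below (illustrative
! only, not the library routine):
!!$ subroutine d2a_simple(is, ie, js, je, u, v, ua, va)
!!$    integer, intent(in) :: is, ie, js, je
!!$    real, intent(in)  :: u(is:ie, js:je+1)   ! D-grid u on south/north faces
!!$    real, intent(in)  :: v(is:ie+1, js:je)   ! D-grid v on west/east faces
!!$    real, intent(out) :: ua(is:ie, js:je), va(is:ie, js:je)
!!$    integer :: i, j
!!$    do j=js,je
!!$       do i=is,ie
!!$          ua(i,j) = 0.5*(u(i,j) + u(i,j+1))  ! mean of the two y-faces
!!$          va(i,j) = 0.5*(v(i,j) + v(i+1,j))  ! mean of the two x-faces
!!$       enddo
!!$    enddo
!!$ end subroutine d2a_simple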
- endif - - isd = Atm%bd%isd - ied = Atm%bd%ied - jsd = Atm%bd%jsd - jed = Atm%bd%jed - ncnst = Atm%ncnst - isc = Atm%bd%isc; iec = Atm%bd%iec; jsc = Atm%bd%jsc; jec = Atm%bd%jec - is = Atm%bd%is ; ie = Atm%bd%ie ; js = Atm%bd%js ; je = Atm%bd%je - npz = Atm%npz - nwat = Atm%flagstruct%nwat - - if (nwat>=3 ) then - liq_wat = get_tracer_index (MODEL_ATMOS, 'liq_wat') - ice_wat = get_tracer_index (MODEL_ATMOS, 'ice_wat') - endif - if ( nwat==6 ) then - rainwat = get_tracer_index (MODEL_ATMOS, 'rainwat') - snowwat = get_tracer_index (MODEL_ATMOS, 'snowwat') - graupel = get_tracer_index (MODEL_ATMOS, 'graupel') - endif - - call mpp_get_data_domain( Atm%parent_grid%domain, & - isd_p, ied_p, jsd_p, jed_p ) - call mpp_get_compute_domain( Atm%parent_grid%domain, & - isc_p, iec_p, jsc_p, jec_p ) - call mpp_get_global_domain( Atm%parent_grid%domain, & - isg, ieg, jsg, jeg, xsize=npx_p, ysize=npy_p) - - call nested_grid_BC(Atm%delp, Atm%parent_grid%delp, Atm%neststruct%nest_domain, & - Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - do nq=1,ncnst - call nested_grid_BC(Atm%q(:,:,:,nq), & - Atm%parent_grid%q(:,:,:,nq), Atm%neststruct%nest_domain, & - Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - end do - - if (process) then - if (is_master()) print*, 'FILLING NESTED GRID HALO' - else - if (is_master()) print*, 'SENDING DATA TO FILL NESTED GRID HALO' - endif - - - !Filling phis? - !In idealized test cases, where the topography is EXACTLY known (ex case 13), - !interpolating the topography yields a much worse result. In comparison in - !real topography cases little difference is seen. - - !This is probably because the halo phis, which is used to compute - !geopotential height (gz, gh), only affects the interior by being - !used to compute corner gz in a2b_ord[24]. We might suppose this - !computation would be more accurate when using values of phis which - !are more consistent with those on the interior (ie the exactly-known - !values) than the crude values given through linear interpolation. - - !For real topography cases, or in cases in which the coarse-grid topo - ! is smoothed, we fill the boundary halo with the coarse-grid topo. - -#ifndef SW_DYNAMICS - !pt --- actually temperature - - call nested_grid_BC(Atm%pt, Atm%parent_grid%pt, Atm%neststruct%nest_domain, & - Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - - if (.not. 
Atm%flagstruct%hydrostatic) then - - !w - call nested_grid_BC(Atm%w(:,:,:), & - Atm%parent_grid%w(:,:,:), & - Atm%neststruct%nest_domain, Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - - - !delz - call nested_grid_BC(Atm%delz(:,:,:), & - Atm%parent_grid%delz(:,:,:), & - Atm%neststruct%nest_domain, Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - - end if - -#endif - - if (Atm%neststruct%child_proc) then - call nested_grid_BC(Atm%u, Atm%parent_grid%u(:,:,:), & - Atm%neststruct%nest_domain, Atm%neststruct%ind_u, Atm%neststruct%wt_u, 0, 1, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - call nested_grid_BC(Atm%v, Atm%parent_grid%v(:,:,:), & - Atm%neststruct%nest_domain, Atm%neststruct%ind_v, Atm%neststruct%wt_v, 1, 0, & - Atm%npx, Atm%npy, npz, Atm%bd, isg, ieg, jsg, jeg, proc_in=process) - else - call nested_grid_BC(Atm%parent_grid%u(:,:,:), & - Atm%neststruct%nest_domain, 0, 1) - call nested_grid_BC(Atm%parent_grid%v(:,:,:), & - Atm%neststruct%nest_domain, 1, 0) - endif - - - if (process) then -!!$#ifdef SW_DYNAMICS -!!$ !ps: first level only -!!$ !This is only valid for shallow-water simulations -!!$ do j=jsd,jed -!!$ do i=isd,ied -!!$ -!!$ Atm%ps(i,j) = Atm%delp(i,j,1)/grav -!!$ -!!$ end do -!!$ end do -!!$#endif - call mpp_update_domains(Atm%u, Atm%v, Atm%domain, gridtype=DGRID_NE) - call mpp_update_domains(Atm%w, Atm%domain, complete=.true.) ! needs an update-domain for rayleigh damping - endif - - call mpp_sync_self() - - end subroutine setup_nested_boundary_halo subroutine fill_nested_grid_topo_halo(Atm, proc_in) type(fv_atmos_type), intent(INOUT) :: Atm logical, intent(IN), OPTIONAL :: proc_in - integer :: isg, ieg, jsg, jeg + integer :: isd, ied, jsd, jed if (.not. Atm%neststruct%nested) return - call mpp_get_global_domain( Atm%parent_grid%domain, & - isg, ieg, jsg, jeg) + call mpp_get_data_domain( Atm%parent_grid%domain, & + isd, ied, jsd, jed) + !This is 2D and doesn't need remapping if (is_master()) print*, ' FILLING NESTED GRID HALO WITH INTERPOLATED TERRAIN' - call nested_grid_BC(Atm%phis, Atm%parent_grid%phis, Atm%neststruct%nest_domain, & + call nested_grid_BC(Atm%phis, Atm%parent_grid%phis, global_nest_domain, & Atm%neststruct%ind_h, Atm%neststruct%wt_h, 0, 0, & - Atm%npx, Atm%npy, Atm%bd, isg, ieg, jsg, jeg, proc_in=proc_in) - + Atm%npx, Atm%npy, Atm%bd, isd, ied, jsd, jed, proc_in=proc_in, nest_level=Atm%grid_number-1) + end subroutine fill_nested_grid_topo_halo !!! We call this routine to fill the nested grid with topo so that we can do the boundary smoothing. !!! Interior topography is then over-written in get_external_ic. - subroutine fill_nested_grid_topo(Atm, proc_in) +!!! Input grid is the nest; use Atm%parent_grid% to reference parent + subroutine fill_nested_grid_topo(Atm, proc_in) type(fv_atmos_type), intent(INOUT) :: Atm logical, intent(IN), OPTIONAL :: proc_in @@ -809,12 +722,14 @@ subroutine fill_nested_grid_topo(Atm, proc_in) if (is_master() .and. .not. Atm%flagstruct%external_ic ) print*, ' FILLING NESTED GRID INTERIOR WITH INTERPOLATED TERRAIN' - sending_proc = Atm%parent_grid%pelist(1) + (Atm%neststruct%parent_tile-1)*Atm%parent_grid%npes_per_tile - if (Atm%neststruct%parent_proc .and. 
Atm%neststruct%parent_tile == Atm%parent_grid%tile) then + sending_proc = (Atm%parent_grid%pelist(1)) + & + (Atm%neststruct%parent_tile-tile_fine(Atm%parent_grid%grid_number)+Atm%parent_grid%flagstruct%ntiles-1)*Atm%parent_grid%npes_per_tile + if (Atm%neststruct%parent_tile == Atm%parent_grid%global_tile) then + !if (Atm%neststruct%parent_proc .and. Atm%neststruct%parent_tile == Atm%parent_grid%global_tile) then call mpp_global_field( & Atm%parent_grid%domain, & Atm%parent_grid%phis(isd_p:ied_p,jsd_p:jed_p), g_dat(isg:,jsg:,1), position=CENTER) - if (mpp_pe() == sending_proc) then + if (mpp_pe() == sending_proc) then do p=1,size(Atm%pelist) call mpp_send(g_dat,size(g_dat),Atm%pelist(p)) enddo @@ -837,6 +752,9 @@ subroutine fill_nested_grid_topo(Atm, proc_in) end subroutine fill_nested_grid_topo + !This will still probably be needed for moving nests + !NOTE: this has NOT been maintained and so %global_tile is now meaningless if not referring to data on the current PE + ! needs to be re-coded to follow method in fill_nested_grid_Topo subroutine fill_nested_grid_data(Atm, proc_in) type(fv_atmos_type), intent(INOUT) :: Atm(:) !Only intended to be one element; needed for cubed_sphere_terrain @@ -852,6 +770,8 @@ subroutine fill_nested_grid_data(Atm, proc_in) integer :: p, sending_proc, gid logical process + call mpp_error(FATAL, " FILL_NESTED_GRID_DATA not yet updated for remap BCs") + if (present(proc_in)) then process = proc_in else @@ -864,8 +784,8 @@ subroutine fill_nested_grid_data(Atm, proc_in) jed = Atm(1)%bd%jed ncnst = Atm(1)%ncnst isc = Atm(1)%bd%isc; iec = Atm(1)%bd%iec; jsc = Atm(1)%bd%jsc; jec = Atm(1)%bd%jec - npz = Atm(1)%npz - + npz = Atm(1)%npz + gid = mpp_pe() sending_proc = Atm(1)%parent_grid%pelist(1) + (Atm(1)%neststruct%parent_tile-1)*Atm(1)%parent_grid%npes_per_tile @@ -877,8 +797,8 @@ subroutine fill_nested_grid_data(Atm, proc_in) call mpp_get_global_domain( Atm(1)%parent_grid%domain, & isg, ieg, jsg, jeg, xsize=npx_p, ysize=npy_p) - if (process) then - + if (process) then + call mpp_error(NOTE, "FILLING NESTED GRID DATA") else @@ -895,7 +815,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) !Call mpp_global_field on the procs that have the required data. !Then broadcast from the head PE to the receiving PEs - if (Atm(1)%neststruct%parent_proc .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (Atm(1)%neststruct%parent_proc .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%delp(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) @@ -921,7 +841,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%q(isd_p:ied_p,jsd_p:jed_p,:,nq), g_dat, position=CENTER) @@ -944,7 +864,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) end do - !Note that we do NOT fill in phis (surface geopotential), which should + !Note that we do NOT fill in phis (surface geopotential), which should !be computed exactly instead of being interpolated. @@ -953,7 +873,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. 
Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%pt(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) @@ -988,7 +908,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%pkz(isc_p:iec_p,jsc_p:jec_p,:), g_dat, position=CENTER) @@ -1005,7 +925,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call mpp_sync_self call timing_off('COMM_TOTAL') - if (process) then + if (process) then allocate(pt_coarse(isd:ied,jsd:jed,npz)) call fill_nested_grid(pt_coarse, g_dat, & Atm(1)%neststruct%ind_h, Atm(1)%neststruct%wt_h, & @@ -1082,7 +1002,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) !delz call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%delz(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) @@ -1107,7 +1027,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%w(isd_p:ied_p,jsd_p:jed_p,:), g_dat, position=CENTER) @@ -1132,7 +1052,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) end if #endif - deallocate(g_dat) + deallocate(g_dat) !u @@ -1141,7 +1061,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%u(isd_p:ied_p,jsd_p:jed_p+1,:), g_dat, position=NORTH) @@ -1171,7 +1091,7 @@ subroutine fill_nested_grid_data(Atm, proc_in) call timing_on('COMM_TOTAL') - if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%tile) then + if (ANY(Atm(1)%parent_grid%pelist == gid) .and. Atm(1)%neststruct%parent_tile == Atm(1)%parent_grid%global_tile) then call mpp_global_field( & Atm(1)%parent_grid%domain, & Atm(1)%parent_grid%v(isd_p:ied_p+1,jsd_p:jed_p,:), g_dat, position=EAST) @@ -1196,9 +1116,10 @@ subroutine fill_nested_grid_data(Atm, proc_in) end subroutine fill_nested_grid_data - subroutine fill_nested_grid_data_end(Atm, proc_in) + !This routine actually sets up the coarse-grid TOPOGRAPHY. 
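! In the two-way update handled by this routine, the nest's filtered terrain
! replaces the parent's over the overlap region; conceptually each parent
! cell receives the area-weighted mean of the nest cells it contains (the
! actual transfer goes through update_coarse_grid and the mpp nesting
! machinery). For one parent cell and a uniform refinement factor r, the
! average is (illustrative sketch, not part of this patch):
!!$ real function coarse_cell_mean(r, q_fine, area_fine) result(qbar)
!!$    integer, intent(in) :: r                       ! refinement factor
!!$    real, intent(in) :: q_fine(r,r), area_fine(r,r)
!!$    qbar = sum(q_fine*area_fine) / sum(area_fine)  ! area-weighted average
!!$ end function coarse_cell_mean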
+ subroutine twoway_topo_update(Atm, proc_in) - type(fv_atmos_type), intent(INOUT) :: Atm + type(fv_atmos_type), intent(INOUT) :: Atm logical, intent(IN), OPTIONAL :: proc_in real, allocatable :: g_dat(:,:,:), pt_coarse(:,:,:) integer :: i,j,k,nq, sphum, ncnst, istart, iend, npz @@ -1223,17 +1144,17 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) jed = Atm%bd%jed ncnst = Atm%ncnst isc = Atm%bd%isc; iec = Atm%bd%iec; jsc = Atm%bd%jsc; jec = Atm%bd%jec - npz = Atm%npz - - isd_p = Atm%parent_grid%bd%isd - ied_p = Atm%parent_grid%bd%ied - jsd_p = Atm%parent_grid%bd%jsd - jed_p = Atm%parent_grid%bd%jed - isc_p = Atm%parent_grid%bd%isc - iec_p = Atm%parent_grid%bd%iec - jsc_p = Atm%parent_grid%bd%jsc - jec_p = Atm%parent_grid%bd%jec - sending_proc = Atm%parent_grid%pelist(1) + (Atm%neststruct%parent_tile-1)*Atm%parent_grid%npes_per_tile + npz = Atm%npz + + isd_p = Atm%parent_grid%bd%isd + ied_p = Atm%parent_grid%bd%ied + jsd_p = Atm%parent_grid%bd%jsd + jed_p = Atm%parent_grid%bd%jed + isc_p = Atm%parent_grid%bd%isc + iec_p = Atm%parent_grid%bd%iec + jsc_p = Atm%parent_grid%bd%jsc + jec_p = Atm%parent_grid%bd%jec + sending_proc = Atm%parent_grid%pelist(1) + (Atm%neststruct%parent_tile-1)*Atm%parent_grid%npes_per_tile call mpp_get_global_domain( Atm%parent_grid%domain, & isg, ieg, jsg, jeg, xsize=npx_p, ysize=npy_p) @@ -1245,14 +1166,13 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) if (Atm%neststruct%twowaynest) then if (ANY(Atm%parent_grid%pelist == mpp_pe()) .or. Atm%neststruct%child_proc) then call update_coarse_grid(Atm%parent_grid%phis, & - Atm%phis, Atm%neststruct%nest_domain, & - Atm%neststruct%ind_update_h(isd_p:ied_p+1,jsd_p:jed_p+1,:), & + Atm%phis, global_nest_domain, & Atm%gridstruct%dx, Atm%gridstruct%dy, Atm%gridstruct%area, & - isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & + Atm%bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & Atm%neststruct%isu, Atm%neststruct%ieu, Atm%neststruct%jsu, Atm%neststruct%jeu, & Atm%npx, Atm%npy, 0, 0, & Atm%neststruct%refinement, Atm%neststruct%nestupdate, 0, 0, & - Atm%neststruct%parent_proc, Atm%neststruct%child_proc, Atm%parent_grid) + Atm%neststruct%parent_proc, Atm%neststruct%child_proc, Atm%parent_grid, Atm%grid_number-1) Atm%parent_grid%neststruct%parent_of_twoway = .true. !NOTE: mpp_update_nest_coarse (and by extension, update_coarse_grid) does **NOT** pass data !allowing a two-way update into the halo of the coarse grid. It only passes data so that the INTERIOR @@ -1264,8 +1184,6 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) end if - - #ifdef SW_DYNAMICS !!$ !ps: first level only !!$ !This is only valid for shallow-water simulations @@ -1279,17 +1197,17 @@ subroutine fill_nested_grid_data_end(Atm, proc_in) !!$ end do !!$ endif #else - !Sets up flow to be initially hydrostatic (shouldn't be the case for all ICs?) + !Reset p_var after updating topography if (process) call p_var(npz, isc, iec, jsc, jec, Atm%ptop, ptop_min, Atm%delp, & Atm%delz, Atm%pt, Atm%ps, & Atm%pe, Atm%peln, Atm%pk, Atm%pkz, kappa, Atm%q, & Atm%ng, ncnst, Atm%gridstruct%area_64, Atm%flagstruct%dry_mass, .false., Atm%flagstruct%mountain, & - Atm%flagstruct%moist_phys, .true., Atm%flagstruct%nwat, Atm%domain) + Atm%flagstruct%moist_phys, .true., Atm%flagstruct%nwat, Atm%domain, Atm%flagstruct%adiabatic) #endif - - end subroutine fill_nested_grid_data_end + + end subroutine twoway_topo_update !####################################################################### @@ -1297,18 +1215,14 @@ end subroutine fill_nested_grid_data_end ! ! 
Write out restart files registered through register_restart_file ! - subroutine fv_write_restart(Atm, grids_on_this_pe, timestamp) - type(fv_atmos_type), intent(inout) :: Atm(:) + subroutine fv_write_restart(Atm, timestamp) + type(fv_atmos_type), intent(inout) :: Atm character(len=*), intent(in) :: timestamp - logical, intent(IN) :: grids_on_this_pe(:) - integer n - call fv_io_write_restart(Atm, grids_on_this_pe, timestamp) - do n=1,size(Atm) - if (Atm(n)%neststruct%nested .and. grids_on_this_pe(n)) then - call fv_io_write_BCs(Atm(n)) - endif - enddo + call fv_io_write_restart(Atm, timestamp) + if (Atm%neststruct%nested) then + call fv_io_write_BCs(Atm) + endif end subroutine fv_write_restart ! @@ -1322,12 +1236,11 @@ end subroutine fv_write_restart ! Initialize the fv core restart facilities ! ! - subroutine fv_restart_end(Atm, grids_on_this_pe) - type(fv_atmos_type), intent(inout) :: Atm(:) - logical, intent(INOUT) :: grids_on_this_pe(:) + subroutine fv_restart_end(Atm) + type(fv_atmos_type), intent(inout) :: Atm integer :: isc, iec, jsc, jec - integer :: iq, n, ntileMe, ncnst, ntprog, ntdiag + integer :: iq, ncnst, ntprog, ntdiag integer :: isd, ied, jsd, jed, npz integer :: unit integer :: file_unit @@ -1336,512 +1249,88 @@ subroutine fv_restart_end(Atm, grids_on_this_pe) character(len=3):: gn - ntileMe = size(Atm(:)) - - do n = 1, ntileMe - - if (.not. grids_on_this_pe(n)) then - cycle - endif - - call mpp_set_current_pelist(Atm(n)%pelist) + call mpp_set_current_pelist(Atm%pelist) - isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec; jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + isc = Atm%bd%isc; iec = Atm%bd%iec; jsc = Atm%bd%jsc; jec = Atm%bd%jec - isd = Atm(n)%bd%isd - ied = Atm(n)%bd%ied - jsd = Atm(n)%bd%jsd - jed = Atm(n)%bd%jed - npz = Atm(n)%npz - ncnst = Atm(n)%ncnst - ntprog = size(Atm(n)%q,4) - ntdiag = size(Atm(n)%qdiag,4) + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed + npz = Atm%npz + ncnst = Atm%ncnst + ntprog = size(Atm%q,4) + ntdiag = size(Atm%qdiag,4) - if (Atm(n)%grid_number > 1) then - write(gn,'(A2, I1)') " g", Atm(n)%grid_number - else - gn = '' - end if + if (Atm%grid_number > 1) then + write(gn,'(A2, I1)') " g", Atm%grid_number + else + gn = '' + end if - unit = stdout() - write(unit,*) - write(unit,*) 'fv_restart_end u ', trim(gn),' = ', mpp_chksum(Atm(n)%u(isc:iec,jsc:jec,:)) - write(unit,*) 'fv_restart_end v ', trim(gn),' = ', mpp_chksum(Atm(n)%v(isc:iec,jsc:jec,:)) - if ( .not. Atm(n)%flagstruct%hydrostatic ) & - write(unit,*) 'fv_restart_end w ', trim(gn),' = ', mpp_chksum(Atm(n)%w(isc:iec,jsc:jec,:)) - write(unit,*) 'fv_restart_end delp', trim(gn),' = ', mpp_chksum(Atm(n)%delp(isc:iec,jsc:jec,:)) - write(unit,*) 'fv_restart_end phis', trim(gn),' = ', mpp_chksum(Atm(n)%phis(isc:iec,jsc:jec)) + unit = stdout() + write(unit,*) + write(unit,*) 'fv_restart_end u ', trim(gn),' = ', mpp_chksum(Atm%u(isc:iec,jsc:jec,:)) + write(unit,*) 'fv_restart_end v ', trim(gn),' = ', mpp_chksum(Atm%v(isc:iec,jsc:jec,:)) + if ( .not. 
Atm%flagstruct%hydrostatic ) & + write(unit,*) 'fv_restart_end w ', trim(gn),' = ', mpp_chksum(Atm%w(isc:iec,jsc:jec,:)) + write(unit,*) 'fv_restart_end delp', trim(gn),' = ', mpp_chksum(Atm%delp(isc:iec,jsc:jec,:)) + write(unit,*) 'fv_restart_end phis', trim(gn),' = ', mpp_chksum(Atm%phis(isc:iec,jsc:jec)) #ifndef SW_DYNAMICS - write(unit,*) 'fv_restart_end pt ', trim(gn),' = ', mpp_chksum(Atm(n)%pt(isc:iec,jsc:jec,:)) - if (ntprog>0) & - write(unit,*) 'fv_restart_end q(prog) nq ', trim(gn),' =',ntprog, mpp_chksum(Atm(n)%q(isc:iec,jsc:jec,:,:)) - if (ntdiag>0) & - write(unit,*) 'fv_restart_end q(diag) nq ', trim(gn),' =',ntdiag, mpp_chksum(Atm(n)%qdiag(isc:iec,jsc:jec,:,:)) - do iq=1,min(17, ntprog) ! Check up to 17 tracers - call get_tracer_names(MODEL_ATMOS, iq, tracer_name) - write(unit,*) 'fv_restart_end '//trim(tracer_name)// trim(gn),' = ', mpp_chksum(Atm(n)%q(isc:iec,jsc:jec,:,iq)) - enddo + write(unit,*) 'fv_restart_end pt ', trim(gn),' = ', mpp_chksum(Atm%pt(isc:iec,jsc:jec,:)) + if (ntprog>0) & + write(unit,*) 'fv_restart_end q(prog) nq ', trim(gn),' =',ntprog, mpp_chksum(Atm%q(isc:iec,jsc:jec,:,:)) + if (ntdiag>0) & + write(unit,*) 'fv_restart_end q(diag) nq ', trim(gn),' =',ntdiag, mpp_chksum(Atm%qdiag(isc:iec,jsc:jec,:,:)) + do iq=1,min(17, ntprog) ! Check up to 17 tracers + call get_tracer_names(MODEL_ATMOS, iq, tracer_name) + write(unit,*) 'fv_restart_end '//trim(tracer_name)// trim(gn),' = ', mpp_chksum(Atm%q(isc:iec,jsc:jec,:,iq)) + enddo -!--------------- -! Check Min/Max: -!--------------- -! call prt_maxmin('ZS', Atm(n)%phis, isc, iec, jsc, jec, Atm(n)%ng, 1, 1./grav) - call pmaxmn_g('ZS', Atm(n)%phis, isc, iec, jsc, jec, 1, 1./grav, Atm(n)%gridstruct%area_64, Atm(n)%domain) - call pmaxmn_g('PS ', Atm(n)%ps, isc, iec, jsc, jec, 1, 0.01 , Atm(n)%gridstruct%area_64, Atm(n)%domain) - call prt_maxmin('PS*', Atm(n)%ps, isc, iec, jsc, jec, Atm(n)%ng, 1, 0.01) - call prt_maxmin('U ', Atm(n)%u(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) - call prt_maxmin('V ', Atm(n)%v(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) - if ( .not. Atm(n)%flagstruct%hydrostatic ) & - call prt_maxmin('W ', Atm(n)%w , isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) - call prt_maxmin('T ', Atm(n)%pt, isc, iec, jsc, jec, Atm(n)%ng, npz, 1.) - do iq=1, ntprog - call get_tracer_names ( MODEL_ATMOS, iq, tracer_name ) - call pmaxmn_g(trim(tracer_name), Atm(n)%q(isd:ied,jsd:jed,1:npz,iq:iq), isc, iec, jsc, jec, npz, & - 1., Atm(n)%gridstruct%area_64, Atm(n)%domain) - enddo -! Write4 energy correction term + !--------------- + ! Check Min/Max: + !--------------- + ! call prt_maxmin('ZS', Atm%phis, isc, iec, jsc, jec, Atm%ng, 1, 1./grav) + call pmaxmn_g('ZS', Atm%phis, isc, iec, jsc, jec, 1, 1./grav, Atm%gridstruct%area_64, Atm%domain) + call pmaxmn_g('PS ', Atm%ps, isc, iec, jsc, jec, 1, 0.01 , Atm%gridstruct%area_64, Atm%domain) + call prt_maxmin('PS*', Atm%ps, isc, iec, jsc, jec, Atm%ng, 1, 0.01) + call prt_maxmin('U ', Atm%u(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm%ng, npz, 1.) + call prt_maxmin('V ', Atm%v(isd:ied,jsd:jed,1:npz), isc, iec, jsc, jec, Atm%ng, npz, 1.) + if ( .not. Atm%flagstruct%hydrostatic ) & + call prt_maxmin('W ', Atm%w , isc, iec, jsc, jec, Atm%ng, npz, 1.) + call prt_maxmin('T ', Atm%pt, isc, iec, jsc, jec, Atm%ng, npz, 1.) + do iq=1, ntprog + call get_tracer_names ( MODEL_ATMOS, iq, tracer_name ) + call pmaxmn_g(trim(tracer_name), Atm%q(isd:ied,jsd:jed,1:npz,iq:iq), isc, iec, jsc, jec, npz, & + 1., Atm%gridstruct%area_64, Atm%domain) + enddo + ! 
Write4 energy correction term #endif - enddo + call fv_io_write_restart(Atm) + if (Atm%neststruct%nested) call fv_io_write_BCs(Atm) - call fv_io_write_restart(Atm, grids_on_this_pe) - do n=1,ntileMe - if (Atm(n)%neststruct%nested .and. grids_on_this_pe(n)) call fv_io_write_BCs(Atm(n)) - end do - - module_is_initialized = .FALSE. + module_is_initialized = .FALSE. #ifdef EFLUX_OUT - if( is_master() ) then - write(*,*) steps, 'Mean equivalent Heat flux for this integration period=',Atm(1)%idiag%efx_sum/real(max(1,Atm(1)%idiag%steps)), & - 'Mean nesting-related flux for this integration period=',Atm(1)%idiag%efx_sum_nest/real(max(1,Atm(1)%idiag%steps)), & - 'Mean mountain torque=',Atm(1)%idiag%mtq_sum/real(max(1,Atm(1)%idiag%steps)) - file_unit = get_unit() - open (unit=file_unit, file='e_flux.data', form='unformatted',status='unknown', access='sequential') - do n=1,steps - write(file_unit) Atm(1)%idiag%efx(n) - write(file_unit) Atm(1)%idiag%mtq(n) ! time series global mountain torque - !write(file_unit) Atm(1)%idiag%efx_nest(n) - enddo - close(unit=file_unit) - endif + if( is_master() ) then + write(*,*) steps, 'Mean equivalent Heat flux for this integration period=',Atm(1)%idiag%efx_sum/real(max(1,Atm(1)%idiag%steps)), & + 'Mean nesting-related flux for this integration period=',Atm(1)%idiag%efx_sum_nest/real(max(1,Atm(1)%idiag%steps)), & + 'Mean mountain torque=',Atm(1)%idiag%mtq_sum/real(max(1,Atm(1)%idiag%steps)) + file_unit = get_unit() + open (unit=file_unit, file='e_flux.data', form='unformatted',status='unknown', access='sequential') + do n=1,steps + write(file_unit) Atm(1)%idiag%efx(n) + write(file_unit) Atm(1)%idiag%mtq(n) ! time series global mountain torque + !write(file_unit) Atm(1)%idiag%efx_nest(n) + enddo + close(unit=file_unit) + endif #endif end subroutine fv_restart_end ! NAME="fv_restart_end" - subroutine d2c_setup(u, v, & - ua, va, & - uc, vc, dord4, & - isd,ied,jsd,jed, is,ie,js,je, npx,npy, & - grid_type, nested, & - se_corner, sw_corner, ne_corner, nw_corner, & - rsin_u,rsin_v,cosa_s,rsin2 ) - - logical, intent(in):: dord4 - real, intent(in) :: u(isd:ied,jsd:jed+1) - real, intent(in) :: v(isd:ied+1,jsd:jed) - real, intent(out), dimension(isd:ied ,jsd:jed ):: ua - real, intent(out), dimension(isd:ied ,jsd:jed ):: va - real, intent(out), dimension(isd:ied+1,jsd:jed ):: uc - real, intent(out), dimension(isd:ied ,jsd:jed+1):: vc - integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type - logical, intent(in) :: nested, se_corner, sw_corner, ne_corner, nw_corner - real, intent(in) :: rsin_u(isd:ied+1,jsd:jed) - real, intent(in) :: rsin_v(isd:ied,jsd:jed+1) - real, intent(in) :: cosa_s(isd:ied,jsd:jed) - real, intent(in) :: rsin2(isd:ied,jsd:jed) - -! Local - real, dimension(isd:ied,jsd:jed):: utmp, vtmp - real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28. - real, parameter:: a1 = 0.5625 - real, parameter:: a2 = -0.0625 - real, parameter:: c1 = -2./14. - real, parameter:: c2 = 11./14. - real, parameter:: c3 = 5./14. - integer npt, i, j, ifirst, ilast, id - - if ( dord4) then - id = 1 - else - id = 0 - endif - - - if (grid_type < 3 .and. .not. 
nested) then
-       npt = 4
-    else
-       npt = -2
-    endif
-
-    if ( nested) then
-
-       do j=jsd+1,jed-1
-          do i=isd,ied
-             utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
-          enddo
-       enddo
-       do i=isd,ied
-          j = jsd
-          utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
-          j = jed
-          utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
-       end do
-
-       do j=jsd,jed
-          do i=isd+1,ied-1
-             vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
-          enddo
-          i = isd
-          vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
-          i = ied
-          vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
-       enddo
-
-       do j=jsd,jed
-          do i=isd,ied
-             ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-             va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-          enddo
-       enddo
-
-    else
-
-       !----------
-       ! Interior:
-       !----------
-       utmp = 0.
-       vtmp = 0.
-
-
-       do j=max(npt,js-1),min(npy-npt,je+1)
-          do i=max(npt,isd),min(npx-npt,ied)
-             utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
-          enddo
-       enddo
-       do j=max(npt,jsd),min(npy-npt,jed)
-          do i=max(npt,is-1),min(npx-npt,ie+1)
-             vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
-          enddo
-       enddo
-
-       !----------
-       ! edges:
-       !----------
-       if (grid_type < 3) then
-
-          if ( js==1 .or. jsd<npt) then
-             do j=jsd,npt-1
-                do i=isd,ied
-                   utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                   vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-                enddo
-             enddo
-          endif
-          if ( (je+1)==npy .or. jed>=(npy-npt)) then
-             do j=npy-npt+1,jed
-                do i=isd,ied
-                   utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                   vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-                enddo
-             enddo
-          endif
-
-          if ( is==1 .or. isd<npt) then
-             do j=max(npt,jsd),min(npy-npt,jed)
-                do i=isd,npt-1
-                   utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                   vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-                enddo
-             enddo
-          endif
-          if ( (ie+1)==npx .or. ied>=(npx-npt)) then
-             do j=max(npt,jsd),min(npy-npt,jed)
-                do i=npx-npt+1,ied
-                   utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                   vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-                enddo
-             enddo
-          endif
-
-       endif
-       do j=js-1-id,je+1+id
-          do i=is-1-id,ie+1+id
-             ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-             va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-          enddo
-       enddo
-
-    end if
-
-! A -> C
-!--------------
-! Fix the edges
-!--------------
-! Xdir:
-    if( sw_corner ) then
-       do i=-2,0
-          utmp(i,0) = -vtmp(0,1-i)
-       enddo
-    endif
-    if( se_corner ) then
-       do i=0,2
-          utmp(npx+i,0) = vtmp(npx,i+1)
-       enddo
-    endif
-    if( ne_corner ) then
-       do i=0,2
-          utmp(npx+i,npy) = -vtmp(npx,je-i)
-       enddo
-    endif
-    if( nw_corner ) then
-       do i=-2,0
-          utmp(i,npy) = vtmp(0,je+i)
-       enddo
-    endif
-
-    if (grid_type < 3 .and. .not. nested) then
-       ifirst = max(3,    is-1)
-       ilast  = min(npx-2,ie+2)
-    else
-       ifirst = is-1
-       ilast  = ie+2
-    endif
-!---------------------------------------------
-! 4th order interpolation for interior points:
-!---------------------------------------------
-    do j=js-1,je+1
-       do i=ifirst,ilast
-          uc(i,j) = a1*(utmp(i-1,j)+utmp(i,j))+a2*(utmp(i-2,j)+utmp(i+1,j))
-       enddo
-    enddo
-
-    if (grid_type < 3) then
-! Xdir:
-       if( is==1 .and. .not. nested ) then
-          do j=js-1,je+1
-             uc(0,j) = c1*utmp(-2,j) + c2*utmp(-1,j) + c3*utmp(0,j)
-             uc(1,j) = ( t14*(utmp( 0,j)+utmp(1,j)) &
-                       + t12*(utmp(-1,j)+utmp(2,j)) &
-                       + t15*(utmp(-2,j)+utmp(3,j)) )*rsin_u(1,j)
-             uc(2,j) = c1*utmp(3,j) + c2*utmp(2,j) + c3*utmp(1,j)
-          enddo
-       endif
-
-       if( (ie+1)==npx .and. .not. nested ) then
-          do j=js-1,je+1
-             uc(npx-1,j) = c1*utmp(npx-3,j)+c2*utmp(npx-2,j)+c3*utmp(npx-1,j)
-             uc(npx,j) = (t14*(utmp(npx-1,j)+utmp(npx,j))+ &
-                          t12*(utmp(npx-2,j)+utmp(npx+1,j)) &
-                        + t15*(utmp(npx-3,j)+utmp(npx+2,j)))*rsin_u(npx,j)
-             uc(npx+1,j) = c3*utmp(npx,j)+c2*utmp(npx+1,j)+c1*utmp(npx+2,j)
-          enddo
-       endif
-
-    endif
-
-!------
-! Ydir:
-!------
-    if( sw_corner ) then
-       do j=-2,0
-          vtmp(0,j) = -utmp(1-j,0)
-       enddo
-    endif
-    if( nw_corner ) then
-       do j=0,2
-          vtmp(0,npy+j) = utmp(j+1,npy)
-       enddo
-    endif
-    if( se_corner ) then
-       do j=-2,0
-          vtmp(npx,j) = utmp(ie+j,0)
-       enddo
-    endif
-    if( ne_corner ) then
-       do j=0,2
-          vtmp(npx,npy+j) = -utmp(ie-j,npy)
-       enddo
-    endif
-
-    if (grid_type < 3) then
-
-       do j=js-1,je+2
-          if ( j==1 .and. .not. nested) then
-             do i=is-1,ie+1
-                vc(i,1) = (t14*(vtmp(i, 0)+vtmp(i,1)) &
-                         + t12*(vtmp(i,-1)+vtmp(i,2)) &
-                         + t15*(vtmp(i,-2)+vtmp(i,3)))*rsin_v(i,1)
-             enddo
-          elseif ( (j==0 .or. j==(npy-1)) .and. .not. nested) then
-             do i=is-1,ie+1
-                vc(i,j) = c1*vtmp(i,j-2) + c2*vtmp(i,j-1) + c3*vtmp(i,j)
-             enddo
-          elseif ( (j==2 .or. j==(npy+1)) .and. .not. nested) then
-             do i=is-1,ie+1
-                vc(i,j) = c1*vtmp(i,j+1) + c2*vtmp(i,j) + c3*vtmp(i,j-1)
-             enddo
-          elseif ( j==npy .and. .not. nested) then
-             do i=is-1,ie+1
-                vc(i,npy) = (t14*(vtmp(i,npy-1)+vtmp(i,npy)) &
-                           + t12*(vtmp(i,npy-2)+vtmp(i,npy+1)) &
-                           + t15*(vtmp(i,npy-3)+vtmp(i,npy+2)))*rsin_v(i,npy)
-             enddo
-          else
-! 4th order interpolation for interior points:
-             do i=is-1,ie+1
-                vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
-             enddo
-          endif
-       enddo
-    else
-! 4th order interpolation:
-       do j=js-1,je+2
-          do i=is-1,ie+1
-             vc(i,j) = a2*(vtmp(i,j-2)+vtmp(i,j+1))+a1*(vtmp(i,j-1)+vtmp(i,j))
-          enddo
-       enddo
-    endif
-
- end subroutine d2c_setup
-
- subroutine d2a_setup(u, v, ua, va, dord4, &
-                      isd,ied,jsd,jed, is,ie,js,je, npx,npy, &
-                      grid_type, nested, &
-                      cosa_s,rsin2 )
-
-   logical, intent(in):: dord4
-   real, intent(in) ::  u(isd:ied,jsd:jed+1)
-   real, intent(in) ::  v(isd:ied+1,jsd:jed)
-   real, intent(out), dimension(isd:ied  ,jsd:jed  ):: ua
-   real, intent(out), dimension(isd:ied  ,jsd:jed  ):: va
-   integer, intent(in) :: isd,ied,jsd,jed, is,ie,js,je, npx,npy,grid_type
-   real, intent(in) :: cosa_s(isd:ied,jsd:jed)
-   real, intent(in) :: rsin2(isd:ied,jsd:jed)
-   logical, intent(in) :: nested
-
-! Local
-   real, dimension(isd:ied,jsd:jed):: utmp, vtmp
-   real, parameter:: t11=27./28., t12=-13./28., t13=3./7., t14=6./7., t15=3./28.
-   real, parameter:: a1 =  0.5625
-   real, parameter:: a2 = -0.0625
-   real, parameter:: c1 = -2./14.
-   real, parameter:: c2 = 11./14.
-   real, parameter:: c3 =  5./14.
-   integer npt, i, j, ifirst, ilast, id
-
-   if ( dord4) then
-      id = 1
-   else
-      id = 0
-   endif
-
-
-   if (grid_type < 3 .and. .not. nested) then
-      npt = 4
-   else
-      npt = -2
-   endif
-
-   if ( nested) then
-
-      do j=jsd+1,jed-1
-         do i=isd,ied
-            utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
-         enddo
-      enddo
-      do i=isd,ied
-         j = jsd
-         utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
-         j = jed
-         utmp(i,j) = 0.5*(u(i,j)+u(i,j+1))
-      end do
-
-      do j=jsd,jed
-         do i=isd+1,ied-1
-            vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
-         enddo
-         i = isd
-         vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
-         i = ied
-         vtmp(i,j) = 0.5*(v(i,j)+v(i+1,j))
-      enddo
-
-   else
-
-      !----------
-      ! Interior:
-      !----------
-
-      do j=max(npt,js-1),min(npy-npt,je+1)
-         do i=max(npt,isd),min(npx-npt,ied)
-            utmp(i,j) = a2*(u(i,j-1)+u(i,j+2)) + a1*(u(i,j)+u(i,j+1))
-         enddo
-      enddo
-      do j=max(npt,jsd),min(npy-npt,jed)
-         do i=max(npt,is-1),min(npx-npt,ie+1)
-            vtmp(i,j) = a2*(v(i-1,j)+v(i+2,j)) + a1*(v(i,j)+v(i+1,j))
-         enddo
-      enddo
-
-      !----------
-      ! edges:
-      !----------
-      if (grid_type < 3) then
-
-         if ( js==1 .or. jsd<npt) then
-            do j=jsd,npt-1
-               do i=isd,ied
-                  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-               enddo
-            enddo
-         endif
-         if ( (je+1)==npy .or. jed>=(npy-npt)) then
-            do j=npy-npt+1,jed
-               do i=isd,ied
-                  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-               enddo
-            enddo
-         endif
-
-         if ( is==1 .or. isd<npt) then
-            do j=max(npt,jsd),min(npy-npt,jed)
-               do i=isd,npt-1
-                  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-               enddo
-            enddo
-         endif
-         if ( (ie+1)==npx .or. ied>=(npx-npt)) then
-            do j=max(npt,jsd),min(npy-npt,jed)
-               do i=npx-npt+1,ied
-                  utmp(i,j) = 0.5*(u(i,j) + u(i,j+1))
-                  vtmp(i,j) = 0.5*(v(i,j) + v(i+1,j))
-               enddo
-            enddo
-         endif
-
-      endif
-
-   end if
-
-
-
-   do j=js-1-id,je+1+id
-      do i=is-1-id,ie+1+id
-         ua(i,j) = (utmp(i,j)-vtmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-         va(i,j) = (vtmp(i,j)-utmp(i,j)*cosa_s(i,j)) * rsin2(i,j)
-      enddo
-   enddo
-
-end subroutine d2a_setup
 
 subroutine pmaxmn_g(qname, q, is, ie, js, je, km, fac, area, domain)
      character(len=*), intent(in):: qname
@@ -1861,7 +1350,10 @@ subroutine pmaxmn_g(qname, q, is, ie, js, je, km, fac, area, domain)
      do k=1,km
      do j=js,je
        do i=is,ie
-          if( q(i,j,k) < qmin ) then
+          !if ( (q(i,j,k) >= 1e30) .eqv. (q(i,j,k) < 1e30) ) then !NAN checking
+          !   print*, ' NAN found for ', qname, mpp_pe(), i,j,k
+          !else
+          if( q(i,j,k) < qmin) then
              qmin = q(i,j,k)
           elseif( q(i,j,k) > qmax ) then
              qmax = q(i,j,k)
@@ -1873,7 +1365,7 @@ subroutine pmaxmn_g(qname, q, is, ie, js, je, km, fac, area, domain)
      call mp_reduce_min(qmin)
      call mp_reduce_max(qmax)
 
-    gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1, .true.)
+    gmean = g_sum(domain, q(is:ie,js:je,km), is, ie, js, je, 3, area, 1, .true.) 
      if(is_master()) write(6,*) qname, qmax*fac, qmin*fac, gmean*fac
 
 end subroutine pmaxmn_g
diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90
index 48f373ac1..6fcc1b263 100644
--- a/tools/fv_surf_map.F90
+++ b/tools/fv_surf_map.F90
@@ -29,7 +29,6 @@ module fv_surf_map_mod
   use fv_grid_utils_mod,  only: great_circle_dist, latlon2xyz, v_prod, normalize_vect
   use fv_grid_utils_mod,  only: g_sum, global_mx, vect_cross
-  use fv_mp_mod,          only: ng
   use fv_mp_mod,          only: mp_stop, mp_reduce_min, mp_reduce_max, is_master
   use fv_timing_mod,      only: timing_on, timing_off
   use fv_arrays_mod,      only: fv_grid_bounds_type, R_GRID
@@ -54,8 +53,8 @@ module fv_surf_map_mod
! New NASA SRTM30 data: SRTM30.nc
!     nlon = 43200
!     nlat = 21600
-      logical:: zs_filter = .true.
-      logical:: zero_ocean = .true.    ! if true, no diffusive flux into water/ocean area
+      logical:: zs_filter = .true. 
+      logical:: zero_ocean = .true.    ! if true, no diffusive flux into water/ocean area
       integer           ::  nlon = 21600
       integer           ::  nlat = 10800
       real:: cd4 = 0.15   ! Dimensionless coeff for del-4 diffusion (with FCT)
@@ -66,13 +65,13 @@ module fv_surf_map_mod
      integer:: n_del2_weak = 12
      integer:: n_del2_strong = -1
      integer:: n_del4 = -1
-
+
      character(len=128):: surf_file = "INPUT/topo1min.nc"
      character(len=6)  :: surf_format = 'netcdf'
      logical :: namelist_read = .false.
-
- real(kind=R_GRID) da_min
+
+ real(kind=R_GRID) da_min
  real cos_grid
  character(len=3) :: grid_string = ''
@@ -85,14 +84,10 @@ module fv_surf_map_mod
  public surfdrv
  public del2_cubed_sphere, del4_cubed_sphere, FV3_zs_filter
 
-!---- version number -----
- character(len=128) :: version = '$Id$'
- character(len=128) :: tagname = '$Name$'
-
 contains
 
 subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_sg, phis, &
-                   stretch_fac, nested, npx_global, domain,grid_number, bd)
+                   stretch_fac, nested, bounded_domain, npx_global, domain,grid_number, bd)
 
  implicit none
#include <netcdf.inc>
@@ -100,24 +95,24 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_
! 
INPUT arrays type(fv_grid_bounds_type), intent(IN) :: bd - real(kind=R_GRID), intent(in)::area(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng) - real, intent(in):: dx(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng+1) - real, intent(in):: dy(bd%is-ng:bd%ie+ng+1, bd%js-ng:bd%je+ng) - real, intent(in), dimension(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng)::dxa, dya - real, intent(in)::dxc(bd%is-ng:bd%ie+ng+1, bd%js-ng:bd%je+ng) - real, intent(in)::dyc(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng+1) - - real(kind=R_GRID), intent(in):: grid(bd%is-ng:bd%ie+ng+1, bd%js-ng:bd%je+ng+1,2) - real(kind=R_GRID), intent(in):: agrid(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng,2) + real(kind=R_GRID), intent(in)::area(bd%isd:bd%ied, bd%jsd:bd%jed) + real, intent(in):: dx(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real, intent(in):: dy(bd%isd:bd%ied+1, bd%jsd:bd%jed) + real, intent(in), dimension(bd%isd:bd%ied, bd%jsd:bd%jed)::dxa, dya + real, intent(in)::dxc(bd%isd:bd%ied+1, bd%jsd:bd%jed) + real, intent(in)::dyc(bd%isd:bd%ied, bd%jsd:bd%jed+1) + + real(kind=R_GRID), intent(in):: grid(bd%isd:bd%ied+1, bd%jsd:bd%jed+1,2) + real(kind=R_GRID), intent(in):: agrid(bd%isd:bd%ied, bd%jsd:bd%jed,2) real, intent(IN):: sin_sg(bd%isd:bd%ied,bd%jsd:bd%jed,9) real(kind=R_GRID), intent(IN):: stretch_fac - logical, intent(IN) :: nested + logical, intent(IN) :: nested, bounded_domain integer, intent(IN) :: npx_global type(domain2d), intent(INOUT) :: domain integer, intent(IN) :: grid_number ! OUTPUT arrays - real, intent(out):: phis(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng) + real, intent(out):: phis(bd%isd:bd%ied, bd%jsd:bd%jed) ! Local: real, allocatable :: z2(:,:) ! Position of edges of the box containing the original data point: @@ -137,7 +132,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ integer status integer :: is, ie, js, je - integer :: isd, ied, jsd, jed + integer :: isd, ied, jsd, jed, ng real phis_coarse(bd%isd:bd%ied, bd%jsd:bd%jed) real wt @@ -149,6 +144,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ ied = bd%ied jsd = bd%jsd jed = bd%jed + ng = bd%ng if (nested) then !Divide all by grav rgrav = 1./grav @@ -179,12 +175,12 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ ! ! surface file must be in NetCDF format ! - if ( file_exist(surf_file) ) then + if ( file_exist(surf_file) ) then if (surf_format == "netcdf") then status = nf_open (surf_file, NF_NOWRITE, ncid) if (status .ne. NF_NOERR) call handle_err(status) - + status = nf_inq_dimid (ncid, 'lon', lonid) if (status .ne. 
NF_NOERR) call handle_err(status) status = nf_inq_dimlen (ncid, lonid, londim) @@ -204,7 +200,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ write(*,*) 'Opening USGS datset file:', surf_file, surf_format, nlon, nlat endif endif - + else call error_mesg ( 'surfdrv','Raw IEEE data format no longer supported !!!', FATAL ) endif @@ -336,7 +332,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ allocate ( sgh_g(isd:ied, jsd:jed) ) call timing_on('map_to_cubed') call map_to_cubed_raw(igh, nlon, jt, lat1(jstart:jend+1), lon1, zs, ft, grid, agrid, & - phis, oro_g, sgh_g, npx, npy, jstart, jend, stretch_fac, nested, npx_global, bd) + phis, oro_g, sgh_g, npx, npy, jstart, jend, stretch_fac, bounded_domain, npx_global, bd) if (is_master()) write(*,*) 'map_to_cubed_raw: master PE done' call timing_off('map_to_cubed') @@ -396,7 +392,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ write(*,*) 'Applying terrain filters. zero_ocean is', zero_ocean endif call FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, & - stretch_fac, nested, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, & + stretch_fac, bounded_domain, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, & agrid, sin_sg, phis, oro_g) call mpp_update_domains(phis, domain) endif ! end terrain filter @@ -427,7 +423,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_ ! nested grids this allows us to do the smoothing near the boundary ! without having to fill the boundary halo from the coarse grid - !ALSO for nesting: note that we are smoothing the terrain using + !ALSO for nesting: note that we are smoothing the terrain using ! the nested-grid's outer halo filled with the terrain computed ! directly from the input file computed here, and then ! 
replacing it with interpolated topography in fv_restart, so
@@ -457,7 +453,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_
 !-----------------------------------------------
    call global_mx(area, ng, da_min, da_max, bd)
-   if(zs_filter) call del4_cubed_sphere(npx, npy, sgh_g, area, dx, dy, dxc, dyc, sin_sg, 1, zero_ocean, oro_g, nested, domain, bd)
+   if(zs_filter) call del4_cubed_sphere(npx, npy, sgh_g, area, dx, dy, dxc, dyc, sin_sg, 1, zero_ocean, oro_g, bounded_domain, domain, bd)
    call global_mx(real(sgh_g,kind=R_GRID), ng, da_min, da_max, bd)
    if ( is_master() ) write(*,*) 'After filter SGH', trim(grid_string), ' min=', da_min, ' Max=', da_max
@@ -470,7 +466,7 @@ subroutine surfdrv(npx, npy, grid, agrid, area, dx, dy, dxa, dya, dxc, dyc, sin_
 end subroutine surfdrv
 
 subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
-                          stretch_fac, nested, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, &
+                          stretch_fac, bounded_domain, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, &
                           agrid, sin_sg, phis, oro )
    integer, intent(in):: isd, ied, jsd, jed, npx, npy, npx_global
    type(fv_grid_bounds_type), intent(IN) :: bd
@@ -481,9 +477,9 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
    real(kind=R_GRID), intent(in):: grid(isd:ied+1, jsd:jed+1,2)
    real(kind=R_GRID), intent(in):: agrid(isd:ied, jsd:jed, 2)
-   real, intent(IN):: sin_sg(9,isd:ied,jsd:jed)
+   real, intent(IN):: sin_sg(isd:ied,jsd:jed,9)
    real(kind=R_GRID), intent(IN):: stretch_fac
-   logical, intent(IN) :: nested
+   logical, intent(IN) :: bounded_domain
    real, intent(inout):: phis(isd:ied,jsd:jed)
    real, intent(inout):: oro(isd:ied,jsd:jed)
    type(domain2d), intent(INOUT) :: domain
@@ -493,12 +489,12 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
    if (is_master()) print*, ' Calling FV3_zs_filter...'
    if (.not. namelist_read) call read_namelist !when calling from external_ic
-   call global_mx(area, ng, da_min, da_max, bd)
+   call global_mx(area, bd%ng, da_min, da_max, bd)
 
    mdim = nint( real(npx_global) * min(10., stretch_fac) )
 
! Del-2: high resolution only
-!  call del2_cubed_sphere(npx, npy, phis, area, dx, dy, dxc, dyc, sin_sg, n_del2, cd2, zero_ocean, oro, nested, domain, bd)
+!  call del2_cubed_sphere(npx, npy, phis, area, dx, dy, dxc, dyc, sin_sg, n_del2, cd2, zero_ocean, oro, bounded_domain, domain, bd)
    if (n_del2_strong < 0) then
       if ( npx_global<=97) then
          n_del2_strong = 0
@@ -512,7 +508,7 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
! Applying strong 2-delta-filter:
    if ( n_del2_strong > 0 ) &
    call two_delta_filter(npx, npy, phis, area, dx, dy, dxa, dya, dxc, dyc, sin_sg, cd2, zero_ocean, &
-                         .true., 0, oro, nested, domain, bd, n_del2_strong)
+                         .true., 0, oro, bounded_domain, domain, bd, n_del2_strong)
 
! MFCT Del-4:
    if (n_del4 < 0) then
@@ -524,18 +520,18 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
          n_del4 = 3
       endif
    endif
-   call del4_cubed_sphere(npx, npy, phis, area, dx, dy, dxc, dyc, sin_sg, n_del4, zero_ocean, oro, nested, domain, bd)
+   call del4_cubed_sphere(npx, npy, phis, area, dx, dy, dxc, dyc, sin_sg, n_del4, zero_ocean, oro, bounded_domain, domain, bd)
 
! 
Applying weak 2-delta-filter: cd2 = 0.12*da_min call two_delta_filter(npx, npy, phis, area, dx, dy, dxa, dya, dxc, dyc, sin_sg, cd2, zero_ocean, & - .true., 1, oro, nested, domain, bd, n_del2_weak) + .true., 1, oro, bounded_domain, domain, bd, n_del2_weak) end subroutine FV3_zs_filter subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_sg, cd, zero_ocean, & - check_slope, filter_type, oro, nested, domain, bd, ntmax) + check_slope, filter_type, oro, bounded_domain, domain, bd, ntmax) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy integer, intent(in):: ntmax @@ -549,10 +545,10 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s real, intent(in):: dya(bd%isd:bd%ied, bd%jsd:bd%jed) real, intent(in):: dxc(bd%isd:bd%ied+1,bd%jsd:bd%jed) real, intent(in):: dyc(bd%isd:bd%ied, bd%jsd:bd%jed+1) - real, intent(in):: sin_sg(9,bd%isd:bd%ied,bd%jsd:bd%jed) + real, intent(in):: sin_sg(bd%isd:bd%ied,bd%jsd:bd%jed,9) real, intent(in):: oro(bd%isd:bd%ied, bd%jsd:bd%jed) ! 0==water, 1==land logical, intent(in):: zero_ocean, check_slope - logical, intent(in):: nested + logical, intent(in):: bounded_domain type(domain2d), intent(inout) :: domain ! OUTPUT arrays real, intent(inout):: q(bd%isd:bd%ied, bd%jsd:bd%jed) @@ -584,7 +580,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s jsd = bd%jsd jed = bd%jed - if ( nested ) then + if ( bounded_domain ) then is1 = is-1; ie2 = ie+2 js1 = js-1; je2 = je+2 else @@ -597,7 +593,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s else m_slope = 10. endif - + do 777 nt=1, ntmax call mpp_update_domains(q, domain) @@ -606,13 +602,13 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s if ( nt==1 .and. check_slope ) then do j=js,je do i=is,ie+1 - ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) + ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) ddx(i,j) = abs(ddx(i,j)) enddo enddo do j=js,je+1 do i=is,ie - ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) + ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) ddy(i,j) = abs(ddy(i,j)) enddo enddo @@ -626,7 +622,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s endif ! First step: average the corners: - if ( .not. nested .and. nt==1 ) then + if ( .not. bounded_domain .and. nt==1 ) then if ( is==1 .and. js==1 ) then q(1,1) = (q(1,1)*area(1,1)+q(0,1)*area(0,1)+q(1,0)*area(1,0)) & / ( area(1,1)+ area(0,1)+ area(1,0) ) @@ -661,7 +657,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s a1(i) = p1*(q(i-1,j)+q(i,j)) + p2*(q(i-2,j)+q(i+1,j)) enddo - if ( .not. nested ) then + if ( .not. bounded_domain ) then if ( is==1 ) then a1(0) = c1*q(-2,j) + c2*q(-1,j) + c3*q(0,j) a1(1) = 0.5*(((2.*dxa(0,j)+dxa(-1,j))*q(0,j)-dxa(0,j)*q(-1,j))/(dxa(-1,j)+dxa(0,j)) & @@ -697,10 +693,10 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s do i=is,ie+1 ddx(i,j) = (q(i-1,j)-q(i,j))/dxc(i,j) if ( extm(i-1).and.extm(i) ) then - ddx(i,j) = 0.5*(sin_sg(3,i-1,j)+sin_sg(1,i,j))*dy(i,j)*ddx(i,j) + ddx(i,j) = 0.5*(sin_sg(i-1,j,3)+sin_sg(i,j,1))*dy(i,j)*ddx(i,j) elseif ( abs(ddx(i,j)) > m_slope ) then fac = min(1., max(0.1,(abs(ddx(i,j))-m_slope)/m_slope ) ) - ddx(i,j) = fac*0.5*(sin_sg(3,i-1,j)+sin_sg(1,i,j))*dy(i,j)*ddx(i,j) + ddx(i,j) = fac*0.5*(sin_sg(i-1,j,3)+sin_sg(i,j,1))*dy(i,j)*ddx(i,j) else ddx(i,j) = 0. 
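! Net effect of the three branches above: at a shared local extremum
! (extm true at both i-1 and i) the metric-weighted diffusive flux is
! applied at full strength; where the slope magnitude exceeds m_slope the
! flux is ramped in by
!    fac = min(1., max(0.1, (|ddx|-m_slope)/m_slope))
! (e.g. |ddx| = 1.5*m_slope gives fac = 0.5, reaching 1 at twice the
! threshold); everywhere else the flux is zeroed, so terrain smoother than
! the slope limit is left untouched. The y-direction block below follows
! the same pattern.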
endif @@ -713,7 +709,7 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s a2(i,j) = p1*(q(i,j-1)+q(i,j)) + p2*(q(i,j-2)+q(i,j+1)) enddo enddo - if ( .not. nested ) then + if ( .not. bounded_domain ) then if( js==1 ) then do i=is,ie a2(i,0) = c1*q(i,-2) + c2*q(i,-1) + c3*q(i,0) @@ -758,10 +754,10 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s do i=is,ie ddy(i,j) = (q(i,j-1)-q(i,j))/dyc(i,j) if ( ext2(i,j-1) .and. ext2(i,j) ) then - ddy(i,j) = 0.5*(sin_sg(4,i,j-1)+sin_sg(2,i,j))*dx(i,j)*ddy(i,j) + ddy(i,j) = 0.5*(sin_sg(i,j-1,4)+sin_sg(i,j,2))*dx(i,j)*ddy(i,j) elseif ( abs(ddy(i,j))>m_slope ) then fac = min(1., max(0.1,(abs(ddy(i,j))-m_slope)/m_slope)) - ddy(i,j) = fac*0.5*(sin_sg(4,i,j-1)+sin_sg(2,i,j))*dx(i,j)*ddy(i,j) + ddy(i,j) = fac*0.5*(sin_sg(i,j-1,4)+sin_sg(i,j,2))*dx(i,j)*ddy(i,j) else ddy(i,j) = 0. endif @@ -794,13 +790,13 @@ subroutine two_delta_filter(npx, npy, q, area, dx, dy, dxa, dya, dxc, dyc, sin_s call mpp_update_domains(q, domain) do j=js,je do i=is,ie+1 - ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) + ddx(i,j) = (q(i,j) - q(i-1,j))/dxc(i,j) ddx(i,j) = abs(ddx(i,j)) enddo enddo do j=js,je+1 do i=is,ie - ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) + ddy(i,j) = (q(i,j) - q(i,j-1))/dyc(i,j) ddy(i,j) = abs(ddy(i,j)) enddo enddo @@ -817,7 +813,7 @@ end subroutine two_delta_filter - subroutine del2_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, cd, zero_ocean, oro, nested, domain, bd) + subroutine del2_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, cd, zero_ocean, oro, bounded_domain, domain, bd) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy integer, intent(in):: nmax @@ -831,16 +827,17 @@ subroutine del2_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, real, intent(in):: dyc(bd%isd:bd%ied, bd%jsd:bd%jed+1) real, intent(IN):: sin_sg(bd%isd:bd%ied,bd%jsd:bd%jed,9) real, intent(in):: oro(bd%isd:bd%ied, bd%jsd:bd%jed) ! 0==water, 1==land - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain type(domain2d), intent(INOUT) :: domain ! OUTPUT arrays - real, intent(inout):: q(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng) + real, intent(inout):: q(bd%isd:bd%ied, bd%jsd:bd%jed) ! Local: real ddx(bd%is:bd%ie+1,bd%js:bd%je), ddy(bd%is:bd%ie,bd%js:bd%je+1) integer i,j,n integer :: is, ie, js, je integer :: isd, ied, jsd, jed + integer :: ng is = bd%is ie = bd%ie @@ -850,30 +847,30 @@ subroutine del2_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, ied = bd%ied jsd = bd%jsd jed = bd%jed - + ng = bd%ng call mpp_update_domains(q,domain,whalo=ng,ehalo=ng,shalo=ng,nhalo=ng) ! First step: average the corners: - if ( is==1 .and. js==1 .and. .not. nested) then + if ( is==1 .and. js==1 .and. .not. bounded_domain) then q(1,1) = (q(1,1)*area(1,1)+q(0,1)*area(0,1)+q(1,0)*area(1,0)) & / ( area(1,1)+ area(0,1)+ area(1,0) ) q(0,1) = q(1,1) q(1,0) = q(1,1) endif - if ( (ie+1)==npx .and. js==1 .and. .not. nested) then + if ( (ie+1)==npx .and. js==1 .and. .not. bounded_domain) then q(ie, 1) = (q(ie,1)*area(ie,1)+q(npx,1)*area(npx,1)+q(ie,0)*area(ie,0)) & / ( area(ie,1)+ area(npx,1)+ area(ie,0)) q(npx,1) = q(ie,1) q(ie, 0) = q(ie,1) endif - if ( (ie+1)==npx .and. (je+1)==npy .and. .not. nested ) then + if ( (ie+1)==npx .and. (je+1)==npy .and. .not. 
bounded_domain ) then q(ie, je) = (q(ie,je)*area(ie,je)+q(npx,je)*area(npx,je)+q(ie,npy)*area(ie,npy)) & / ( area(ie,je)+ area(npx,je)+ area(ie,npy)) q(npx,je) = q(ie,je) q(ie,npy) = q(ie,je) endif - if ( is==1 .and. (je+1)==npy .and. .not. nested) then + if ( is==1 .and. (je+1)==npy .and. .not. bounded_domain) then q(1, je) = (q(1,je)*area(1,je)+q(0,je)*area(0,je)+q(1,npy)*area(1,npy)) & / ( area(1,je)+ area(0,je)+ area(1,npy)) q(0, je) = q(1,je) @@ -919,7 +916,7 @@ subroutine del2_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, end subroutine del2_cubed_sphere - subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, zero_ocean, oro, nested, domain, bd) + subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, zero_ocean, oro, bounded_domain, domain, bd) type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: npx, npy, nmax logical, intent(in):: zero_ocean @@ -930,16 +927,16 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, real, intent(in):: dxc(bd%isd:bd%ied+1,bd%jsd:bd%jed) real, intent(in):: dyc(bd%isd:bd%ied, bd%jsd:bd%jed+1) real, intent(IN):: sin_sg(bd%isd:bd%ied,bd%jsd:bd%jed,9) - real, intent(inout):: q(bd%is-ng:bd%ie+ng, bd%js-ng:bd%je+ng) - logical, intent(IN) :: nested + real, intent(inout):: q(bd%isd:bd%ied, bd%jsd:bd%jed) + logical, intent(IN) :: bounded_domain type(domain2d), intent(INOUT) :: domain ! diffusivity real :: diff(bd%is-3:bd%ie+2,bd%js-3:bd%je+2) -! diffusive fluxes: +! diffusive fluxes: real :: fx1(bd%is:bd%ie+1,bd%js:bd%je), fy1(bd%is:bd%ie,bd%js:bd%je+1) real :: fx2(bd%is:bd%ie+1,bd%js:bd%je), fy2(bd%is:bd%ie,bd%js:bd%je+1) real :: fx4(bd%is:bd%ie+1,bd%js:bd%je), fy4(bd%is:bd%ie,bd%js:bd%je+1) - real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: d2, win, wou + real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: d2, win, wou real, dimension(bd%is:bd%ie,bd%js:bd%je):: qlow, qmin, qmax, q0 real, parameter:: esl = 1.E-20 integer i,j, n @@ -956,7 +953,7 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, jsd = bd%jsd jed = bd%jed - !On a nested grid the haloes are not filled. Set to zero. + !On a bounded_domain grid the haloes are not filled. Set to zero. d2 = 0. win = 0. wou = 0. @@ -977,28 +974,28 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, call mpp_update_domains(q,domain) ! First step: average the corners: - if ( is==1 .and. js==1 .and. .not. nested) then + if ( is==1 .and. js==1 .and. .not. bounded_domain) then q(1,1) = (q(1,1)*area(1,1)+q(0,1)*area(0,1)+q(1,0)*area(1,0)) & / ( area(1,1)+ area(0,1)+ area(1,0) ) q(0,1) = q(1,1) q(1,0) = q(1,1) q(0,0) = q(1,1) endif - if ( (ie+1)==npx .and. js==1 .and. .not. nested) then + if ( (ie+1)==npx .and. js==1 .and. .not. bounded_domain) then q(ie, 1) = (q(ie,1)*area(ie,1)+q(npx,1)*area(npx,1)+q(ie,0)*area(ie,0)) & / ( area(ie,1)+ area(npx,1)+ area(ie,0)) q(npx,1) = q(ie,1) q(ie, 0) = q(ie,1) q(npx,0) = q(ie,1) endif - if ( (ie+1)==npx .and. (je+1)==npy .and. .not. nested) then + if ( (ie+1)==npx .and. (je+1)==npy .and. .not. bounded_domain) then q(ie, je) = (q(ie,je)*area(ie,je)+q(npx,je)*area(npx,je)+q(ie,npy)*area(ie,npy)) & / ( area(ie,je)+ area(npx,je)+ area(ie,npy)) q(npx, je) = q(ie,je) q(ie, npy) = q(ie,je) q(npx,npy) = q(ie,je) endif - if ( is==1 .and. (je+1)==npy .and. .not. nested) then + if ( is==1 .and. (je+1)==npy .and. .not. 
bounded_domain) then q(1, je) = (q(1,je)*area(1,je)+q(0,je)*area(0,je)+q(1,npy)*area(1,npy)) & / ( area(1,je)+ area(0,je)+ area(1,npy)) q(0, je) = q(1,je) @@ -1110,18 +1107,18 @@ subroutine del4_cubed_sphere(npx, npy, q, area, dx, dy, dxc, dyc, sin_sg, nmax, do j=js,je do i=is,ie+1 if ( fx4(i,j) > 0. ) then - fx4(i,j) = min(1., wou(i-1,j), win(i,j)) * fx4(i,j) + fx4(i,j) = min(1., wou(i-1,j), win(i,j)) * fx4(i,j) else - fx4(i,j) = min(1., win(i-1,j), wou(i,j)) * fx4(i,j) + fx4(i,j) = min(1., win(i-1,j), wou(i,j)) * fx4(i,j) endif enddo enddo do j=js,je+1 do i=is,ie if ( fy4(i,j) > 0. ) then - fy4(i,j) = min(1., wou(i,j-1), win(i,j)) * fy4(i,j) + fy4(i,j) = min(1., wou(i,j-1), win(i,j)) * fy4(i,j) else - fy4(i,j) = min(1., win(i,j-1), wou(i,j)) * fy4(i,j) + fy4(i,j) = min(1., win(i,j-1), wou(i,j)) * fy4(i,j) endif enddo enddo @@ -1155,7 +1152,7 @@ end subroutine del4_cubed_sphere subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & q2, f2, h2, npx, npy, jstart, jend, stretch_fac, & - nested, npx_global, bd) + bounded_domain, npx_global, bd) ! Input type(fv_grid_bounds_type), intent(IN) :: bd @@ -1168,7 +1165,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & real(kind=R_GRID), intent(in):: agrid(bd%isd:bd%ied, bd%jsd:bd%jed, 2) integer, intent(in):: jstart, jend real(kind=R_GRID), intent(IN) :: stretch_fac - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain ! Output real, intent(out):: q2(bd%isd:bd%ied,bd%jsd:bd%jed) ! Mapped data at the target resolution real, intent(out):: f2(bd%isd:bd%ied,bd%jsd:bd%jed) ! oro @@ -1230,7 +1227,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & if(is_master()) write(*,*) 'surf_map: Search started ....' ! stretch_fac * pi5/(npx-1) / (pi/nlat) - lat_crit = nint( stretch_fac*real(nlat)/real(npx_global-1) ) + lat_crit = nint( stretch_fac*real(nlat)/real(npx_global-1) ) lat_crit = min( jt, max( 4, lat_crit ) ) if ( jstart==1 ) then @@ -1260,7 +1257,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & hsum = 0. np = 0 do j=1,lat_crit - do i=1,im + do i=1,im np = np + 1 hsum = hsum + (qsp-zs(i,j))**2 enddo @@ -1291,7 +1288,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & np = 0 do jp=jend-lat_crit+1, jend j = jp - jstart + 1 - do i=1,im + do i=1,im np = np + 1 hsum = hsum + (qnp-zs(i,j))**2 enddo @@ -1308,7 +1305,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & if (((i < is .and. j < js) .or. & (i < is .and. j > je) .or. & (i > ie .and. j < js) .or. & - (i > ie .and. j > je)) .and. .not. nested) then + (i > ie .and. j > je)) .and. .not. 
bounded_domain) then q2(i,j) = 1.e25 f2(i,j) = 1.e25 h2(i,j) = 1.e25 @@ -1347,7 +1344,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & j1 = j1 - jstart + 1 j2 = j2 - jstart + 1 - lon_w = min( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) + lon_w = min( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) lon_e = max( grid(i,j,1), grid(i+1,j,1), grid(i,j+1,1), grid(i+1,j+1,1) ) if ( (lon_e-lon_w) > pi ) then @@ -1383,7 +1380,7 @@ subroutine map_to_cubed_raw(igh, im, jt, lat1, lon1, zs, ft, grid, agrid, & pc(k) = p1(k) + p2(k) + p3(k) + p4(k) enddo call normalize_vect( pc ) - + th0 = min( v_prod(p1,p3), v_prod(p2, p4) ) th1 = min( cos_grid, cos(0.25*acos(max(v_prod(p1,p3), v_prod(p2, p4))))) @@ -1503,7 +1500,7 @@ end subroutine handle_err subroutine remove_ice_sheets (lon, lat, lfrac, bd ) !--------------------------------- ! Bruce Wyman's fix for Antarctic -!--------------------------------- +!--------------------------------- type(fv_grid_bounds_type), intent(IN) :: bd real(kind=R_GRID), intent(in) :: lon(bd%isd:bd%ied,bd%jsd:bd%jed), lat(bd%isd:bd%ied,bd%jsd:bd%jed) real, intent(inout) :: lfrac(bd%isd:bd%ied,bd%jsd:bd%jed) @@ -1514,10 +1511,10 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ! lon = longitude in radians ! lat = latitude in radians ! lfrac = land-sea mask (land=1, sea=0) - + integer :: i, j real :: dtr, phs, phn - + is = bd%is ie = bd%ie js = bd%js @@ -1526,12 +1523,12 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ied = bd%ied jsd = bd%jsd jed = bd%jed - + dtr = acos(0.)/90. - phs = -83.9999*dtr + phs = -83.9999*dtr ! phn = -78.9999*dtr phn = -76.4*dtr - + do j = jsd, jed do i = isd, ied if ( lat(i,j) < phn ) then @@ -1543,7 +1540,7 @@ subroutine remove_ice_sheets (lon, lat, lfrac, bd ) ! replace between 270 and 360 deg if ( sin(lon(i,j)) < 0. .and. cos(lon(i,j)) > 0.) then lfrac(i,j) = 1.0 - cycle + cycle endif endif enddo @@ -1569,7 +1566,7 @@ subroutine read_namelist ierr = check_nml_error(io,'surf_map_nml') #else unit = open_namelist_file ( ) - ierr=1 + ierr=1 do while (ierr /= 0) read (unit, nml=surf_map_nml, iostat=io, end=10) ierr = check_nml_error(io,'surf_map_nml') diff --git a/tools/fv_timing.F90 b/tools/fv_timing.F90 index 2c2302e71..3740a7ab8 100644 --- a/tools/fv_timing.F90 +++ b/tools/fv_timing.F90 @@ -55,10 +55,6 @@ module fv_timing_mod logical, private :: module_initialized = .false. -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine timing_init ! @@ -84,7 +80,7 @@ subroutine timing_init ! ... To reduce the overhead for the first call ! #if defined(SPMD) - wclk = MPI_Wtime() + wclk = MPI_Wtime() totim = wclk #else # if defined( IRIX64 ) || ( defined FFC ) @@ -112,7 +108,7 @@ subroutine timing_on(blk_name) character(len=20) :: UC_blk_name - character(len=20) :: ctmp + character(len=20) :: ctmp integer i integer iblk @@ -138,7 +134,7 @@ subroutine timing_on(blk_name) iblk =i endif enddo - + if ( iblk .eq. 0 ) then tblk=tblk+1 iblk=tblk @@ -163,7 +159,7 @@ subroutine timing_on(blk_name) last(iblk)%usr = wclk last(iblk)%sys = 0.0 # endif -#endif +#endif end subroutine timing_on @@ -197,12 +193,12 @@ subroutine timing_off(blk_name) iblk =i endif enddo - + ! write(*,*) 'timing_off ', ctmp, tblk, tblk if ( iblk .eq. 0 ) then call mpp_error(FATAL,'fv_timing_mod: timing_off called before timing_on for: '//trim(blk_name)) ! write(*,*) 'stop in timing off in ', ctmp -! stop +! 
stop endif #if defined(SPMD) @@ -212,7 +208,7 @@ subroutine timing_off(blk_name) last(iblk)%usr = wclk last(iblk)%sys = 0.0 #else -# if defined( IRIX64 ) || ( defined FFC ) +# if defined( IRIX64 ) || ( defined FFC ) totim = etime(tarray) accum(iblk)%usr = accum(iblk)%usr + & tarray(1) - last(iblk)%usr diff --git a/tools/fv_treat_da_inc.F90 b/tools/fv_treat_da_inc.F90 new file mode 100644 index 000000000..e42878741 --- /dev/null +++ b/tools/fv_treat_da_inc.F90 @@ -0,0 +1,476 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. +!*********************************************************************** +!------------------------------------------------------------------------------- +!> @brief Treat DA increment +!> @author Xi.Chen +!> @date 02/12/2016 ! ! REVISION HISTORY: ! 02/12/2016 - Initial Version +!------------------------------------------------------------------------------- + +#ifdef OVERLOAD_R4 +#define _GET_VAR1 get_var1_real +#else +#define _GET_VAR1 get_var1_double +#endif + +module fv_treat_da_inc_mod + + use fms_mod, only: file_exist, read_data, & field_exist, write_version_number + use mpp_mod, only: mpp_error, FATAL, NOTE, mpp_pe + use mpp_domains_mod, only: mpp_get_tile_id, & domain2d, & mpp_update_domains, & NORTH, & EAST + use tracer_manager_mod,only: get_tracer_names, & get_number_tracers, & get_tracer_index + use field_manager_mod, only: MODEL_ATMOS + + use constants_mod, only: pi=>pi_8, omega, grav, kappa, & rdgas, rvgas, cp_air + use fv_arrays_mod, only: fv_atmos_type, & fv_grid_type, & fv_grid_bounds_type, & R_GRID + use fv_grid_utils_mod, only: ptop_min, g_sum, & mid_pt_sphere, get_unit_vect2, & get_latlon_vector, inner_prod, & cubed_to_latlon + use fv_mp_mod, only: is_master, & fill_corners, & YDir, & mp_reduce_min, & mp_reduce_max + use sim_nc_mod, only: open_ncfile, & close_ncfile, & get_ncdim1, & get_var1_double, & get_var2_real, & get_var3_r4, & get_var1_real + implicit none + private + + public :: read_da_inc,remap_coef + +contains + !============================================================================= + !> @brief description + !> @author Xi.Chen + !> @date 02/12/2016 + + !> Do NOT Have delz increment available yet + !> EMC reads in delz increments but does NOT use them!! + subroutine read_da_inc(Atm, fv_domain, bd, npz_in, nq, u, v, q, delp, pt, is_in, js_in, ie_in, je_in ) + type(fv_atmos_type), intent(inout) :: Atm + type(domain2d), intent(inout) :: fv_domain + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: npz_in, nq, is_in, js_in, ie_in, je_in + real, intent(inout), dimension(is_in:ie_in, js_in:je_in+1,npz_in):: u !
D grid zonal wind (m/s) + real, intent(inout), dimension(is_in:ie_in+1,js_in:je_in ,npz_in):: v ! D grid meridional wind (m/s) + real, intent(inout) :: delp(is_in:ie_in ,js_in:je_in ,npz_in) ! pressure thickness (pascal) + real, intent(inout) :: pt( is_in:ie_in ,js_in:je_in ,npz_in) ! temperature (K) + real, intent(inout) :: q( is_in:ie_in ,js_in:je_in ,npz_in, nq) ! + + ! local + real :: deg2rad + character(len=128) :: fname + real(kind=4), allocatable:: wk1(:), wk2(:,:), wk3(:,:,:) + real(kind=4), allocatable:: wk3_u(:,:,:), wk3_v(:,:,:) + real, allocatable:: tp(:,:,:), qp(:,:,:) + real, dimension(:,:,:), allocatable:: u_inc, v_inc, ud_inc, vd_inc + real, allocatable:: lat(:), lon(:) + real, allocatable:: pt_c(:,:,:), pt_d(:,:,:) + real:: s2c(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je,4) + real:: s2c_c(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je,4) + real:: s2c_d(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1,4) + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je):: & + id1, id2, jdc + integer, dimension(Atm%bd%is:Atm%bd%ie+1,Atm%bd%js:Atm%bd%je)::& + id1_c, id2_c, jdc_c + integer, dimension(Atm%bd%is:Atm%bd%ie,Atm%bd%js:Atm%bd%je+1)::& + id1_d, id2_d, jdc_d + + integer:: i, j, k, im, jm, km, npt + integer:: i1, i2, j1, ncid + integer:: jbeg, jend + integer tsize(3) + real(kind=R_GRID), dimension(2):: p1, p2, p3 + real(kind=R_GRID), dimension(3):: e1, e2, ex, ey + + logical:: found + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + integer :: sphum, liq_wat, o3mr + + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + isd = Atm%bd%isd + ied = Atm%bd%ied + jsd = Atm%bd%jsd + jed = Atm%bd%jed + + deg2rad = pi/180. + + fname = 'INPUT/'//Atm%flagstruct%res_latlon_dynamics + + if( file_exist(fname) ) then + call open_ncfile( fname, ncid ) ! open the file + call get_ncdim1( ncid, 'lon', tsize(1) ) + call get_ncdim1( ncid, 'lat', tsize(2) ) + call get_ncdim1( ncid, 'lev', tsize(3) ) + + im = tsize(1); jm = tsize(2); km = tsize(3) + + if (km.ne.npz_in) then + if (is_master()) print *, 'km = ', km + call mpp_error(FATAL, & + '==> Error in read_da_inc: km is not equal to npz_in') + endif + + if(is_master()) write(*,*) fname, ' DA increment dimensions:', tsize + + allocate ( lon(im) ) + allocate ( lat(jm) ) + + call _GET_VAR1 (ncid, 'lon', im, lon ) + call _GET_VAR1 (ncid, 'lat', jm, lat ) + + ! Convert to radian + do i=1,im + lon(i) = lon(i) * deg2rad ! lon(1) = 0. + enddo + do j=1,jm + lat(j) = lat(j) * deg2rad + enddo + + else + call mpp_error(FATAL,'==> Error in read_da_inc: Expected file '& + //trim(fname)//' for DA increment does not exist') + endif + + ! Initialize lat-lon to Cubed bi-linear interpolation coeff: + call remap_coef( is, ie, js, je, isd, ied, jsd, jed, & + im, jm, lon, lat, id1, id2, jdc, s2c, & + Atm%gridstruct%agrid) + + ! Find bounding latitudes: + jbeg = jm-1; jend = 2 + do j=js,je + do i=is,ie + j1 = jdc(i,j) + jbeg = min(jbeg, j1) + jend = max(jend, j1+1) + enddo + enddo + + sphum = get_tracer_index(MODEL_ATMOS, 'sphum') + o3mr = get_tracer_index(MODEL_ATMOS, 'o3mr') + liq_wat = get_tracer_index(MODEL_ATMOS, 'liq_wat') + + ! 
perform increments on scalars + allocate ( wk3(1:im,jbeg:jend, 1:km) ) + allocate ( tp(is:ie,js:je,km) ) + + call apply_inc_on_3d_scalar('T_inc',pt, is_in, js_in, ie_in, je_in) + call apply_inc_on_3d_scalar('delp_inc',delp, is_in, js_in, ie_in, je_in) + call apply_inc_on_3d_scalar('sphum_inc',q(:,:,:,sphum), is_in, js_in, ie_in, je_in) + call apply_inc_on_3d_scalar('liq_wat_inc',q(:,:,:,liq_wat), is_in, js_in, ie_in, je_in) + call apply_inc_on_3d_scalar('o3mr_inc',q(:,:,:,o3mr), is_in, js_in, ie_in, je_in) + + deallocate ( tp ) + deallocate ( wk3 ) + + ! perform increments on winds + allocate (pt_c(isd:ied+1,jsd:jed ,2)) + allocate (pt_d(isd:ied ,jsd:jed+1,2)) + allocate (ud_inc(is:ie , js:je+1, km)) + allocate (vd_inc(is:ie+1, js:je , km)) + + call get_staggered_grid( & + is, ie, js, je, & + isd, ied, jsd, jed, & + Atm%gridstruct%grid, pt_c, pt_d) + + !------ pt_c part ------ + ! Initialize lat-lon to Cubed bi-linear interpolation coeff: + call remap_coef( is, ie+1, js, je, isd, ied+1, jsd, jed, & + im, jm, lon, lat, id1_c, id2_c, jdc_c, s2c_c, & + pt_c) + + ! Find bounding latitudes: + jbeg = jm-1; jend = 2 + do j=js,je + do i=is,ie+1 + j1 = jdc_c(i,j) + jbeg = min(jbeg, j1) + jend = max(jend, j1+1) + enddo + enddo + + allocate ( wk3_u(1:im,jbeg:jend, 1:km) ) + allocate ( wk3_v(1:im,jbeg:jend, 1:km) ) + allocate ( u_inc(is:ie+1,js:je,km) ) + allocate ( v_inc(is:ie+1,js:je,km) ) + + call get_var3_r4( ncid, 'u_inc', 1,im, jbeg,jend, 1,km, wk3_u ) + call get_var3_r4( ncid, 'v_inc', 1,im, jbeg,jend, 1,km, wk3_v ) + + do k=1,km + do j=js,je + do i=is,ie+1 + i1 = id1_c(i,j) + i2 = id2_c(i,j) + j1 = jdc_c(i,j) + u_inc(i,j,k) = s2c_c(i,j,1)*wk3_u(i1,j1 ,k) + & + s2c_c(i,j,2)*wk3_u(i2,j1 ,k) + & + s2c_c(i,j,3)*wk3_u(i2,j1+1,k) + & + s2c_c(i,j,4)*wk3_u(i1,j1+1,k) + v_inc(i,j,k) = s2c_c(i,j,1)*wk3_v(i1,j1 ,k) + & + s2c_c(i,j,2)*wk3_v(i2,j1 ,k) + & + s2c_c(i,j,3)*wk3_v(i2,j1+1,k) + & + s2c_c(i,j,4)*wk3_v(i1,j1+1,k) + p1(:) = Atm%gridstruct%grid(i,j ,1:2) + p2(:) = Atm%gridstruct%grid(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e2) + call get_latlon_vector(p3, ex, ey) + vd_inc(i,j,k) = u_inc(i,j,k)*inner_prod(e2,ex) + & + v_inc(i,j,k)*inner_prod(e2,ey) + v(i,j,k) = v(i,j,k) + vd_inc(i,j,k) + enddo + enddo + enddo + + deallocate ( u_inc, v_inc ) + deallocate ( wk3_u, wk3_v ) + + !------ pt_d part ------ + ! Initialize lat-lon to Cubed bi-linear interpolation coeff: + call remap_coef( is, ie, js, je+1, isd, ied, jsd, jed+1, & + im, jm, lon, lat, id1_d, id2_d, jdc_d, s2c_d, & + pt_d) + + ! 
Find bounding latitudes: + jbeg = jm-1; jend = 2 + do j=js,je+1 + do i=is,ie + j1 = jdc_d(i,j) + jbeg = min(jbeg, j1) + jend = max(jend, j1+1) + enddo + enddo + + allocate ( wk3_u(1:im,jbeg:jend, 1:km) ) + allocate ( wk3_v(1:im,jbeg:jend, 1:km) ) + allocate ( u_inc(is:ie,js:je+1,km) ) + allocate ( v_inc(is:ie,js:je+1,km) ) + + call get_var3_r4( ncid, 'u_inc', 1,im, jbeg,jend, 1,km, wk3_u ) + call get_var3_r4( ncid, 'v_inc', 1,im, jbeg,jend, 1,km, wk3_v ) + + do k=1,km + do j=js,je+1 + do i=is,ie + i1 = id1_d(i,j) + i2 = id2_d(i,j) + j1 = jdc_d(i,j) + u_inc(i,j,k) = s2c_d(i,j,1)*wk3_u(i1,j1 ,k) + & + s2c_d(i,j,2)*wk3_u(i2,j1 ,k) + & + s2c_d(i,j,3)*wk3_u(i2,j1+1,k) + & + s2c_d(i,j,4)*wk3_u(i1,j1+1,k) + v_inc(i,j,k) = s2c_d(i,j,1)*wk3_v(i1,j1 ,k) + & + s2c_d(i,j,2)*wk3_v(i2,j1 ,k) + & + s2c_d(i,j,3)*wk3_v(i2,j1+1,k) + & + s2c_d(i,j,4)*wk3_v(i1,j1+1,k) + p1(:) = Atm%gridstruct%grid(i, j,1:2) + p2(:) = Atm%gridstruct%grid(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + call get_unit_vect2(p1, p2, e1) + call get_latlon_vector(p3, ex, ey) + ud_inc(i,j,k) = u_inc(i,j,k)*inner_prod(e1,ex) + & + v_inc(i,j,k)*inner_prod(e1,ey) + u(i,j,k) = u(i,j,k) + ud_inc(i,j,k) + enddo + enddo + enddo + + deallocate ( u_inc, v_inc ) + deallocate ( wk3_u, wk3_v ) + +!rab The following is not necessary as ua/va will be re-calculated during model startup +!rab call cubed_to_latlon(Atm%u, Atm%v, Atm%ua, Atm%va, & +!rab Atm%gridstruct, Atm%flagstruct%npx, Atm%flagstruct%npy, & +!rab Atm%flagstruct%npz, 1, Atm%gridstruct%grid_type, & +!rab fv_domain, Atm%gridstruct%nested, & +!rab Atm%flagstruct%c2l_ord, Atm%bd) + + !------ winds clean up ------ + deallocate ( pt_c, pt_d, ud_inc, vd_inc ) + !------ all clean up ------ + deallocate ( lon, lat ) + + contains + !--------------------------------------------------------------------------- + subroutine apply_inc_on_3d_scalar(field_name,var, is_in, js_in, ie_in, je_in) + character(len=*), intent(in) :: field_name + integer, intent(IN) :: is_in, js_in, ie_in, je_in + real, dimension(is_in:ie_in,js_in:je_in,1:km), intent(inout) :: var + + if (is_master()) print*, 'Reading increments ', field_name + call get_var3_r4( ncid, field_name, 1,im, jbeg,jend, 1,km, wk3 ) + if (is_master()) print*,trim(field_name),'before=',var(4,4,30) + + do k=1,km + do j=js,je + do i=is,ie + i1 = id1(i,j) + i2 = id2(i,j) + j1 = jdc(i,j) + tp(i,j,k) = s2c(i,j,1)*wk3(i1,j1 ,k) + s2c(i,j,2)*wk3(i2,j1 ,k)+& + s2c(i,j,3)*wk3(i2,j1+1,k) + s2c(i,j,4)*wk3(i1,j1+1,k) + var(i,j,k) = var(i,j,k)+tp(i,j,k) + enddo + enddo + enddo + if (is_master()) print*,trim(field_name),'after=',var(4,4,30),tp(4,4,30) + + end subroutine apply_inc_on_3d_scalar + !--------------------------------------------------------------------------- + end subroutine read_da_inc + !============================================================================= + subroutine remap_coef( is, ie, js, je, isd, ied, jsd, jed, & + im, jm, lon, lat, id1, id2, jdc, s2c, agrid ) + + integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed + integer, intent(in):: im, jm + real, intent(in):: lon(im), lat(jm) + real, intent(out):: s2c(is:ie,js:je,4) + integer, intent(out), dimension(is:ie,js:je):: id1, id2, jdc + real, intent(in):: agrid(isd:ied,jsd:jed,2) + ! local: + real :: rdlon(im) + real :: rdlat(jm) + real:: a1, b1 + integer i,j, i1, i2, jc, i0, j0 + do i=1,im-1 + rdlon(i) = 1. / (lon(i+1) - lon(i)) + enddo + rdlon(im) = 1. / (lon(1) + 2.*pi - lon(im)) + + do j=1,jm-1 + rdlat(j) = 1. / (lat(j+1) - lat(j)) + enddo + + ! 
* Interpolate to cubed sphere cell center do 5000 j=js,je + + do i=is,ie + + if ( agrid(i,j,1)>lon(im) ) then + i1 = im; i2 = 1 + a1 = (agrid(i,j,1)-lon(im)) * rdlon(im) + elseif ( agrid(i,j,1)<lon(1) ) then + i1 = im; i2 = 1 + a1 = (agrid(i,j,1)+2.*pi-lon(im)) * rdlon(im) + else + do i0=1,im-1 + if ( agrid(i,j,1)>=lon(i0) .and. agrid(i,j,1)<=lon(i0+1) ) then + i1 = i0; i2 = i0+1 + a1 = (agrid(i,j,1)-lon(i1)) * rdlon(i0) + go to 111 + endif + enddo + endif +111 continue + + if ( agrid(i,j,2)<lat(1) ) then + jc = 1 + b1 = 0. + elseif ( agrid(i,j,2)>lat(jm) ) then + jc = jm-1 + b1 = 1. + else + do j0=1,jm-1 + if ( agrid(i,j,2)>=lat(j0) .and. agrid(i,j,2)<=lat(j0+1) ) then + jc = j0 + b1 = (agrid(i,j,2)-lat(jc)) * rdlat(jc) + go to 222 + endif + enddo + endif +222 continue + + if ( a1<0.0 .or. a1>1.0 .or. b1<0.0 .or. b1>1.0 ) then + write(*,*) 'gid=', mpp_pe(), i,j,a1, b1 + endif + + s2c(i,j,1) = (1.-a1) * (1.-b1) + s2c(i,j,2) = a1 * (1.-b1) + s2c(i,j,3) = a1 * b1 + s2c(i,j,4) = (1.-a1) * b1 + id1(i,j) = i1 + id2(i,j) = i2 + jdc(i,j) = jc + enddo !i-loop +5000 continue ! j-loop + + end subroutine remap_coef + !============================================================================= + subroutine get_staggered_grid( & + is, ie, js, je, & + isd, ied, jsd, jed, & + pt_b, pt_c, pt_d) + integer, intent(in) :: is, ie, js, je, isd, ied, jsd, jed + real, dimension(isd:ied+1,jsd:jed+1,2), intent(in) :: pt_b + real, dimension(isd:ied+1,jsd:jed ,2), intent(out) :: pt_c + real, dimension(isd:ied ,jsd:jed+1,2), intent(out) :: pt_d + ! local + real(kind=R_GRID), dimension(2):: p1, p2, p3 + integer :: i, j + + do j = js,je+1 + do i = is,ie + p1(:) = pt_b(i, j,1:2) + p2(:) = pt_b(i+1,j,1:2) + call mid_pt_sphere(p1, p2, p3) + pt_d(i,j,1:2) = p3(:) + enddo + enddo + do j = js,je + do i = is,ie+1 + p1(:) = pt_b(i,j ,1:2) + p2(:) = pt_b(i,j+1,1:2) + call mid_pt_sphere(p1, p2, p3) + pt_c(i,j,1:2) = p3(:) + enddo + enddo + + end subroutine get_staggered_grid + !============================================================================= +end module fv_treat_da_inc_mod + diff --git a/tools/init_hydro.F90 b/tools/init_hydro.F90 index b03583f33..360250f35 100644 --- a/tools/init_hydro.F90 +++ b/tools/init_hydro.F90 @@ -18,7 +18,6 @@ !* License along with the FV3 dynamical core. !* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** -! $Id$ module init_hydro_mod @@ -30,27 +29,20 @@ module init_hydro_mod use mpp_domains_mod, only: domain2d use fv_arrays_mod, only: R_GRID ! use fv_diagnostics_mod, only: prt_maxmin -!!! DEBUG CODE - use mpp_mod, only: mpp_pe -!!! END DEBUG CODE implicit none private public :: p_var, hydro_eq -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains !------------------------------------------------------------------------------- subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & delp, delz, pt, ps, pe, peln, pk, pkz, cappa, q, ng, nq, area, & dry_mass, adjust_dry_mass, mountain, moist_phys, & - hydrostatic, nwat, domain, make_nh) - + hydrostatic, nwat, domain, adiabatic, make_nh) + ! Given (ptop, delp) computes (ps, pk, pe, peln, pkz) ! Input: integer, intent(in):: km integer, intent(in):: ifirst, ilast ! Long strip integer, intent(in):: jfirst, jlast !
Latitude strip integer, intent(in):: nq, nwat integer, intent(in):: ng - logical, intent(in):: adjust_dry_mass, mountain, moist_phys, hydrostatic + logical, intent(in):: adjust_dry_mass, mountain, moist_phys, hydrostatic, adiabatic real, intent(in):: dry_mass, cappa, ptop, ptop_min real, intent(in ):: pt(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng, km) - real, intent(inout):: delz(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng, km) + real, intent(inout):: delz(ifirst:ilast,jfirst:jlast, km) real, intent(inout):: delp(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng, km) real, intent(inout):: q(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng, km, nq) real(kind=R_GRID), intent(IN) :: area(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) @@ -76,7 +68,7 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & ! Local integer sphum, liq_wat, ice_wat - integer rainwat, snowwat, graupel ! Lin Micro-physics + integer rainwat, snowwat, graupel ! GFDL Cloud Microphysics real ratio(ifirst:ilast) real pek, lnp, ak1, rdg, dpd, zvir integer i, j, k @@ -101,7 +93,7 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & if ( adjust_dry_mass ) then do i=ifirst,ilast ratio(i) = 1. + dpd/(ps(i,j)-ptop) - enddo + enddo do k=1,km do i=ifirst,ilast delp(i,j,k) = delp(i,j,k) * ratio(i) @@ -143,18 +135,24 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & endif enddo + if ( adiabatic ) then + zvir = 0. + else + zvir = rvgas/rdgas - 1. + endif + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') if ( .not.hydrostatic ) then rdg = -rdgas / grav if ( present(make_nh) ) then if ( make_nh ) then - delz = 1.e25 -!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,delz,rdg,pt,peln) + delz = 1.e25 +!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,delz,rdg,pt,peln,zvir,sphum,q) do k=1,km do j=jfirst,jlast do i=ifirst,ilast - delz(i,j,k) = rdg*pt(i,j,k)*(peln(i,k+1,j)-peln(i,k,j)) + delz(i,j,k) = rdg*pt(i,j,k)*(1.+zvir*q(i,j,k,sphum))*(peln(i,k+1,j)-peln(i,k,j)) enddo enddo enddo @@ -166,8 +164,6 @@ subroutine p_var(km, ifirst, ilast, jfirst, jlast, ptop, ptop_min, & !------------------------------------------------------------------ ! The following form is the same as in "fv_update_phys.F90" !------------------------------------------------------------------ - zvir = rvgas/rdgas - 1. - sphum = get_tracer_index (MODEL_ATMOS, 'sphum') !$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,pkz,cappa,rdg, & !$OMP delp,pt,zvir,q,sphum,delz) do k=1,km @@ -196,14 +192,14 @@ end subroutine p_var - subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & + subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & cappa, ptop, ps, delp, q, nq, area, nwat, & dry_mass, adjust_dry_mass, moist_phys, dpd, domain) ! !INPUT PARAMETERS: integer km integer ifirst, ilast ! Long strip - integer jfirst, jlast ! Latitude strip + integer jfirst, jlast ! Latitude strip integer nq, ng, nwat real, intent(in):: dry_mass real, intent(in):: ptop @@ -213,7 +209,7 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & real(kind=R_GRID), intent(IN) :: area(ifirst-ng:ilast+ng, jfirst-ng:jlast+ng) type(domain2d), intent(IN) :: domain -! !INPUT/OUTPUT PARAMETERS: +! !INPUT/OUTPUT PARAMETERS: real, intent(in):: q(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng,km,nq) real, intent(in)::delp(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng,km) ! real, intent(inout):: ps(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) ! 
surface pressure @@ -223,7 +219,7 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & real psmo, psdry integer i, j, k -!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,ps,ptop,psd,delp,nwat,q) +!$OMP parallel do default(none) shared(ifirst,ilast,jfirst,jlast,km,ps,ptop,psd,delp,nwat,q) do j=jfirst,jlast do i=ifirst,ilast @@ -252,13 +248,13 @@ subroutine drymadj(km, ifirst, ilast, jfirst, jlast, ng, & ! Check global maximum/minimum #ifndef QUICK_SUM - psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1, .true.) + psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1, .true.) psmo = g_sum(domain, ps(ifirst:ilast,jfirst:jlast), ifirst, ilast, jfirst, jlast, & - ng, area, 1, .true.) + ng, area, 1, .true.) #else - psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1) + psdry = g_sum(domain, psd, ifirst, ilast, jfirst, jlast, ng, area, 1) psmo = g_sum(domain, ps(ifirst:ilast,jfirst:jlast), ifirst, ilast, jfirst, jlast, & - ng, area, 1) + ng, area, 1) #endif if(is_master()) then @@ -280,7 +276,7 @@ end subroutine drymadj subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & pt, delz, area, ng, mountain, hydrostatic, hybrid_z, domain) -! Input: +! Input: integer, intent(in):: is, ie, js, je, km, ng real, intent(in):: ak(km+1), bk(km+1) real, intent(in):: hs(is-ng:ie+ng,js-ng:je+ng) @@ -294,14 +290,14 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & real, intent(out):: ps(is-ng:ie+ng,js-ng:je+ng) real, intent(out):: pt(is-ng:ie+ng,js-ng:je+ng,km) real, intent(out):: delp(is-ng:ie+ng,js-ng:je+ng,km) - real, intent(inout):: delz(is-ng:ie+ng,js-ng:je+ng,km) + real, intent(inout):: delz(is:,js:,1:) ! Local real gz(is:ie,km+1) real ph(is:ie,km+1) real mslp, z1, t1, p1, t0, a0, psm real ztop, c0 #ifdef INIT_4BYTE - real(kind=4) :: dps + real(kind=4) :: dps #else real dps ! note that different PEs will get differt dps during initialization ! this has no effect after cold start @@ -321,7 +317,7 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & c0 = t0/a0 if ( hybrid_z ) then - ptop = 100. ! *** hardwired model top *** + ptop = 100. ! *** hardwired model top *** else ptop = ak(1) endif @@ -356,8 +352,8 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & ps(i,j) = ps(i,j) + dps gz(i, 1) = ztop gz(i,km+1) = hs(i,j) - ph(i, 1) = ptop - ph(i,km+1) = ps(i,j) + ph(i, 1) = ptop + ph(i,km+1) = ps(i,j) enddo if ( hybrid_z ) then @@ -366,14 +362,14 @@ subroutine hydro_eq(km, is, ie, js, je, ps, hs, drym, delp, ak, bk, & !--------------- do k=km,2,-1 do i=is,ie - gz(i,k) = gz(i,k+1) - delz(i,j,k)*grav + gz(i,k) = gz(i,k+1) - delz(i,j,k)*grav enddo enddo ! 
Correct delz at the top: do i=is,ie delz(i,j,1) = (gz(i,2) - ztop) / grav enddo - + do k=2,km do i=is,ie if ( gz(i,k) >= z1 ) then diff --git a/tools/sim_nc_mod.F90 b/tools/sim_nc_mod.F90 index 4727c91f7..e7f837e9d 100644 --- a/tools/sim_nc_mod.F90 +++ b/tools/sim_nc_mod.F90 @@ -41,10 +41,6 @@ module sim_nc_mod get_var3_real, get_var3_double, get_var3_r4, get_var2_real, get_var2_r4, & handle_err, check_var, get_var1_real, get_var_att_double -!---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains subroutine open_ncfile( iflnm, ncid ) @@ -247,7 +243,7 @@ subroutine get_var3_r4( ncid, var3_name, is,ie, js,je, ks,ke, var3, time_slice ) start(4) = time_slice end if - nreco(1) = ie - is + 1 + nreco(1) = ie - is + 1 nreco(2) = je - js + 1 nreco(3) = ke - ks + 1 nreco(4) = 1 @@ -265,7 +261,7 @@ subroutine get_var4_real( ncid, var4_name, im, jm, km, nt, var4 ) real*4:: wk4(im,jm,km,4) real*4, intent(out):: var4(im,jm) integer:: status, var4id - integer:: start(4), icount(4) + integer:: start(4), icount(4) integer:: i,j start(1) = 1 @@ -305,7 +301,7 @@ subroutine get_var4_double( ncid, var4_name, im, jm, km, nt, var4 ) real(kind=8), intent(out):: var4(im,jm,km,1) integer:: status, var4id ! - integer:: start(4), icount(4) + integer:: start(4), icount(4) start(1) = 1 start(2) = 1 @@ -358,7 +354,7 @@ logical function check_var( ncid, var3_name) integer:: status, var3id status = nf_inq_varid (ncid, var3_name, var3id) - check_var = (status == NF_NOERR) + check_var = (status == NF_NOERR) end function check_var @@ -415,7 +411,7 @@ subroutine calendar(year, month, day, hour) ! Local variables ! integer irem4,irem100 - integer mdays(12) ! number day of month + integer mdays(12) ! number day of month data mdays /31,28,31,30,31,30,31,31,30,31,30,31/ ! !*********************************************************************** diff --git a/tools/sorted_index.F90 b/tools/sorted_index.F90 index 1d3ea9edd..3ca5f3f91 100644 --- a/tools/sorted_index.F90 +++ b/tools/sorted_index.F90 @@ -18,7 +18,6 @@ !* License along with the FV3 dynamical core. !* If not, see <http://www.gnu.org/licenses/>. !*********************************************************************** -!-*- F90 -*- module sorted_index_mod !--------------------------------------------------------------------- ! ! ! - ! i/jinta are indices of b-grid locations needed for line integrals ! around an a-grid cell including ghosting. ! ! i/jintb are indices of a-grid locations needed for line integrals @@ -41,20 +40,16 @@ module sorted_index_mod private public :: sorted_inta, sorted_intb - !---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - contains !##################################################################### ! ! ! - ! Sort cell corner indices in latlon space based on grid locations - ! in index space. If not cubed_sphere assume orientations in index + ! Sort cell corner indices in latlon space based on grid locations + ! in index space. If not cubed_sphere assume orientations in index ! and latlon space are identical. ! - ! i/jinta are indices of b-grid locations needed for line integrals + ! i/jinta are indices of b-grid locations needed for line integrals ! around an a-grid cell including ghosting. ! !
i/jintb are indices of a-grid locations needed for line integrals @@ -79,7 +74,7 @@ subroutine sorted_inta(isd, ied, jsd, jed, cubed_sphere, bgrid, iinta, jinta) !------------------------------------------------------------------! if (cubed_sphere) then !---------------------------------------------------------------! - ! get order of indices for line integral around a-grid cell ! + ! get order of indices for line integral around a-grid cell ! !---------------------------------------------------------------! do j=jsd,jed do i=isd,ied @@ -99,7 +94,7 @@ subroutine sorted_inta(isd, ied, jsd, jed, cubed_sphere, bgrid, iinta, jinta) iinta(i,j,1)=i ; jinta(i,j,1)=j iinta(i,j,2)=i ; jinta(i,j,2)=j+1 iinta(i,j,3)=i+1; jinta(i,j,3)=j+1 - iinta(i,j,4)=i+1; jinta(i,j,4)=j + iinta(i,j,4)=i+1; jinta(i,j,4)=j enddo enddo endif @@ -121,7 +116,7 @@ subroutine sort_rectangle(iind, jind) ysorted(:)=10. isorted(:)=0 jsorted(:)=0 - + do l=1,4 do ll=1,4 if (xsort(l) ! ! - ! Sort cell corner indices in latlon space based on grid locations - ! in index space. If not cubed_sphere assume orientations in index + ! Sort cell corner indices in latlon space based on grid locations + ! in index space. If not cubed_sphere assume orientations in index ! and latlon space are identical. ! - ! i/jinta are indices of b-grid locations needed for line integrals + ! i/jinta are indices of b-grid locations needed for line integrals ! around an a-grid cell including ghosting. ! ! i/jintb are indices of a-grid locations needed for line integrals @@ -267,7 +262,7 @@ subroutine sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & !------------------------------------------------------------------! ! local variables ! !------------------------------------------------------------------! - real, dimension(4) :: xsort, ysort, xsorted, ysorted + real, dimension(4) :: xsort, ysort, xsorted, ysorted integer, dimension(4) :: isort, jsort, isorted, jsorted integer :: i, j, l, ll, lll !------------------------------------------------------------------! @@ -275,7 +270,7 @@ subroutine sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & !------------------------------------------------------------------! if (cubed_sphere) then !---------------------------------------------------------------! - ! get order of indices for line integral around b-grid cell ! + ! get order of indices for line integral around b-grid cell ! !---------------------------------------------------------------! do j=js,je+1 do i=is,ie+1 @@ -292,7 +287,7 @@ subroutine sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & if ( (is==1) .and. (js==1) ) then i=1 j=1 - xsort(1)=agrid(i ,j ,1); ysort(1)=agrid(i ,j ,2); isort(1)=i ; jsort(1)=j + xsort(1)=agrid(i ,j ,1); ysort(1)=agrid(i ,j ,2); isort(1)=i ; jsort(1)=j xsort(2)=agrid(i ,j-1,1); ysort(2)=agrid(i ,j-1,2); isort(2)=i ; jsort(2)=j-1 xsort(3)=agrid(i-1,j ,1); ysort(3)=agrid(i-1,j ,2); isort(3)=i-1; jsort(3)=j call sort_triangle() @@ -318,7 +313,7 @@ subroutine sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & call sort_triangle() iintb(4,i,j)=i; jintb(4,i,j)=j endif - + if ( (is==1) .and. 
(je+1==npy) ) then i=1 j=npy @@ -337,7 +332,7 @@ subroutine sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & iintb(1,i,j)=i ; jintb(1,i,j)=j iintb(2,i,j)=i ; jintb(2,i,j)=j-1 iintb(3,i,j)=i-1; jintb(3,i,j)=j-1 - iintb(4,i,j)=i-1; jintb(4,i,j)=j + iintb(4,i,j)=i-1; jintb(4,i,j)=j enddo enddo endif @@ -350,7 +345,7 @@ subroutine sort_rectangle(iind, jind) !----------------------------------------------------------------! ! local variables ! !----------------------------------------------------------------! - real, dimension(4) :: xsorted, ysorted + real, dimension(4) :: xsorted, ysorted integer, dimension(4) :: isorted, jsorted !----------------------------------------------------------------! ! sort in east west ! @@ -359,7 +354,7 @@ subroutine sort_rectangle(iind, jind) ysorted(:)=10. isorted(:)=0 jsorted(:)=0 - + do l=1,4 do ll=1,4 if (xsort(l)radius, pi=>pi_8, omega, grav, kappa, rdgas, cp_air, rvgas use init_hydro_mod, only: p_var, hydro_eq - use fv_mp_mod, only: ng, is_master, & - is,js,ie,je, isd,jsd,ied,jed, & + use fv_mp_mod, only: is_master, & domain_decomp, fill_corners, XDir, YDir, & mp_stop, mp_reduce_sum, mp_reduce_max, mp_gather, mp_bcst use fv_grid_utils_mod, only: cubed_to_latlon, great_circle_dist, mid_pt_sphere, & @@ -37,6 +36,8 @@ module test_cases_mod hybrid_z_dz use mpp_mod, only: mpp_error, FATAL, mpp_root_pe, mpp_broadcast, mpp_sum + use mpp_mod, only: stdlog, input_nml_file + use fms_mod, only: check_nml_error use mpp_domains_mod, only: mpp_update_domains, domain2d use mpp_parameter_mod, only: AGRID_PARAM=>AGRID,CGRID_NE_PARAM=>CGRID_NE, & SCALAR_PAIR @@ -51,15 +52,20 @@ module test_cases_mod implicit none private -! Test Case Number +!!! A NOTE ON TEST CASES +!!! If you have a DRY test case with no physics, be sure to set adiabatic = .TRUE. in your runscript. +!!!! This is especially important for nonhydrostatic cases in which delz will be initialized with the +!!!! virtual temperature effect. + +! Test Case Number (cubed-sphere domain) ! -1 = Divergence conservation test ! 0 = Idealized non-linear deformational flow ! 1 = Cosine Bell advection ! 2 = Zonal geostrophically balanced flow -! 3 = non-rotating potential flow +! 3 = non-rotating potential flow ! 4 = Tropical cyclones (merger of Rankine vortices) ! 5 = Zonal geostrophically balanced flow over an isolated mountain -! 6 = Rossby Wave number 4 +! 6 = Rossby Wave number 4 ! 7 = Barotropic instability ! ! 8 = Potential flow (as in 5 but no rotation and initially at rest) ! 8 = "Soliton" propagation twin-vortex along equator @@ -88,16 +94,17 @@ module test_cases_mod ! 44 = Lock-exchange on the sphere; atm at rest with no mountain ! 45 = New test ! 51 = 3D tracer advection (deformational nondivergent flow) -! 55 = TC +! 55 = TC +! -55 = DCMIP 2016 TC test ! 101 = 3D non-hydrostatic Large-Eddy-Simulation (LES) with hybrid_z IC integer :: sphum, theta_d real(kind=R_GRID), parameter :: radius = cnst_radius real(kind=R_GRID), parameter :: one = 1.d0 - integer :: test_case - logical :: bubble_do - real :: alpha - integer :: Nsolitons + integer :: test_case = 11 + logical :: bubble_do = .false. + real :: alpha = 0.0 + integer :: Nsolitons = 1 real :: soliton_size = 750.e3, soliton_Umax = 50. ! Case 0 parameters @@ -110,11 +117,11 @@ module test_cases_mod real, parameter :: pi_shift = 0.0 !3.0*pi/4. ! 
-1:null_op, 0:All-Grids, 1:C-Grid, 2:D-Grid, 3:A-Grid, 4:A-Grid then Rotate, 5:D-Grid with unit vectors then Rotate - integer, parameter :: initWindsCase0 =-1 + integer, parameter :: initWindsCase0 =-1 integer, parameter :: initWindsCase1 = 1 - integer, parameter :: initWindsCase2 = 5 + integer, parameter :: initWindsCase2 = 5 integer, parameter :: initWindsCase5 = 5 - integer, parameter :: initWindsCase6 =-1 + integer, parameter :: initWindsCase6 =-1 integer, parameter :: initWindsCase9 =-1 real, allocatable, dimension(:) :: pz0, zz0 @@ -148,18 +155,11 @@ module test_cases_mod public :: pz0, zz0 public :: test_case, bubble_do, alpha, tracer_test, wind_field, nsolitons, soliton_Umax, soliton_size - public :: init_case, get_stats, check_courant_numbers -#ifdef NCDF_OUTPUT - public :: output, output_ncdf -#endif + public :: init_case public :: case9_forcing1, case9_forcing2, case51_forcing - public :: init_double_periodic, init_latlon + public :: init_double_periodic public :: checker_tracers - !---- version number ----- - character(len=128) :: version = '$Id$' - character(len=128) :: tagname = '$Name$' - INTERFACE mp_update_dwinds MODULE PROCEDURE mp_update_dwinds_2d MODULE PROCEDURE mp_update_dwinds_3d @@ -170,24 +170,25 @@ module test_cases_mod !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! -! init_winds :: initialize the winds +! init_winds :: initialize the winds ! - subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nregions, nested, gridstruct, domain, tile) + subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nregions, bounded_domain, gridstruct, domain, tile, bd) ! defOnGrid = -1:null_op, 0:All-Grids, 1:C-Grid, 2:D-Grid, 3:A-Grid, 4:A-Grid then Rotate, 5:D-Grid with unit vectors then Rotate + type(fv_grid_bounds_type), intent(IN) :: bd real , intent(INOUT) :: UBar - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1) - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ) - real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ) - real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1) - real , intent(INOUT) :: ua(isd:ied ,jsd:jed ) - real , intent(INOUT) :: va(isd:ied ,jsd:jed ) + real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1) + real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ) + real , intent(INOUT) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ) + real , intent(INOUT) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1) + real , intent(INOUT) :: ua(bd%isd:bd%ied ,bd%jsd:bd%jed ) + real , intent(INOUT) :: va(bd%isd:bd%ied ,bd%jsd:bd%jed ) integer, intent(IN) :: defOnGrid integer, intent(IN) :: npx, npy integer, intent(IN) :: ng integer, intent(IN) :: ndims integer, intent(IN) :: nregions - logical, intent(IN) :: nested + logical, intent(IN) :: bounded_domain type(fv_grid_type), intent(IN), target :: gridstruct type(domain2d), intent(INOUT) :: domain integer, intent(IN) :: tile @@ -195,11 +196,11 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre real(kind=R_GRID) :: p1(2), p2(2), p3(2), p4(2), pt(2) real(kind=R_GRID) :: e1(3), e2(3), ex(3), ey(3) - real :: dist, r, r0 + real :: dist, r, r0 integer :: i,j,k,n real :: utmp, vtmp - real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 + real :: psi_b(bd%isd:bd%ied+1,bd%jsd:bd%jed+1), psi(bd%isd:bd%ied,bd%jsd:bd%jed), psi1, psi2 integer :: is2, ie2, js2, je2 real(kind=R_GRID), pointer, dimension(:,:,:) :: agrid, grid @@ -215,6 +216,9 @@ subroutine 
init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre integer, pointer :: ntiles_g real, pointer :: acapN, acapS, globalarea + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + grid => gridstruct%grid_64 agrid=> gridstruct%agrid_64 @@ -239,7 +243,7 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre rdya => gridstruct%rdya dxc => gridstruct%dxc dyc => gridstruct%dyc - + cubed_sphere => gridstruct%cubed_sphere latlon => gridstruct%latlon @@ -251,7 +255,16 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre acapS => gridstruct%acapS globalarea => gridstruct%globalarea - if (nested) then + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + if (bounded_domain) then is2 = is-2 ie2 = ie+2 @@ -316,7 +329,7 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre if (dist==0) u(i,j) = 0. enddo enddo - call mp_update_dwinds(u, v, npx, npy, domain) + call mp_update_dwinds(u, v, npx, npy, domain, bd) do j=js,je do i=is,ie psi1 = 0.5*(psi(i,j)+psi(i,j-1)) @@ -349,8 +362,8 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre enddo call mpp_update_domains( uc, vc, domain, gridtype=CGRID_NE_PARAM) call fill_corners(uc, vc, npx, npy, VECTOR=.true., CGRID=.true.) - call ctoa(uc,vc,ua,va,dx, dy, dxc,dyc,dxa,dya,npx,npy,ng) - call atod(ua,va,u ,v ,dxa, dya,dxc,dyc,npx,npy,ng, nested, domain) + call ctoa(uc,vc,ua,va,dx, dy, dxc,dyc,dxa,dya,npx,npy,ng, bd) + call atod(ua,va,u ,v ,dxa, dya,dxc,dyc,npx,npy,ng, bounded_domain, domain, bd) ! call d2a2c(npx,npy,1, is,ie, js,je, ng, u(isd,jsd),v(isd,jsd), & ! ua(isd,jsd),va(isd,jsd), uc(isd,jsd),vc(isd,jsd)) elseif ( (cubed_sphere) .and. (defOnGrid==2) ) then @@ -358,19 +371,19 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre do i=is2,ie2+1 dist = dxc(i,j) v(i,j) = (psi(i,j)-psi(i-1,j))/dist - if (dist==0) v(i,j) = 0. + if (dist==0) v(i,j) = 0. enddo enddo do j=js2,je2+1 do i=is2,ie2 dist = dyc(i,j) u(i,j) = -1.0*(psi(i,j)-psi(i,j-1))/dist - if (dist==0) u(i,j) = 0. + if (dist==0) u(i,j) = 0. enddo enddo - call mp_update_dwinds(u, v, npx, npy, domain) - call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested, domain) + call mp_update_dwinds(u, v, npx, npy, domain, bd) + call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng, bd) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, bounded_domain, domain, bd) elseif ( (cubed_sphere) .and. (defOnGrid==3) ) then do j=js,je do i=is,ie @@ -387,15 +400,15 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre enddo enddo call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) - call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, nested, domain) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested,domain) + call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, bounded_domain, domain, bd) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, bounded_domain,domain, bd) elseif ( (latlon) .or. 
(defOnGrid==4) ) then do j=js,je do i=is,ie ua(i,j) = Ubar * ( COS(agrid(i,j,2))*COS(alpha) + & SIN(agrid(i,j,2))*COS(agrid(i,j,1))*SIN(alpha) ) - va(i,j) = -Ubar * SIN(agrid(i,j,1))*SIN(alpha) + va(i,j) = -Ubar * SIN(agrid(i,j,1))*SIN(alpha) call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) @@ -410,8 +423,8 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre enddo enddo call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) - call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, nested, domain) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested, domain) + call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, bounded_domain, domain, bd) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, bounded_domain, domain, bd) elseif ( (latlon) .or. (defOnGrid==5) ) then ! SJL mods: ! v-wind: @@ -443,9 +456,9 @@ subroutine init_winds(UBar, u,v,ua,va,uc,vc, defOnGrid, npx, npy, ng, ndims, nre enddo enddo - call mp_update_dwinds(u, v, npx, npy, domain) - call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, nested, domain) + call mp_update_dwinds(u, v, npx, npy, domain, bd) + call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng, bd) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, bounded_domain, domain, bd) else !print*, 'Choose an appropriate grid to define the winds on' !stop @@ -470,7 +483,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, gridstruct, flagstruct, npx, npy, npz, ng, ncnst, nwat, ndims, nregions, & dry_mass, mountain, moist_phys, hydrostatic, hybrid_z, delz, ze0, adiabatic, & ks, npx_global, ptop, domain_in, tile_in, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) @@ -491,7 +504,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, real , intent(INOUT) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: ua(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: va(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - real , intent(inout) :: delz(bd%isd:,bd%jsd:,1:) + real , intent(inout) :: delz(bd%is:,bd%js:,1:) real , intent(inout) :: ze0(bd%is:,bd%js:,1:) real , intent(inout) :: ak(npz+1) @@ -564,7 +577,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, real :: pmin, pmin1 real :: pmax, pmax1 real :: grad(bd%isd:bd%ied ,bd%jsd:bd%jed,2) - real :: div0(bd%isd:bd%ied ,bd%jsd:bd%jed ) + real :: div0(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: vor0(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: divg(bd%isd:bd%ied ,bd%jsd:bd%jed ) real :: vort(bd%isd:bd%ied ,bd%jsd:bd%jed ) @@ -590,7 +603,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 
real sbuffer(npy+1,npz) real wbuffer(npy+2,npz) real sbuffer(npx+2,npz) - + real :: gz(bd%isd:bd%ied,bd%jsd:bd%jed,npz+1), zt, zdist real :: zvir @@ -640,6 +653,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, integer, pointer :: ntiles_g real, pointer :: acapN, acapS, globalarea + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + is = bd%is ie = bd%ie js = bd%js @@ -673,7 +689,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, rdya => gridstruct%rdya dxc => gridstruct%dxc dyc => gridstruct%dyc - + cubed_sphere => gridstruct%cubed_sphere latlon => gridstruct%latlon @@ -688,7 +704,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, acapS => gridstruct%acapS globalarea => gridstruct%globalarea - if (gridstruct%nested) then + if (gridstruct%bounded_domain) then is2 = isd ie2 = ied js2 = jsd @@ -738,7 +754,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, sin(agrid(i ,j ,2))*cos(alpha) ) ** 2.0 enddo enddo - call init_winds(UBar, u,v,ua,va,uc,vc, 1, npx, npy, ng, ndims, nregions, gridstruct%nested, gridstruct, domain, tile) + call init_winds(UBar, u,v,ua,va,uc,vc, 1, npx, npy, ng, ndims, nregions, gridstruct%bounded_domain, gridstruct, domain, tile) ! Test Divergence operator at cell centers do j=js,je @@ -772,9 +788,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, write(*,201) 'Divergence L1_norm : ', L1_norm write(*,201) 'Divergence L2_norm : ', L2_norm write(*,201) 'Divergence Linf_norm : ', Linf_norm - endif + endif - call init_winds(UBar, u,v,ua,va,uc,vc, 3, npx, npy, ng, ndims, nregions, gridstruct%nested, gridstruct, domain, tile) + call init_winds(UBar, u,v,ua,va,uc,vc, 3, npx, npy, ng, ndims, nregions, gridstruct%bounded_domain, gridstruct, domain, tile, bd) ! Test Divergence operator at cell centers do j=js,je do i=is,ie @@ -804,7 +820,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, write(*,201) 'Divergence Linf_norm : ', Linf_norm endif - call init_winds(UBar, u,v,ua,va,uc,vc, 2, npx, npy, ng, ndims, nregions, gridstruct%nested, gridstruct, domain, tile) + call init_winds(UBar, u,v,ua,va,uc,vc, 2, npx, npy, ng, ndims, nregions, gridstruct%bounded_domain, gridstruct, domain, tile, bd) !call d2a2c(npx,npy,1, is,ie, js,je, ng, u(isd,jsd,1),v(isd,jsd,1), & ! ua(isd,jsd,1),va(isd,jsd,1), uc(isd,jsd,1),vc(isd,jsd,1)) ! 
Test Divergence operator at cell centers @@ -840,14 +856,14 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=jsd,jed do i=isd,ied - x1 = agrid(i,j,1) + x1 = agrid(i,j,1) y1 = agrid(i,j,2) z1 = radius p = p0_c0 * cos(y1) Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) w_p = 0.0 - if (p /= 0.0) w_p = Vtx/p + if (p /= 0.0) w_p = Vtx/p delp(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*0.0) ) ua(i,j,1) = w_p*(sin(lat0)*cos(agrid(i,j,2)) + cos(lat0)*cos(agrid(i,j,1) - lon0)*sin(agrid(i,j,2))) va(i,j,1) = w_p*cos(lat0)*sin(agrid(i,j,1) - lon0) @@ -857,15 +873,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) - call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) + call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) if (cubed_sphere) call rotate_winds(ua(i,j,1),va(i,j,1), p1,p2,p3,p4, agrid(i,j,1:2), 2, 1) enddo enddo call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) - call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, gridstruct%nested, domain) - call mp_update_dwinds(u, v, npx, npy, npz, domain) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%nested, domain) + call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, gridstruct%bounded_domain, domain, bd) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%bounded_domain, domain, bd) call mpp_update_domains( uc, vc, domain, gridtype=CGRID_NE_PARAM) call fill_corners(uc, vc, npx, npy, npz, VECTOR=.true., CGRID=.true.) initWindsCase=initWindsCase0 @@ -1032,7 +1048,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ubar = 50. ! maxmium wind speed (m/s) r0 = 250.e3 ! RADIUS of the maximum wind of the Rankine vortex gh0 = grav * 1.e3 - + do j=jsd,jed do i=isd,ied delp(i,j,1) = gh0 @@ -1040,15 +1056,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo ! ddeg = 2.*r0/radius ! no merger - ddeg = 1.80*r0/radius ! merged + ddeg = 1.80*r0/radius ! merged p1(1) = pi*1.5 - ddeg p1(2) = pi/18. ! 10 N - call rankine_vortex(ubar, r0, p1, u, v, grid) + call rankine_vortex(ubar, r0, p1, u, v, grid, bd) p2(1) = pi*1.5 + ddeg p2(2) = pi/18. ! 10 N - call rankine_vortex(ubar, r0, p2, u, v, grid) + call rankine_vortex(ubar, r0, p2, u, v, grid, bd) #ifndef SINGULAR_VORTEX !----------- @@ -1060,21 +1076,21 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, e1(i) = -e1(i) enddo call cart_to_latlon(1, e1, p3(1), p3(2)) - call rankine_vortex(ubar, r0, p3, u, v, grid) + call rankine_vortex(ubar, r0, p3, u, v, grid, bd) call latlon2xyz(p2, e1) do i=1,3 e1(i) = -e1(i) enddo call cart_to_latlon(1, e1, p4(1), p4(2)) - call rankine_vortex(ubar, r0, p4, u, v, grid) + call rankine_vortex(ubar, r0, p4, u, v, grid, bd) #endif - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) initWindsCase=-1 ! do nothing case(5) - Ubar = 20. + Ubar = 20. gh0 = 5960.*Grav phis = 0.0 r0 = PI/9. 
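For context, case(5) just above is test 5 of Williamson et al. (1992): zonal geostrophic flow (Ubar = 20 m/s, mean geopotential gh0 = 5960.*Grav) impinging on an isolated mountain of angular radius r0 = PI/9. A minimal sketch of the standard conical mountain that defines the surface geopotential in that test follows; it is illustrative only (the function name, the local grav value, and the hard-coded center at 270E/30N are assumptions here, not code from this patch):

! Illustrative sketch (not from the patch): the conical mountain of
! Williamson et al. (1992) test case 5. Inputs are cell-center
! longitude/latitude in radians; result is surface geopotential (m2/s2).
real function case5_phis(lon, lat)
  implicit none
  real, intent(in) :: lon, lat
  real, parameter :: pi   = 3.14159265358979
  real, parameter :: grav = 9.80665   ! assumed; FV3 takes grav from constants_mod
  real, parameter :: hs0  = 2000.     ! mountain height (m)
  real, parameter :: rm   = pi/9.     ! mountain radius (rad), the r0 above
  real, parameter :: lonc = 1.5*pi    ! mountain center longitude (270E)
  real, parameter :: latc = pi/6.     ! mountain center latitude (30N)
  real :: r
  r = sqrt( min( rm**2, (lon-lonc)**2 + (lat-latc)**2 ) )
  case5_phis = grav*hs0*(1. - r/rm)
end function case5_phis

Because the min() clamps r at rm, the mountain height falls linearly from 2000 m at the center to zero at radius rm and is identically zero outside it, so the flow stays geostrophically balanced away from the mountain.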
@@ -1125,7 +1141,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call get_unit_vect2(p1, p2, e2) call get_latlon_vector(p3, ex, ey) utmp = radius*omg*cos(p3(2)) + & - radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) + radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) vtmp = -radius*rk*R*sin(p3(2))*sin(R*p3(1))*cos(p3(2))**(R-1) v(i,j,1) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) enddo @@ -1138,15 +1154,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call get_unit_vect2(p1, p2, e1) call get_latlon_vector(p3, ex, ey) utmp = radius*omg*cos(p3(2)) + & - radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) + radius*rk*(cos(p3(2))**(R-1))*(R*sin(p3(2))**2-cos(p3(2))**2)*cos(R*p3(1)) vtmp = -radius*rk*R*sin(p3(2))*sin(R*p3(1))*cos(p3(2))**(R-1) u(i,j,1) = utmp*inner_prod(e1,ex) + vtmp*inner_prod(e1,ey) enddo enddo - call mp_update_dwinds(u, v, npx, npy, npz, domain) - call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) + call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,npx,npy,ng,bd) !call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%nested, domain) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%bounded_domain, domain, bd) initWindsCase=initWindsCase6 case(7) ! Barotropically unstable jet @@ -1162,7 +1178,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 pt1 = gh_jet(npy, agrid(i,j,2)) call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), pa) @@ -1227,7 +1243,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo initWindsCase=initWindsCase6 ! shouldn't do anything with this -!initialize tracer with shallow-water PV +!initialize tracer with shallow-water PV !Compute vorticity call get_vorticity(is, ie, js, je, isd, ied, jsd, jed, npz, u, v, q(is:ie,js:je,:,1), dx, dy, rarea) do j=jsd,jed+1 @@ -1425,15 +1441,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%nested, domain) + call atoc(ua,va,uc,vc,dx,dy,dxa,dya,npx,npy,ng, gridstruct%bounded_domain, domain, bd) call mpp_update_domains( uc, vc, domain, gridtype=CGRID_NE_PARAM) call fill_corners(uc, vc, npx, npy, npz, VECTOR=.true., CGRID=.true.) - call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, gridstruct%nested, domain) - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call atod(ua,va, u, v,dxa, dya,dxc,dyc,npx,npy,ng, gridstruct%bounded_domain, domain, bd) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) initWindsCase=initWindsCase9 - call get_case9_B(case9_B, agrid) + call get_case9_B(case9_B, agrid, isd, ied, jsd, jed) AofT(:) = 0.0 #else !---------------------------- @@ -1497,7 +1513,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call mpp_update_domains( phis, domain ) phi0 = delp - call init_winds(UBar, u,v,ua,va,uc,vc, initWindsCase, npx, npy, ng, ndims, nregions, gridstruct%nested, gridstruct, domain, tile) + call init_winds(UBar, u,v,ua,va,uc,vc, initWindsCase, npx, npy, ng, ndims, nregions, gridstruct%bounded_domain, gridstruct, domain, tile, bd) ! 
Copy 3D data for Shallow Water Tests do z=2,npz u(:,:,z) = u(:,:,1) @@ -1551,7 +1567,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call surfdrv(npx, npy, gridstruct%grid_64, gridstruct%agrid_64, & gridstruct%area_64, dx, dy, dxa, dya, dxc, dyc, & gridstruct%sin_sg, phis, & - flagstruct%stretch_fac, gridstruct%nested, & + flagstruct%stretch_fac, gridstruct%nested, gridstruct%bounded_domain, & npx_global, domain, flagstruct%grid_number, bd) call mpp_update_domains( phis, domain ) @@ -1563,7 +1579,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! call mpp_error(FATAL, 'You must provide a routine for hybrid_z') if ( is_master() ) write(*,*) 'Using const DZ' ztop = 45.E3 ! assuming ptop = 100. - dz1(1) = ztop / real(npz) + dz1(1) = ztop / real(npz) dz1(npz) = 0.5*dz1(1) do z=2,npz-1 dz1(z) = dz1(1) @@ -1594,7 +1610,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call checker_tracers(is,ie, js,je, isd,ied, jsd,jed, & ncnst, npz, q, agrid(is:ie,js:je,1), agrid(is:ie,js:je,2), 9., 9.) #else - !For consistency with earlier single-grid simulations use gh0 = 1.0e-6 and p1(1) = 195.*pi/180. + !For consistency with earlier single-grid simulations use gh0 = 1.0e-6 and p1(1) = 195.*pi/180. q(:,:,:,:) = 0. gh0 = 1.0e-3 r0 = radius/3. !RADIUS radius/3. @@ -1615,7 +1631,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo #endif - + #else q(:,:,:,:) = 0. @@ -1626,7 +1642,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, cl2 = get_tracer_index(MODEL_ATMOS, 'cl2') if (cl > 0 .and. cl2 > 0) then call terminator_tracers(is,ie,js,je,isd,ied,jsd,jed,npz, & - q, delp,ncnst,agrid(isd:ied,jsd:jed,1),agrid(isd:ied,jsd:jed,2)) + q, delp,ncnst,agrid(isd:ied,jsd:jed,1),agrid(isd:ied,jsd:jed,2),bd) call mpp_update_domains(q,domain) endif @@ -1656,7 +1672,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) pk(i,j,k) = exp( kappa*log(pe(i,k,j)) ) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) enddo enddo enddo @@ -1681,9 +1697,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !Set up moisture sphum = get_tracer_index (MODEL_ATMOS, 'sphum') pcen(1) = PI/9. - pcen(2) = 2.0*PI/9. + pcen(2) = 2.0*PI/9. !$OMP parallel do default(none) shared(sphum,is,ie,js,je,npz,pe,q,agrid,pcen,delp,peln) & -!$OMP private(ptmp) +!$OMP private(ptmp) do k=1,npz do j=js,je do i=is,ie @@ -1699,11 +1715,11 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo endif - ! Initialize winds + ! Initialize winds Ubar = 35.0 r0 = 1.0 pcen(1) = PI/9. - pcen(2) = 2.0*PI/9. + pcen(2) = 2.0*PI/9. if (test_case == 13) then #ifdef ALT_PERT u1 = 0.0 @@ -1723,13 +1739,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*grid(i,j+1,2))**2.0 ! Perturbation if Case==13 r = great_circle_dist( pcen, grid(i,j+1,1:2), radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv1 = utmp*(ee2(2,i,j+1)*cos(grid(i,j+1,1)) - ee2(1,i,j+1)*sin(grid(i,j+1,1))) utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*grid(i,j,2))**2.0 ! 
Perturbation if Case==13 r = great_circle_dist( pcen, grid(i,j,1:2), radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv3 = utmp*(ee2(2,i,j)*cos(grid(i,j,1)) - ee2(1,i,j)*sin(grid(i,j,1))) ! Mid-point: p1(:) = grid(i ,j ,1:2) @@ -1738,7 +1754,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, utmp = Ubar * COS(eta_v(z))**(3.0/2.0) * SIN(2.0*pa(2))**2.0 ! Perturbation if Case==13 r = great_circle_dist( pcen, pa, radius ) - if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) + if (-(r/r0)**2.0 > -40.0) utmp = utmp + u1*EXP(-(r/r0)**2.0) vv2 = utmp*(ew(2,i,j,2)*cos(pa(1)) - ew(1,i,j,2)*sin(pa(1))) ! 3-point average: v(i,j,z) = 0.25*(vv1 + 2.*vv2 + vv3) @@ -1806,7 +1822,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p1) @@ -1857,7 +1873,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pt(i,j,z) = pt(i,j,z) + pt0*exp(-(r/r0)**2) endif #endif - + enddo enddo enddo @@ -1878,7 +1894,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p1) @@ -1961,7 +1977,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call DCMIP16_BC(delp,pt,u,v,q,w,delz, & is,ie,js,je,isd,ied,jsd,jed,npz,ncnst,ak,bk,ptop, & pk,peln,pe,pkz,gz,phis,ps,grid,agrid,hydrostatic, & - nwat, adiabatic, test_case == -13, domain) + nwat, adiabatic, test_case == -13, domain, bd) write(stdout(), *) 'PHIS:', mpp_chksum(phis(is:ie,js:je)) @@ -2098,15 +2114,15 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=jsd,jed do i=isd,ied - ps(i,j) = pe1(npz+1) + ps(i,j) = pe1(npz+1) enddo enddo do z=1,npz+1 do j=js,je do i=is,ie - pe(i,z,j) = pe1(z) - peln(i,z,j) = log(pe1(z)) + pe(i,z,j) = pe1(z) + peln(i,z,j) = log(pe1(z)) pk(i,j,z) = exp(kappa*peln(i,z,j)) enddo enddo @@ -2122,7 +2138,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if ( r 1.E-12 ) then - zeta = asin ( p4(2) / sqrt(p4(1)**2 + p4(2)**2) ) + zeta = asin ( p4(2) / sqrt(p4(1)**2 + p4(2)**2) ) else zeta = pi/2. endif @@ -2341,7 +2357,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, zeta = zeta + pi/6. v1 = r/uu1 * cos( zeta ) v2 = r/uu2 * sin( zeta ) - phis(i,j) = ftop / ( 1. + v1**2 + v2**2 ) + phis(i,j) = ftop / ( 1. + v1**2 + v2**2 ) else phis(i,j) = 0. endif @@ -2358,7 +2374,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, else if ( is_master() ) write(*,*) 'Using const DZ' ztop = 15.E3 - dz1(1) = ztop / real(npz) + dz1(1) = ztop / real(npz) do k=2,npz dz1(k) = dz1(1) enddo @@ -2392,23 +2408,23 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, t00 = 300. pt0 = t00/pk0 n2 = 1.E-4 - s0 = grav*grav / (cp_air*n2) + s0 = grav*grav / (cp_air*n2) ! For constant N2, Given z --> p do k=1,npz+1 pe1(k) = p00*( (1.-s0/t00) + s0/t00*exp(-n2*ze1(k)/grav) )**(1./kappa) enddo - ptop = pe1(1) + ptop = pe1(1) if ( is_master() ) write(*,*) 'Lee vortex testcase: model top (mb)=', ptop/100. -! Set up fake "sigma" coordinate +! Set up fake "sigma" coordinate ak(1) = pe1(1) bk(1) = 0. do k=2,npz bk(k) = (pe1(k) - pe1(1)) / (pe1(npz+1)-pe1(1)) ! 
bk == sigma - ak(k) = pe1(1)*(1.-bk(k)) - enddo + ak(k) = pe1(1)*(1.-bk(k)) + enddo ak(npz+1) = 0. bk(npz+1) = 1. @@ -2418,7 +2434,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=is,ie pk(i,j,k) = pk0 - (1.-exp(-n2/grav*ze0(i,j,k))) * (grav*grav)/(n2*cp_air*pt0) pe(i,k,j) = pk(i,j,k) ** (1./kappa) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) enddo enddo enddo @@ -2426,7 +2442,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = pe(i,1,j) ** kappa ps(i,j) = pe(i,npz+1,j) enddo @@ -2436,7 +2452,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) pt(i,j,k) = pkz(i,j,k)*grav*delz(i,j,k) / ( cp_air*(pk(i,j,k)-pk(i,j,k+1)) ) enddo enddo @@ -2462,7 +2478,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !NOTE: since we have an isothermal atmosphere and specify constant height-thickness layers we will disregard ak and bk and specify the initial pressures in a different way dz = 12000./real(npz) - + allocate(zz0(npz+1)) allocate(pz0(npz+1)) @@ -2580,7 +2596,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call mpp_update_domains( uc, vc, domain, gridtype=CGRID_NE_PARAM) call fill_corners(uc, vc, npx, npy, npz, VECTOR=.true., CGRID=.true.) - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) case (2) !DCMIP 12 @@ -2589,11 +2605,11 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, case default call mpp_error(FATAL, 'Value of tracer_test not implemented ') end select - + else if (test_case == 52) then !Orography and steady-state test: DCMIP 20 - + f0 = 0. fC = 0. @@ -2641,7 +2657,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p1, p2, one ) + r = great_circle_dist( p1, p2, one ) if (r < r0) then phis(i,j) = grav*0.5*2000.*(1. + cos(pi*r/r0))*cos(pi*r/zetam)**2. pe(i,npz+1,j) = p00*(1.-gamma/T00*phis(i,j)/grav)**(1./exponent) @@ -2676,7 +2692,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !ANalytic layer-mean pt(i,j,k) = -grav*t00*p00/(rdgas*gamma + grav)/delp(i,j,k) * & ( (pe(i,k,j)/p00)**(exponent+1.) - (pe(i,k+1,j)/p00)**(exponent+1.) ) - + enddo enddo @@ -2720,12 +2736,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pk1(k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) enddo + call SuperCell_Sounding(npz, p00, pk1, ts1, qs1) w(:,:,:) = 0. q(:,:,:,:) = 0. - pp0(1) = 262.0/180.*pi ! OKC - pp0(2) = 35.0/180.*pi + pp0(1) = 262.0/180.*pi ! OKC + pp0(2) = 35.0/180.*pi do k=1,npz do j=js,je @@ -2758,7 +2775,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if (test_case > 0) then ! SRH = 40 if ( zm .le. 2.e3 ) then - utmp = 8.*(1.-cos(pi*zm/4.e3)) + utmp = 8.*(1.-cos(pi*zm/4.e3)) vtmp = 8.*sin(pi*zm/4.e3) elseif (zm .le. 6.e3 ) then utmp = 8. 
+ (us0-8.)*(zm-2.e3)/4.e3 @@ -2790,7 +2807,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, if( is_master() ) then write(6,*) k, utmp, vtmp endif - + do j=js,je do i=is,ie+1 p1(:) = grid(i ,j ,1:2) @@ -2819,7 +2836,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - .true., hydrostatic, nwat, domain) + .true., hydrostatic, nwat, domain, adiabatic) ! *** Add Initial perturbation *** pturb = 2. @@ -2874,7 +2891,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! pt = 0.25*pt1 + 0.125*(pt2+pt3+pt4+pt5) + 0.0625*(pt6+pt7+pt8+pt9) if ( test_case==35 ) then @@ -2914,7 +2931,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, pt(:,:,:) = t00 endif - if( test_case==33 ) then + if( test_case==33 ) then ! NCAR Ridge-mountain Mods: do j=js,je do i=is,ie @@ -2962,35 +2979,35 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! 9 4 8 ! ! 5 1 3 -! +! ! 6 2 7 ! pt = 0.25*pt1 + 0.125*(pt2+pt3+pt4+pt5) + 0.0625*(pt6+pt7+pt8+pt9) #ifdef USE_CELL_AVG - r = great_circle_dist( p0, agrid(i,j,1:2), radius ) + r = great_circle_dist( p0, agrid(i,j,1:2), radius ) pt1 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt2 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt3 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt4 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt5 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i,j,1:2), radius ) + r = great_circle_dist( p0, grid(i,j,1:2), radius ) pt6 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i+1,j,1:2), radius ) + r = great_circle_dist( p0, grid(i+1,j,1:2), radius ) pt7 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i+1,j+1,1:2), radius ) + r = great_circle_dist( p0, grid(i+1,j+1,1:2), radius ) pt8 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 - r = great_circle_dist( p0, grid(i,j+1,1:2), radius ) + r = great_circle_dist( p0, grid(i,j+1,1:2), radius ) pt9 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 phis(i,j) = grav*h0*(0.25*pt1+0.125*(pt2+pt3+pt4+pt5)+0.0625*(pt6+pt7+pt8+pt9)) #else - r = great_circle_dist( p0, agrid(i,j,1:2), radius ) + r = great_circle_dist( p0, agrid(i,j,1:2), radius ) pt1 = exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 phis(i,j) = grav*h0*exp(-(r/5.e3)**2)*cos(pi*r/4.e3)**2 #endif @@ -3072,13 +3089,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - .true., hydrostatic, nwat, domain) + .true., hydrostatic, nwat, domain, adiabatic) else if ( test_case==36 .or. test_case==37 ) then !------------------------------------ ! 
HIWPP Super-Cell !------------------------------------ -! HIWPP SUPER_K; +! HIWPP SUPER_K; f0(:,:) = 0. fC(:,:) = 0. q(:,:,:,:) = 0. @@ -3242,17 +3259,17 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = exp(kappa*peln(i,1,j)) enddo do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) pk(i,j,k) = exp(kappa*peln(i,k,j)) enddo enddo @@ -3266,7 +3283,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - dist = great_circle_dist( p0, agrid(i,j,1:2), radius ) + dist = great_circle_dist( p0, agrid(i,j,1:2), radius ) if ( dist .le. r0 ) then pt(i,j,k) = 275. q(i,j,k,1) = 1. @@ -3316,17 +3333,17 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + do j=js,je do i=is,ie pe(i,1,j) = ptop - peln(i,1,j) = log(pe(i,1,j)) + peln(i,1,j) = log(pe(i,1,j)) pk(i,j,1) = exp(kappa*peln(i,1,j)) enddo do k=2,npz+1 do i=is,ie pe(i,k,j) = pe(i,k-1,j) + delp(i,j,k-1) - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) pk(i,j,k) = exp(kappa*peln(i,k,j)) enddo enddo @@ -3334,7 +3351,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, ! Initiate the westerly-wind-burst: ubar = soliton_Umax - r0 = soliton_size + r0 = soliton_size !!$ if (test_case == 46) then !!$ ubar = 200. !!$ r0 = 250.e3 @@ -3438,7 +3455,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps(i,j) = p00 - dp*exp(-(r/rp)**1.5) phis(i,j) = 0. enddo @@ -3454,7 +3471,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo enddo enddo - + !Pressure do j=js,je do i=is,ie @@ -3471,18 +3488,18 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do j=js,je do i=is,ie+1 p2(:) = 0.5*(grid(i,j,1:2)+grid(i,j+1,1:2)) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps_v(i,j) = p00 - dp*exp(-(r/rp)**1.5) enddo enddo do j=js,je+1 do i=is,ie p2(:) = 0.5*(grid(i,j,1:2)+grid(i+1,j,1:2)) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) ps_u(i,j) = p00 - dp*exp(-(r/rp)**1.5) enddo enddo - + !Pressure do j=js,je do i=is,ie+1 @@ -3513,7 +3530,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, !endif p0 = (/ pi, pi/18. /) - + exppr = 1.5 exppz = 2. 
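!
! Sketch (not part of the patch): the hunks around here initialize the
! DCMIP-style tropical-cyclone test from an analytic surface-pressure field,
! ps = p00 - dp*exp(-(r/rp)**exppr), with r the great-circle distance from
! the vortex center p0. A self-contained evaluation of that field at one
! point; the values of p00, dp and rp below are assumptions for illustration,
! not taken from the patch:
!
      program tc_surface_pressure_sketch
        implicit none
        real, parameter :: pi     = 3.14159265358979
        real, parameter :: radius = 6.371e6  ! Earth radius (m), assumed
        real, parameter :: p00    = 1.015e5  ! ambient surface pressure (Pa), assumed
        real, parameter :: dp     = 1.115e3  ! central pressure depression (Pa), assumed
        real, parameter :: rp     = 2.82e5   ! radial decay scale (m), assumed
        real, parameter :: exppr  = 1.5      ! radial exponent, as set above
        real :: p0(2), p2(2), r, ps
        p0 = (/ pi, pi/18. /)                ! vortex center (lon,lat), as set above
        p2 = (/ pi + 0.01, pi/18. /)         ! a nearby sample grid point
        ! great-circle distance via the spherical law of cosines
        r  = radius*acos( sin(p0(2))*sin(p2(2)) &
                        + cos(p0(2))*cos(p2(2))*cos(p2(1)-p0(1)) )
        ps = p00 - dp*exp( -(r/rp)**exppr )
        print *, 'r (km) =', r*1.e-3, '   ps (hPa) =', ps*1.e-2
      end program tc_surface_pressure_sketch
!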
gamma = 0.007 @@ -3539,7 +3556,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, d2 = cos(p0(2))*sin(p3(1)-p0(1)) d = max(1.e-15,sqrt(d1**2+d2**2)) - r = great_circle_dist( p0, p3, radius ) + r = great_circle_dist( p0, p3, radius ) do k=1,npz ptmp = 0.5*(pe_v(i,k,j)+pe_v(i,k+1,j)) @@ -3553,7 +3570,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, +(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz))))) vtmp = utmp*d2 utmp = utmp*d1 - + v(i,j,k) = utmp*inner_prod(e2,ex) + vtmp*inner_prod(e2,ey) endif @@ -3572,7 +3589,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, d2 = cos(p0(2))*sin(p3(1)-p0(1)) d = max(1.e-15,sqrt(d1**2+d2**2)) - r = great_circle_dist( p0, p3, radius ) + r = great_circle_dist( p0, p3, radius ) do k=1,npz ptmp = 0.5*(pe_u(i,k,j)+pe_u(i,k+1,j)) @@ -3612,7 +3629,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, else q(i,j,k,1) = q00*exp(-height/zq1)*exp(-(height/zq2)**exppz) p2(:) = agrid(i,j,1:2) - r = great_circle_dist( p0, p2, radius ) + r = great_circle_dist( p0, p2, radius ) pt(i,j,k) = (T00-gamma*height)/(1.d0+zvir*q(i,j,k,1))/(1.d0+exppz*Rdgas*(T00-gamma*height)*height & /(grav*zp**exppz*(1.d0-p00/dp*exp((r/rp)**exppr)*exp((height/zp)**exppz)))) end if @@ -3638,7 +3655,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, enddo endif - call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) + call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng, bd) call prt_maxmin('PS', ps(is:ie,js:je), is, ie, js, je, 0, 1, 0.01) @@ -3652,9 +3669,9 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, do i=isd,ied f0(i,j) = cor enddo - enddo + enddo endif - + else if ( test_case == -55 ) then @@ -3678,7 +3695,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, #ifndef SUPER_K call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., mountain, & - moist_phys, hydrostatic, nwat, domain, .not.hydrostatic) + moist_phys, hydrostatic, nwat, domain, adiabatic, .not.hydrostatic) #endif #ifdef COLUMN_TRACER @@ -3704,7 +3721,7 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, #endif #endif - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) nullify(agrid) @@ -3716,21 +3733,21 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, nullify(fC) nullify(f0) - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) - - nullify(ee1) - nullify(ee2) - nullify(ew) - nullify(es) - nullify(en1) - nullify(en2) + nullify(dx) + nullify(dy) + nullify(dxa) + nullify(dya) + nullify(rdxa) + nullify(rdya) + nullify(dxc) + nullify(dyc) + + nullify(ee1) + nullify(ee2) + nullify(ew) + nullify(es) + nullify(en1) + nullify(en2) nullify(latlon) nullify(cubed_sphere) @@ -3738,13 +3755,13 @@ subroutine init_case(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, nullify(domain) nullify(tile) - nullify(have_south_pole) - nullify(have_north_pole) + nullify(have_south_pole) + nullify(have_north_pole) - nullify(ntiles_g) - nullify(acapN) - nullify(acapS) - nullify(globalarea) + nullify(ntiles_g) + nullify(acapN) + nullify(acapS) + nullify(globalarea) end subroutine init_case @@ -3778,9 +3795,9 @@ subroutine 
get_vorticity(isc, iec, jsc, jec ,isd, ied, jsd, jed, npz, u, v, vort enddo enddo enddo - + end subroutine get_vorticity - + subroutine checker_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & nq, km, q, lon, lat, nx, ny, rn) !-------------------------------------------------------------------- @@ -3851,11 +3868,13 @@ subroutine checker_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & end subroutine checker_tracers subroutine terminator_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & - km, q, delp, ncnst, lon, lat) + km, q, delp, ncnst, lon, lat, bd) !-------------------------------------------------------------------- ! This routine implements the terminator test. ! Coded by Lucas Harris for DCMIP 2016, May 2016 +! NOTE: Implementation assumes DRY mixing ratio!!! !-------------------------------------------------------------------- + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(in):: km ! vertical dimension integer, intent(in):: i0, i1 ! compute domain dimension in E-W integer, intent(in):: j0, j1 ! compute domain dimension in N-S @@ -3904,8 +3923,8 @@ subroutine terminator_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & !Compute qcly0 qcly0 = 0. if (is_master()) then - i = is - j = js + i = bd%is + j = bd%js mm = 0. do k=1,km qcly0 = qcly0 + (q(i,j,k,Cl) + 2.*q(i,j,k,Cl2))*delp(i,j,k) @@ -3915,20 +3934,22 @@ subroutine terminator_tracers(i0, i1, j0, j1, ifirst, ilast, jfirst, jlast, & endif call mpp_sum(qcly0) if (is_master()) print*, ' qcly0 = ', qcly0 - + end subroutine terminator_tracers - subroutine rankine_vortex(ubar, r0, p1, u, v, grid ) + subroutine rankine_vortex(ubar, r0, p1, u, v, grid, bd ) !---------------------------- ! Rankine vortex !---------------------------- + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(in):: ubar ! max wind (m/s) real, intent(in):: r0 ! Radius of max wind (m) real, intent(in):: p1(2) ! center position (longitude, latitude) in radian - real, intent(inout):: u(isd:ied, jsd:jed+1) - real, intent(inout):: v(isd:ied+1,jsd:jed) - real(kind=R_GRID), intent(IN) :: grid(isd:ied+1,jsd:jed+1,2) + real, intent(inout):: u(bd%isd:bd%ied, bd%jsd:bd%jed+1) + real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed) + real(kind=R_GRID), intent(IN) :: grid(bd%isd:bd%ied+1,bd%jsd:bd%jed+1,2) ! local: real(kind=R_GRID):: p2(2), p3(2), p4(2) real(kind=R_GRID):: e1(3), e2(3), ex(3), ey(3) @@ -3936,13 +3957,25 @@ subroutine rankine_vortex(ubar, r0, p1, u, v, grid ) real:: utmp, vtmp integer i, j + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + ! Compute u-wind do j=js,je+1 do i=is,ie call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) ! shift: p2(1) = p2(1) - p1(1) - cos_p = sin(p2(2))*sin(p1(2)) + cos(p2(2))*cos(p1(2))*cos(p2(1)) + cos_p = sin(p2(2))*sin(p1(2)) + cos(p2(2))*cos(p1(2))*cos(p2(1)) r = radius*acos(cos_p) ! great circle distance ! if( r<0.) 
call mpp_error(FATAL, 'radius negative!') if( r gridstruct%agrid_64 grid => gridstruct%grid_64 @@ -4191,7 +4241,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, dyc => gridstruct%dyc period = real( 12*24*3600 ) !12 days - + l = 2.*pi/period dt2 = dt*0.5 @@ -4222,7 +4272,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4308,7 +4358,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4347,7 +4397,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo - call mp_update_dwinds(u(:,:,1), v(:,:,1), npx, npy, domain) + call mp_update_dwinds(u(:,:,1), v(:,:,1), npx, npy, domain, bd) ! copy vertically; no wind shear do k=2,npz @@ -4363,12 +4413,12 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) - call dtoa( u(:,:,1), v(:,:,1),ua(:,:,1),va(:,:,1),dx,dy,dxa,dya,dxc,dyc,npx,npy,ng) + call dtoa( u(:,:,1), v(:,:,1),ua(:,:,1),va(:,:,1),dx,dy,dxa,dya,dxc,dyc,npx,npy,ng,bd) call mpp_update_domains( ua, va, domain, gridtype=AGRID_PARAM) !! ABSOLUTELY NECESSARY!! - call atoc(ua(:,:,1),va(:,:,1),uc(:,:,1),vc(:,:,1),dx,dy,dxa,dya,npx,npy,ng, gridstruct%nested, domain) - + call atoc(ua(:,:,1),va(:,:,1),uc(:,:,1),vc(:,:,1),dx,dy,dxa,dya,npx,npy,ng, gridstruct%bounded_domain, domain, bd) + do k=2,npz do j=js,je do i=is,ie @@ -4409,7 +4459,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, enddo enddo enddo - + do k=1,npz do j=js,je do i=is,ie @@ -4420,7 +4470,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, ubar = 40. - !Set lat-lon A-grid winds + !Set lat-lon A-grid winds k = 1 do j=js,je do i=is,ie @@ -4444,7 +4494,7 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, call mpp_update_domains( uc, vc, domain, gridtype=CGRID_NE_PARAM) call fill_corners(uc, vc, npx, npy, npz, VECTOR=.true., CGRID=.true.) - call mp_update_dwinds(u, v, npx, npy, npz, domain) + call mp_update_dwinds(u, v, npx, npy, npz, domain, bd) nullify(agrid) nullify(grid) @@ -4456,1544 +4506,1585 @@ subroutine case51_forcing(delp, uc, vc, u, v, ua, va, pe, time, dt, gridstruct, end subroutine case51_forcing -!------------------------------------------------------------------------------- -! -! get_stats :: get L-1, L-2, and L-inf norms and other stats as defined -! 
in Williamson, 1994 (p.16) - subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & - uc,vc, ua,va, npx, npy, npz, ncnst, ndims, nregions, & - gridstruct, stats_lun, consv_lun, monitorFreq, tile, & - domain, nested) - integer, intent(IN) :: nt, maxnt - real , intent(IN) :: dt, dtout, ndays - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) - real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) - real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) - real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) - integer, intent(IN) :: npx, npy, npz, ncnst, tile - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions - integer, intent(IN) :: stats_lun - integer, intent(IN) :: consv_lun - integer, intent(IN) :: monitorFreq - type(fv_grid_type), target :: gridstruct - type(domain2d), intent(INOUT) :: domain - logical, intent(IN) :: nested - - real :: L1_norm - real :: L2_norm - real :: Linf_norm - real :: pmin, pmin1, uamin1, vamin1 - real :: pmax, pmax1, uamax1, vamax1 - real(kind=4) :: arr_r4(5) - real :: tmass0, tvort0, tener0, tKE0 - real :: tmass, tvort, tener, tKE - real :: temp(is:ie,js:je) - integer :: i0, j0, k0, n0 - integer :: i, j, k, n, iq - - real :: psmo, Vtx, p, w_p, p0 - real :: x1,y1,z1,x2,y2,z2,ang - - real :: p1(2), p2(2), p3(2), r, r0, dist, heading - - real :: uc0(isd:ied+1,jsd:jed ,npz) - real :: vc0(isd:ied ,jsd:jed+1,npz) - - real :: myDay - integer :: myRec - - real, save, allocatable, dimension(:,:,:) :: u0, v0 - real :: up(isd:ied ,jsd:jed+1,npz) - real :: vp(isd:ied+1,jsd:jed ,npz) - - real, dimension(:,:,:), pointer :: grid, agrid - real, dimension(:,:), pointer :: area, f0, dx, dy, dxa, dya, dxc, dyc - - grid => gridstruct%grid - agrid=> gridstruct%agrid - - area => gridstruct%area - f0 => gridstruct%f0 - - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - !!! DEBUG CODE - if (nt == 0 .and. is_master()) print*, 'INITIALIZING GET_STATS' - !!! END DEBUG CODE - - myDay = ndays*((FLOAT(nt)/FLOAT(maxnt))) - -#if defined(SW_DYNAMICS) - if (test_case==0) then - phi0 = 0.0 - do j=js,je - do i=is,ie - x1 = agrid(i,j,1) - y1 = agrid(i,j,2) - z1 = radius - p = p0_c0 * cos(y1) - Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) - w_p = 0.0 - if (p /= 0.0) w_p = Vtx/p - ! delp(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) - phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) - enddo - enddo - elseif (test_case==1) then -! Get Current Height Field "Truth" - p1(1) = pi/2. + pi_shift - p1(2) = 0. - p2(1) = 3.*pi/2. + pi_shift - p2(2) = 0. - r0 = radius/3. !RADIUS 3. - dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) - heading = 3.0*pi/2.0 - alpha !5.0*pi/2.0 - alpha - call get_pt_on_great_circle( p1, p2, dist, heading, p3) - phi0 = 0.0 - do j=js,je - do i=is,ie - p2(1) = agrid(i,j,1) - p2(2) = agrid(i,j,2) - r = great_circle_dist( p3, p2, radius ) - if (r < r0) then - phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) - else - phi0(i,j,1) = phis(i,j) - endif - enddo - enddo - endif - -! 
Get Height Field Stats - call pmxn(delp(:,:,1), npx, npy, nregions, tile, gridstruct, pmin1, pmax1, i0, j0, n0) - pmin1=pmin1/Grav - pmax1=pmax1/Grav - if (test_case <= 2) then - call get_scalar_stats( delp(:,:,1), phi0(:,:,1), npx, npy, ndims, nregions, & - pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) - pmin=pmin/Grav - pmax=pmax/Grav - arr_r4(1) = pmin1 - arr_r4(2) = pmax1 - arr_r4(3) = L1_norm - arr_r4(4) = L2_norm - arr_r4(5) = Linf_norm - !if (is_master()) write(stats_lun,rec=(nt)*2 + 1) arr_r4 - else - arr_r4(1) = pmin1 - arr_r4(2) = pmax1 - arr_r4(3:5) = 0. - pmin = 0. - pmax = 0. - L1_norm = 0. - L2_norm = 0. - Linf_norm = 0. - endif - - 200 format(i6.6,A,i6.6,A,e21.14) - 201 format(' ',A,e21.14,' ',e21.14) - 202 format(' ',A,i4.4,'x',i4.4,'x',i4.4) - - if ( (is_master()) .and. MOD(nt,monitorFreq)==0 ) then - write(*,200) nt, ' step of ', maxnt, ' DAY ', myDay - write(*,201) 'Height MAX : ', pmax1 - write(*,201) 'Height MIN : ', pmin1 - write(*,202) 'HGT MAX location : ', i0, j0, n0 - if (test_case <= 2) then - write(*,201) 'Height L1_norm : ', L1_norm - write(*,201) 'Height L2_norm : ', L2_norm - write(*,201) 'Height Linf_norm : ', Linf_norm - endif - endif - -! Get UV Stats - call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) - call pmxn(ua(:,:,1), npx, npy, nregions, tile, gridstruct, pmin1, pmax1, i0, j0, n0) - if (test_case <= 2) then - call get_vector_stats( ua(:,:,1), ua0(:,:,1), va(:,:,1), va0(:,:,1), npx, npy, ndims, nregions, & - pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) - endif - arr_r4(1) = pmin1 - arr_r4(2) = pmax1 - arr_r4(3) = L1_norm - arr_r4(4) = L2_norm - arr_r4(5) = Linf_norm - !if (is_master()) write(stats_lun,rec=(nt)*2 + 2) arr_r4 - if ( (is_master()) .and. MOD(nt,monitorFreq)==0) then - write(*,201) 'UV MAX : ', pmax1 - write(*,201) 'UV MIN : ', pmin1 - write(*,202) 'UV MAX location : ', i0, j0, n0 - if (test_case <= 2) then - write(*,201) 'UV L1_norm : ', L1_norm - write(*,201) 'UV L2_norm : ', L2_norm - write(*,201) 'UV Linf_norm : ', Linf_norm - endif - endif -#else - - 200 format(i6.6,A,i6.6,A,e10.4) - 201 format(' ',A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4) - 202 format(' ',A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4,' ',e10.4) - 203 format(' ',A,i3.3,A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4) - - if(is_master()) write(*,200) nt, ' step of ', maxnt, ' DAY ', myDay - -! Surface Pressure - psmo = globalsum(ps(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - if(is_master()) write(*,*) ' Total surface pressure =', 0.01*psmo - call pmxn(ps, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - if (is_master()) then - write(*,201) 'PS MAX|MIN : ', 0.01*pmax, 0.01*pmin, i0, j0, n0 - endif - -! Get PT Stats - pmax1 = -1.e25 - pmin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call pmxn(pt(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - pmin1 = min(pmin, pmin1) - pmax1 = max(pmax, pmax1) - if (pmax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,201) 'PT MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 - endif +!!$!------------------------------------------------------------------------------- +!!$! +!!$! get_stats :: get L-1, L-2, and L-inf norms and other stats as defined +!!$! 
in Williamson, 1994 (p.16) +!!$ subroutine get_stats(dt, dtout, nt, maxnt, ndays, u,v,pt,delp,q,phis, ps, & +!!$ uc,vc, ua,va, npx, npy, npz, ncnst, ndims, nregions, & +!!$ gridstruct, stats_lun, consv_lun, monitorFreq, tile, & +!!$ domain, bounded_domain, bd) +!!$ type(fv_grid_bounds_type), intent(IN) :: bd +!!$ integer, intent(IN) :: nt, maxnt +!!$ real , intent(IN) :: dt, dtout, ndays +!!$ real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +!!$ real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) +!!$ real , intent(INOUT) :: pt(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +!!$ real , intent(INOUT) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +!!$ real , intent(INOUT) :: q(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) +!!$ real , intent(INOUT) :: phis(bd%isd:bd%ied ,bd%jsd:bd%jed ) +!!$ real , intent(INOUT) :: ps(bd%isd:bd%ied ,bd%jsd:bd%jed ) +!!$ real , intent(INOUT) :: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) +!!$ real , intent(INOUT) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +!!$ real , intent(INOUT) :: ua(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +!!$ real , intent(INOUT) :: va(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) +!!$ integer, intent(IN) :: npx, npy, npz, ncnst, tile +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions +!!$ integer, intent(IN) :: stats_lun +!!$ integer, intent(IN) :: consv_lun +!!$ integer, intent(IN) :: monitorFreq +!!$ type(fv_grid_type), target :: gridstruct +!!$ type(domain2d), intent(INOUT) :: domain +!!$ logical, intent(IN) :: bounded_domain +!!$ +!!$ real :: L1_norm +!!$ real :: L2_norm +!!$ real :: Linf_norm +!!$ real :: pmin, pmin1, uamin1, vamin1 +!!$ real :: pmax, pmax1, uamax1, vamax1 +!!$ real(kind=4) :: arr_r4(5) +!!$ real :: tmass0, tvort0, tener0, tKE0 +!!$ real :: tmass, tvort, tener, tKE +!!$ real :: temp(bd%is:bd%ie,bd%js:bd%je) +!!$ integer :: i0, j0, k0, n0 +!!$ integer :: i, j, k, n, iq +!!$ +!!$ real :: psmo, Vtx, p, w_p, p0 +!!$ real :: x1,y1,z1,x2,y2,z2,ang +!!$ +!!$ real :: p1(2), p2(2), p3(2), r, r0, dist, heading +!!$ +!!$ real :: uc0(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) +!!$ real :: vc0(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +!!$ +!!$ real :: myDay +!!$ integer :: myRec +!!$ +!!$ real, save, allocatable, dimension(:,:,:) :: u0, v0 +!!$ real :: up(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) +!!$ real :: vp(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) +!!$ +!!$ real, dimension(:,:,:), pointer :: grid, agrid +!!$ real, dimension(:,:), pointer :: area, f0, dx, dy, dxa, dya, dxc, dyc +!!$ +!!$ integer :: is, ie, js, je +!!$ integer :: isd, ied, jsd, jed +!!$ +!!$ is = bd%is +!!$ ie = bd%ie +!!$ js = bd%js +!!$ je = bd%je +!!$ isd = bd%isd +!!$ ied = bd%ied +!!$ jsd = bd%jsd +!!$ jed = bd%jed +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ f0 => gridstruct%f0 +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ !!! DEBUG CODE +!!$ if (nt == 0 .and. is_master()) print*, 'INITIALIZING GET_STATS' +!!$ !!! END DEBUG CODE +!!$ +!!$ myDay = ndays*((FLOAT(nt)/FLOAT(maxnt))) +!!$ +!!$#if defined(SW_DYNAMICS) +!!$ if (test_case==0) then +!!$ phi0 = 0.0 +!!$ do j=js,je +!!$ do i=is,ie +!!$ x1 = agrid(i,j,1) +!!$ y1 = agrid(i,j,2) +!!$ z1 = radius +!!$ p = p0_c0 * cos(y1) +!!$ Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) +!!$ w_p = 0.0 +!!$ if (p /= 0.0) w_p = Vtx/p +!!$ ! 
delp(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) +!!$ phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) +!!$ enddo +!!$ enddo +!!$ elseif (test_case==1) then +!!$! Get Current Height Field "Truth" +!!$ p1(1) = pi/2. + pi_shift +!!$ p1(2) = 0. +!!$ p2(1) = 3.*pi/2. + pi_shift +!!$ p2(2) = 0. +!!$ r0 = radius/3. !RADIUS 3. +!!$ dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) +!!$ heading = 3.0*pi/2.0 - alpha !5.0*pi/2.0 - alpha +!!$ call get_pt_on_great_circle( p1, p2, dist, heading, p3) +!!$ phi0 = 0.0 +!!$ do j=js,je +!!$ do i=is,ie +!!$ p2(1) = agrid(i,j,1) +!!$ p2(2) = agrid(i,j,2) +!!$ r = great_circle_dist( p3, p2, radius ) +!!$ if (r < r0) then +!!$ phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) +!!$ else +!!$ phi0(i,j,1) = phis(i,j) +!!$ endif +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$! Get Height Field Stats +!!$ call pmxn(delp(:,:,1), npx, npy, nregions, tile, gridstruct, pmin1, pmax1, i0, j0, n0) +!!$ pmin1=pmin1/Grav +!!$ pmax1=pmax1/Grav +!!$ if (test_case <= 2) then +!!$ call get_scalar_stats( delp(:,:,1), phi0(:,:,1), npx, npy, ndims, nregions, & +!!$ pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile, bd) +!!$ pmin=pmin/Grav +!!$ pmax=pmax/Grav +!!$ arr_r4(1) = pmin1 +!!$ arr_r4(2) = pmax1 +!!$ arr_r4(3) = L1_norm +!!$ arr_r4(4) = L2_norm +!!$ arr_r4(5) = Linf_norm +!!$ !if (is_master()) write(stats_lun,rec=(nt)*2 + 1) arr_r4 +!!$ else +!!$ arr_r4(1) = pmin1 +!!$ arr_r4(2) = pmax1 +!!$ arr_r4(3:5) = 0. +!!$ pmin = 0. +!!$ pmax = 0. +!!$ L1_norm = 0. +!!$ L2_norm = 0. +!!$ Linf_norm = 0. +!!$ endif +!!$ +!!$ 200 format(i6.6,A,i6.6,A,e21.14) +!!$ 201 format(' ',A,e21.14,' ',e21.14) +!!$ 202 format(' ',A,i4.4,'x',i4.4,'x',i4.4) +!!$ +!!$ if ( (is_master()) .and. MOD(nt,monitorFreq)==0 ) then +!!$ write(*,200) nt, ' step of ', maxnt, ' DAY ', myDay +!!$ write(*,201) 'Height MAX : ', pmax1 +!!$ write(*,201) 'Height MIN : ', pmin1 +!!$ write(*,202) 'HGT MAX location : ', i0, j0, n0 +!!$ if (test_case <= 2) then +!!$ write(*,201) 'Height L1_norm : ', L1_norm +!!$ write(*,201) 'Height L2_norm : ', L2_norm +!!$ write(*,201) 'Height Linf_norm : ', Linf_norm +!!$ endif +!!$ endif +!!$ +!!$! Get UV Stats +!!$ call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) +!!$ call pmxn(ua(:,:,1), npx, npy, nregions, tile, gridstruct, pmin1, pmax1, i0, j0, n0) +!!$ if (test_case <= 2) then +!!$ call get_vector_stats( ua(:,:,1), ua0(:,:,1), va(:,:,1), va0(:,:,1), npx, npy, ndims, nregions, & +!!$ pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) +!!$ endif +!!$ arr_r4(1) = pmin1 +!!$ arr_r4(2) = pmax1 +!!$ arr_r4(3) = L1_norm +!!$ arr_r4(4) = L2_norm +!!$ arr_r4(5) = Linf_norm +!!$ !if (is_master()) write(stats_lun,rec=(nt)*2 + 2) arr_r4 +!!$ if ( (is_master()) .and. MOD(nt,monitorFreq)==0) then +!!$ write(*,201) 'UV MAX : ', pmax1 +!!$ write(*,201) 'UV MIN : ', pmin1 +!!$ write(*,202) 'UV MAX location : ', i0, j0, n0 +!!$ if (test_case <= 2) then +!!$ write(*,201) 'UV L1_norm : ', L1_norm +!!$ write(*,201) 'UV L2_norm : ', L2_norm +!!$ write(*,201) 'UV Linf_norm : ', Linf_norm +!!$ endif +!!$ endif +!!$#else +!!$ +!!$ 200 format(i6.6,A,i6.6,A,e10.4) +!!$ 201 format(' ',A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4) +!!$ 202 format(' ',A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4,' ',e10.4) +!!$ 203 format(' ',A,i3.3,A,e10.4,' ',e10.4,' ',i4.4,'x',i4.4,'x',i4.4,'x',i4.4) +!!$ +!!$ if(is_master()) write(*,200) nt, ' step of ', maxnt, ' DAY ', myDay +!!$ +!!$! 
Surface Pressure +!!$ psmo = globalsum(ps(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ if(is_master()) write(*,*) ' Total surface pressure =', 0.01*psmo +!!$ call pmxn(ps, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ if (is_master()) then +!!$ write(*,201) 'PS MAX|MIN : ', 0.01*pmax, 0.01*pmin, i0, j0, n0 +!!$ endif +!!$ +!!$! Get PT Stats +!!$ pmax1 = -1.e25 +!!$ pmin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call pmxn(pt(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ pmin1 = min(pmin, pmin1) +!!$ pmax1 = max(pmax, pmax1) +!!$ if (pmax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,201) 'PT MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 +!!$ endif +!!$ +!!$#if defined(DEBUG_TEST_CASES) +!!$ if(is_master()) write(*,*) ' ' +!!$ do k=1,npz +!!$ pmax1 = -1.e25 +!!$ pmin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ call pmxn(pt(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ pmin1 = min(pmin, pmin1) +!!$ pmax1 = max(pmax, pmax1) +!!$ if (is_master()) then +!!$ write(*,202) 'PT MAX|MIN : ', pmax1, pmin1, i0, j0, k, n0, 0.5*( (ak(k)+ak(k+1))/1.e5 + bk(k)+bk(k+1) ) +!!$ endif +!!$ enddo +!!$ if(is_master()) write(*,*) ' ' +!!$#endif +!!$ +!!$! Get DELP Stats +!!$ pmax1 = -1.e25 +!!$ pmin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call pmxn(delp(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ pmin1 = min(pmin, pmin1) +!!$ pmax1 = max(pmax, pmax1) +!!$ if (pmax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,201) 'Delp MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 +!!$ endif +!!$ +!!$! Get UV Stats +!!$ uamax1 = -1.e25 +!!$ uamin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call dtoa(u(isd,jsd,k), v(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), dx,dy,dxa,dya,dxc,dyc,npx, npy, bd%ng) +!!$ call pmxn(ua(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ uamin1 = min(pmin, uamin1) +!!$ uamax1 = max(pmax, uamax1) +!!$ if (uamax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,201) 'U MAX|MIN : ', uamax1, uamin1, i0, j0, k0, n0 +!!$ endif +!!$ +!!$ vamax1 = -1.e25 +!!$ vamin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call pmxn(va(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ vamin1 = min(pmin, vamin1) +!!$ vamax1 = max(pmax, vamax1) +!!$ if (vamax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,201) 'V MAX|MIN : ', vamax1, vamin1, i0, j0, k0, n0 +!!$ endif +!!$ +!!$! Get Q Stats +!!$ pmax1 = -1.e25 +!!$ pmin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call pmxn(q(isd,jsd,k,1), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ pmin1 = min(pmin, pmin1) +!!$ pmax1 = max(pmax, pmax1) +!!$ if (pmax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,201) 'Q MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 +!!$ endif +!!$ +!!$! 
Get tracer Stats +!!$ do iq=2,ncnst +!!$ pmax1 = -1.e25 +!!$ pmin1 = 1.e25 +!!$ i0=-999 +!!$ j0=-999 +!!$ k0=-999 +!!$ n0=-999 +!!$ do k=1,npz +!!$ call pmxn(q(isd,jsd,k,iq), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ pmin1 = min(pmin, pmin1) +!!$ pmax1 = max(pmax, pmax1) +!!$ if (pmax1 == pmax) k0 = k +!!$ enddo +!!$ if (is_master()) then +!!$ write(*,203) 'TR',iq-1,' MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 +!!$ endif +!!$ enddo +!!$ +!!$#endif +!!$ +!!$ if (test_case == 12) then +!!$! Get UV Stats +!!$ call get_vector_stats( ua(:,:,22), ua0(:,:,22), va(:,:,22), va0(:,:,22), npx, npy, ndims, nregions, & +!!$ pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile, bd) +!!$ if (is_master()) then +!!$ write(*,201) 'UV(850) L1_norm : ', L1_norm +!!$ write(*,201) 'UV(850) L2_norm : ', L2_norm +!!$ write(*,201) 'UV(850) Linf_norm : ', Linf_norm +!!$ endif +!!$ endif +!!$ +!!$ tmass = 0.0 +!!$ tKE = 0.0 +!!$ tener = 0.0 +!!$ tvort = 0.0 +!!$#if defined(SW_DYNAMICS) +!!$ do k=1,1 +!!$#else +!!$ do k=1,npz +!!$#endif +!!$! Get conservation Stats +!!$ +!!$! Conservation of Mass +!!$ temp(:,:) = delp(is:ie,js:je,k) +!!$ tmass0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ tmass = tmass + tmass0 +!!$ +!!$ !if (.not. allocated(u0, v0)) then +!!$ if (nt == 0) then +!!$ allocate(u0(isd:ied,jsd:jed+1,npz)) +!!$ allocate(v0(isd:ied+1,jsd:jed,npz)) +!!$ u0 = u +!!$ v0 = v +!!$ endif +!!$ +!!$ !! UA is the PERTURBATION now +!!$ up = u - u0 +!!$ vp = v - v0 +!!$ +!!$ call dtoa(up(isd,jsd,k), vp(isd,jsd,k), ua, va, dx,dy, dxa, dya, dxc, dyc, npx, npy, bd%ng) +!!$ call atoc(ua(isd,jsd,k),va(isd,jsd,k),uc0(isd,jsd,k),vc0(isd,jsd,k),dx,dy,dxa,dya,npx,npy,bd%ng,bounded_domain, domain, noComm=.true.) +!!$! Conservation of Kinetic Energy +!!$ do j=js,je +!!$ do i=is,ie +!!$ temp(i,j) = ( uc0(i,j,k)*uc0(i,j,k) + uc0(i+1,j,k)*uc0(i+1,j,k) + & +!!$ vc0(i,j,k)*vc0(i,j,k) + vc0(i,j+1,k)*vc0(i,j+1,k) ) +!!$ enddo +!!$ enddo +!!$ tKE0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ tKE = tKE + tKE0 +!!$ +!!$! Conservation of Energy +!!$ do j=js,je +!!$ do i=is,ie +!!$ temp(i,j) = 0.5 * (delp(i,j,k)/Grav) * temp(i,j) ! Include Previously calcullated KE +!!$ temp(i,j) = temp(i,j) + & +!!$ Grav*((delp(i,j,k)/Grav + phis(i,j))*(delp(i,j,k)/Grav + phis(i,j))) - & +!!$ phis(i,j)*phis(i,j) +!!$ enddo +!!$ enddo +!!$ tener0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ tener = tener + tener0 +!!$ +!!$! Conservation of Potential Enstrophy +!!$ if (test_case>1) then +!!$ do j=js,je +!!$ do i=is,ie +!!$ temp(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,k)*dy(i+1,j) - v(i,j,k)*dy(i,j)) - & +!!$ (u(i,j+1,k)*dx(i,j+1) - u(i,j,k)*dx(i,j)) ) +!!$ temp(i,j) = ( Grav*(temp(i,j)*temp(i,j))/delp(i,j,k) ) +!!$ enddo +!!$ enddo +!!$ tvort0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ tvort = tvort + tvort0 +!!$ else +!!$ tvort=1. 
+!!$ endif +!!$ enddo +!!$ +!!$ if (nt == 0) then +!!$ tmass_orig = tmass +!!$ tener_orig = tener +!!$ tvort_orig = tvort +!!$ endif +!!$ arr_r4(1) = (tmass-tmass_orig)/tmass_orig +!!$ arr_r4(2) = (tener-tener_orig)/tener_orig +!!$ arr_r4(3) = (tvort-tvort_orig)/tvort_orig +!!$ arr_r4(4) = tKE +!!$ if (test_case==12) arr_r4(4) = L2_norm +!!$#if defined(SW_DYNAMICS) +!!$ myRec = nt+1 +!!$#else +!!$ myRec = myDay*86400.0/dtout + 1 +!!$#endif +!!$ if (is_master()) write(consv_lun,rec=myRec) arr_r4(1:4) +!!$#if defined(SW_DYNAMICS) +!!$ if ( (is_master()) .and. MOD(nt,monitorFreq)==0) then +!!$#else +!!$ if ( (is_master()) ) then +!!$#endif +!!$ write(*,201) 'MASS TOTAL : ', tmass +!!$ write(*,201) 'NORMALIZED MASS : ', (tmass-tmass_orig)/tmass_orig +!!$ if (test_case >= 2) then +!!$ write(*,201) 'Kinetic Energy KE : ', tKE +!!$ write(*,201) 'ENERGY TOTAL : ', tener +!!$ write(*,201) 'NORMALIZED ENERGY : ', (tener-tener_orig)/tener_orig +!!$ write(*,201) 'ENSTR TOTAL : ', tvort +!!$ write(*,201) 'NORMALIZED ENSTR : ', (tvort-tvort_orig)/tvort_orig +!!$ endif +!!$ write(*,*) ' ' +!!$ endif +!!$ +!!$ nullify(grid) +!!$ nullify(agrid) +!!$ nullify(area) +!!$ nullify(f0) +!!$ nullify(dx) +!!$ nullify(dy) +!!$ +!!$ end subroutine get_stats -#if defined(DEBUG_TEST_CASES) - if(is_master()) write(*,*) ' ' - do k=1,npz - pmax1 = -1.e25 - pmin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - call pmxn(pt(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - pmin1 = min(pmin, pmin1) - pmax1 = max(pmax, pmax1) - if (is_master()) then - write(*,202) 'PT MAX|MIN : ', pmax1, pmin1, i0, j0, k, n0, 0.5*( (ak(k)+ak(k+1))/1.e5 + bk(k)+bk(k+1) ) - endif - enddo - if(is_master()) write(*,*) ' ' -#endif -! Get DELP Stats - pmax1 = -1.e25 - pmin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call pmxn(delp(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - pmin1 = min(pmin, pmin1) - pmax1 = max(pmax, pmax1) - if (pmax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,201) 'Delp MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 - endif -! Get UV Stats - uamax1 = -1.e25 - uamin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call dtoa(u(isd,jsd,k), v(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) - call pmxn(ua(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - uamin1 = min(pmin, uamin1) - uamax1 = max(pmax, uamax1) - if (uamax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,201) 'U MAX|MIN : ', uamax1, uamin1, i0, j0, k0, n0 - endif + subroutine get_pt_on_great_circle(p1, p2, dist, heading, p3) +! get_pt_on_great_circle :: Get the mid-point on a great circle given: +! -2 points (Lon/Lat) to define a great circle +! -Great Cirle distance between 2 defining points +! -Heading +! compute: +! Arrival Point (Lon/Lat) - vamax1 = -1.e25 - vamin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call pmxn(va(:,:,k), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - vamin1 = min(pmin, vamin1) - vamax1 = max(pmax, vamax1) - if (vamax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,201) 'V MAX|MIN : ', vamax1, vamin1, i0, j0, k0, n0 - endif + real , intent(IN) :: p1(2), p2(2) + real , intent(IN) :: dist + real , intent(IN) :: heading + real , intent(OUT) :: p3(2) -! 
Get Q Stats - pmax1 = -1.e25 - pmin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call pmxn(q(isd,jsd,k,1), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - pmin1 = min(pmin, pmin1) - pmax1 = max(pmax, pmax1) - if (pmax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,201) 'Q MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 - endif + real pha, dp -! Get tracer Stats - do iq=2,ncnst - pmax1 = -1.e25 - pmin1 = 1.e25 - i0=-999 - j0=-999 - k0=-999 - n0=-999 - do k=1,npz - call pmxn(q(isd,jsd,k,iq), npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - pmin1 = min(pmin, pmin1) - pmax1 = max(pmax, pmax1) - if (pmax1 == pmax) k0 = k - enddo - if (is_master()) then - write(*,203) 'TR',iq-1,' MAX|MIN : ', pmax1, pmin1, i0, j0, k0, n0 - endif - enddo + pha = dist/radius -#endif + p3(2) = ASIN( (COS(heading)*COS(p1(2))*SIN(pha)) + (SIN(p1(2))*COS(pha)) ) + dp = ATAN2( SIN(heading)*SIN(pha)*COS(p1(2)) , COS(pha) - SIN(p1(2))*SIN(p3(2)) ) + p3(1) = MOD( (p1(1)-pi)-dp+pi , 2.*pi ) !- pi Leave at 0 to 360 - if (test_case == 12) then -! Get UV Stats - call get_vector_stats( ua(:,:,22), ua0(:,:,22), va(:,:,22), va0(:,:,22), npx, npy, ndims, nregions, & - pmin, pmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) - if (is_master()) then - write(*,201) 'UV(850) L1_norm : ', L1_norm - write(*,201) 'UV(850) L2_norm : ', L2_norm - write(*,201) 'UV(850) Linf_norm : ', Linf_norm - endif - endif + end subroutine get_pt_on_great_circle - tmass = 0.0 - tKE = 0.0 - tener = 0.0 - tvort = 0.0 -#if defined(SW_DYNAMICS) - do k=1,1 -#else - do k=1,npz -#endif -! Get conservation Stats - -! Conservation of Mass - temp(:,:) = delp(is:ie,js:je,k) - tmass0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - tmass = tmass + tmass0 - - !if (.not. allocated(u0, v0)) then - if (nt == 0) then - allocate(u0(isd:ied,jsd:jed+1,npz)) - allocate(v0(isd:ied+1,jsd:jed,npz)) - u0 = u - v0 = v - endif - - !! UA is the PERTURBATION now - up = u - u0 - vp = v - v0 - - call dtoa(up(isd,jsd,k), vp(isd,jsd,k), ua, va, dx,dy, dxa, dya, dxc, dyc, npx, npy, ng) - call atoc(ua(isd,jsd,k),va(isd,jsd,k),uc0(isd,jsd,k),vc0(isd,jsd,k),dx,dy,dxa,dya,npx,npy,ng,nested, domain, noComm=.true.) -! Conservation of Kinetic Energy - do j=js,je - do i=is,ie - temp(i,j) = ( uc0(i,j,k)*uc0(i,j,k) + uc0(i+1,j,k)*uc0(i+1,j,k) + & - vc0(i,j,k)*vc0(i,j,k) + vc0(i,j+1,k)*vc0(i,j+1,k) ) - enddo - enddo - tKE0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - tKE = tKE + tKE0 -! Conservation of Energy - do j=js,je - do i=is,ie - temp(i,j) = 0.5 * (delp(i,j,k)/Grav) * temp(i,j) ! Include Previously calcullated KE - temp(i,j) = temp(i,j) + & - Grav*((delp(i,j,k)/Grav + phis(i,j))*(delp(i,j,k)/Grav + phis(i,j))) - & - phis(i,j)*phis(i,j) - enddo - enddo - tener0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - tener = tener + tener0 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- -! Conservation of Potential Enstrophy - if (test_case>1) then - do j=js,je - do i=is,ie - temp(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,k)*dy(i+1,j) - v(i,j,k)*dy(i,j)) - & - (u(i,j+1,k)*dx(i,j+1) - u(i,j,k)*dx(i,j)) ) - temp(i,j) = ( Grav*(temp(i,j)*temp(i,j))/delp(i,j,k) ) - enddo - enddo - tvort0 = globalsum(temp, npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - tvort = tvort + tvort0 - else - tvort=1. 
- endif - enddo - - if (nt == 0) then - tmass_orig = tmass - tener_orig = tener - tvort_orig = tvort - endif - arr_r4(1) = (tmass-tmass_orig)/tmass_orig - arr_r4(2) = (tener-tener_orig)/tener_orig - arr_r4(3) = (tvort-tvort_orig)/tvort_orig - arr_r4(4) = tKE - if (test_case==12) arr_r4(4) = L2_norm -#if defined(SW_DYNAMICS) - myRec = nt+1 -#else - myRec = myDay*86400.0/dtout + 1 -#endif - if (is_master()) write(consv_lun,rec=myRec) arr_r4(1:4) -#if defined(SW_DYNAMICS) - if ( (is_master()) .and. MOD(nt,monitorFreq)==0) then -#else - if ( (is_master()) ) then -#endif - write(*,201) 'MASS TOTAL : ', tmass - write(*,201) 'NORMALIZED MASS : ', (tmass-tmass_orig)/tmass_orig - if (test_case >= 2) then - write(*,201) 'Kinetic Energy KE : ', tKE - write(*,201) 'ENERGY TOTAL : ', tener - write(*,201) 'NORMALIZED ENERGY : ', (tener-tener_orig)/tener_orig - write(*,201) 'ENSTR TOTAL : ', tvort - write(*,201) 'NORMALIZED ENSTR : ', (tvort-tvort_orig)/tvort_orig - endif - write(*,*) ' ' - endif - - nullify(grid) - nullify(agrid) - nullify(area) - nullify(f0) - nullify(dx) - nullify(dy) - - end subroutine get_stats - - - - subroutine get_pt_on_great_circle(p1, p2, dist, heading, p3) -! get_pt_on_great_circle :: Get the mid-point on a great circle given: -! -2 points (Lon/Lat) to define a great circle -! -Great Cirle distance between 2 defining points -! -Heading -! compute: -! Arrival Point (Lon/Lat) - - real , intent(IN) :: p1(2), p2(2) - real , intent(IN) :: dist - real , intent(IN) :: heading - real , intent(OUT) :: p3(2) - - real pha, dp - - pha = dist/radius - - p3(2) = ASIN( (COS(heading)*COS(p1(2))*SIN(pha)) + (SIN(p1(2))*COS(pha)) ) - dp = ATAN2( SIN(heading)*SIN(pha)*COS(p1(2)) , COS(pha) - SIN(p1(2))*SIN(p3(2)) ) - p3(1) = MOD( (p1(1)-pi)-dp+pi , 2.*pi ) !- pi Leave at 0 to 360 - - end subroutine get_pt_on_great_circle - - -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! get_scalar_stats: get L-1, L-2, and L-inf norms and min/max stats as defined -! in Williamson, 1994 (p.16) -! for any var - - subroutine get_scalar_stats(var, varT, npx, npy, ndims, nregions, & - vmin, vmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) - integer, intent(IN) :: npx, npy - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions, tile - real , intent(IN) :: var(isd:ied,jsd:jed) - real , intent(IN) :: varT(isd:ied,jsd:jed) - real , intent(OUT) :: vmin - real , intent(OUT) :: vmax - real , intent(OUT) :: L1_norm - real , intent(OUT) :: L2_norm - real , intent(OUT) :: Linf_norm - - type(fv_grid_type), target :: gridstruct - - real :: vmean - real :: vvar - real :: vmin1 - real :: vmax1 - real :: pdiffmn - real :: pdiffmx - - real :: varSUM, varSUM2, varMAX - real :: gsum - real :: vminT, vmaxT, vmeanT, vvarT - integer :: i0, j0, n0 - - real, dimension(:,:,:), pointer :: grid, agrid - real, dimension(:,:), pointer :: area - - grid => gridstruct%grid - agrid=> gridstruct%agrid - - area => gridstruct%area - - varSUM = 0. - varSUM2 = 0. - varMAX = 0. - L1_norm = 0. - L2_norm = 0. - Linf_norm = 0. - vmean = 0. - vvar = 0. - vmax = 0. - vmin = 0. - pdiffmn= 0. - pdiffmx= 0. - vmeanT = 0. - vvarT = 0. - vmaxT = 0. - vminT = 0. 
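!
! Sketch (not part of the patch): get_scalar_stats assembles the normalized
! error norms of Williamson et al. (1994, p.16) from area-weighted global
! sums just below. With plain array sums standing in for the model's
! globalsum, the three norms reduce to the following; the subroutine name and
! flat-array interface are illustrative only:
!
      subroutine williamson_norms(n, var, varT, L1_norm, L2_norm, Linf_norm)
        integer, intent(in)  :: n
        real,    intent(in)  :: var(n)   ! numerical solution
        real,    intent(in)  :: varT(n)  ! analytic "truth"
        real,    intent(out) :: L1_norm, L2_norm, Linf_norm
        ! note: the model sums varT itself rather than abs(varT), which is
        ! equivalent for the positive-definite fields used in these tests
        L1_norm   = sum(abs(var - varT)) / sum(abs(varT))
        L2_norm   = sqrt(sum((var - varT)**2)) / sqrt(sum(varT**2))
        Linf_norm = maxval(abs(var - varT)) / maxval(abs(varT))
      end subroutine williamson_norms
!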
- - vmean = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - vmeanT = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - vmean = vmean / (4.0*pi) - vmeanT = vmeanT / (4.0*pi) - - call pmxn(var, npx, npy, nregions, tile, gridstruct, vmin , vmax , i0, j0, n0) - call pmxn(varT, npx, npy, nregions, tile, gridstruct, vminT, vmaxT, i0, j0, n0) - call pmxn(var-varT, npx, npy, nregions, tile, gridstruct, pdiffmn, pdiffmx, i0, j0, n0) - - vmax = (vmax - vmaxT) / (vmaxT-vminT) - vmin = (vmin - vminT) / (vmaxT-vminT) - - varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - varSUM2 = globalsum(varT(is:ie,js:je)**2., npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L1_norm = globalsum(ABS(var(is:ie,js:je)-varT(is:ie,js:je)), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L2_norm = globalsum((var(is:ie,js:je)-varT(is:ie,js:je))**2., npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L1_norm = L1_norm/varSUM - L2_norm = SQRT(L2_norm)/SQRT(varSUM2) - - call pmxn(ABS(varT), npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) - varMAX = vmax - call pmxn(ABS(var-varT), npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) - Linf_norm = vmax/varMAX - - end subroutine get_scalar_stats -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! get_vector_stats: get L-1, L-2, and L-inf norms and min/max stats as defined -! in Williamson, 1994 (p.16) -! for any var - - subroutine get_vector_stats(varU, varUT, varV, varVT, & - npx, npy, ndims, nregions, & - vmin, vmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile) - integer, intent(IN) :: npx, npy - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions, tile - real , intent(IN) :: varU(isd:ied,jsd:jed) - real , intent(IN) :: varUT(isd:ied,jsd:jed) - real , intent(IN) :: varV(isd:ied,jsd:jed) - real , intent(IN) :: varVT(isd:ied,jsd:jed) - real , intent(OUT) :: vmin - real , intent(OUT) :: vmax - real , intent(OUT) :: L1_norm - real , intent(OUT) :: L2_norm - real , intent(OUT) :: Linf_norm - - real :: var(isd:ied,jsd:jed) - real :: varT(isd:ied,jsd:jed) - real :: vmean - real :: vvar - real :: vmin1 - real :: vmax1 - real :: pdiffmn - real :: pdiffmx - - real :: varSUM, varSUM2, varMAX - real :: gsum - real :: vminT, vmaxT, vmeanT, vvarT - integer :: i,j,n - integer :: i0, j0, n0 - - type(fv_grid_type), target :: gridstruct - - real, dimension(:,:,:), pointer :: grid, agrid - real, dimension(:,:), pointer :: area - - grid => gridstruct%grid - agrid=> gridstruct%agrid - - area => gridstruct%area - - varSUM = 0. - varSUM2 = 0. - varMAX = 0. - L1_norm = 0. - L2_norm = 0. - Linf_norm = 0. - vmean = 0. - vvar = 0. - vmax = 0. - vmin = 0. - pdiffmn= 0. - pdiffmx= 0. - vmeanT = 0. - vvarT = 0. - vmaxT = 0. - vminT = 0. - - do j=js,je - do i=is,ie - var(i,j) = SQRT( (varU(i,j)-varUT(i,j))**2. + & - (varV(i,j)-varVT(i,j))**2. 
) - varT(i,j) = SQRT( varUT(i,j)*varUT(i,j) + & - varVT(i,j)*varVT(i,j) ) - enddo - enddo - varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L1_norm = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L1_norm = L1_norm/varSUM - - call pmxn(varT, npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) - varMAX = vmax - call pmxn(var, npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) - Linf_norm = vmax/varMAX - - do j=js,je - do i=is,ie - var(i,j) = ( (varU(i,j)-varUT(i,j))**2. + & - (varV(i,j)-varVT(i,j))**2. ) - varT(i,j) = ( varUT(i,j)*varUT(i,j) + & - varVT(i,j)*varVT(i,j) ) - enddo - enddo - varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L2_norm = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) - L2_norm = SQRT(L2_norm)/SQRT(varSUM) - - end subroutine get_vector_stats -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! check_courant_numbers :: -! - subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, tile, noPrint) - - real, intent(IN) :: ndt - integer, intent(IN) :: n_split - integer, intent(IN) :: npx, npy, npz, tile - logical, OPTIONAL, intent(IN) :: noPrint - real , intent(IN) :: uc(isd:ied+1,jsd:jed ,npz) - real , intent(IN) :: vc(isd:ied ,jsd:jed+1,npz) - - real :: ideal_c=0.06 - real :: tolerance= 1.e-3 - real :: dt_inc, dt_orig - real :: meanCy, minCy, maxCy, meanCx, minCx, maxCx - - real :: counter - logical :: ideal - - integer :: i,j,k - real :: dt - - type(fv_grid_type), intent(IN), target :: gridstruct - real, dimension(:,:), pointer :: dxc, dyc - - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - dt = ndt/real(n_split) - - 300 format(i4.4,' ',i4.4,' ',i4.4,' ',i4.4,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14) - - dt_orig = dt - dt_inc = 1 - ideal = .false. - - do while(.not. 
ideal) - - counter = 0 - minCy = missing - maxCy = -1.*missing - minCx = missing - maxCx = -1.*missing - meanCx = 0 - meanCy = 0 - do k=1,npz - do j=js,je - do i=is,ie+1 - minCx = MIN(minCx, ABS( (dt/dxc(i,j))*uc(i,j,k) )) - maxCx = MAX(maxCx, ABS( (dt/dxc(i,j))*uc(i,j,k) )) - meanCx = meanCx + ABS( (dt/dxc(i,j))*uc(i,j,k) ) - - if (ABS( (dt/dxc(i,j))*uc(i,j,k) ) > 1.0) then - counter = counter+1 - write(*,300) i,j,k,tile, ABS( (dt/dxc(i,j))*uc(i,j,k) ), dt, dxc(i,j), uc(i,j,k), counter - call exit(1) - endif - - enddo - enddo - do j=js,je+1 - do i=is,ie - minCy = MIN(minCy, ABS( (dt/dyc(i,j))*vc(i,j,k) )) - maxCy = MAX(maxCy, ABS( (dt/dyc(i,j))*vc(i,j,k) )) - meanCy = meanCy + ABS( (dt/dyc(i,j))*vc(i,j,k) ) - - if (ABS( (dt/dyc(i,j))*vc(i,j,k) ) > 1.0) then - counter = counter+1 - write(*,300) i,j,k,tile, ABS( (dt/dyc(i,j))*vc(i,j,k) ), dt, dyc(i,j), vc(i,j,k), counter - call exit(1) - endif - - enddo - enddo - enddo - - call mp_reduce_max(maxCx) - call mp_reduce_max(maxCy) - minCx = -minCx - minCy = -minCy - call mp_reduce_max(minCx) - call mp_reduce_max(minCy) - minCx = -minCx - minCy = -minCy - call mp_reduce_sum(meanCx) - call mp_reduce_sum(meanCy) - meanCx = meanCx/(6.0*DBLE(npx)*DBLE(npy-1)) - meanCy = meanCy/(6.0*DBLE(npx-1)*DBLE(npy)) - - !if ( (ABS(maxCy-ideal_c) <= tolerance) .and. (ABS(maxCx-ideal_c) <= tolerance) ) then - ideal = .true. - !elseif (maxCy-ideal_c > 0) then - ! dt = dt - dt_inc - !else - ! dt = dt + dt_inc - !endif - - enddo - - if ( (.not. present(noPrint)) .and. (is_master()) ) then - print*, '' - print*, '--------------------------------------------' - print*, 'Y-dir Courant number MIN : ', minCy - print*, 'Y-dir Courant number MAX : ', maxCy - print*, '' - print*, 'X-dir Courant number MIN : ', minCx - print*, 'X-dir Courant number MAX : ', maxCx - print*, '' - print*, 'X-dir Courant number MEAN : ', meanCx - print*, 'Y-dir Courant number MEAN : ', meanCy - print*, '' - print*, 'NDT: ', ndt - print*, 'n_split: ', n_split - print*, 'DT: ', dt - print*, '' - print*, '--------------------------------------------' - print*, '' - endif - - end subroutine check_courant_numbers -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! pmxn :: find max and min of field p -! 
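+!!$! A minimal, freestanding sketch (not part of this module) of the
+!!$! reduction idiom pmxn relies on below: with only a max-reduction
+!!$! available (mp_reduce_max), a global minimum is obtained by negating
+!!$! before and after the reduce. The direct MPI_Allreduce calls are an
+!!$! illustrative stand-in for the mp_reduce_max wrapper.
+!!$
+!!$  program minmax_via_max_reduce
+!!$    use mpi
+!!$    implicit none
+!!$    integer :: ierr, rank
+!!$    real :: local_max, local_min, global_max, global_min, neg
+!!$    call MPI_Init(ierr)
+!!$    call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
+!!$    local_max = real(rank)          ! stand-ins for one tile's extrema
+!!$    local_min = real(rank) - 10.0
+!!$    ! the maximum is reduced directly ...
+!!$    call MPI_Allreduce(local_max, global_max, 1, MPI_REAL, MPI_MAX, &
+!!$                       MPI_COMM_WORLD, ierr)
+!!$    ! ... while the minimum reuses the same MPI_MAX operation on
+!!$    ! negated data, exactly as pmxn does around mp_reduce_max
+!!$    neg = -local_min
+!!$    call MPI_Allreduce(neg, global_min, 1, MPI_REAL, MPI_MAX, &
+!!$                       MPI_COMM_WORLD, ierr)
+!!$    global_min = -global_min
+!!$    if (rank == 0) print *, 'global max/min: ', global_max, global_min
+!!$    call MPI_Finalize(ierr)
+!!$  end program minmax_via_max_reduce
+!!$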
- subroutine pmxn(p, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) - integer, intent(IN) :: npx - integer, intent(IN) :: npy - integer, intent(IN) :: nregions, tile - real , intent(IN) :: p(isd:ied,jsd:jed) - type(fv_grid_type), intent(IN), target :: gridstruct - real , intent(OUT) :: pmin - real , intent(OUT) :: pmax - integer, intent(OUT) :: i0 - integer, intent(OUT) :: j0 - integer, intent(OUT) :: n0 - - real :: temp - integer :: i,j,n - - - real, pointer, dimension(:,:,:) :: agrid, grid - real, pointer, dimension(:,:) :: area, rarea, fC, f0 - real(kind=R_GRID), pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 - real(kind=R_GRID), pointer, dimension(:,:,:,:) :: ew, es - real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc - - logical, pointer :: cubed_sphere, latlon - - logical, pointer :: have_south_pole, have_north_pole - - integer, pointer :: ntiles_g - real, pointer :: acapN, acapS, globalarea - - grid => gridstruct%grid - agrid=> gridstruct%agrid - - area => gridstruct%area - rarea => gridstruct%rarea - - fC => gridstruct%fC - f0 => gridstruct%f0 - - ee1 => gridstruct%ee1 - ee2 => gridstruct%ee2 - ew => gridstruct%ew - es => gridstruct%es - en1 => gridstruct%en1 - en2 => gridstruct%en2 - - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - cubed_sphere => gridstruct%cubed_sphere - latlon => gridstruct%latlon - - have_south_pole => gridstruct%have_south_pole - have_north_pole => gridstruct%have_north_pole - - ntiles_g => gridstruct%ntiles_g - acapN => gridstruct%acapN - acapS => gridstruct%acapS - globalarea => gridstruct%globalarea - - pmax = -1.e25 - pmin = 1.e25 - i0 = -999 - j0 = -999 - n0 = tile - - do j=js,je - do i=is,ie - temp = p(i,j) - if (temp > pmax) then - pmax = temp - i0 = i - j0 = j - elseif (temp < pmin) then - pmin = temp - endif - enddo - enddo - - temp = pmax - call mp_reduce_max(temp) - if (temp /= pmax) then - i0 = -999 - j0 = -999 - n0 = -999 - endif - pmax = temp - call mp_reduce_max(i0) - call mp_reduce_max(j0) - call mp_reduce_max(n0) - - pmin = -pmin - call mp_reduce_max(pmin) - pmin = -pmin - - end subroutine pmxn -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!! These routines are no longer used -#ifdef NCDF_OUTPUT - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! output_ncdf :: write out NETCDF fields -! 
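+!!$! output_ncdf below (via wrtvar_ncdf) uses the old NetCDF-2/Fortran-77
+!!$! NF_ interface. For reference, the same record-oriented write pattern
+!!$! in the modern nf90 module looks roughly like this; a sketch only,
+!!$! with made-up file and variable names:
+!!$
+!!$  program ncdf_record_sketch
+!!$    use netcdf
+!!$    implicit none
+!!$    integer, parameter :: nx = 8, ny = 8
+!!$    real :: field(nx, ny)
+!!$    integer :: ierr, ncid, xdim, ydim, tdim, varid, nrec
+!!$    field = 0.0
+!!$    ierr = nf90_create('diag.nc', NF90_CLOBBER, ncid)
+!!$    ierr = nf90_def_dim(ncid, 'x', nx, xdim)
+!!$    ierr = nf90_def_dim(ncid, 'y', ny, ydim)
+!!$    ierr = nf90_def_dim(ncid, 'time', NF90_UNLIMITED, tdim)
+!!$    ierr = nf90_def_var(ncid, 'phis', NF90_REAL, (/xdim, ydim, tdim/), varid)
+!!$    ierr = nf90_enddef(ncid)
+!!$    do nrec = 1, 3     ! one record per output step, as nout counts below
+!!$      ierr = nf90_put_var(ncid, varid, field, &
+!!$                          start=(/1, 1, nrec/), count=(/nx, ny, 1/))
+!!$    enddo
+!!$    ierr = nf90_close(ncid)
+!!$  end program ncdf_record_sketch
+!!$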
- subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & - omga, npx, npy, npz, ng, ncnst, ndims, nregions, ncid, & - npx_p1_id, npy_p1_id, npx_id, npy_id, npz_id, ntiles_id, ncnst_id, nt_id, & - phis_id, delp_id, ps_id, pt_id, pv_id, om_id, u_id, v_id, q_id, tracers_ids, & - lats_id, lons_id, gridstruct, flagstruct) - real, intent(IN) :: dt - integer, intent(IN) :: nt, maxnt - integer, intent(INOUT) :: nout - - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) - - real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) - real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) - - real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: omga(isd:ied ,jsd:jed ,npz) - - integer, intent(IN) :: npx, npy, npz - integer, intent(IN) :: ng, ncnst - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions - integer, intent(IN) :: ncid - integer, intent(IN) :: npx_p1_id, npy_p1_id, npx_id, npy_id, npz_id, ncnst_id - integer, intent(IN) :: ntiles_id, nt_id - integer, intent(IN) :: phis_id, delp_id, ps_id, pt_id, pv_id, u_id, v_id, q_id - integer, intent(IN) :: om_id ! omega (dp/dt) - integer, intent(IN) :: tracers_ids(ncnst-1) - integer, intent(IN) :: lats_id, lons_id - - type(fv_grid_type), target :: gridstruct - type(fv_flags_type), intent(IN) :: flagstruct - - real, allocatable :: tmp(:,:,:) - real, allocatable :: tmpA(:,:,:) -#if defined(SW_DYNAMICS) - real, allocatable :: ut(:,:,:) - real, allocatable :: vt(:,:,:) -#else - real, allocatable :: ut(:,:,:,:) - real, allocatable :: vt(:,:,:,:) - real, allocatable :: tmpA_3d(:,:,:,:) -#endif - real, allocatable :: vort(:,:) - - real :: p1(2) ! Temporary Point - real :: p2(2) ! Temporary Point - real :: p3(2) ! Temporary Point - real :: p4(2) ! Temporary Point - real :: pa(2) ! 
Temporary Point - real :: utmp, vtmp, r, r0, dist, heading - integer :: i,j,k,n,iq,nreg - - real :: Vtx, p, w_p - real :: x1,y1,z1,x2,y2,z2,ang - - real, pointer, dimension(:,:,:) :: agrid, grid - real, pointer, dimension(:,:) :: area, rarea - real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc - - grid => gridstruct%grid - agrid => gridstruct%agrid - - area => gridstruct%area - rarea => gridstruct%rarea - - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - allocate( tmp(npx ,npy ,nregions) ) - allocate( tmpA(npx-1,npy-1,nregions) ) -#if defined(SW_DYNAMICS) - allocate( ut(npx-1,npy-1,nregions) ) - allocate( vt(npx-1,npy-1,nregions) ) -#else - allocate( ut(npx-1,npy-1,npz,nregions) ) - allocate( vt(npx-1,npy-1,npz,nregions) ) - allocate( tmpA_3d(npx-1,npy-1,npz,nregions) ) -#endif - allocate( vort(isd:ied,jsd:jed) ) - - nout = nout + 1 - - if (nt==0) then - tmp(is:ie+1,js:je+1,tile) = grid(is:ie+1,js:je+1,2) - call wrtvar_ncdf(ncid, lats_id, nout, is,ie+1, js,je+1, npx+1, npy+1, 1, nregions, tmp(1:npx,1:npy,1:nregions), 3) - tmp(is:ie+1,js:je+1,tile) = grid(is:ie+1,js:je+1,1) - call wrtvar_ncdf(ncid, lons_id, nout, is,ie+1, js,je+1, npx+1, npy+1, 1, nregions, tmp(1:npx,1:npy,1:nregions), 3) - endif - -#if defined(SW_DYNAMICS) - if (test_case > 1) then - tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1)/Grav - - if ((nt==0) .and. (test_case==2)) then - Ubar = (2.0*pi*radius)/(12.0*86400.0) - gh0 = 2.94e4 - phis = 0.0 - do j=js,je+1 - do i=is,ie+1 - tmp(i,j,tile) = (gh0 - (radius*omega*Ubar + (Ubar*Ubar)/2.) * & - ( -1.*cos(grid(i ,j ,1))*cos(grid(i ,j ,2))*sin(alpha) + & - sin(grid(i ,j ,2))*cos(alpha) ) ** 2.0) / Grav - enddo - enddo - endif - - else - - if (test_case==1) then -! Get Current Height Field "Truth" - p1(1) = pi/2. + pi_shift - p1(2) = 0. - p2(1) = 3.*pi/2. + pi_shift - p2(2) = 0. - r0 = radius/3. !RADIUS /3. - dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) - heading = 5.0*pi/2.0 - alpha - call get_pt_on_great_circle( p1, p2, dist, heading, p3) - do j=jsd,jed - do i=isd,ied - p2(1) = agrid(i,j,1) - p2(2) = agrid(i,j,2) - r = great_circle_dist( p3, p2, radius ) - if (r < r0) then - phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) - else - phi0(i,j,1) = phis(i,j) - endif - enddo - enddo - elseif (test_case == 0) then - phi0 = 0.0 - do j=jsd,jed - do i=isd,ied - x1 = agrid(i,j,1) - y1 = agrid(i,j,2) - z1 = radius - p = p0_c0 * cos(y1) - Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) - w_p = 0.0 - if (p /= 0.0) w_p = Vtx/p - phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) - enddo - enddo - endif - - tmpA(is:ie,js:je,tile) = phi0(is:ie,js:je,1) - call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) - tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1) - endif - call wrtvar_ncdf(ncid, ps_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) - - if (test_case == 9) then -! 
Calc Vorticity - do j=jsd,jed - do i=isd,ied - vort(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,1)*dy(i+1,j) - v(i,j,1)*dy(i,j)) - & - (u(i,j+1,1)*dx(i,j+1) - u(i,j,1)*dx(i,j)) ) - vort(i,j) = Grav*vort(i,j)/delp(i,j,1) - enddo - enddo - tmpA(is:ie,js:je,tile) = vort(is:ie,js:je) - call wrtvar_ncdf(ncid, pv_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) - endif - - call cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, 1, 1, gridstruct%grid_type, gridstruct%nested, flagstruct%c2l_ord, bd) - do j=js,je - do i=is,ie - ut(i,j,tile) = ua(i,j,1) - vt(i,j,tile) = va(i,j,1) - enddo - enddo - - call wrtvar_ncdf(ncid, u_id, nout, is,ie, js,je, npx, npy, npz, nregions, ut(1:npx-1,1:npy-1,1:nregions), 3) - call wrtvar_ncdf(ncid, v_id, nout, is,ie, js,je, npx, npy, npz, nregions, vt(1:npx-1,1:npy-1,1:nregions), 3) - - if ((test_case >= 2) .and. (nt==0) ) then - tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav - call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) - endif -#else - -! Write Moisture Data - tmpA_3d(is:ie,js:je,1:npz,tile) = q(is:ie,js:je,1:npz,1) - call wrtvar_ncdf(ncid, q_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) - -! Write Tracer Data - do iq=2,ncnst - tmpA_3d(is:ie,js:je,1:npz,tile) = q(is:ie,js:je,1:npz,iq) - call wrtvar_ncdf(ncid, tracers_ids(iq-1), nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) - enddo - -! Write Surface height data - tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav - call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, 1, nregions, tmpA, 3) - -! Write Pressure Data - tmpA(is:ie,js:je,tile) = ps(is:ie,js:je) - call wrtvar_ncdf(ncid, ps_id, nout, is,ie, js,je, npx, npy, 1, nregions, tmpA, 3) - do k=1,npz - tmpA_3d(is:ie,js:je,k,tile) = delp(is:ie,js:je,k)/Grav - enddo - call wrtvar_ncdf(ncid, delp_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) - -! Write PT Data - do k=1,npz - tmpA_3d(is:ie,js:je,k,tile) = pt(is:ie,js:je,k) - enddo - call wrtvar_ncdf(ncid, pt_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) - -! Write U,V Data - call cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, npz, gridstruct%grid_type, gridstruct%nested, flagstruct%c2l_ord) - do k=1,npz - do j=js,je - do i=is,ie - ut(i,j,k,tile) = ua(i,j,k) - vt(i,j,k,tile) = va(i,j,k) - enddo - enddo - enddo - call wrtvar_ncdf(ncid, u_id, nout, is,ie, js,je, npx, npy, npz, nregions, ut(1:npx-1,1:npy-1,1:npz,1:nregions), 4) - call wrtvar_ncdf(ncid, v_id, nout, is,ie, js,je, npx, npy, npz, nregions, vt(1:npx-1,1:npy-1,1:npz,1:nregions), 4) - - -! Calc Vorticity - do k=1,npz - do j=js,je - do i=is,ie - tmpA_3d(i,j,k,tile) = rarea(i,j) * ( (v(i+1,j,k)*dy(i+1,j) - v(i,j,k)*dy(i,j)) - & - (u(i,j+1,k)*dx(i,j+1) - u(i,j,k)*dx(i,j)) ) - enddo - enddo - enddo - call wrtvar_ncdf(ncid, pv_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) -! -! Output omega (dp/dt): - do k=1,npz - do j=js,je - do i=is,ie - tmpA_3d(i,j,k,tile) = omga(i,j,k) - enddo - enddo - enddo - call wrtvar_ncdf(ncid, om_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) - -#endif - - deallocate( tmp ) - deallocate( tmpA ) -#if defined(SW_DYNAMICS) - deallocate( ut ) - deallocate( vt ) -#else - deallocate( ut ) - deallocate( vt ) - deallocate( tmpA_3d ) -#endif - deallocate( vort ) - - nullify(grid) - nullify(agrid) - - nullify(area) - - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) - - end subroutine output_ncdf - -! -! 
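+!!$! For reference, a freestanding sketch of the circulation-theorem
+!!$! vorticity diagnostic used in output_ncdf above (and again in output
+!!$! below): the relative vorticity is the line integral of the D-grid
+!!$! winds around each cell divided by the cell area. Uniform dx/dy here
+!!$! is an illustrative simplification of the gridstruct metric terms.
+!!$
+!!$  subroutine rel_vort_uniform(u, v, dx, dy, nx, ny, vort)
+!!$    implicit none
+!!$    integer, intent(in) :: nx, ny
+!!$    real, intent(in)  :: dx, dy          ! uniform spacings (assumed)
+!!$    real, intent(in)  :: u(nx, ny+1)     ! D-grid zonal wind on y-faces
+!!$    real, intent(in)  :: v(nx+1, ny)     ! D-grid meridional wind on x-faces
+!!$    real, intent(out) :: vort(nx, ny)
+!!$    integer :: i, j
+!!$    do j = 1, ny
+!!$      do i = 1, nx
+!!$        ! circulation around cell (i,j) divided by its area dx*dy
+!!$        vort(i,j) = ( (v(i+1,j) - v(i,j))*dy   &
+!!$                    - (u(i,j+1) - u(i,j))*dx ) / (dx*dy)
+!!$      enddo
+!!$    enddo
+!!$  end subroutine rel_vort_uniform
+!!$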
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! -! output :: write out fields -! - subroutine output(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & - npx, npy, npz, ng, ncnst, ndims, nregions, phis_lun, phi_lun, & - pt_lun, pv_lun, uv_lun, gridstruct) - - real, intent(IN) :: dt - integer, intent(IN) :: nt, maxnt - integer, intent(INOUT) :: nout - - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) - - real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) - real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) - - real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) - - integer, intent(IN) :: npx, npy, npz - integer, intent(IN) :: ng, ncnst - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions - integer, intent(IN) :: phis_lun, phi_lun, pt_lun, pv_lun, uv_lun - - type(fv_grid_type), target :: gridstruct - - real :: tmp(1-ng:npx +ng,1-ng:npy +ng,1:nregions) - real :: tmpA(1-ng:npx-1+ng,1-ng:npy-1+ng,1:nregions) - real :: p1(2) ! Temporary Point - real :: p2(2) ! Temporary Point - real :: p3(2) ! Temporary Point - real :: p4(2) ! Temporary Point - real :: pa(2) ! Temporary Point - real :: ut(1:npx,1:npy,1:nregions) - real :: vt(1:npx,1:npy,1:nregions) - real :: utmp, vtmp, r, r0, dist, heading - integer :: i,j,k,n,nreg - real :: vort(isd:ied,jsd:jed) - - real :: Vtx, p, w_p - real :: x1,y1,z1,x2,y2,z2,ang - - real, pointer, dimension(:,:,:) :: agrid, grid - real, pointer, dimension(:,:) :: area, rarea - real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc - - grid => gridstruct%grid - agrid => gridstruct%agrid - - area => gridstruct%area - - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - cubed_sphere => gridstruct%cubed_sphere - - nout = nout + 1 - -#if defined(SW_DYNAMICS) - if (test_case > 1) then - call atob_s(delp(:,:,1)/Grav, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%nested) !, altInterp=1) - tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1)/Grav - - if ((nt==0) .and. (test_case==2)) then - Ubar = (2.0*pi*radius)/(12.0*86400.0) - gh0 = 2.94e4 - phis = 0.0 - do j=js,je+1 - do i=is,ie+1 - tmp(i,j,tile) = (gh0 - (radius*omega*Ubar + (Ubar*Ubar)/2.) * & - ( -1.*cos(grid(i ,j ,1))*cos(grid(i ,j ,2))*sin(alpha) + & - sin(grid(i ,j ,2))*cos(alpha) ) ** 2.0) / Grav - enddo - enddo - endif - - else - - if (test_case==1) then -! Get Current Height Field "Truth" - p1(1) = pi/2. + pi_shift - p1(2) = 0. - p2(1) = 3.*pi/2. + pi_shift - p2(2) = 0. - r0 = radius/3. !RADIUS /3. 
- dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) - heading = 5.0*pi/2.0 - alpha - call get_pt_on_great_circle( p1, p2, dist, heading, p3) - do j=jsd,jed - do i=isd,ied - p2(1) = agrid(i,j,1) - p2(2) = agrid(i,j,2) - r = great_circle_dist( p3, p2, radius ) - if (r < r0) then - phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) - else - phi0(i,j,1) = phis(i,j) - endif - enddo - enddo - elseif (test_case == 0) then - phi0 = 0.0 - do j=jsd,jed - do i=isd,ied - x1 = agrid(i,j,1) - y1 = agrid(i,j,2) - z1 = radius - p = p0_c0 * cos(y1) - Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) - w_p = 0.0 - if (p /= 0.0) w_p = Vtx/p - phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) - enddo - enddo - endif - - call atob_s(phi0(:,:,1), tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%nested) !, altInterp=1) - tmpA(is:ie,js:je,tile) = phi0(is:ie,js:je,1) - call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - call atob_s(delp(:,:,1), tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%nested) !, altInterp=1) - tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1) - endif - ! call wrt2d(phi_lun, nout, is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) - call wrt2d(phi_lun, nout, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - - if (test_case == 9) then -! Calc Vorticity - do j=jsd,jed - do i=isd,ied - vort(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,1)*dy(i+1,j) - v(i,j,1)*dy(i,j)) - & - (u(i,j+1,1)*dx(i,j+1) - u(i,j,1)*dx(i,j)) ) - vort(i,j) = Grav*vort(i,j)/delp(i,j,1) - enddo - enddo - call atob_s(vort, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%nested) !, altInterp=1) - call wrt2d(pv_lun, nout, is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) - endif - - call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) -! Rotate winds to standard Lat-Lon orientation - if (cubed_sphere) then - do j=js,je - do i=is,ie - call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) - call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) - call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) - call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) - utmp = ua(i,j,1) - vtmp = va(i,j,1) - if (cubed_sphere) call rotate_winds(utmp,vtmp, p1,p2,p3,p4, agrid(i,j,1:2), 2, 2) - ut(i,j,tile) = utmp - vt(i,j,tile) = vtmp - enddo - enddo - endif - - call wrt2d(uv_lun, 2*(nout-1) + 1, is,ie, js,je, npx, npy, nregions, ut(1:npx-1,1:npy-1,1:nregions)) - call wrt2d(uv_lun, 2*(nout-1) + 2, is,ie, js,je, npx, npy, nregions, vt(1:npx-1,1:npy-1,1:nregions)) - - if ((test_case >= 2) .and. (nt==0) ) then - call atob_s(phis/Grav, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%nested) !, altInterp=1) - ! call wrt2d(phis_lun, nout , is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) - tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav - call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - endif -#else - -! Write Surface height data - if (nt==0) then - tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav - call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - endif - -! Write Pressure Data - - !if (tile==2) then - ! do i=is,ie - ! print*, i, ps(i,35) - ! 
enddo - !endif - tmpA(is:ie,js:je,tile) = ps(is:ie,js:je) - call wrt2d(phi_lun, (nout-1)*(npz+1) + 1, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - do k=1,npz - tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,k)/Grav - call wrt2d(phi_lun, (nout-1)*(npz+1) + 1 + k, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - enddo - -! Write PT Data - do k=1,npz - tmpA(is:ie,js:je,tile) = pt(is:ie,js:je,k) - call wrt2d(pt_lun, (nout-1)*npz + (k-1) + 1, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) - enddo - -! Write U,V Data - do k=1,npz - call dtoa(u(isd,jsd,k), v(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) -! Rotate winds to standard Lat-Lon orientation - if (cubed_sphere) then - do j=js,je - do i=is,ie - call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) - call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) - call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) - call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) - utmp = ua(i,j,k) - vtmp = va(i,j,k) - if (cubed_sphere) call rotate_winds(utmp,vtmp, p1,p2,p3,p4, agrid(i,j,1:2), 2, 2) - ut(i,j,tile) = utmp - vt(i,j,tile) = vtmp - enddo - enddo - endif - call wrt2d(uv_lun, 2*((nout-1)*npz + (k-1)) + 1, is,ie, js,je, npx, npy, nregions, ut(1:npx-1,1:npy-1,1:nregions)) - call wrt2d(uv_lun, 2*((nout-1)*npz + (k-1)) + 2, is,ie, js,je, npx, npy, nregions, vt(1:npx-1,1:npy-1,1:nregions)) - enddo -#endif - - nullify(grid) - nullify(agrid) - - nullify(area) - - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) - - nullify(cubed_sphere) - - end subroutine output -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! wrt2d_ncdf :: write out a 2d field -! - subroutine wrtvar_ncdf(ncid, varid, nrec, i1,i2, j1,j2, npx, npy, npz, ntiles, p, ndims) -#include - integer, intent(IN) :: ncid, varid - integer, intent(IN) :: nrec - integer, intent(IN) :: i1,i2,j1,j2 - integer, intent(IN) :: npx - integer, intent(IN) :: npy - integer, intent(IN) :: npz - integer, intent(IN) :: ntiles - real , intent(IN) :: p(npx-1,npy-1,npz,ntiles) - integer, intent(IN) :: ndims - - integer :: error - real(kind=4), allocatable :: p_R4(:,:,:,:) - integer :: i,j,k,n - integer :: istart(ndims+1), icount(ndims+1) - - allocate( p_R4(npx-1,npy-1,npz,ntiles) ) - - p_R4(:,:,:,:) = missing - p_R4(i1:i2,j1:j2,1:npz,tile) = p(i1:i2,j1:j2,1:npz,tile) - call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, npz, ntiles) - - istart(:) = 1 - istart(ndims+1) = nrec - icount(1) = npx-1 - icount(2) = npy-1 - icount(3) = npz - if (ndims == 3) icount(3) = ntiles - if (ndims == 4) icount(4) = ntiles - icount(ndims+1) = 1 - - if (is_master()) then - error = NF_PUT_VARA_REAL(ncid, varid, istart, icount, p_R4) - endif ! masterproc - - deallocate( p_R4 ) - - end subroutine wrtvar_ncdf -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- - -!------------------------------------------------------------------------------- -! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! wrt2d :: write out a 2d field -! 
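+!!$! wrt2d below writes each field as one fixed-length record of a
+!!$! direct-access unformatted file, with nrec selecting the record. A
+!!$! minimal self-contained sketch of that pattern, assuming the record
+!!$! length is obtained portably from INQUIRE(iolength=...):
+!!$
+!!$  program direct_access_records
+!!$    implicit none
+!!$    integer, parameter :: nx = 4, ny = 3
+!!$    real(kind=4) :: field(nx, ny)
+!!$    integer :: recl, lun
+!!$    field = 1.0
+!!$    ! record length for one 2-D slab of 4-byte reals
+!!$    inquire(iolength=recl) field
+!!$    open(newunit=lun, file='example.dat', access='direct', &
+!!$         form='unformatted', recl=recl, status='replace')
+!!$    write(lun, rec=1) field          ! record numbers play the role of nrec
+!!$    write(lun, rec=2) 2.0*field
+!!$    close(lun)
+!!$  end program direct_access_records
+!!$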
- subroutine wrt2d(iout, nrec, i1,i2, j1,j2, npx, npy, nregions, p) - integer, intent(IN) :: iout - integer, intent(IN) :: nrec - integer, intent(IN) :: i1,i2,j1,j2 - integer, intent(IN) :: npx - integer, intent(IN) :: npy - integer, intent(IN) :: nregions - real , intent(IN) :: p(npx-1,npy-1,nregions) - - real(kind=4) :: p_R4(npx-1,npy-1,nregions) - integer :: i,j,n - - do n=tile,tile - do j=j1,j2 - do i=i1,i2 - p_R4(i,j,n) = p(i,j,n) - enddo - enddo - enddo - - call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, nregions) - - if (is_master()) then - write(iout,rec=nrec) p_R4(1:npx-1,1:npy-1,1:nregions) - endif ! masterproc - - end subroutine wrt2d -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- -#endif +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! get_scalar_stats: get L-1, L-2, and L-inf norms and min/max stats as defined +!!$! in Williamson, 1994 (p.16) +!!$! for any var +!!$ +!!$ subroutine get_scalar_stats(var, varT, npx, npy, ndims, nregions, & +!!$ vmin, vmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile, bd) +!!$ type(fv_grid_bounds_type), intent(IN) :: bd +!!$ integer, intent(IN) :: npx, npy +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions, tile +!!$ real , intent(IN) :: var(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(IN) :: varT(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(OUT) :: vmin +!!$ real , intent(OUT) :: vmax +!!$ real , intent(OUT) :: L1_norm +!!$ real , intent(OUT) :: L2_norm +!!$ real , intent(OUT) :: Linf_norm +!!$ +!!$ type(fv_grid_type), target :: gridstruct +!!$ +!!$ real :: vmean +!!$ real :: vvar +!!$ real :: vmin1 +!!$ real :: vmax1 +!!$ real :: pdiffmn +!!$ real :: pdiffmx +!!$ +!!$ real :: varSUM, varSUM2, varMAX +!!$ real :: gsum +!!$ real :: vminT, vmaxT, vmeanT, vvarT +!!$ integer :: i0, j0, n0 +!!$ +!!$ real, dimension(:,:,:), pointer :: grid, agrid +!!$ real, dimension(:,:), pointer :: area +!!$ +!!$ integer :: is, ie, js, je +!!$ integer :: isd, ied, jsd, jed, ng +!!$ +!!$ is = bd%is +!!$ ie = bd%ie +!!$ js = bd%js +!!$ je = bd%je +!!$ isd = bd%isd +!!$ ied = bd%ied +!!$ jsd = bd%jsd +!!$ jed = bd%jed +!!$ ng = bd%ng +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ +!!$ varSUM = 0. +!!$ varSUM2 = 0. +!!$ varMAX = 0. +!!$ L1_norm = 0. +!!$ L2_norm = 0. +!!$ Linf_norm = 0. +!!$ vmean = 0. +!!$ vvar = 0. +!!$ vmax = 0. +!!$ vmin = 0. +!!$ pdiffmn= 0. +!!$ pdiffmx= 0. +!!$ vmeanT = 0. +!!$ vvarT = 0. +!!$ vmaxT = 0. +!!$ vminT = 0. 
+!!$ +!!$ vmean = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ vmeanT = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ vmean = vmean / (4.0*pi) +!!$ vmeanT = vmeanT / (4.0*pi) +!!$ +!!$ call pmxn(var, npx, npy, nregions, tile, gridstruct, vmin , vmax , i0, j0, n0) +!!$ call pmxn(varT, npx, npy, nregions, tile, gridstruct, vminT, vmaxT, i0, j0, n0) +!!$ call pmxn(var-varT, npx, npy, nregions, tile, gridstruct, pdiffmn, pdiffmx, i0, j0, n0) +!!$ +!!$ vmax = (vmax - vmaxT) / (vmaxT-vminT) +!!$ vmin = (vmin - vminT) / (vmaxT-vminT) +!!$ +!!$ varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ varSUM2 = globalsum(varT(is:ie,js:je)**2., npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L1_norm = globalsum(ABS(var(is:ie,js:je)-varT(is:ie,js:je)), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L2_norm = globalsum((var(is:ie,js:je)-varT(is:ie,js:je))**2., npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L1_norm = L1_norm/varSUM +!!$ L2_norm = SQRT(L2_norm)/SQRT(varSUM2) +!!$ +!!$ call pmxn(ABS(varT), npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) +!!$ varMAX = vmax +!!$ call pmxn(ABS(var-varT), npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) +!!$ Linf_norm = vmax/varMAX +!!$ +!!$ end subroutine get_scalar_stats +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!!$!------------------------------------------------------------------------------- +!!$ +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! get_vector_stats: get L-1, L-2, and L-inf norms and min/max stats as defined +!!$! in Williamson, 1994 (p.16) +!!$! 
for any var +!!$ +!!$ subroutine get_vector_stats(varU, varUT, varV, varVT, & +!!$ npx, npy, ndims, nregions, & +!!$ vmin, vmax, L1_norm, L2_norm, Linf_norm, gridstruct, tile, bd) +!!$ type(fv_grid_bounds_type), intent(IN) :: bd +!!$ integer, intent(IN) :: npx, npy +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions, tile +!!$ real , intent(IN) :: varU(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(IN) :: varUT(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(IN) :: varV(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(IN) :: varVT(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real , intent(OUT) :: vmin +!!$ real , intent(OUT) :: vmax +!!$ real , intent(OUT) :: L1_norm +!!$ real , intent(OUT) :: L2_norm +!!$ real , intent(OUT) :: Linf_norm +!!$ +!!$ real :: var(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real :: varT(bd%isd:bd%ied,bd%jsd:bd%jed) +!!$ real :: vmean +!!$ real :: vvar +!!$ real :: vmin1 +!!$ real :: vmax1 +!!$ real :: pdiffmn +!!$ real :: pdiffmx +!!$ +!!$ real :: varSUM, varSUM2, varMAX +!!$ real :: gsum +!!$ real :: vminT, vmaxT, vmeanT, vvarT +!!$ integer :: i,j,n +!!$ integer :: i0, j0, n0 +!!$ +!!$ type(fv_grid_type), target :: gridstruct +!!$ +!!$ real, dimension(:,:,:), pointer :: grid, agrid +!!$ real, dimension(:,:), pointer :: area +!!$ +!!$ integer :: is, ie, js, je +!!$ integer :: isd, ied, jsd, jed, ng +!!$ +!!$ is = bd%is +!!$ ie = bd%ie +!!$ js = bd%js +!!$ je = bd%je +!!$ isd = bd%isd +!!$ ied = bd%ied +!!$ jsd = bd%jsd +!!$ jed = bd%jed +!!$ ng = bd%ng +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ +!!$ varSUM = 0. +!!$ varSUM2 = 0. +!!$ varMAX = 0. +!!$ L1_norm = 0. +!!$ L2_norm = 0. +!!$ Linf_norm = 0. +!!$ vmean = 0. +!!$ vvar = 0. +!!$ vmax = 0. +!!$ vmin = 0. +!!$ pdiffmn= 0. +!!$ pdiffmx= 0. +!!$ vmeanT = 0. +!!$ vvarT = 0. +!!$ vmaxT = 0. +!!$ vminT = 0. +!!$ +!!$ do j=js,je +!!$ do i=is,ie +!!$ var(i,j) = SQRT( (varU(i,j)-varUT(i,j))**2. + & +!!$ (varV(i,j)-varVT(i,j))**2. ) +!!$ varT(i,j) = SQRT( varUT(i,j)*varUT(i,j) + & +!!$ varVT(i,j)*varVT(i,j) ) +!!$ enddo +!!$ enddo +!!$ varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L1_norm = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L1_norm = L1_norm/varSUM +!!$ +!!$ call pmxn(varT, npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) +!!$ varMAX = vmax +!!$ call pmxn(var, npx, npy, nregions, tile, gridstruct, vmin, vmax, i0, j0, n0) +!!$ Linf_norm = vmax/varMAX +!!$ +!!$ do j=js,je +!!$ do i=is,ie +!!$ var(i,j) = ( (varU(i,j)-varUT(i,j))**2. + & +!!$ (varV(i,j)-varVT(i,j))**2. ) +!!$ varT(i,j) = ( varUT(i,j)*varUT(i,j) + & +!!$ varVT(i,j)*varVT(i,j) ) +!!$ enddo +!!$ enddo +!!$ varSUM = globalsum(varT(is:ie,js:je), npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L2_norm = globalsum(var(is:ie,js:je) , npx, npy, is,ie, js,je, isd, ied, jsd, jed, gridstruct, tile) +!!$ L2_norm = SQRT(L2_norm)/SQRT(varSUM) +!!$ +!!$ end subroutine get_vector_stats +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!!$!------------------------------------------------------------------------------- + +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! check_courant_numbers :: +!!$! 
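+!!$! The retired check below scans every face for |C| = |u*dt/dx| and
+!!$! aborts once the advective Courant number exceeds one. A freestanding
+!!$! sketch of the same test on a 1-D line of face winds (illustrative
+!!$! values only):
+!!$
+!!$  program courant_check_1d
+!!$    implicit none
+!!$    integer, parameter :: n = 5
+!!$    real :: uc(n), dxc(n), dt, c, maxc
+!!$    integer :: i
+!!$    uc  = (/ 10., 25., -40., 5., 60. /)   ! face-normal winds (m/s)
+!!$    dxc = 1.0e4                           ! face spacing (m)
+!!$    dt  = 120.                            ! advective sub-step (s)
+!!$    maxc = 0.
+!!$    do i = 1, n
+!!$      c = abs( (dt/dxc(i))*uc(i) )
+!!$      maxc = max(maxc, c)
+!!$      if (c > 1.0) stop 'CFL violation: Courant number exceeds 1'
+!!$    enddo
+!!$    print *, 'max Courant number:', maxc  ! 0.72 for the values above
+!!$  end program courant_check_1d
+!!$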
+!!$ subroutine check_courant_numbers(uc,vc, ndt, n_split, gridstruct, npx, npy, npz, tile, noPrint) +!!$ +!!$ real, intent(IN) :: ndt +!!$ integer, intent(IN) :: n_split +!!$ integer, intent(IN) :: npx, npy, npz, tile +!!$ logical, OPTIONAL, intent(IN) :: noPrint +!!$ real , intent(IN) :: uc(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(IN) :: vc(isd:ied ,jsd:jed+1,npz) +!!$ +!!$ real :: ideal_c=0.06 +!!$ real :: tolerance= 1.e-3 +!!$ real :: dt_inc, dt_orig +!!$ real :: meanCy, minCy, maxCy, meanCx, minCx, maxCx +!!$ +!!$ real :: counter +!!$ logical :: ideal +!!$ +!!$ integer :: i,j,k +!!$ real :: dt +!!$ +!!$ type(fv_grid_type), intent(IN), target :: gridstruct +!!$ real, dimension(:,:), pointer :: dxc, dyc +!!$ +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ dt = ndt/real(n_split) +!!$ +!!$ 300 format(i4.4,' ',i4.4,' ',i4.4,' ',i4.4,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14,' ',e21.14) +!!$ +!!$ dt_orig = dt +!!$ dt_inc = 1 +!!$ ideal = .false. +!!$ +!!$ do while(.not. ideal) +!!$ +!!$ counter = 0 +!!$ minCy = missing +!!$ maxCy = -1.*missing +!!$ minCx = missing +!!$ maxCx = -1.*missing +!!$ meanCx = 0 +!!$ meanCy = 0 +!!$ do k=1,npz +!!$ do j=js,je +!!$ do i=is,ie+1 +!!$ minCx = MIN(minCx, ABS( (dt/dxc(i,j))*uc(i,j,k) )) +!!$ maxCx = MAX(maxCx, ABS( (dt/dxc(i,j))*uc(i,j,k) )) +!!$ meanCx = meanCx + ABS( (dt/dxc(i,j))*uc(i,j,k) ) +!!$ +!!$ if (ABS( (dt/dxc(i,j))*uc(i,j,k) ) > 1.0) then +!!$ counter = counter+1 +!!$ write(*,300) i,j,k,tile, ABS( (dt/dxc(i,j))*uc(i,j,k) ), dt, dxc(i,j), uc(i,j,k), counter +!!$ call exit(1) +!!$ endif +!!$ +!!$ enddo +!!$ enddo +!!$ do j=js,je+1 +!!$ do i=is,ie +!!$ minCy = MIN(minCy, ABS( (dt/dyc(i,j))*vc(i,j,k) )) +!!$ maxCy = MAX(maxCy, ABS( (dt/dyc(i,j))*vc(i,j,k) )) +!!$ meanCy = meanCy + ABS( (dt/dyc(i,j))*vc(i,j,k) ) +!!$ +!!$ if (ABS( (dt/dyc(i,j))*vc(i,j,k) ) > 1.0) then +!!$ counter = counter+1 +!!$ write(*,300) i,j,k,tile, ABS( (dt/dyc(i,j))*vc(i,j,k) ), dt, dyc(i,j), vc(i,j,k), counter +!!$ call exit(1) +!!$ endif +!!$ +!!$ enddo +!!$ enddo +!!$ enddo +!!$ +!!$ call mp_reduce_max(maxCx) +!!$ call mp_reduce_max(maxCy) +!!$ minCx = -minCx +!!$ minCy = -minCy +!!$ call mp_reduce_max(minCx) +!!$ call mp_reduce_max(minCy) +!!$ minCx = -minCx +!!$ minCy = -minCy +!!$ call mp_reduce_sum(meanCx) +!!$ call mp_reduce_sum(meanCy) +!!$ meanCx = meanCx/(6.0*DBLE(npx)*DBLE(npy-1)) +!!$ meanCy = meanCy/(6.0*DBLE(npx-1)*DBLE(npy)) +!!$ +!!$ !if ( (ABS(maxCy-ideal_c) <= tolerance) .and. (ABS(maxCx-ideal_c) <= tolerance) ) then +!!$ ideal = .true. +!!$ !elseif (maxCy-ideal_c > 0) then +!!$ ! dt = dt - dt_inc +!!$ !else +!!$ ! dt = dt + dt_inc +!!$ !endif +!!$ +!!$ enddo +!!$ +!!$ if ( (.not. present(noPrint)) .and. (is_master()) ) then +!!$ print*, '' +!!$ print*, '--------------------------------------------' +!!$ print*, 'Y-dir Courant number MIN : ', minCy +!!$ print*, 'Y-dir Courant number MAX : ', maxCy +!!$ print*, '' +!!$ print*, 'X-dir Courant number MIN : ', minCx +!!$ print*, 'X-dir Courant number MAX : ', maxCx +!!$ print*, '' +!!$ print*, 'X-dir Courant number MEAN : ', meanCx +!!$ print*, 'Y-dir Courant number MEAN : ', meanCy +!!$ print*, '' +!!$ print*, 'NDT: ', ndt +!!$ print*, 'n_split: ', n_split +!!$ print*, 'DT: ', dt +!!$ print*, '' +!!$ print*, '--------------------------------------------' +!!$ print*, '' +!!$ endif +!!$ +!!$ end subroutine check_courant_numbers +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! 
+!!$!------------------------------------------------------------------------------- + +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! pmxn :: find max and min of field p +!!$! +!!$ subroutine pmxn(p, npx, npy, nregions, tile, gridstruct, pmin, pmax, i0, j0, n0) +!!$ integer, intent(IN) :: npx +!!$ integer, intent(IN) :: npy +!!$ integer, intent(IN) :: nregions, tile +!!$ real , intent(IN) :: p(isd:ied,jsd:jed) +!!$ type(fv_grid_type), intent(IN), target :: gridstruct +!!$ real , intent(OUT) :: pmin +!!$ real , intent(OUT) :: pmax +!!$ integer, intent(OUT) :: i0 +!!$ integer, intent(OUT) :: j0 +!!$ integer, intent(OUT) :: n0 +!!$ +!!$ real :: temp +!!$ integer :: i,j,n +!!$ +!!$ +!!$ real, pointer, dimension(:,:,:) :: agrid, grid +!!$ real, pointer, dimension(:,:) :: area, rarea, fC, f0 +!!$ real(kind=R_GRID), pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 +!!$ real(kind=R_GRID), pointer, dimension(:,:,:,:) :: ew, es +!!$ real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc +!!$ +!!$ logical, pointer :: cubed_sphere, latlon +!!$ +!!$ logical, pointer :: have_south_pole, have_north_pole +!!$ +!!$ integer, pointer :: ntiles_g +!!$ real, pointer :: acapN, acapS, globalarea +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ rarea => gridstruct%rarea +!!$ +!!$ fC => gridstruct%fC +!!$ f0 => gridstruct%f0 +!!$ +!!$ ee1 => gridstruct%ee1 +!!$ ee2 => gridstruct%ee2 +!!$ ew => gridstruct%ew +!!$ es => gridstruct%es +!!$ en1 => gridstruct%en1 +!!$ en2 => gridstruct%en2 +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ rdxa => gridstruct%rdxa +!!$ rdya => gridstruct%rdya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ cubed_sphere => gridstruct%cubed_sphere +!!$ latlon => gridstruct%latlon +!!$ +!!$ have_south_pole => gridstruct%have_south_pole +!!$ have_north_pole => gridstruct%have_north_pole +!!$ +!!$ ntiles_g => gridstruct%ntiles_g +!!$ acapN => gridstruct%acapN +!!$ acapS => gridstruct%acapS +!!$ globalarea => gridstruct%globalarea +!!$ +!!$ pmax = -1.e25 +!!$ pmin = 1.e25 +!!$ i0 = -999 +!!$ j0 = -999 +!!$ n0 = tile +!!$ +!!$ do j=js,je +!!$ do i=is,ie +!!$ temp = p(i,j) +!!$ if (temp > pmax) then +!!$ pmax = temp +!!$ i0 = i +!!$ j0 = j +!!$ elseif (temp < pmin) then +!!$ pmin = temp +!!$ endif +!!$ enddo +!!$ enddo +!!$ +!!$ temp = pmax +!!$ call mp_reduce_max(temp) +!!$ if (temp /= pmax) then +!!$ i0 = -999 +!!$ j0 = -999 +!!$ n0 = -999 +!!$ endif +!!$ pmax = temp +!!$ call mp_reduce_max(i0) +!!$ call mp_reduce_max(j0) +!!$ call mp_reduce_max(n0) +!!$ +!!$ pmin = -pmin +!!$ call mp_reduce_max(pmin) +!!$ pmin = -pmin +!!$ +!!$ end subroutine pmxn +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!!$!------------------------------------------------------------------------------- +!!$ +!!$!! These routines are no longer used +!!$#ifdef NCDF_OUTPUT +!!$ +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! output_ncdf :: write out NETCDF fields +!!$! 
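+!!$! Both versions of output_ncdf regrid the prognostic D-grid winds to
+!!$! cell centers before writing (cubed_to_latlon / dtoa). On a uniform
+!!$! Cartesian grid that interpolation degenerates to the simple face
+!!$! averaging sketched below; the metric-weighted FV3 operators reduce
+!!$! to roughly this form when dx and dy are constant.
+!!$
+!!$  subroutine d2a_uniform(u, v, nx, ny, ua, va)
+!!$    implicit none
+!!$    integer, intent(in) :: nx, ny
+!!$    real, intent(in)  :: u(nx, ny+1)     ! D-grid u on south/north faces
+!!$    real, intent(in)  :: v(nx+1, ny)     ! D-grid v on west/east faces
+!!$    real, intent(out) :: ua(nx, ny), va(nx, ny)  ! A-grid (cell-center) winds
+!!$    integer :: i, j
+!!$    do j = 1, ny
+!!$      do i = 1, nx
+!!$        ua(i,j) = 0.5*(u(i,j) + u(i,j+1))
+!!$        va(i,j) = 0.5*(v(i,j) + v(i+1,j))
+!!$      enddo
+!!$    enddo
+!!$  end subroutine d2a_uniform
+!!$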
+!!$ subroutine output_ncdf(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & +!!$ omga, npx, npy, npz, ng, ncnst, ndims, nregions, ncid, & +!!$ npx_p1_id, npy_p1_id, npx_id, npy_id, npz_id, ntiles_id, ncnst_id, nt_id, & +!!$ phis_id, delp_id, ps_id, pt_id, pv_id, om_id, u_id, v_id, q_id, tracers_ids, & +!!$ lats_id, lons_id, gridstruct, flagstruct) +!!$ real, intent(IN) :: dt +!!$ integer, intent(IN) :: nt, maxnt +!!$ integer, intent(INOUT) :: nout +!!$ +!!$ real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) +!!$ +!!$ real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) +!!$ real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) +!!$ +!!$ real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: omga(isd:ied ,jsd:jed ,npz) +!!$ +!!$ integer, intent(IN) :: npx, npy, npz +!!$ integer, intent(IN) :: ng, ncnst +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions +!!$ integer, intent(IN) :: ncid +!!$ integer, intent(IN) :: npx_p1_id, npy_p1_id, npx_id, npy_id, npz_id, ncnst_id +!!$ integer, intent(IN) :: ntiles_id, nt_id +!!$ integer, intent(IN) :: phis_id, delp_id, ps_id, pt_id, pv_id, u_id, v_id, q_id +!!$ integer, intent(IN) :: om_id ! omega (dp/dt) +!!$ integer, intent(IN) :: tracers_ids(ncnst-1) +!!$ integer, intent(IN) :: lats_id, lons_id +!!$ +!!$ type(fv_grid_type), target :: gridstruct +!!$ type(fv_flags_type), intent(IN) :: flagstruct +!!$ +!!$ real, allocatable :: tmp(:,:,:) +!!$ real, allocatable :: tmpA(:,:,:) +!!$#if defined(SW_DYNAMICS) +!!$ real, allocatable :: ut(:,:,:) +!!$ real, allocatable :: vt(:,:,:) +!!$#else +!!$ real, allocatable :: ut(:,:,:,:) +!!$ real, allocatable :: vt(:,:,:,:) +!!$ real, allocatable :: tmpA_3d(:,:,:,:) +!!$#endif +!!$ real, allocatable :: vort(:,:) +!!$ +!!$ real :: p1(2) ! Temporary Point +!!$ real :: p2(2) ! Temporary Point +!!$ real :: p3(2) ! Temporary Point +!!$ real :: p4(2) ! Temporary Point +!!$ real :: pa(2) ! 
Temporary Point +!!$ real :: utmp, vtmp, r, r0, dist, heading +!!$ integer :: i,j,k,n,iq,nreg +!!$ +!!$ real :: Vtx, p, w_p +!!$ real :: x1,y1,z1,x2,y2,z2,ang +!!$ +!!$ real, pointer, dimension(:,:,:) :: agrid, grid +!!$ real, pointer, dimension(:,:) :: area, rarea +!!$ real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc +!!$ +!!$ grid => gridstruct%grid +!!$ agrid => gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ rarea => gridstruct%rarea +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ rdxa => gridstruct%rdxa +!!$ rdya => gridstruct%rdya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ allocate( tmp(npx ,npy ,nregions) ) +!!$ allocate( tmpA(npx-1,npy-1,nregions) ) +!!$#if defined(SW_DYNAMICS) +!!$ allocate( ut(npx-1,npy-1,nregions) ) +!!$ allocate( vt(npx-1,npy-1,nregions) ) +!!$#else +!!$ allocate( ut(npx-1,npy-1,npz,nregions) ) +!!$ allocate( vt(npx-1,npy-1,npz,nregions) ) +!!$ allocate( tmpA_3d(npx-1,npy-1,npz,nregions) ) +!!$#endif +!!$ allocate( vort(isd:ied,jsd:jed) ) +!!$ +!!$ nout = nout + 1 +!!$ +!!$ if (nt==0) then +!!$ tmp(is:ie+1,js:je+1,tile) = grid(is:ie+1,js:je+1,2) +!!$ call wrtvar_ncdf(ncid, lats_id, nout, is,ie+1, js,je+1, npx+1, npy+1, 1, nregions, tmp(1:npx,1:npy,1:nregions), 3) +!!$ tmp(is:ie+1,js:je+1,tile) = grid(is:ie+1,js:je+1,1) +!!$ call wrtvar_ncdf(ncid, lons_id, nout, is,ie+1, js,je+1, npx+1, npy+1, 1, nregions, tmp(1:npx,1:npy,1:nregions), 3) +!!$ endif +!!$ +!!$#if defined(SW_DYNAMICS) +!!$ if (test_case > 1) then +!!$ tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1)/Grav +!!$ +!!$ if ((nt==0) .and. (test_case==2)) then +!!$ Ubar = (2.0*pi*radius)/(12.0*86400.0) +!!$ gh0 = 2.94e4 +!!$ phis = 0.0 +!!$ do j=js,je+1 +!!$ do i=is,ie+1 +!!$ tmp(i,j,tile) = (gh0 - (radius*omega*Ubar + (Ubar*Ubar)/2.) * & +!!$ ( -1.*cos(grid(i ,j ,1))*cos(grid(i ,j ,2))*sin(alpha) + & +!!$ sin(grid(i ,j ,2))*cos(alpha) ) ** 2.0) / Grav +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ else +!!$ +!!$ if (test_case==1) then +!!$! Get Current Height Field "Truth" +!!$ p1(1) = pi/2. + pi_shift +!!$ p1(2) = 0. +!!$ p2(1) = 3.*pi/2. + pi_shift +!!$ p2(2) = 0. +!!$ r0 = radius/3. !RADIUS /3. +!!$ dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) +!!$ heading = 5.0*pi/2.0 - alpha +!!$ call get_pt_on_great_circle( p1, p2, dist, heading, p3) +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ p2(1) = agrid(i,j,1) +!!$ p2(2) = agrid(i,j,2) +!!$ r = great_circle_dist( p3, p2, radius ) +!!$ if (r < r0) then +!!$ phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) +!!$ else +!!$ phi0(i,j,1) = phis(i,j) +!!$ endif +!!$ enddo +!!$ enddo +!!$ elseif (test_case == 0) then +!!$ phi0 = 0.0 +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ x1 = agrid(i,j,1) +!!$ y1 = agrid(i,j,2) +!!$ z1 = radius +!!$ p = p0_c0 * cos(y1) +!!$ Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) +!!$ w_p = 0.0 +!!$ if (p /= 0.0) w_p = Vtx/p +!!$ phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ tmpA(is:ie,js:je,tile) = phi0(is:ie,js:je,1) +!!$ call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) +!!$ tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1) +!!$ endif +!!$ call wrtvar_ncdf(ncid, ps_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) +!!$ +!!$ if (test_case == 9) then +!!$! 
Calc Vorticity +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ vort(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,1)*dy(i+1,j) - v(i,j,1)*dy(i,j)) - & +!!$ (u(i,j+1,1)*dx(i,j+1) - u(i,j,1)*dx(i,j)) ) +!!$ vort(i,j) = Grav*vort(i,j)/delp(i,j,1) +!!$ enddo +!!$ enddo +!!$ tmpA(is:ie,js:je,tile) = vort(is:ie,js:je) +!!$ call wrtvar_ncdf(ncid, pv_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) +!!$ endif +!!$ +!!$ call cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, 1, 1, gridstruct%grid_type, gridstruct%bounded_domain, flagstruct%c2l_ord, bd) +!!$ do j=js,je +!!$ do i=is,ie +!!$ ut(i,j,tile) = ua(i,j,1) +!!$ vt(i,j,tile) = va(i,j,1) +!!$ enddo +!!$ enddo +!!$ +!!$ call wrtvar_ncdf(ncid, u_id, nout, is,ie, js,je, npx, npy, npz, nregions, ut(1:npx-1,1:npy-1,1:nregions), 3) +!!$ call wrtvar_ncdf(ncid, v_id, nout, is,ie, js,je, npx, npy, npz, nregions, vt(1:npx-1,1:npy-1,1:nregions), 3) +!!$ +!!$ if ((test_case >= 2) .and. (nt==0) ) then +!!$ tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav +!!$ call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA, 3) +!!$ endif +!!$#else +!!$ +!!$! Write Moisture Data +!!$ tmpA_3d(is:ie,js:je,1:npz,tile) = q(is:ie,js:je,1:npz,1) +!!$ call wrtvar_ncdf(ncid, q_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$ +!!$! Write Tracer Data +!!$ do iq=2,ncnst +!!$ tmpA_3d(is:ie,js:je,1:npz,tile) = q(is:ie,js:je,1:npz,iq) +!!$ call wrtvar_ncdf(ncid, tracers_ids(iq-1), nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$ enddo +!!$ +!!$! Write Surface height data +!!$ tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav +!!$ call wrtvar_ncdf(ncid, phis_id, nout, is,ie, js,je, npx, npy, 1, nregions, tmpA, 3) +!!$ +!!$! Write Pressure Data +!!$ tmpA(is:ie,js:je,tile) = ps(is:ie,js:je) +!!$ call wrtvar_ncdf(ncid, ps_id, nout, is,ie, js,je, npx, npy, 1, nregions, tmpA, 3) +!!$ do k=1,npz +!!$ tmpA_3d(is:ie,js:je,k,tile) = delp(is:ie,js:je,k)/Grav +!!$ enddo +!!$ call wrtvar_ncdf(ncid, delp_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$ +!!$! Write PT Data +!!$ do k=1,npz +!!$ tmpA_3d(is:ie,js:je,k,tile) = pt(is:ie,js:je,k) +!!$ enddo +!!$ call wrtvar_ncdf(ncid, pt_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$ +!!$! Write U,V Data +!!$ call cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, npz, gridstruct%grid_type, gridstruct%bounded_domain, flagstruct%c2l_ord) +!!$ do k=1,npz +!!$ do j=js,je +!!$ do i=is,ie +!!$ ut(i,j,k,tile) = ua(i,j,k) +!!$ vt(i,j,k,tile) = va(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ call wrtvar_ncdf(ncid, u_id, nout, is,ie, js,je, npx, npy, npz, nregions, ut(1:npx-1,1:npy-1,1:npz,1:nregions), 4) +!!$ call wrtvar_ncdf(ncid, v_id, nout, is,ie, js,je, npx, npy, npz, nregions, vt(1:npx-1,1:npy-1,1:npz,1:nregions), 4) +!!$ +!!$ +!!$! Calc Vorticity +!!$ do k=1,npz +!!$ do j=js,je +!!$ do i=is,ie +!!$ tmpA_3d(i,j,k,tile) = rarea(i,j) * ( (v(i+1,j,k)*dy(i+1,j) - v(i,j,k)*dy(i,j)) - & +!!$ (u(i,j+1,k)*dx(i,j+1) - u(i,j,k)*dx(i,j)) ) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ call wrtvar_ncdf(ncid, pv_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$! +!!$! 
Output omega (dp/dt): +!!$ do k=1,npz +!!$ do j=js,je +!!$ do i=is,ie +!!$ tmpA_3d(i,j,k,tile) = omga(i,j,k) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ call wrtvar_ncdf(ncid, om_id, nout, is,ie, js,je, npx, npy, npz, nregions, tmpA_3d, 4) +!!$ +!!$#endif +!!$ +!!$ deallocate( tmp ) +!!$ deallocate( tmpA ) +!!$#if defined(SW_DYNAMICS) +!!$ deallocate( ut ) +!!$ deallocate( vt ) +!!$#else +!!$ deallocate( ut ) +!!$ deallocate( vt ) +!!$ deallocate( tmpA_3d ) +!!$#endif +!!$ deallocate( vort ) +!!$ +!!$ nullify(grid) +!!$ nullify(agrid) +!!$ +!!$ nullify(area) +!!$ +!!$ nullify(dx) +!!$ nullify(dy) +!!$ nullify(dxa) +!!$ nullify(dya) +!!$ nullify(rdxa) +!!$ nullify(rdya) +!!$ nullify(dxc) +!!$ nullify(dyc) +!!$ +!!$ end subroutine output_ncdf +!!$ +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!!$!------------------------------------------------------------------------------- +!!$ +!!$!------------------------------------------------------------------------------- +!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +!!$! +!!$! output :: write out fields +!!$! +!!$ subroutine output(dt, nt, maxnt, nout, u,v,pt,delp,q,phis,ps, uc,vc, ua,va, & +!!$ npx, npy, npz, ng, ncnst, ndims, nregions, phis_lun, phi_lun, & +!!$ pt_lun, pv_lun, uv_lun, gridstruct) +!!$ +!!$ real, intent(IN) :: dt +!!$ integer, intent(IN) :: nt, maxnt +!!$ integer, intent(INOUT) :: nout +!!$ +!!$ real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) +!!$ +!!$ real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) +!!$ real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) +!!$ +!!$ real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) +!!$ +!!$ integer, intent(IN) :: npx, npy, npz +!!$ integer, intent(IN) :: ng, ncnst +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions +!!$ integer, intent(IN) :: phis_lun, phi_lun, pt_lun, pv_lun, uv_lun +!!$ +!!$ type(fv_grid_type), target :: gridstruct +!!$ +!!$ real :: tmp(1-ng:npx +ng,1-ng:npy +ng,1:nregions) +!!$ real :: tmpA(1-ng:npx-1+ng,1-ng:npy-1+ng,1:nregions) +!!$ real :: p1(2) ! Temporary Point +!!$ real :: p2(2) ! Temporary Point +!!$ real :: p3(2) ! Temporary Point +!!$ real :: p4(2) ! Temporary Point +!!$ real :: pa(2) ! 
Temporary Point +!!$ real :: ut(1:npx,1:npy,1:nregions) +!!$ real :: vt(1:npx,1:npy,1:nregions) +!!$ real :: utmp, vtmp, r, r0, dist, heading +!!$ integer :: i,j,k,n,nreg +!!$ real :: vort(isd:ied,jsd:jed) +!!$ +!!$ real :: Vtx, p, w_p +!!$ real :: x1,y1,z1,x2,y2,z2,ang +!!$ +!!$ real, pointer, dimension(:,:,:) :: agrid, grid +!!$ real, pointer, dimension(:,:) :: area, rarea +!!$ real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc +!!$ +!!$ grid => gridstruct%grid +!!$ agrid => gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ rdxa => gridstruct%rdxa +!!$ rdya => gridstruct%rdya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ cubed_sphere => gridstruct%cubed_sphere +!!$ +!!$ nout = nout + 1 +!!$ +!!$#if defined(SW_DYNAMICS) +!!$ if (test_case > 1) then +!!$ call atob_s(delp(:,:,1)/Grav, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%bounded_domain) !, altInterp=1) +!!$ tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1)/Grav +!!$ +!!$ if ((nt==0) .and. (test_case==2)) then +!!$ Ubar = (2.0*pi*radius)/(12.0*86400.0) +!!$ gh0 = 2.94e4 +!!$ phis = 0.0 +!!$ do j=js,je+1 +!!$ do i=is,ie+1 +!!$ tmp(i,j,tile) = (gh0 - (radius*omega*Ubar + (Ubar*Ubar)/2.) * & +!!$ ( -1.*cos(grid(i ,j ,1))*cos(grid(i ,j ,2))*sin(alpha) + & +!!$ sin(grid(i ,j ,2))*cos(alpha) ) ** 2.0) / Grav +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ else +!!$ +!!$ if (test_case==1) then +!!$! Get Current Height Field "Truth" +!!$ p1(1) = pi/2. + pi_shift +!!$ p1(2) = 0. +!!$ p2(1) = 3.*pi/2. + pi_shift +!!$ p2(2) = 0. +!!$ r0 = radius/3. !RADIUS /3. +!!$ dist = 2.0*pi*radius* ((FLOAT(nt)/FLOAT(maxnt))) +!!$ heading = 5.0*pi/2.0 - alpha +!!$ call get_pt_on_great_circle( p1, p2, dist, heading, p3) +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ p2(1) = agrid(i,j,1) +!!$ p2(2) = agrid(i,j,2) +!!$ r = great_circle_dist( p3, p2, radius ) +!!$ if (r < r0) then +!!$ phi0(i,j,1) = phis(i,j) + gh0*0.5*(1.0+cos(PI*r/r0)) +!!$ else +!!$ phi0(i,j,1) = phis(i,j) +!!$ endif +!!$ enddo +!!$ enddo +!!$ elseif (test_case == 0) then +!!$ phi0 = 0.0 +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ x1 = agrid(i,j,1) +!!$ y1 = agrid(i,j,2) +!!$ z1 = radius +!!$ p = p0_c0 * cos(y1) +!!$ Vtx = ((3.0*SQRT(2.0))/2.0) * (( 1.0/cosh(p) )**2.0) * tanh(p) +!!$ w_p = 0.0 +!!$ if (p /= 0.0) w_p = Vtx/p +!!$ phi0(i,j,1) = 1.0 - tanh( (p/rgamma) * sin(x1 - w_p*(nt*dt/86400.0)) ) +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ call atob_s(phi0(:,:,1), tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%bounded_domain) !, altInterp=1) +!!$ tmpA(is:ie,js:je,tile) = phi0(is:ie,js:je,1) +!!$ call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ call atob_s(delp(:,:,1), tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%bounded_domain) !, altInterp=1) +!!$ tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,1) +!!$ endif +!!$ ! call wrt2d(phi_lun, nout, is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) +!!$ call wrt2d(phi_lun, nout, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ +!!$ if (test_case == 9) then +!!$! 
Calc Vorticity +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ vort(i,j) = f0(i,j) + (1./area(i,j)) * ( (v(i+1,j,1)*dy(i+1,j) - v(i,j,1)*dy(i,j)) - & +!!$ (u(i,j+1,1)*dx(i,j+1) - u(i,j,1)*dx(i,j)) ) +!!$ vort(i,j) = Grav*vort(i,j)/delp(i,j,1) +!!$ enddo +!!$ enddo +!!$ call atob_s(vort, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%bounded_domain) !, altInterp=1) +!!$ call wrt2d(pv_lun, nout, is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) +!!$ endif +!!$ +!!$ call dtoa(u , v , ua, va, dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) +!!$! Rotate winds to standard Lat-Lon orientation +!!$ if (cubed_sphere) then +!!$ do j=js,je +!!$ do i=is,ie +!!$ call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1) +!!$ call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2) +!!$ call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3) +!!$ call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4) +!!$ utmp = ua(i,j,1) +!!$ vtmp = va(i,j,1) +!!$ if (cubed_sphere) call rotate_winds(utmp,vtmp, p1,p2,p3,p4, agrid(i,j,1:2), 2, 2) +!!$ ut(i,j,tile) = utmp +!!$ vt(i,j,tile) = vtmp +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ call wrt2d(uv_lun, 2*(nout-1) + 1, is,ie, js,je, npx, npy, nregions, ut(1:npx-1,1:npy-1,1:nregions)) +!!$ call wrt2d(uv_lun, 2*(nout-1) + 2, is,ie, js,je, npx, npy, nregions, vt(1:npx-1,1:npy-1,1:nregions)) +!!$ +!!$ if ((test_case >= 2) .and. (nt==0) ) then +!!$ call atob_s(phis/Grav, tmp(isd:ied+1,jsd:jed+1,tile), npx,npy, dxa, dya, gridstruct%bounded_domain) !, altInterp=1) +!!$ ! call wrt2d(phis_lun, nout , is,ie+1, js,je+1, npx+1, npy+1, nregions, tmp(1:npx,1:npy,1:nregions)) +!!$ tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav +!!$ call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ endif +!!$#else +!!$ +!!$! Write Surface height data +!!$ if (nt==0) then +!!$ tmpA(is:ie,js:je,tile) = phis(is:ie,js:je)/Grav +!!$ call wrt2d(phis_lun, nout , is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ endif +!!$ +!!$! Write Pressure Data +!!$ +!!$ !if (tile==2) then +!!$ ! do i=is,ie +!!$ ! print*, i, ps(i,35) +!!$ ! enddo +!!$ !endif +!!$ tmpA(is:ie,js:je,tile) = ps(is:ie,js:je) +!!$ call wrt2d(phi_lun, (nout-1)*(npz+1) + 1, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ do k=1,npz +!!$ tmpA(is:ie,js:je,tile) = delp(is:ie,js:je,k)/Grav +!!$ call wrt2d(phi_lun, (nout-1)*(npz+1) + 1 + k, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ enddo +!!$ +!!$! Write PT Data +!!$ do k=1,npz +!!$ tmpA(is:ie,js:je,tile) = pt(is:ie,js:je,k) +!!$ call wrt2d(pt_lun, (nout-1)*npz + (k-1) + 1, is,ie, js,je, npx, npy, nregions, tmpA(1:npx-1,1:npy-1,1:nregions)) +!!$ enddo +!!$ +!!$! Write U,V Data +!!$ do k=1,npz +!!$ call dtoa(u(isd,jsd,k), v(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), dx,dy,dxa,dya,dxc,dyc,npx, npy, ng) +!!$! 
Rotate winds to standard Lat-Lon orientation
+!!$      if (cubed_sphere) then
+!!$         do j=js,je
+!!$            do i=is,ie
+!!$               call mid_pt_sphere(grid(i,j,1:2), grid(i,j+1,1:2), p1)
+!!$               call mid_pt_sphere(grid(i,j,1:2), grid(i+1,j,1:2), p2)
+!!$               call mid_pt_sphere(grid(i+1,j,1:2), grid(i+1,j+1,1:2), p3)
+!!$               call mid_pt_sphere(grid(i,j+1,1:2), grid(i+1,j+1,1:2), p4)
+!!$               utmp = ua(i,j,k)
+!!$               vtmp = va(i,j,k)
+!!$               if (cubed_sphere) call rotate_winds(utmp,vtmp, p1,p2,p3,p4, agrid(i,j,1:2), 2, 2)
+!!$               ut(i,j,tile) = utmp
+!!$               vt(i,j,tile) = vtmp
+!!$            enddo
+!!$         enddo
+!!$      endif
+!!$      call wrt2d(uv_lun, 2*((nout-1)*npz + (k-1)) + 1, is,ie, js,je, npx, npy, nregions, ut(1:npx-1,1:npy-1,1:nregions))
+!!$      call wrt2d(uv_lun, 2*((nout-1)*npz + (k-1)) + 2, is,ie, js,je, npx, npy, nregions, vt(1:npx-1,1:npy-1,1:nregions))
+!!$      enddo
+!!$#endif
+!!$
+!!$      nullify(grid)
+!!$      nullify(agrid)
+!!$
+!!$      nullify(area)
+!!$
+!!$      nullify(dx)
+!!$      nullify(dy)
+!!$      nullify(dxa)
+!!$      nullify(dya)
+!!$      nullify(rdxa)
+!!$      nullify(rdya)
+!!$      nullify(dxc)
+!!$      nullify(dyc)
+!!$
+!!$      nullify(cubed_sphere)
+!!$
+!!$ end subroutine output
+!!$!
+!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
+!!$!-------------------------------------------------------------------------------
+!!$
+!!$!-------------------------------------------------------------------------------
+!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
+!!$! wrtvar_ncdf :: write out a 2d, 3d, or 4d field to netCDF
+!!$!
+!!$ subroutine wrtvar_ncdf(ncid, varid, nrec, i1,i2, j1,j2, npx, npy, npz, ntiles, p, ndims)
+!!$#include <netcdf.inc>
+!!$      integer, intent(IN) :: ncid, varid
+!!$      integer, intent(IN) :: nrec
+!!$      integer, intent(IN) :: i1,i2,j1,j2
+!!$      integer, intent(IN) :: npx
+!!$      integer, intent(IN) :: npy
+!!$      integer, intent(IN) :: npz
+!!$      integer, intent(IN) :: ntiles
+!!$      real , intent(IN) :: p(npx-1,npy-1,npz,ntiles)
+!!$      integer, intent(IN) :: ndims
+!!$
+!!$      integer :: error
+!!$      real(kind=4), allocatable :: p_R4(:,:,:,:)
+!!$      integer :: i,j,k,n
+!!$      integer :: istart(ndims+1), icount(ndims+1)
+!!$
+!!$      allocate( p_R4(npx-1,npy-1,npz,ntiles) )
+!!$
+!!$      p_R4(:,:,:,:) = missing
+!!$      p_R4(i1:i2,j1:j2,1:npz,tile) = p(i1:i2,j1:j2,1:npz,tile)
+!!$      call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, npz, ntiles)
+!!$
+!!$      istart(:) = 1
+!!$      istart(ndims+1) = nrec
+!!$      icount(1) = npx-1
+!!$      icount(2) = npy-1
+!!$      icount(3) = npz
+!!$      if (ndims == 3) icount(3) = ntiles
+!!$      if (ndims == 4) icount(4) = ntiles
+!!$      icount(ndims+1) = 1
+!!$
+!!$      if (is_master()) then
+!!$         error = NF_PUT_VARA_REAL(ncid, varid, istart, icount, p_R4)
+!!$      endif ! masterproc
+!!$
+!!$      deallocate( p_R4 )
+!!$
+!!$ end subroutine wrtvar_ncdf
+!!$!
+!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
+!!$!-------------------------------------------------------------------------------
+!!$
+!!$!-------------------------------------------------------------------------------
+!!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
+!!$! wrt2d :: write out a 2d field
+!!$!
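!-------------------------------------------------------------------------------
! A minimal single-PE sketch of the record-addressed I/O pattern used by wrt2d
! below: the real routine first gathers every tile's patch into a real(kind=4)
! buffer with mp_gather and writes only on the master PE; stripped of the
! gather, the write itself is plain direct-access Fortran I/O. File and unit
! names here are hypothetical, and recl is in bytes only under gfortran's
! defaults (it is compiler-dependent):

program wrt2d_sketch
  implicit none
  integer, parameter :: nx = 4, ny = 3
  real(kind=4) :: buf(nx,ny)
  integer :: lun, nrec

  buf  = 1.0
  nrec = 1
  open(newunit=lun, file='field.dat', access='direct', &
       form='unformatted', recl=4*nx*ny)
  write(lun, rec=nrec) buf   ! record nrec holds one complete 2d field
  close(lun)
end program wrt2d_sketch

! Because records are addressed explicitly (e.g. 2*(nout-1)+1 for u and
! 2*(nout-1)+2 for v in the caller above), successive time levels can be
! written without rewinding the unit.
!-------------------------------------------------------------------------------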
+!!$ subroutine wrt2d(iout, nrec, i1,i2, j1,j2, npx, npy, nregions, p) +!!$ integer, intent(IN) :: iout +!!$ integer, intent(IN) :: nrec +!!$ integer, intent(IN) :: i1,i2,j1,j2 +!!$ integer, intent(IN) :: npx +!!$ integer, intent(IN) :: npy +!!$ integer, intent(IN) :: nregions +!!$ real , intent(IN) :: p(npx-1,npy-1,nregions) +!!$ +!!$ real(kind=4) :: p_R4(npx-1,npy-1,nregions) +!!$ integer :: i,j,n +!!$ +!!$ do n=tile,tile +!!$ do j=j1,j2 +!!$ do i=i1,i2 +!!$ p_R4(i,j,n) = p(i,j,n) +!!$ enddo +!!$ enddo +!!$ enddo +!!$ +!!$ call mp_gather(p_R4, i1,i2, j1,j2, npx-1, npy-1, nregions) +!!$ +!!$ if (is_master()) then +!!$ write(iout,rec=nrec) p_R4(1:npx-1,1:npy-1,1:nregions) +!!$ endif ! masterproc +!!$ +!!$ end subroutine wrt2d +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!!$!------------------------------------------------------------------------------- +!!$#endif !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! init_double_periodic @@ -6002,7 +6093,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, gridstruct, flagstruct, npx, npy, npz, ng, ncnst, nwat, ndims, nregions, dry_mass, & mountain, moist_phys, hydrostatic, hybrid_z, delz, ze0, ks, ptop, domain_in, tile_in, bd) - + type(fv_grid_bounds_type), intent(IN) :: bd real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) @@ -6010,7 +6101,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real , intent(INOUT) :: pt(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: q(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, ncnst) - + real , intent(INOUT) :: phis(bd%isd:bd%ied ,bd%jsd:bd%jed ) real , intent(INOUT) :: ps(bd%isd:bd%ied ,bd%jsd:bd%jed ) @@ -6022,17 +6113,17 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real , intent(INOUT) :: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) real , intent(INOUT) :: ua(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) real , intent(INOUT) :: va(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) - real , intent(inout) :: delz(bd%isd:,bd%jsd:,1:) + real , intent(inout) :: delz(bd%is:,bd%js:,1:) real , intent(inout) :: ze0(bd%is:,bd%js:,1:) - + real , intent(inout) :: ak(npz+1) real , intent(inout) :: bk(npz+1) - + integer, intent(IN) :: npx, npy, npz integer, intent(IN) :: ng, ncnst, nwat integer, intent(IN) :: ndims integer, intent(IN) :: nregions - + real, intent(IN) :: dry_mass logical, intent(IN) :: mountain logical, intent(IN) :: moist_phys @@ -6075,7 +6166,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, real, pointer :: acapN, acapS, globalarea real(kind=R_GRID), pointer :: dx_const, dy_const - + integer :: is, ie, js, je integer :: isd, ied, jsd, jed @@ -6139,7 +6230,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, vc(:,:,:)=10. pt(:,:,:)=1. delp(:,:,:)=0. - + do j=js,je if (j>0 .and. j<5) then do i=is,ie @@ -6202,7 +6293,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do k=1,npz prf = ak(k) + ps(i,j)*bk(k) if ( prf > 100.E2 ) then - pt(i,j,k) = pt(i,j,k) + 0.01*(1. - (dist/r0)) * prf/ps(i,j) + pt(i,j,k) = pt(i,j,k) + 0.01*(1. 
- (dist/r0)) * prf/ps(i,j) endif enddo enddo @@ -6211,12 +6302,12 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, if ( hydrostatic ) then call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - moist_phys, .true., nwat , domain) + moist_phys, .true., nwat , domain, flagstruct%adiabatic) else w(:,:,:) = 0. call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - moist_phys, hydrostatic, nwat, domain, .true. ) + moist_phys, hydrostatic, nwat, domain, flagstruct%adiabatic, .true. ) endif q = 0. @@ -6272,7 +6363,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - moist_phys, .false., nwat, domain) + moist_phys, .false., nwat, domain, flagstruct%adiabatic) ! *** Add Initial perturbation *** r0 = 5.*max(dx_const, dy_const) @@ -6329,7 +6420,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do i=is,ie delz(i,j,k) = ze1(k+1) - ze1(k) pk(i,j,k) = pk(i,j,k+1) + grav*delz(i,j,k)/(cp_air*t00)*pk0 - pe(i,k,j) = pk(i,j,k)**(1./kappa) + pe(i,k,j) = pk(i,j,k)**(1./kappa) enddo enddo enddo @@ -6340,7 +6431,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do k=1,npz+1 do j=js,je do i=is,ie - peln(i,k,j) = log(pe(i,k,j)) + peln(i,k,j) = log(pe(i,k,j)) ze0(i,j,k) = ze1(k) enddo enddo @@ -6350,14 +6441,14 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do j=js,je do i=is,ie pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) pt(i,j,k) = t00/pk0 ! potential temp enddo enddo enddo pturb = 15. - xmax = 51.2E3 + xmax = 51.2E3 xc = xmax / 2. do k=1,npz @@ -6365,11 +6456,11 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, do j=js,je do i=is,ie ! Impose perturbation in potential temperature: pturb - xx = (dx_const * (0.5+real(i-1)) - xc) / 4.E3 + xx = (dx_const * (0.5+real(i-1)) - xc) / 4.E3 yy = (dy_const * (0.5+real(j-1)) - xc) / 4.E3 dist = sqrt( xx**2 + yy**2 + zm**2 ) if ( dist<=1. ) then - pt(i,j,k) = pt(i,j,k) - pturb/pkz(i,j,k)*(cos(pi*dist)+1.)/2. + pt(i,j,k) = pt(i,j,k) - pturb/pkz(i,j,k)*(cos(pi*dist)+1.)/2. endif ! Transform back to temperature: pt(i,j,k) = pt(i,j,k) * pkz(i,j,k) @@ -6410,6 +6501,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, pk1(k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) enddo + call SuperCell_Sounding(npz, p00, pk1, ts1, qs1) v(:,:,:) = 0. w(:,:,:) = 0. @@ -6442,7 +6534,7 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - .true., hydrostatic, nwat, domain) + .true., hydrostatic, nwat, domain, flagstruct%adiabatic) ! *** Add Initial perturbation *** pturb = 2. @@ -6463,958 +6555,755 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, enddo enddo endif - enddo - - case ( 18 ) -!--------------------------- -! Doubly periodic SuperCell, quarter circle hodograph -! M. 
Toy, Apr 2013, MWR - pturb = 2.5 - zvir = rvgas/rdgas - 1. - p00 = 1000.E2 - ps(:,:) = p00 - phis(:,:) = 0. - do j=js,je - do i=is,ie - pk(i,j,1) = ptop**kappa - pe(i,1,j) = ptop - peln(i,1,j) = log(ptop) - enddo - enddo - - do k=1,npz - do j=js,je - do i=is,ie - delp(i,j,k) = ak(k+1)-ak(k) + ps(i,j)*(bk(k+1)-bk(k)) - pe(i,k+1,j) = ak(k+1) + ps(i,j)*bk(k+1) - peln(i,k+1,j) = log(pe(i,k+1,j)) - pk(i,j,k+1) = exp( kappa*peln(i,k+1,j) ) - enddo - enddo - enddo - - i = is - j = js - do k=1,npz - pk1(k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - enddo - - - w(:,:,:) = 0. - q(:,:,:,:) = 0. - - do k=1,npz - do j=js,je - do i=is,ie - pt(i,j,k) = ts1(k) - q(i,j,k,1) = qs1(k) - delz(i,j,k) = rdgas/grav*ts1(k)*(1.+zvir*qs1(k))*(peln(i,k,j)-peln(i,k+1,j)) - enddo - enddo - enddo - - ze1(npz+1) = 0. - do k=npz,1,-1 - ze1(k) = ze1(k+1) - delz(is,js,k) - enddo - -! Quarter-circle hodograph (Harris approximation) - us0 = 30. - do k=1,npz - zm = 0.5*(ze1(k)+ze1(k+1)) - if ( zm .le. 2.e3 ) then - utmp = 8.*(1.-cos(pi*zm/4.e3)) - vtmp = 8.*sin(pi*zm/4.e3) - elseif (zm .le. 6.e3 ) then - utmp = 8. + (us0-8.)*(zm-2.e3)/4.e3 - vtmp = 8. - else - utmp = us0 - vtmp = 8. - endif -! u-wind - do j=js,je+1 - do i=is,ie - u(i,j,k) = utmp - 8. - enddo - enddo -! v-wind - do j=js,je - do i=is,ie+1 - v(i,j,k) = vtmp - 4. - enddo - enddo - enddo - - - call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & - pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & - .true., hydrostatic, nwat, domain) - -! *** Add Initial perturbation *** - if (bubble_do) then - r0 = 10.e3 - zc = 1.4e3 ! center of bubble from surface - icenter = (npx-1)/2 + 1 - jcenter = (npy-1)/2 + 1 - do k=1, npz - zm = 0.5*(ze1(k)+ze1(k+1)) - ptmp = ( (zm-zc)/zc ) **2 - if ( ptmp < 1. ) then - do j=js,je - do i=is,ie - dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/r0)**2 - if ( dist < 1. ) then - pt(i,j,k) = pt(i,j,k) + pturb*(1.-sqrt(dist)) - endif - enddo - enddo - endif - enddo - endif - - case ( 101 ) - -! IC for LES - t00 = 250. ! constant temp - p00 = 1.E5 - pk0 = p00**kappa - - phis = 0. - u = 0. - v = 0. - w = 0. - pt(:,:,:) = t00 - q(:,:,:,1) = 0. - - if (.not.hybrid_z) call mpp_error(FATAL, 'hybrid_z must be .TRUE.') - - rgrav = 1./ grav - - if ( npz/=101) then - call mpp_error(FATAL, 'npz must be == 101 ') - else - call compute_dz_L101( npz, ztop, dz1 ) - endif - - call set_hybrid_z(is, ie, js, je, ng, npz, ztop, dz1, rgrav, & - phis, ze0, delz) - - do j=js,je - do i=is,ie - ps(i,j) = p00 - pe(i,npz+1,j) = p00 - pk(i,j,npz+1) = pk0 - peln(i,npz+1,j) = log(p00) - enddo - enddo - - do k=npz,1,-1 - do j=js,je - do i=is,ie - peln(i,k,j) = peln(i,k+1,j) + grav*delz(i,j,k)/(rdgas*t00) - pe(i,k,j) = exp(peln(i,k,j)) - pk(i,j,k) = pe(i,k,j)**kappa - enddo - enddo - enddo - - -! Set up fake "sigma" coordinate - call make_eta_level(npz, pe, area, ks, ak, bk, ptop, domain, bd) - - if ( is_master() ) write(*,*) 'LES testcase: computed model top (mb)=', ptop/100. - - do k=1,npz - do j=js,je - do i=is,ie - pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) - enddo - enddo - enddo - - do k=1,npz - do j=js,je - do i=is,ie - pm(i) = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) - enddo - call qsmith(ie-is+1, 1, 1, pt(is:ie,j,k), pm, q(is:ie,j,k,1), qs) - do i=is,ie - if ( pm(i) > 100.E2 ) then - q(i,j,k,1) = 0.9*qs(i) - else - q(i,j,k,1) = 2.E-6 - endif - enddo - enddo - enddo - -! *** Add perturbation *** - r0 = 1.0e3 ! 
radius (m) - zc = 1.0e3 ! center of bubble - icenter = npx/2 - jcenter = npy/2 - - do k=1,npz - do j=js,je - do i=is,ie - zm = 0.5*(ze0(i,j,k)+ze0(i,j,k+1)) - dist = ((i-icenter)*dx_const)**2 + ((j-jcenter)*dy_const)**2 + (zm-zc)**2 - dist = sqrt(dist) - if ( dist <= r0 ) then - pt(i,j,k) = pt(i,j,k) + 2.0*(1.-dist/r0) - endif - enddo - enddo - enddo - - end select - - nullify(grid) - nullify(agrid) - - nullify(area) - - nullify(fC) - nullify(f0) - - nullify(ee1) - nullify(ee2) - nullify(ew) - nullify(es) - nullify(en1) - nullify(en2) - - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) - - nullify(dx_const) - nullify(dy_const) - - nullify(domain) - nullify(tile) - - nullify(have_south_pole) - nullify(have_north_pole) + enddo - nullify(ntiles_g) - nullify(acapN) - nullify(acapS) - nullify(globalarea) + case ( 18 ) +!--------------------------- +! Doubly periodic SuperCell, quarter circle hodograph +! M. Toy, Apr 2013, MWR + pturb = 2.5 + zvir = rvgas/rdgas - 1. + p00 = 1000.E2 + ps(:,:) = p00 + phis(:,:) = 0. + do j=js,je + do i=is,ie + pk(i,j,1) = ptop**kappa + pe(i,1,j) = ptop + peln(i,1,j) = log(ptop) + enddo + enddo - end subroutine init_double_periodic + do k=1,npz + do j=js,je + do i=is,ie + delp(i,j,k) = ak(k+1)-ak(k) + ps(i,j)*(bk(k+1)-bk(k)) + pe(i,k+1,j) = ak(k+1) + ps(i,j)*bk(k+1) + peln(i,k+1,j) = log(pe(i,k+1,j)) + pk(i,j,k+1) = exp( kappa*peln(i,k+1,j) ) + enddo + enddo + enddo - subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz) -! This is the z-ccordinate version: -! Morris Weisman & J. Klemp 2002 sounding - integer, intent(in):: km - real, intent(in):: p00 - real, intent(inout), dimension(km+1):: pe - real, intent(in), dimension(km+1):: ze -! pt: potential temperature / pk0 -! qz: specific humidity (mixing ratio) - real, intent(out), dimension(km):: pt, qz -! Local: - integer, parameter:: nx = 5 - real, parameter:: qst = 1.0e-6 - real, parameter:: qv0 = 1.4e-2 - real, parameter:: ztr = 12.E3 - real, parameter:: ttr = 213. - real, parameter:: ptr = 343. ! Tropopause potential temp. - real, parameter:: pt0 = 300. ! surface potential temperature - real, dimension(km):: zs, rh, temp, dp, dp0 - real, dimension(km+1):: peln, pk - real:: qs, zvir, fac_z, pk0, temp1, pm - integer:: k, n, kk + i = is + j = js + do k=1,npz + pk1(k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) + enddo - zvir = rvgas/rdgas - 1. - pk0 = p00**kappa - if ( (is_master()) ) then - write(*,*) 'Computing sounding for HIWPP super-cell test using p00=', p00 - endif + call SuperCell_Sounding(npz, p00, pk1, ts1, qs1) - qz(:) = qst - rh(:) = 0.25 + w(:,:,:) = 0. + q(:,:,:,:) = 0. - do k=1, km - zs(k) = 0.5*(ze(k)+ze(k+1)) -! Potential temperature - if ( zs(k) .gt. ztr ) then -! Stratosphere: - pt(k) = ptr*exp(grav*(zs(k)-ztr)/(cp_air*ttr)) - else -! Troposphere: - fac_z = (zs(k)/ztr)**1.25 - pt(k) = pt0 + (ptr-pt0)* fac_z - rh(k) = 1. - 0.75 * fac_z -! First guess on q: - qz(k) = qv0 - (qv0-qst)*fac_z - endif - if ( is_master() ) write(*,*) zs(k), pt(k), qz(k) -! Convert to FV's definition of potential temperature - pt(k) = pt(k) / pk0 - enddo + do k=1,npz + do j=js,je + do i=is,ie + pt(i,j,k) = ts1(k) + q(i,j,k,1) = qs1(k) + delz(i,j,k) = rdgas/grav*ts1(k)*(1.+zvir*qs1(k))*(peln(i,k,j)-peln(i,k+1,j)) + enddo + enddo + enddo -#ifdef USE_MOIST_P00 -!-------------------------------------- -! Iterate nx times with virtual effect: -!-------------------------------------- -! 
pt & height remain unchanged - pk(km+1) = pk0 - pe(km+1) = p00 ! Dry - peln(km+1) = log(p00) + ze1(npz+1) = 0. + do k=npz,1,-1 + ze1(k) = ze1(k+1) - delz(is,js,k) + enddo - do n=1, nx -! Derive pressure fields from hydrostatic balance: - do k=km,1,-1 - pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k))) - peln(k) = log(pk(k)) / kappa - pe(k) = exp(peln(k)) - enddo - do k=1, km - pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k)) - temp(k) = pt(k)*pm**kappa -! NCAR form: - qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) - qz(k) = min( qv0, rh(k)*qs ) - if ( n==nx .and. is_master() ) write(*,*) 0.01*pm, temp(k), qz(k), qs - enddo - enddo -#else -! pt & height remain unchanged - pk(km+1) = pk0 - pe(km+1) = p00 ! Dry - peln(km+1) = log(p00) +! Quarter-circle hodograph (Harris approximation) + us0 = 30. + do k=1,npz + zm = 0.5*(ze1(k)+ze1(k+1)) + if ( zm .le. 2.e3 ) then + utmp = 8.*(1.-cos(pi*zm/4.e3)) + vtmp = 8.*sin(pi*zm/4.e3) + elseif (zm .le. 6.e3 ) then + utmp = 8. + (us0-8.)*(zm-2.e3)/4.e3 + vtmp = 8. + else + utmp = us0 + vtmp = 8. + endif +! u-wind + do j=js,je+1 + do i=is,ie + u(i,j,k) = utmp - 8. + enddo + enddo +! v-wind + do j=js,je + do i=is,ie+1 + v(i,j,k) = vtmp - 4. + enddo + enddo + enddo -! Derive "dry" pressure fields from hydrostatic balance: - do k=km,1,-1 - pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)) - peln(k) = log(pk(k)) / kappa - pe(k) = exp(peln(k)) - enddo - do k=1, km - dp0(k) = pe(k+1) - pe(k) - pm = dp0(k)/(peln(k+1)-peln(k)) - temp(k) = pt(k)*pm**kappa -! NCAR form: - qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) - qz(k) = min( qv0, rh(k)*qs ) - enddo - do n=1, nx + call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & + pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & + .true., hydrostatic, nwat, domain, flagstruct%adiabatic) - do k=1, km - dp(k) = dp0(k)*(1. + qz(k)) ! moist air - pe(k+1) = pe(k) + dp(k) - enddo -! dry pressure, pt & height remain unchanged - pk(km+1) = pe(km+1)**kappa - peln(km+1) = log(pe(km+1)) +! *** Add Initial perturbation *** + if (bubble_do) then + r0 = 10.e3 + zc = 1.4e3 ! center of bubble from surface + icenter = (npx-1)/2 + 1 + jcenter = (npy-1)/2 + 1 + do k=1, npz + zm = 0.5*(ze1(k)+ze1(k+1)) + ptmp = ( (zm-zc)/zc ) **2 + if ( ptmp < 1. ) then + do j=js,je + do i=is,ie + dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/r0)**2 + if ( dist < 1. ) then + pt(i,j,k) = pt(i,j,k) + pturb*(1.-sqrt(dist)) + endif + enddo + enddo + endif + enddo + endif -! Derive pressure fields from hydrostatic balance: - do k=km,1,-1 - pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k))) - peln(k) = log(pk(k)) / kappa - pe(k) = exp(peln(k)) - enddo - do k=1, km - pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k)) - temp(k) = pt(k)*pm**kappa -! NCAR form: - qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) - qz(k) = min( qv0, rh(k)*qs ) - if ( n==nx .and. is_master() ) write(*,*) 0.01*pm, temp(k), qz(k), qs - enddo - enddo -#endif - - if ( is_master() ) then - write(*,*) 'Super_K: computed ptop (mb)=', 0.01*pe(1), ' PS=', 0.01*pe(km+1) - call prt_m1('1D Sounding T0', temp, 1, km, 1, 1, 0, 1, 1.) - endif + case ( 101 ) - end subroutine SuperK_Sounding +! IC for LES + t00 = 250. ! 
constant temp + p00 = 1.E5 + pk0 = p00**kappa - subroutine balanced_K(km, is, ie, js, je, ng, ps0, ze1, ts1, qs1, uz1, dudz, pe, pk, pt, & - delz, zvir, ptop, ak, bk, agrid) - integer, intent(in):: is, ie, js, je, ng, km - real, intent(in), dimension(km ):: ts1, qs1, uz1, dudz - real, intent(in), dimension(km+1):: ze1 - real, intent(in):: zvir, ps0 - real, intent(inout):: ptop - real(kind=R_GRID), intent(in):: agrid(is-ng:ie+ng,js-ng:je+ng,2) - real, intent(inout), dimension(km+1):: ak, bk - real, intent(inout), dimension(is-ng:ie+ng,js-ng:je+ng,km):: pt, delz - real, intent(out), dimension(is:ie,js:je,km+1):: pk -! pt is FV's cp*thelta_v - real, intent(inout), dimension(is-1:ie+1,km+1,js-1:je+1):: pe -! Local - integer, parameter:: nt=5 - integer, parameter:: nlat=1001 - real, dimension(nlat,km):: pt2, pky, dzc - real, dimension(nlat,km+1):: pk2, pe2, peln2, pte - real, dimension(km+1):: pe1 - real:: lat(nlat), latc(nlat-1) - real:: fac_y, dlat, dz0, pk0, tmp1, tmp2, tmp3, pint - integer::i,j,k,n, jj, k1 - real:: p00=1.e5 + phis = 0. + u = 0. + v = 0. + w = 0. + pt(:,:,:) = t00 + q(:,:,:,1) = 0. - pk0 = p00**kappa - dz0 = ze1(km) - ze1(km+1) -!!! dzc(:,:) =dz0 + if (.not.hybrid_z) call mpp_error(FATAL, 'hybrid_z must be .TRUE.') - dlat = 0.5*pi/real(nlat-1) - do j=1,nlat - lat(j) = dlat*real(j-1) - do k=1,km - dzc(j,k) = ze1(k) - ze1(k+1) - enddo - enddo - do j=1,nlat-1 - latc(j) = 0.5*(lat(j)+lat(j+1)) - enddo + rgrav = 1./ grav -! Initialize pt2 - do k=1,km - do j=1,nlat - pt2(j,k) = ts1(k) - enddo - enddo - if ( is_master() ) then - tmp1 = pk0/cp_air - call prt_m1('Super_K PT0', pt2, 1, nlat, 1, km, 0, 1, tmp1) - endif + if ( npz/=101) then + call mpp_error(FATAL, 'npz must be == 101 ') + else + call compute_dz_L101( npz, ztop, dz1 ) + endif -! pt2 defined from Eq to NP -! Check NP - do n=1, nt -! Compute edge values - call ppme(pt2, pte, dzc, nlat, km) - do k=1,km - do j=2,nlat - tmp1 = 0.5*(pte(j-1,k ) + pte(j,k )) - tmp3 = 0.5*(pte(j-1,k+1) + pte(j,k+1)) - pt2(j,k) = pt2(j-1,k) + dlat/(2.*grav)*sin(2.*latc(j-1))*uz1(k)* & - ( uz1(k)*(tmp1-tmp3)/dzc(j,k) - (pt2(j-1,k)+pt2(j,k))*dudz(k) ) - enddo - enddo - if ( is_master() ) then - call prt_m1('Super_K PT', pt2, 1, nlat, 1, km, 0, 1, pk0/cp_air) - endif - enddo -! -! Compute surface pressure using gradient-wind balance: -!!! pk2(1,km+1) = pk0 - pk2(1,km+1) = ps0**kappa ! fixed at equator - do j=2,nlat - pk2(j,km+1) = pk2(j-1,km+1) - dlat*uz1(km)*uz1(km)*sin(2.*latc(j-1)) & - / (pt2(j-1,km) + pt2(j,km)) - enddo -! Compute pressure using hydrostatic balance: - do j=1,nlat - do k=km,1,-1 - pk2(j,k) = pk2(j,k+1) - grav*dzc(j,k)/pt2(j,k) - enddo - enddo + call set_hybrid_z(is, ie, js, je, ng, npz, ztop, dz1, rgrav, & + phis, ze0, delz) - do k=1,km+1 - do j=1,nlat - peln2(j,k) = log(pk2(j,k)) / kappa - pe2(j,k) = exp(peln2(j,k)) - enddo - enddo -! 
Convert pt2 to temperature - do k=1,km - do j=1,nlat - pky(j,k) = (pk2(j,k+1)-pk2(j,k))/(kappa*(peln2(j,k+1)-peln2(j,k))) - pt2(j,k) = pt2(j,k)*pky(j,k)/(cp_air*(1.+zvir*qs1(k))) - enddo - enddo + do j=js,je + do i=is,ie + ps(i,j) = p00 + pe(i,npz+1,j) = p00 + pk(i,j,npz+1) = pk0 + peln(i,npz+1,j) = log(p00) + enddo + enddo - do k=1,km+1 - pe1(k) = pe2(1,k) - enddo + do k=npz,1,-1 + do j=js,je + do i=is,ie + peln(i,k,j) = peln(i,k+1,j) + grav*delz(i,j,k)/(rdgas*t00) + pe(i,k,j) = exp(peln(i,k,j)) + pk(i,j,k) = pe(i,k,j)**kappa + enddo + enddo + enddo - if ( is_master() ) then - write(*,*) 'SuperK ptop at EQ=', 0.01*pe1(1), 'new ptop=', 0.01*ptop - call prt_m1('Super_K pe', pe2, 1, nlat, 1, km+1, 0, 1, 0.01) - call prt_m1('Super_K Temp', pt2, 1, nlat, 1, km, 0, 1, 1.) - endif -! Interpolate (pt2, pk2) from lat-dir to cubed-sphere - do j=js, je - do i=is, ie - do jj=1,nlat-1 - if (abs(agrid(i,j,2))>=lat(jj) .and. abs(agrid(i,j,2))<=lat(jj+1) ) then -! found it ! - fac_y = (abs(agrid(i,j,2))-lat(jj)) / dlat - do k=1,km - pt(i, j,k) = pt2(jj, k) + fac_y*(pt2(jj+1, k)-pt2(jj,k)) - enddo - do k=1,km+1 - pe(i,k,j) = pe2(jj,k) + fac_y*(pe2(jj+1,k)-pe2(jj,k)) - enddo -! k = km+1 -! pk(i,j,k) = pk2(jj,k) + fac_y*(pk2(jj+1,k)-pk2(jj,k)) - goto 123 - endif - enddo -123 continue - enddo - enddo +! Set up fake "sigma" coordinate + call make_eta_level(npz, pe, area, ks, ak, bk, ptop, domain, bd) -! Adjust pk -! ak & bk -! Adjusting model top to be a constant pressure surface, assuming isothermal atmosphere -! pe = ak + bk*ps -! One pressure layer - pe1(1) = ptop - ak(1) = ptop - pint = pe1(2) - bk(1) = 0. - ak(2) = pint - bk(2) = 0. - do k=3,km+1 - bk(k) = (pe1(k) - pint) / (pe1(km+1)-pint) ! bk == sigma - ak(k) = pe1(k) - bk(k) * pe1(km+1) - if ( is_master() ) write(*,*) k, ak(k), bk(k) + if ( is_master() ) write(*,*) 'LES testcase: computed model top (mb)=', ptop/100. + + do k=1,npz + do j=js,je + do i=is,ie + pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) + delp(i,j,k) = pe(i,k+1,j)-pe(i,k,j) + enddo + enddo + enddo + + do k=1,npz + do j=js,je + do i=is,ie + pm(i) = delp(i,j,k)/(peln(i,k+1,j)-peln(i,k,j)) + enddo + call qsmith(ie-is+1, 1, 1, pt(is:ie,j,k), pm, q(is:ie,j,k,1), qs) + do i=is,ie + if ( pm(i) > 100.E2 ) then + q(i,j,k,1) = 0.9*qs(i) + else + q(i,j,k,1) = 2.E-6 + endif + enddo + enddo enddo - ak(km+1) = 0. - bk(km+1) = 1. - do j=js, je - do i=is, ie - pe(i,1,j) = ptop - enddo - enddo +! *** Add perturbation *** + r0 = 1.0e3 ! radius (m) + zc = 1.0e3 ! center of bubble + icenter = npx/2 + jcenter = npy/2 - end subroutine balanced_K + do k=1,npz + do j=js,je + do i=is,ie + zm = 0.5*(ze0(i,j,k)+ze0(i,j,k+1)) + dist = ((i-icenter)*dx_const)**2 + ((j-jcenter)*dy_const)**2 + (zm-zc)**2 + dist = sqrt(dist) + if ( dist <= r0 ) then + pt(i,j,k) = pt(i,j,k) + 2.0*(1.-dist/r0) + endif + enddo + enddo + enddo - subroutine SuperK_u(km, zz, um, dudz) - integer, intent(in):: km - real, intent(in):: zz(km) - real, intent(out):: um(km), dudz(km) -! Local - real, parameter:: zs = 5.e3 - real, parameter:: us = 30. - real:: uc = 15. - integer k + end select - do k=1, km -#ifndef TEST_TANHP -! MPAS specification: - if ( zz(k) .gt. zs+1.e3 ) then - um(k) = us - dudz(k) = 0. - elseif ( abs(zz(k)-zs) .le. 1.e3 ) then - um(k) = us*(-4./5. + 3.*zz(k)/zs - 5./4.*(zz(k)/zs)**2) - dudz(k) = us/zs*(3. - 5./2.*zz(k)/zs) - else - um(k) = us*zz(k)/zs - dudz(k) = us/zs - endif -! constant wind so as to make the storm relatively stationary - um(k) = um(k) - uc -#else - uc = 12. ! 
this gives near stationary (in longitude) storms - um(k) = us*tanh( zz(k)/zs ) - uc - dudz(k) = (us/zs)/cosh(zz(k)/zs)**2 -#endif - enddo + nullify(grid) + nullify(agrid) - end subroutine superK_u + nullify(area) + nullify(fC) + nullify(f0) - subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,& - is,ie,js,je,isd,ied,jsd,jed,npz,nq,ak,bk,ptop, & - pk,peln,pe,pkz,gz,phis,ps,grid,agrid, & - hydrostatic, nwat, adiabatic, do_pert, domain) + nullify(ee1) + nullify(ee2) + nullify(ew) + nullify(es) + nullify(en1) + nullify(en2) - integer, intent(IN) :: is,ie,js,je,isd,ied,jsd,jed,npz,nq, nwat - real, intent(IN) :: ptop - real, intent(IN), dimension(npz+1) :: ak, bk - real, intent(INOUT), dimension(isd:ied,jsd:jed,npz,nq) :: q - real, intent(OUT), dimension(isd:ied,jsd:jed,npz) :: delp, pt, w, delz - real, intent(OUT), dimension(isd:ied,jsd:jed+1,npz) :: u - real, intent(OUT), dimension(isd:ied+1,jsd:jed,npz) :: v - real, intent(OUT), dimension(is:ie,js:je,npz+1) :: pk - real, intent(OUT), dimension(is:ie,npz+1,js:je) :: peln - real, intent(OUT), dimension(is-1:ie+1,npz+1,js-1:je+1) :: pe - real, intent(OUT), dimension(is:ie,js:je,npz) :: pkz - real, intent(OUT), dimension(isd:ied,jsd:jed) :: phis,ps - real(kind=R_GRID), intent(IN), dimension(isd:ied,jsd:jed,2) :: agrid - real(kind=R_GRID), intent(IN), dimension(isd:ied+1,jsd:jed+1,2) :: grid - real, intent(OUT), dimension(isd:ied,jsd:jed,npz+1) :: gz - logical, intent(IN) :: hydrostatic,adiabatic,do_pert - type(domain2d), intent(INOUT) :: domain + nullify(dx) + nullify(dy) + nullify(dxa) + nullify(dya) + nullify(rdxa) + nullify(rdya) + nullify(dxc) + nullify(dyc) - real, parameter :: p0 = 1.e5 - real, parameter :: u0 = 35. - real, parameter :: b = 2. - real, parameter :: KK = 3. - real, parameter :: Te = 310. - real, parameter :: Tp = 240. - real, parameter :: T0 = 0.5*(Te + Tp) !!WRONG in document - real, parameter :: up = 1. - real, parameter :: zp = 1.5e4 - real(kind=R_GRID), parameter :: lamp = pi/9. - real(kind=R_GRID), parameter :: phip = 2.*lamp - real(kind=R_GRID), parameter :: ppcenter(2) = (/ lamp, phip /) - real, parameter :: Rp = radius/10. - real, parameter :: lapse = 5.e-3 - real, parameter :: dT = 4.8e5 - real, parameter :: phiW = 2.*pi/9. - real, parameter :: pW = 34000. - real, parameter :: q0 = .018 - real, parameter :: qt = 1.e-12 - real, parameter :: ptrop = 1.e4 + nullify(dx_const) + nullify(dy_const) + + nullify(domain) + nullify(tile) - real, parameter :: zconv = 1.e-6 - real, parameter :: rdgrav = rdgas/grav - real, parameter :: zvir = rvgas/rdgas - 1. - real, parameter :: rrdgrav = grav/rdgas + nullify(have_south_pole) + nullify(have_north_pole) - integer :: i,j,k,iter, sphum, cl, cl2, n - real :: p,z,z0,ziter,piter,titer,uu,vv,pl,pt_u,pt_v - real(kind=R_GRID), dimension(2) :: pa - real(kind=R_GRID), dimension(3) :: e1,e2,ex,ey - real, dimension(is:ie,js:je+1) :: gz_u,p_u,peln_u,ps_u,u1,u2 - real(kind=R_GRID), dimension(is:ie,js:je+1) :: lat_u,lon_u - real, dimension(is:ie+1,js:je) :: gz_v,p_v,peln_v,ps_v,v1,v2 - real(kind=R_GRID), dimension(is:ie+1,js:je) :: lat_v,lon_v + nullify(ntiles_g) + nullify(acapN) + nullify(acapS) + nullify(globalarea) - !Compute ps, phis, delp, aux pressure variables, Temperature, winds - ! (with or without perturbation), moisture, Terminator tracer, w, delz + end subroutine init_double_periodic - !Compute p, z, T on both the staggered and unstaggered grids. Then compute the zonal - ! 
and meridional winds on both grids, and rotate as needed + subroutine read_namelist_test_case_nml(nml_filename) - !PS - do j=js,je - do i=is,ie - ps(i,j) = p0 - enddo - enddo + character(*), intent(IN) :: nml_filename + integer :: ierr, f_unit, unit, ios - !delp - do k=1,npz - do j=js,je - do i=is,ie - delp(i,j,k) = ak(k+1)-ak(k) + ps(i,j)*(bk(k+1)-bk(k)) - enddo - enddo - enddo +#include - !Pressure variables - do j=js,je - do i=is,ie - pe(i,1,j) = ptop - enddo - do i=is,ie - peln(i,1,j) = log(ptop) - pk(i,j,1) = ptop**kappa - enddo - do k=2,npz+1 - do i=is,ie - pe(i,k,j) = ak(k) + ps (i,j)*bk(k) - enddo - do i=is,ie - pk(i,j,k) = exp(kappa*log(pe(i,k,j))) - peln(i,k,j) = log(pe(i,k,j)) - enddo - enddo - enddo + unit = stdlog() - do k=1,npz - do j=js,je - do i=is,ie - pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) - enddo - enddo - enddo + ! Make alpha = 0 the default: + alpha = 0. + bubble_do = .false. + test_case = 11 ! (USGS terrain) + namelist /test_case_nml/test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size - !Height: Use Newton's method - !Cell centered - do j=js,je - do i=is,ie - phis(i,j) = 0. - gz(i,j,npz+1) = 0. - enddo - enddo - do k=npz,1,-1 - do j=js,je - do i=is,ie - p = pe(i,k,j) - z = gz(i,j,k+1) - do iter=1,30 - ziter = z - piter = DCMIP16_BC_pressure(ziter,agrid(i,j,2)) - titer = DCMIP16_BC_temperature(ziter,agrid(i,j,2)) - z = ziter + (piter - p)*rdgrav*titer/piter -!!$ !!! DEBUG CODE -!!$ if (is_master() .and. i == is .and. j == js) then -!!$ write(*,'(A,I,2x,I, 4(2x,F10.3), 2x, F7.3)') ' NEWTON: ' , k, iter, piter, p, ziter, z, titer -!!$ endif -!!$ !!! END DEBUG CODE - if (abs(z - ziter) < zconv) exit - enddo - gz(i,j,k) = z - enddo - enddo - enddo +#ifdef INTERNAL_FILE_NML + ! Read Test_Case namelist + read (input_nml_file,test_case_nml,iostat=ios) + ierr = check_nml_error(ios,'test_case_nml') +#else + f_unit = open_namelist_file(nml_filename) - !Temperature: Compute from hydro balance - do k=1,npz - do j=js,je - do i=is,ie - pt(i,j,k) = rrdgrav * ( gz(i,j,k) - gz(i,j,k+1) ) / ( peln(i,k+1,j) - peln(i,k,j)) - enddo - enddo - enddo + ! Read Test_Case namelist + rewind (f_unit) + read (f_unit,test_case_nml,iostat=ios) + ierr = check_nml_error(ios,'test_case_nml') + call close_file(f_unit) +#endif + write(unit, nml=test_case_nml) - !Compute height and temperature for u and v points also, to be able to compute the local winds - !Use temporary 2d arrays for this purpose - do j=js,je+1 - do i=is,ie - gz_u(i,j) = 0. - p_u(i,j) = p0 - peln_u(i,j) = log(p0) - ps_u(i,j) = p0 - call mid_pt_sphere(grid(i,j,:),grid(i+1,j,:),pa) - lat_u(i,j) = pa(2) - lon_u(i,j) = pa(1) - call get_unit_vect2(grid(i,j,:),grid(i+1,j,:),e1) - call get_latlon_vector(pa,ex,ey) - u1(i,j) = inner_prod(e1,ex) !u components - u2(i,j) = inner_prod(e1,ey) - enddo - enddo - do k=npz,1,-1 - do j=js,je+1 - do i=is,ie - !Pressure (Top of interface) - p = ak(k) + ps_u(i,j)*bk(k) - pl = log(p) - !Height (top of interface); use newton's method - z = gz_u(i,j) !first guess, height of lower level - z0 = z - do iter=1,30 - ziter = z - piter = DCMIP16_BC_pressure(ziter,lat_u(i,j)) - titer = DCMIP16_BC_temperature(ziter,lat_u(i,j)) - z = ziter + (piter - p)*rdgrav*titer/piter - if (abs(z - ziter) < zconv) exit - enddo - !Temperature, compute from hydro balance - pt_u = rrdgrav * ( z - gz_u(i,j) ) / (peln_u(i,j) - pl) - !Now compute winds. Note no meridional winds - !!!NOTE: do we need to use LAYER-mean z? 
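!-------------------------------------------------------------------------------
! Every height loop in this initializer inverts the analytic pressure profile
! with the same fixed-point update, obtained from the hydrostatic relation
! dp/dz = -g*p/(R*T):
!
!     z_{n+1} = z_n + (p(z_n) - p_target) * (R*T(z_n)/g) / p(z_n)
!
! A self-contained sketch, with a toy isothermal atmosphere standing in for
! the DCMIP16_BC_pressure/DCMIP16_BC_temperature pair (all names and values
! here are illustrative, not the model's):

program newton_z_sketch
  implicit none
  real, parameter :: grav = 9.80665, rdgas = 287.04, rdgrav = rdgas/grav
  real, parameter :: t00 = 250., p00 = 1.e5, zconv = 1.e-6
  real :: p, z, ziter
  integer :: iter

  p = 5.e4                ! target pressure (Pa)
  z = 0.                  ! first guess: height of the level below
  do iter = 1, 30
     ziter = z
     z = ziter + (pres(ziter) - p)*rdgrav*temp(ziter)/pres(ziter)
     if (abs(z - ziter) < zconv) exit
  enddo
  print *, 'z at 500 hPa (m):', z   ! ~ (R*T/g)*log(2) ~ 5072 m

contains
  real function pres(zz)            ! isothermal p(z) = p00*exp(-z/H)
    real, intent(in) :: zz
    pres = p00*exp(-zz/(rdgrav*t00))
  end function pres
  real function temp(zz)            ! isothermal T(z) = t00
    real, intent(in) :: zz
    temp = t00
  end function temp
end program newton_z_sketch
!-------------------------------------------------------------------------------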
- uu = DCMIP16_BC_uwind(0.5*(z+z0),pt_u,lat_u(i,j)) - if (do_pert) then - uu = uu + DCMIP16_BC_uwind_pert(0.5*(z+z0),lat_u(i,j),lon_u(i,j)) - endif - u(i,j,k) = u1(i,j)*uu - gz_u(i,j) = z - p_u(i,j) = p - peln_u(i,j) = pl - enddo - enddo - enddo + end subroutine read_namelist_test_case_nml + + + subroutine SuperK_Sounding(km, pe, p00, ze, pt, qz) +! This is the z-ccordinate version: +! Morris Weisman & J. Klemp 2002 sounding + integer, intent(in):: km + real, intent(in):: p00 + real, intent(inout), dimension(km+1):: pe + real, intent(in), dimension(km+1):: ze +! pt: potential temperature / pk0 +! qz: specific humidity (mixing ratio) + real, intent(out), dimension(km):: pt, qz +! Local: + integer, parameter:: nx = 5 + real, parameter:: qst = 1.0e-6 + real, parameter:: qv0 = 1.4e-2 + real, parameter:: ztr = 12.E3 + real, parameter:: ttr = 213. + real, parameter:: ptr = 343. ! Tropopause potential temp. + real, parameter:: pt0 = 300. ! surface potential temperature + real, dimension(km):: zs, rh, temp, dp, dp0 + real, dimension(km+1):: peln, pk + real:: qs, zvir, fac_z, pk0, temp1, pm + integer:: k, n, kk + + zvir = rvgas/rdgas - 1. + pk0 = p00**kappa + if ( (is_master()) ) then + write(*,*) 'Computing sounding for HIWPP super-cell test using p00=', p00 + endif + + qz(:) = qst + rh(:) = 0.25 + + do k=1, km + zs(k) = 0.5*(ze(k)+ze(k+1)) +! Potential temperature + if ( zs(k) .gt. ztr ) then +! Stratosphere: + pt(k) = ptr*exp(grav*(zs(k)-ztr)/(cp_air*ttr)) + else +! Troposphere: + fac_z = (zs(k)/ztr)**1.25 + pt(k) = pt0 + (ptr-pt0)* fac_z + rh(k) = 1. - 0.75 * fac_z +! First guess on q: + qz(k) = qv0 - (qv0-qst)*fac_z + endif + if ( is_master() ) write(*,*) zs(k), pt(k), qz(k) +! Convert to FV's definition of potential temperature + pt(k) = pt(k) / pk0 + enddo + +#ifdef USE_MOIST_P00 +!-------------------------------------- +! Iterate nx times with virtual effect: +!-------------------------------------- +! pt & height remain unchanged + pk(km+1) = pk0 + pe(km+1) = p00 ! Dry + peln(km+1) = log(p00) + + do n=1, nx +! Derive pressure fields from hydrostatic balance: + do k=km,1,-1 + pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k))) + peln(k) = log(pk(k)) / kappa + pe(k) = exp(peln(k)) + enddo + do k=1, km + pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k)) + temp(k) = pt(k)*pm**kappa +! NCAR form: + qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) + qz(k) = min( qv0, rh(k)*qs ) + if ( n==nx .and. is_master() ) write(*,*) 0.01*pm, temp(k), qz(k), qs + enddo + enddo +#else +! pt & height remain unchanged + pk(km+1) = pk0 + pe(km+1) = p00 ! Dry + peln(km+1) = log(p00) + +! Derive "dry" pressure fields from hydrostatic balance: + do k=km,1,-1 + pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)) + peln(k) = log(pk(k)) / kappa + pe(k) = exp(peln(k)) + enddo + do k=1, km + dp0(k) = pe(k+1) - pe(k) + pm = dp0(k)/(peln(k+1)-peln(k)) + temp(k) = pt(k)*pm**kappa +! NCAR form: + qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) + qz(k) = min( qv0, rh(k)*qs ) + enddo + + do n=1, nx + + do k=1, km + dp(k) = dp0(k)*(1. + qz(k)) ! moist air + pe(k+1) = pe(k) + dp(k) + enddo +! dry pressure, pt & height remain unchanged + pk(km+1) = pe(km+1)**kappa + peln(km+1) = log(pe(km+1)) + +! Derive pressure fields from hydrostatic balance: + do k=km,1,-1 + pk(k) = pk(k+1) - grav*(ze(k)-ze(k+1))/(cp_air*pt(k)*(1.+zvir*qz(k))) + peln(k) = log(pk(k)) / kappa + pe(k) = exp(peln(k)) + enddo + do k=1, km + pm = (pe(k+1)-pe(k))/(peln(k+1)-peln(k)) + temp(k) = pt(k)*pm**kappa +! 
NCAR form: + qs = 380./pm*exp(17.27*(temp(k)-273.)/(temp(k)-36.)) + qz(k) = min( qv0, rh(k)*qs ) + if ( n==nx .and. is_master() ) write(*,*) 0.01*pm, temp(k), qz(k), qs + enddo + enddo +#endif + + if ( is_master() ) then + write(*,*) 'Super_K: computed ptop (mb)=', 0.01*pe(1), ' PS=', 0.01*pe(km+1) + call prt_m1('1D Sounding T0', temp, 1, km, 1, 1, 0, 1, 1.) + endif + + end subroutine SuperK_Sounding - do j=js,je - do i=is,ie+1 - gz_v(i,j) = 0. - p_v(i,j) = p0 - peln_v(i,j) = log(p0) - ps_v(i,j) = p0 - call mid_pt_sphere(grid(i,j,:),grid(i,j+1,:),pa) - lat_v(i,j) = pa(2) - lon_v(i,j) = pa(1) - call get_unit_vect2(grid(i,j,:),grid(i,j+1,:),e2) - call get_latlon_vector(pa,ex,ey) - v1(i,j) = inner_prod(e2,ex) !v components - v2(i,j) = inner_prod(e2,ey) - enddo - enddo - do k=npz,1,-1 - do j=js,je - do i=is,ie+1 - !Pressure (Top of interface) - p = ak(k) + ps_v(i,j)*bk(k) - pl = log(p) - !Height (top of interface); use newton's method - z = gz_v(i,j) !first guess, height of lower level - z0 = z - do iter=1,30 - ziter = z - piter = DCMIP16_BC_pressure(ziter,lat_v(i,j)) - titer = DCMIP16_BC_temperature(ziter,lat_v(i,j)) - z = ziter + (piter - p)*rdgrav*titer/piter - if (abs(z - ziter) < zconv) exit - enddo - !Temperature, compute from hydro balance - pt_v = rrdgrav * ( z - gz_v(i,j) ) / (peln_v(i,j) - pl) - !Now compute winds - uu = DCMIP16_BC_uwind(0.5*(z+z0),pt_v,lat_v(i,j)) - if (do_pert) then - uu = uu + DCMIP16_BC_uwind_pert(0.5*(z+z0),lat_v(i,j),lon_v(i,j)) - endif - v(i,j,k) = v1(i,j)*uu - gz_v(i,j) = z - p_v(i,j) = p - peln_v(i,j) = pl - enddo - enddo - enddo + subroutine balanced_K(km, is, ie, js, je, ng, ps0, ze1, ts1, qs1, uz1, dudz, pe, pk, pt, & + delz, zvir, ptop, ak, bk, agrid) + integer, intent(in):: is, ie, js, je, ng, km + real, intent(in), dimension(km ):: ts1, qs1, uz1, dudz + real, intent(in), dimension(km+1):: ze1 + real, intent(in):: zvir, ps0 + real, intent(inout):: ptop + real(kind=R_GRID), intent(in):: agrid(is-ng:ie+ng,js-ng:je+ng,2) + real, intent(inout), dimension(km+1):: ak, bk + real, intent(inout), dimension(is:ie,js:je,km):: pt + real, intent(inout), dimension(is:,js:,1:) :: delz + real, intent(out), dimension(is:ie,js:je,km+1):: pk +! pt is FV's cp*thelta_v + real, intent(inout), dimension(is-1:ie+1,km+1,js-1:je+1):: pe +! Local + integer, parameter:: nt=5 + integer, parameter:: nlat=1001 + real, dimension(nlat,km):: pt2, pky, dzc + real, dimension(nlat,km+1):: pk2, pe2, peln2, pte + real, dimension(km+1):: pe1 + real:: lat(nlat), latc(nlat-1) + real:: fac_y, dlat, dz0, pk0, tmp1, tmp2, tmp3, pint + integer::i,j,k,n, jj, k1 + real:: p00=1.e5 - !Compute moisture and other tracer fields, as desired - do n=1,nq - do k=1,npz - do j=jsd,jed - do i=isd,ied - q(i,j,k,n) = 0. - enddo - enddo - enddo - enddo - if (.not. adiabatic) then - sphum = get_tracer_index (MODEL_ATMOS, 'sphum') - do k=1,npz - do j=js,je - do i=is,ie - p = delp(i,j,k)/(peln(i,k+1,j) - peln(i,k,j)) - q(i,j,k,sphum) = DCMIP16_BC_sphum(p,ps(i,j),agrid(i,j,2),agrid(i,j,1)) - !Convert pt to non-virtual temperature - pt(i,j,k) = pt(i,j,k) / ( 1. + zvir*q(i,j,k,sphum)) - enddo - enddo - enddo - endif + pk0 = p00**kappa + dz0 = ze1(km) - ze1(km+1) +!!! dzc(:,:) =dz0 - cl = get_tracer_index(MODEL_ATMOS, 'cl') - cl2 = get_tracer_index(MODEL_ATMOS, 'cl2') - if (cl > 0 .and. 
cl2 > 0) then - call terminator_tracers(is,ie,js,je,isd,ied,jsd,jed,npz, & - q, delp,nq,agrid(isd,jsd,1),agrid(isd,jsd,2)) - call mpp_update_domains(q,domain) - endif + dlat = 0.5*pi/real(nlat-1) + do j=1,nlat + lat(j) = dlat*real(j-1) + do k=1,km + dzc(j,k) = ze1(k) - ze1(k+1) + enddo + enddo + do j=1,nlat-1 + latc(j) = 0.5*(lat(j)+lat(j+1)) + enddo - !Compute nonhydrostatic variables, if needed - if (.not. hydrostatic) then - do k=1,npz - do j=js,je - do i=is,ie - w(i,j,k) = 0. - delz(i,j,k) = gz(i,j,k) - gz(i,j,k+1) - enddo - enddo - enddo - endif +! Initialize pt2 + do k=1,km + do j=1,nlat + pt2(j,k) = ts1(k) + enddo + enddo + if ( is_master() ) then + tmp1 = pk0/cp_air + call prt_m1('Super_K PT0', pt2, 1, nlat, 1, km, 0, 1, tmp1) + endif - contains +! pt2 defined from Eq to NP +! Check NP + do n=1, nt +! Compute edge values + call ppme(pt2, pte, dzc, nlat, km) + do k=1,km + do j=2,nlat + tmp1 = 0.5*(pte(j-1,k ) + pte(j,k )) + tmp3 = 0.5*(pte(j-1,k+1) + pte(j,k+1)) + pt2(j,k) = pt2(j-1,k) + dlat/(2.*grav)*sin(2.*latc(j-1))*uz1(k)* & + ( uz1(k)*(tmp1-tmp3)/dzc(j,k) - (pt2(j-1,k)+pt2(j,k))*dudz(k) ) + enddo + enddo + if ( is_master() ) then + call prt_m1('Super_K PT', pt2, 1, nlat, 1, km, 0, 1, pk0/cp_air) + endif + enddo +! +! Compute surface pressure using gradient-wind balance: +!!! pk2(1,km+1) = pk0 + pk2(1,km+1) = ps0**kappa ! fixed at equator + do j=2,nlat + pk2(j,km+1) = pk2(j-1,km+1) - dlat*uz1(km)*uz1(km)*sin(2.*latc(j-1)) & + / (pt2(j-1,km) + pt2(j,km)) + enddo +! Compute pressure using hydrostatic balance: + do j=1,nlat + do k=km,1,-1 + pk2(j,k) = pk2(j,k+1) - grav*dzc(j,k)/pt2(j,k) + enddo + enddo - - real function DCMIP16_BC_temperature(z, lat) + do k=1,km+1 + do j=1,nlat + peln2(j,k) = log(pk2(j,k)) / kappa + pe2(j,k) = exp(peln2(j,k)) + enddo + enddo +! Convert pt2 to temperature + do k=1,km + do j=1,nlat + pky(j,k) = (pk2(j,k+1)-pk2(j,k))/(kappa*(peln2(j,k+1)-peln2(j,k))) + pt2(j,k) = pt2(j,k)*pky(j,k)/(cp_air*(1.+zvir*qs1(k))) + enddo + enddo - real, intent(IN) :: z - real(kind=R_GRID), intent(IN) :: lat - real :: IT, T1, T2, Tr, zsc + do k=1,km+1 + pe1(k) = pe2(1,k) + enddo - IT = exp(KK * log(cos(lat))) - KK/(KK+2.)*exp((KK+2.)*log(cos(lat))) - zsc = z*grav/(b*Rdgas*T0) - Tr = ( 1. - 2.*zsc**2.) * exp(-zsc**2. ) + if ( is_master() ) then + write(*,*) 'SuperK ptop at EQ=', 0.01*pe1(1), 'new ptop=', 0.01*ptop + call prt_m1('Super_K pe', pe2, 1, nlat, 1, km+1, 0, 1, 0.01) + call prt_m1('Super_K Temp', pt2, 1, nlat, 1, km, 0, 1, 1.) + endif - T1 = (1./T0)*exp(lapse*z/T0) + (T0 - Tp)/(T0*Tp) * Tr - T2 = 0.5* ( KK + 2.) * (Te - Tp)/(Te*Tp) * Tr +! Interpolate (pt2, pk2) from lat-dir to cubed-sphere + do j=js, je + do i=is, ie + do jj=1,nlat-1 + if (abs(agrid(i,j,2))>=lat(jj) .and. abs(agrid(i,j,2))<=lat(jj+1) ) then +! found it ! + fac_y = (abs(agrid(i,j,2))-lat(jj)) / dlat + do k=1,km + pt(i, j,k) = pt2(jj, k) + fac_y*(pt2(jj+1, k)-pt2(jj,k)) + enddo + do k=1,km+1 + pe(i,k,j) = pe2(jj,k) + fac_y*(pe2(jj+1,k)-pe2(jj,k)) + enddo +! k = km+1 +! pk(i,j,k) = pk2(jj,k) + fac_y*(pk2(jj+1,k)-pk2(jj,k)) + goto 123 + endif + enddo +123 continue + enddo + enddo - DCMIP16_BC_temperature = 1./(T1 - T2*IT) +! Adjust pk +! ak & bk +! Adjusting model top to be a constant pressure surface, assuming isothermal atmosphere +! pe = ak + bk*ps +! One pressure layer + pe1(1) = ptop + ak(1) = ptop + pint = pe1(2) + bk(1) = 0. + ak(2) = pint + bk(2) = 0. + do k=3,km+1 + bk(k) = (pe1(k) - pint) / (pe1(km+1)-pint) ! 
bk == sigma + ak(k) = pe1(k) - bk(k) * pe1(km+1) + if ( is_master() ) write(*,*) k, ak(k), bk(k) + enddo + ak(km+1) = 0. + bk(km+1) = 1. + do j=js, je + do i=is, ie + pe(i,1,j) = ptop + enddo + enddo - end function DCMIP16_BC_temperature - real function DCMIP16_BC_pressure(z,lat) + end subroutine balanced_K - real, intent(IN) :: z - real(kind=R_GRID), intent(IN) :: lat - real :: IT, Ti1, Ti2, Tir + subroutine SuperK_u(km, zz, um, dudz) + integer, intent(in):: km + real, intent(in):: zz(km) + real, intent(out):: um(km), dudz(km) +! Local + real, parameter:: zs = 5.e3 + real, parameter:: us = 30. + real:: uc = 15. + integer k - IT = exp(KK * log(cos(lat))) - KK/(KK+2.)*exp((KK+2.)*log(cos(lat))) - Tir = z*exp(-(z*grav/(b*Rdgas*T0))*(z*grav/(b*Rdgas*T0)) ) + do k=1, km +#ifndef TEST_TANHP +! MPAS specification: + if ( zz(k) .gt. zs+1.e3 ) then + um(k) = us + dudz(k) = 0. + elseif ( abs(zz(k)-zs) .le. 1.e3 ) then + um(k) = us*(-4./5. + 3.*zz(k)/zs - 5./4.*(zz(k)/zs)**2) + dudz(k) = us/zs*(3. - 5./2.*zz(k)/zs) + else + um(k) = us*zz(k)/zs + dudz(k) = us/zs + endif +! constant wind so as to make the storm relatively stationary + um(k) = um(k) - uc +#else + uc = 12. ! this gives near stationary (in longitude) storms + um(k) = us*tanh( zz(k)/zs ) - uc + dudz(k) = (us/zs)/cosh(zz(k)/zs)**2 +#endif + enddo - Ti1 = 1./lapse* (exp(lapse*z/T0) - 1.) + Tir*(T0-Tp)/(T0*Tp) - Ti2 = 0.5*(KK+2.)*(Te-Tp)/(Te*Tp) * Tir + end subroutine superK_u - DCMIP16_BC_pressure = p0*exp(-grav/Rdgas * ( Ti1 - Ti2*IT)) - end function DCMIP16_BC_pressure + subroutine SuperCell_Sounding(km, ps, pk1, tp, qp) + use gfdl_cloud_microphys_mod, only: wqsat_moist, qsmith_init, qs_blend +! Morris Weisman & J. Klemp 2002 sounding +! Output sounding on pressure levels: + integer, intent(in):: km + real, intent(in):: ps ! surface pressure (Pa) + real, intent(in), dimension(km):: pk1 + real, intent(out), dimension(km):: tp, qp +! Local: + integer, parameter:: ns = 401 + integer, parameter:: nx = 3 + real, dimension(ns):: zs, pt, qs, us, rh, pp, pk, dpk, dqdt + real, parameter:: Tmin = 175. + real, parameter:: p00 = 1.0e5 + real, parameter:: qst = 3.0e-6 + real, parameter:: qv0 = 1.4e-2 + real, parameter:: ztr = 12.E3 + real, parameter:: ttr = 213. + real, parameter:: ptr = 343. ! Tropopause potential temp. + real, parameter:: pt0 = 300. ! surface potential temperature + real:: dz0, zvir, fac_z, pk0, temp1, p2 + integer:: k, n, kk - real function DCMIP16_BC_uwind(z,T,lat) +#ifdef GFS_PHYS - real, intent(IN) :: z, T - real(kind=R_GRID), intent(IN) :: lat - real :: Tir, Ti2, UU, ur + call mpp_error(FATAL, 'SuperCell sounding cannot perform with GFS Physics.') - Tir = z*exp(-(z*grav/(b*Rdgas*T0))*(z*grav/(b*Rdgas*T0)) ) - Ti2 = 0.5*(KK+2.)*(Te-Tp)/(Te*Tp) * Tir +#else - UU = grav*KK/radius * Ti2 * ( cos(lat)**(int(KK)-1) - cos(lat)**(int(KK)+1) ) * T - ur = - omega * radius * cos(lat) + sqrt( (omega*radius*cos(lat))**2 + radius*cos(lat)*UU) + zvir = rvgas/rdgas - 1. + pk0 = p00**kappa + pp(ns) = ps + pk(ns) = ps**kappa + if ( (is_master()) ) then + write(*,*) 'Computing sounding for super-cell test' + endif - DCMIP16_BC_uwind = ur + call qsmith_init - end function DCMIP16_BC_uwind + dz0 = 50. + zs(ns) = 0. + qs(:) = qst + rh(:) = 0.25 - real function DCMIP16_BC_uwind_pert(z,lat,lon) + do k=ns-1, 1, -1 + zs(k) = zs(k+1) + dz0 + enddo - real, intent(IN) :: z - real(kind=R_GRID), intent(IN) :: lat, lon - real :: ZZ, zrat - real(kind=R_GRID) :: dst, pphere(2) + do k=1,ns +! Potential temperature + if ( zs(k) .gt. ztr ) then +! 
Stratosphere: + pt(k) = ptr*exp(grav*(zs(k)-ztr)/(cp_air*ttr)) + else +! Troposphere: + fac_z = (zs(k)/ztr)**1.25 + pt(k) = pt0 + (ptr-pt0)* fac_z + rh(k) = 1. - 0.75 * fac_z +! First guess on q: + qs(k) = qv0 - (qv0-qst)*fac_z + endif + pt(k) = pt(k) / pk0 + enddo - zrat = z/zp - ZZ = max(1. - 3.*zrat*zrat + 2.*zrat*zrat*zrat, 0.) +!-------------------------------------- +! Iterate nx times with virtual effect: +!-------------------------------------- + do n=1, nx + do k=1,ns-1 + temp1 = 0.5*(pt(k)*(1.+zvir*qs(k)) + pt(k+1)*(1.+zvir*qs(k+1))) + dpk(k) = grav*(zs(k)-zs(k+1))/(cp_air*temp1) ! DPK > 0 + enddo - pphere = (/ lon, lat /) - dst = great_circle_dist(pphere, ppcenter, radius) - - DCMIP16_BC_uwind_pert = max(0., up*ZZ*exp(-(dst/Rp)**2) ) + do k=ns-1,1,-1 + pk(k) = pk(k+1) - dpk(k) + enddo - end function DCMIP16_BC_uwind_pert + do k=1, ns + temp1 = pt(k)*pk(k) +! if ( (is_master()) ) write(*,*) k, temp1, rh(k) + if ( pk(k) > 0. ) then + pp(k) = exp(log(pk(k))/kappa) +#ifdef SUPER_K + qs(k) = 380./pp(k)*exp(17.27*(temp1-273.)/(temp1-36.)) + qs(k) = min( qv0, rh(k)*qs(k) ) + if ( (is_master()) ) write(*,*) 0.01*pp(k), qs(k) +#else - real function DCMIP16_BC_sphum(p,ps,lat, lon) +#ifdef USE_MIXED_TABLE + qs(k) = min(qv0, rh(k)*qs_blend(temp1, pp(k), qs(k))) +#else + qs(k) = min(qv0, rh(k)*wqsat_moist(temp1, qs(k), pp(k))) +#endif - real, intent(IN) :: p, ps - real(kind=R_GRID), intent(IN) :: lat, lon - real :: eta +#endif + else + if ( (is_master()) ) write(*,*) n, k, pk(k) + call mpp_error(FATAL, 'Super-Cell case: pk < 0') + endif + enddo + enddo - eta = p/ps +! Interpolate to p levels using pk1: p**kappa + do 555 k=1, km + if ( pk1(k) .le. pk(1) ) then + tp(k) = pt(1)*pk(1)/pk1(k) ! isothermal above + qp(k) = qst ! set to stratosphere value + elseif ( pk1(k) .ge. pk(ns) ) then + tp(k) = pt(ns) + qp(k) = qs(ns) + else + do kk=1,ns-1 + if( (pk1(k).le.pk(kk+1)) .and. (pk1(k).ge.pk(kk)) ) then + fac_z = (pk1(k)-pk(kk))/(pk(kk+1)-pk(kk)) + tp(k) = pt(kk) + (pt(kk+1)-pt(kk))*fac_z + qp(k) = qs(kk) + (qs(kk+1)-qs(kk))*fac_z + goto 555 + endif + enddo + endif +555 continue - DCMIP16_BC_sphum = qt - if (p > ptrop) then - DCMIP16_BC_sphum = q0 * exp(-(lat/phiW)**4) * exp(-( (eta-1.)*p0/pw)**2) - endif + do k=1,km + tp(k) = tp(k)*pk1(k) ! 
temperature + tp(k) = max(Tmin, tp(k)) + enddo - end function DCMIP16_BC_sphum +#endif - end subroutine DCMIP16_BC + end subroutine SuperCell_Sounding - subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& + subroutine DCMIP16_BC(delp,pt,u,v,q,w,delz,& is,ie,js,je,isd,ied,jsd,jed,npz,nq,ak,bk,ptop, & pk,peln,pe,pkz,gz,phis,ps,grid,agrid, & - hydrostatic, nwat, adiabatic) + hydrostatic, nwat, adiabatic, do_pert, domain, bd) + + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: is,ie,js,je,isd,ied,jsd,jed,npz,nq, nwat real, intent(IN) :: ptop real, intent(IN), dimension(npz+1) :: ak, bk real, intent(INOUT), dimension(isd:ied,jsd:jed,npz,nq) :: q - real, intent(OUT), dimension(isd:ied,jsd:jed,npz) :: delp, pt, w, delz + real, intent(OUT), dimension(isd:ied,jsd:jed,npz) :: delp, pt, w + real, intent(OUT), dimension(is:,js:,1:) :: delz real, intent(OUT), dimension(isd:ied,jsd:jed+1,npz) :: u real, intent(OUT), dimension(isd:ied+1,jsd:jed,npz) :: v real, intent(OUT), dimension(is:ie,js:je,npz+1) :: pk @@ -7425,63 +7314,60 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& real(kind=R_GRID), intent(IN), dimension(isd:ied,jsd:jed,2) :: agrid real(kind=R_GRID), intent(IN), dimension(isd:ied+1,jsd:jed+1,2) :: grid real, intent(OUT), dimension(isd:ied,jsd:jed,npz+1) :: gz - logical, intent(IN) :: hydrostatic,adiabatic + logical, intent(IN) :: hydrostatic,adiabatic,do_pert + type(domain2d), intent(INOUT) :: domain - real, parameter :: zt = 15000 ! m - real, parameter :: q0 = 0.021 ! kg/kg - real, parameter :: qt = 1.e-11 ! kg/kg - real, parameter :: T0 = 302.15 ! K - real, parameter :: Tv0 = 302.15*(1.+0.608*q0) ! K - real, parameter :: Ts = 302.15 ! K - real, parameter :: zq1 = 3000. ! m - real, parameter :: zq2 = 8000. ! m - real, parameter :: lapse = 7.e-3 ! K/m - real, parameter :: Tvt = Tv0 - lapse*zt ! K - real, parameter :: pb = 101500. ! Pa - real, parameter :: ptt = pb*(TvT/Tv0)**(grav/Rdgas/lapse) - real(kind=R_GRID), parameter :: lamp = pi - real(kind=R_GRID), parameter :: phip = pi/18. + real, parameter :: p0 = 1.e5 + real, parameter :: u0 = 35. + real, parameter :: b = 2. + real, parameter :: KK = 3. + real, parameter :: Te = 310. + real, parameter :: Tp = 240. + real, parameter :: T0 = 0.5*(Te + Tp) !!WRONG in document + real, parameter :: up = 1. + real, parameter :: zp = 1.5e4 + real(kind=R_GRID), parameter :: lamp = pi/9. + real(kind=R_GRID), parameter :: phip = 2.*lamp real(kind=R_GRID), parameter :: ppcenter(2) = (/ lamp, phip /) - real, parameter :: dp = 1115. ! Pa - real, parameter :: rp = 282000. ! m - real, parameter :: zp = 7000. ! m - real, parameter :: fc = 2.*OMEGA*sin(phip) + real, parameter :: Rp = radius/10. + real, parameter :: lapse = 5.e-3 + real, parameter :: dT = 4.8e5 + real, parameter :: phiW = 2.*pi/9. + real, parameter :: pW = 34000. + real, parameter :: q0 = .018 + real, parameter :: qt = 1.e-12 + real, parameter :: ptrop = 1.e4 real, parameter :: zconv = 1.e-6 real, parameter :: rdgrav = rdgas/grav + !real, parameter :: zvir = rvgas/rdgas - 1. 
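!-------------------------------------------------------------------------------
! zvir = rvgas/rdgas - 1 (about 0.608) is the virtual-temperature factor,
! Tv = T*(1 + zvir*q). The hydrostatic inversion below yields virtual
! temperature, and once sphum is set the code recovers the dry-bulb
! temperature via pt = pt/(1 + zvir*q). A spot check with illustrative values:

program zvir_sketch
  implicit none
  real, parameter :: rvgas = 461.50, rdgas = 287.04
  real :: zvir, tv, qv, t

  zvir = rvgas/rdgas - 1.    ! ~0.6078
  tv   = 300.                ! virtual temperature (K)
  qv   = 0.018               ! specific humidity (kg/kg)
  t    = tv/(1. + zvir*qv)   ! ~296.8 K
  print *, 'T =', t
end program zvir_sketch
!-------------------------------------------------------------------------------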
+ real :: zvir real, parameter :: rrdgrav = grav/rdgas integer :: i,j,k,iter, sphum, cl, cl2, n - real :: p,z,z0,ziter,piter,titer,uu,vv,pl, r + real :: p,z,z0,ziter,piter,titer,uu,vv,pl,pt_u,pt_v real(kind=R_GRID), dimension(2) :: pa real(kind=R_GRID), dimension(3) :: e1,e2,ex,ey - real, dimension(is:ie,js:je) :: rc - real, dimension(is:ie,js:je+1) :: gz_u,p_u,peln_u,ps_u,u1,u2, rc_u + real, dimension(is:ie,js:je+1) :: gz_u,p_u,peln_u,ps_u,u1,u2 real(kind=R_GRID), dimension(is:ie,js:je+1) :: lat_u,lon_u - real, dimension(is:ie+1,js:je) :: gz_v,p_v,peln_v,ps_v,v1,v2, rc_v + real, dimension(is:ie+1,js:je) :: gz_v,p_v,peln_v,ps_v,v1,v2 real(kind=R_GRID), dimension(is:ie+1,js:je) :: lat_v,lon_v !Compute ps, phis, delp, aux pressure variables, Temperature, winds - ! (with or without perturbation), moisture, w, delz + ! (with or without perturbation), moisture, Terminator tracer, w, delz !Compute p, z, T on both the staggered and unstaggered grids. Then compute the zonal ! and meridional winds on both grids, and rotate as needed - - !Save r for easy use - do j=js,je - do i=is,ie - rc(i,j) = great_circle_dist(agrid(i,j,:), ppcenter, radius) - enddo - enddo + zvir = rvgas/rdgas - 1. !PS do j=js,je do i=is,ie - ps(i,j) = pb - dp*exp( -sqrt((rc(i,j)/rp)**3) ) + ps(i,j) = p0 enddo enddo - !delp + !delp do k=1,npz do j=js,je do i=is,ie @@ -7533,8 +7419,8 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& z = gz(i,j,k+1) do iter=1,30 ziter = z - piter = DCMIP16_TC_pressure(ziter,rc(i,j)) - titer = DCMIP16_TC_temperature(ziter,rc(i,j)) + piter = DCMIP16_BC_pressure(ziter,agrid(i,j,2)) + titer = DCMIP16_BC_temperature(ziter,agrid(i,j,2)) z = ziter + (piter - p)*rdgrav*titer/piter !!$ !!! DEBUG CODE !!$ if (is_master() .and. i == is .and. j == js) then @@ -7542,13 +7428,13 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& !!$ endif !!$ !!! END DEBUG CODE if (abs(z - ziter) < zconv) exit - enddo + enddo gz(i,j,k) = z enddo enddo enddo - !Temperature: Compute from hydro balance + !(Virtual) Temperature: Compute from hydro balance do k=1,npz do j=js,je do i=is,ie @@ -7557,10 +7443,16 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& enddo enddo + call mpp_update_domains(pt, domain) + call mpp_update_domains(gz, domain) !Compute height and temperature for u and v points also, to be able to compute the local winds !Use temporary 2d arrays for this purpose do j=js,je+1 do i=is,ie + gz_u(i,j) = 0. + p_u(i,j) = p0 + peln_u(i,j) = log(p0) + ps_u(i,j) = p0 call mid_pt_sphere(grid(i,j,:),grid(i+1,j,:),pa) lat_u(i,j) = pa(2) lon_u(i,j) = pa(1) @@ -7568,11 +7460,6 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& call get_latlon_vector(pa,ex,ey) u1(i,j) = inner_prod(e1,ex) !u components u2(i,j) = inner_prod(e1,ey) - rc_u(i,j) = great_circle_dist(pa, ppcenter, radius) - gz_u(i,j) = 0. 
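!-------------------------------------------------------------------------------
! The removed lines just below set the u-point surface pressure from the
! DCMIP16 tropical-cyclone vortex profile
!
!     ps(r) = pb - dp*exp( -(r/rp)**1.5 )
!
! i.e. a depression of dp Pa at the storm center that decays with
! great-circle radius r. A spot check with the constants declared in this
! hunk (printed values are approximate):

program tc_ps_sketch
  implicit none
  real, parameter :: pb = 101500., dp = 1115., rp = 282000.

  print *, ps(0.)   ! pb - dp          = 100385.0 Pa at the center
  print *, ps(rp)   ! pb - dp*exp(-1.) ~ 101089.8 Pa at r = rp

contains
  real function ps(r)
    real, intent(in) :: r
    ps = pb - dp*exp( -sqrt((r/rp)**3) )
  end function ps
end program tc_ps_sketch
!-------------------------------------------------------------------------------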
- p_u(i,j) = pb - dp*exp( -sqrt((rc_u(i,j)/rp)**3) ) - peln_u(i,j) = log(p_u(i,j)) - ps_u(i,j) = p_u(i,j) enddo enddo do k=npz,1,-1 @@ -7586,14 +7473,20 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& z0 = z do iter=1,30 ziter = z - piter = DCMIP16_TC_pressure(ziter,rc_u(i,j)) - titer = DCMIP16_TC_temperature(ziter,rc_u(i,j)) + piter = DCMIP16_BC_pressure(ziter,lat_u(i,j)) + titer = DCMIP16_BC_temperature(ziter,lat_u(i,j)) z = ziter + (piter - p)*rdgrav*titer/piter if (abs(z - ziter) < zconv) exit enddo - !Now compute winds - call DCMIP16_TC_uwind_pert(0.5*(z+z0),rc_u(i,j),lon_u(i,j),lat_u(i,j), uu, vv) - u(i,j,k) = u1(i,j)*uu + u2(i,j)*vv + !Temperature, compute from hydro balance + pt_u = rrdgrav * ( z - gz_u(i,j) ) / (peln_u(i,j) - pl) + !Now compute winds. Note no meridional winds + !!!NOTE: do we need to use LAYER-mean z? + uu = DCMIP16_BC_uwind(0.5*(z+z0),pt_u,lat_u(i,j)) + if (do_pert) then + uu = uu + DCMIP16_BC_uwind_pert(0.5*(z+z0),lat_u(i,j),lon_u(i,j)) + endif + u(i,j,k) = u1(i,j)*uu gz_u(i,j) = z p_u(i,j) = p @@ -7604,6 +7497,10 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& do j=js,je do i=is,ie+1 + gz_v(i,j) = 0. + p_v(i,j) = p0 + peln_v(i,j) = log(p0) + ps_v(i,j) = p0 call mid_pt_sphere(grid(i,j,:),grid(i,j+1,:),pa) lat_v(i,j) = pa(2) lon_v(i,j) = pa(1) @@ -7611,11 +7508,6 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& call get_latlon_vector(pa,ex,ey) v1(i,j) = inner_prod(e2,ex) !v components v2(i,j) = inner_prod(e2,ey) - rc_v(i,j) = great_circle_dist(pa, ppcenter, radius) - gz_v(i,j) = 0. - p_v(i,j) = pb - dp*exp( - sqrt((rc_v(i,j)/rp)**3) ) - peln_v(i,j) = log(p_v(i,j)) - ps_v(i,j) = p_v(i,j) enddo enddo do k=npz,1,-1 @@ -7629,14 +7521,19 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& z0 = z do iter=1,30 ziter = z - piter = DCMIP16_TC_pressure(ziter,rc_v(i,j)) - titer = DCMIP16_TC_temperature(ziter,rc_v(i,j)) + piter = DCMIP16_BC_pressure(ziter,lat_v(i,j)) + titer = DCMIP16_BC_temperature(ziter,lat_v(i,j)) z = ziter + (piter - p)*rdgrav*titer/piter if (abs(z - ziter) < zconv) exit enddo + !Temperature, compute from hydro balance + pt_v = rrdgrav * ( z - gz_v(i,j) ) / (peln_v(i,j) - pl) !Now compute winds - call DCMIP16_TC_uwind_pert(0.5*(z+z0),rc_v(i,j),lon_v(i,j),lat_v(i,j), uu, vv) - v(i,j,k) = v1(i,j)*uu + v2(i,j)*vv + uu = DCMIP16_BC_uwind(0.5*(z+z0),pt_v,lat_v(i,j)) + if (do_pert) then + uu = uu + DCMIP16_BC_uwind_pert(0.5*(z+z0),lat_v(i,j),lon_v(i,j)) + endif + v(i,j,k) = v1(i,j)*uu gz_v(i,j) = z p_v(i,j) = p peln_v(i,j) = pl @@ -7644,6 +7541,19 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& enddo enddo + !Compute nonhydrostatic variables, if needed + if (.not. hydrostatic) then + do k=1,npz + do j=js,je + do i=is,ie + w(i,j,k) = 0. + !Re-compute from hydro balance + delz(i,j,k) = rdgrav * (peln(i,k+1,j) - peln(i,k,j)) * pt(i,j,k) + !delz(i,j,k) = gz(i,j,k) - gz(i,j,k+1) + enddo + enddo + enddo + endif !Compute moisture and other tracer fields, as desired do n=1,nq do k=1,npz @@ -7654,25 +7564,30 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& enddo enddo enddo - if (.not. adiabatic) then sphum = get_tracer_index (MODEL_ATMOS, 'sphum') do k=1,npz do j=js,je do i=is,ie - z = 0.5*(gz(i,j,k) + gz(i,j,k+1)) - q(i,j,k,sphum) = DCMIP16_TC_sphum(z) + p = delp(i,j,k)/(peln(i,k+1,j) - peln(i,k,j)) + q(i,j,k,sphum) = DCMIP16_BC_sphum(p,ps(i,j),agrid(i,j,2),agrid(i,j,1)) enddo enddo enddo + + cl = get_tracer_index(MODEL_ATMOS, 'cl') + cl2 = get_tracer_index(MODEL_ATMOS, 'cl2') + if (cl > 0 .and. 
cl2 > 0) then + call terminator_tracers(is,ie,js,je,isd,ied,jsd,jed,npz, & + q, delp,nq,agrid(isd,jsd,1),agrid(isd,jsd,2),bd) + call mpp_update_domains(q,domain) endif - !Compute nonhydrostatic variables, if needed - if (.not. hydrostatic) then + if (.not. adiabatic) then do k=1,npz do j=js,je do i=is,ie - w(i,j,k) = 0. - delz(i,j,k) = gz(i,j,k) - gz(i,j,k+1) + !Convert pt to non-virtual temperature + pt(i,j,k) = pt(i,j,k) / ( 1. + zvir*q(i,j,k,sphum)) enddo enddo enddo @@ -7680,746 +7595,1094 @@ subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& contains - !Initialize with virtual temperature - real function DCMIP16_TC_temperature(z, r) - - real, intent(IN) :: z, r - real :: Tv, term1, term2 - - if (z > zt) then - DCMIP16_TC_temperature = Tvt - return - endif - - Tv = Tv0 - lapse*z - term1 = grav*zp*zp* ( 1. - pb/dp * exp( sqrt(r/rp)**3 + (z/zp)**2 ) ) - term2 = 2*rdgas*Tv*z - DCMIP16_TC_temperature = Tv + Tv*( 1./(1 + term2/term1) - 1.) - - end function DCMIP16_TC_temperature - - !Initialize with moist air mass - real function DCMIP16_TC_pressure(z, r) - - real, intent(IN) :: z, r - - if (z <= zt) then - DCMIP16_TC_pressure = pb*exp(grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) -dp* exp(-sqrt((r/rp)**3) - (z/zp)**2) * & - exp( grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) - else - DCMIP16_TC_pressure = ptt*exp(grav*(zt-z)/(Rdgas*Tvt)) - endif - - end function DCMIP16_TC_pressure - - subroutine DCMIP16_TC_uwind_pert(z,r,lon,lat,uu,vv) - - real, intent(IN) :: z, r - real(kind=R_GRID), intent(IN) :: lon, lat - real, intent(OUT) :: uu, vv - real :: rfac, Tvrd, vt, fr5, d1, d2, d - real(kind=R_GRID) :: dst, pphere(2) - - if (z > zt) then - uu = 0. - vv = 0. - return - endif - - rfac = sqrt(r/rp)**3 - - fr5 = 0.5*fc*r - Tvrd = (Tv0 - lapse*z)*Rdgas - - vt = -fr5 + sqrt( fr5**2 - (1.5 * rfac * Tvrd) / & - ( 1. 
+ 2*Tvrd*z/(grav*zp**2) - pb/dp*exp( rfac + (z/zp)**2) ) ) - - d1 = sin(phip)*cos(lat) - cos(phip)*sin(lat)*cos(lon - lamp) - d2 = cos(phip)*sin(lon - lamp) - d = max(1.e-25,sqrt(d1*d1 + d2*d2)) - uu = vt * d1/d - vv = vt * d2/d - - end subroutine DCMIP16_TC_uwind_pert - - real function DCMIP16_TC_sphum(z) + real function DCMIP16_BC_temperature(z, lat) real, intent(IN) :: z + real(kind=R_GRID), intent(IN) :: lat + real :: IT, T1, T2, Tr, zsc - DCMIP16_TC_sphum = qt - if (z < zt) then - DCMIP16_TC_sphum = q0 * exp(-z/zq1) * exp(-(z/zq2 )**2) - endif - - end function DCMIP16_TC_sphum - - end subroutine DCMIP16_TC - - subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, bk, & - gridstruct, npx, npy, npz, ng, ncnst, ndims, nregions, dry_mass, & - mountain, moist_phys, hybrid_z, delz, ze0, domain_in, tile_in) - - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) - - real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) - - real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) - real , intent(INOUT) :: pe(is-1:ie+1,npz+1,js-1:je+1) - real , intent(INOUT) :: pk(is:ie ,js:je ,npz+1) - real , intent(INOUT) :: peln(is :ie ,npz+1 ,js:je) - real , intent(INOUT) :: pkz(is:ie ,js:je ,npz ) - real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) - real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) - real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) - real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) - real , intent(inout) :: delz(isd:,jsd:,1:) - real , intent(inout) :: ze0(is:,js:,1:) - - real , intent(IN) :: ak(npz+1) - real , intent(IN) :: bk(npz+1) - - integer, intent(IN) :: npx, npy, npz - integer, intent(IN) :: ng, ncnst - integer, intent(IN) :: ndims - integer, intent(IN) :: nregions - integer,target,intent(IN):: tile_in - - real, intent(IN) :: dry_mass - logical, intent(IN) :: mountain - logical, intent(IN) :: moist_phys - logical, intent(IN) :: hybrid_z - - type(fv_grid_type), intent(IN), target :: gridstruct - type(domain2d), intent(IN), target :: domain_in - - real, pointer, dimension(:,:,:) :: agrid, grid - real, pointer, dimension(:,:) :: area, rarea, fC, f0 - real, pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 - real, pointer, dimension(:,:,:,:) :: ew, es - real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc - - logical, pointer :: cubed_sphere, latlon - - type(domain2d), pointer :: domain - integer, pointer :: tile - - logical, pointer :: have_south_pole, have_north_pole - - integer, pointer :: ntiles_g - real, pointer :: acapN, acapS, globalarea - - real(kind=R_GRID) :: p1(2), p2(2) - real :: r, r0 - integer :: i,j - - agrid => gridstruct%agrid - grid => gridstruct%grid - - area => gridstruct%area - - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - fC => gridstruct%fC - f0 => gridstruct%f0 - - ntiles_g => gridstruct%ntiles_g - acapN => gridstruct%acapN - acapS => gridstruct%acapS - globalarea => gridstruct%globalarea + IT = exp(KK * log(cos(lat))) - KK/(KK+2.)*exp((KK+2.)*log(cos(lat))) + zsc = z*grav/(b*Rdgas*T0) + Tr = ( 1. - 2.*zsc**2.) * exp(-zsc**2. ) - domain => domain_in - tile => tile_in + T1 = (1./T0)*exp(lapse*z/T0) + (T0 - Tp)/(T0*Tp) * Tr + T2 = 0.5* ( KK + 2.) 
* (Te - Tp)/(Te*Tp) * Tr - have_south_pole => gridstruct%have_south_pole - have_north_pole => gridstruct%have_north_pole + DCMIP16_BC_temperature = 1./(T1 - T2*IT) - do j=jsd,jed+1 - do i=isd,ied+1 - fc(i,j) = 2.*omega*( -cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) & - +sin(grid(i,j,2))*cos(alpha) ) - enddo - enddo - do j=jsd,jed - do i=isd,ied - f0(i,j) = 2.*omega*( -cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) & - +sin(agrid(i,j,2))*cos(alpha) ) - enddo - enddo + end function DCMIP16_BC_temperature - select case (test_case) - case ( 1 ) + real function DCMIP16_BC_pressure(z,lat) - Ubar = (2.0*pi*radius)/(12.0*86400.0) - phis = 0.0 - r0 = radius/3. !RADIUS radius/3. -!!$ p1(1) = 0. - p1(1) = pi/2. + pi_shift - p1(2) = 0. - do j=jsd,jed - do i=isd,ied - p2(1) = agrid(i,j,1) - p2(2) = agrid(i,j,2) - r = great_circle_dist( p1, p2, radius ) - if (r < r0) then - delp(i,j,1) = phis(i,j) + 0.5*(1.0+cos(PI*r/r0)) - else - delp(i,j,1) = phis(i,j) - endif - enddo - enddo - call init_latlon_winds(UBar, u, v, ua, va, uc, vc, 1, gridstruct) + real, intent(IN) :: z + real(kind=R_GRID), intent(IN) :: lat + real :: IT, Ti1, Ti2, Tir + IT = exp(KK * log(cos(lat))) - KK/(KK+2.)*exp((KK+2.)*log(cos(lat))) + Tir = z*exp(-(z*grav/(b*Rdgas*T0))*(z*grav/(b*Rdgas*T0)) ) -!!$ phis(:,:)=0. -!!$ -!!$ u (:,:,:)=10. -!!$ v (:,:,:)=10. -!!$ ua(:,:,:)=10. -!!$ va(:,:,:)=10. -!!$ uc(:,:,:)=10. -!!$ vc(:,:,:)=10. -!!$ pt(:,:,:)=1. -!!$ delp(:,:,:)=0. -!!$ -!!$ do j=js,je -!!$ if (j>10 .and. j<15) then -!!$ do i=is,ie -!!$ if (i>10 .and. i<15) then -!!$ delp(i,j,:)=1. -!!$ endif -!!$ enddo -!!$ endif -!!$ enddo -!!$ call mpp_update_domains( delp, domain ) + Ti1 = 1./lapse* (exp(lapse*z/T0) - 1.) + Tir*(T0-Tp)/(T0*Tp) + Ti2 = 0.5*(KK+2.)*(Te-Tp)/(Te*Tp) * Tir - end select + DCMIP16_BC_pressure = p0*exp(-grav/Rdgas * ( Ti1 - Ti2*IT)) - nullify(grid) - nullify(agrid) + end function DCMIP16_BC_pressure - nullify(area) + real function DCMIP16_BC_uwind(z,T,lat) - nullify(fC) - nullify(f0) + real, intent(IN) :: z, T + real(kind=R_GRID), intent(IN) :: lat + real :: Tir, Ti2, UU, ur - nullify(dx) - nullify(dy) - nullify(dxa) - nullify(dya) - nullify(rdxa) - nullify(rdya) - nullify(dxc) - nullify(dyc) + Tir = z*exp(-(z*grav/(b*Rdgas*T0))*(z*grav/(b*Rdgas*T0)) ) + Ti2 = 0.5*(KK+2.)*(Te-Tp)/(Te*Tp) * Tir - nullify(domain) - nullify(tile) - - nullify(have_south_pole) - nullify(have_north_pole) + UU = grav*KK/radius * Ti2 * ( cos(lat)**(int(KK)-1) - cos(lat)**(int(KK)+1) ) * T + ur = - omega * radius * cos(lat) + sqrt( (omega*radius*cos(lat))**2 + radius*cos(lat)*UU) - nullify(ntiles_g) - nullify(acapN) - nullify(acapS) - nullify(globalarea) + DCMIP16_BC_uwind = ur - end subroutine init_latlon + end function DCMIP16_BC_uwind - subroutine init_latlon_winds(UBar, u, v, ua, va, uc, vc, defOnGrid, gridstruct) + real function DCMIP16_BC_uwind_pert(z,lat,lon) - ! defOnGrid = -1:null_op, 0:All-Grids, 1:C-Grid, 2:D-Grid, 3:A-Grid, 4:A-Grid then Rotate, 5:D-Grid with unit vectors then Rotate + real, intent(IN) :: z + real(kind=R_GRID), intent(IN) :: lat, lon + real :: ZZ, zrat + real(kind=R_GRID) :: dst, pphere(2) - real, intent(INOUT) :: UBar - real, intent(INOUT) :: u(isd:ied ,jsd:jed+1) - real, intent(INOUT) :: v(isd:ied+1,jsd:jed ) - real, intent(INOUT) :: uc(isd:ied+1,jsd:jed ) - real, intent(INOUT) :: vc(isd:ied ,jsd:jed+1) - real, intent(INOUT) :: ua(isd:ied ,jsd:jed ) - real, intent(INOUT) :: va(isd:ied ,jsd:jed ) - integer, intent(IN) :: defOnGrid - type(fv_grid_type), intent(IN), target :: gridstruct + zrat = z/zp + ZZ = max(1. 
- 3.*zrat*zrat + 2.*zrat*zrat*zrat, 0.) - real :: p1(2),p2(2),p3(2),p4(2), pt(2) - real :: e1(3), e2(3), ex(3), ey(3) + pphere = (/ lon, lat /) + dst = great_circle_dist(pphere, ppcenter, radius) - real :: dist, r, r0 - integer :: i,j,k,n - real :: utmp, vtmp + DCMIP16_BC_uwind_pert = max(0., up*ZZ*exp(-(dst/Rp)**2) ) - real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 + end function DCMIP16_BC_uwind_pert - real, dimension(:,:,:), pointer :: grid, agrid - real, dimension(:,:), pointer :: area, dx, dy, dxc, dyc + real function DCMIP16_BC_sphum(p,ps,lat, lon) - grid => gridstruct%grid - agrid=> gridstruct%agrid + real, intent(IN) :: p, ps + real(kind=R_GRID), intent(IN) :: lat, lon + real :: eta - area => gridstruct%area - dx => gridstruct%dx - dy => gridstruct%dy - dxc => gridstruct%dxc - dyc => gridstruct%dyc + eta = p/ps - psi(:,:) = 1.e25 - psi_b(:,:) = 1.e25 - do j=jsd,jed - do i=isd,ied - psi(i,j) = (-1.0 * Ubar * radius *( sin(agrid(i,j,2)) *cos(alpha) - & - cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) ) ) - enddo - enddo - do j=jsd,jed+1 - do i=isd,ied+1 - psi_b(i,j) = (-1.0 * Ubar * radius *( sin(grid(i,j,2)) *cos(alpha) - & - cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) ) ) - enddo - enddo - - if ( defOnGrid == 1 ) then - do j=jsd,jed+1 - do i=isd,ied - dist = dx(i,j) - vc(i,j) = (psi_b(i+1,j)-psi_b(i,j))/dist - if (dist==0) vc(i,j) = 0. - enddo - enddo - do j=jsd,jed - do i=isd,ied+1 - dist = dy(i,j) - uc(i,j) = -1.0*(psi_b(i,j+1)-psi_b(i,j))/dist - if (dist==0) uc(i,j) = 0. - enddo - enddo + DCMIP16_BC_sphum = qt + if (p > ptrop) then + DCMIP16_BC_sphum = q0 * exp(-(lat/phiW)**4) * exp(-( (eta-1.)*p0/pw)**2) + endif - - do j=js,je - do i=is,ie+1 - dist = dxc(i,j) - v(i,j) = (psi(i,j)-psi(i-1,j))/dist - if (dist==0) v(i,j) = 0. - enddo - enddo - do j=js,je+1 - do i=is,ie - dist = dyc(i,j) - u(i,j) = -1.0*(psi(i,j)-psi(i,j-1))/dist - if (dist==0) u(i,j) = 0. - enddo - enddo - endif - - end subroutine init_latlon_winds - - subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, nested, & - u,v, ua,va, uc,vc, gridstruct, domain) - -! Input - integer, intent(IN) :: im,jm,km - integer, intent(IN) :: ifirst,ilast - integer, intent(IN) :: jfirst,jlast - integer, intent(IN) :: ng - logical, intent(IN) :: nested - type(fv_grid_type), intent(IN), target :: gridstruct - type(domain2d), intent(INOUT) :: domain - - !real , intent(in) :: sinlon(im,jm) - !real , intent(in) :: coslon(im,jm) - !real , intent(in) :: sinl5(im,jm) - !real , intent(in) :: cosl5(im,jm) - -! Output - ! real , intent(inout) :: u(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) - ! real , intent(inout) :: v(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) - ! real , intent(inout) :: ua(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) - ! real , intent(inout) :: va(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) - ! real , intent(inout) :: uc(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) - ! 
real , intent(inout) :: vc(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) - - real , intent(inout) :: u(isd:ied,jsd:jed+1) !ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) - real , intent(inout) :: v(isd:ied+1,jsd:jed) !ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) - real , intent(inout) :: ua(isd:ied,jsd:jed) !ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) - real , intent(inout) :: va(isd:ied,jsd:jed) !(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) - real , intent(inout) :: uc(isd:ied+1,jsd:jed) !(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) - real , intent(inout) :: vc(isd:ied,jsd:jed+1) !(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) - -!-------------------------------------------------------------- -! Local - - real :: sinlon(im,jm) - real :: coslon(im,jm) - real :: sinl5(im,jm) - real :: cosl5(im,jm) - - real :: tmp1(jsd:jed+1) - real :: tmp2(jsd:jed) - real :: tmp3(jsd:jed) - - real mag,mag1,mag2, ang,ang1,ang2 - real us, vs, un, vn - integer i, j, k, im2 - integer js1g1 - integer js2g1 - integer js2g2 - integer js2gc - integer js2gc1 - integer js2gcp1 - integer js2gd - integer jn2gc - integer jn1g1 - integer jn1g2 - integer jn2gd - integer jn2gsp1 - - real, pointer, dimension(:,:,:) :: agrid, grid - real, pointer, dimension(:,:) :: area, rarea, fC, f0 - real(kind=R_GRID), pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 - real(kind=R_GRID), pointer, dimension(:,:,:,:) :: ew, es - real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc + end function DCMIP16_BC_sphum - logical, pointer :: cubed_sphere, latlon + end subroutine DCMIP16_BC - logical, pointer :: have_south_pole, have_north_pole + subroutine DCMIP16_TC(delp,pt,u,v,q,w,delz,& + is,ie,js,je,isd,ied,jsd,jed,npz,nq,ak,bk,ptop, & + pk,peln,pe,pkz,gz,phis,ps,grid,agrid, & + hydrostatic, nwat, adiabatic) - integer, pointer :: ntiles_g - real, pointer :: acapN, acapS, globalarea + integer, intent(IN) :: is,ie,js,je,isd,ied,jsd,jed,npz,nq, nwat + real, intent(IN) :: ptop + real, intent(IN), dimension(npz+1) :: ak, bk + real, intent(INOUT), dimension(isd:ied,jsd:jed,npz,nq) :: q + real, intent(OUT), dimension(isd:ied,jsd:jed,npz) :: delp, pt, w + real, intent(OUT), dimension(is:,js:,1:) :: delz + real, intent(OUT), dimension(isd:ied,jsd:jed+1,npz) :: u + real, intent(OUT), dimension(isd:ied+1,jsd:jed,npz) :: v + real, intent(OUT), dimension(is:ie,js:je,npz+1) :: pk + real, intent(OUT), dimension(is:ie,npz+1,js:je) :: peln + real, intent(OUT), dimension(is-1:ie+1,npz+1,js-1:je+1) :: pe + real, intent(OUT), dimension(is:ie,js:je,npz) :: pkz + real, intent(OUT), dimension(isd:ied,jsd:jed) :: phis,ps + real(kind=R_GRID), intent(IN), dimension(isd:ied,jsd:jed,2) :: agrid + real(kind=R_GRID), intent(IN), dimension(isd:ied+1,jsd:jed+1,2) :: grid + real, intent(OUT), dimension(isd:ied,jsd:jed,npz+1) :: gz + logical, intent(IN) :: hydrostatic,adiabatic - grid => gridstruct%grid - agrid=> gridstruct%agrid + real, parameter :: zt = 15000 ! m + real, parameter :: q0 = 0.021 ! kg/kg + real, parameter :: qt = 1.e-11 ! kg/kg + real, parameter :: T0 = 302.15 ! K + real, parameter :: Tv0 = 302.15*(1.+0.608*q0) ! K + real, parameter :: Ts = 302.15 ! K + real, parameter :: zq1 = 3000. ! m + real, parameter :: zq2 = 8000. ! m + real, parameter :: lapse = 7.e-3 ! K/m + real, parameter :: Tvt = Tv0 - lapse*zt ! K + real, parameter :: pb = 101500. ! Pa + real, parameter :: ptt = pb*(TvT/Tv0)**(grav/Rdgas/lapse) + real(kind=R_GRID), parameter :: lamp = pi + real(kind=R_GRID), parameter :: phip = pi/18. 
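
For reference, the analytic vortex encoded by the DCMIP16_TC code being re-added here (and inverted by its Newton loops) can be transcribed directly from this parameter block; this restates what the code computes rather than adding to it. With $\Delta p$, $r_p$, $z_p$, $\Gamma$ standing for `dp`, `rp`, `zp`, `lapse`:

$$ p_s(r) = p_b - \Delta p\, e^{-(r/r_p)^{3/2}}, \qquad T_v(z) = T_{v0} - \Gamma z \quad (z \le z_t), $$

$$ p(z,r) = \Big[\, p_b - \Delta p\, e^{-(r/r_p)^{3/2} - (z/z_p)^2} \Big] \left(\frac{T_{v0} - \Gamma z}{T_{v0}}\right)^{g/(R_d\Gamma)} \quad (z \le z_t), $$

while above $z_t$ the column is isothermal at $T_{vt} = T_{v0} - \Gamma z_t$, giving $p = p_{tt}\, e^{\,g(z_t - z)/(R_d T_{vt})}$ with $p_{tt} = p_b\,(T_{vt}/T_{v0})^{g/(R_d\Gamma)}$, exactly the `ptt` parameter declared above.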
+ real(kind=R_GRID), parameter :: ppcenter(2) = (/ lamp, phip /) + real, parameter :: dp = 1115. ! Pa + real, parameter :: rp = 282000. ! m + real, parameter :: zp = 7000. ! m + real, parameter :: fc = 2.*OMEGA*sin(phip) - area => gridstruct%area - rarea => gridstruct%rarea + real, parameter :: zconv = 1.e-6 + real, parameter :: rdgrav = rdgas/grav + real, parameter :: rrdgrav = grav/rdgas + real, parameter :: zvir = rvgas/rdgas - 1. - fC => gridstruct%fC - f0 => gridstruct%f0 + integer :: i,j,k,iter, sphum, cl, cl2, n + real :: p,z,z0,ziter,piter,titer,uu,vv,pl, r + real(kind=R_GRID), dimension(2) :: pa + real(kind=R_GRID), dimension(3) :: e1,e2,ex,ey + real, dimension(is:ie,js:je) :: rc + real, dimension(is:ie,js:je+1) :: gz_u,p_u,peln_u,ps_u,u1,u2, rc_u + real(kind=R_GRID), dimension(is:ie,js:je+1) :: lat_u,lon_u + real, dimension(is:ie+1,js:je) :: gz_v,p_v,peln_v,ps_v,v1,v2, rc_v + real(kind=R_GRID), dimension(is:ie+1,js:je) :: lat_v,lon_v - ee1 => gridstruct%ee1 - ee2 => gridstruct%ee2 - ew => gridstruct%ew - es => gridstruct%es - en1 => gridstruct%en1 - en2 => gridstruct%en2 + !Compute ps, phis, delp, aux pressure variables, Temperature, winds + ! (with or without perturbation), moisture, w, delz - dx => gridstruct%dx - dy => gridstruct%dy - dxa => gridstruct%dxa - dya => gridstruct%dya - rdxa => gridstruct%rdxa - rdya => gridstruct%rdya - dxc => gridstruct%dxc - dyc => gridstruct%dyc - - cubed_sphere => gridstruct%cubed_sphere - latlon => gridstruct%latlon + !Compute p, z, T on both the staggered and unstaggered grids. Then compute the zonal + ! and meridional winds on both grids, and rotate as needed - have_south_pole => gridstruct%have_south_pole - have_north_pole => gridstruct%have_north_pole + !Save r for easy use + do j=js,je + do i=is,ie + rc(i,j) = great_circle_dist(agrid(i,j,:), ppcenter, radius) + enddo + enddo - ntiles_g => gridstruct%ntiles_g - acapN => gridstruct%acapN - acapS => gridstruct%acapS - globalarea => gridstruct%globalarea + !PS + do j=js,je + do i=is,ie + ps(i,j) = pb - dp*exp( -sqrt((rc(i,j)/rp)**3) ) + enddo + enddo - if (cubed_sphere) then - - call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,im,jm,ng) - if (.not. nested) call fill_corners(ua, va, im, jm, VECTOR=.true., AGRID=.true.) - call atoc(ua,va,uc,vc,dx,dy,dxa,dya,im,jm,ng, nested, domain, noComm=.true.) - if (.not. nested) call fill_corners(uc, vc, im, jm, VECTOR=.true., CGRID=.true.) - - else ! Lat-Lon - - im2 = im/2 - -! Set loop limits - - js1g1 = jfirst-1 - js2g1 = jfirst-1 - js2g2 = jfirst-2 - js2gc = jfirst-ng - js2gcp1 = jfirst-ng-1 - js2gd = jfirst-ng - jn1g1 = jlast+1 - jn1g2 = jlast+2 - jn2gc = jlast+ng - jn2gd = jlast+ng-1 - jn2gsp1 = jlast+ng-1 - - if (have_south_pole) then - js1g1 = 1 - js2g1 = 2 - js2g2 = 2 - js2gc = 2 - js2gcp1 = 2 ! NG-1 latitudes on S (starting at 2) - js2gd = 2 - endif - if (have_north_pole) then - jn1g1 = jm - jn1g2 = jm - jn2gc = jm-1 ! NG latitudes on N (ending at jm-1) - jn2gd = jm-1 - jn2gsp1 = jm-1 - endif -! -! Treat the special case of ng = 1 -! - if ( ng == 1 .AND. ng > 1 ) THEN - js2gc1 = js2gc - else - js2gc1 = jfirst-ng+1 - if (have_south_pole) js2gc1 = 2 ! 
NG-1 latitudes on S (starting at 2) - endif + !delp + do k=1,npz + do j=js,je + do i=is,ie + delp(i,j,k) = ak(k+1)-ak(k) + ps(i,j)*(bk(k+1)-bk(k)) + enddo + enddo + enddo - do k=1,km + !Pressure variables + do j=js,je + do i=is,ie + pe(i,1,j) = ptop + enddo + do i=is,ie + peln(i,1,j) = log(ptop) + pk(i,j,1) = ptop**kappa + enddo + do k=2,npz+1 + do i=is,ie + pe(i,k,j) = ak(k) + ps (i,j)*bk(k) + enddo + do i=is,ie + pk(i,j,k) = exp(kappa*log(pe(i,k,j))) + peln(i,k,j) = log(pe(i,k,j)) + enddo + enddo + enddo - if ((have_south_pole) .or. (have_north_pole)) then -! Get D-grid V-wind at the poles. - call vpol5(u(1:im,:), v(1:im,:), im, jm, & - coslon, sinlon, cosl5, sinl5, ng, ng, jfirst, jlast ) - call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, v(:,:)) - endif + do k=1,npz + do j=js,je + do i=is,ie + pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(kappa*(peln(i,k+1,j)-peln(i,k,j))) + enddo + enddo + enddo - call dtoa(u, v, ua, va, dx,dy,dxa,dya,dxc,dyc,im, jm, ng) - if (.not. nested) call fill_corners(ua, va, im, jm, VECTOR=.true., AGRID=.true.) + !Height: Use Newton's method + !Cell centered + do j=js,je + do i=is,ie + phis(i,j) = 0. + gz(i,j,npz+1) = 0. + enddo + enddo + do k=npz,1,-1 + do j=js,je + do i=is,ie + p = pe(i,k,j) + z = gz(i,j,k+1) + do iter=1,30 + ziter = z + piter = DCMIP16_TC_pressure(ziter,rc(i,j)) + titer = DCMIP16_TC_temperature(ziter,rc(i,j)) + z = ziter + (piter - p)*rdgrav*titer/piter +!!$ !!! DEBUG CODE +!!$ if (is_master() .and. i == is .and. j == js) then +!!$ write(*,'(A,I,2x,I, 4(2x,F10.3), 2x, F7.3)') ' NEWTON: ' , k, iter, piter, p, ziter, z, titer +!!$ endif +!!$ !!! END DEBUG CODE + if (abs(z - ziter) < zconv) exit + enddo + gz(i,j,k) = z + enddo + enddo + enddo - if ( have_south_pole ) then -! Projection at SP - us = 0. - vs = 0. - do i=1,im2 - us = us + (ua(i+im2,2)-ua(i,2))*sinlon(i,2) & - + (va(i,2)-va(i+im2,2))*coslon(i,2) - vs = vs + (ua(i+im2,2)-ua(i,2))*coslon(i,2) & - + (va(i+im2,2)-va(i,2))*sinlon(i,2) - enddo - us = us/im - vs = vs/im -! SP - do i=1,im2 - ua(i,1) = -us*sinlon(i,1) - vs*coslon(i,1) - va(i,1) = us*coslon(i,1) - vs*sinlon(i,1) - ua(i+im2,1) = -ua(i,1) - va(i+im2,1) = -va(i,1) - enddo - ua(0 ,1) = ua(im,1) - ua(im+1,1) = ua(1 ,1) - va(im+1,1) = va(1 ,1) - endif + !Temperature: Compute from hydro balance + do k=1,npz + do j=js,je + do i=is,ie + pt(i,j,k) = rrdgrav * ( gz(i,j,k) - gz(i,j,k+1) ) / ( peln(i,k+1,j) - peln(i,k,j)) + enddo + enddo + enddo - if ( have_north_pole ) then -! Projection at NP - un = 0. - vn = 0. - j = jm-1 - do i=1,im2 - un = un + (ua(i+im2,j)-ua(i,j))*sinlon(i,j) & - + (va(i+im2,j)-va(i,j))*coslon(i,j) - vn = vn + (ua(i,j)-ua(i+im2,j))*coslon(i,j) & - + (va(i+im2,j)-va(i,j))*sinlon(i,j) - enddo - un = un/im - vn = vn/im -! NP - do i=1,im2 - ua(i,jm) = -un*sinlon(i,jm) + vn*coslon(i,jm) - va(i,jm) = -un*coslon(i,jm) - vn*sinlon(i,jm) - ua(i+im2,jm) = -ua(i,jm) - va(i+im2,jm) = -va(i,jm) - enddo - ua(0 ,jm) = ua(im,jm) - ua(im+1,jm) = ua(1 ,jm) - va(im+1,jm) = va(1 ,jm) - endif + !Compute height and temperature for u and v points also, to be able to compute the local winds + !Use temporary 2d arrays for this purpose + do j=js,je+1 + do i=is,ie + call mid_pt_sphere(grid(i,j,:),grid(i+1,j,:),pa) + lat_u(i,j) = pa(2) + lon_u(i,j) = pa(1) + call get_unit_vect2(grid(i,j,:),grid(i+1,j,:),e1) + call get_latlon_vector(pa,ex,ey) + u1(i,j) = inner_prod(e1,ex) !u components + u2(i,j) = inner_prod(e1,ey) + rc_u(i,j) = great_circle_dist(pa, ppcenter, radius) + gz_u(i,j) = 0. 
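
The `pt` assignment in the hunk above diagnoses the layer-mean virtual temperature from the heights the Newton loop just produced, via finite-difference hydrostatic balance $\partial\Phi/\partial\ln p = -R_d T_v$:

$$ \overline{T_v}\big|_k \;=\; \frac{g}{R_d}\,\frac{z_k - z_{k+1}}{\ln p_{k+1} - \ln p_k}, $$

i.e. `rrdgrav * ( gz(i,j,k) - gz(i,j,k+1) ) / ( peln(i,k+1,j) - peln(i,k,j) )`. The analytic profiles are specified in virtual temperature, which is why the moist branches further down divide by $1 + z_{\mathrm{vir}}\,q$ (with $z_{\mathrm{vir}} = R_v/R_d - 1$, the code's `zvir`) to recover the dry temperature carried by the model.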
+ p_u(i,j) = pb - dp*exp( -sqrt((rc_u(i,j)/rp)**3) ) + peln_u(i,j) = log(p_u(i,j)) + ps_u(i,j) = p_u(i,j) + enddo + enddo + do k=npz,1,-1 + do j=js,je+1 + do i=is,ie + !Pressure (Top of interface) + p = ak(k) + ps_u(i,j)*bk(k) + pl = log(p) + !Height (top of interface); use newton's method + z = gz_u(i,j) !first guess, height of lower level + z0 = z + do iter=1,30 + ziter = z + piter = DCMIP16_TC_pressure(ziter,rc_u(i,j)) + titer = DCMIP16_TC_temperature(ziter,rc_u(i,j)) + z = ziter + (piter - p)*rdgrav*titer/piter + if (abs(z - ziter) < zconv) exit + enddo + !Now compute winds + call DCMIP16_TC_uwind_pert(0.5*(z+z0),rc_u(i,j),lon_u(i,j),lat_u(i,j), uu, vv) + u(i,j,k) = u1(i,j)*uu + u2(i,j)*vv - if (latlon) call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, ua(:,:)) - if (latlon) call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, va(:,:)) + gz_u(i,j) = z + p_u(i,j) = p + peln_u(i,j) = pl + enddo + enddo + enddo -! A -> C - call atoc(ua, va, uc, vc, dx,dy,dxa,dya,im, jm, ng, nested, domain, noComm=.true.) + do j=js,je + do i=is,ie+1 + call mid_pt_sphere(grid(i,j,:),grid(i,j+1,:),pa) + lat_v(i,j) = pa(2) + lon_v(i,j) = pa(1) + call get_unit_vect2(grid(i,j,:),grid(i,j+1,:),e2) + call get_latlon_vector(pa,ex,ey) + v1(i,j) = inner_prod(e2,ex) !v components + v2(i,j) = inner_prod(e2,ey) + rc_v(i,j) = great_circle_dist(pa, ppcenter, radius) + gz_v(i,j) = 0. + p_v(i,j) = pb - dp*exp( - sqrt((rc_v(i,j)/rp)**3) ) + peln_v(i,j) = log(p_v(i,j)) + ps_v(i,j) = p_v(i,j) + enddo + enddo + do k=npz,1,-1 + do j=js,je + do i=is,ie+1 + !Pressure (Top of interface) + p = ak(k) + ps_v(i,j)*bk(k) + pl = log(p) + !Height (top of interface); use newton's method + z = gz_v(i,j) !first guess, height of lower level + z0 = z + do iter=1,30 + ziter = z + piter = DCMIP16_TC_pressure(ziter,rc_v(i,j)) + titer = DCMIP16_TC_temperature(ziter,rc_v(i,j)) + z = ziter + (piter - p)*rdgrav*titer/piter + if (abs(z - ziter) < zconv) exit + enddo + !Now compute winds + call DCMIP16_TC_uwind_pert(0.5*(z+z0),rc_v(i,j),lon_v(i,j),lat_v(i,j), uu, vv) + v(i,j,k) = v1(i,j)*uu + v2(i,j)*vv + gz_v(i,j) = z + p_v(i,j) = p + peln_v(i,j) = pl + enddo + enddo + enddo - enddo ! km loop + !Compute moisture and other tracer fields, as desired + do n=1,nq + do k=1,npz + do j=jsd,jed + do i=isd,ied + q(i,j,k,n) = 0. + enddo + enddo + enddo + enddo + if (.not. adiabatic) then + sphum = get_tracer_index (MODEL_ATMOS, 'sphum') + do k=1,npz + do j=js,je + do i=is,ie + z = 0.5*(gz(i,j,k) + gz(i,j,k+1)) + q(i,j,k,sphum) = DCMIP16_TC_sphum(z) + !Convert pt to non-virtual temperature + pt(i,j,k) = pt(i,j,k) / ( 1. + zvir*q(i,j,k,sphum)) + enddo + enddo + enddo + endif - if (.not. nested) call fill_corners(uc, vc, im, jm, VECTOR=.true., CGRID=.true.) + !Compute nonhydrostatic variables, if needed + if (.not. hydrostatic) then + do k=1,npz + do j=js,je + do i=is,ie + w(i,j,k) = 0. + delz(i,j,k) = gz(i,j,k) - gz(i,j,k+1) + enddo + enddo + enddo endif + contains - end subroutine d2a2c + !Initialize with virtual temperature + real function DCMIP16_TC_temperature(z, r) + real, intent(IN) :: z, r + real :: Tv, term1, term2 - subroutine atob_s(qin, qout, npx, npy, dxa, dya, nested, cubed_sphere, altInterp) + if (z > zt) then + DCMIP16_TC_temperature = Tvt + return + endif -! atob_s :: interpolate scalar from the A-Grid to the B-grid -! - integer, intent(IN) :: npx, npy - real , intent(IN) :: qin(isd:ied ,jsd:jed ) ! A-grid field - real , intent(OUT) :: qout(isd:ied+1,jsd:jed+1) ! 
Output B-grid field - integer, OPTIONAL, intent(IN) :: altInterp - logical, intent(IN) :: nested, cubed_sphere - real, intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya + Tv = Tv0 - lapse*z + term1 = grav*zp*zp* ( 1. - pb/dp * exp( sqrt(r/rp)**3 + (z/zp)**2 ) ) + term2 = 2*rdgas*Tv*z + DCMIP16_TC_temperature = Tv + Tv*( 1./(1 + term2/term1) - 1.) - integer :: i,j,n + end function DCMIP16_TC_temperature - real :: tmp1j(jsd:jed+1) - real :: tmp2j(jsd:jed+1) - real :: tmp3j(jsd:jed+1) - real :: tmp1i(isd:ied+1) - real :: tmp2i(isd:ied+1) - real :: tmp3i(isd:ied+1) - real :: tmpq(isd:ied ,jsd:jed ) - real :: tmpq1(isd:ied+1,jsd:jed+1) - real :: tmpq2(isd:ied+1,jsd:jed+1) + !Initialize with moist air mass + real function DCMIP16_TC_pressure(z, r) - if (present(altInterp)) then + real, intent(IN) :: z, r - tmpq(:,:) = qin(:,:) + if (z <= zt) then + DCMIP16_TC_pressure = pb*exp(grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) -dp* exp(-sqrt((r/rp)**3) - (z/zp)**2) * & + exp( grav/(Rdgas*lapse) * log( (Tv0-lapse*z)/Tv0) ) + else + DCMIP16_TC_pressure = ptt*exp(grav*(zt-z)/(Rdgas*Tvt)) + endif - if (.not. nested) call fill_corners(tmpq , npx, npy, FILL=XDir, AGRID=.true.) -! ATOC - do j=jsd,jed - call interp_left_edge_1d(tmpq1(:,j), tmpq(:,j), dxa(:,j), isd, ied, altInterp) - enddo + end function DCMIP16_TC_pressure - if (.not. nested) call fill_corners(tmpq , npx, npy, FILL=YDir, AGRID=.true.) -! ATOD - do i=isd,ied - tmp1j(jsd:jed) = 0.0 - tmp2j(jsd:jed) = tmpq(i,jsd:jed) - tmp3j(jsd:jed) = dya(i,jsd:jed) - call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, altInterp) - tmpq2(i,jsd:jed) = tmp1j(jsd:jed) - enddo + subroutine DCMIP16_TC_uwind_pert(z,r,lon,lat,uu,vv) -! CTOB - do i=isd,ied - tmp1j(:) = tmpq1(i,:) - tmp2j(:) = tmpq1(i,:) - tmp3j(:) = 1.0 ! Uniform Weighting missing first value so will not reproduce - call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, altInterp) - tmpq1(i,:) = tmp1j(:) - enddo + real, intent(IN) :: z, r + real(kind=R_GRID), intent(IN) :: lon, lat + real, intent(OUT) :: uu, vv + real :: rfac, Tvrd, vt, fr5, d1, d2, d + real(kind=R_GRID) :: dst, pphere(2) -! DTOB - do j=jsd,jed - tmp1i(:) = tmpq2(:,j) - tmp2i(:) = tmpq2(:,j) - tmp3i(:) = 1.0 ! Uniform Weighting missing first value so will not reproduce - call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, altInterp) - tmpq2(:,j) = tmp1i(:) - enddo + if (z > zt) then + uu = 0. + vv = 0. + return + endif -! Average - do j=jsd,jed+1 - do i=isd,ied+1 - qout(i,j) = 0.5 * (tmpq1(i,j) + tmpq2(i,j)) - enddo - enddo + rfac = sqrt(r/rp)**3 -! Fix Corners - if (cubed_sphere .and. .not. nested) then - i=1 - j=1 - if ( (is==i) .and. (js==j) ) then - qout(i,j) = (1./3.) * (qin(i,j) + qin(i-1,j) + qin(i,j-1)) - endif + fr5 = 0.5*fc*r + Tvrd = (Tv0 - lapse*z)*Rdgas - i=npx - j=1 - if ( (ie+1==i) .and. (js==j) ) then - qout(i,j) = (1./3.) * (qin(i-1,j) + qin(i-1,j-1) + qin(i,j)) - endif + vt = -fr5 + sqrt( fr5**2 - (1.5 * rfac * Tvrd) / & + ( 1. + 2*Tvrd*z/(grav*zp**2) - pb/dp*exp( rfac + (z/zp)**2) ) ) - i=1 - j=npy - if ( (is==i) .and. (je+1==j) ) then - qout(i,j) = (1./3.) * (qin(i,j-1) + qin(i-1,j-1) + qin(i,j)) - endif + d1 = sin(phip)*cos(lat) - cos(phip)*sin(lat)*cos(lon - lamp) + d2 = cos(phip)*sin(lon - lamp) + d = max(1.e-25,sqrt(d1*d1 + d2*d2)) - i=npx - j=npy - if ( (ie+1==i) .and. (je+1==j) ) then - qout(i,j) = (1./3.) * (qin(i-1,j-1) + qin(i,j-1) + qin(i-1,j)) - endif - endif + uu = vt * d1/d + vv = vt * d2/d - else ! 
altInterp + end subroutine DCMIP16_TC_uwind_pert - do j=js,je+1 - do i=is,ie+1 - qout(i,j) = 0.25 * (qin(i-1,j) + qin(i-1,j-1) + & - qin(i ,j) + qin(i ,j-1)) - enddo - enddo + real function DCMIP16_TC_sphum(z) - if (.not. nested) then - i=1 - j=1 - if ( (is==i) .and. (js==j) ) then - qout(i,j) = (1./3.) * (qin(i,j) + qin(i-1,j) + qin(i,j-1)) - endif + real, intent(IN) :: z - i=npx - j=1 - if ( (ie+1==i) .and. (js==j) ) then - qout(i,j) = (1./3.) * (qin(i-1,j) + qin(i-1,j-1) + qin(i,j)) - endif + DCMIP16_TC_sphum = qt + if (z < zt) then + DCMIP16_TC_sphum = q0 * exp(-z/zq1) * exp(-(z/zq2 )**2) + endif - i=1 - j=npy - if ( (is==i) .and. (je+1==j) ) then - qout(i,j) = (1./3.) * (qin(i,j-1) + qin(i-1,j-1) + qin(i,j)) - endif + end function DCMIP16_TC_sphum - i=npx - j=npy - if ( (ie+1==i) .and. (je+1==j) ) then - qout(i,j) = (1./3.) * (qin(i-1,j-1) + qin(i,j-1) + qin(i-1,j)) - endif - endif !not nested + end subroutine DCMIP16_TC + +!!$ subroutine init_latlon(u,v,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, ua,va, ak, bk, & +!!$ gridstruct, npx, npy, npz, ng, ncnst, ndims, nregions, dry_mass, & +!!$ mountain, moist_phys, hybrid_z, delz, ze0, domain_in, tile_in, bd) +!!$ +!!$ real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: pt(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: delp(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: q(isd:ied ,jsd:jed ,npz, ncnst) +!!$ +!!$ real , intent(INOUT) :: phis(isd:ied ,jsd:jed ) +!!$ +!!$ real , intent(INOUT) :: ps(isd:ied ,jsd:jed ) +!!$ real , intent(INOUT) :: pe(is-1:ie+1,npz+1,js-1:je+1) +!!$ real , intent(INOUT) :: pk(is:ie ,js:je ,npz+1) +!!$ real , intent(INOUT) :: peln(is :ie ,npz+1 ,js:je) +!!$ real , intent(INOUT) :: pkz(is:ie ,js:je ,npz ) +!!$ real , intent(INOUT) :: uc(isd:ied+1,jsd:jed ,npz) +!!$ real , intent(INOUT) :: vc(isd:ied ,jsd:jed+1,npz) +!!$ real , intent(INOUT) :: ua(isd:ied ,jsd:jed ,npz) +!!$ real , intent(INOUT) :: va(isd:ied ,jsd:jed ,npz) +!!$ real , intent(inout) :: delz(is:,js:,1:) +!!$ real , intent(inout) :: ze0(is:,js:,1:) +!!$ +!!$ real , intent(IN) :: ak(npz+1) +!!$ real , intent(IN) :: bk(npz+1) +!!$ +!!$ integer, intent(IN) :: npx, npy, npz +!!$ integer, intent(IN) :: ng, ncnst +!!$ integer, intent(IN) :: ndims +!!$ integer, intent(IN) :: nregions +!!$ integer,target,intent(IN):: tile_in +!!$ +!!$ real, intent(IN) :: dry_mass +!!$ logical, intent(IN) :: mountain +!!$ logical, intent(IN) :: moist_phys +!!$ logical, intent(IN) :: hybrid_z +!!$ +!!$ type(fv_grid_type), intent(IN), target :: gridstruct +!!$ type(domain2d), intent(IN), target :: domain_in +!!$ +!!$ real, pointer, dimension(:,:,:) :: agrid, grid +!!$ real, pointer, dimension(:,:) :: area, rarea, fC, f0 +!!$ real, pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 +!!$ real, pointer, dimension(:,:,:,:) :: ew, es +!!$ real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc +!!$ +!!$ logical, pointer :: cubed_sphere, latlon +!!$ +!!$ type(domain2d), pointer :: domain +!!$ integer, pointer :: tile +!!$ +!!$ logical, pointer :: have_south_pole, have_north_pole +!!$ +!!$ integer, pointer :: ntiles_g +!!$ real, pointer :: acapN, acapS, globalarea +!!$ +!!$ real(kind=R_GRID) :: p1(2), p2(2) +!!$ real :: r, r0 +!!$ integer :: i,j +!!$ +!!$ agrid => gridstruct%agrid +!!$ grid => gridstruct%grid +!!$ +!!$ area => gridstruct%area +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ rdxa => gridstruct%rdxa +!!$ 
rdya => gridstruct%rdya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ fC => gridstruct%fC +!!$ f0 => gridstruct%f0 +!!$ +!!$ ntiles_g => gridstruct%ntiles_g +!!$ acapN => gridstruct%acapN +!!$ acapS => gridstruct%acapS +!!$ globalarea => gridstruct%globalarea +!!$ +!!$ domain => domain_in +!!$ tile => tile_in +!!$ +!!$ have_south_pole => gridstruct%have_south_pole +!!$ have_north_pole => gridstruct%have_north_pole +!!$ +!!$ do j=jsd,jed+1 +!!$ do i=isd,ied+1 +!!$ fc(i,j) = 2.*omega*( -cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) & +!!$ +sin(grid(i,j,2))*cos(alpha) ) +!!$ enddo +!!$ enddo +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ f0(i,j) = 2.*omega*( -cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) & +!!$ +sin(agrid(i,j,2))*cos(alpha) ) +!!$ enddo +!!$ enddo +!!$ +!!$ select case (test_case) +!!$ case ( 1 ) +!!$ +!!$ Ubar = (2.0*pi*radius)/(12.0*86400.0) +!!$ phis = 0.0 +!!$ r0 = radius/3. !RADIUS radius/3. +!!$ p1(1) = 0. +!!$ p1(1) = pi/2. + pi_shift +!!$ p1(2) = 0. +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ p2(1) = agrid(i,j,1) +!!$ p2(2) = agrid(i,j,2) +!!$ r = great_circle_dist( p1, p2, radius ) +!!$ if (r < r0) then +!!$ delp(i,j,1) = phis(i,j) + 0.5*(1.0+cos(PI*r/r0)) +!!$ else +!!$ delp(i,j,1) = phis(i,j) +!!$ endif +!!$ enddo +!!$ enddo +!!$ call init_latlon_winds(UBar, u, v, ua, va, uc, vc, 1, gridstruct) +!!$ +!!$ +!!$ +!!$ end select +!!$ +!!$ nullify(grid) +!!$ nullify(agrid) +!!$ +!!$ nullify(area) +!!$ +!!$ nullify(fC) +!!$ nullify(f0) +!!$ +!!$ nullify(dx) +!!$ nullify(dy) +!!$ nullify(dxa) +!!$ nullify(dya) +!!$ nullify(rdxa) +!!$ nullify(rdya) +!!$ nullify(dxc) +!!$ nullify(dyc) +!!$ +!!$ nullify(domain) +!!$ nullify(tile) +!!$ +!!$ nullify(have_south_pole) +!!$ nullify(have_north_pole) +!!$ +!!$ nullify(ntiles_g) +!!$ nullify(acapN) +!!$ nullify(acapS) +!!$ nullify(globalarea) +!!$ +!!$ end subroutine init_latlon +!!$ +!!$ subroutine init_latlon_winds(UBar, u, v, ua, va, uc, vc, defOnGrid, gridstruct) +!!$ +!!$ ! 
defOnGrid = -1:null_op, 0:All-Grids, 1:C-Grid, 2:D-Grid, 3:A-Grid, 4:A-Grid then Rotate, 5:D-Grid with unit vectors then Rotate +!!$ +!!$ real, intent(INOUT) :: UBar +!!$ real, intent(INOUT) :: u(isd:ied ,jsd:jed+1) +!!$ real, intent(INOUT) :: v(isd:ied+1,jsd:jed ) +!!$ real, intent(INOUT) :: uc(isd:ied+1,jsd:jed ) +!!$ real, intent(INOUT) :: vc(isd:ied ,jsd:jed+1) +!!$ real, intent(INOUT) :: ua(isd:ied ,jsd:jed ) +!!$ real, intent(INOUT) :: va(isd:ied ,jsd:jed ) +!!$ integer, intent(IN) :: defOnGrid +!!$ type(fv_grid_type), intent(IN), target :: gridstruct +!!$ +!!$ real :: p1(2),p2(2),p3(2),p4(2), pt(2) +!!$ real :: e1(3), e2(3), ex(3), ey(3) +!!$ +!!$ real :: dist, r, r0 +!!$ integer :: i,j,k,n +!!$ real :: utmp, vtmp +!!$ +!!$ real :: psi_b(isd:ied+1,jsd:jed+1), psi(isd:ied,jsd:jed), psi1, psi2 +!!$ +!!$ real, dimension(:,:,:), pointer :: grid, agrid +!!$ real, dimension(:,:), pointer :: area, dx, dy, dxc, dyc +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ psi(:,:) = 1.e25 +!!$ psi_b(:,:) = 1.e25 +!!$ do j=jsd,jed +!!$ do i=isd,ied +!!$ psi(i,j) = (-1.0 * Ubar * radius *( sin(agrid(i,j,2)) *cos(alpha) - & +!!$ cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) ) ) +!!$ enddo +!!$ enddo +!!$ do j=jsd,jed+1 +!!$ do i=isd,ied+1 +!!$ psi_b(i,j) = (-1.0 * Ubar * radius *( sin(grid(i,j,2)) *cos(alpha) - & +!!$ cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) ) ) +!!$ enddo +!!$ enddo +!!$ +!!$ if ( defOnGrid == 1 ) then +!!$ do j=jsd,jed+1 +!!$ do i=isd,ied +!!$ dist = dx(i,j) +!!$ vc(i,j) = (psi_b(i+1,j)-psi_b(i,j))/dist +!!$ if (dist==0) vc(i,j) = 0. +!!$ enddo +!!$ enddo +!!$ do j=jsd,jed +!!$ do i=isd,ied+1 +!!$ dist = dy(i,j) +!!$ uc(i,j) = -1.0*(psi_b(i,j+1)-psi_b(i,j))/dist +!!$ if (dist==0) uc(i,j) = 0. +!!$ enddo +!!$ enddo +!!$ +!!$ +!!$ do j=js,je +!!$ do i=is,ie+1 +!!$ dist = dxc(i,j) +!!$ v(i,j) = (psi(i,j)-psi(i-1,j))/dist +!!$ if (dist==0) v(i,j) = 0. +!!$ enddo +!!$ enddo +!!$ do j=js,je+1 +!!$ do i=is,ie +!!$ dist = dyc(i,j) +!!$ u(i,j) = -1.0*(psi(i,j)-psi(i,j-1))/dist +!!$ if (dist==0) u(i,j) = 0. +!!$ enddo +!!$ enddo +!!$ endif +!!$ +!!$ end subroutine init_latlon_winds - endif ! altInterp +!!$ subroutine d2a2c(im,jm,km, ifirst,ilast, jfirst,jlast, ng, bounded_domain, & +!!$ u,v, ua,va, uc,vc, gridstruct, domain, bd) +!!$ +!!$! Input +!!$ integer, intent(IN) :: im,jm,km +!!$ integer, intent(IN) :: ifirst,ilast +!!$ integer, intent(IN) :: jfirst,jlast +!!$ integer, intent(IN) :: ng +!!$ logical, intent(IN) :: bounded_domain +!!$ type(fv_grid_type), intent(IN), target :: gridstruct +!!$ type(domain2d), intent(INOUT) :: domain +!!$ +!!$ !real , intent(in) :: sinlon(im,jm) +!!$ !real , intent(in) :: coslon(im,jm) +!!$ !real , intent(in) :: sinl5(im,jm) +!!$ !real , intent(in) :: cosl5(im,jm) +!!$ +!!$! Output +!!$ ! real , intent(inout) :: u(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) +!!$ ! real , intent(inout) :: v(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) +!!$ ! real , intent(inout) :: ua(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) +!!$ ! real , intent(inout) :: va(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) +!!$ ! real , intent(inout) :: uc(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) +!!$ ! 
real , intent(inout) :: vc(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) +!!$ +!!$ real , intent(inout) :: u(isd:ied,jsd:jed+1) !ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) +!!$ real , intent(inout) :: v(isd:ied+1,jsd:jed) !ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) +!!$ real , intent(inout) :: ua(isd:ied,jsd:jed) !ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) +!!$ real , intent(inout) :: va(isd:ied,jsd:jed) !(ifirst-ng:ilast+ng,jfirst-ng:jlast+ng) +!!$ real , intent(inout) :: uc(isd:ied+1,jsd:jed) !(ifirst-ng:ilast+1+ng,jfirst-ng:jlast+ng) +!!$ real , intent(inout) :: vc(isd:ied,jsd:jed+1) !(ifirst-ng:ilast+ng,jfirst-ng:jlast+1+ng) +!!$ +!!$!-------------------------------------------------------------- +!!$! Local +!!$ +!!$ real :: sinlon(im,jm) +!!$ real :: coslon(im,jm) +!!$ real :: sinl5(im,jm) +!!$ real :: cosl5(im,jm) +!!$ +!!$ real :: tmp1(jsd:jed+1) +!!$ real :: tmp2(jsd:jed) +!!$ real :: tmp3(jsd:jed) +!!$ +!!$ real mag,mag1,mag2, ang,ang1,ang2 +!!$ real us, vs, un, vn +!!$ integer i, j, k, im2 +!!$ integer js1g1 +!!$ integer js2g1 +!!$ integer js2g2 +!!$ integer js2gc +!!$ integer js2gc1 +!!$ integer js2gcp1 +!!$ integer js2gd +!!$ integer jn2gc +!!$ integer jn1g1 +!!$ integer jn1g2 +!!$ integer jn2gd +!!$ integer jn2gsp1 +!!$ +!!$ real, pointer, dimension(:,:,:) :: agrid, grid +!!$ real, pointer, dimension(:,:) :: area, rarea, fC, f0 +!!$ real(kind=R_GRID), pointer, dimension(:,:,:) :: ee1, ee2, en1, en2 +!!$ real(kind=R_GRID), pointer, dimension(:,:,:,:) :: ew, es +!!$ real, pointer, dimension(:,:) :: dx,dy, dxa,dya, rdxa, rdya, dxc,dyc +!!$ +!!$ logical, pointer :: cubed_sphere, latlon +!!$ +!!$ logical, pointer :: have_south_pole, have_north_pole +!!$ +!!$ integer, pointer :: ntiles_g +!!$ real, pointer :: acapN, acapS, globalarea +!!$ +!!$ grid => gridstruct%grid +!!$ agrid=> gridstruct%agrid +!!$ +!!$ area => gridstruct%area +!!$ rarea => gridstruct%rarea +!!$ +!!$ fC => gridstruct%fC +!!$ f0 => gridstruct%f0 +!!$ +!!$ ee1 => gridstruct%ee1 +!!$ ee2 => gridstruct%ee2 +!!$ ew => gridstruct%ew +!!$ es => gridstruct%es +!!$ en1 => gridstruct%en1 +!!$ en2 => gridstruct%en2 +!!$ +!!$ dx => gridstruct%dx +!!$ dy => gridstruct%dy +!!$ dxa => gridstruct%dxa +!!$ dya => gridstruct%dya +!!$ rdxa => gridstruct%rdxa +!!$ rdya => gridstruct%rdya +!!$ dxc => gridstruct%dxc +!!$ dyc => gridstruct%dyc +!!$ +!!$ cubed_sphere => gridstruct%cubed_sphere +!!$ latlon => gridstruct%latlon +!!$ +!!$ have_south_pole => gridstruct%have_south_pole +!!$ have_north_pole => gridstruct%have_north_pole +!!$ +!!$ ntiles_g => gridstruct%ntiles_g +!!$ acapN => gridstruct%acapN +!!$ acapS => gridstruct%acapS +!!$ globalarea => gridstruct%globalarea +!!$ +!!$ if (cubed_sphere) then +!!$ +!!$ call dtoa( u, v,ua,va,dx,dy,dxa,dya,dxc,dyc,im,jm,ng) +!!$ if (.not. bounded_domain) call fill_corners(ua, va, im, jm, VECTOR=.true., AGRID=.true.) +!!$ call atoc(ua,va,uc,vc,dx,dy,dxa,dya,im,jm,ng, bounded_domain, domain, noComm=.true.) +!!$ if (.not. bounded_domain) call fill_corners(uc, vc, im, jm, VECTOR=.true., CGRID=.true.) +!!$ +!!$ else ! Lat-Lon +!!$ +!!$ im2 = im/2 +!!$ +!!$! Set loop limits +!!$ +!!$ js1g1 = jfirst-1 +!!$ js2g1 = jfirst-1 +!!$ js2g2 = jfirst-2 +!!$ js2gc = jfirst-ng +!!$ js2gcp1 = jfirst-ng-1 +!!$ js2gd = jfirst-ng +!!$ jn1g1 = jlast+1 +!!$ jn1g2 = jlast+2 +!!$ jn2gc = jlast+ng +!!$ jn2gd = jlast+ng-1 +!!$ jn2gsp1 = jlast+ng-1 +!!$ +!!$ if (have_south_pole) then +!!$ js1g1 = 1 +!!$ js2g1 = 2 +!!$ js2g2 = 2 +!!$ js2gc = 2 +!!$ js2gcp1 = 2 ! 
NG-1 latitudes on S (starting at 2) +!!$ js2gd = 2 +!!$ endif +!!$ if (have_north_pole) then +!!$ jn1g1 = jm +!!$ jn1g2 = jm +!!$ jn2gc = jm-1 ! NG latitudes on N (ending at jm-1) +!!$ jn2gd = jm-1 +!!$ jn2gsp1 = jm-1 +!!$ endif +!!$! +!!$! Treat the special case of ng = 1 +!!$! +!!$ if ( ng == 1 .AND. ng > 1 ) THEN +!!$ js2gc1 = js2gc +!!$ else +!!$ js2gc1 = jfirst-ng+1 +!!$ if (have_south_pole) js2gc1 = 2 ! NG-1 latitudes on S (starting at 2) +!!$ endif +!!$ +!!$ do k=1,km +!!$ +!!$ if ((have_south_pole) .or. (have_north_pole)) then +!!$! Get D-grid V-wind at the poles. +!!$ call vpol5(u(1:im,:), v(1:im,:), im, jm, & +!!$ coslon, sinlon, cosl5, sinl5, ng, ng, jfirst, jlast ) +!!$ call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, v(:,:)) +!!$ endif +!!$ +!!$ call dtoa(u, v, ua, va, dx,dy,dxa,dya,dxc,dyc,im, jm, ng) +!!$ if (.not. bounded_domain) call fill_corners(ua, va, im, jm, VECTOR=.true., AGRID=.true.) +!!$ +!!$ if ( have_south_pole ) then +!!$! Projection at SP +!!$ us = 0. +!!$ vs = 0. +!!$ do i=1,im2 +!!$ us = us + (ua(i+im2,2)-ua(i,2))*sinlon(i,2) & +!!$ + (va(i,2)-va(i+im2,2))*coslon(i,2) +!!$ vs = vs + (ua(i+im2,2)-ua(i,2))*coslon(i,2) & +!!$ + (va(i+im2,2)-va(i,2))*sinlon(i,2) +!!$ enddo +!!$ us = us/im +!!$ vs = vs/im +!!$! SP +!!$ do i=1,im2 +!!$ ua(i,1) = -us*sinlon(i,1) - vs*coslon(i,1) +!!$ va(i,1) = us*coslon(i,1) - vs*sinlon(i,1) +!!$ ua(i+im2,1) = -ua(i,1) +!!$ va(i+im2,1) = -va(i,1) +!!$ enddo +!!$ ua(0 ,1) = ua(im,1) +!!$ ua(im+1,1) = ua(1 ,1) +!!$ va(im+1,1) = va(1 ,1) +!!$ endif +!!$ +!!$ if ( have_north_pole ) then +!!$! Projection at NP +!!$ un = 0. +!!$ vn = 0. +!!$ j = jm-1 +!!$ do i=1,im2 +!!$ un = un + (ua(i+im2,j)-ua(i,j))*sinlon(i,j) & +!!$ + (va(i+im2,j)-va(i,j))*coslon(i,j) +!!$ vn = vn + (ua(i,j)-ua(i+im2,j))*coslon(i,j) & +!!$ + (va(i+im2,j)-va(i,j))*sinlon(i,j) +!!$ enddo +!!$ un = un/im +!!$ vn = vn/im +!!$! NP +!!$ do i=1,im2 +!!$ ua(i,jm) = -un*sinlon(i,jm) + vn*coslon(i,jm) +!!$ va(i,jm) = -un*coslon(i,jm) - vn*sinlon(i,jm) +!!$ ua(i+im2,jm) = -ua(i,jm) +!!$ va(i+im2,jm) = -va(i,jm) +!!$ enddo +!!$ ua(0 ,jm) = ua(im,jm) +!!$ ua(im+1,jm) = ua(1 ,jm) +!!$ va(im+1,jm) = va(1 ,jm) +!!$ endif +!!$ +!!$ if (latlon) call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, ua(:,:)) +!!$ if (latlon) call mp_ghost_ew(im,jm,1,1, ifirst,ilast, jfirst,jlast, 1,1, ng,ng, ng,ng, va(:,:)) +!!$ +!!$! A -> C +!!$ call atoc(ua, va, uc, vc, dx,dy,dxa,dya,im, jm, ng, bounded_domain, domain, noComm=.true.) +!!$ +!!$ enddo ! km loop +!!$ +!!$ if (.not. bounded_domain) call fill_corners(uc, vc, im, jm, VECTOR=.true., CGRID=.true.) +!!$ endif +!!$ +!!$ +!!$ end subroutine d2a2c +!!$ - end subroutine atob_s -! -! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! -!------------------------------------------------------------------------------- +!!$ subroutine atob_s(qin, qout, npx, npy, dxa, dya, bounded_domain, cubed_sphere, altInterp) +!!$ +!!$! atob_s :: interpolate scalar from the A-Grid to the B-grid +!!$! +!!$ integer, intent(IN) :: npx, npy +!!$ real , intent(IN) :: qin(isd:ied ,jsd:jed ) ! A-grid field +!!$ real , intent(OUT) :: qout(isd:ied+1,jsd:jed+1) ! 
Output B-grid field +!!$ integer, OPTIONAL, intent(IN) :: altInterp +!!$ logical, intent(IN) :: bounded_domain, cubed_sphere +!!$ real, intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya +!!$ +!!$ integer :: i,j,n +!!$ +!!$ real :: tmp1j(jsd:jed+1) +!!$ real :: tmp2j(jsd:jed+1) +!!$ real :: tmp3j(jsd:jed+1) +!!$ real :: tmp1i(isd:ied+1) +!!$ real :: tmp2i(isd:ied+1) +!!$ real :: tmp3i(isd:ied+1) +!!$ real :: tmpq(isd:ied ,jsd:jed ) +!!$ real :: tmpq1(isd:ied+1,jsd:jed+1) +!!$ real :: tmpq2(isd:ied+1,jsd:jed+1) +!!$ +!!$ if (present(altInterp)) then +!!$ +!!$ tmpq(:,:) = qin(:,:) +!!$ +!!$ if (.not. bounded_domain) call fill_corners(tmpq , npx, npy, FILL=XDir, AGRID=.true.) +!!$! ATOC +!!$ do j=jsd,jed +!!$ call interp_left_edge_1d(tmpq1(:,j), tmpq(:,j), dxa(:,j), isd, ied, altInterp) +!!$ enddo +!!$ +!!$ if (.not. bounded_domain) call fill_corners(tmpq , npx, npy, FILL=YDir, AGRID=.true.) +!!$! ATOD +!!$ do i=isd,ied +!!$ tmp1j(jsd:jed) = 0.0 +!!$ tmp2j(jsd:jed) = tmpq(i,jsd:jed) +!!$ tmp3j(jsd:jed) = dya(i,jsd:jed) +!!$ call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, altInterp) +!!$ tmpq2(i,jsd:jed) = tmp1j(jsd:jed) +!!$ enddo +!!$ +!!$! CTOB +!!$ do i=isd,ied +!!$ tmp1j(:) = tmpq1(i,:) +!!$ tmp2j(:) = tmpq1(i,:) +!!$ tmp3j(:) = 1.0 ! Uniform Weighting missing first value so will not reproduce +!!$ call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, altInterp) +!!$ tmpq1(i,:) = tmp1j(:) +!!$ enddo +!!$ +!!$! DTOB +!!$ do j=jsd,jed +!!$ tmp1i(:) = tmpq2(:,j) +!!$ tmp2i(:) = tmpq2(:,j) +!!$ tmp3i(:) = 1.0 ! Uniform Weighting missing first value so will not reproduce +!!$ call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, altInterp) +!!$ tmpq2(:,j) = tmp1i(:) +!!$ enddo +!!$ +!!$! Average +!!$ do j=jsd,jed+1 +!!$ do i=isd,ied+1 +!!$ qout(i,j) = 0.5 * (tmpq1(i,j) + tmpq2(i,j)) +!!$ enddo +!!$ enddo +!!$ +!!$! Fix Corners +!!$ if (cubed_sphere .and. .not. bounded_domain) then +!!$ i=1 +!!$ j=1 +!!$ if ( (is==i) .and. (js==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i,j) + qin(i-1,j) + qin(i,j-1)) +!!$ endif +!!$ +!!$ i=npx +!!$ j=1 +!!$ if ( (ie+1==i) .and. (js==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i-1,j) + qin(i-1,j-1) + qin(i,j)) +!!$ endif +!!$ +!!$ i=1 +!!$ j=npy +!!$ if ( (is==i) .and. (je+1==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i,j-1) + qin(i-1,j-1) + qin(i,j)) +!!$ endif +!!$ +!!$ i=npx +!!$ j=npy +!!$ if ( (ie+1==i) .and. (je+1==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i-1,j-1) + qin(i,j-1) + qin(i-1,j)) +!!$ endif +!!$ endif +!!$ +!!$ else ! altInterp +!!$ +!!$ do j=js,je+1 +!!$ do i=is,ie+1 +!!$ qout(i,j) = 0.25 * (qin(i-1,j) + qin(i-1,j-1) + & +!!$ qin(i ,j) + qin(i ,j-1)) +!!$ enddo +!!$ enddo +!!$ +!!$ if (.not. bounded_domain) then +!!$ i=1 +!!$ j=1 +!!$ if ( (is==i) .and. (js==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i,j) + qin(i-1,j) + qin(i,j-1)) +!!$ endif +!!$ +!!$ i=npx +!!$ j=1 +!!$ if ( (ie+1==i) .and. (js==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i-1,j) + qin(i-1,j-1) + qin(i,j)) +!!$ endif +!!$ +!!$ i=1 +!!$ j=npy +!!$ if ( (is==i) .and. (je+1==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i,j-1) + qin(i-1,j-1) + qin(i,j)) +!!$ endif +!!$ +!!$ i=npx +!!$ j=npy +!!$ if ( (ie+1==i) .and. (je+1==j) ) then +!!$ qout(i,j) = (1./3.) * (qin(i-1,j-1) + qin(i,j-1) + qin(i-1,j)) +!!$ endif +!!$ endif !not bounded_domain +!!$ +!!$ endif ! altInterp +!!$ +!!$ end subroutine atob_s +!!$! +!!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! 
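
Throughout the refactor that follows, atod/dtoa/atoc/ctoa and mp_update_dwinds stop reading the module-level isd/ied/jsd/jed and instead dimension their dummy arguments from a passed-in `bd` of `type(fv_grid_bounds_type)`, so each call can carry its own (possibly bounded-domain) bounds. Below is a minimal, self-contained sketch of that pattern; the type definition is a stand-in assuming only the component names these interfaces rely on.

      module bounds_sketch
        implicit none
        ! Stand-in for FV3's fv_grid_bounds_type: compute-domain limits
        ! (is/ie/js/je) plus the halo-extended data domain (isd/ied/jsd/jed).
        type :: fv_grid_bounds_type
           integer :: is, ie, js, je       ! compute domain
           integer :: isd, ied, jsd, jed   ! data (halo-extended) domain
        end type fv_grid_bounds_type
      contains
        subroutine fill_a_grid(q, bd)
          type(fv_grid_bounds_type), intent(in) :: bd
          ! Dummy dimensioned from the bounds object, as in atod/dtoa/atoc/ctoa:
          real, intent(inout) :: q(bd%isd:bd%ied, bd%jsd:bd%jed)
          integer :: i, j
          do j = bd%js, bd%je          ! touch only the compute domain;
             do i = bd%is, bd%ie       ! halo rows/columns are left alone
                q(i,j) = 1.
             enddo
          enddo
        end subroutine fill_a_grid
      end module bounds_sketch

      program bounds_demo
        use bounds_sketch
        implicit none
        type(fv_grid_bounds_type) :: bd
        real, allocatable :: q(:,:)
        bd = fv_grid_bounds_type(1, 8, 1, 8, -2, 11, -2, 11)  ! 3-point halo
        allocate(q(bd%isd:bd%ied, bd%jsd:bd%jed))
        q = 0.
        call fill_a_grid(q, bd)
        write(*,*) 'interior points set:', int(sum(q))        ! expect 64
      end program bounds_demo

Dimensioning the dummies from `bd` keeps the halo-extended storage (`bd%isd:bd%ied`) and the compute-domain loop limits (`bd%is:bd%ie`) explicit at every call site, the same distinction the `bounded_domain` branches in the routines below depend on.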
+!!$!------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! ! atod :: interpolate from the A-Grid to the D-grid ! - subroutine atod(uin, vin, uout, vout, dxa, dya, dxc, dyc, npx, npy, ng, nested, domain) - + subroutine atod(uin, vin, uout, vout, dxa, dya, dxc, dyc, npx, npy, ng, bounded_domain, domain, bd) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: npx, npy, ng - real , intent(IN) :: uin(isd:ied ,jsd:jed ) ! A-grid u-wind field - real , intent(IN) :: vin(isd:ied ,jsd:jed ) ! A-grid v-wind field - real , intent(OUT) :: uout(isd:ied ,jsd:jed+1) ! D-grid u-wind field - real , intent(OUT) :: vout(isd:ied+1,jsd:jed ) ! D-grid v-wind field - logical, intent(IN) :: nested - real , intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya - real , intent(IN), dimension(isd:ied+1,jsd:jed) :: dxc - real , intent(IN), dimension(isd:ied,jsd:jed+1) :: dyc + real , intent(IN) :: uin(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid u-wind field + real , intent(IN) :: vin(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid v-wind field + real , intent(OUT) :: uout(bd%isd:bd%ied ,bd%jsd:bd%jed+1) ! D-grid u-wind field + real , intent(OUT) :: vout(bd%isd:bd%ied+1,bd%jsd:bd%jed ) ! D-grid v-wind field + logical, intent(IN) :: bounded_domain + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: dxa, dya + real , intent(IN), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed) :: dxc + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1) :: dyc type(domain2d), intent(INOUT) :: domain integer :: i,j - real :: tmp1i(isd:ied+1) - real :: tmp2i(isd:ied) - real :: tmp3i(isd:ied) - real :: tmp1j(jsd:jed+1) - real :: tmp2j(jsd:jed) - real :: tmp3j(jsd:jed) + real :: tmp1i(bd%isd:bd%ied+1) + real :: tmp2i(bd%isd:bd%ied) + real :: tmp3i(bd%isd:bd%ied) + real :: tmp1j(bd%jsd:bd%jed+1) + real :: tmp2j(bd%jsd:bd%jed) + real :: tmp3j(bd%jsd:bd%jed) + + integer :: jsd, jed, isd, ied + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed do j=jsd+1,jed tmp1i(:) = 0.0 @@ -8435,8 +8698,8 @@ subroutine atod(uin, vin, uout, vout, dxa, dya, dxc, dyc, npx, npy, ng, nested, call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, interpOrder) uout(i,:) = tmp1j(:)/dyc(i,:) enddo - call mp_update_dwinds(uout, vout, npx, npy, domain) - if (.not. nested) call fill_corners(uout, vout, npx, npy, VECTOR=.true., DGRID=.true.) + call mp_update_dwinds(uout, vout, npx, npy, domain, bd) + if (.not. bounded_domain) call fill_corners(uout, vout, npx, npy, VECTOR=.true., DGRID=.true.) end subroutine atod ! ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! @@ -8447,25 +8710,38 @@ end subroutine atod ! ! dtoa :: interpolate from the D-Grid to the A-grid ! - subroutine dtoa(uin, vin, uout, vout, dx, dy, dxa, dya, dxc, dyc, npx, npy, ng) + subroutine dtoa(uin, vin, uout, vout, dx, dy, dxa, dya, dxc, dyc, npx, npy, ng, bd) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: npx, npy, ng - real , intent(IN) :: uin(isd:ied ,jsd:jed+1) ! D-grid u-wind field - real , intent(IN) :: vin(isd:ied+1,jsd:jed ) ! D-grid v-wind field - real , intent(OUT) :: uout(isd:ied ,jsd:jed ) ! A-grid u-wind field - real , intent(OUT) :: vout(isd:ied ,jsd:jed ) ! 
A-grid v-wind field - real , intent(IN), dimension(isd:ied,jsd:jed+1) :: dx, dyc - real , intent(IN), dimension(isd:ied+1,jsd:jed) :: dy, dxc - real , intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya + real , intent(IN) :: uin(bd%isd:bd%ied ,bd%jsd:bd%jed+1) ! D-grid u-wind field + real , intent(IN) :: vin(bd%isd:bd%ied+1,bd%jsd:bd%jed ) ! D-grid v-wind field + real , intent(OUT) :: uout(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid u-wind field + real , intent(OUT) :: vout(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid v-wind field + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1) :: dx, dyc + real , intent(IN), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed) :: dy, dxc + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: dxa, dya integer :: i,j,n - real :: tmp1i(isd:ied+1) - real :: tmp2i(isd:ied+1) - real :: tmp3i(isd:ied+1) - real :: tmp1j(jsd:jed+1) - real :: tmp2j(jsd:jed+1) - real :: tmp3j(jsd:jed+1) + real :: tmp1i(bd%isd:bd%ied+1) + real :: tmp2i(bd%isd:bd%ied+1) + real :: tmp3i(bd%isd:bd%ied+1) + real :: tmp1j(bd%jsd:bd%jed+1) + real :: tmp2j(bd%jsd:bd%jed+1) + real :: tmp3j(bd%jsd:bd%jed+1) + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed !CLEANUP: replace dxa with rdxa, and dya with rdya; may change numbers. #ifdef VORT_ON @@ -8481,14 +8757,14 @@ subroutine dtoa(uin, vin, uout, vout, dx, dy, dxa, dya, dxc, dyc, npx, npy, ng) tmp1j(:) = 0.0 tmp2j(:) = uin(i,:)*dyc(i,:) tmp3j(:) = dyc(i,:) - call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, interpOrder) + call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed+1, interpOrder) uout(i,jsd:jed) = tmp1j(jsd+1:jed+1)/dya(i,jsd:jed) enddo do j=jsd,jed tmp1i(:) = 0.0 tmp2i(:) = vin(:,j)*dxc(:,j) tmp3i(:) = dxc(:,j) - call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, interpOrder) + call interp_left_edge_1d(tmp1i, tmp2i, tmp3i, isd, ied+1, interpOrder) vout(isd:ied,j) = tmp1i(isd+1:ied+1)/dxa(isd:ied,j) enddo #endif @@ -8503,30 +8779,43 @@ end subroutine dtoa ! ! atoc :: interpolate from the A-Grid to the C-grid ! - subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, nested, domain, noComm) - + subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, bounded_domain, domain, bd, noComm) + type(fv_grid_bounds_type), intent(IN) :: bd integer, intent(IN) :: npx, npy, ng - real , intent(IN) :: uin(isd:ied ,jsd:jed ) ! A-grid u-wind field - real , intent(IN) :: vin(isd:ied ,jsd:jed ) ! A-grid v-wind field - real , intent(OUT) :: uout(isd:ied+1,jsd:jed ) ! C-grid u-wind field - real , intent(OUT) :: vout(isd:ied ,jsd:jed+1) ! C-grid v-wind field - logical, intent(IN) :: nested + real , intent(IN) :: uin(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid u-wind field + real , intent(IN) :: vin(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid v-wind field + real , intent(OUT) :: uout(bd%isd:bd%ied+1,bd%jsd:bd%jed ) ! C-grid u-wind field + real , intent(OUT) :: vout(bd%isd:bd%ied ,bd%jsd:bd%jed+1) ! 
C-grid v-wind field + logical, intent(IN) :: bounded_domain logical, OPTIONAL, intent(IN) :: noComm - real , intent(IN), dimension(isd:ied,jsd:jed+1) :: dx - real , intent(IN), dimension(isd:ied+1,jsd:jed) :: dy - real , intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1) :: dx + real , intent(IN), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed) :: dy + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: dxa, dya type(domain2d), intent(INOUT) :: domain real :: ang1 integer :: i,j,n - real :: tmp1i(isd:ied+1) - real :: tmp2i(isd:ied) - real :: tmp3i(isd:ied) - real :: tmp1j(jsd:jed+1) - real :: tmp2j(jsd:jed) - real :: tmp3j(jsd:jed) + real :: tmp1i(bd%isd:bd%ied+1) + real :: tmp2i(bd%isd:bd%ied) + real :: tmp3i(bd%isd:bd%ied) + real :: tmp1j(bd%jsd:bd%jed+1) + real :: tmp2j(bd%jsd:bd%jed) + real :: tmp3j(bd%jsd:bd%jed) + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + #if !defined(ALT_INTERP) #ifdef VORT_ON @@ -8553,7 +8842,7 @@ subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, nested, do tmp3j(:) = dya(i,:) call interp_left_edge_1d(tmp1j, tmp2j, tmp3j, jsd, jed, interpOrder) vout(i,:) = tmp1j(:) - enddo + enddo #endif #else @@ -8572,7 +8861,7 @@ subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, nested, do vout(i,:) = tmp1j(:)/dx(i,:) enddo - if (cubed_sphere .and. .not. nested) then + if (cubed_sphere .and. .not. bounded_domain) then csFac = COS(30.0*PI/180.0) ! apply Corner scale factor for interp on Cubed-Sphere if ( (is==1) .and. (js==1) ) then @@ -8616,7 +8905,7 @@ subroutine atoc(uin, vin, uout, vout, dx, dy, dxa, dya, npx, npy, ng, nested, do else call mpp_update_domains( uout,vout, domain, gridtype=CGRID_NE_PARAM, complete=.true.) endif - if (.not. nested) call fill_corners(uout, vout, npx, npy, VECTOR=.true., CGRID=.true.) + if (.not. bounded_domain) call fill_corners(uout, vout, npx, npy, VECTOR=.true., CGRID=.true.) end subroutine atoc ! @@ -8628,26 +8917,39 @@ end subroutine atoc ! ! ctoa :: interpolate from the C-Grid to the A-grid ! - subroutine ctoa(uin, vin, uout, vout, dx, dy, dxc, dyc, dxa, dya, npx, npy, ng) + subroutine ctoa(uin, vin, uout, vout, dx, dy, dxc, dyc, dxa, dya, npx, npy, ng, bd) - integer, intent(IN) :: npx, npy, ng - real , intent(IN) :: uin(isd:ied+1,jsd:jed ) ! C-grid u-wind field - real , intent(IN) :: vin(isd:ied ,jsd:jed+1) ! C-grid v-wind field - real , intent(OUT) :: uout(isd:ied ,jsd:jed ) ! A-grid u-wind field - real , intent(OUT) :: vout(isd:ied ,jsd:jed ) ! A-grid v-wind field - real , intent(IN), dimension(isd:ied+1,jsd:jed) :: dxc, dy - real , intent(IN), dimension(isd:ied,jsd:jed+1) :: dyc, dx - real , intent(IN), dimension(isd:ied,jsd:jed) :: dxa, dya + type(fv_grid_bounds_type), intent(IN) :: bd + integer, intent(IN) :: npx, npy, ng + real , intent(IN) :: uin(bd%isd:bd%ied+1,bd%jsd:bd%jed ) ! C-grid u-wind field + real , intent(IN) :: vin(bd%isd:bd%ied ,bd%jsd:bd%jed+1) ! C-grid v-wind field + real , intent(OUT) :: uout(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! A-grid u-wind field + real , intent(OUT) :: vout(bd%isd:bd%ied ,bd%jsd:bd%jed ) ! 
A-grid v-wind field + real , intent(IN), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed) :: dxc, dy + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed+1) :: dyc, dx + real , intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: dxa, dya integer :: i,j - real :: tmp1i(isd:ied+1) - real :: tmp2i(isd:ied+1) - real :: tmp3i(isd:ied+1) - real :: tmp1j(jsd:jed+1) - real :: tmp2j(jsd:jed+1) - real :: tmp3j(jsd:jed+1) + real :: tmp1i(bd%isd:bd%ied+1) + real :: tmp2i(bd%isd:bd%ied+1) + real :: tmp3i(bd%isd:bd%ied+1) + real :: tmp1j(bd%jsd:bd%jed+1) + real :: tmp2j(bd%jsd:bd%jed+1) + real :: tmp3j(bd%jsd:bd%jed+1) + + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed ! do j=jsd,jed ! do i=isd,ied @@ -8690,11 +8992,11 @@ subroutine rotate_winds(myU, myV, p1, p2, p3, p4, t1, ndims, dir) integer, intent(IN) :: ndims real , intent(INOUT) :: myU ! u-wind field real , intent(INOUT) :: myV ! v-wind field - real(kind=R_GRID) , intent(IN) :: p1(ndims) ! p4 - real(kind=R_GRID) , intent(IN) :: p2(ndims) ! + real(kind=R_GRID) , intent(IN) :: p1(ndims) ! p4 + real(kind=R_GRID) , intent(IN) :: p2(ndims) ! real(kind=R_GRID) , intent(IN) :: p3(ndims) ! p1 t1 p3 - real(kind=R_GRID) , intent(IN) :: p4(ndims) ! - real(kind=R_GRID) , intent(IN) :: t1(ndims) ! p2 + real(kind=R_GRID) , intent(IN) :: p4(ndims) ! + real(kind=R_GRID) , intent(IN) :: t1(ndims) ! p2 integer, intent(IN) :: dir ! Direction ; 1=>sphere-to-cube 2=> cube-to-sphere real(kind=R_GRID) :: ee1(3), ee2(3), ee3(3), elon(3), elat(3) @@ -8721,7 +9023,7 @@ subroutine rotate_winds(myU, myV, p1, p2, p3, p4, t1, ndims, dir) newu = myU*g11 + myV*g12 newv = myU*g21 + myV*g22 else - newu = ( myU*g22 - myV*g12)/(g11*g22 - g21*g12) + newu = ( myU*g22 - myV*g12)/(g11*g22 - g21*g12) newv = (-myU*g21 + myV*g11)/(g11*g22 - g21*g12) endif myU = newu @@ -8729,15 +9031,16 @@ subroutine rotate_winds(myU, myV, p1, p2, p3, p4, t1, ndims, dir) end subroutine rotate_winds - subroutine mp_update_dwinds_2d(u, v, npx, npy, domain) + subroutine mp_update_dwinds_2d(u, v, npx, npy, domain, bd) use mpp_parameter_mod, only: DGRID_NE - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1) ! D-grid u-wind field - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ) ! D-grid v-wind field + type(fv_grid_bounds_type), intent(IN) :: bd + real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1) ! D-grid u-wind field + real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ) ! D-grid v-wind field integer, intent(IN) :: npx, npy type(domain2d), intent(INOUT) :: domain call mpp_update_domains( u, v, domain, gridtype=DGRID_NE, complete=.true.) -! if (.not. nested) call fill_corners(u , v , npx, npy, VECTOR=.true., DGRID=.true.) +! if (.not. bounded_domain) call fill_corners(u , v , npx, npy, VECTOR=.true., DGRID=.true.) end subroutine mp_update_dwinds_2d ! @@ -8747,17 +9050,18 @@ end subroutine mp_update_dwinds_2d !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! - subroutine mp_update_dwinds_3d(u, v, npx, npy, npz, domain) + subroutine mp_update_dwinds_3d(u, v, npx, npy, npz, domain, bd) use mpp_parameter_mod, only: DGRID_NE - real , intent(INOUT) :: u(isd:ied ,jsd:jed+1,npz) ! D-grid u-wind field - real , intent(INOUT) :: v(isd:ied+1,jsd:jed ,npz) ! 
D-grid v-wind field + type(fv_grid_bounds_type), intent(IN) :: bd + real , intent(INOUT) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! D-grid u-wind field + real , intent(INOUT) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) ! D-grid v-wind field integer, intent(IN) :: npx, npy, npz type(domain2d), intent(INOUT) :: domain integer k call mpp_update_domains( u, v, domain, gridtype=DGRID_NE, complete=.true.) ! do k=1,npz -! if (.not. nested) call fill_corners(u(isd:,jsd:,k) , v(isd:,jsd:,k) , npx, npy, VECTOR=.true., DGRID=.true.) +! if (.not. bounded_domain) call fill_corners(u(isd:,jsd:,k) , v(isd:,jsd:,k) , npx, npy, VECTOR=.true., DGRID=.true.) ! enddo end subroutine mp_update_dwinds_3d @@ -8768,7 +9072,7 @@ end subroutine mp_update_dwinds_3d ! gsum :: get global sum ! real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, jsd, jed, gridstruct, tile) result (gsum) - + integer, intent(IN) :: npx, npy integer, intent(IN) :: ifirst, ilast integer, intent(IN) :: jfirst, jlast @@ -8824,9 +9128,9 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js allocate(p_r8(npx-1,npy-1,ntiles_g)) gsum = 0. - - if (latlon) then - j1 = 2 + + if (latlon) then + j1 = 2 j2 = npy-2 !!! WARNING: acapS and acapN have NOT been initialized. gsum = gsum + p(1,1)*acapS @@ -8838,7 +9142,7 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js enddo else - do n=tile,tile + do n=tile,tile do j=jfirst,jlast do i=ifirst,ilast p_R8(i,j,n) = p(i,j)*area(i,j) @@ -8861,7 +9165,7 @@ real function globalsum(p, npx, npy, ifirst, ilast, jfirst, jlast, isd, ied, js endif deallocate(p_r8) - + end function globalsum @@ -8869,9 +9173,9 @@ subroutine get_unit_vector( p1, p2, p3, uvect ) real(kind=R_GRID), intent(in):: p1(2), p2(2), p3(2) ! input position unit vectors (spherical coordinates) real(kind=R_GRID), intent(out):: uvect(3) ! output unit spherical cartesian ! local - integer :: n + integer :: n real(kind=R_GRID) :: xyz1(3), xyz2(3), xyz3(3) - real :: dp(3) + real :: dp(3) call spherical_to_cartesian(p1(1), p1(2), one, xyz1(1), xyz1(2), xyz1(3)) call spherical_to_cartesian(p2(1), p2(2), one, xyz2(1), xyz2(2), xyz2(3)) @@ -8926,7 +9230,7 @@ subroutine mp_ghost_ew(im, jm, km, nq, ifirst, ilast, jfirst, jlast, & ! ! !DESCRIPTION: ! -! Ghost 4d east/west +! Ghost 4d east/west ! ! !REVISION HISTORY: ! 2005.08.22 Putman @@ -8965,11 +9269,11 @@ end subroutine mp_ghost_ew !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! -! +! ! interp_left_edge_1d :: interpolate to left edge of a cell either ! order = 1 -> Linear average ! order = 2 -> Uniform PPM -! order = 3 -> Non-Uniform PPM +! order = 3 -> Non-Uniform PPM ! subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order) integer, intent(in):: ifirst,ilast @@ -8980,26 +9284,26 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order) integer :: i real :: dm(ifirst:ilast),qmax,qmin - real :: r3, da1, da2, a6da, a6, al, ar + real :: r3, da1, da2, a6da, a6, al, ar real :: qLa, qLb1, qLb2 real :: x r3 = 1./3. - qout(:) = 0.0 - if (order==1) then + qout(:) = 0.0 + if (order==1) then ! 1st order Uniform linear averaging do i=ifirst+1,ilast qout(i) = 0.5 * (qin(i-1) + qin(i)) enddo elseif (order==2) then -! Non-Uniform 1st order average +! 
Non-Uniform 1st order average do i=ifirst+1,ilast qout(i) = (dx(i-1)*qin(i-1) + dx(i)*qin(i))/(dx(i-1)+dx(i)) enddo - elseif (order==3) then + elseif (order==3) then -! PPM - Uniform +! PPM - Uniform do i=ifirst+1,ilast-1 dm(i) = 0.25*(qin(i+1) - qin(i-1)) enddo @@ -9055,12 +9359,12 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order) enddo elseif (order==5) then - + ! Linear Spline do i=ifirst+1,ilast-1 - x = FLOAT(i-(ifirst+1))*FLOAT(ilast-ifirst+1-1)/FLOAT(ilast-ifirst-1) + x = FLOAT(i-(ifirst+1))*FLOAT(ilast-ifirst+1-1)/FLOAT(ilast-ifirst-1) qout(i) = qin(ifirst+NINT(x)) + (x - NINT(x)) * (qin(ifirst+NINT(x+1)) - qin(ifirst+NINT(x))) - ! if (tile==1) print*, ifirst+NINT(x+1), ifirst+NINT(x), (x - NINT(x)) + ! if (tile==1) print*, ifirst+NINT(x+1), ifirst+NINT(x), (x - NINT(x)) ! if (tile==1) print*, 0.5*(qin(i-1)+qin(i)), qout(i) enddo @@ -9077,7 +9381,7 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order) end subroutine interp_left_edge_1d !------------------------------------------------------------------------------ -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- !BOP ! subroutine vpol5(u, v, im, jm, coslon, sinlon, cosl5, sinl5, & @@ -9098,9 +9402,9 @@ subroutine vpol5(u, v, im, jm, coslon, sinlon, cosl5, sinl5, & ! !DESCRIPTION: ! -! Treat the V winds at the poles. This requires an average +! Treat the V winds at the poles. This requires an average ! of the U- and V-winds, weighted by their angles of incidence -! at the pole points. +! at the pole points. ! ! !REVISION HISTORY: ! @@ -9228,7 +9532,7 @@ subroutine var_dz(km, ztop, ze) s_fac(km ) = 0.25 s_fac(km-1) = 0.30 s_fac(km-2) = 0.50 - s_fac(km-3) = 0.70 + s_fac(km-3) = 0.70 s_fac(km-4) = 0.90 s_fac(km-5) = 1. 
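      ! The s_fac values set above taper the layer-thickness weights from 1 in
      ! the interior down to 0.25 at the lowest level, concentrating vertical
      ! resolution toward the surface as the edge heights ze are built.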
do k=km-6, 5, -1 From 6b6870f3bb4e39ac0a0e1c82b00b3cf812f39460 Mon Sep 17 00:00:00 2001 From: bensonr <6594772+bensonr@users.noreply.github.com> Date: Fri, 21 Feb 2020 12:36:00 -0500 Subject: [PATCH 08/14] Master test (#18) * commit of new version of dycore from Weather and Climate Dynamics Group at GFDL * updated versions of GFDL-specific files from dev/gfdl * updated README.md with current release information * cleaned up a few lines in fv_dynamics * new file RELEASE.md with release notes documenting differences between this and the last release * updated RELEASE.md message * hand merge of diagnostic updates * remove trailing spaces from sources * updates to merge some GFDL specific updates into this public release * updated README.md * updated GFDL_tools/fv_cmip_diag to be consistent with dev/gfdl branch --- GFDL_tools/fv_cmip_diag.F90 | 147 +++++++++++++++++++++++++++++++++--- README.md | 24 +++--- 2 files changed, 147 insertions(+), 24 deletions(-) diff --git a/GFDL_tools/fv_cmip_diag.F90 b/GFDL_tools/fv_cmip_diag.F90 index 39cee2e4d..91c5d40f9 100644 --- a/GFDL_tools/fv_cmip_diag.F90 +++ b/GFDL_tools/fv_cmip_diag.F90 @@ -28,7 +28,7 @@ module fv_cmip_diag_mod use fms_io_mod, only: set_domain, nullify_domain, string use time_manager_mod, only: time_type use mpp_domains_mod, only: domain2d -use diag_manager_mod, only: register_diag_field, & +use diag_manager_mod, only: register_diag_field, diag_axis_init, & send_data, get_diag_field_id, & register_static_field, & diag_field_add_attribute, & @@ -36,13 +36,13 @@ module fv_cmip_diag_mod use diag_data_mod, only: CMOR_MISSING_VALUE, null_axis_id use tracer_manager_mod, only: get_tracer_index use field_manager_mod, only: MODEL_ATMOS -use constants_mod, only: GRAV +use constants_mod, only: GRAV, RDGAS use fv_mapz_mod, only: E_Flux use fv_arrays_mod, only: fv_atmos_type use fv_diagnostics_mod, only: interpolate_vertical, & get_height_given_pressure, & - rh_calc, get_height_field + rh_calc, get_height_field, get_vorticity use atmos_cmip_diag_mod, only: register_cmip_diag_field_2d, & register_cmip_diag_field_3d, & @@ -58,7 +58,7 @@ module fv_cmip_diag_mod public :: fv_cmip_diag_init, fv_cmip_diag, fv_cmip_diag_end -integer :: sphum +integer :: sphum, nql, nqi, nqa !----------------------------------------------------------------------- !--- namelist --- @@ -71,12 +71,15 @@ module fv_cmip_diag_mod type(cmip_diag_id_type) :: ID_ta, ID_ua, ID_va, ID_hus, ID_hur, ID_wap, ID_zg, & ID_u2, ID_v2, ID_t2, ID_wap2, ID_uv, ID_ut, ID_vt, & - ID_uwap, ID_vwap, ID_twap + ID_uwap, ID_vwap, ID_twap, ID_wa, & + ID_cls, ID_clws, ID_clis + integer :: id_ps, id_orog integer :: id_ua200, id_va200, id_ua850, id_va850, & id_ta500, id_ta700, id_ta850, id_zg500, & id_zg100, id_zg10, id_zg1000, & id_hus850, id_wap500, id_ua10 +integer :: id_rv200, id_rv500, id_rv850, id_vortmean character(len=5) :: mod_name = 'atmos' @@ -107,6 +110,7 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) integer, dimension(7) :: id_plevels integer, parameter :: id_p10=1, id_p100=2, id_p200=3, id_p500=4, id_p700=5, id_p850=6, id_p1000=7 character(len=4) :: plabel +integer :: id_pl700, id_pl700_bnds, id_nv !----------------------------------------------------------------------- if (module_is_initialized) then @@ -151,6 +155,9 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) !----------------------------------------------------------------------- sphum = get_tracer_index (MODEL_ATMOS, 'sphum') + nql = get_tracer_index (MODEL_ATMOS, 'liq_wat') + nqi = get_tracer_index (MODEL_ATMOS, 'ice_wat') 
+ nqa = get_tracer_index (MODEL_ATMOS, 'cld_amt') !----------------------------------------------------------------------- ! register cmip 3D variables (on model levels and pressure levels) @@ -173,6 +180,9 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) ID_hur = register_cmip_diag_field_3d (mod_name, 'hur', Time, & 'Relative Humidity', '%', standard_name='relative_humidity') + ID_wa = register_cmip_diag_field_3d (mod_name, 'wa', Time, & + 'Upward Air Velocity', 'm s-1', standard_name='upward_air_velocity') + ID_zg = register_cmip_diag_field_3d (mod_name, 'zg', Time, & 'Geopotential Height', 'm', standard_name='geopotential_height', axis='half') @@ -215,6 +225,24 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) 'Air Temperature times Omega', 'K m s-1', & standard_name='product_of_omega_and_air_temperature') +!----------------------------------------------------------------------- +! stratiform cloud tracers + + if (nql > 0) then + ID_clws = register_cmip_diag_field_3d (mod_name, 'clws', Time, & + 'Mass Fraction of Stratiform Cloud Liquid Water', '1.0', & + standard_name='mass_fraction_of_stratiform_cloud_liquid_water_in_air') + endif + if (nqi > 0) then + ID_clis = register_cmip_diag_field_3d (mod_name, 'clis', Time, & + 'Mass Fraction of Stratiform Cloud Ice', '1.0', & + standard_name='mass_fraction_of_convective_cloud_ice_in_air') + endif + if (nqa > 0) then + ID_cls = register_cmip_diag_field_3d (mod_name, 'cls', Time, & + 'Percentage Cover of Stratiform Cloud', '%', & + standard_name='stratiform_cloud_area_fraction_in_atmosphere_layer') + endif !----------------------------------------------------------------------- ! 2D fields @@ -232,14 +260,14 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) id_orog = register_static_field (mod_name, 'orog', axes(1:2), & 'Surface Altitude', 'm', & standard_name='surface_altitude', & - area=area_id) + area=area_id, interp_method='conserve_order1') if (id_orog > 0) used = send_data (id_orog, Atm(n)%phis(isc:iec,jsc:jec)/GRAV, Time) #else !--- for now output this as 'zsurf' from fv_diagnostics --- ! id_orog = register_diag_field (mod_name, 'orog', axes(1:2), Time, & ! 'Surface Altitude', 'm', & ! standard_name='surface_altitude', & -! area=area_id) +! area=area_id, interp_method='conserve_order1') #endif !----------------------------------------------------------------------- @@ -259,6 +287,24 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) endif enddo + id_pl700 = register_static_field (mod_name, 'pl700', (/null_axis_id/), & + '700 hPa Average', 'Pa', standard_name='air_pressure') + if (id_pl700 > 0) then + call diag_field_add_attribute (id_pl700, 'axis', 'Z') + call diag_field_add_attribute (id_pl700, 'positive', 'down' ) + call diag_field_add_attribute (id_pl700, 'comment', 'average at levels 600,700,850 hPa' ) + ! add bounds + id_nv = diag_axis_init('nv', (/1.,2./), 'none', 'N', 'vertex number', set_name='nv') + id_pl700_bnds = register_static_field (mod_name, 'pl700_bnds', (/id_nv,null_axis_id/), & + '700 hPa boundaries', 'Pa', standard_name='air_pressure') + if (id_pl700_bnds > 0) then + call diag_field_add_attribute (id_pl700, 'bounds', 'pl700_bnds' ) + used = send_data (id_pl700_bnds, (/850.e2,600.e2/), Time) + endif + used = send_data (id_pl700, 700.e2, Time) + endif + + !---- register field on single pressure levels ---- id_ua10 = register_cmip_diag_field_2d (mod_name, 'ua10', Time, & @@ -311,6 +357,30 @@ subroutine fv_cmip_diag_init ( Atm, axes, Time ) if (id_hus850 > 0 .and. 
id_plevels(id_p850) > 0) & call diag_field_add_attribute (id_hus850, 'coordinates', 'p850') + !---- relative vorticity at 200, 500, 850 hPa ---- + id_rv200 = register_cmip_diag_field_2d (mod_name, 'rv200', Time, & + 'Relative Vorticity at 200 hPa', 's-1', standard_name='atmosphere_relative_vorticity') + if (id_rv200 > 0 .and. id_plevels(id_p200) > 0) & + call diag_field_add_attribute (id_rv200, 'coordinates', 'p200') + + id_rv500 = register_cmip_diag_field_2d (mod_name, 'rv500', Time, & + 'Relative Vorticity at 500 hPa', 's-1', standard_name='atmosphere_relative_vorticity') + if (id_rv500 > 0 .and. id_plevels(id_p500) > 0) & + call diag_field_add_attribute (id_rv500, 'coordinates', 'p500') + + id_rv850 = register_cmip_diag_field_2d (mod_name, 'rv850', Time, & + 'Relative Vorticity at 850 hPa', 's-1', standard_name='atmosphere_relative_vorticity') + if (id_rv850 > 0 .and. id_plevels(id_p850) > 0) & + call diag_field_add_attribute (id_rv850, 'coordinates', 'p850') + + !---- mean relative vorticity 600, 700, 850 hPa ---- + + id_vortmean = register_cmip_diag_field_2d (mod_name, 'vortmean', Time, & + 'Mean Relative Vorticity over 600-850 hPa', 's-1', & + standard_name='atmosphere_relative_vorticity') + if (id_vortmean > 0 .and. id_pl700 > 0) & + call diag_field_add_attribute (id_vortmean, 'coordinates', 'pl700') + !---- omega at 500 hPa ---- id_wap500 = register_cmip_diag_field_2d (mod_name, 'wap500', Time, & @@ -357,15 +427,18 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) integer :: isc, iec, jsc, jec, n, i, j, k, id integer :: ngc, npz logical :: used +logical :: compute_wa , compute_rh real, dimension(Atm(1)%bd%isc:Atm(1)%bd%iec, & - Atm(1)%bd%jsc:Atm(1)%bd%jec) :: pfull, dat2 + Atm(1)%bd%jsc:Atm(1)%bd%jec) :: pfull, dat2, & + rv850, rv700, rv600 + real, dimension(Atm(1)%bd%isc:Atm(1)%bd%iec, & Atm(1)%bd%jsc:Atm(1)%bd%jec,1) :: dat3 real, dimension(Atm(1)%bd%isc:Atm(1)%bd%iec, & Atm(1)%bd%jsc:Atm(1)%bd%jec, & - Atm(1)%npz) :: rhum + Atm(1)%npz) :: rhum, wa, rv real, dimension(Atm(1)%bd%isc:Atm(1)%bd%iec, & Atm(1)%bd%jsc:Atm(1)%bd%jec, & @@ -384,26 +457,45 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) call set_domain(Atm(n)%domain) + ! set flags for computing quantities + compute_rh = .false. + compute_wa = .false. + if (count(ID_hur%field_id(:)>0) > 0) compute_rh = .true. + if (count(ID_wa%field_id(:)>0) > 0) compute_wa = .true. + ! compute relative humidity at model levels (if needed) - if (count(ID_hur%field_id(:)>0) > 0) then + if (compute_rh .or. compute_wa) then do k=1,npz do j=jsc,jec do i=isc,iec pfull(i,j) = Atm(n)%delp(i,j,k)/(Atm(n)%peln(i,k+1,j)-Atm(n)%peln(i,k,j)) enddo enddo - call rh_calc (pfull, Atm(n)%pt(isc:iec,jsc:jec,k), & + ! compute relative humidity + if (compute_rh) then + call rh_calc (pfull, Atm(n)%pt(isc:iec,jsc:jec,k), & Atm(n)%q(isc:iec,jsc:jec,k,sphum), rhum(isc:iec,jsc:jec,k), do_cmip=.true.) + endif + ! compute vertical velocity + if (compute_wa) then + wa(isc:iec,jsc:jec,k) = -(Atm(n)%omga(isc:iec,jsc:jec,k)*Atm(n)%pt(isc:iec,jsc:jec,k)/ & + pfull(isc:iec,jsc:jec))*(RDGAS/GRAV) + endif enddo endif - ! height field (wz) if needed if (count(ID_zg%field_id(:)>0) > 0 .or. any((/id_zg10,id_zg100,id_zg500,id_zg1000/) > 0)) then call get_height_field(isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%delz, & wz, Atm(n)%pt, Atm(n)%q, Atm(n)%peln, zvir) endif + ! 
relative vorticity + if (any((/id_rv200,id_rv500,id_rv850,id_vortmean/) > 0)) then + call get_vorticity(isc, iec, jsc, jec, Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed, npz, & + Atm(n)%u, Atm(n)%v, rv, Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, Atm(n)%gridstruct%rarea) + endif + !---------------------------------------------------------------------- ! process 2D fields @@ -431,6 +523,10 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) if (query_cmip_diag_id(ID_hur)) & used = send_cmip_data_3d (ID_hur, rhum(isc:iec,jsc:jec,:), Time, phalf=Atm(n)%peln, opt=1) + ! vertical velocity + if (query_cmip_diag_id(ID_wa)) & + used = send_cmip_data_3d (ID_wa, wa(isc:iec,jsc:jec,:), Time, phalf=Atm(n)%peln, opt=1) + ! geopotential height if (query_cmip_diag_id(ID_zg)) & used = send_cmip_data_3d (ID_zg, wz, Time, phalf=Atm(n)%peln, opt=1, ext=.true.) @@ -478,6 +574,13 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) used = send_cmip_data_3d (ID_twap, Atm(n)%pt (isc:iec,jsc:jec,:)*Atm(n)%omga(isc:iec,jsc:jec,:), & Time, phalf=Atm(n)%peln, opt=1) +!---------------------------------------------------------------------- +! stratiform cloud tracers (only on model levels) + + if (query_cmip_diag_id(ID_cls)) used = send_cmip_data_3d (ID_cls, Atm(n)%q(isc:iec,jsc:jec,:,nqa)*100., Time) + if (query_cmip_diag_id(ID_clws)) used = send_cmip_data_3d (ID_clws, Atm(n)%q(isc:iec,jsc:jec,:,nql), Time) + if (query_cmip_diag_id(ID_clis)) used = send_cmip_data_3d (ID_clis, Atm(n)%q(isc:iec,jsc:jec,:,nqi), Time) + !---------------------------------------------------------------------- ! process 2D fields on specific pressure levels ! @@ -541,6 +644,26 @@ subroutine fv_cmip_diag ( Atm, zvir, Time ) used = send_data (id_wap500, dat2, Time) endif + if (id_rv200 > 0) then + call interpolate_vertical (isc, iec, jsc, jec, npz, 200.e2, Atm(n)%peln, rv, dat2) + used = send_data (id_rv200, dat2, Time) + endif + + if (id_rv500 > 0) then + call interpolate_vertical (isc, iec, jsc, jec, npz, 500.e2, Atm(n)%peln, rv, dat2) + used = send_data (id_rv500, dat2, Time) + endif + + if (id_rv850 > 0 .or. id_vortmean > 0 ) then + call interpolate_vertical (isc, iec, jsc, jec, npz, 850.e2, Atm(n)%peln, rv, rv850) + if (id_rv850 > 0) used = send_data (id_rv850, rv850, Time) + if (id_vortmean > 0) then + call interpolate_vertical (isc, iec, jsc, jec, npz, 600.e2, Atm(n)%peln, rv, rv600) + call interpolate_vertical (isc, iec, jsc, jec, npz, 700.e2, Atm(n)%peln, rv, rv700) + used = send_data (id_vortmean, (rv600+rv700+rv850)/3., Time) + endif + endif + if (id_zg10 > 0) then call get_height_given_pressure (isc, iec, jsc, jec, npz, wz, 1, (/id_zg10/), & (/log(10.e2)/), Atm(n)%peln, dat3) diff --git a/README.md b/README.md index 9eeb7d3f6..4aaef30f1 100644 --- a/README.md +++ b/README.md @@ -6,26 +6,26 @@ The GFDL Microphysics is also available via this repository. # Where to find information -See the [FV3 documentation and references](https://www.gfdl.noaa.gov/fv3/fv3-documentation-and-references/) for more information. +See the [FV3 documentation and references](https://www.gfdl.noaa.gov/fv3/fv3-documentation-and-references/) +for more information. # Proper usage attribution -Cite either Putman and Lin (2007) or Harris and Lin (2013) when describing a model using the FV3 dynamical core. -Cite Chen et al (2013) and Zhou et al (2019) if using the GFDL Microphysics. +Cite Putman and Lin (2007) and Harris and Lin (2013) when describing a model using the FV3 dynamical core. 
+Cite Chen et al (2013) and Zhou et al (2019) when using the GFDL Microphysics. # What files are what The top level directory structure groups source code and input files as follow: -| File/directory | Purpose | -| -------------- | ------- | -| ```LICENSE.md``` | a copy of the Gnu lesser general public license, version 3. | -| ```README.md``` | this file with basic pointers to more information | -| ```model/``` | contains the source code for core of the FV3 dyanmical core | -| ```model_nh/``` | contains the source code for non-hydrostatic extensions | -| ```driver/``` | contains drivers used by different models/modeling systems | -| ```tools/``` | contains source code of tools used within the core | -| ```GFDL_tools/``` | contains source code of tools specific to GFDL models | +| File/directory | Purpose | +| -------------- | ------- | +| ```LICENSE.md``` | a copy of the Gnu lesser general public license, version 3. | +| ```README.md``` | this file with basic pointers to more information | +| ```model/``` | contains the source code for core of the FV3 dyanmical core | +| ```driver/``` | contains drivers used by different models/modeling systems | +| ```tools/``` | contains source code of tools used within the core | +| ```GFDL_tools/``` | contains source code of tools specific to GFDL models | # Disclaimer From 972251989d8d187ec8ea39a9f97e88865a0d39e2 Mon Sep 17 00:00:00 2001 From: lharris4 <53020884+lharris4@users.noreply.github.com> Date: Tue, 14 Apr 2020 09:39:46 -0400 Subject: [PATCH 09/14] Bug fix for two-way nest updating (#21) --- model/boundary.F90 | 36 +++++----- model/fv_control.F90 | 85 ++++++++++++++++++---- model/fv_nesting.F90 | 164 ++++++------------------------------------- 3 files changed, 110 insertions(+), 175 deletions(-) diff --git a/model/boundary.F90 b/model/boundary.F90 index 9b3c7a056..b16216b38 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -2306,6 +2306,8 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are position = CENTER end if + !Note that *_c does not have values on the parent_proc. + !Must use isu, etc. to get bounds of update region on parent. call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, nest_level=nest_level, position=position) if (child_proc) then allocate(coarse_dat_send(is_c:ie_c, js_c:je_c,npz)) @@ -2332,9 +2334,9 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are s = r/2 !rounds down (since r > 0) qr = r*upoff + nsponge - s - if (parent_proc .and. .not. (ie_c < is_c .or. je_c < js_c)) then + if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then call fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, & - is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid) + isu, ieu, jsu, jeu, npx, npy, npz, istag, jstag, nestupdate, parent_grid) endif if (allocated(coarse_dat_recv)) deallocate(coarse_dat_recv) @@ -2454,14 +2456,14 @@ subroutine fill_coarse_data_send(coarse_dat_send, var_nest, dx, dy, area, & end subroutine fill_coarse_data_send subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, & - is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid) + isu, ieu, jsu, jeu, npx, npy, npz, istag, jstag, nestupdate, parent_grid) !This routine assumes the coarse and nested grids are properly ! aligned, and that in particular for odd refinement ratios all ! 
coarse-grid cells (faces) coincide with nested-grid cells (faces) integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p - integer, intent(IN) :: is_c, ie_c, js_c, je_c + integer, intent(IN) :: isu, ieu, jsu, jeu integer, intent(IN) :: istag, jstag integer, intent(IN) :: npx, npy, npz, nestupdate real, intent(INOUT) :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag,npz) @@ -2475,10 +2477,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,2,6,7,8) ! 1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update -!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz - do j=js_c,je_c - do i=is_c,ie_c + do j=jsu,jeu + do i=isu,ieu var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rarea(i,j) end do end do @@ -2498,10 +2500,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,6,7,8) -!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz - do j=js_c,je_c+1 - do i=is_c,ie_c + do j=jsu,jeu+1 + do i=isu,ieu var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdx(i,j) end do end do @@ -2518,10 +2520,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average -!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz - do j=js_c,je_c - do i=is_c,ie_c+1 + do j=jsu,jeu + do i=isu,ieu+1 var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdy(i,j) end do end do @@ -2611,13 +2613,13 @@ subroutine update_coarse_grid_mpp_vector(u_coarse, v_coarse, u_nest, v_nest, nes s = r/2 !rounds down (since r > 0) qr = r*upoff + nsponge - s - if (parent_proc .and. .not. (ie_cx < is_cx .or. je_cx < js_cx)) then + if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then call fill_var_coarse(u_coarse, coarse_dat_recv_u, isd_p, ied_p, jsd_p, jed_p, & - is_cx, ie_cx, js_cx, je_cx, npx, npy, npz, istag_u, jstag_u, nestupdate, parent_grid) + isu, ieu, jsu, jeu, npx, npy, npz, istag_u, jstag_u, nestupdate, parent_grid) endif - if (parent_proc .and. .not. (ie_cy < is_cy .or. je_cy < js_cy)) then + if (parent_proc .and. .not. (ieu < isu .or. 
jeu < jsu)) then call fill_var_coarse(v_coarse, coarse_dat_recv_v, isd_p, ied_p, jsd_p, jed_p, & - is_cy, ie_cy, js_cy, je_cy, npx, npy, npz, istag_v, jstag_v, nestupdate, parent_grid) + isu, ieu, jsu, jeu, npx, npy, npz, istag_v, jstag_v, nestupdate, parent_grid) endif if (allocated(coarse_dat_recv_u)) deallocate(coarse_dat_recv_u) diff --git a/model/fv_control.F90 b/model/fv_control.F90 index 29fc68420..efa33224d 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -103,11 +103,12 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) integer, dimension(MAX_NNEST) :: grid_pes = 0 integer, dimension(MAX_NNEST) :: grid_coarse = -1 integer, dimension(MAX_NNEST) :: nest_refine = 3 - integer, dimension(MAX_NNEST) :: nest_ioffsets = -999, nest_joffsets = -999 + integer, dimension(MAX_NNEST) :: nest_ioffsets, nest_joffsets integer, dimension(MAX_NNEST) :: all_npx = 0 integer, dimension(MAX_NNEST) :: all_npy = 0 integer, dimension(MAX_NNEST) :: all_npz = 0 integer, dimension(MAX_NNEST) :: all_ntiles = 0 + integer, dimension(MAX_NNEST) :: all_twowaynest = 0 ! > 0 implies two-way !integer, dimension(MAX_NNEST) :: tile_fine = 0 integer, dimension(MAX_NNEST) :: icount_coarse = 1 integer, dimension(MAX_NNEST) :: jcount_coarse = 1 @@ -468,6 +469,8 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) all_npz(this_grid) = npz call mpp_max(all_npz, ngrids, global_pelist) + if (Atm(this_grid)%neststruct%twowaynest) all_twowaynest(this_grid) = 1 + call mpp_max(all_twowaynest, ngrids, global_pelist) ntiles_nest_all = 0 do n=1,ngrids if (n/=this_grid) then @@ -475,6 +478,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) Atm(n)%flagstruct%npy = all_npy(n) Atm(n)%flagstruct%npz = all_npz(n) Atm(n)%flagstruct%ntiles = all_ntiles(n) + Atm(n)%neststruct%twowaynest = (all_twowaynest(n) > 0) ! disabled endif npes_nest_tile(ntiles_nest_all+1:ntiles_nest_all+all_ntiles(n)) = & Atm(n)%npes_this_grid / all_ntiles(n) @@ -494,7 +498,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) endif enddo - if (mpp_pe() == 0) then + if (mpp_pe() == 0 .and. ngrids > 1) then print*, ' NESTING TREE' do n=1,ngrids write(*,'(12i4)') n, nest_level(n), nest_ioffsets(n), nest_joffsets(n), icount_coarse(n), jcount_coarse(n), tile_fine(n), tile_coarse(n), nest_refine(n), all_ntiles(n), all_npx(n), all_npy(n) @@ -564,24 +568,20 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) endif - allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) !only temporary? + allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) do n=1,ngrids Atm(this_grid)%neststruct%child_grids(n) = (grid_coarse(n) == this_grid) allocate(Atm(n)%neststruct%do_remap_bc(ngrids)) Atm(n)%neststruct%do_remap_bc(:) = .false. 
enddo - Atm(this_grid)%neststruct%parent_proc = ANY(tile_coarse == Atm(this_grid)%global_tile) - !Atm(this_grid)%neststruct%child_proc = ANY(Atm(this_grid)%pelist == gid) !this means a nested grid -!!$ if (Atm(this_grid)%neststruct%nestbctype > 1) then -!!$ call mpp_error(FATAL, 'nestbctype > 1 not yet implemented') -!!$ Atm(this_grid)%neststruct%upoff = 0 -!!$ endif -!!$ end if -!!$ -!!$ do nn=1,size(Atm) -!!$ if (n == 1) allocate(Atm(nn)%neststruct%nest_domain_all(size(Atm))) -!!$ Atm(nn)%neststruct%nest_domain_all(n) = Atm(this_grid)%neststruct%nest_domain -!!$ enddo + Atm(this_grid)%neststruct%parent_proc = ANY(Atm(this_grid)%neststruct%child_grids) !ANY(tile_coarse == Atm(this_grid)%global_tile) + Atm(this_grid)%neststruct%child_proc = ASSOCIATED(Atm(this_grid)%parent_grid) !this means a nested grid + + if (ngrids > 1) call setup_update_regions + if (Atm(this_grid)%neststruct%nestbctype > 1) then + call mpp_error(FATAL, 'nestbctype > 1 not yet implemented') + Atm(this_grid)%neststruct%upoff = 0 + endif if (Atm(this_grid)%gridstruct%bounded_domain .and. is_master()) print*, & ' Bounded domain: nested = ', Atm(this_grid)%neststruct%nested, ', regional = ', Atm(this_grid)%flagstruct%regional @@ -1045,6 +1045,61 @@ subroutine read_namelist_fv_core_nml(Atm) end subroutine read_namelist_fv_core_nml + subroutine setup_update_regions + + integer :: isu, ieu, jsu, jeu ! update regions + integer :: isc, jsc, iec, jec + integer :: upoff + + isc = Atm(this_grid)%bd%isc + jsc = Atm(this_grid)%bd%jsc + iec = Atm(this_grid)%bd%iec + jec = Atm(this_grid)%bd%jec + + upoff = Atm(this_grid)%neststruct%upoff + + do n=2,ngrids + write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 0: ', mpp_pe(), tile_coarse(n), Atm(this_grid)%global_tile + if (tile_coarse(n) == Atm(this_grid)%global_tile) then + + isu = nest_ioffsets(n) + ieu = isu + icount_coarse(n) - 1 + jsu = nest_joffsets(n) + jeu = jsu + jcount_coarse(n) - 1 + + !update offset adjustment + isu = isu + upoff + ieu = ieu - upoff + jsu = jsu + upoff + jeu = jeu - upoff + + !restriction to current domain +!!$ !!! DEBUG CODE +!!$ if (Atm(this_grid)%flagstruct%fv_debug) then +!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS : ', isu, jsu, ieu, jeu +!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 2: ', isc, jsc, iec, jsc +!!$ endif +!!$ !!! END DEBUG CODE + if (isu > iec .or. ieu < isc .or. & + jsu > jec .or. jeu < jsc ) then + isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000 + else + isu = max(isu,isc) ; jsu = max(jsu,jsc) + ieu = min(ieu,iec) ; jeu = min(jeu,jec) + endif +!!$ !!! DEBUG CODE +!!$ if (Atm(this_grid)%flagstruct%fv_debug) & +!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 3: ', isu, jsu, ieu, jeu +!!$ !!! END DEBUG CODE + + Atm(n)%neststruct%isu = isu + Atm(n)%neststruct%ieu = ieu + Atm(n)%neststruct%jsu = jsu + Atm(n)%neststruct%jeu = jeu + endif + enddo + + end subroutine setup_update_regions end subroutine fv_control_init diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index dd5d1011b..aab034ef3 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -65,9 +65,6 @@ module fv_nesting_mod contains -!!!!NOTE: Later we can add a flag to see if remap BCs are needed -!!! if not we can save some code complexity and cycles by skipping it - subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & u, v, w, pt, delp, delz,q, uc, vc, & #ifdef USE_COND @@ -863,9 +860,6 @@ subroutine setup_eul_delp_BC_k(delplagBC, delpeulBC, pelagBC, peeulBC, ptop_src, character(len=120) :: errstring -!!$!!! 
DEBUG CODE -!!$ write(*, '(A, 7I5)') 'setup_eul_delp_BC_k', mpp_pe(), isd_BC, ied_BC, istart, iend, lbound(pelagBC,1), ubound(pelagBC,1) -!!$!!! END DEBUG CODE !$OMP parallel do default(none) shared(istart,iend,jstart,jend,pelagBC,ptop_src) do j=jstart,jend @@ -2286,6 +2280,10 @@ end subroutine twoway_nesting !!!CLEANUP: this routine assumes that the PARENT GRID has pt = (regular) temperature, !!!not potential temperature; which may cause problems when updating if this is not the case. + +!!! NOTE ALSO: parent_grid%flagstruct is NOT SET UP by default and may be missing much information +!!! Either make sure that parent_grid%flagstruct is filled in fv_control or that proper steps +!!! are taken to make sure null flags are not used subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & u, v, w, pt, delp, q, & pe, pkz, delz, ps, ptop, ak, bk, & @@ -2359,7 +2357,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & !If pt is actual temperature, set conv_theta to .false. if (present(conv_theta_in)) conv_theta = conv_theta_in - if ((.not. neststruct%parent_proc) .and. (.not. neststruct%child_proc)) return + if ((.not. parent_grid%neststruct%parent_proc) .and. (.not. neststruct%child_proc)) return call mpp_get_data_domain( parent_grid%domain, & isd_p, ied_p, jsd_p, jed_p ) @@ -2388,7 +2386,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & endif enddo - if (neststruct%parent_proc .and. is_master() .and. first_timestep) then + if (parent_grid%neststruct%parent_proc .and. is_master() .and. first_timestep) then print*, ' TWO-WAY BLENDING WEIGHTS' ph2 = parent_grid%ak(1) do k=1,parent_grid%npz @@ -2400,130 +2398,6 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & first_timestep = .false. endif - - !!! RENORMALIZATION UPDATE OPTION - if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 7 .and. neststruct%nestupdate /= 8) then - -!!$ allocate(qdp_coarse(isd_p:ied_p,jsd_p:jed_p,npz)) -!!$ if (parent_grid%flagstruct%nwat > 0) then -!!$ allocate(q_diff(isd_p:ied_p,jsd_p:jed_p,npz)) -!!$ q_diff = 0. -!!$ endif -!!$ -!!$ do n=1,parent_grid%flagstruct%nwat -!!$ -!!$ qdp_coarse = 0. -!!$ if (neststruct%child_proc) then -!!$ do k=1,npz -!!$ do j=jsd,jed -!!$ do i=isd,ied -!!$ qdp(i,j,k) = q(i,j,k,n)*delp(i,j,k) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ else -!!$ qdp = 0. -!!$ endif -!!$ -!!$ if (neststruct%parent_proc) then -!!$ !Add up ONLY region being replaced by nested grid -!!$ do k=1,npz -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ qdp_coarse(i,j,k) = parent_grid%q(i,j,k,n)*parent_grid%delp(i,j,k) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & -!!$ parent_grid%bd, npz, L_sum_b) -!!$ else -!!$ qdp_coarse = 0. 
-!!$ endif -!!$ if (neststruct%parent_proc) then -!!$ if (n <= parent_grid%flagstruct%nwat) then -!!$ do k=1,npz -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ q_diff(i,j,k) = q_diff(i,j,k) - qdp_coarse(i,j,k) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ endif -!!$ endif -!!$ -!!$ call mpp_update_domains(qdp, domain) -!!$ call update_coarse_grid(var_src, qdp, global_nest_domain, & -!!$ gridstruct%dx, gridstruct%dy, gridstruct%area, & -!!$ bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & -!!$ neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & -!!$ npx, npy, npz, 0, 0, & -!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, & -!!$ neststruct%parent_proc, neststruct%child_proc, parent_grid) -!!$ if (neststruct%parent_proc) call remap_up_k(ps0, parent_grid%ps, & -!!$ ak, bk, parent_grid%ak, parent_grid%bk, var_src, qdp_coarse, & -!!$ parent_grid%bd, neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & -!!$ 0, 0, npz, parent_grid%npz, 0, parent_grid%flagstruct%kord_tr, blend_wt, log_pe=.false.) -!!$ -!!$ call mpp_sync!self -!!$ -!!$ if (neststruct%parent_proc) then -!!$ call level_sum(qdp_coarse, parent_grid%gridstruct%area, parent_grid%domain, & -!!$ parent_grid%bd, npz, L_sum_a) -!!$ do k=1,npz -!!$ if (L_sum_a(k) > 0.) then -!!$ fix = L_sum_b(k)/L_sum_a(k) -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ !Normalization mass fixer -!!$ parent_grid%q(i,j,k,n) = qdp_coarse(i,j,k)*fix -!!$ enddo -!!$ enddo -!!$ endif -!!$ enddo -!!$ if (n == 1) sphum_ll_fix = 1. - fix -!!$ endif -!!$ if (neststruct%parent_proc) then -!!$ if (n <= parent_grid%flagstruct%nwat) then -!!$ do k=1,npz -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ q_diff(i,j,k) = q_diff(i,j,k) + parent_grid%q(i,j,k,n) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ endif -!!$ endif -!!$ -!!$ end do -!!$ -!!$ if (neststruct%parent_proc) then -!!$ if (parent_grid%flagstruct%nwat > 0) then -!!$ do k=1,npz -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ parent_grid%delp(i,j,k) = parent_grid%delp(i,j,k) + q_diff(i,j,k) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ endif -!!$ -!!$ do n=1,parent_grid%flagstruct%nwat -!!$ do k=1,npz -!!$ do j=jsu,jeu -!!$ do i=isu,ieu -!!$ parent_grid%q(i,j,k,n) = parent_grid%q(i,j,k,n)/parent_grid%delp(i,j,k) -!!$ enddo -!!$ enddo -!!$ enddo -!!$ enddo -!!$ endif -!!$ -!!$ deallocate(qdp_coarse) -!!$ if (allocated(q_diff)) deallocate(q_diff) - - endif - !!! END RENORMALIZATION UPDATE - #ifndef SW_DYNAMICS if (neststruct%nestupdate /= 3 .and. neststruct%nestupdate /= 8) then @@ -2561,7 +2435,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, npz, 0, 0, & neststruct%refinement, neststruct%nestupdate, upoff, 0, & - neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) if (neststruct%child_proc) deallocate(t_nest) else if (neststruct%child_proc) call mpp_update_domains(pt, domain, complete=.true.) @@ -2573,14 +2447,18 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, npz, 0, 0, & neststruct%refinement, neststruct%nestupdate, upoff, 0, & - neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) endif !conv_theta call mpp_sync!self - if (.not. 
flagstruct%hydrostatic) then + !We don't currently have a good way to communicate all namelist items between + ! grids (since we cannot assume that we have internal namelists available), so + ! we get the clutzy structure here. + if ( (neststruct%child_proc .and. .not. flagstruct%hydrostatic) .or. & + (parent_grid%neststruct%parent_proc .and. .not. parent_grid%flagstruct%hydrostatic) ) then allocate(w_src(isd_p:ied_p,jsd_p:jed_p,npz)) w_src = -999. @@ -2590,7 +2468,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, npz, 0, 0, & neststruct%refinement, neststruct%nestupdate, upoff, 0, & - neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) call mpp_sync!self !Updating for delz not yet implemented; @@ -2598,7 +2476,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & ! consider updating specific volume instead? !!$ call update_coarse_grid(parent_grid%delz, delz, global_nest_domain, & !!$ bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, & -!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc) +!!$ neststruct%refinement, neststruct%nestupdate, upoff, 0, parent_grid%neststruct%parent_proc, neststruct%child_proc) end if @@ -2616,7 +2494,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, npz, 0, 1, 1, 0, & neststruct%refinement, neststruct%nestupdate, upoff, 0, & - neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1, gridtype=DGRID_NE) + parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1, gridtype=DGRID_NE) call mpp_sync() @@ -2629,7 +2507,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & !Re-compute nested (AND COARSE) grid ps allocate(ps0(isd_p:ied_p,jsd_p:jed_p)) - if (neststruct%parent_proc) then + if (parent_grid%neststruct%parent_proc) then parent_grid%ps = parent_grid%ptop !$OMP parallel do default(none) shared(jsd_p,jed_p,isd_p,ied_p,parent_grid) @@ -2663,13 +2541,13 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + neststruct%refinement, neststruct%nestupdate, upoff, 0, parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) !!! The mpp version of update_coarse_grid does not return a consistent value of ps !!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This !!! update_domains call takes care of the problem. - if (neststruct%parent_proc) then + if (parent_grid%neststruct%parent_proc) then call mpp_update_domains(parent_grid%ps, parent_grid%domain, complete=.false.) call mpp_update_domains(ps0, parent_grid%domain, complete=.true.) endif @@ -2678,7 +2556,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & if (parent_grid%global_tile == neststruct%parent_tile) then - if (neststruct%parent_proc) then + if (parent_grid%neststruct%parent_proc) then !comment out if statement to always remap theta instead of t in the remap-update. 
!(In LtE typically we use remap_t = .true.: remapping t is better (except in @@ -2736,7 +2614,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & isc_p, iec_p, jsc_p, jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, blend_wt) - endif !neststruct%parent_proc + endif !parent_grid%neststruct%parent_proc end if From 35b7384c7be7abced5cbf53900774a659303fee9 Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Mon, 20 Apr 2020 15:36:23 -0400 Subject: [PATCH 10/14] remove trailing whitespace and any tabs --- model/boundary.F90 | 8 ++--- model/fv_control.F90 | 2 +- model/fv_nesting.F90 | 4 +-- model/tp_core.F90 | 4 +-- tools/external_ic.F90 | 4 +-- tools/test_cases.F90 | 84 +++++++++++++++++++++---------------------- 6 files changed, 53 insertions(+), 53 deletions(-) diff --git a/model/boundary.F90 b/model/boundary.F90 index b16216b38..69e740ee5 100644 --- a/model/boundary.F90 +++ b/model/boundary.F90 @@ -2306,7 +2306,7 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are position = CENTER end if - !Note that *_c does not have values on the parent_proc. + !Note that *_c does not have values on the parent_proc. !Must use isu, etc. to get bounds of update region on parent. call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, nest_level=nest_level, position=position) if (child_proc) then @@ -2477,7 +2477,7 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,2,6,7,8) ! 1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update -!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz do j=jsu,jeu do i=isu,ieu @@ -2500,7 +2500,7 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,6,7,8) -!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz do j=jsu,jeu+1 do i=isu,ieu @@ -2520,7 +2520,7 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed select case (nestupdate) case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average -!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) +!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse) do k=1,npz do j=jsu,jeu do i=isu,ieu+1 diff --git a/model/fv_control.F90 b/model/fv_control.F90 index efa33224d..c8cee43a0 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -568,7 +568,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) endif - allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) + allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) do n=1,ngrids Atm(this_grid)%neststruct%child_grids(n) = (grid_coarse(n) == this_grid) allocate(Atm(n)%neststruct%do_remap_bc(ngrids)) diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index aab034ef3..b6877d81e 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -1785,7 +1785,7 @@ end subroutine set_BCs_t0 subroutine d2c_setup(u, v, & ua, va, & - uc, vc, dord4, & + 
uc, vc, dord4, & isd,ied,jsd,jed, is,ie,js,je, npx,npy, & grid_type, bounded_domain, & se_corner, sw_corner, ne_corner, nw_corner, & @@ -2455,7 +2455,7 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & !We don't currently have a good way to communicate all namelist items between - ! grids (since we cannot assume that we have internal namelists available), so + ! grids (since we cannot assume that we have internal namelists available), so ! we get the clutzy structure here. if ( (neststruct%child_proc .and. .not. flagstruct%hydrostatic) .or. & (parent_grid%neststruct%parent_proc .and. .not. parent_grid%flagstruct%hydrostatic) ) then diff --git a/model/tp_core.F90 b/model/tp_core.F90 index 0846ea567..5219cf47c 100644 --- a/model/tp_core.F90 +++ b/model/tp_core.F90 @@ -128,7 +128,7 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & ord_ou = hord if (.not. gridstruct%bounded_domain) & - call copy_corners(q, npx, npy, 2, gridstruct%bounded_domain, bd, & + call copy_corners(q, npx, npy, 2, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) call yppm(fy2, q, cry, ord_in, isd,ied,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dya, gridstruct%bounded_domain, gridstruct%grid_type) @@ -147,7 +147,7 @@ subroutine fv_tp_2d(q, crx, cry, npx, npy, hord, fx, fy, xfx, yfx, & call xppm(fx, q_i, crx(is,js), ord_ou, is,ie,isd,ied, js,je,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%bounded_domain, gridstruct%grid_type) if (.not. gridstruct%bounded_domain) & - call copy_corners(q, npx, npy, 1, gridstruct%bounded_domain, bd, & + call copy_corners(q, npx, npy, 1, gridstruct%bounded_domain, bd, & gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) call xppm(fx2, q, crx, ord_in, is,ie,isd,ied, jsd,jed,jsd,jed, npx,npy, gridstruct%dxa, gridstruct%bounded_domain, gridstruct%grid_type) diff --git a/tools/external_ic.F90 b/tools/external_ic.F90 index de747b7ee..774f6f694 100644 --- a/tools/external_ic.F90 +++ b/tools/external_ic.F90 @@ -655,7 +655,7 @@ subroutine get_nggps_ic (Atm, fv_domain) Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & Atm%flagstruct%n_zs_filter, cnst_0p20*Atm%gridstruct%da_min, & .false., oro_g, Atm%gridstruct%bounded_domain, & - Atm%domain, Atm%bd) + Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-2 terrain filter has been applied ', & Atm%flagstruct%n_zs_filter, ' times' else if( Atm%flagstruct%nord_zs_filter == 4 ) then @@ -663,7 +663,7 @@ subroutine get_nggps_ic (Atm, fv_domain) Atm%gridstruct%dx, Atm%gridstruct%dy, & Atm%gridstruct%dxc, Atm%gridstruct%dyc, Atm%gridstruct%sin_sg, & Atm%flagstruct%n_zs_filter, .false., oro_g, & - Atm%gridstruct%bounded_domain, & + Atm%gridstruct%bounded_domain, & Atm%domain, Atm%bd) if ( is_master() ) write(*,*) 'Warning !!! del-4 terrain filter has been applied ', & Atm%flagstruct%n_zs_filter, ' times' diff --git a/tools/test_cases.F90 b/tools/test_cases.F90 index 6efd29519..ea77c2c0f 100644 --- a/tools/test_cases.F90 +++ b/tools/test_cases.F90 @@ -6279,26 +6279,26 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, call hydro_eq(npz, is, ie, js, je, ps, phis, dry_mass, & delp, ak, bk, pt, delz, area, ng, .false., hydrostatic, hybrid_z, domain) - ! 
*** Add Initial perturbation *** - if (bubble_do) then - r0 = 100.*sqrt(dx_const**2 + dy_const**2) - icenter = npx/2 - jcenter = npy/2 - - do j=js,je - do i=is,ie - dist = (i-icenter)*dx_const*(i-icenter)*dx_const & - +(j-jcenter)*dy_const*(j-jcenter)*dy_const - dist = min(r0, sqrt(dist)) - do k=1,npz - prf = ak(k) + ps(i,j)*bk(k) - if ( prf > 100.E2 ) then - pt(i,j,k) = pt(i,j,k) + 0.01*(1. - (dist/r0)) * prf/ps(i,j) - endif - enddo - enddo - enddo - endif + ! *** Add Initial perturbation *** + if (bubble_do) then + r0 = 100.*sqrt(dx_const**2 + dy_const**2) + icenter = npx/2 + jcenter = npy/2 + + do j=js,je + do i=is,ie + dist = (i-icenter)*dx_const*(i-icenter)*dx_const & + +(j-jcenter)*dy_const*(j-jcenter)*dy_const + dist = min(r0, sqrt(dist)) + do k=1,npz + prf = ak(k) + ps(i,j)*bk(k) + if ( prf > 100.E2 ) then + pt(i,j,k) = pt(i,j,k) + 0.01*(1. - (dist/r0)) * prf/ps(i,j) + endif + enddo + enddo + enddo + endif if ( hydrostatic ) then call p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, & pe, peln, pk, pkz, kappa, q, ng, ncnst, area, dry_mass, .false., .false., & @@ -6645,26 +6645,26 @@ subroutine init_double_periodic(u,v,w,pt,delp,q,phis, ps,pe,peln,pk,pkz, uc,vc, .true., hydrostatic, nwat, domain, flagstruct%adiabatic) ! *** Add Initial perturbation *** - if (bubble_do) then - r0 = 10.e3 - zc = 1.4e3 ! center of bubble from surface - icenter = (npx-1)/2 + 1 - jcenter = (npy-1)/2 + 1 - do k=1, npz - zm = 0.5*(ze1(k)+ze1(k+1)) - ptmp = ( (zm-zc)/zc ) **2 - if ( ptmp < 1. ) then - do j=js,je - do i=is,ie - dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/r0)**2 - if ( dist < 1. ) then - pt(i,j,k) = pt(i,j,k) + pturb*(1.-sqrt(dist)) - endif - enddo - enddo - endif - enddo - endif + if (bubble_do) then + r0 = 10.e3 + zc = 1.4e3 ! center of bubble from surface + icenter = (npx-1)/2 + 1 + jcenter = (npy-1)/2 + 1 + do k=1, npz + zm = 0.5*(ze1(k)+ze1(k+1)) + ptmp = ( (zm-zc)/zc ) **2 + if ( ptmp < 1. ) then + do j=js,je + do i=is,ie + dist = ptmp+((i-icenter)*dx_const/r0)**2+((j-jcenter)*dy_const/r0)**2 + if ( dist < 1. ) then + pt(i,j,k) = pt(i,j,k) + pturb*(1.-sqrt(dist)) + endif + enddo + enddo + endif + enddo + endif case ( 101 ) @@ -9374,8 +9374,8 @@ subroutine interp_left_edge_1d(qout, qin, dx, ifirst, ilast, order) !!$ enddo - call mp_stop - stop + call mp_stop + stop endif From 902cf782670a772270a064e407308510fed88e81 Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Mon, 20 Apr 2020 15:36:52 -0400 Subject: [PATCH 11/14] update the RELEASE.md with the FV3 technical memorandum --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 40c37d10b..d0dd29a63 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -28,4 +28,4 @@ The non-functional gfdl_cloud_microphys.F90 has been removed and replaced with t The namelist nggps_diag_nml has been eliminated. 'fdiag' is no longer handled by the dynamical core, and should be handled by the physics driver. -For a complete technical description see the [forthcoming] GFDL Technical Memorandum. +For a complete technical description see the NOAA Technical Memorandum OAR GFDL: https://repository.library.noaa.gov/view/noaa/23432. 
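A brief technical aside before the remaining housekeeping patches: patch 09 rewires the index bounds fed to fill_var_coarse but leaves the restriction operator itself untouched. For cell-centered fields, fill_coarse_data_send ships area-integrated nest data and fill_var_coarse recovers the parent value by multiplying by the inverse coarse-cell area (rarea). Collapsed to a single coarse cell, and assuming the fine-cell areas tile the coarse cell exactly, that is an area-weighted mean. The sketch below is illustrative only; the function name and the r-by-r block shape are not part of the FV3 source.

      ! Area-weighted restriction of one coarse cell from an r x r block of
      ! fine (nest) cells. Mirrors var_coarse = coarse_dat_recv * rarea once
      ! coarse_dat_recv holds sum(var_fine*area_fine) over the block.
      real function coarse_cell_mean(var_fine, area_fine, r) result(vc)
        implicit none
        integer, intent(in) :: r
        real,    intent(in) :: var_fine(r,r), area_fine(r,r)
        vc = sum(var_fine * area_fine) / sum(area_fine)
      end function coarse_cell_mean

The staggered cases in fill_var_coarse follow the same pattern with line integrals instead: the D-grid u update divides by the face length dx (via rdx) and the v update by dy (via rdy), giving length-weighted face means rather than area-weighted cell means.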
From dcacb69d4e76aafaf5ea7e55fd3f0955804c4a1c Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Mon, 20 Apr 2020 15:38:24 -0400 Subject: [PATCH 12/14] semantic fix in RELEASE.md --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index d0dd29a63..85f7df54d 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -28,4 +28,4 @@ The non-functional gfdl_cloud_microphys.F90 has been removed and replaced with t The namelist nggps_diag_nml has been eliminated. 'fdiag' is no longer handled by the dynamical core, and should be handled by the physics driver. -For a complete technical description see the NOAA Technical Memorandum OAR GFDL: https://repository.library.noaa.gov/view/noaa/23432. +For a complete technical description see the NOAA Technical Memorandum OAR GFDL: https://repository.library.noaa.gov/view/noaa/23432 From 5a9ea433c3004da22969da0820f43776bd72995c Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Mon, 20 Apr 2020 16:41:49 -0400 Subject: [PATCH 13/14] adds default values for nest_*offsets in fv_control breaks up a too long line in fv_nesting.F90 --- model/fv_control.F90 | 2 +- model/fv_nesting.F90 | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/model/fv_control.F90 b/model/fv_control.F90 index c8cee43a0..5f67f344b 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -103,7 +103,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split) integer, dimension(MAX_NNEST) :: grid_pes = 0 integer, dimension(MAX_NNEST) :: grid_coarse = -1 integer, dimension(MAX_NNEST) :: nest_refine = 3 - integer, dimension(MAX_NNEST) :: nest_ioffsets, nest_joffsets + integer, dimension(MAX_NNEST) :: nest_ioffsets = -999, nest_joffsets = -999 integer, dimension(MAX_NNEST) :: all_npx = 0 integer, dimension(MAX_NNEST) :: all_npy = 0 integer, dimension(MAX_NNEST) :: all_npz = 0 diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index b6877d81e..d5d214a4a 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -2541,7 +2541,9 @@ subroutine twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, & bd, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, & neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu, & npx, npy, 0, 0, & - neststruct%refinement, neststruct%nestupdate, upoff, 0, parent_grid%neststruct%parent_proc, neststruct%child_proc, parent_grid, grid_number-1) + neststruct%refinement, neststruct%nestupdate, upoff, 0, & + parent_grid%neststruct%parent_proc, neststruct%child_proc, & + parent_grid, grid_number-1) !!! The mpp version of update_coarse_grid does not return a consistent value of ps !!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This From 2baae594783027bbb6d3222eeb172c699ccfdb93 Mon Sep 17 00:00:00 2001 From: "Rusty.Benson" Date: Mon, 20 Apr 2020 22:30:40 -0400 Subject: [PATCH 14/14] change default value of nestupdate to 7 --- model/fv_arrays.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index e112817a6..fddeaf635 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -584,7 +584,7 @@ module fv_arrays_mod logical :: nested = .false. integer :: nestbctype = 1 integer :: nsponge = 0 - integer :: nestupdate = 0 + integer :: nestupdate = 7 logical :: twowaynest = .false. integer :: ioffset, joffset !Position of nest within parent grid integer :: nlevel = 0 ! levels down from top-most domain
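To close, a sketch tying together patch 09's setup_update_regions and the -999 offset defaults restored in patch 13. The i-direction bookkeeping reduces to the self-contained program below; every numeric input is hypothetical, chosen only to make the offset and clipping steps visible, and in the model these values come from the nesting namelist and the domain decomposition.

      program update_region_sketch
        implicit none
        integer :: isu, ieu           ! update region in i on this parent PE
        integer :: nest_ioffset       ! first parent cell covered by the nest
        integer :: icount_coarse      ! number of parent cells the nest covers
        integer :: upoff              ! cells dropped at the nest edge
        integer :: isc, iec           ! this PE's compute domain in i

        nest_ioffset  = 11            ! hypothetical values throughout
        icount_coarse = 24
        upoff         = 2
        isc = 25 ; iec = 48

        isu = nest_ioffset                 ! 11
        ieu = isu + icount_coarse - 1      ! 34
        isu = isu + upoff                  ! 13: skip cells nearest the nest edge
        ieu = ieu - upoff                  ! 32
        if (isu > iec .or. ieu < isc) then
           isu = -999 ; ieu = -1000        ! empty region: forces ieu < isu
        else
           isu = max(isu, isc)             ! 25: clip to this PE's compute domain
           ieu = min(ieu, iec)             ! 32
        endif
        write(*,*) 'update region in i:', isu, ieu   ! prints 25 32
      end program update_region_sketch

Patch 09 applies the same arithmetic in j, with a single combined emptiness test for both directions, and the sentinel values make the guard in update_coarse_grid_mpp, if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)), skip the fill entirely on PEs whose compute domain does not touch the nest.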