From 6172d21584d266d73e7f548754bb65de21846c67 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Sun, 18 Oct 2020 21:45:02 -0600 Subject: [PATCH 01/45] Add modifications needed for SE dycore configuration and namelist generation. --- cime_config/buildnml | 108 ++++++++++++++++++++++- cime_config/cam_config.py | 109 ++++++++++++++++++++---- cime_config/config_component.xml | 1 + cime_config/namelist_definition_cam.xml | 20 +++-- 4 files changed, 213 insertions(+), 25 deletions(-) diff --git a/cime_config/buildnml b/cime_config/buildnml index 54404ddd..d57bc4e2 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -39,6 +39,12 @@ _LOGGER = logging.getLogger(__name__) #HELPER FUNCTIONS ################# +class CamBuildnmlError(ValueError): + """Class used to handle CAM buildnml errors + (e.g., log user errors without backtrace)""" + +################## + # This simplifies the filename mangling for different cases. def _create_ic_filename(inst_string, i_or_r, run_refcase, run_refdate, run_reftod): @@ -50,7 +56,7 @@ def _create_ic_filename(inst_string, i_or_r, def nml_attr_set(config): """ - creates a dictionary of namelist attributes + Creates a dictionary of namelist attributes from the CAM config object, in order to properly set namelist defaults. """ @@ -68,13 +74,92 @@ def nml_attr_set(config): #is also a namelist attribute: if conf.is_nml_attr: #If so, then add to attribute dictionary: - cam_nml_attr_dict[conf_name] = conf.value + if type(conf.value) is str: + #If value is a string, then add directly: + cam_nml_attr_dict[conf_name] = conf.value + else: + #If not, then convert to string before adding: + cam_nml_attr_dict[conf_name] = "{}".format(conf.value) + # End if # End if # End for # End if #Return namelist attribute dictionary: return cam_nml_attr_dict +################## + +def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): + + """ + Determines if a user has specified + which CCPP physics suite to use, + assuming there is more than one suite + listed in the 'physics_suites' CAM + configure option. + """ + + #Extract physics suite list: + phys_suites = config.get_value('physics_suites').split(';') + + if len(phys_suites) > 1: + #If more than one physics suite is listed, + #then check the "user_nl_cam" file to see if user + #specified a particular suite to use for this + #simulation: + with open(user_nl_file, 'r') as nl_file: + #Read lines in file: + nl_user_lines = nl_file.readlines() + + #Strip out all comment lines: + real_nl_lines = \ + [line for line in nl_user_lines if line[0] != "!"] + + #Search for "physics_suite" line: + phys_suite_lines = \ + [line for line in real_nl_lines if "physics_suite" in line] + + #If there is no "physics_suite" line, then throw an error: + if not phys_suite_lines: + emsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ + This is required if more than one suite is listed\n \ + in CAM_CONFIG_OPTS." + raise CamBuildnmlError(emsg) + + #If there is more than one "physics_suite" line, then also throw an error: + if len(phys_suite_lines) > 1: + emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ + Only one 'physics_suite' line is allowed." 
+ raise CamBuildnmlError(emsg) + + #Extract string from list: + phys_suite_line = phys_suite_lines[0] + + #Search for equals (=) sign in "physics_suite" string: + eq_sign_idx = phys_suite_line.find("=") + + if eq_sign_idx > 0: + #Assume all text left of equals sign is the physics suite name: + phys_suite_val = phys_suite_line[(eq_sign_idx+1):].strip() + else: + #Syntax is bad, so raise an error: + emsg = "No equals (=) sign was found with the 'physics_suite' variable." + raise CamBuildnmlError(emsg) + + #Check that physics suite specified is actually in config list: + if phys_suite_val not in phys_suites: + emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ + listed in CAM_CONFIG_OPTS" + raise CamBuildnmlError(emsg) + + else: + #If only physics suite is listed, then + #just use that one: + phys_suite_val = phys_suites[0] + + #Add new namelist attribute to dictionary: + cam_nml_attr_dict["phys_suite"] = phys_suite_val + ################# #PRIMARY FUNCTION ################# @@ -110,7 +195,7 @@ def buildnml(case, caseroot, compname): #-------------------------------------------------------------------------- # Call config_CAM to set the configuration options needed to - # generate CAM's namelist + # generate CAM's namelist #-------------------------------------------------------------------------- # Create CAM configure object: @@ -249,10 +334,25 @@ def buildnml(case, caseroot, compname): # Determine location and name of "user_nl_cam" files: user_nl_file = os.path.join(caseroot, "user_nl_cam" + inst_string) + # Check that file actually exists. If not then throw an error: + if not os.path.exists(user_nl_file): + emsg = "The file 'user_nl_cam' is missing. Please run 'case.setup' first." + raise CamBuildnmlError(emsg) + # Determine location and name of namelist input file: namelist_infile = os.path.join(confdir, "namelist_infile") + #----------------------------------------------------------- + # Determine CCPP physics suite chosen by user, and set suite + # name to be a namelist attribute: + #----------------------------------------------------------- + + # Find user-chosen CCPP physics suite, and set as an attribute: + ccpp_phys_set(config, cam_nml_dict, user_nl_file) + + #-------------------------------- # Create CIME namelist input file: + #-------------------------------- create_namelist_infile(case, user_nl_file, namelist_infile, "\n".join(infile_lines)) @@ -263,7 +363,7 @@ def buildnml(case, caseroot, compname): # Initialize namelist defaults: #----------------------------- - # Initalize namelist defaults in used namelis groups: + # Initalize namelist defaults in used namelist groups: nmlgen.init_defaults(namelist_infile_list, cam_nml_dict) #-------------------------- diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 2c4a497e..9f12c97e 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -702,6 +702,8 @@ def __init__(self, case, case_log): # Horizontal grid self.create_config("hgrid", hgrid_desc, atm_grid, se_grid_re, is_nml_attr=True) + # Add SE namelist group to nmlgen list: + self.__nml_groups.append("dyn_se_inparm") elif fv3_grid_re.match(atm_grid) is not None: # Dynamical core @@ -764,6 +766,72 @@ def __init__(self, case, case_log): raise CamConfigValError(emsg.format(user_dyn_opt, dyn)) # End if + #-------------------------------------------------------- + # Set CAM grid variables (nlev and horizontal dimensions) + #-------------------------------------------------------- + + # Set number of vertical levels + if 
case_nlev: + # Save variable for CPPDEFs + nlev = case_nlev + else: + # Save variable for CPPDEFs + nlev = 30 + + # Add vertical levels to configure object + nlev_desc = "Number of vertical levels." + self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) + + #Set horizontal dimension variables: + if dyn == "se": + # Extract cubed-sphere grid values from hgrid string: + csne_re = re.search(r"ne[0-9]+", atm_grid) + csne_val = int(csne_re.group()[2:]) + + csnp_re = re.search(r"np[0-9]+", atm_grid) + csnp_val = int(csnp_re.group()[2:]) + + # Add number of elements along edge of cubed-sphere grid + csne_desc = "Number of elements along one edge of a cubed sphere grid." + self.create_config("csne", csne_desc, csne_val) + + # Add number of points on each cubed-sphere element edge + csnp_desc = "Number of points on each edge of the elements in a cubed sphere grid." + self.create_config("csnp", csnp_desc, csnp_val) + + else: + # Add number of latitudes in grid to configure object + nlat_desc = "Number of unique latitude points in rectangular lat/lon" \ + " grid.\nSet to 1 (one) for unstructured grids." + self.create_config("nlat", nlat_desc, case_ny) + + # Add number of longitudes in grid to configure object + nlon_desc = "Number of unique longitude points in rectangular lat/lon" \ + " grid.\nTotal number of columns for unstructured grids." + self.create_config("nlon", nlon_desc, case_nx) + + #--------------------------------------- + # Set initial and/or boundary conditions + #--------------------------------------- + + #Check if user specified Analytic Initial Conditions (ICs): + if user_config_opts.analytic_ic: + #Set "analytic_ic" to True (1): + analy_ic_val = 1 #Use Analytic ICs + + #Add analytic_ic to namelist group list: + self.__nml_groups.append("analytic_ic_nl") + else: + analy_ic_val = 0 #Don't use Analytic ICs + + analy_ic_desc = "\n\ + Switch to turn on analytic initial conditions for the dynamics state:\n\ + 0 => no,\n\ + 1 => yes." + + self.create_config("analytic_ic", analy_ic_desc, + analy_ic_val, [0, 1], is_nml_attr=True) + #-------------------- # Set ocean component #-------------------- @@ -781,12 +849,13 @@ def __init__(self, case, case_log): self.create_config("ocn", ocn_desc, comp_ocn, ocn_valid_vals, is_nml_attr=True) - phys_desc = """\nA comma-separate list of physics suite definition - file (SDF) names.\nTo specify the Kessler and Held-Suarez suites as \ - run time options, use '--physics-suites kessler,hs94'. - """ + phys_desc = """\n\ + A semi-colon separated list of physics Suite Definition\n\ + File (SDF) names. 
To specify the Kessler and Held-Suarez\n\ + suites as run time options, use '--physics-suites kessler;hs94'.""" + self.create_config("physics_suites", phys_desc, - user_config_opts.physics_suites, is_nml_attr=True) + user_config_opts.physics_suites) #-------------------------------------------------------- # Print CAM configure settings and values to debug logger @@ -824,27 +893,35 @@ def parse_config_opts(cls, config_opts, test_mode=False): >>> ConfigCAM.parse_config_opts("--dyn se", test_mode=True) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): SystemExit: 2 - >>> ConfigCAM.parse_config_opts("--phys kessler") - Namespace(dyn='', physics_suites='kessler') - >>> ConfigCAM.parse_config_opts("--phys kessler --dyn se") - Namespace(dyn='se', physics_suites='kessler') - >>> ConfigCAM.parse_config_opts("--phys kessler;musica") - Namespace(dyn='', physics_suites='kessler;musica') - >>> ConfigCAM.parse_config_opts("--phys kessler musica", test_mode=True) #doctest: +IGNORE_EXCEPTION_DETAIL + >>> ConfigCAM.parse_config_opts("--physics-suites kessler") + Namespace(analytic_ic=False, dyn='', physics_suites='kessler') + >>> ConfigCAM.parse_config_opts("--physics-suites kessler --dyn se") + Namespace(analytic_ic=False, dyn='se', physics_suites='kessler') + >>> ConfigCAM.parse_config_opts("--physics-suites kessler --dyn se --analytic_ic") + Namespace(analytic_ic=True, dyn='se', physics_suites='kessler') + >>> ConfigCAM.parse_config_opts("--physics-suites kessler;musica") + Namespace(analytic_ic=False, dyn='', physics_suites='kessler;musica') + >>> ConfigCAM.parse_config_opts("--physics-suites kessler musica", test_mode=True) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): SystemExit: 2 """ cco_str = "CAM_CONFIG_OPTS" parser = argparse.ArgumentParser(description=cco_str, - prog="ConfigCAM", + prog="ConfigCAM", allow_abbrev=False, epilog="Allowed values of "+cco_str) - parser.add_argument("--physics-suites", type=str, required=True, + parser.add_argument("--physics-suites", "-physics-suites", type=str, + required=True, metavar='', help="""Semicolon-separated list of Physics Suite - Definition Files (SDFs)""") + Definition Files (SDFs)""") parser.add_argument("--dyn", "-dyn", metavar='', type=str, required=False, default="", - help="Name of dycore") + help="""Name of dycore""") + parser.add_argument("--analytic_ic", "-analytic_ic", + action='store_true', required=False, + help="""Flag to turn on Analytic Initial + Conditions (ICs).""") + popts = [opt for opt in config_opts.split(" ") if opt] if test_mode: stderr_save = sys.stderr diff --git a/cime_config/config_component.xml b/cime_config/config_component.xml index c8b2d829..9873d45b 100644 --- a/cime_config/config_component.xml +++ b/cime_config/config_component.xml @@ -164,6 +164,7 @@ -phys tj2016 -analytic_ic -phys held_suarez -phys kessler -chem terminator -analytic_ic --> + --physics-suites kessler --analytic_ic --dyn none --physics-suites adiabatic diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index cf402c92..84140d48 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10856,6 +10856,7 @@ Default: 0 (i.e., not used) + 0 3.0D0 @@ -10935,6 +10936,9 @@ Must match value of grid. Set this to zero to use a refined mesh. Default: UNKNOWN. + + 30 + integer @@ -10942,8 +10946,11 @@ dyn_se_inparm Number of PEs to be used by SE dycore. - Default: Number of PEs used by CAM. + Default: 0 = Number of PEs used by CAM. 
+ + 0 + integer @@ -11317,6 +11324,9 @@ result in 9 equally-spaced physics points per element). Default: 0 = feature disabled, use dynamics GLL points. + + 0 + @@ -11418,10 +11428,10 @@ none - held_suarez_1994 - held_suarez_1994 - moist_baroclinic_wave_dcmip2016 - moist_baroclinic_wave_dcmip2016 + held_suarez_1994 + held_suarez_1994 + moist_baroclinic_wave_dcmip2016 + moist_baroclinic_wave_dcmip2016 From 47ecaa7eeb57736f354c238e62155f9038b65487 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 29 Oct 2020 20:15:48 -0600 Subject: [PATCH 02/45] Remove extra public copy of 'iMap' (Github issue #78). --- src/utils/cam_grid_support.F90 | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/utils/cam_grid_support.F90 b/src/utils/cam_grid_support.F90 index a635177a..faed9e7d 100644 --- a/src/utils/cam_grid_support.F90 +++ b/src/utils/cam_grid_support.F90 @@ -3,7 +3,8 @@ module cam_grid_support use shr_kind_mod, only: i8=>shr_kind_i8, i4=>shr_kind_i4 use shr_kind_mod, only: max_chars=>shr_kind_cl use shr_sys_mod, only: shr_sys_flush - use pio, only: iMap=>PIO_OFFSET_KIND, var_desc_t + use cam_map_utils, only: iMap + use pio, only: var_desc_t use cam_abortutils, only: endrun use cam_logfile, only: iulog use spmd_utils, only: masterproc @@ -13,8 +14,6 @@ module cam_grid_support implicit none private - public iMap - integer, parameter, public :: max_hcoordname_len = 16 real(r8), parameter :: grid_fill_value = -900.0_r8 !--------------------------------------------------------------------------- From afa9da80bac1dc4e141c47e5f7fa49a479636d5a Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 4 Nov 2020 11:36:08 -0700 Subject: [PATCH 03/45] Add 'air_composition_nl' namelist group, which is needed for the SE dycore. --- cime_config/cam_config.py | 3 +- cime_config/namelist_definition_cam.xml | 73 ++++++++++++++++++++++--- 2 files changed, 68 insertions(+), 8 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 9f12c97e..59426f0d 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -702,7 +702,8 @@ def __init__(self, case, case_log): # Horizontal grid self.create_config("hgrid", hgrid_desc, atm_grid, se_grid_re, is_nml_attr=True) - # Add SE namelist group to nmlgen list: + # Add SE namelist groups to nmlgen list: + self.__nml_groups.append("air_composition_nl") self.__nml_groups.append("dyn_se_inparm") elif fv3_grid_re.match(atm_grid) is not None: diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 84140d48..de33b4ef 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -8194,6 +8194,23 @@ atm/cam/topo/se/ne30x8_conus_nc3000_Co060_Fi001_MulG_PF_nullRR_Nsw042_20190710.nc + + logical + topo + cam_initfiles_nl + + Setting use_topo_file=.false. allows the user to specify that PHIS, SGH, + SGH30, and LANDM_COSLAT are all zero without having to supply a topo file + full of zeros. + Default: TRUE + + + .true. + .false. + .false. + .false. + + @@ -10771,6 +10788,48 @@ + + + + char*6(20) + physconst + air_composition_nl + + List of major species of dry air. If not set then the composition of dry + air is considered fixed at tropospheric conditions and the properties of + dry air are constant. If set then the list of major species is assumed to + have 'N2' listed last. This information is currently used only for + computing the variable properties of air in WACCM-X configurations. 
+ Default: ['O', 'O2', 'H', 'N2'] if WACCM-X, otherwise None. + + + "" + O, O2, H, N2 + + + + + char*6(20) + physconst + air_composition_nl + + List of water species that are included in "moist" air. This is currently + used only by the SE dycore to generalize the computation of the moist air + mass and thermodynamic properties. + Default: + ['Q','CLDLIQ','RAINQM'] if CAM4, CAM5, or Kessler physics is used. + ['Q','CLDLIQ','CLDICE','RAINQM','SNOWQM'] if CAM6 physics is used. + ['Q'] for all other physics choices. + + + Q + Q, CLDLIQ, RAINQM + Q, CLDLIQ, RAINQM + Q, CLDLIQ, RAINQM + Q, CLDLIQ, CLDICE, RAINQM, SNOWQM + + + @@ -10790,13 +10849,13 @@ 3 - 3 - 3 - 5 - 1 - 1 - 1 - 1 + 3 + 3 + 5 + 1 + 1 + 1 + 1 From 570e648b1026bc5dd9524e5a07a661436d1a1f8c Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Fri, 6 Nov 2020 11:23:23 -0700 Subject: [PATCH 04/45] Add SE dycore source code, modified to work with new host model infrastructure. --- src/data/physconst.F90 | 679 ++++- src/data/registry.xml | 7 +- src/dynamics/none/stepon.F90 | 2 +- src/dynamics/se/advect_tend.F90 | 95 + src/dynamics/se/dp_coupling.F90 | 840 ++++++ src/dynamics/se/dp_mapping.F90 | 702 +++++ src/dynamics/se/dycore/bndry_mod.F90 | 952 ++++++ .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 2310 +++++++++++++++ src/dynamics/se/dycore/control_mod.F90 | 122 + .../se/dycore/coordinate_systems_mod.F90 | 919 ++++++ src/dynamics/se/dycore/cube_mod.F90 | 2332 +++++++++++++++ src/dynamics/se/dycore/derivative_mod.F90 | 2525 ++++++++++++++++ src/dynamics/se/dycore/dimensions_mod.F90 | 141 + src/dynamics/se/dycore/dof_mod.F90 | 402 +++ src/dynamics/se/dycore/edge_mod.F90 | 2629 +++++++++++++++++ src/dynamics/se/dycore/edgetype_mod.F90 | 94 + src/dynamics/se/dycore/element_mod.F90 | 377 +++ src/dynamics/se/dycore/fv_mapz.F90 | 1658 +++++++++++ src/dynamics/se/dycore/fvm_analytic_mod.F90 | 1214 ++++++++ .../se/dycore/fvm_consistent_se_cslam.F90 | 2031 +++++++++++++ .../se/dycore/fvm_control_volume_mod.F90 | 311 ++ src/dynamics/se/dycore/fvm_mapping.F90 | 1276 ++++++++ src/dynamics/se/dycore/fvm_mod.F90 | 953 ++++++ src/dynamics/se/dycore/fvm_overlap_mod.F90 | 877 ++++++ .../se/dycore/fvm_reconstruction_mod.F90 | 1845 ++++++++++++ src/dynamics/se/dycore/gbarrier.c | 109 + src/dynamics/se/dycore/gbarrier_mod.F90 | 79 + src/dynamics/se/dycore/gbarriertype_mod.F90 | 8 + src/dynamics/se/dycore/global_norms_mod.F90 | 1133 +++++++ src/dynamics/se/dycore/gridgraph_mod.F90 | 555 ++++ src/dynamics/se/dycore/hybrid_mod.F90 | 566 ++++ src/dynamics/se/dycore/hybvcoord_mod.F90 | 28 + src/dynamics/se/dycore/interpolate_mod.F90 | 1828 ++++++++++++ src/dynamics/se/dycore/ll_mod.F90 | 149 + src/dynamics/se/dycore/mass_matrix_mod.F90 | 120 + src/dynamics/se/dycore/mesh_mod.F90 | 1289 ++++++++ src/dynamics/se/dycore/metagraph_mod.F90 | 375 +++ src/dynamics/se/dycore/namelist_mod.F90 | 166 ++ src/dynamics/se/dycore/parallel_mod.F90 | 246 ++ src/dynamics/se/dycore/params_mod.F90 | 11 + src/dynamics/se/dycore/prim_advance_mod.F90 | 2259 ++++++++++++++ src/dynamics/se/dycore/prim_advection_mod.F90 | 1122 +++++++ src/dynamics/se/dycore/prim_driver_mod.F90 | 684 +++++ src/dynamics/se/dycore/prim_init.F90 | 395 +++ src/dynamics/se/dycore/prim_state_mod.F90 | 476 +++ src/dynamics/se/dycore/quadrature_mod.F90 | 955 ++++++ src/dynamics/se/dycore/reduction_mod.F90 | 447 +++ src/dynamics/se/dycore/schedtype_mod.F90 | 59 + src/dynamics/se/dycore/schedule_mod.F90 | 714 +++++ src/dynamics/se/dycore/spacecurve_mod.F90 | 1274 ++++++++ src/dynamics/se/dycore/thread_mod.F90 | 
82 + src/dynamics/se/dycore/time_mod.F90 | 135 + src/dynamics/se/dycore/vertremap_mod.F90 | 606 ++++ src/dynamics/se/dycore/viscosity_mod.F90 | 740 +++++ src/dynamics/se/dyn_comp.F90 | 2398 +++++++++++++++ src/dynamics/se/dyn_grid.F90 | 1255 ++++++++ src/dynamics/se/native_mapping.F90 | 537 ++++ src/dynamics/se/pmgrid.F90 | 15 + src/dynamics/se/spmd_dyn.F90 | 34 + src/dynamics/se/stepon.F90 | 420 +++ src/dynamics/se/test_fvm_mapping.F90 | 853 ++++++ src/dynamics/tests/dyn_tests_utils.F90 | 23 + src/dynamics/tests/inic_analytic.F90 | 623 ++++ src/dynamics/tests/inic_analytic_utils.F90 | 132 + .../initial_conditions/ic_baro_dry_jw06.F90 | 271 ++ .../initial_conditions/ic_baroclinic.F90 | 720 +++++ .../initial_conditions/ic_held_suarez.F90 | 163 + .../initial_conditions/ic_us_standard_atm.F90 | 186 ++ src/physics/utils/physics_column_type.F90 | 21 +- src/physics/utils/physics_grid.F90 | 39 + src/utils/hycoef.F90 | 403 +++ 71 files changed, 49981 insertions(+), 15 deletions(-) create mode 100644 src/dynamics/se/advect_tend.F90 create mode 100644 src/dynamics/se/dp_coupling.F90 create mode 100644 src/dynamics/se/dp_mapping.F90 create mode 100644 src/dynamics/se/dycore/bndry_mod.F90 create mode 100644 src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 create mode 100644 src/dynamics/se/dycore/control_mod.F90 create mode 100644 src/dynamics/se/dycore/coordinate_systems_mod.F90 create mode 100644 src/dynamics/se/dycore/cube_mod.F90 create mode 100644 src/dynamics/se/dycore/derivative_mod.F90 create mode 100644 src/dynamics/se/dycore/dimensions_mod.F90 create mode 100644 src/dynamics/se/dycore/dof_mod.F90 create mode 100644 src/dynamics/se/dycore/edge_mod.F90 create mode 100644 src/dynamics/se/dycore/edgetype_mod.F90 create mode 100644 src/dynamics/se/dycore/element_mod.F90 create mode 100644 src/dynamics/se/dycore/fv_mapz.F90 create mode 100644 src/dynamics/se/dycore/fvm_analytic_mod.F90 create mode 100644 src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 create mode 100644 src/dynamics/se/dycore/fvm_control_volume_mod.F90 create mode 100644 src/dynamics/se/dycore/fvm_mapping.F90 create mode 100644 src/dynamics/se/dycore/fvm_mod.F90 create mode 100644 src/dynamics/se/dycore/fvm_overlap_mod.F90 create mode 100644 src/dynamics/se/dycore/fvm_reconstruction_mod.F90 create mode 100644 src/dynamics/se/dycore/gbarrier.c create mode 100644 src/dynamics/se/dycore/gbarrier_mod.F90 create mode 100644 src/dynamics/se/dycore/gbarriertype_mod.F90 create mode 100644 src/dynamics/se/dycore/global_norms_mod.F90 create mode 100644 src/dynamics/se/dycore/gridgraph_mod.F90 create mode 100644 src/dynamics/se/dycore/hybrid_mod.F90 create mode 100644 src/dynamics/se/dycore/hybvcoord_mod.F90 create mode 100644 src/dynamics/se/dycore/interpolate_mod.F90 create mode 100644 src/dynamics/se/dycore/ll_mod.F90 create mode 100644 src/dynamics/se/dycore/mass_matrix_mod.F90 create mode 100644 src/dynamics/se/dycore/mesh_mod.F90 create mode 100644 src/dynamics/se/dycore/metagraph_mod.F90 create mode 100644 src/dynamics/se/dycore/namelist_mod.F90 create mode 100644 src/dynamics/se/dycore/parallel_mod.F90 create mode 100644 src/dynamics/se/dycore/params_mod.F90 create mode 100644 src/dynamics/se/dycore/prim_advance_mod.F90 create mode 100644 src/dynamics/se/dycore/prim_advection_mod.F90 create mode 100644 src/dynamics/se/dycore/prim_driver_mod.F90 create mode 100644 src/dynamics/se/dycore/prim_init.F90 create mode 100644 src/dynamics/se/dycore/prim_state_mod.F90 create mode 100644 src/dynamics/se/dycore/quadrature_mod.F90 
create mode 100644 src/dynamics/se/dycore/reduction_mod.F90 create mode 100644 src/dynamics/se/dycore/schedtype_mod.F90 create mode 100644 src/dynamics/se/dycore/schedule_mod.F90 create mode 100644 src/dynamics/se/dycore/spacecurve_mod.F90 create mode 100644 src/dynamics/se/dycore/thread_mod.F90 create mode 100644 src/dynamics/se/dycore/time_mod.F90 create mode 100644 src/dynamics/se/dycore/vertremap_mod.F90 create mode 100644 src/dynamics/se/dycore/viscosity_mod.F90 create mode 100644 src/dynamics/se/dyn_comp.F90 create mode 100644 src/dynamics/se/dyn_grid.F90 create mode 100644 src/dynamics/se/native_mapping.F90 create mode 100644 src/dynamics/se/pmgrid.F90 create mode 100644 src/dynamics/se/spmd_dyn.F90 create mode 100644 src/dynamics/se/stepon.F90 create mode 100644 src/dynamics/se/test_fvm_mapping.F90 create mode 100644 src/dynamics/tests/dyn_tests_utils.F90 create mode 100644 src/dynamics/tests/inic_analytic.F90 create mode 100644 src/dynamics/tests/inic_analytic_utils.F90 create mode 100644 src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 create mode 100644 src/dynamics/tests/initial_conditions/ic_baroclinic.F90 create mode 100644 src/dynamics/tests/initial_conditions/ic_held_suarez.F90 create mode 100644 src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 create mode 100644 src/utils/hycoef.F90 diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 214d36c1..32a1e935 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -30,8 +30,18 @@ module physconst public :: physconst_readnl public :: physconst_init + public :: composition_init public :: physconst_update public :: physconst_calc_kappav + public :: get_cp + + ! + ! subroutines to compute thermodynamic quantities + ! see Lauritzen et al. (2018) for formulas + ! doi: 10.1029/2017MS001257 + ! + + public :: get_molecular_diff_coef ! molecular diffusion and thermal conductivity !> \section arg_table_physconst Argument Table !! \htmlinclude physconst.html @@ -144,7 +154,48 @@ module physconst real(kind_phys), public, protected :: ez real(kind_phys), public, protected :: Cpd_on_Cpv = real(shr_const_cpdair/shr_const_cpwv, kind_phys) - !------------- Variables below here are for WACCM-X ----------------------- + !--------------- Variables for consistent themodynamics -------------------- + ! + ! composition of air + ! + ! NOTE: These routines may be replaced once constituents are enabled in the CCPP-framework + ! + integer, parameter :: num_names_max = 30 + character(len=6) :: dry_air_species(num_names_max) + character(len=6) :: water_species_in_air(num_names_max) + + integer, protected, public :: dry_air_species_num + integer, protected, public :: water_species_in_air_num + + integer, protected, public :: thermodynamic_active_species_num + integer, allocatable, protected, public :: thermodynamic_active_species_idx(:) + integer, allocatable, public :: thermodynamic_active_species_idx_dycore(:) + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_cp(:) + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_cv(:) + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_R(:) + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_mwi(:)!inverse molecular weights dry air + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_kv(:) !molecular diffusion + real(kind_phsy), allocatable, protected, public :: thermodynamic_active_species_kc(:) !thermal conductivity + + ! 
standard dry air (constant composition) + real(kind_phys) :: mmro2, mmrn2 ! Mass mixing ratios of O2 and N2 + real(kind_phys) :: o2_mwi, n2_mwi ! Inverse molecular weights + real(kind_phys) :: mbar ! Mean mass at mid level + + ! coefficients in expressions for molecular diffusion coefficients + ! kv1,..,kv4 are coefficients for kmvis calculation + ! kc1,..,kc4 are coefficients for kmcnd calculation + real(kind_phys), parameter :: & + kv1 = 4.03_kind_phys, & + kv2 = 3.42_kind_phys, & + kv3 = 3.9_kind_phys, & + kv4 = 0.69_kind_phys, & + kc1 = 56._kind_phys, & + kc2 = 56._kind_phys, & + kc3 = 75.9_kind_phys, & + kc4 = 0.69_kind_phys + + !------------- Variables below here are for WACCM-X ----------------------- ! composition dependent specific heat at constant pressure real(kind_phys), public, pointer :: cpairv(:,:) ! composition dependent gas "constant" @@ -171,7 +222,6 @@ module physconst ! Read namelist variables. subroutine physconst_readnl(nlfile) - use shr_kind_mod, only: r8 => shr_kind_r8 use shr_nl_mod, only: find_group_name => shr_nl_find_group_name use shr_flux_mod, only: shr_flux_adjust_constants ! use mpi, only: mpi_bcast !!XXgoldyXX: Why not? @@ -193,6 +243,8 @@ subroutine physconst_readnl(nlfile) namelist /physconst_nl/ gravit, sday, mwh2o, cpwv, mwdry, cpair, & rearth, tmelt, omega + ! Variable components of dry air and water species in air + namelist /air_composition_nl/ dry_air_species, water_species_in_air !------------------------------------------------------------------------ if (masterproc) then @@ -284,12 +336,73 @@ subroutine physconst_readnl(nlfile) Cpd_on_Cpv = cpair / cpwv ! Adjust constants in shr_flux_mod. - call shr_flux_adjust_constants(zvir=real(zvir, r8), & - cpvir=real(cpvir, r8), gravit=real(gravit, r8)) + call shr_flux_adjust_constants(zvir=real(zvir, kind_phys), & + cpvir=real(cpvir, kind_phys), gravit=real(gravit, kind_phys)) end if ez = omega / sqrt(0.375_kind_phys) + ! Read variable components of dry air and water species in air + + dry_air_species = (/ (' ', i=1,num_names_max) /) + water_species_in_air = (/ (' ', i=1,num_names_max) /) + + if (masterproc) then + open( newunit=unitn, file=trim(nlfile), status='old' ) + call find_group_name(unitn, 'air_composition_nl', status=ierr) + if (ierr == 0) then + read(unitn, air_composition_nl, iostat=ierr) + if (ierr /= 0) then + call endrun(subname // ':: ERROR reading namelist') + end if + end if + close(unitn) + end if + + call mpi_bcast(dry_air_species, len(dry_air_species)*num_names_max, mpi_character, & + masterprocid, mpicom, ierr) + call mpi_bcast(water_species_in_air, len(water_species_in_air)*num_names_max, mpi_character, & + masterprocid, mpicom, ierr) + + dry_air_species_num = 0 + water_species_in_air_num = 0 + do i = 1, num_names_max + if (.not. LEN(TRIM(dry_air_species(i)))==0) then + dry_air_species_num = dry_air_species_num + 1 + end if + if (.not. 
LEN(TRIM(water_species_in_air(i)))==0) then + water_species_in_air_num = water_species_in_air_num + 1 + endif + end do + thermodynamic_active_species_num = dry_air_species_num+water_species_in_air_num + + if (masterproc) then + + write(iulog,*)'****************************************************************************' + write(iulog,*)' ' + + if (dry_air_species_num == 0) then + write(iulog,*)' Thermodynamic properties of dry air are fixed at troposphere values' + else + write(iulog,*)' Thermodynamic properties of dry air are based on variable' + write(iulog,*)' composition of the following species:' + do i = 1, dry_air_species_num + write(iulog,*)' ', trim(dry_air_species(i)) + end do + write(iulog,*) ' ' + end if + + write(iulog,*)' Thermodynamic properties of moist air are based on variable' + write(iulog,*)' composition of the following water species:' + do i = 1, water_species_in_air_num + write(iulog,*)' ', trim(water_species_in_air(i)) + end do + + write(iulog,*)' ' + write(iulog,*)'****************************************************************************' + + end if + end subroutine physconst_readnl !=========================================================================== @@ -390,6 +503,315 @@ subroutine physconst_init(pcols, pver, pverp) end subroutine physconst_init + !=========================================================================== + + subroutine composition_init() +! use constituents, only: cnst_get_ind, cnst_mw + use physics_types, only: ix_qv, ix_cld_liq, ix_rain !Remove once constituents are enabled -JN + use spmd_utils, only: masterproc + use cam_logfile, only: iulog + character(len=*), parameter :: subname = 'composition_init' + real(kind_phys) :: mw, dof1, dof2, dof3 + + integer :: icnst,ix,i + + ! standard dry air (constant composition) + o2_mwi = 1._kind_phys/32._kind_phys + n2_mwi = 1._kind_phys/28._kind_phys + mmro2 = 0.235_kind_phys + mmrn2 = 0.765_kind_phys + mbar = 1._kind_phys/(mmro2*o2_mwi + mmrn2*n2_mwi) + + ! init for variable composition dry air + + i = dry_air_species_num+water_species_in_air_num + allocate(thermodynamic_active_species_idx(i)) + allocate(thermodynamic_active_species_idx_dycore(i)) + allocate(thermodynamic_active_species_cp(0:i)) + allocate(thermodynamic_active_species_cv(0:i)) + allocate(thermodynamic_active_species_R(0:i)) + + i = dry_air_species_num + allocate(thermodynamic_active_species_mwi(i)) + allocate(thermodynamic_active_species_kv(i)) + allocate(thermodynamic_active_species_kc(i)) + thermodynamic_active_species_idx = -999 + thermodynamic_active_species_idx_dycore = -999 + thermodynamic_active_species_cp = 0.0_kind_phys + thermodynamic_active_species_cv = 0.0_kind_phys + thermodynamic_active_species_R = 0.0_kind_phys + thermodynamic_active_species_mwi = 0.0_kind_phys + thermodynamic_active_species_kv = 0.0_kind_phys + thermodynamic_active_species_kc = 0.0_kind_phys + ! + ! define cp and R for species in species_name + ! + ! Last major species in namelist dry_air_species is derived from the other major species + ! (since sum of dry mixing ratios for major species of dry air add must add to one) + ! + dof1 = 3._kind_phys ! monatomic ideal gas cv=dof1/2 * R; cp=(1+dof1/2) * R; dof=3 translational + dof2 = 5._kind_phys ! diatomic ideal gas cv=dof2/2 * R; cp=(1+dof2/2) * R; dof=3 tranlational + 2 rotational + dof3 = 6._kind_phys ! polyatomic ideal gas cv=dof3/2 * R; cp=(1+dof3/2) * R; dof=3 tranlational + 3 rotational + ! + if (dry_air_species_num>0) then + ! + ! 
last major species in dry_air_species is derived from the others and constants associated with it + ! are initialized here + ! + if (TRIM(dry_air_species(dry_air_species_num))=='N2') then +! call cnst_get_ind('N' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' dry air component not found: ', dry_air_species(dry_air_species_num) + call endrun(subname // ':: dry air component not found') + else + mw = 2.0_kind_phys*cnst_mw(ix) + icnst = dry_air_species_num + thermodynamic_active_species_idx(icnst) = 1!note - this is not used since this tracer value is derived + thermodynamic_active_species_cp (icnst) = 0.5_kind_phys*shr_const_rgas*(2._kind_phys+dof2)/mw !N2 + thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof2/mw !N2 + thermodynamic_active_species_R (icnst) = shr_const_rgas/mw + thermodynamic_active_species_mwi(icnst) = 1.0_kind_phys/mw + thermodynamic_active_species_kv(icnst) = 3.42_kind_phys + thermodynamic_active_species_kc(icnst) = 56._kind_phys + end if + ! + ! if last major species is not N2 then add code here + ! + else + write(iulog, *) subname//' derived major species not found: ', dry_air_species(dry_air_species_num) + call endrun(subname // ':: derived major species not found') + end if + else + ! + ! dry air is not species dependent + ! + icnst = 0 + thermodynamic_active_species_cp (icnst) = cpair + thermodynamic_active_species_cv (icnst) = cpair - rair + thermodynamic_active_species_R (icnst) = rair + end if + + ! + !****************************************************************************** + ! + ! add prognostic components of dry air + ! + !****************************************************************************** + ! + icnst = 1 + do i=1,dry_air_species_num-1 + select case (TRIM(dry_air_species(i))) + ! + ! O + ! + case('O') +! call cnst_get_ind('O' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) + call endrun(subname // ':: dry air component not found') + else + mw = cnst_mw(ix) + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = 0.5_kind_phys*shr_const_rgas*(2._kind_phys+dof1)/mw + thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof1/mw + thermodynamic_active_species_R (icnst) = shr_const_rgas/mw + thermodynamic_active_species_mwi(icnst) = 1.0_kind_phys/mw + thermodynamic_active_species_kv(icnst) = 3.9_kind_phys + thermodynamic_active_species_kc(icnst) = 75.9_kind_phys + icnst = icnst+1 + end if + ! + ! O2 + ! + case('O2') +! call cnst_get_ind('O2' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) + call endrun(subname // ':: dry air component not found') + else + mw = cnst_mw(ix) + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = 0.5_kind_phys*shr_const_rgas*(2._kind_phys+dof2)/mw + thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof2/mw + thermodynamic_active_species_R (icnst) = shr_const_rgas/mw + thermodynamic_active_species_mwi(icnst) = 1.0_kind_phys/mw + thermodynamic_active_species_kv(icnst) = 4.03_kind_phys + thermodynamic_active_species_kc(icnst) = 56._kind_phys + icnst = icnst+1 + end if + ! + ! H + ! + case('H') +! 
call cnst_get_ind('H' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) + call endrun(subname // ':: dry air component not found') + else + mw = cnst_mw(ix) + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = 0.5_kind_phys*shr_const_rgas*(2._kind_phys+dof1)/mw + thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof1/mw + thermodynamic_active_species_R (icnst) = shr_const_rgas/mw + thermodynamic_active_species_mwi(icnst) = 1.0_kind_phys/mw + thermodynamic_active_species_kv(icnst) = 0.0_kind_phys + thermodynamic_active_species_kc(icnst) = 0.0_kind_phys + icnst = icnst+1 + end if + ! + ! If support for more major species is to be included add code here + ! + case default + write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) + call endrun(subname // ':: dry air component not found') + end select + + if (masterproc) then + write(iulog, *) "Dry air composition ",TRIM(dry_air_species(i)),& + icnst-1,thermodynamic_active_species_idx(icnst-1),& + thermodynamic_active_species_mwi(icnst-1),& + thermodynamic_active_species_cp(icnst-1),& + thermodynamic_active_species_cv(icnst-1) + end if + end do + i = dry_air_species_num + if (i>0) then + if (masterproc) then + write(iulog, *) "Dry air composition ",TRIM(dry_air_species(i)),& + icnst,thermodynamic_active_species_idx(icnst),& + thermodynamic_active_species_mwi(icnst),& + thermodynamic_active_species_cp(icnst),& + thermodynamic_active_species_cv(icnst) + end if + end if + ! + !************************************************************************************ + ! + ! Add non-dry components of moist air (water vapor and condensates) + ! + !************************************************************************************ + ! + icnst = dry_air_species_num+1 + do i=1,water_species_in_air_num + select case (TRIM(water_species_in_air(i))) + ! + ! Q + ! + case('Q') +! call cnst_get_ind('Q' ,ix, abort=.false.) + ix = ix_qv !This should be removed once constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else +! mw = cnst_mw(ix) + mw = mwh2o !This should be removed once constituents are enabled -JN. + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpwv + thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof3/mw + thermodynamic_active_species_R (icnst) = rh2o + icnst = icnst+1 + end if + ! + ! CLDLIQ + ! + case('CLDLIQ') +! call cnst_get_ind('CLDLIQ' ,ix, abort=.false.) + ix = ix_cld_liq !This should be removed once constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpliq + thermodynamic_active_species_cv (icnst) = cpliq + icnst = icnst+1 + end if + ! + ! CLDICE + ! + case('CLDICE') +! call cnst_get_ind('CLDICE' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. 
+ if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpice + thermodynamic_active_species_cv (icnst) = cpice + icnst = icnst+1 + end if + ! + ! RAINQM + ! + case('RAINQM') +! call cnst_get_ind('RAINQM' ,ix, abort=.false.) + ix = ix_rain !This should be removed once constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpliq + thermodynamic_active_species_cv (icnst) = cpliq + icnst = icnst+1 + end if + ! + ! SNOWQM + ! + case('SNOWQM') +! call cnst_get_ind('SNOWQM' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpice + thermodynamic_active_species_cv (icnst) = cpice + icnst = icnst+1 + end if + ! + ! GRAUQM + ! + case('GRAUQM') +! call cnst_get_ind('GRAUQM' ,ix, abort=.false.) + ix = -1 !Model should die if it gets here, until constituents are enabled -JN. + if (ix<1) then + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + else + mw = cnst_mw(ix) + thermodynamic_active_species_idx(icnst) = ix + thermodynamic_active_species_cp (icnst) = cpice + thermodynamic_active_species_cv (icnst) = cpice + icnst = icnst+1 + end if + ! + ! If support for more major species is to be included add code here + ! + case default + write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) + call endrun(subname // ':: moist air component not found') + end select + ! + ! + ! + if (masterproc) then + write(iulog, *) "Thermodynamic active species ",TRIM(water_species_in_air(i)),& + icnst-1,thermodynamic_active_species_idx(icnst-1),& + thermodynamic_active_species_cp(icnst-1),& + thermodynamic_active_species_cv(icnst-1) + end if + end do + + end subroutine composition_init + !=========================================================================== subroutine physconst_update(mmr, t, ncol, to_moist_factor) @@ -560,4 +982,253 @@ subroutine physconst_calc_kappav(i0,i1,j0,j1,k0,k1,ntotq,tracer,kappav,cpv) !!XXgoldyXX: ^ until we get constituents figured out in CCPP end subroutine physconst_calc_kappav + + ! + !************************************************************************************************************************* + ! + ! compute 3D molecular diffusion and thermal conductivity + ! + !************************************************************************************************************************* + ! + subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sponge_factor,kmvis,kmcnd, ntrac,& + tracer, fact, active_species_idx_dycore, mbarv_in) + + !Given that this routine is only used with the dycore structures, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + + ! 
args + integer, intent(in) :: i0,i1,j0,j1,k1,nlev + real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature + integer, intent(in) :: get_at_interfaces ! 1: compute kmvis and kmcnd at interfaces + ! 0: compute kmvis and kmcnd at mid-levels + real(r8), intent(in) :: sponge_factor(1:k1) ! multiply kmvis and kmcnd with sponge_factor (for sponge layer) + real(r8), intent(out) :: kmvis(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(r8), intent(out) :: kmcnd(i0:i1,j0:j1,1:k1+get_at_interfaces) + integer , intent(in) :: ntrac + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracer array + integer, intent(in), optional :: active_species_idx_dycore(:) ! index of active species in tracer + real(r8), intent(in), optional :: fact(i0:i1,j0:j1,k1) ! if tracer is in units of mass or moist + ! fact converts to dry mixing ratio: tracer/fact + real(r8), intent(in), optional :: mbarv_in(i0:i1,j0:j1,1:k1) ! composition dependent atmosphere mean mass + ! + ! local vars + ! + integer :: i,j,k,icnst,ispecies + real(r8):: mbarvi,mm,residual ! Mean mass at mid level + real(r8):: cnst_vis, cnst_cnd, temp_local + real(r8), dimension(i0:i1,j0:j1,1:k1) :: factor,mbarv + integer, dimension(thermodynamic_active_species_num):: idx_local + !-------------------------------------------- + ! Set constants needed for updates + !-------------------------------------------- + + if (dry_air_species_num==0) then + + cnst_vis = (kv1*mmro2*o2_mwi + kv2*mmrn2*n2_mwi)*mbar*1.e-7_r8 + cnst_cnd = (kc1*mmro2*o2_mwi + kc2*mmrn2*n2_mwi)*mbar*1.e-5_r8 + if (get_at_interfaces==1) then + do k=2,k1 + do j=j0,j1 + do i=i0,i1 + temp_local = 0.5_r8*(temp(i,j,k)+temp(i,j,k-1)) + kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp_local**kv4 + kmcnd(i,j,k) = sponge_factor(k)*cnst_cnd*temp_local**kc4 + end do + end do + end do + ! + ! extrapolate top level value + ! + kmvis(i0:i1,j0:j1,1) = 1.5_r8*kmvis(i0:i1,j0:j1,2)-0.5_r8*kmvis(i0:i1,j0:j1,3) + kmcnd(i0:i1,j0:j1,1) = 1.5_r8*kmcnd(i0:i1,j0:j1,2)-0.5_r8*kmcnd(i0:i1,j0:j1,3) + else if (get_at_interfaces==0) then + do k=1,k1 + do j=j0,j1 + do i=i0,i1 + kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp(i,j,k)**kv4 + kmcnd(i,j,k) = sponge_factor(k)*cnst_cnd*temp(i,j,k)**kc4 + end do + end do + end do + else + call endrun('get_molecular_diff_coef: get_at_interfaces must be 0 or 1') + end if + else + if (present(active_species_idx_dycore)) then + idx_local = active_species_idx_dycore + else + idx_local = thermodynamic_active_species_idx + end if + if (present(fact)) then + factor = fact(:,:,:) + else + factor = 1.0_r8 + endif + if (present(mbarv_in)) then + mbarv = mbarv_in + else + call get_mbarv(i0,i1,j0,j1,1,k1,nlev,ntrac,tracer,idx_local,mbarv,fact=factor) + end if + ! + ! major species dependent code + ! 
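+       ! (Same form as the constant-composition expressions above, but using
+       !  mass-fraction weighted sums over the major dry-air species:
+       !    km = sum_i(k_i*q_i/mw_i) * mbarv * T**0.69 * (1.e-7 or 1.e-5 scale),
+       !  with the last major species taken as the residual 1 - sum of the
+       !  other dry mixing ratios.)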
+ if (get_at_interfaces==1) then + do k=2,k1 + do j=j0,j1 + do i=i0,i1 + kmvis(i,j,k) = 0.0_r8 + kmcnd(i,j,k) = 0.0_r8 + residual = 1.0_r8 + do icnst=1,dry_air_species_num-1 + ispecies = idx_local(icnst) + mm = 0.5_r8*(tracer(i,j,k,ispecies)*factor(i,j,k)+tracer(i,j,k-1,ispecies)*factor(i,j,k-1)) + kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* & + thermodynamic_active_species_mwi(icnst)*mm + kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* & + thermodynamic_active_species_mwi(icnst)*mm + residual = residual - mm + end do + icnst=dry_air_species_num + ispecies = idx_local(icnst) + kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* & + thermodynamic_active_species_mwi(icnst)*residual + kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* & + thermodynamic_active_species_mwi(icnst)*residual + + temp_local = .5_r8*(temp(i,j,k-1)+temp(i,j,k)) + mbarvi = 0.5_r8*(mbarv(i,j,k-1)+mbarv(i,j,k)) + kmvis(i,j,k) = kmvis(i,j,k)*mbarvi*temp_local**kv4*1.e-7_r8 + kmcnd(i,j,k) = kmcnd(i,j,k)*mbarvi*temp_local**kc4*1.e-5_r8 + enddo + enddo + end do + do j=j0,j1 + do i=i0,i1 + kmvis(i,j,1) = 1.5_r8*kmvis(i,j,2)-.5_r8*kmvis(i,j,3) + kmcnd(i,j,1) = 1.5_r8*kmcnd(i,j,2)-.5_r8*kmcnd(i,j,3) + kmvis(i,j,k1+1) = kmvis(i,j,k1) + kmcnd(i,j,k1+1) = kmcnd(i,j,k1) + end do + end do + else if (get_at_interfaces==0) then + else + call endrun('get_molecular_diff_coef: get_at_interfaces must be 0 or 1') + end if + end if + end subroutine get_molecular_diff_coef + ! + !**************************************************************************************************************** + ! + ! Compute dry air heaet capacity under constant pressure + ! + !**************************************************************************************************************** + ! + subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,cp_dry,fact) + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) ! Tracer array + integer, intent(in) :: active_species_idx(:) + real(kind_phys), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) ! dry pressure level thickness + real(kind_phys), intent(out) :: cp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + + integer :: i,j,k,m_cnst,nq + real(kind_phys) :: factor(i0:i1,j0:j1,k0_trac:k1_trac) ! dry pressure level thickness + real(kind_phys) :: residual(i0:i1,j0:j1,k0:k1), mm + ! + ! dry air not species dependent + ! + if (dry_air_species_num==0) then + cp_dry = cpair + else + if (present(fact)) then + factor = fact(:,:,:) + else + factor = 1.0_kind_phys + endif + + cp_dry = 0.0_kind_phys + residual = 1.0_kind_phys + do nq=1,dry_air_species_num-1 + m_cnst = active_species_idx(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + mm = tracer(i,j,k,m_cnst)*factor(i,j,k) + cp_dry(i,j,k) = cp_dry(i,j,k)+thermodynamic_active_species_cp(nq)*mm + residual(i,j,k) = residual(i,j,k) - mm + end do + end do + end do + end do + nq = dry_air_species_num + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + cp_dry(i,j,k) = cp_dry(i,j,k)+thermodynamic_active_species_cp(nq)*residual(i,j,k) + end do + end do + end do + end if + end subroutine get_cp_dry + ! + !************************************************************************************************************************* + ! + ! Compute generalized heat capacity at constant pressure + ! 
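+  ! For moist air this is the mass-weighted mean over dry air and the water
+  ! species (see Lauritzen et al. 2018, doi:10.1029/2017MS001257):
+  !   cp = (cp_dry + sum_l(cp_l*m_l)) / (1 + sum_l(m_l)),
+  ! where m_l are the dry mixing ratios of the thermodynamically active
+  ! water species.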
+ !************************************************************************************************************************* + ! + subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species_idx_dycore) + use cam_logfile, only: iulog + ! args + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) ! Tracer array + real(kind_phys), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) + logical , intent(in) :: inv_cp !output inverse cp instead of cp + real(kind_phys), intent(out) :: cp(i0:i1,j0:j1,k0:k1) + ! + ! array of indicies for index of thermodynamic active species in dycore tracer array + ! (if different from physics index) + ! + integer, optional, intent(in) :: active_species_idx_dycore(:) + + ! local vars + integer :: nq,i,j,k, itrac + real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, sum_cp, factor + integer, dimension(thermodynamic_active_species_num) :: idx_local + + if (present(active_species_idx_dycore)) then + idx_local = active_species_idx_dycore + else + idx_local = thermodynamic_active_species_idx + end if + + if (present(dp_dry)) then + factor = 1.0_kind_phys/dp_dry + else + factor = 1.0_kind_phys + end if + + sum_species = 1.0_kind_phys !all dry air species sum to 1 + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = idx_local(nq) + sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:) + end do + + if (dry_air_species_num==0) then + sum_cp = thermodynamic_active_species_cp(0) + else + call get_cp_dry(i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer,idx_local,sum_cp,fact=factor) + end if + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = idx_local(nq) + sum_cp(:,:,:) = sum_cp(:,:,:)+thermodynamic_active_species_cp(nq)*tracer(:,:,:,itrac)*factor(:,:,:) + end do + if (inv_cp) then + cp=sum_species/sum_cp + else + cp=sum_cp/sum_species + end if + + end subroutine get_cp + end module physconst diff --git a/src/data/registry.xml b/src/data/registry.xml index ca5e7285..ea665fa1 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -271,17 +271,20 @@ .false. + geopotential_at_surface air_temperature x_wind y_wind lagrangian_tendency_of_air_pressure + dry_static_energy_content_of_atmosphere_layer + constituent_mixing_ratio diff --git a/src/dynamics/none/stepon.F90 b/src/dynamics/none/stepon.F90 index 41070c75..3f846ffd 100644 --- a/src/dynamics/none/stepon.F90 +++ b/src/dynamics/none/stepon.F90 @@ -81,7 +81,7 @@ subroutine stepon_run3(dtime, cam_out, phys_state, dyn_in, dyn_out) !-------------------------------------------------------------------------------------- - ! Syncrhronize all PEs and then run dynamics (dyn_run): + ! Synchronize all PEs and then run dynamics (dyn_run): call t_barrierf('sync_dyn_run', mpicom) call t_startf('dyn_run') call dyn_run(dyn_out) diff --git a/src/dynamics/se/advect_tend.F90 b/src/dynamics/se/advect_tend.F90 new file mode 100644 index 00000000..0ac430c7 --- /dev/null +++ b/src/dynamics/se/advect_tend.F90 @@ -0,0 +1,95 @@ +!---------------------------------------------------------------------- +! this module computes the total advection tendencies of advected +! 
constituents for the finite volume dycore +!---------------------------------------------------------------------- +module advect_tend + + use shr_kind_mod, only : r8 => shr_kind_r8 + + save + private + + public :: compute_adv_tends_xyz + + real(r8), allocatable :: adv_tendxyz(:,:,:,:,:) + +contains + + !---------------------------------------------------------------------- + ! computes the total advective tendencies + ! called twice each time step: + ! - first call sets the initial mixing ratios + ! - second call computes and outputs the tendencies + !---------------------------------------------------------------------- + subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0) +! use cam_history, only: outfld, hist_fld_active + use time_manager, only: get_step_size +! use constituents, only: tottnam,pcnst + use constituents, only: pcnst + + ! SE dycore: + use dimensions_mod, only: nc,np,nlev,ntrac + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + + implicit none + + type (element_t), intent(in) :: elem(:) + type(fvm_struct), intent(in) :: fvm(:) + integer, intent(in) :: nets,nete,qn0,n0 + real(r8) :: dt,idt + integer :: i,j,ic,nx,ie + logical :: init + real(r8), allocatable, dimension(:,:) :: ftmp + + if (ntrac>0) then + nx=nc + else + nx=np + endif + allocate( ftmp(nx*nx,nlev) ) + + init = .false. + if ( .not. allocated( adv_tendxyz ) ) then + init = .true. + allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) ) + adv_tendxyz(:,:,:,:,:) = 0._r8 + endif + + if (ntrac>0) then + do ie=nets,nete + do ic=1,pcnst + adv_tendxyz(:,:,:,ic,ie) = fvm(ie)%c(1:nc,1:nc,:,ic) - adv_tendxyz(:,:,:,ic,ie) + end do + end do + else + do ie=nets,nete + do ic=1,pcnst + adv_tendxyz(:,:,:,ic,ie) = elem(ie)%state%Qdp(:,:,:,ic,qn0)/elem(ie)%state%dp3d(:,:,:,n0) - adv_tendxyz(:,:,:,ic,ie) + enddo + end do + end if + +!Remove once history outputs are enabled: +#if 0 + if ( .not. init ) then + dt = get_step_size() + idt = 1._r8/dt + + do ie=nets,nete + do ic = 1,pcnst + do j=1,nx + do i=1,nx + ftmp(i+(j-1)*nx,:) = adv_tendxyz(i,j,:,ic,ie) + end do + end do + call outfld(tottnam(ic), ftmp,nx*nx, ie) + end do + end do + deallocate(adv_tendxyz) + endif + deallocate(ftmp) +#endif + end subroutine compute_adv_tends_xyz + +end module advect_tend diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 new file mode 100644 index 00000000..5be7176e --- /dev/null +++ b/src/dynamics/se/dp_coupling.F90 @@ -0,0 +1,840 @@ +module dp_coupling + +!------------------------------------------------------------------------------- +! 
dynamics - physics coupling module +!------------------------------------------------------------------------------- + +use shr_kind_mod, only: r8=>shr_kind_r8 +use ccpp_kinds, only: kind_phys +!use constituents, only: pcnst, cnst_type +use constituents, only: pcnst + +use spmd_dyn, only: local_dp_map +use spmd_utils, only: mpicom, iam +use dyn_grid, only: TimeLevel, edgebuf +use dyn_comp, only: dyn_export_t, dyn_import_t + +use physics_types, only: physics_state, physics_tend +use physics_types, only: ix_qv, ix_cld_liq, ix_rain !Remove once constituents are enabled +use physics_grid, only: pcols => columns_on_task, get_dyn_col_p +use physics_grid, only: pver, pverp + +use dp_mapping, only: nphys_pts + +use cam_logfile, only: iulog +use perf_mod, only: t_startf, t_stopf, t_barrierf +use cam_abortutils, only: endrun + +!SE dycore: +use parallel_mod, only: par +use thread_mod, only: horz_num_threads, max_num_threads +use hybrid_mod, only: config_thread_region, get_loop_ranges, hybrid_t +use dimensions_mod, only: np, npsq, nelemd, nlev, nc, qsize, ntrac, fv_nphys + +use dof_mod, only: UniquePoints, PutUniquePoints +use element_mod, only: element_t +use fvm_control_volume_mod, only: fvm_struct + +implicit none +private +save + +public :: d_p_coupling, p_d_coupling + +real(r8), allocatable :: q_prev(:,:,:) ! Previous Q for computing tendencies + +!========================================================================================= +CONTAINS +!========================================================================================= + +subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) + + ! Convert the dynamics output state into the physics input state. + ! Note that all pressures and tracer mixing ratios coming from the dycore are based on + ! dry air mass. + + use physics_types, only: ps, pdel +! use gravity_waves_sources, only: gws_src_fnct + use dyn_comp, only: frontgf_idx, frontga_idx +! use phys_control, only: use_gw_front, use_gw_front_igw + use hycoef, only: hyai, ps0 + use test_fvm_mapping, only: test_mapping_overwrite_dyn_state, test_mapping_output_phys_state + + !SE dycore: + use fvm_mapping, only: dyn2phys_vector, dyn2phys_all_vars + use time_mod, only: timelevel_qdp + use control_mod, only: qsplit + + ! arguments + type(dyn_export_t), intent(inout) :: dyn_out ! dynamics export + type(physics_state), intent(inout) :: phys_state + type(physics_tend ), intent(inout) :: phys_tend + + + ! LOCAL VARIABLES + type(element_t), pointer :: elem(:) ! pointer to dyn_out element array + integer :: ie ! indices over elements + integer :: icol, ilyr ! indices over chunks, columns, layers + + real(r8), allocatable :: ps_tmp(:,:) ! temp array to hold ps + real(r8), allocatable :: dp3d_tmp(:,:,:) ! temp array to hold dp3d + real(r8), allocatable :: dp3d_tmp_tmp(:,:) + real(r8), allocatable :: phis_tmp(:,:) ! temp array to hold phis + real(r8), allocatable :: T_tmp(:,:,:) ! temp array to hold T + real(r8), allocatable :: uv_tmp(:,:,:,:) ! temp array to hold u and v + real(r8), allocatable :: q_tmp(:,:,:,:) ! temp to hold advected constituents + real(r8), allocatable :: omega_tmp(:,:,:) ! temp array to hold omega + + ! Frontogenesis + !real (kind=r8), allocatable :: frontgf(:,:,:) ! temp arrays to hold frontogenesis + !real (kind=r8), allocatable :: frontga(:,:,:) ! 
function (frontgf) and angle (frontga) + !real (kind=r8), allocatable :: frontgf_phys(:,:,:) + !real (kind=r8), allocatable :: frontga_phys(:,:,:) + + integer :: ncols,i,j,ierr,k,iv + integer :: col_ind, blk_ind(1), m, m_cnst + integer :: tsize ! amount of data per grid point passed to physics + integer, allocatable :: bpter(:,:) ! offsets into block buffer for packing data + integer :: cpter(pcols,0:pver) ! offsets into chunk buffer for unpacking data + integer :: nphys + + real(r8), allocatable :: bbuffer(:), cbuffer(:) ! transpose buffers + real(r8), allocatable :: qgll(:,:,:,:) + real(r8) :: inv_dp3d(np,np,nlev) + integer :: tl_f, tl_qdp_np0, tl_qdp_np1 + logical :: lmono + !---------------------------------------------------------------------------- + + if (.not. local_dp_map) then + call endrun('d_p_coupling: Weak scaling does not support load balancing') + end if + + elem => dyn_out%elem + tl_f = TimeLevel%n0 + call TimeLevel_Qdp(TimeLevel, qsplit, tl_qdp_np0,tl_qdp_np1) + + if (fv_nphys > 0) then + nphys = fv_nphys + else + allocate(qgll(np,np,nlev,pcnst)) + nphys = np + end if + + ! Allocate temporary arrays to hold data for physics decomposition + allocate(ps_tmp(nphys_pts,nelemd)) + allocate(dp3d_tmp(nphys_pts,pver,nelemd)) + allocate(dp3d_tmp_tmp(nphys_pts,pver)) + allocate(phis_tmp(nphys_pts,nelemd)) + allocate(T_tmp(nphys_pts,pver,nelemd)) + allocate(uv_tmp(nphys_pts,2,pver,nelemd)) + allocate(q_tmp(nphys_pts,pver,pcnst,nelemd)) + allocate(omega_tmp(nphys_pts,pver,nelemd)) + +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. use_gw_front_igw) then + allocate(frontgf(nphys_pts,pver,nelemd), stat=ierr) + if (ierr /= 0) call endrun("dp_coupling: Allocate of frontgf failed.") + allocate(frontga(nphys_pts,pver,nelemd), stat=ierr) + if (ierr /= 0) call endrun("dp_coupling: Allocate of frontga failed.") + end if +#endif + + if (iam < par%nprocs) then +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. use_gw_front_igw) then + call gws_src_fnct(elem, tl_f, tl_qdp_np0, frontgf, frontga, nphys) + end if +#endif + + if (fv_nphys > 0) then + call test_mapping_overwrite_dyn_state(elem,dyn_out%fvm) + !****************************************************************** + ! physics runs on an FVM grid: map GLL vars to physics grid + !****************************************************************** + call t_startf('dyn2phys') + ! note that the fvm halo has been filled in prim_run_subcycle + ! if physics grid resolution is not equal to fvm resolution + call dyn2phys_all_vars(1,nelemd,elem, dyn_out%fvm,& + pcnst,hyai(1)*ps0,tl_f, & + ! output + dp3d_tmp, ps_tmp, q_tmp, T_tmp, & + omega_tmp, phis_tmp & + ) + do ie = 1, nelemd + uv_tmp(:,:,:,ie) = & + dyn2phys_vector(elem(ie)%state%v(:,:,:,:,tl_f),elem(ie)) + end do + call t_stopf('dyn2phys') + else + + !****************************************************************** + ! Physics runs on GLL grid: collect unique points before mapping to + ! 
physics decomposition + !****************************************************************** + + if (qsize < pcnst) then + call endrun('d_p_coupling: Fewer GLL tracers advected than required') + end if + + call t_startf('UniquePoints') + do ie = 1, nelemd + inv_dp3d(:,:,:) = 1.0_r8/elem(ie)%state%dp3d(:,:,:,tl_f) + do m=1,pcnst + qgll(:,:,:,m) = elem(ie)%state%Qdp(:,:,:,m,tl_qdp_np0)*inv_dp3d(:,:,:) + end do + ncols = elem(ie)%idxP%NumUniquePts + call UniquePoints(elem(ie)%idxP, elem(ie)%state%psdry(:,:), ps_tmp(1:ncols,ie)) + call UniquePoints(elem(ie)%idxP, nlev, elem(ie)%state%dp3d(:,:,:,tl_f), dp3d_tmp(1:ncols,:,ie)) + call UniquePoints(elem(ie)%idxP, nlev, elem(ie)%state%T(:,:,:,tl_f), T_tmp(1:ncols,:,ie)) + call UniquePoints(elem(ie)%idxV, 2, nlev, elem(ie)%state%V(:,:,:,:,tl_f), uv_tmp(1:ncols,:,:,ie)) + call UniquePoints(elem(ie)%idxV, nlev, elem(ie)%derived%omega, omega_tmp(1:ncols,:,ie)) + + call UniquePoints(elem(ie)%idxP, elem(ie)%state%phis, phis_tmp(1:ncols,ie)) + call UniquePoints(elem(ie)%idxP, nlev, pcnst, qgll,q_tmp(1:ncols,:,:,ie)) + end do + call t_stopf('UniquePoints') + + end if ! if fv_nphys>0 + + else + + ps_tmp(:,:) = 0._r8 + T_tmp(:,:,:) = 0._r8 + uv_tmp(:,:,:,:) = 0._r8 + omega_tmp(:,:,:) = 0._r8 + phis_tmp(:,:) = 0._r8 + q_tmp(:,:,:,:) = 0._r8 + +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. use_gw_front_igw) then + frontgf(:,:,:) = 0._r8 + frontga(:,:,:) = 0._r8 + end if +#endif + + endif ! iam < par%nprocs + + if (fv_nphys < 1) then + deallocate(qgll) + end if + + ! q_prev is for saving the tracer fields for calculating tendencies + if (.not. allocated(q_prev)) then + allocate(q_prev(pcols,pver,pcnst)) + end if + q_prev = 0.0_r8 + + call t_startf('dpcopy') +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. use_gw_front_igw) then + allocate(frontgf_phys(pcols, pver, begchunk:endchunk)) + allocate(frontga_phys(pcols, pver, begchunk:endchunk)) + end if +#endif + !$omp parallel do num_threads(max_num_threads) private (col_ind, icol, ie, blk_ind, ilyr, m) + do col_ind = 1, pcols + call get_dyn_col_p(col_ind, ie, blk_ind) + ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) + phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) + do ilyr = 1, pver + pdel(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%t(icol, ilyr) = real(T_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%u(icol, ilyr) = real(uv_tmp(blk_ind(1), 1, ilyr, ie), kind_phys) + phys_state%v(icol, ilyr) = real(uv_tmp(blk_ind(1), 2, ilyr, ie), kind_phys) + phys_state%omega(icol, ilyr) = real(omega_tmp(blk_ind(1), ilyr, ie), kind_phys) + +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. use_gw_front_igw) then + frontgf_phys(icol, ilyr, lchnk) = frontgf(blk_ind(1), ilyr, ie) + frontga_phys(icol, ilyr, lchnk) = frontga(blk_ind(1), ilyr, ie) + end if +#endif + end do + + do m = 1, pcnst + do ilyr = 1, pver + phys_state(lchnk)%q(icol, ilyr,m) = real(q_tmp(blk_ind(1), ilyr,m, ie), kind_phys) + end do + end do + end do + +!Remove once a gravity wave parameterization is available -JN +#if 0 + if (use_gw_front .or. 
use_gw_front_igw) then + !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, pbuf_chnk, pbuf_frontgf, pbuf_frontga) + do lchnk = begchunk, endchunk + ncols = get_ncols_p(lchnk) + pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) + call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) + call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) + do icol = 1, ncols + do ilyr = 1, pver + pbuf_frontgf(icol, ilyr) = frontgf_phys(icol, ilyr, lchnk) + pbuf_frontga(icol, ilyr) = frontga_phys(icol, ilyr, lchnk) + end do + end do + end do + deallocate(frontgf_phys) + deallocate(frontga_phys) + end if +#endif + + call t_stopf('dpcopy') + + ! Save the tracer fields input to physics package for calculating tendencies + ! The mixing ratios are all dry at this point. +! q_prev(1:ncols,1:pver,:) = phys_state(lchnk)%q(1:ncols,1:pver,1:pcnst) + q_prev(1:pcols,1:pver,:) = real(phys_state%q(1:pcols,1:pver,1:3), r8) + + call test_mapping_output_phys_state(phys_state,dyn_out%fvm) + + ! Deallocate the temporary arrays + deallocate(ps_tmp) + deallocate(dp3d_tmp) + deallocate(phis_tmp) + deallocate(T_tmp) + deallocate(uv_tmp) + deallocate(q_tmp) + deallocate(omega_tmp) + + ! ps, pdel, and q in phys_state are all dry at this point. After return from derived_phys_dry + ! ps and pdel include water vapor only, and the 'wet' constituents have been converted to wet mmr. + call t_startf('derived_phys') + call derived_phys_dry(phys_state, phys_tend) + call t_stopf('derived_phys') + +end subroutine d_p_coupling + +!========================================================================================= + +subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) + + use physics_types, only: pdel, pdeldry + + ! Convert the physics output state into the dynamics input state. + use test_fvm_mapping, only: test_mapping_overwrite_tendencies + use test_fvm_mapping, only: test_mapping_output_mapped_tendencies + + ! SE dycore: + use bndry_mod, only: bndry_exchange + use edge_mod, only: edgeVpack, edgeVunpack + use fvm_mapping, only: phys2dyn_forcings_fvm + + ! arguments + type(physics_state), intent(inout) :: phys_state + type(physics_tend), intent(inout) :: phys_tend + integer, intent(in) :: tl_qdp, tl_f + type(dyn_import_t), intent(inout) :: dyn_in + type(hybrid_t) :: hybrid + + ! LOCAL VARIABLES + integer :: ic , ncols ! index + type(element_t), pointer :: elem(:) ! pointer to dyn_in element array + integer :: ie ! index for elements + integer :: col_ind ! index over columns + integer :: blk_ind(1) ! element offset + integer :: icol, ilyr ! indices for chunk, column, layer + + real(r8), allocatable :: dp_phys(:,:,:) ! temp array to hold dp on physics grid + real(r8), allocatable :: T_tmp(:,:,:) ! temp array to hold T + real(r8), allocatable :: dq_tmp(:,:,:,:) ! temp array to hold q + real(r8), allocatable :: uv_tmp(:,:,:,:) ! temp array to hold uv + integer :: m, i, j, k + + real(kind_phys) :: factor + integer :: num_trac + integer :: nets, nete + integer :: kptr, ii + !---------------------------------------------------------------------------- + + if (.not. local_dp_map) then + call endrun('p_d_coupling: Weak scaling does not support load balancing') + end if + + if (iam < par%nprocs) then + elem => dyn_in%elem + else + nullify(elem) + end if + + allocate(T_tmp(nphys_pts,pver,nelemd)) + allocate(uv_tmp(nphys_pts,2,pver,nelemd)) + allocate(dq_tmp(nphys_pts,pver,pcnst,nelemd)) + allocate(dp_phys(nphys_pts,pver,nelemd)) + + T_tmp = 0.0_r8 + uv_tmp = 0.0_r8 + dq_tmp = 0.0_r8 + + if (.not. 
allocated(q_prev)) then + call endrun('p_d_coupling: q_prev not allocated') + end if + +!Remove once constituents are implemented in the CCPP framework -JN: +#if 0 + ! Convert wet to dry mixing ratios and modify the physics temperature + ! tendency to be thermodynamically consistent with the dycore. + !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, m, factor) + do lchnk = begchunk, endchunk + ncols = get_ncols_p(lchnk) + do icol = 1, ncols + do ilyr = 1, pver + ! convert wet mixing ratios to dry + factor = phys_state(lchnk)%pdel(icol,ilyr)/phys_state(lchnk)%pdeldry(icol,ilyr) + do m = 1, pcnst + if (cnst_type(m) == 'wet') then + phys_state(lchnk)%q(icol,ilyr,m) = factor*phys_state(lchnk)%q(icol,ilyr,m) + end if + end do + end do + end do + call thermodynamic_consistency( & + phys_state(lchnk), phys_tend(lchnk), ncols, pver) + end do +#else + do ilyr = 1, pver + do icol=1, pcols + !Apply adjustment only to water vapor: + factor = pdel(icol,ilyr)/pdeldry(icol,ilyr) + phys_state%q(icol,ilyr,ix_qv) = factor*phys_state%q(icol,ilyr,ix_qv) + phys_state%q(icol,ilyr,ix_cld_liq) = factor*phys_state%q(icol,ilyr,ix_cld_liq) + phys_state%q(icol,ilyr,ix_rain) = factor*phys_state%q(icol,ilyr,ix_rain) + end do + end do + call thermodynamic_consistency(phys_state, phys_tend, pcols, pver) +#endif + + call t_startf('pd_copy') + !$omp parallel do num_threads(max_num_threads) private (col_ind, icol, ie, blk_ind, ilyr, m) + do col_ind = 1, pcols + call get_dyn_col_p(col_ind, ie, blk_ind) + + ! test code -- does nothing unless cpp macro debug_coupling is defined. + call test_mapping_overwrite_tendencies(phys_state, & + phys_tend, pcols, q_prev(1:pcols,:,:), & + dyn_in%fvm) + + do ilyr = 1, pver + dp_phys(blk_ind(1),ilyr,ie) = real(pdeldry(icol,ilyr), r8) + T_tmp(blk_ind(1),ilyr,ie) = real(phys_tend%dtdt(icol,ilyr), r8) + uv_tmp(blk_ind(1),1,ilyr,ie) = real(phys_tend%dudt(icol,ilyr), r8) + uv_tmp(blk_ind(1),2,ilyr,ie) = real(phys_tend%dvdt(icol,ilyr), r8) + do m = 1, pcnst + dq_tmp(blk_ind(1),ilyr,m,ie) = & + (real(phys_state(lchnk)%q(icol,ilyr,m), r8) - q_prev(icol,ilyr,m,lchnk)) + end do + end do + end do + call t_stopf('pd_copy') + + + if (iam < par%nprocs) then + + if (fv_nphys > 0) then + + ! put forcings into fvm structure + num_trac = max(qsize,ntrac) + do ie = 1, nelemd + do j = 1, fv_nphys + do i = 1, fv_nphys + ii = i + (j-1)*fv_nphys + dyn_in%fvm(ie)%ft(i,j,1:pver) = T_tmp(ii,1:pver,ie) + dyn_in%fvm(ie)%fm(i,j,1:2,1:pver) = uv_tmp(ii,1:2,1:pver,ie) + dyn_in%fvm(ie)%fc_phys(i,j,1:pver,1:num_trac) = dq_tmp(ii,1:pver,1:num_trac,ie) + dyn_in%fvm(ie)%dp_phys(i,j,1:pver) = dp_phys(ii,1:pver,ie) + end do + end do + end do + + !JMD $OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n) + !JMD hybrid = config_thread_region(par,'horizontal') + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid,ibeg=nets,iend=nete) + + ! 
high-order mapping of ft and fm (and fq if no cslam) using fvm technology + call t_startf('phys2dyn') + call phys2dyn_forcings_fvm(elem, dyn_in%fvm, hybrid,nets,nete,ntrac==0, tl_f, tl_qdp) + call t_stopf('phys2dyn') + else + + call t_startf('putUniquePoints') + + !$omp parallel do num_threads(max_num_threads) private(ie,ncols) + do ie = 1, nelemd + ncols = elem(ie)%idxP%NumUniquePts + call putUniquePoints(elem(ie)%idxP, nlev, T_tmp(1:pcols,:,ie), & + elem(ie)%derived%fT(:,:,:)) + call putUniquePoints(elem(ie)%idxV, 2, nlev, uv_tmp(1:pcols,:,:,ie), & + elem(ie)%derived%fM(:,:,:,:)) + call putUniquePoints(elem(ie)%idxV, nlev, pcnst, dq_tmp(1:pcols,:,:,ie), & + elem(ie)%derived%fQ(:,:,:,:)) + end do + call t_stopf('putUniquePoints') + end if + end if + + deallocate(T_tmp) + deallocate(uv_tmp) + deallocate(dq_tmp) + + ! Boundary exchange for physics forcing terms. + ! For physics on GLL grid, for points with duplicate degrees of freedom, + ! putuniquepoints() set one of the element values and set the others to zero, + ! so do a simple sum (boundary exchange with no weights). + ! For physics grid, we interpolated into all points, so do weighted average. + + call t_startf('p_d_coupling:bndry_exchange') + + do ie = 1, nelemd + if (fv_nphys > 0) then + do k = 1, nlev + dyn_in%elem(ie)%derived%FM(:,:,1,k) = & + dyn_in%elem(ie)%derived%FM(:,:,1,k) * & + dyn_in%elem(ie)%spheremp(:,:) + dyn_in%elem(ie)%derived%FM(:,:,2,k) = & + dyn_in%elem(ie)%derived%FM(:,:,2,k) * & + dyn_in%elem(ie)%spheremp(:,:) + dyn_in%elem(ie)%derived%FT(:,:,k) = & + dyn_in%elem(ie)%derived%FT(:,:,k) * & + dyn_in%elem(ie)%spheremp(:,:) + do m = 1, qsize + dyn_in%elem(ie)%derived%FQ(:,:,k,m) = & + dyn_in%elem(ie)%derived%FQ(:,:,k,m) * & + dyn_in%elem(ie)%spheremp(:,:) + end do + end do + end if + kptr = 0 + call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FM(:,:,:,:), 2*nlev, kptr, ie) + kptr = kptr + 2*nlev + call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FT(:,:,:), nlev, kptr, ie) + kptr = kptr + nlev + call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + end do + + if (iam < par%nprocs) then + call bndry_exchange(par, edgebuf, location='p_d_coupling') + end if + + do ie = 1, nelemd + kptr = 0 + call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FM(:,:,:,:), 2*nlev, kptr, ie) + kptr = kptr + 2*nlev + call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FT(:,:,:), nlev, kptr, ie) + kptr = kptr + nlev + call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + if (fv_nphys > 0) then + do k = 1, nlev + dyn_in%elem(ie)%derived%FM(:,:,1,k) = & + dyn_in%elem(ie)%derived%FM(:,:,1,k) * & + dyn_in%elem(ie)%rspheremp(:,:) + dyn_in%elem(ie)%derived%FM(:,:,2,k) = & + dyn_in%elem(ie)%derived%FM(:,:,2,k) * & + dyn_in%elem(ie)%rspheremp(:,:) + dyn_in%elem(ie)%derived%FT(:,:,k) = & + dyn_in%elem(ie)%derived%FT(:,:,k) * & + dyn_in%elem(ie)%rspheremp(:,:) + do m = 1, qsize + dyn_in%elem(ie)%derived%FQ(:,:,k,m) = & + dyn_in%elem(ie)%derived%FQ(:,:,k,m) * & + dyn_in%elem(ie)%rspheremp(:,:) + end do + end do + end if + end do + call t_stopf('p_d_coupling:bndry_exchange') + + if (iam < par%nprocs .and. fv_nphys > 0) then + call test_mapping_output_mapped_tendencies(dyn_in%fvm(1:nelemd), elem(1:nelemd), & + 1, nelemd, tl_f, tl_qdp) + end if +end subroutine p_d_coupling + +!========================================================================================= + +subroutine derived_phys_dry(phys_state, phys_tend) + + ! The ps, pdel, and q components of phys_state are all dry on input. + ! 
On output the psdry and pdeldry components are initialized; ps and pdel are + ! updated to contain contribution from water vapor only; the 'wet' constituent + ! mixing ratios are converted to a wet basis. Initialize geopotential heights. + ! Finally compute energy and water column integrals of the physics input state. + +! use constituents, only: qmin + use physics_types, only: psdry, pint, lnpint, pintdry, lnpintdry + use physics_types, only: pdel, rpdel, pdeldry, rpdeldry + use physics_types, only: pmid, lnpmid, pmiddry, lnpmiddry + use physics_types, only: exner, zi, zm + use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update + use shr_const_mod, only: shr_const_rwv + use phys_control, only: waccmx_is + use geopotential_t, only: geopotential_t +! use check_energy, only: check_energy_timestep_init + use hycoef, only: hyai, hybi, ps0 + use shr_vmath_mod, only: shr_vmath_log + use gmean_mod, only: gmean +! use qneg_module, only: qneg3 + use dyn_comp, only: ixo, ixo2, ixh, ixh2 + + ! arguments + type(physics_state), intent(inout) :: phys_state + type(physics_tend ), intent(inout) :: phys_tend + + ! local variables + real(r8) :: qbot ! bottom level q before change + real(r8) :: qbotm1 ! bottom-1 level q before change + real(r8) :: dqreq ! q change at pver-1 required to remove q shr_const_pi + + !SE dycore: + use dimensions_mod, only: np, npsq, fv_nphys + use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl + use coordinate_systems_mod, only: spherical_polar_t + use fvm_control_volume_mod, only: fvm_struct + + implicit none + private + save + + public :: dp_init + public :: dp_reorder + public :: dp_write + public :: dp_allocate + public :: dp_deallocate + + ! Total number of physics points per spectral element + ! no physgrid: nphys_pts = npsq (physics on GLL grid) + ! physgrid: nphys_pts = nphys2 (physics on CSLAM grid) + ! Value is set when se_fv_nphys namelist variable is read + integer, public :: nphys_pts = npsq + + ! NOTE: dp_gid() is in space filling curve rank order + ! all other global arrays are in block id (global id) order + ! + ! dp_gid() is used to re-order data collected on root via mpi_gatherv + ! into block id ordering + ! + ! j=dp_gid(i) i = element space filling curve rank + ! j = element global id = block id = history file ordering + ! + integer, allocatable,dimension(:) :: dp_gid ! 
NE=240, integer*4 = 1.3MB + integer, public,allocatable,dimension(:) :: dp_owner + + real (r8),public,allocatable,dimension(:,:,:) :: weights_all_fvm2phys + integer ,public,allocatable,dimension(:,:,:) :: weights_eul_index_all_fvm2phys,weights_lgr_index_all_fvm2phys + real (r8),public,allocatable,dimension(:,:,:) :: weights_all_phys2fvm + integer ,public,allocatable,dimension(:,:,:) :: weights_eul_index_all_phys2fvm,weights_lgr_index_all_phys2fvm + integer ,public,allocatable,dimension(:) :: jall_fvm2phys,jall_phys2fvm + integer ,public :: num_weights_fvm2phys,num_weights_phys2fvm + + +contains + subroutine dp_init(elem,fvm) + + use spmd_utils, only: masterproc + use cam_logfile, only: iulog + + !SE dycore: + use dimensions_mod, only: nelemd, nc, irecons_tracer + use element_mod, only: element_t + + type(element_t) , dimension(nelemd), intent(in) :: elem + type (fvm_struct), dimension(nelemd), intent(in) :: fvm + + num_weights_phys2fvm = 0 + num_weights_fvm2phys = 0 + if (fv_nphys>0) then + num_weights_phys2fvm = (nc+fv_nphys)**2 + num_weights_fvm2phys = (nc+fv_nphys)**2 + + allocate(weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd)) + allocate(weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)) + allocate(weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)) + + allocate(weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd)) + allocate(weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)) + allocate(weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)) + allocate(jall_fvm2phys(nelemd)) + allocate(jall_phys2fvm(nelemd)) + + call fvm2phys_init(elem,fvm,nc,fv_nphys,irecons_tracer,& + weights_all_fvm2phys,weights_eul_index_all_fvm2phys,weights_lgr_index_all_fvm2phys,& + weights_all_phys2fvm,weights_eul_index_all_phys2fvm,weights_lgr_index_all_phys2fvm,& + jall_fvm2phys,jall_phys2fvm) + + if (masterproc) then + write(iulog, *) 'dp_init: Initialized phys2fvm/fvm2phys mapping vars' + end if + + end if + end subroutine dp_init + + subroutine dp_reorder(before, after) + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + use spmd_utils, only: masterproc + use shr_sys_mod, only: shr_sys_flush + + !SE dycore: + use dimensions_mod, only: nelem + + implicit none + real(r8), dimension(fv_nphys*fv_nphys,*), intent(in) :: before + real(r8), dimension(fv_nphys*fv_nphys,*), intent(out) :: after + integer :: ie + + ! begin + do ie = 1,nelem + if (dp_gid(ie) < 0) then + if (masterproc) then + write(iulog,*) 'ie =',ie,', dp_gid(ie) =',dp_gid(ie) + call shr_sys_flush(iulog) + end if + call endrun('Bad element remap in dp_reorder') + end if + after(:,dp_gid(ie)) = before(:,ie) + end do + end subroutine dp_reorder + + !!! + + subroutine dp_allocate(elem) + use spmd_utils, only: masterproc, masterprocid, npes + use spmd_utils, only: mpicom, mpi_integer + + !SE dycore: + use dimensions_mod, only: nelem, nelemd + use element_mod, only: element_t + + implicit none + type(element_t),dimension(nelemd),intent(in) :: elem + + integer :: i,j,ierror + integer,dimension(nelemd) :: lgid + integer,dimension(:),allocatable :: displs,recvcount + + ! 
begin + + allocate(displs(npes)) + allocate(dp_gid(nelem)) + allocate(recvcount(npes)) + call mpi_gather(nelemd, 1, mpi_integer, recvcount, 1, mpi_integer, & + masterprocid, mpicom, ierror) + lgid(:) = elem(:)%globalid + if (masterproc) then + displs(1) = 0 + do i = 2,npes + displs(i) = displs(i-1)+recvcount(i-1) + end do + end if + call mpi_gatherv(lgid, nelemd, mpi_integer, dp_gid, recvcount, displs, & + mpi_integer, masterprocid, mpicom, ierror) + if (masterproc) then + allocate(dp_owner(nelem)) + dp_owner(:) = -1 + do i = 1,npes + do j = displs(i)+1,displs(i)+recvcount(i) + dp_owner(dp_gid(j)) = i-1 + end do + end do + end if + deallocate(displs) + deallocate(recvcount) + ! minimize global memory use + call mpi_barrier(mpicom,ierror) + if (.not.masterproc) then + allocate(dp_owner(nelem)) + end if + call mpi_bcast(dp_gid,nelem,mpi_integer,masterprocid,mpicom,ierror) + call mpi_bcast(dp_owner,nelem,mpi_integer,masterprocid,mpicom,ierror) + end subroutine dp_allocate + + !!! + + subroutine dp_deallocate() + deallocate(dp_gid) + deallocate(dp_owner) + end subroutine dp_deallocate + + !!! + + subroutine dp_write(elem, fvm, grid_format, filename_in) + use cam_abortutils, only: endrun + use netcdf, only: nf90_create, nf90_close, nf90_enddef + use netcdf, only: nf90_def_dim, nf90_def_var, nf90_put_var + use netcdf, only: nf90_double, nf90_int, nf90_put_att + use netcdf, only: nf90_noerr, nf90_strerror, nf90_clobber + use spmd_utils, only: masterproc, masterprocid, mpicom, npes + use spmd_utils, only: mpi_integer, mpi_real8 + use cam_logfile, only: iulog + use shr_sys_mod, only: shr_sys_flush + + !SE dycore: + use dimensions_mod, only: nelem, nelemd + use element_mod, only: element_t + use dimensions_mod, only: ne + use coordinate_systems_mod, only: cart2spherical + + ! Inputs + type(element_t), intent(in) :: elem(:) + type (fvm_struct), intent(in) :: fvm(:) + character(len=*), intent(in) :: grid_format + character(len=*), intent(in) :: filename_in + + real(r8), parameter :: rad2deg = 180._r8/pi + + ! Local variables + integer :: i, ie, ierror, j, status, ivtx + integer :: grid_corners_id, grid_rank_id, grid_size_id + character(len=256) :: errormsg + character(len=shr_kind_cl) :: filename + integer :: ncid + integer :: grid_dims_id, grid_area_id, grid_center_lat_id + integer :: grid_center_lon_id, grid_corner_lat_id + integer :: grid_corner_lon_id, grid_imask_id + integer :: gridsize + integer :: IOrootID + logical :: IOroot + integer,allocatable,dimension(:) :: displs,recvcount + + real(r8), dimension(fv_nphys, fv_nphys, nelemd, 4, 2) :: corners + real(r8), dimension(fv_nphys, fv_nphys, nelemd) :: lwork + real(r8), allocatable, dimension(:) :: recvbuf + real(r8), allocatable, dimension(:,:) :: gwork + real(r8) :: x, y + type (spherical_polar_t) :: sphere + + ! begin + + !! Check to see if we are doing grid output + if (trim(grid_format) == "no") then + if (masterproc) then + write(iulog, *) 'dp_write: Not writing phys_grid file.' + end if + return + else if (trim(grid_format) /= 'SCRIP') then + if (masterproc) then + write(errormsg, *) 'dp_write: ERROR, bad value for se_write_grid, ',& + trim(grid_format) + call endrun(errormsg) + end if + end if + + ! 
Create the NetCDF file + if (len_trim(filename_in) == 0) then + write(filename, '(3(a,i0),3a)') "ne", ne, "np", np, ".pg", fv_nphys, & + "_", trim(grid_format), ".nc" + else + filename = trim(filename_in) + end if + status = nf90_create(trim(filename), nf90_clobber, ncid) + if (status /= nf90_noerr) then + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + ! PIO_put_var puts from its root node, find that (so we do our work there) + IOrootID = masterprocid + IOroot = masterproc + + ! Allocate workspace and calculate PE displacement information + if (IOroot) then + allocate(displs(npes)) + allocate(recvcount(npes)) + else + allocate(displs(0)) + allocate(recvcount(0)) + end if + gridsize = nelem * fv_nphys*fv_nphys + if(masterproc) then + write(iulog, *) 'Writing physics SCRIP grid file: ', trim(filename) + write(iulog, '(a,i7,a,i3)') 'nelem = ', nelem, ', fv_nphys = ', fv_nphys + call shr_sys_flush(iulog) + end if + call mpi_gather(nelemd*fv_nphys*fv_nphys, 1, mpi_integer, recvcount, 1, & + mpi_integer, IOrootID, mpicom, ierror) + + if (IOroot) then + displs(1) = 0 + do i = 2, npes + displs(i) = displs(i-1)+recvcount(i-1) + end do + allocate(recvbuf(gridsize)) + else + allocate(recvbuf(0)) + end if + allocate(gwork(4, gridsize)) + + if (IOroot) then + ! Define the horizontal grid dimensions for SCRIP output + status = nf90_def_dim(ncid, "grid_corners", 4, grid_corners_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining dimension, grid_corners' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + status = nf90_def_dim(ncid, "grid_rank", 1, grid_rank_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining dimension, grid_rank' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + status = nf90_def_dim(ncid, "grid_size", gridsize, grid_size_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining dimension, grid_size' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + ! 
Define the coordinate variables + status = nf90_def_var(ncid, "grid_dims", nf90_int, (/grid_rank_id/), & + grid_dims_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_dims' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_area", nf90_double, & + (/grid_size_id/), grid_area_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_area' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_area_id, "units", "radians^2") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_area' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_area_id, "long_name", "area weights") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_area' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_center_lat", nf90_double, & + (/grid_size_id/), grid_center_lat_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_center_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_center_lat_id, "units", "degrees") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_center_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_center_lon", nf90_double, & + (/grid_size_id/), grid_center_lon_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_center_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_center_lon_id, "units", "degrees") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_center_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_corner_lat", nf90_double, & + (/grid_corners_id, grid_size_id/), grid_corner_lat_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining grid_corner_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_corner_lat_id, "units", "degrees") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_corner_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_corner_lon", nf90_double, & + (/grid_corners_id, grid_size_id/), grid_corner_lon_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_corner_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_put_att(ncid, grid_corner_lon_id, "units", "degrees") + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining attributes for grid_corner_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + status = nf90_def_var(ncid, "grid_imask", nf90_double, & + (/grid_size_id/), 
grid_imask_id) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error defining variable grid_imask' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + + ! End of NetCDF definitions + status = nf90_enddef(ncid) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error calling enddef' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if ! IOroot + + if (IOroot) then + status = nf90_put_var(ncid, grid_dims_id, (/ gridsize /)) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_dims' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + + do ie=1,nelemd + lwork(:,:,ie) = fvm(ie)%area_sphere_physgrid(:,:) + end do + call mpi_gatherv(lwork, size(lwork), mpi_real8, recvbuf, recvcount, & + displs, mpi_real8, IOrootID, mpicom, ierror) + call dp_allocate(elem) + if (IOroot) then + call dp_reorder(recvbuf, gwork(1,:)) + status = nf90_put_var(ncid, grid_area_id, gwork(1,:)) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_area' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + do ie=1,nelemd + lwork(:,:,ie) = rad2deg*fvm(ie)%center_cart_physgrid(:,:)%lat + end do + call mpi_gatherv(lwork, size(lwork), mpi_real8, recvbuf, recvcount, & + displs, mpi_real8, IOrootID, mpicom, ierror) + if (IOroot) then + call dp_reorder(recvbuf, gwork(1,:)) + status = nf90_put_var(ncid, grid_center_lat_id, gwork(1,:)) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_center_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + + do ie=1,nelemd + lwork(:,:,ie) = rad2deg*fvm(ie)%center_cart_physgrid(:,:)%lon + end do + call mpi_gatherv(lwork, size(lwork), mpi_real8, recvbuf, recvcount, & + displs, mpi_real8, IOrootID, mpicom, ierror) + if (IOroot) then + call dp_reorder(recvbuf, gwork(1,:)) + status = nf90_put_var(ncid, grid_center_lon_id, gwork(1,:)) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_center_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + ! compute physgrid grid corners + do ie=1,nelemd + do j=1,fv_nphys + do i=1,fv_nphys + do ivtx=1,4 + x = fvm(ie)%vtx_cart_physgrid(ivtx,1,i,j) + y = fvm(ie)%vtx_cart_physgrid(ivtx,2,i,j) + sphere = cart2spherical(x,y,elem(ie)%FaceNum) + corners(i,j,ie,ivtx,1) = rad2deg * sphere%lat + corners(i,j,ie,ivtx,2) = rad2deg * sphere%lon + end do + end do + end do + end do + ! Collect all information for the grid corner latitude (counter-clockwise) + do ivtx=1,4 + call mpi_gatherv(corners(:,:,:,ivtx,1), size(corners(:,:,:,ivtx,1)), mpi_real8, recvbuf, recvcount, & + displs, mpi_real8, IOrootID, mpicom, ierror) + if (IOroot) then + call dp_reorder(recvbuf, gwork(ivtx,:)) + end if + end do + if (IOroot) then + status = nf90_put_var(ncid, grid_corner_lat_id, gwork) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_corner_lat' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + ! 
Collect all information for the grid corner longitudes (counter-clockwise) + do ivtx=1,4 + call mpi_gatherv(corners(:,:,:,ivtx,2), size(corners(:,:,:,ivtx,2)), mpi_real8, recvbuf, recvcount, & + displs, mpi_real8, IOrootID, mpicom, ierror) + if (IOroot) then + call dp_reorder(recvbuf, gwork(ivtx,:)) + end if + end do + call dp_deallocate() + if (IOroot) then + status = nf90_put_var(ncid, grid_corner_lon_id, gwork) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_corner_lon' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + + if (IOroot) then + gwork(1,:) = 1._r8 + status = nf90_put_var(ncid, grid_imask_id, gwork(1,:)) + if (status /= nf90_noerr) then + write(iulog, *) 'dp_write: Error writing variable grid_imask' + call shr_sys_flush(iulog) + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + +! call pio_seterrorhandling(ncid, PIO_INTERNAL_ERROR) + ! Close the file + call mpi_barrier(mpicom, ierror) + if (IOroot) then + status = nf90_close(ncid) + if (status /= nf90_noerr) then + call endrun("dp_write: "//trim(nf90_strerror(status))) + end if + end if + + call mpi_barrier(mpicom, ierror) + if(masterproc) then + write(iulog, *) 'Finished writing physics grid file: ', trim(filename) + call shr_sys_flush(iulog) + end if + + end subroutine dp_write + !!! + + subroutine fvm2phys_init(elem,fvm,fvm_nc,phys_nc,irecons,& + weights_all_fvm2phys,weights_eul_index_all_fvm2phys,weights_lgr_index_all_fvm2phys,& + weights_all_phys2fvm,weights_eul_index_all_phys2fvm,weights_lgr_index_all_phys2fvm,& + jall_fvm2phys,jall_phys2fvm) + + !SE dycore: + use dimensions_mod , only: ngpc,nelemd + use fvm_overlap_mod , only: compute_weights_cell + use element_mod , only: element_t + + type(element_t) , dimension(nelemd), intent(in) :: elem + type (fvm_struct), dimension(nelemd), intent(in) :: fvm + integer , intent(in) :: fvm_nc, phys_nc, irecons + real (kind=r8) :: dalpha,dbeta + real (kind=r8), dimension(0:phys_nc+2):: xgno_phys,ygno_phys + real (kind=r8), dimension(0:fvm_nc+2) :: xgno_fvm,ygno_fvm + + real (kind=r8), dimension(ngpc):: gauss_weights, abscissae !dimension(ngauss) + + integer :: i,j,h + integer, parameter :: nvertex = 4 + real (kind=r8), dimension(nvertex) :: xcell,ycell + + real (kind=r8) , dimension(num_weights_fvm2phys,irecons,nelemd),intent(out) :: weights_all_fvm2phys + integer, dimension(num_weights_fvm2phys,2,nelemd),intent(out) :: weights_eul_index_all_fvm2phys + integer, dimension(num_weights_fvm2phys,2,nelemd),intent(out) :: weights_lgr_index_all_fvm2phys + + real (kind=r8) , dimension(num_weights_phys2fvm,irecons,nelemd),intent(out) :: weights_all_phys2fvm + integer, dimension(num_weights_phys2fvm,2,nelemd),intent(out) :: weights_eul_index_all_phys2fvm + integer, dimension(num_weights_phys2fvm,2,nelemd),intent(out) :: weights_lgr_index_all_phys2fvm + + integer , dimension(nelemd) ,intent(out) :: jall_fvm2phys,jall_phys2fvm + + integer, parameter :: jmax_segments_cell = 50 + real (kind=r8) , dimension(jmax_segments_cell,irecons) :: weights_cell + integer , dimension(jmax_segments_cell,2) :: weights_eul_index_cell + integer :: jcollect_cell,ie + real(kind=r8), dimension(phys_nc,phys_nc) :: phys_area, factor + real(kind=r8), dimension(fvm_nc,fvm_nc) :: fvm_area + + xgno_phys(0) = -1D20; xgno_phys(phys_nc+2) = 1D20 + xgno_fvm(0) = -1D20; xgno_fvm(fvm_nc+2) = 1D20 + do ie=1,nelemd + dalpha = abs(elem(ie)%corners(1)%x-elem(ie)%corners(2)%x)/phys_nc !in alpha + dbeta = 
abs(elem(ie)%corners(1)%y-elem(ie)%corners(4)%y)/phys_nc !in beta + do i=1,phys_nc+1 + xgno_phys(i) = tan(elem(ie)%corners(1)%x+(i-1)*dalpha) + ygno_phys(i) = tan(elem(ie)%corners(1)%y+(i-1)*dbeta ) + end do + + dalpha = abs(elem(ie)%corners(1)%x-elem(ie)%corners(2)%x)/fvm_nc !in alpha + dbeta = abs(elem(ie)%corners(1)%y-elem(ie)%corners(4)%y)/fvm_nc !in beta + do i=1,fvm_nc+1 + xgno_fvm(i) = tan(elem(ie)%corners(1)%x+(i-1)*dalpha) + ygno_fvm(i) = tan(elem(ie)%corners(1)%y+(i-1)*dbeta ) + end do + + ! + ! compute area using line-integrals + ! + ! do j=1,phys_nc + ! do i=1,phys_nc + ! da_phys(i,j) = (I_00(xgno_phys(i+1),ygno_phys(j+1)) - I_00(xgno_phys(i ),ygno_phys(j+1)) + & + ! I_00(xgno_phys(i ),ygno_phys(j )) - I_00(xgno_phys(i+1),ygno_phys(j ))) + ! end do + ! end do + ! + ! do j=1,fvm_nc + ! do i=1,fvm_nc + ! da_fvm(i,j) = (I_00(xgno_fvm(i+1),ygno_fvm(j+1)) - I_00(xgno_fvm(i ),ygno_fvm(j+1)) + & + ! I_00(xgno_fvm(i ),ygno_fvm(j )) - I_00(xgno_fvm(i+1),ygno_fvm(j ))) + ! end do + ! end do + + gauss_weights = 0.0D0; abscissae=0.0D0!not used since line-segments are parallel to coordinate + + jall_fvm2phys(ie)=1 + do j=1,phys_nc + do i=1,phys_nc + xcell(1) = xgno_phys(i) ; ycell(1) = ygno_phys(j) + xcell(2) = xgno_phys(i) ; ycell(2) = ygno_phys(j+1) + xcell(3) = xgno_phys(i+1); ycell(3) = ygno_phys(j+1) + xcell(4) = xgno_phys(i+1); ycell(4) = ygno_phys(j) + + call compute_weights_cell(nvertex,.true.,& + xcell,ycell,i,j,irecons,xgno_fvm,ygno_fvm,0,fvm_nc+2,& + 1,fvm_nc+1,1,fvm_nc+1,& + ngpc,gauss_weights,abscissae,& + weights_cell,weights_eul_index_cell,jcollect_cell,jmax_segments_cell) + + if (jcollect_cell>0) then + weights_all_fvm2phys(jall_fvm2phys(ie):jall_fvm2phys(ie)+jcollect_cell-1,:,ie) = & + weights_cell(1:jcollect_cell,:)!/fvm(ie)%area_sphere_physgrid(i,j)!da_phys(i,j) + + weights_eul_index_all_fvm2phys(jall_fvm2phys(ie):jall_fvm2phys(ie)+jcollect_cell-1,:,ie) = & + weights_eul_index_cell(1:jcollect_cell,:) + weights_lgr_index_all_fvm2phys(jall_fvm2phys(ie):jall_fvm2phys(ie)+jcollect_cell-1,1,ie) = i + weights_lgr_index_all_fvm2phys(jall_fvm2phys(ie):jall_fvm2phys(ie)+jcollect_cell-1,2,ie) = j + jall_fvm2phys(ie) = jall_fvm2phys(ie)+jcollect_cell + endif + end do + enddo + jall_fvm2phys(ie)=jall_fvm2phys(ie)-1 + ! + ! make sure sum of area overlap weights exactly match fvm%%area_sphere_physgrid + ! 
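+    ! (the raw overlap weights returned by compute_weights_cell may not sum
+    !  exactly to the spherical physics-grid cell areas, so the loop below
+    !  rescales them, roughly:
+    !
+    !    phys_area(i,j) = sum over h of weights_all_fvm2phys(h,1,ie)
+    !    factor(i,j)    = fvm(ie)%area_sphere_physgrid(i,j)/phys_area(i,j)
+    !    weights_all_fvm2phys(h,1,ie) = factor(i,j)*weights_all_fvm2phys(h,1,ie)
+    !
+    !  which keeps the fvm -> physgrid remap conservative in area)
+    !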
+ phys_area = 0.0_r8 + do h=1,jall_fvm2phys(ie) + i = weights_lgr_index_all_fvm2phys(h,1,ie); j = weights_lgr_index_all_fvm2phys(h,2,ie) + phys_area(i,j) = phys_area(i,j) +weights_all_fvm2phys(h,1,ie) + end do + factor(:,:) = fvm(ie)%area_sphere_physgrid(:,:)/phys_area(:,:) + do h=1,jall_fvm2phys(ie) + i = weights_lgr_index_all_fvm2phys(h,1,ie); j = weights_lgr_index_all_fvm2phys(h,2,ie) + weights_all_fvm2phys(h,1,ie) = weights_all_fvm2phys(h,1,ie)*factor(i,j) + end do + + jall_phys2fvm(ie)=1 + do j=1,fvm_nc + do i=1,fvm_nc + xcell(1) = xgno_fvm(i) ; ycell(1) = ygno_fvm(j) + xcell(2) = xgno_fvm(i) ; ycell(2) = ygno_fvm(j+1) + xcell(3) = xgno_fvm(i+1); ycell(3) = ygno_fvm(j+1) + xcell(4) = xgno_fvm(i+1); ycell(4) = ygno_fvm(j) + + call compute_weights_cell(nvertex,.true.,& + xcell,ycell,i,j,irecons,xgno_phys,ygno_phys,0,phys_nc+2,& + 1,phys_nc+1,1,phys_nc+1,& + ngpc,gauss_weights,abscissae,& + weights_cell,weights_eul_index_cell,jcollect_cell,jmax_segments_cell) + + if (jcollect_cell>0) then + weights_all_phys2fvm(jall_phys2fvm(ie):jall_phys2fvm(ie)+jcollect_cell-1,:,ie) & + = weights_cell(1:jcollect_cell,:)!/fvm(ie)%area_sphere(i,j)!da_fvm(i,j) + + weights_eul_index_all_phys2fvm(jall_phys2fvm(ie):jall_phys2fvm(ie)+jcollect_cell-1,:,ie) = & + weights_eul_index_cell(1:jcollect_cell,:) + weights_lgr_index_all_phys2fvm(jall_phys2fvm(ie):jall_phys2fvm(ie)+jcollect_cell-1,1,ie) = i + weights_lgr_index_all_phys2fvm(jall_phys2fvm(ie):jall_phys2fvm(ie)+jcollect_cell-1,2,ie) = j + jall_phys2fvm(ie) = jall_phys2fvm(ie)+jcollect_cell + endif + end do + enddo + jall_phys2fvm(ie)=jall_phys2fvm(ie)-1 + ! + ! make sure sum of area overlap weights exactly matches fvm%%area_sphere_physgrid + ! + fvm_area = 0.0_r8 + do h=1,jall_phys2fvm(ie) + i = weights_lgr_index_all_phys2fvm(h,1,ie); j = weights_lgr_index_all_phys2fvm(h,2,ie) + fvm_area(i,j) = fvm_area(i,j) +weights_all_phys2fvm(h,1,ie) + end do + fvm_area(:,:) = fvm(ie)%area_sphere(:,:)/fvm_area(:,:) + do h=1,jall_phys2fvm(ie) + i = weights_lgr_index_all_phys2fvm(h,1,ie); j = weights_lgr_index_all_phys2fvm(h,2,ie) + weights_all_phys2fvm(h,1,ie) = weights_all_phys2fvm(h,1,ie)*fvm_area(i,j) + end do + end do + end subroutine fvm2phys_init +end module dp_mapping diff --git a/src/dynamics/se/dycore/bndry_mod.F90 b/src/dynamics/se/dycore/bndry_mod.F90 new file mode 100644 index 00000000..84dc8a45 --- /dev/null +++ b/src/dynamics/se/dycore/bndry_mod.F90 @@ -0,0 +1,952 @@ +module bndry_mod + use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use parallel_mod, only: HME_BNDRY_A2A, HME_BNDRY_A2AO + use thread_mod, only: omp_in_parallel, omp_get_thread_num + use gbarrier_mod, only: gbarrier + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + + + implicit none + private + + interface bndry_exchange + module procedure bndry_exchange_threaded + module procedure bndry_exchange_nonthreaded + module procedure long_bndry_exchange_nonth + end interface + public :: bndry_exchange + + interface ghost_exchange + module procedure ghost_exchange_threaded + module procedure ghost_exchange_nonthreaded + end interface + public :: ghost_exchange + + interface bndry_exchange_start + module procedure bndry_exchange_threaded_start + module procedure bndry_exchange_nonthreaded_start + end interface + public :: bndry_exchange_start + + interface bndry_exchange_finish + module procedure bndry_exchange_threaded_finish + module procedure bndry_exchange_nonthreaded_finish + end interface + public :: bndry_exchange_finish + + + public :: 
compute_ghost_corner_orientation + public :: ghost_exchangeVfull + public :: copyBuffer + +contains + + subroutine bndry_exchange_a2a(par,nthreads,ithr,buffer,location) + use edgetype_mod, only: Edgebuffer_t + use schedtype_mod, only: schedule_t, cycle_t, schedule + use thread_mod, only: omp_in_parallel, omp_get_thread_num + use perf_mod, only: t_startf, t_stopf + use spmd_utils, only: mpi_real8, mpi_success + use parallel_mod, only: parallel_t + use perf_mod, only: t_startf, t_stopf + + type (parallel_t) :: par + integer, intent(in) :: nthreads + integer :: ithr ! The OpenMP thread ID + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + type (Schedule_t), pointer :: pSchedule + type (Cycle_t), pointer :: pCycle + integer :: icycle,ierr + integer :: length + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character*(80) :: errorstring + character(len=*), parameter :: subname = 'bndry_exchange_a2a' + character(len=80) :: locstring + logical :: ompthreadMissmatch + + integer :: i,j + integer :: request + +! Neighborhood collectives are only in MPI3 and up +#ifdef SPMD +#if MPI_VERSION >= 3 + + if(ithr == 0) then + + call MPI_Ineighbor_Alltoallv(buffer%buf,buffer%scountsFull,buffer%sdisplsFull,Mpi_real8, & + buffer%receive,buffer%rcountsFull,buffer%rdisplsFull,Mpi_real8,par%commGraphFull,request,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Ineighbor_alltoallv: ',errorstring + endif + + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + ! location 1 for copyBuffer + call copyBuffer(nthreads,ithr,buffer,locstring) + + call MPI_wait(request,lstatus,ierr) + call t_stopf('bndry_a2a') + else + + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + call copyBuffer(nthreads,ithr,buffer,locstring) + + endif +#else + call endrun('bndry_exchange_a2a requires MPI-3 feature support') +#endif +#endif + + end subroutine bndry_exchange_a2a + + subroutine copyBuffer(nthreads,ithr,buffer,location) + use edgetype_mod, only : Edgebuffer_t + integer :: nthreads + integer :: ithr + type (EdgeBuffer_t) :: buffer + character(len=80) :: location + logical :: ompThreadMissmatch + integer lenMovePtr, iptr,length,i,j + + ompThreadMissmatch = .false. + lenMovePtr = size(buffer%moveptr) + if ( lenMOveptr .ne. nthreads) then + ompthreadMissmatch = .true. + write(*,30) TRIM(location), lenMoveptr, nthreads + endif + + if (.not. ompthreadMissmatch) then + iptr = buffer%moveptr(ithr+1) + length = buffer%moveLength(ithr+1) + if(length>0) then + do i=0,length-1 + buffer%receive(iptr+i) = buffer%buf(iptr+i) + enddo + endif + else if(ompthreadMissmatch .and. 
ithr == 0) then + do j=1,lenMovePtr + iptr = buffer%moveptr(j) + length = buffer%moveLength(j) + if(length>0) then + do i=0,length-1 + buffer%receive(iptr+i) = buffer%buf(iptr+i) + enddo + endif + enddo + endif +30 format(a,'Potential perf issue: ',a,'LenMoveptr,nthreads: ',2(i3)) + end subroutine copyBuffer + + subroutine bndry_exchange_a2ao(par,nthreads,ithr,buffer,location) + use edgetype_mod, only : Edgebuffer_t + use schedtype_mod, only : schedule_t, cycle_t, schedule + use thread_mod, only : omp_in_parallel, omp_get_thread_num + use perf_mod, only : t_startf, t_stopf + use spmd_utils, only: mpi_real8, mpi_success, mpi_status_size + use parallel_mod, only: parallel_t + use perf_mod, only : t_startf, t_stopf + + type (parallel_t) :: par + integer, intent(in) :: nthreads + integer :: ithr ! The OpenMP thread ID + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + integer :: ierr + integer :: errorcode,errorlen + character(len=80) :: errorstring + character(len=*), parameter :: subname = 'bndry_exchange_a2ao' + character(len=80) :: locstring + + integer :: requestIntra,requestInter + integer :: lstatus(MPI_status_size) + +! Neighborhood collectives are only in MPI3 and up +#ifdef SPMD +#if MPI_VERSION >= 3 + + if(ithr == 0) then + + call t_startf('bndry_a2ao') + ! Start Inter-node communication + call MPI_Ineighbor_Alltoallv(buffer%buf,buffer%scountsInter,buffer%sdisplsInter,MPI_real8, & + buffer%receive,buffer%rcountsInter,buffer%rdisplsInter,MPI_real8,par%commGraphInter,requestInter,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Ineighbor_alltoallv: ',errorstring + endif + ! Start Intra-node communication + call MPI_Ineighbor_Alltoallv(buffer%buf,buffer%scountsIntra,buffer%sdisplsIntra,MPI_real8, & + buffer%receive,buffer%rcountsIntra,buffer%rdisplsIntra,MPI_real8,par%commGraphIntra,requestIntra,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Ineighbor_alltoallv: ',errorstring + endif + + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + ! Finish the Intra-node communication + call MPI_wait(requestIntra,lstatus,ierr) + + ! location 3 for copyBuffer + call copyBuffer(nthreads,ithr,buffer,locstring) + + ! 
Finish the Inter-node communication + call MPI_wait(requestInter,lstatus,ierr) + call t_stopf('bndry_a2ao') + + else + + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + !Copy buffer for ithr!=0 + call copyBuffer(nthreads,ithr,buffer,locstring) + + endif +#else + call endrun('bndry_exchange_a2ao requires MPI-3 feature support') +#endif +#endif + + end subroutine bndry_exchange_a2ao + + subroutine bndry_exchange_p2p(par,nthreads,ithr,buffer,location) + use edgetype_mod, only: Edgebuffer_t + use schedtype_mod, only: schedule_t, cycle_t, schedule + use thread_mod, only: omp_in_parallel, omp_get_thread_num + use spmd_utils, only: mpi_real8, mpi_success + use parallel_mod, only: parallel_t + use perf_mod, only: t_startf, t_stopf + + type (parallel_t) :: par + integer, intent(in) :: nthreads + integer :: ithr + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + type (Schedule_t),pointer :: pSchedule + type (Cycle_t),pointer :: pCycle + integer :: dest,length,tag + integer :: icycle,ierr + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character*(80) :: errorstring + character(len=*), parameter :: subname = 'bndry_exchange_p2p' + character(len=80) :: locstring + logical, parameter :: Debug=.FALSE. + + integer :: i,j + logical :: ompthreadMissmatch + integer :: lenMovePtr + + pSchedule => Schedule(1) + nlyr = buffer%nlyr + ompthreadMissmatch = .FALSE. + + lenMovePtr = size(buffer%moveptr) + + if(ithr == 0) then + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + + + !================================================== + ! Fire off the sends + !================================================== + + do icycle=1,nSendCycles + pCycle => pSchedule%SendCycle(icycle) + dest = pCycle%dest - 1 + length = buffer%scountsFull(icycle) + tag = buffer%tag + iptr = buffer%sdisplsFull(icycle) + 1 + if(Debug) write(iulog,*) subname,': MPI_Isend: DEST:',dest,'LENGTH:',length,'TAG: ',tag + call MPI_Isend(buffer%buf(iptr),length,Mpi_real8,dest,tag,par%comm,buffer%Srequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Isend: ',errorstring + endif + end do ! icycle + + !================================================== + ! Post the Receives + !================================================== + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + source = pCycle%source - 1 + length = buffer%rcountsFull(icycle) + tag = buffer%tag + iptr = buffer%rdisplsFull(icycle) + 1 + if(Debug) write(iulog,*) subname,': MPI_Irecv: SRC:',source,'LENGTH:',length,'TAG: ',tag + call MPI_Irecv(buffer%receive(iptr),length,Mpi_real8, & + source,tag,par%comm,buffer%Rrequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Irecv: ',errorstring + endif + end do ! 
icycle + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + call copyBuffer(nthreads,ithr,buffer,locstring) + if (nSendCycles>0) call MPI_Waitall(nSendCycles,buffer%Srequest,buffer%status,ierr) + if (nRecvCycles>0) call MPI_Waitall(nRecvCycles,buffer%Rrequest,buffer%status,ierr) + else + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + call copyBuffer(nthreads,ithr,buffer,locstring) + endif + + end subroutine bndry_exchange_p2p + + subroutine bndry_exchange_p2p_start(par,nthreads,ithr,buffer,location) + + use edgetype_mod, only: Edgebuffer_t + use schedtype_mod, only: schedule_t, cycle_t, schedule + use thread_mod, only: omp_in_parallel, omp_get_thread_num + use spmd_utils, only: mpi_real8, mpi_success + use parallel_mod, only: parallel_t + + type (parallel_t) :: par + integer, intent(in) :: nthreads + integer :: ithr + type (EdgeBuffer_t) :: buffer + character (len=*), optional :: location + + type (Schedule_t),pointer :: pSchedule + type (Cycle_t),pointer :: pCycle + integer :: dest,length,tag + integer :: icycle,ierr + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character*(80) :: errorstring + character(len=*), parameter :: subname = 'bndry_exchange_p2p_start' + logical, parameter :: Debug=.FALSE. + + integer :: i,j, lenMovePtr + logical :: ompthreadMissmatch + + pSchedule => Schedule(1) + nlyr = buffer%nlyr + ompthreadMissmatch = .FALSE. + + lenMovePtr = size(buffer%moveptr) + + if(ithr == 0) then + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + + !================================================== + ! Fire off the sends + !================================================== + + do icycle=1,nSendCycles + pCycle => pSchedule%SendCycle(icycle) + dest = pCycle%dest - 1 + length = buffer%scountsFull(icycle) + tag = buffer%tag + iptr = buffer%sdisplsFull(icycle) + 1 + if(Debug) write(iulog,*) subname,': MPI_Isend: DEST:',dest,'LENGTH:',length,'TAG: ',tag + call MPI_Isend(buffer%buf(iptr),length,Mpi_real8,dest,tag,par%comm,buffer%Srequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Isend: ',errorstring + endif + end do ! icycle + + !================================================== + ! Post the Receives + !================================================== + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + source = pCycle%source - 1 + length = buffer%rcountsFull(icycle) + tag = buffer%tag + iptr = buffer%rdisplsFull(icycle) + 1 + if(Debug) write(iulog,*) subname,': MPI_Irecv: SRC:',source,'LENGTH:',length,'TAG: ',tag + call MPI_Irecv(buffer%receive(iptr),length,Mpi_real8, & + source,tag,par%comm,buffer%Rrequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Irecv: ',errorstring + endif + end do ! 
icycle + endif + + end subroutine bndry_exchange_p2p_start + + subroutine bndry_exchange_p2p_finish(par,nthreads,ithr,buffer,location) + use edgetype_mod, only: Edgebuffer_t + use schedtype_mod, only: schedule_t, cycle_t, schedule + use thread_mod, only: omp_in_parallel, omp_get_thread_num + use parallel_mod, only: parallel_t + use perf_mod, only: t_startf, t_stopf + + + type (parallel_t) :: par + integer, intent(in) :: nthreads + integer :: ithr + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + type (Schedule_t), pointer :: pSchedule + type (Cycle_t), pointer :: pCycle + integer :: dest,length,tag + integer :: icycle,ierr + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character*(80) :: errorstring + character(len=*), parameter :: subname = 'bndry_exchange_p2p_finish' + character(len=80) :: locstring + + integer :: i,j + logical :: ompthreadMissmatch + integer :: lenMovePtr + + + pSchedule => Schedule(1) + if(present(location)) then + locstring = TRIM(subname) // ': ' // TRIM(location) + else + locstring = TRIM(subname) + endif + call copyBuffer(nthreads,ithr,buffer,locstring) + + if(ithr == 0) then + + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + + if (nSendCycles>0) call MPI_Waitall(nSendCycles,buffer%Srequest,buffer%status,ierr) + if (nRecvCycles>0) call MPI_Waitall(nRecvCycles,buffer%Rrequest,buffer%status,ierr) + + endif + + end subroutine bndry_exchange_p2p_finish + + subroutine long_bndry_exchange_nonth(par,buffer) + use edgetype_mod, only: LongEdgebuffer_t + use schedtype_mod, only: schedule_t, cycle_t, schedule + use thread_mod, only: omp_in_parallel + use parallel_mod, only: parallel_t, status, srequest, rrequest + use spmd_utils, only: mpi_integer, mpi_success + + type (parallel_t) :: par + type (LongEdgeBuffer_t) :: buffer + + type (Schedule_t), pointer :: pSchedule + type (Cycle_t), pointer :: pCycle + integer :: dest,length,tag + integer :: icycle,ierr + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character*(80) :: errorstring + character(len=*), parameter :: subname = 'long_bndry_exchange_nonth' + + integer :: i + +#ifdef SPMD + if(omp_in_parallel()) then + print *,subname,': Warning you are calling a non-thread safe' + print *,' routine inside a threaded region.... ' + print *,' Results are not predictable!! ' + endif + + + ! Setup the pointer to proper Schedule + pSchedule => Schedule(1) + nlyr = buffer%nlyr + + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + + + !================================================== + ! Fire off the sends + !================================================== + + do icycle=1,nSendCycles + pCycle => pSchedule%SendCycle(icycle) + dest = pCycle%dest - 1 + length = nlyr * pCycle%lengthP + tag = pCycle%tag + iptr = pCycle%ptrP + + call MPI_Isend(buffer%buf(1,iptr),length,Mpi_integer,dest,tag,par%comm,Srequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Isend: ',errorstring + endif + end do ! icycle + + !================================================== + ! 
Post the Receives + !================================================== + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + source = pCycle%source - 1 + length = nlyr * pCycle%lengthP + tag = pCycle%tag + iptr = pCycle%ptrP + + call MPI_Irecv(buffer%receive(1,iptr),length,Mpi_integer, & + source,tag,par%comm,Rrequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + write(iulog,*) subname,': Error after call to MPI_Irecv: ',errorstring + endif + end do ! icycle + + + !================================================== + ! Wait for all the receives to complete + !================================================== + + if (nSendCycles>0) call MPI_Waitall(nSendCycles,Srequest,status,ierr) + if (nRecvCycles>0) call MPI_Waitall(nRecvCycles,Rrequest,status,ierr) + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + length = pCycle%lengthP + iptr = pCycle%ptrP + do i=0,length-1 + buffer%buf(1:nlyr,iptr+i) = buffer%receive(1:nlyr,iptr+i) + enddo + end do ! icycle + +#endif + + end subroutine long_bndry_exchange_nonth + !******************************************************************************** + ! + !******************************************************************************** + + + subroutine ghost_exchange_threaded(hybrid,buffer,location) + use hybrid_mod, only : hybrid_t + use edgetype_mod, only : Edgebuffer_t + + implicit none + + type (hybrid_t) :: hybrid + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + call bndry_exchange_threaded(hybrid,buffer,location) + end subroutine ghost_exchange_threaded + + subroutine bndry_exchange_threaded(hybrid,buffer,location) + use hybrid_mod, only : hybrid_t + use edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (hybrid_t) :: hybrid + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + character(len=*), parameter :: subname = 'bndry_exchange_threaded' +!VERBOSE +! if(present(location)) then +! print *,subname,' ',location +! else +! print *,subname,' somewhere' +! 
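All of the point-to-point routines above (bndry_exchange_p2p, the bndry_exchange_p2p_start/_finish split, and long_bndry_exchange_nonth) follow the same nonblocking pattern: post an MPI_Isend and MPI_Irecv for every cycle in the communication schedule, wait on all requests, then unpack the receive buffer. The start/finish variants only separate the posting from the waiting so computation can overlap communication. A minimal mpi4py sketch of that pattern on a made-up periodic 1-D neighbor layout (the tag, buffer sizes, and neighbor list are invented and have nothing to do with Schedule_t or EdgeBuffer_t):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()

    # Each rank exchanges a small halo with its left and right neighbor.
    left, right = (rank - 1) % size, (rank + 1) % size
    sendbuf = {left: np.full(4, rank, dtype='d'), right: np.full(4, rank, dtype='d')}
    recvbuf = {left: np.empty(4, dtype='d'), right: np.empty(4, dtype='d')}

    requests = []
    # "Fire off the sends" and "post the receives" (cf. the MPI_Isend/MPI_Irecv loops).
    for nbr, buf in sendbuf.items():
        requests.append(comm.Isend([buf, MPI.DOUBLE], dest=nbr, tag=7))
    for nbr, buf in recvbuf.items():
        requests.append(comm.Irecv([buf, MPI.DOUBLE], source=nbr, tag=7))

    # Equivalent of the MPI_Waitall calls; afterwards the receive buffers are
    # safe to unpack into the local edge data.
    MPI.Request.Waitall(requests)

Run with, say, mpiexec -n 4. Deferring the Waitall to a separate finish step is exactly what bndry_exchange_p2p_start/_finish do so that work can proceed while messages are in flight.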
endif + + call gbarrier(buffer%gbarrier, hybrid%ithr) + if(buffer%bndry_type == HME_BNDRY_A2A) then + call bndry_exchange_a2a(hybrid%par,hybrid%nthreads,hybrid%ithr,buffer,location) + else if (buffer%bndry_type == HME_BNDRY_A2AO) then + call bndry_exchange_a2ao(hybrid%par,hybrid%nthreads,hybrid%ithr,buffer,location) + else + call bndry_exchange_p2p(hybrid%par,hybrid%nthreads,hybrid%ithr,buffer,location) + endif + call gbarrier(buffer%gbarrier, hybrid%ithr) + + end subroutine bndry_exchange_threaded + + subroutine bndry_exchange_threaded_start(hybrid,buffer,location) + use hybrid_mod, only : hybrid_t + use edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (hybrid_t) :: hybrid + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + character(len=*), parameter :: subname = 'bndry_exchange_threaded_start' + + call gbarrier(buffer%gbarrier, hybrid%ithr) + call bndry_exchange_p2p_start(hybrid%par,hybrid%nthreads,hybrid%ithr,buffer,location) + + end subroutine bndry_exchange_threaded_start + + subroutine bndry_exchange_threaded_finish(hybrid,buffer,location) + use hybrid_mod, only : hybrid_t + use edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (hybrid_t) :: hybrid + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + character(len=*), parameter :: subname = 'bndry_exchange_threaded_finish' + + call bndry_exchange_p2p_finish(hybrid%par,hybrid%nthreads,hybrid%ithr,buffer,location) + call gbarrier(buffer%gbarrier, hybrid%ithr) + + end subroutine bndry_exchange_threaded_finish + + subroutine ghost_exchange_nonthreaded(par,buffer,location) + use parallel_mod, only : parallel_t + use edgetype_mod, only : Edgebuffer_t + type (parallel_t) :: par + type (EdgeBUffer_t) :: buffer + character(len=*), optional :: location + call bndry_exchange_nonthreaded(par,buffer,location) + end subroutine ghost_exchange_nonthreaded + + subroutine bndry_exchange_nonthreaded(par,buffer,location) + use parallel_mod, only : parallel_t + use edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (parallel_t) :: par + type (EdgeBuffer_t) :: buffer + character(len=*), optional :: location + + integer :: ithr + integer :: nthreads + character(len=*), parameter :: subname = 'bndry_exchange_nonthreaded' + + !$OMP BARRIER + ithr=0 + nthreads = 1 + if(buffer%bndry_type == HME_BNDRY_A2A) then + call bndry_exchange_a2a(par,nthreads,ithr,buffer,location) + else if (buffer%bndry_type == HME_BNDRY_A2AO) then + call bndry_exchange_a2ao(par,nthreads,ithr,buffer,location) + else + call bndry_exchange_p2p(par,nthreads,ithr,buffer,location) + endif + !$OMP BARRIER + + end subroutine bndry_exchange_nonthreaded + + subroutine bndry_exchange_nonthreaded_start(par,buffer,location) + use parallel_mod, only : parallel_t + use edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (parallel_t) :: par + type (EdgeBuffer_t) :: buffer + character (len=*), optional :: location + + integer :: ithr + integer :: nthreads + character(len=*), parameter :: subname = 'bndry_exchange_nonthreaded_start' + + !$OMP BARRIER + ithr=0 + nthreads=1 + call bndry_exchange_p2p_start(par,nthreads,ithr,buffer,location) + + end subroutine bndry_exchange_nonthreaded_start + + subroutine bndry_exchange_nonthreaded_finish(par,buffer,location) + use parallel_mod, only : parallel_t + use 
edgetype_mod, only : Edgebuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf + implicit none + + type (parallel_t) :: par + integer :: ithr + type (EdgeBuffer_t) :: buffer + character (len=*), optional :: location + integer :: nthreads + + character(len=*), parameter :: subname = 'bndry_exchange_nonthreaded_finish' + + ithr=0 + nthreads=1 + call bndry_exchange_p2p_finish(par,nthreads,ithr,buffer,location) + !$OMP BARRIER + + end subroutine bndry_exchange_nonthreaded_finish + + subroutine compute_ghost_corner_orientation(hybrid,elem,nets,nete) +! +! this routine can NOT be called in a threaded region because then each thread +! will have its on ghostbuffer. initghostbufer3D() should detect this and abort. +! + use dimensions_mod, only: nelemd, np + use parallel_mod, only : syncmp + use hybrid_mod, only : hybrid_t + use element_mod, only : element_t + use edgetype_mod, only : edgebuffer_t + use edge_mod, only : ghostpack, ghostunpack, & + initghostbuffer,freeghostbuffer + + use control_mod, only : north,south,east,west,neast, nwest, seast, swest + + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + integer :: nets,nete + type (edgeBuffer_t) :: ghostbuf_cv + + real (kind=r8) :: cin(-1:4,-1:4,1,nets:nete) !CE: fvm tracer + real (kind=r8) :: cout(-1:4,-1:4,1,nets:nete) !CE: fvm tracer + integer :: i,j,ie,kptr,np1,np2,nc,nc1,nc2,k,nlev + logical :: fail,fail1,fail2 + real (kind=r8) :: tol = 0.1_r8 + call syncmp(hybrid%par) + + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! first test on the Gauss Grid with same number of ghost cells: +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + nc=2 ! test using GLL interior points + nc1=-1 + nc2=4 + + nlev=1 + + if (hybrid%nthreads > 1) then + call endrun('ERROR: compute_ghost_corner_orientation must be called before threaded region') + endif + call initghostbuffer(hybrid%par,ghostbuf_cv,elem,nlev,nc,nc,nthreads=1) + + + cin = 0._r8 + do ie=nets,nete + cin(1,1,1,ie)= elem(ie)%gdofp(1,1) + cin(nc,nc,1,ie)= elem(ie)%gdofp(np,np) + cin(1,nc,1,ie)= elem(ie)%gdofp(1,np) + cin(nc,1,1,ie)= elem(ie)%gdofp(np,1) + enddo + cout=0 + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! run ghost exchange on c array to get corner orientation +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + do ie=nets,nete + kptr=0 + call ghostpack(ghostbuf_cv, cin(:,:,:,ie),nlev,kptr,ie) + end do + call ghost_exchange(hybrid,ghostbuf_cv,location='compute_ghost_corner_orientation') + do ie=nets,nete + kptr=0 + call ghostunpack(ghostbuf_cv, cout(:,:,:,ie),nlev,kptr,ie) + enddo + +! nc +--------+ +! ^ | nw ne | +! j | | | +! 1 | sw se | +! +--------+ +! 1 --> nc +! i + +! check SW corner + do ie=nets,nete + fail1=.false. + fail2=.false. + if ( elem(ie)%desc%putmapP_ghost(swest) /= -1) then + if (abs(cout(nc1,1,1,ie)-cout(nc1,0,1,ie)) .gt. tol ) fail1=.true. + if (abs(cout(1,nc1,1,ie)-cout(0,nc1,1,ie)).gt.tol) fail2=.true. + endif + if (fail1 .neqv. fail2 ) call endrun( 'ghost exchange SW orientation failure') + if (fail1) then + elem(ie)%desc%reverse(swest)=.true. + endif + enddo +! check SE corner + do ie=nets,nete + fail1=.false. + fail2=.false. + if ( elem(ie)%desc%putmapP_ghost(seast) /= -1) then + if (abs(cout(nc2,1,1,ie)-cout(nc2,0,1,ie)) .gt. tol ) fail1=.true. + if (abs(cout(nc+1,nc1,1,ie)-cout(nc,nc1,1,ie)).gt.tol) fail2=.true. + endif + if (fail1 .neqv. 
fail2 ) call endrun('ghost exchange SE orientation failure') + if (fail1) then + elem(ie)%desc%reverse(seast)=.true. + endif + enddo +! check NW corner + do ie=nets,nete + fail1=.false. + fail2=.false. + if ( elem(ie)%desc%putmapP_ghost(nwest) /= -1) then + if (abs(cout(nc1,nc+1,1,ie)-cout(nc1,nc,1,ie)) .gt. tol ) fail1=.true. + if (abs(cout(1,nc2,1,ie)-cout(0,nc2,1,ie)).gt.tol) fail2=.true. + endif + if (fail1 .neqv. fail2 ) call endrun( 'ghost exchange NW orientation failure') + if (fail1) then + elem(ie)%desc%reverse(nwest)=.true. + endif + enddo +! check NE corner + do ie=nets,nete + fail1=.false. + fail2=.false. + if ( elem(ie)%desc%putmapP_ghost(neast) /= -1) then + if (abs(cout(nc2,nc+1,1,ie)-cout(nc2,nc,1,ie)) .gt. tol ) fail1=.true. + if (abs(cout(nc+1,nc2,1,ie)-cout(nc,nc2,1,ie)).gt.tol) fail2=.true. + endif + if (fail1 .neqv. fail2 ) call endrun( 'ghost exchange NE orientation failure') + if (fail1) then + elem(ie)%desc%reverse(neast)=.true. + endif + enddo + call freeghostbuffer(ghostbuf_cv) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! end ghost exchange corner orientation +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + end subroutine + subroutine ghost_exchangeVfull(par,ithr,buffer) +! +! MT 2011: derived from bndry_exchange, but copies an entire +! element of ghost cell information, including corner +! elements. Requres cubed-sphere grid +! + use hybrid_mod, only : hybrid_t + use edgetype_mod, only: Ghostbuffer3D_t + use schedtype_mod, only : schedule_t, cycle_t, schedule + use dimensions_mod, only: nelemd + use parallel_mod, only : status, srequest, rrequest, parallel_t + use spmd_utils, only: mpi_integer, mpi_success,mpi_real8 + + implicit none + type (parallel_t) :: par + integer :: ithr ! hybrid%ithr 0 if called outside threaded region + + type (GhostBuffer3D_t) :: buffer + + type (Schedule_t),pointer :: pSchedule + type (Cycle_t),pointer :: pCycle + integer :: dest,length,tag + integer :: icycle,ierr + integer :: iptr,source,nlyr + integer :: nSendCycles,nRecvCycles + integer :: errorcode,errorlen + character(len=*), parameter :: subname = 'ghost_exchangeVfull' + character*(80) errorstring + + integer :: i,i1,i2 + + !$OMP BARRIER + if(ithr == 0) then + + +#ifdef SPMD + ! Setup the pointer to proper Schedule + pSchedule => Schedule(1) + nlyr = buffer%nlyr + + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + + !================================================== + ! Fire off the sends + !================================================== + do icycle=1,nSendCycles + pCycle => pSchedule%SendCycle(icycle) + dest = pCycle%dest - 1 + length = nlyr * pCycle%lengthP_ghost * buffer%elem_size + tag = pCycle%tag + iptr = pCycle%ptrP_ghost + + call MPI_Isend(buffer%buf(1,1,1,iptr),length,MPI_real8,dest,tag,par%comm,Srequest(icycle),ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,subname,': Error after call to MPI_Isend: ',errorstring + endif + end do ! icycle + + !================================================== + ! Post the Receives + !================================================== + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + source = pCycle%source - 1 + length = nlyr * pCycle%lengthP_ghost * buffer%elem_size + tag = pCycle%tag + iptr = pCycle%ptrP_ghost + + call MPI_Irecv(buffer%receive(1,1,1,iptr),length,MPI_real8, & + source,tag,par%comm,Rrequest(icycle),ierr) + if(ierr .ne. 
MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,subname,': Error after call to MPI_Irecv: ',errorstring + endif + end do ! icycle + + + !================================================== + ! Wait for all the receives to complete + !================================================== + + call MPI_Waitall(nSendCycles,Srequest,status,ierr) + call MPI_Waitall(nRecvCycles,Rrequest,status,ierr) + + do icycle=1,nRecvCycles + pCycle => pSchedule%RecvCycle(icycle) + length = pCycle%lengthP_ghost + iptr = pCycle%ptrP_ghost + do i=0,length-1 + buffer%buf(:,:,1:nlyr,iptr+i) = buffer%receive(:,:,1:nlyr,iptr+i) + enddo + end do ! icycle + + +#endif + endif ! if (hybrid%ithr == 0) + !$OMP BARRIER + + end subroutine ghost_exchangeVfull + + +end module bndry_mod diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 new file mode 100644 index 00000000..6c2760b6 --- /dev/null +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -0,0 +1,2310 @@ +! Code that computes control volumes with the same area as the GLL weights +! (for SCRIP) uses analytic area formula. + +module comp_gll_ctr_vol + use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + use shr_sys_mod, only: shr_sys_flush + use global_norms_mod, only: wrap_repro_sum + use physconst, only: pi + use infnan, only: isnan + + use coordinate_systems_mod, only: cartesian3d_t, cartesian2d_t + use coordinate_systems_mod, only: spherical_polar_t, change_coordinates + use coordinate_systems_mod, only: cubedsphere2cart, cube_face_number_from_cart + use coordinate_systems_mod, only: distance, sphere_tri_area + use parallel_mod, only: global_shared_buf, global_shared_sum + use edgetype_mod, only: EdgeBuffer_t, Ghostbuffer3D_t + use dimensions_mod, only: np, ne + use control_mod, only: fine_ne + use reduction_mod, only: red_sum, parallelmin, parallelmax + + implicit none + private + save + + character(len=16), public :: se_write_gll_grid = "no" + + ! nv_max will be set to 2*max_elements_attached_to_node + ! This works out to 6 for the regular case and 14 for refined meshes + integer :: nv_max=-99 + + type, public :: ctrlvol_t + real(r8) :: vol(np,np) ! area of the unit sphere covered (local) + real(r8) :: totvol(np,np) ! area of the unit sphere covered (local) + real(r8) :: invvol(np,np) ! inverse area (includes neigbors) + type(cartesian2d_t) :: cartp_dual(0:np,0:np) + type(cartesian3d_t) :: cart3d_dual(0:np,0:np) + type(cartesian3D_t), allocatable :: vert(:,:,:) ! bounding box for the polygon + type(spherical_polar_t), allocatable :: vert_latlon(:,:,:) ! bounding box for the polygon + integer, allocatable :: face_no(:,:,:) ! face_no of cv vertex. 0 if on cube edge + integer :: nvert(np,np) ! number of vertex per polygon + end type ctrlvol_t + + ! tho options: + ! (1) for NP<>4 or Refined Meshes (this is less accurate) + ! build control volumes out of lines which are + ! always gnomonic coordinate lines. results in hexagon control volumes + ! at cube corners and edges. control volumes at cube-sphere edges are + ! non-convex, which breaks SCRIP. + ! iterative option for NP=4 only: + ! (2) (USE_PENTAGONS) + ! iterate to minimize difference between spherical area and GLL weight + ! introduce pentagons in the center of each element to make areas agree + ! 
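For orientation, ctrlvol_t carries everything the rest of the module needs per element: the control-volume area local to the element (vol), the area after neighboring elements' contributions are summed in (totvol) and its inverse (invvol), the dual-grid corner coordinates, and up to nv_max polygon vertices per GLL point together with the actual vertex count nvert (only cube edges, cube corners, and refined-mesh nodes need more than four). A rough Python analogue of that layout, purely illustrative and not part of the module:

    from dataclasses import dataclass, field
    import numpy as np

    NP = 4        # GLL points per element edge (np in the Fortran)
    NV_MAX = 6    # max control-volume vertices per point in this toy setup

    def grid(*shape):
        return field(default_factory=lambda: np.zeros(shape))

    @dataclass
    class ControlVolumes:
        """Loose analogue of ctrlvol_t for one element (illustrative only)."""
        vol: np.ndarray = grid(NP, NP)              # CV area owned by this element
        totvol: np.ndarray = grid(NP, NP)           # CV area including neighbor elements
        invvol: np.ndarray = grid(NP, NP)           # 1/totvol
        vert: np.ndarray = grid(NV_MAX, NP, NP, 3)  # CV polygon vertices (x, y, z)
        nvert: np.ndarray = field(default_factory=lambda: np.zeros((NP, NP), dtype=int))

    cv = ControlVolumes()
    cv.nvert[0, 0] = 3    # a cube-corner point owns a triangle locally
    cv.nvert[1, 1] = 4    # an interior point owns a quadrilateral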
control volumes are triangles, squares or pentagons + + type(ctrlvol_t), allocatable, target :: cvlist(:) + type(EdgeBuffer_t) :: edge1 + type(GhostBuffer3D_t) :: ghost_buf + + ! User interface + public :: gll_grid_write ! Write the grid in SCRIP format and exit + + ! Private interfaces + private:: InitControlVolumesData ! allocates internal data structure + private:: InitControlVolumes ! Inits all surfaces: vol,totvol, invvol + + private:: GetVolumeLocal + private:: GetVolume + + logical, private :: initialized = .false. + +CONTAINS + + subroutine gll_grid_write(elem, grid_format, filename_in) + use netcdf, only: nf90_strerror + use spmd_utils, only: masterproc, mpicom + use pio, only: var_desc_t, file_desc_t + use pio, only: pio_int, pio_double, PIO_NOERR + use pio, only: pio_put_att, pio_put_var, pio_enddef + use cam_pio_utils, only: cam_pio_createfile, cam_pio_closefile + use cam_pio_utils, only: cam_pio_def_dim, cam_pio_def_var + use cam_grid_support, only: cam_grid_id, cam_grid_dimensions + use cam_grid_support, only: cam_grid_write_dist_array + !!XXgoldyXX: v debug only +#define USE_PIO3D +#ifdef USE_PIO3D + use pio, only: io_desc_t, pio_write_darray, PIO_OFFSET_KIND + use cam_pio_utils, only: cam_pio_newdecomp + use spmd_utils, only: iam +#endif + !!XXgoldyXX: ^ debug only + + use hybrid_mod, only: hybrid_t, config_thread_region + use parallel_mod, only: par + use dimensions_mod, only: nelem, nelemd + use control_mod, only: refined_mesh, fine_ne + use element_mod, only: element_t + use dof_mod, only: UniquePoints + use coordinate_systems_mod, only: cart2spherical + + ! Inputs + type(element_t), intent(in) :: elem(:) + character(len=*), intent(in) :: grid_format + character(len=*), intent(in) :: filename_in + + real(r8), parameter :: rad2deg = 180._r8/pi + + ! Local variables +!!XXgoldyXX: v debug only +#ifdef USE_PIO3D + integer(PIO_OFFSET_KIND), allocatable :: ldof(:) + integer :: ii, jj + type(io_desc_t), pointer :: iodesc +#endif +!!XXgoldyXX: ^ debug only + integer :: i, j, ie, ierror, status, ivtx, index + integer :: grid_corners_id, grid_rank_id, grid_size_id + type(var_desc_t) :: grid_dims_id, grid_area_id, grid_center_lat_id + type(var_desc_t) :: grid_center_lon_id, grid_corner_lat_id + type(var_desc_t) :: grid_corner_lon_id, grid_imask_id + + type(file_desc_t) :: file + integer :: gll_grid ! Grid ID + integer :: gridsize ! Total number of unique grid columns + integer :: arr_dims2d(2) ! (/ np*np, nelemed) + integer :: file_dims2d(1) ! (/ gridsize /) + integer :: arr_dims3d(3) ! (/ np*np, nv_max, nelemed) + integer :: file_dims3d(2) ! (/ nv_max, gridsize /) + + real(r8), allocatable :: gwork(:,:,:) ! np*np, nv_max, nelemd + type(hybrid_t) :: hybrid + character(len=256) :: errormsg + character(len=shr_kind_cl) :: filename + type(spherical_polar_t) :: sphere + character(len=*), parameter :: subname = 'gll_grid_write' + + !! Check to see if we are doing grid output + if (trim(grid_format) == "no") then + if (masterproc) then + write(iulog, *) subname, ': Not writing phys_grid file.' + end if + return + else if (trim(grid_format) /= 'SCRIP') then + write(errormsg, *) subname, ': ERROR, bad value for se_write_grid, ', & + trim(grid_format) + call endrun(errormsg) + end if + + ! Set up the control volumes + if (refined_mesh) then + nv_max = 14 + else + nv_max = 5 + end if + if (masterproc) then + write(iulog, *) subname, ': computing GLL dual grid for control volumes:' + end if + call InitControlVolumesData(par,elem,nelemd) + ! 
single thread + hybrid = config_thread_region(par,'serial') + call InitControlVolumes(elem,hybrid,1,nelemd) + if (masterproc) then + write(6, *) subname, ': done computing GLL dual grid for control volumes.' + end if + + ! Create the NetCDF file + if (len_trim(filename_in) == 0) then + if (refined_mesh) then + if (fine_ne <= 0) then + call endrun('gll_grid_write: refined_mesh selected but fine_ne not set') + else + write(filename,'(a,i0,a,a,3a)') "ne0np", np, "_refined_", trim(grid_format), ".nc" + end if + else + write(filename, '(a,i0,a,i0,a,a,3a)') "ne", ne, "np", np, & + "_", trim(grid_format), ".nc" + end if + else + filename = trim(filename_in) + end if + if (masterproc) then + write(iulog, *) 'Writing gll SCRIP grid file: ', trim(filename) + call shr_sys_flush(iulog) + end if + + call cam_pio_createfile(file, trim(filename)) + gll_grid = cam_grid_id('GLL') + call cam_grid_dimensions(gll_grid, file_dims3d) + gridsize = file_dims3d(1) + file_dims2d(1) = gridsize + file_dims3d(1) = nv_max + file_dims3d(2) = gridsize + arr_dims2d(1) = np*np + arr_dims2d(2) = nelemd + arr_dims3d(1) = np*np + arr_dims3d(2) = nv_max + arr_dims3d(3) = nelemd + call cam_pio_def_dim(file, "grid_corners", nv_max, grid_corners_id) + call cam_pio_def_dim(file, "grid_rank", 1, grid_rank_id) + call cam_pio_def_dim(file, "grid_size", gridsize, grid_size_id) + ! Define the coordinate variables + call cam_pio_def_var(file, "grid_dims", pio_int, (/ grid_rank_id /), & + grid_dims_id) + + ! Define grid area + call cam_pio_def_var(file, "grid_area", pio_double, & + (/grid_size_id/), grid_area_id) + status = pio_put_att(file, grid_area_id, "units", "radians^2") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining units attribute for grid_area' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + status = pio_put_att(file, grid_area_id, "long_name", "area weights") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining long_name attribute for grid_area' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! Define grid center latitudes + call cam_pio_def_var(file, "grid_center_lat", pio_double, & + (/grid_size_id/), grid_center_lat_id) + status = pio_put_att(file, grid_center_lat_id, "units", "degrees") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining units attribute for grid_center_lat' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! Define grid center longitudes + call cam_pio_def_var(file, "grid_center_lon", pio_double, & + (/grid_size_id/), grid_center_lon_id) + status = pio_put_att(file, grid_center_lon_id, "units", "degrees") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining units attribute for grid_center_lon' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! Define grid corner latitudes + call cam_pio_def_var(file, "grid_corner_lat", pio_double, & + (/grid_corners_id, grid_size_id/), grid_corner_lat_id) + status = pio_put_att(file, grid_corner_lat_id, "units", "degrees") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining units attribute for grid_corner_lon' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! 
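Everything defined so far, together with the corner-longitude and mask variables that follow, is the standard SCRIP grid description: one record per unique GLL column holding its area, center, mask, and a corner list padded to nv_max entries. For reference, a serial netCDF4-python sketch of that file layout; the file name and sizes are invented, and the patch itself writes the real file in parallel through PIO/cam_pio_utils:

    from netCDF4 import Dataset

    grid_size, grid_corners = 10, 5       # toy sizes (real: unique columns, nv_max)

    ds = Dataset("toy_scrip.nc", "w")
    ds.createDimension("grid_size", grid_size)
    ds.createDimension("grid_corners", grid_corners)
    ds.createDimension("grid_rank", 1)

    # C/NetCDF dimension order (grid_size, grid_corners) corresponds to the
    # Fortran (grid_corners, grid_size) used in the PIO definitions above.
    ds.createVariable("grid_dims", "i4", ("grid_rank",))[:] = [grid_size]
    area = ds.createVariable("grid_area", "f8", ("grid_size",))
    area.units, area.long_name = "radians^2", "area weights"
    for name in ("grid_center_lat", "grid_center_lon"):
        ds.createVariable(name, "f8", ("grid_size",)).units = "degrees"
    for name in ("grid_corner_lat", "grid_corner_lon"):
        ds.createVariable(name, "f8", ("grid_size", "grid_corners")).units = "degrees"
    ds.createVariable("grid_imask", "f8", ("grid_size",))[:] = 1.0
    ds.close()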
Grid corner longitudes + call cam_pio_def_var(file, "grid_corner_lon", pio_double, & + (/grid_corners_id, grid_size_id/), grid_corner_lon_id) + status = pio_put_att(file, grid_corner_lon_id, "units", "degrees") + if (status /= pio_noerr) then + write(iulog, *) subname,': Error defining units attribute for grid_corner_lon' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! Grid mask + call cam_pio_def_var(file, "grid_imask", pio_double, & + (/grid_size_id/), grid_imask_id) + + ! End of NetCDF definitions + status = PIO_enddef(file) + if (status /= pio_noerr) then + write(iulog, *) subname, ': Error calling enddef' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + + ! Work array to gather info before writing + allocate(gwork(np*np, nv_max, nelemd)) + + ! Write grid size + status = pio_put_var(file, grid_dims_id, (/ gridsize /)) + if (status /= pio_noerr) then + write(iulog, *) subname, ': Error writing variable grid_dims' + call shr_sys_flush(iulog) + call endrun(subname//": "//trim(nf90_strerror(status))) + end if + ! Write GLL grid areas + do ie = 1, nelemd + index = 1 + do j = 1, np + do i = 1, np + gwork(index, 1, ie) = cvlist(ie)%vol(i,j) + index = index + 1 + end do + end do + end do + call cam_grid_write_dist_array(file, gll_grid, arr_dims2d, file_dims2d, & + gwork(:,1,:), grid_area_id) + ! Write GLL grid cell center latitude + do ie = 1, nelemd + index = 1 + do j = 1, np + do i = 1, np + gwork(index, 1, ie) = elem(ie)%spherep(i,j)%lat * rad2deg + index = index + 1 + end do + end do + end do + call cam_grid_write_dist_array(file, gll_grid, arr_dims2d, file_dims2d, & + gwork(:,1,:), grid_center_lat_id) + ! Write GLL grid cell center longitude + do ie = 1, nelemd + index = 1 + do j = 1, np + do i = 1, np + gwork(index, 1, ie) = elem(ie)%spherep(i,j)%lon * rad2deg + index = index + 1 + end do + end do + end do + call cam_grid_write_dist_array(file, gll_grid, arr_dims2d, file_dims2d, & + gwork(:,1,:), grid_center_lon_id) + + ! GLL grid corners + ! Collect all information for the grid corner latitude (counter-clockwise) + do ie = 1, nelemd + do ivtx = 1, nv_max + index = 1 + do j = 1, np + do i = 1, np + gwork(index, ivtx, ie) = cvlist(ie)%vert_latlon(ivtx,i,j)%lat * rad2deg + index = index + 1 + end do + end do + end do + end do +!!XXgoldyXX: v debug only +#ifdef USE_PIO3D +allocate(ldof(np*np*nelemd*nv_max)) +ldof = 0 +do ie = 1, nelemd + do index = 1, elem(ie)%idxP%NumUniquePts + i = elem(ie)%idxP%ia(index) + j = elem(ie)%idxP%ja(index) + ii = (i - 1) + ((j - 1) * np) + ((ie - 1) * np * np * nv_max) + 1 + jj = (elem(ie)%idxP%UniquePtOffset + index - 2) * nv_max + do ivtx = 1, nv_max + ldof(ii) = jj + ivtx + if ((jj+ivtx < 1) .or. (jj+ivtx > gridsize*nv_max)) then + write(errormsg, '(4(a,i0))') ' ERROR (',iam,'): ldof(',ii,') = ',jj + ivtx,' > ',gridsize*nv_max + call endrun(subname//trim(errormsg)) + end if + ii = ii + np*np + end do + end do +end do +allocate(iodesc) +call cam_pio_newdecomp(iodesc, (/ nv_max, gridsize /), ldof, PIO_double) +call pio_write_darray(file, grid_corner_lat_id, iodesc, gwork, status) +#else +!!XXgoldyXX: ^ debug only + call cam_grid_write_dist_array(file, gll_grid, arr_dims3d, file_dims3d, & + gwork, grid_corner_lat_id) +!!XXgoldyXX: v debug only +#endif +!!XXgoldyXX: ^ debug only + ! 
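The ldof array assembled in the USE_PIO3D branch is the PIO decomposition map: each locally stored (GLL point, corner, element) value is given its 1-based position in the flattened global (grid_corners, grid_size) variable, with the corner index varying fastest within a column. A stripped-down sketch of that mapping which assumes, unrealistically, that every GLL point is its own global column, so none of the idxP/UniquePtOffset bookkeeping that handles shared element edges appears:

    import numpy as np

    npts, nverts, nelem_local = 4, 3, 2   # stand-ins for np*np, nv_max, nelemd
    col_offset = 5                        # first global column owned by this rank (0-based)

    # Local storage is laid out like gwork(np*np, nv_max, nelemd) in column-major
    # Fortran order: point index fastest, then corner, then element.
    dof = np.zeros(npts * nverts * nelem_local, dtype=np.int64)
    k = 0
    for ie in range(nelem_local):
        for ivtx in range(nverts):
            for ipt in range(npts):
                gcol = col_offset + ie * npts + ipt   # this point's global column
                dof[k] = gcol * nverts + ivtx + 1     # 1-based, corners fastest
                k += 1
    # dof plays the role of ldof in cam_pio_newdecomp: it scatters the local
    # buffer into the global (nv_max, grid_size) file variable.
    print(dof.reshape(nelem_local, nverts, npts))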
Collect all information for the grid corner longitude (counter-clockwise) + do ie = 1, nelemd + do ivtx = 1, nv_max + index = 1 + do j = 1, np + do i = 1, np + gwork(index, ivtx, ie) = cvlist(ie)%vert_latlon(ivtx,i,j)%lon * rad2deg + index = index + 1 + end do + end do + end do + end do +!!XXgoldyXX: v debug only +#ifdef USE_PIO3D +call pio_write_darray(file, grid_corner_lon_id, iodesc, gwork, status) +#else +!!XXgoldyXX: ^ debug only + call cam_grid_write_dist_array(file, gll_grid, arr_dims3d, file_dims3d, & + gwork, grid_corner_lon_id) +!!XXgoldyXX: v debug only +#endif +!!XXgoldyXX: ^ debug only + ! Grid imask + gwork(:,1,:) = 1.0_r8 + call cam_grid_write_dist_array(file, gll_grid, arr_dims2d, file_dims2d, & + gwork(:,1,:), grid_imask_id) + + call mpi_barrier(mpicom, ierror) + ! Close the file + call cam_pio_closefile(file) + if(masterproc) then + write(iulog, *) 'Finished writing physics grid file: ', trim(filename) + call shr_sys_flush(iulog) + end if + + end subroutine gll_grid_write + + ! elemid is the local element id (in nets:nete) + function GetVolume(elemid) result(vol) + + integer, intent(in) :: elemid + real(kind=r8), pointer :: vol(:,:) + + if(.not. initialized) then + call endrun('Attempt to use volumes prior to initializing') + end if + vol => cvlist(elemid)%totvol + + end function GetVolume + + function GetVolumeLocal(elemid) result(vol) + + integer, intent(in) :: elemid + real(r8), pointer :: vol(:,:) + + if(.not. initialized) then + call endrun('Attempt to use volumes prior to initializing') + end if + vol => cvlist(elemid)%vol + + end function GetVolumeLocal + + subroutine InitControlVolumesData(par, elem, nelemd) + use edge_mod, only: initedgebuffer, initGhostBuffer3D + use parallel_mod, only: parallel_t, HME_BNDRY_P2P + use element_mod, only: element_t + use thread_mod, only: horz_num_threads + + type(parallel_t), intent(in) :: par + type(element_t), intent(in) :: elem(:) + integer, intent(in) :: nelemd + + integer :: ie + + ! Cannot be done in a threaded region + allocate(cvlist(nelemd)) + do ie = 1, nelemd + allocate(cvlist(ie)%vert(nv_max, np,np)) + allocate(cvlist(ie)%vert_latlon(nv_max,np,np)) + allocate(cvlist(ie)%face_no(nv_max,np,np)) + end do + + call initedgebuffer(par,edge1,elem,3,bndry_type=HME_BNDRY_P2P, nthreads=1) + call initGhostBuffer3D(ghost_buf,3,np+1,1) + end subroutine InitControlVolumesData + + subroutine VerifyAreas(elem,hybrid,nets,nete) + + use element_mod, only: element_t + use hybrid_mod, only: hybrid_t + + integer, intent(in) :: nets,nete + type(element_t), intent(in), target :: elem(:) + type(hybrid_t), intent(in) :: hybrid + + integer :: i, j, ie, k, kptr, kmax + real(r8) :: rspheremp(np,np) + real(r8) :: invvol(np,np) + real(r8) :: error, max_error, max_invvol, maxrsphere + + error = 0 + max_error = 0 + do ie=nets,nete + rspheremp = elem(ie)%rspheremp + invvol = cvlist(ie)%invvol + do j=1,np + do i=1,np + error = 100*ABS(rspheremp(i,j)-invvol(i,j))/invvol(i,j) + if (max_error.lt.error) then + max_error = error + max_invvol = invvol(i,j) + maxrsphere = rspheremp(i,j) + end if + end do + end do + end do + print '(A,F16.4 )',"Control Volume Stats: Max error percent:", max_error + print '(A,F16.12)'," Value From Element:",1/maxrsphere + print '(A,F16.12)'," Value From Control Volume:",1/max_invvol + max_error = parallelmax(max_error,hybrid) + if (hybrid%masterthread) then + write(6, '(a,f16.4)') "Control volume area vs. 
gll area: max error (percent):", max_error + end if + + end subroutine VerifyAreas + + + subroutine InitControlVolumes(elem, hybrid,nets,nete) + use element_mod, only: element_t + use hybrid_mod, only: hybrid_t + use control_mod, only: refined_mesh + + integer, intent(in) :: nets,nete + type(element_t), intent(in), target :: elem(:) + type(hybrid_t), intent(in) :: hybrid + + if (refined_mesh .or. (np /= 4)) then + call InitControlVolumes_duel(elem, hybrid,nets,nete) + else + call InitControlVolumes_gll(elem, hybrid,nets,nete) + call VerifVolumes(elem, hybrid,nets,nete) + end if + end subroutine InitControlVolumes + + subroutine InitControlVolumes_duel(elem, hybrid,nets,nete) + use bndry_mod, only: bndry_exchange + use edge_mod, only: edgeVpack, edgeVunpack, freeedgebuffer, freeghostbuffer3D + use element_mod, only: element_t, element_var_coordinates, element_var_coordinates3d + use hybrid_mod, only: hybrid_t + + use quadrature_mod, only: quadrature_t, gausslobatto + use coordinate_systems_mod, only: cube_face_number_from_sphere + + integer, intent(in) :: nets,nete + type(element_t), intent(in), target :: elem(:) + type(hybrid_t), intent(in) :: hybrid + + type(quadrature_t) :: gll_pts + type(cartesian3d_t) :: quad(4),corners3d(4) + real(r8) :: cv_pts(0:np) !was kind=longdouble_kind in HOMME + real(r8) :: test(np,np,1) + + integer :: i, j, ie, k, kmax2, kk + + gll_pts = gausslobatto(np) + ! gll points + cv_pts(0)=-1 + do i=1,np + cv_pts(i) = cv_pts(i-1) + gll_pts%weights(i) + end do + cv_pts(np)=1 + do i=1,np-1 + if (gll_pts%points(i) > cv_pts(i) .or. cv_pts(i) > gll_pts%points(i+1)) then + call endrun("Error: CV and GLL points not interleaved") + end if + end do + + + ! intialize local element areas + test = 0 + do ie=nets,nete + cvlist(ie)%cart3d_dual(0:np,0:np) = element_var_coordinates3D(elem(ie)%corners3D, cv_pts) + + ! compute true area of element and SEM area + cvlist(ie)%vol=0 + do i=1,np + do j=1,np + ! (gnomonic coordinate lines only), more accurate + quad(1) = cvlist(ie)%cart3d_dual(i-1,j-1) + quad(2) = cvlist(ie)%cart3d_dual(i,j-1) + quad(3) = cvlist(ie)%cart3d_dual(i,j) + quad(4) = cvlist(ie)%cart3d_dual(i-1,j) + cvlist(ie)%vol(i,j) = surfarea(quad,4) + end do + end do + test(:,:,1) = cvlist(ie)%vol(:,:) + call edgeVpack(edge1,test,1,0,ie) + end do + + call bndry_exchange(hybrid, edge1,location='InitControlVolumes_duel') + + test = 0 + do ie=nets,nete + test(:,:,1) = cvlist(ie)%vol(:,:) + call edgeVunpack(edge1, test, 1, 0, ie) + cvlist(ie)%totvol(:,:) = test(:,:,1) + cvlist(ie)%invvol(:,:)=1.0_r8/cvlist(ie)%totvol(:,:) + end do + + call VerifyAreas(elem, hybrid, nets, nete) + + ! construct the global CV grid and global CV areas from the + ! local dual grid (cvlist()%cart_dual) and local areas (cvlist()%vol) + call construct_cv_duel(elem, hybrid, nets, nete) + ! compute output needed for SCRIP: lat/lon coordinates, and for the + ! control volume with only 3 corners, repeat the last point to make a + ! degenerate quad. + kmax2 = 0 + do ie = nets, nete + kmax2 = MAX(kmax2, MAXVAL(cvlist(ie)%nvert)) + end do + do ie = nets, nete + do j = 1, np + do i = 1, np + cvlist(ie)%vert_latlon(:,i,j)%lat = 0.0_r8 + cvlist(ie)%vert_latlon(:,i,j)%lon = 0.0_r8 + k = cvlist(ie)%nvert(i,j) + ! + ! follow SCRIP protocol - of kk>k then repeat last vertex + ! 
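The SCRIP convention referenced here, and applied in the loop that follows, is simply that every cell carries nv_max corners; a cell with fewer true corners repeats its last vertex so the extra edges are degenerate and add no area. A small illustrative helper, not taken from the patch:

    def pad_corners(verts, nv_max):
        """Pad a control-volume vertex list to nv_max entries by repeating the
        last vertex, as SCRIP expects for cells with fewer corners (the extra
        edges have zero length and do not change the cell)."""
        verts = list(verts)
        if not verts:
            raise ValueError("control volume must have at least one vertex")
        return verts + [verts[-1]] * (nv_max - len(verts))

    # A triangular corner volume written as a degenerate 5-gon:
    print(pad_corners([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)], nv_max=5))
    # [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0)]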
+ do kk = k+1, nv_max + cvlist(ie)%vert(kk, i, j) = cvlist(ie)%vert(k,i,j) + end do + do kk = 1, nv_max + cvlist(ie)%vert_latlon(kk, i, j) = change_coordinates(cvlist(ie)%vert(kk, i, j)) + cvlist(ie)%face_no(kk, i, j) = cube_face_number_from_sphere(cvlist(ie)%vert_latlon(kk, i, j)) + end do + + end do + end do + end do + ! Release memory + if(hybrid%masterthread) then + call freeedgebuffer(edge1) + call FreeGhostBuffer3D(ghost_buf) + end if + + initialized=.true. + end subroutine InitControlVolumes_duel + + function average(t, n) result(a) + + integer, intent(in) :: n + type(cartesian3d_t), intent(in) :: t(n) + type(cartesian3d_t) :: a + integer :: i + + a%x = 0._r8 + a%y = 0._r8 + a%z = 0._r8 + do i = 1, n + a%x = a%x + t(i)%x + a%y = a%y + t(i)%y + a%z = a%z + t(i)%z + end do + a%x = a%x / n + a%y = a%y / n + a%z = a%z / n + return + end function average + + function make_unique(a, n) result(m) + + integer, intent(in) :: n + real(r8), intent(inout) :: a(n) + integer :: m + integer :: i,j + real(r8) :: delta + + do i=1,n-1 + do j=i+1,n + ! if (ABS(a(j)-a(i)).lt. 1e-6) a(j) = 9999 + delta = abs(a(j)-a(i)) + if (delta < 1.e-6_r8) a(j) = 9999.0_r8 + if (abs((2.0_r8*pi) - delta) < 1.0e-6_r8) a(j) = 9999.0_r8 + end do + end do + m = 0 + do i=1,n + if (a(i) < 9000.0_r8) m = m + 1 + end do + if (mod(m,2).ne.0) then + do i=1,n + print *,'angle with centroid: ',i,a(i),mod(a(i),2*pi) + end do + call endrun("Error: Found an odd number or nodes for cv element. Should be even.") + end if + return + end function make_unique + + function SortNodes(t3, n) result(m) + use coordinate_systems_mod, only: cube_face_number_from_cart, cart2cubedsphere, change_coordinates + + + integer, intent(in) :: n + type(cartesian3d_t), intent(inout) :: t3(n) + + type(cartesian3d_t) :: c3, t(n) + type(cartesian2d_t) :: c2, t2 + real(r8) :: angle(n) + integer :: i,j,k,m,f + integer :: ip(n) + + c3 = average(t3, n) + f = cube_face_number_from_cart(c3) + c2 = cart2cubedsphere(c3, f) + + do i=1,n + t2 = cart2cubedsphere(t3(i), f) + t2%x = t2%x - c2%x + t2%y = t2%y - c2%y + angle(i) = atan2(t2%y, t2%x) + end do + m = make_unique(angle,n) + do i=1,m + k = 1 + do j=2,n + if (angle(j)2->3->4 is counter clockwise on the sphere + ! Negative: clockwise orientation + + do j=1,np + do i=1,np + cvlist(ie)%vert(:,i,j)%x = 0.0_r8 + cvlist(ie)%vert(:,i,j)%y = 0.0_r8 + cvlist(ie)%vert(:,i,j)%z = 0.0_r8 + end do + end do + + do j=-1,np+1 + do i=-1,np+1 + cv(i,j)%x = vertunpack(i,j,1) + cv(i,j)%y = vertunpack(i,j,2) + cv(i,j)%z = vertunpack(i,j,3) + end do + end do + + do j=-1,0 + do i=-1,0 + do k=1,mlt(swest)-1 + cv_sw(i,j,k)%x = sw(i,j,1,k) + cv_sw(i,j,k)%y = sw(i,j,2,k) + cv_sw(i,j,k)%z = sw(i,j,3,k) + end do + end do + end do + do j=-1,0 + do i=np,np+1 + do k=1,mlt(seast)-1 + cv_se(i,j,k)%x = se(i,j,1,k) + cv_se(i,j,k)%y = se(i,j,2,k) + cv_se(i,j,k)%z = se(i,j,3,k) + end do + end do + end do + do j=np,np+1 + do i=-1,0 + do k=1,mlt(nwest)-1 + cv_nw(i,j,k)%x = nw(i,j,1,k) + cv_nw(i,j,k)%y = nw(i,j,2,k) + cv_nw(i,j,k)%z = nw(i,j,3,k) + end do + end do + end do + do j=np,np+1 + do i=np,np+1 + do k=1,mlt(neast)-1 + cv_ne(i,j,k)%x = ne(i,j,1,k) + cv_ne(i,j,k)%y = ne(i,j,2,k) + cv_ne(i,j,k)%z = ne(i,j,3,k) + end do + end do + end do + + do j=2,np-1 + do i=2,np-1 + ! internal vertex on Cubed sphere + ! Here is the order: + ! + ! 4NW <- 3NE + ! | ^ + ! v | + ! 
1SW -> 2SE + vert(1) = cv(i-1, j-1) + vert(2) = cv(i , j-1) + vert(3) = cv(i , j ) + vert(4) = cv(i-1, j ) + cvlist(ie)%vert(1:4,i,j) = vert(1:4) + cvlist(ie)%nvert(i,j) = 4 + m=4 + end do + end do + + do j=0,np,np + do i=2,np-1 + vert(1) = cv(i-1, j-1) + vert(2) = cv(i , j-1) + vert(3) = cv(i , j ) + vert(4) = cv(i , j+1) + vert(5) = cv(i-1, j+1) + vert(6) = cv(i-1, j ) + p = j + if (p.eq.0) p=1 + cvlist(ie)%vert(1:6,i,p) = vert(1:6) + cvlist(ie)%nvert(i,p) = 6 + m=6 + end do + end do + + do j=2,np-1 + do i=0,np,np + vert(1) = cv(i-1, j-1) + vert(2) = cv(i , j-1) + vert(3) = cv(i+1, j-1) + vert(4) = cv(i+1, j ) + vert(5) = cv(i , j ) + vert(6) = cv(i-1, j ) + o = i + if (o.eq.0) o=1 + cvlist(ie)%vert(1:6,o,j) = vert(1:6) + cvlist(ie)%nvert(o,j) = 6 + m=6 + end do + end do + do j=0,np,np + do i=0,np,np + m = 0 + vert(:)%x = 0 + vert(:)%y = 0 + vert(:)%z = 0 + if (i.eq.0.and.j.eq.0) then + ! counterclockwise from lower right + vert(m+1) = cv(i+1, j-1) ! 5 4 + vert(m+2) = cv(i+1, j ) ! (-1,+1) (0,+1) (+1,+1) 3 + vert(m+3) = cv(i+1, j+1) ! + vert(m+4) = cv(i , j+1) ! (-1, 0) (i, j) (+1, 0) 2 + vert(m+5) = cv(i-1, j+1) ! + vert(m+6) = cv(i-1, j ) ! X X (+1,-1) 1 + m = m + 6 + if (mlt(swest).ne.0) then + vert(m+1) = cv(i-1, j-1) + vert(m+2) = cv(i , j-1) + m = m+2 + do k=1,mlt(swest)-1 ! Bummer, toss in (-1,0) because transpose is undetectable + vert(m+1) = cv_sw(i-1, j , k) + vert(m+2) = cv_sw(i-1, j-1, k) + vert(m+3) = cv_sw(i , j-1, k) + m=m+3 + end do + end if + end if + if (i.eq.np.and.j.eq.0) then + if (mlt(seast).ne.0) then + vert(m+1) = cv(i+1, j-1) + vert(m+2) = cv(i+1, j ) + m = m+2 + do k=1,mlt(seast)-1 + vert(m+1) = cv_se(i , j-1, k) + vert(m+2) = cv_se(i+1, j-1, k) + vert(m+3) = cv_se(i+1, j , k) + m=m+3 + end do + end if + vert(m+1) = cv(i+1, j+1) + vert(m+2) = cv(i , j+1) + vert(m+3) = cv(i-1, j+1) + vert(m+4) = cv(i-1, j ) + vert(m+5) = cv(i-1, j-1) + vert(m+6) = cv(i , j-1) + m = m + 6 + end if + if (i.eq.np.and.j.eq.np) then + vert(1) = cv(i+1, j-1) + vert(2) = cv(i+1, j ) + m = m + 2 + if (mlt(neast).ne.0) then + vert(m+1) = cv(i+1, j+1) + vert(m+2) = cv(i , j+1) + m = m+2 + do k=1,mlt(neast)-1 + vert(m+1) = cv_ne(i+1, j , k) + vert(m+2) = cv_ne(i+1, j+1, k) + vert(m+3) = cv_ne(i , j+1, k) + m=m+3 + end do + end if + vert(m+1) = cv(i-1, j+1) + vert(m+2) = cv(i-1, j ) + vert(m+3) = cv(i-1, j-1) + vert(m+4) = cv(i , j-1) + m = m + 4 + end if + if (i.eq.0.and.j.eq.np) then + vert(m+1) = cv(i+1, j-1) + vert(m+2) = cv(i+1, j ) + vert(m+3) = cv(i+1, j+1) + vert(m+4) = cv(i , j+1) + m = m + 4 + if (mlt(nwest).ne.0) then + vert(m+1) = cv(i-1, j+1) + vert(m+2) = cv(i-1, j ) + m = m+2 + do k=1,mlt(nwest)-1 + vert(m+1) = cv_nw(i , j+1, k) + vert(m+2) = cv_nw(i-1, j+1, k) + vert(m+3) = cv_nw(i-1, j , k) + m=m+3 + end do + end if + vert(m+1) = cv(i-1, j-1) + vert(m+2) = cv(i , j-1) + m = m + 2 + end if + o = i + p = j + if (o.eq.0) o=1 + if (p.eq.0) p=1 + m2=m + if (8 < m) then + m = SortNodes(vert, m2) + end if + if (m > nv_max) then + call endrun("error: vert dimensioned too small") + end if + cvlist(ie)%vert(1:m,o,p) = vert(1:m) + cvlist(ie)%nvert(o,p) = m + end do + end do + end do + end subroutine construct_cv_duel + + function SurfArea( cv, nvert ) result(area) + + type(cartesian3D_t), intent(in) :: cv(:) + integer, intent(in) :: nvert + + real(kind=r8) :: area, area1, area2, area3 + + if (abs(nvert) == 3 ) then + area2 = 0.0_r8 + area3 = 0.0_r8 + if (cv(1)%x == 0) then + call sphere_tri_area(cv(2), cv(3), cv(4), area1) + else if (cv(2)%x == 0) then + call sphere_tri_area(cv(1), 
cv(3), cv(4), area1) + else if (cv(3)%x == 0) then + call sphere_tri_area(cv(1), cv(2), cv(4), area1) + else if (cv(4)%x == 0) then + call sphere_tri_area(cv(1), cv(2), cv(3), area1) + else + write(iulog, *) cv(1)%x, cv(1)%y + write(iulog, *) cv(2)%x, cv(2)%y + write(iulog, *) cv(3)%x, cv(3)%y + write(iulog, *) cv(4)%x, cv(4)%y + write(iulog, *) 'SurfArea error: should never happen' + call shr_sys_flush(iulog) + call endrun('SurfArea: invalid cv coordinates') + end if + else if (abs(nvert)==4) then + call sphere_tri_area(cv(1), cv(2), cv(3), area1) + call sphere_tri_area(cv(1), cv(3), cv(4), area2) + area3 = 0.0_r8 + + else if (abs(nvert)==5) then + call sphere_tri_area(cv(1),cv(2),cv(3),area1) + call sphere_tri_area(cv(1),cv(3),cv(4),area2) + call sphere_tri_area(cv(1),cv(4),cv(5),area3) + else + call endrun('SurfArea: nvert > 5 not yet supported') + end if + area = area1 + area2 + area3 + end function SurfArea + + ! ^ + ! |dy o + ! | + ! (x,y) ---->dx + function SurfArea_dxdy(dx, dy, corner) result(integral) + use quadrature_mod, only: quadrature_t + + real(r8), intent(in) :: dx, dy + type(cartesian2d_t), intent(in) :: corner + real(r8) :: integral + + real(r8) :: alpha, beta, a1, a2, a3, a4 + + ! cubed-sphere cell area, from Lauritzen & Nair MWR 2008 + ! central angles: + ! cube face: -pi/4,-pi/4 -> pi/4,pi/4 + ! this formula gives 2 so normalize by 4pi/6 / 2 = pi/3 + alpha = corner%x + beta = corner%y + a1 = acos(-sin(alpha)*sin(beta)) ! 2.094 + a2 = -acos(-sin(alpha+dx)*sin(beta) ) ! -1.047 + a3 =- acos(-sin(alpha)*sin(beta+dy) ) ! -1.047 + a4 = acos(-sin(alpha+dx)*sin(beta+dy) ) ! 2.094 + integral = (a1+a2+a3+a4) + return + end function SurfArea_dxdy + + function find_intersect(x1in, x2in, y1in, y2in) result(sect) + + type(cartesian2D_t), intent(in) :: x1in, x2in, y1in, y2in + type(cartesian2D_t) :: sect + + type(cartesian2D_t) :: x, y, b, x1, x2, y1, y2 + real(kind=r8) :: s1, s2, detA + + ! x1 + (x2-x1)*s1 = y1 + (y2-y1)*s2 + ! b = y1-x1 + ! x=x2-x1 + ! y=y2-y1 + ! x s1 - y s2 = b + ! x(1) s1 - y(1) s2 = b(1) + ! x(2) s1 - y(2) s2 = b(2) + ! + ! x(1) -y(1) s1 = b(1) A s = b + ! x(2) -y(2) s2 = b(2) + ! + ! A2= -y(2) y(1) + ! -x(2) x(1) s = A2 * b /detA + + ! convert to gnomonic + x1%x = tan(x1in%x) + x2%x = tan(x2in%x) + y1%x = tan(y1in%x) + y2%x = tan(y2in%x) + x1%y = tan(x1in%y) + x2%y = tan(x2in%y) + y1%y = tan(y1in%y) + y2%y = tan(y2in%y) + + x%x = x2%x-x1%x + x%y = x2%y-x1%y + y%x = y2%x-y1%x + y%y = y2%y-y1%y + b%x = y1%x-x1%x + b%y = y1%y-x1%y + + detA = -x%x*y%y + x%y*y%x + + s1 = (-y%y*b%x + y%x*b%y )/detA + s2 = (-x%y*b%x + x%x*b%y )/detA + + sect%x = x1%x + (x2%x-x1%x)*s1 + sect%y = x1%y + (x2%y-x1%y)*s1 + + sect%x = (sect%x + y1%x + (y2%x-y1%x)*s2)/2 + sect%y = (sect%y + y1%y + (y2%y-y1%y)*s2)/2 + + if (s1<0 .or. s1>1) then + write(iulog, *) 'failed: intersection: ',s1,s2 + call shr_sys_flush(iulog) + call endrun('find_intersect: intersection failure') + end if + + ! convert back to equal angle: + sect%x = atan(sect%x) + sect%y = atan(sect%y) + end function find_intersect + + subroutine pentagon_iteration(sq1,sq2,pent,asq1,asq2,apent,faceno,anorm) + ! sq2 + ! 4 3 + ! 1 2 + ! + ! sq1 4 3 + ! 2 1 5 pent + ! 3 4 1 2 + ! + ! + ! d/dt sq1(1) = (area(sq1)-asq1) * [ com(sq1)-sq1(1) ] + ! +(area(sq2)-asq2) * [ com(sq2)-sq1(1) ] + ! +(area(pent)-apent) * [ com(pent)-sq1(1) ] + ! + ! + ! 
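The update documented above is a forward-Euler relaxation: each polygon's area error pulls the shared free vertices toward that polygon's centre of mass when the polygon is too big, and away from it when too small, scaled by 1/anorm, until every area matches its target. A minimal planar sketch of the same update with a single free vertex and two made-up triangles, using shoelace areas in place of the spherical areas from SurfArea/sphere_tri_area:

    import numpy as np

    def poly_area(pts):
        """Unsigned planar polygon area by the shoelace formula (stand-in for
        the spherical areas used in the Fortran)."""
        x, y = np.asarray(pts, dtype=float).T
        return 0.5 * abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

    # Two triangles sharing a free apex p: one hangs off the bottom edge of a
    # 2x2 box, one off the top edge, so moving p vertically trades area
    # between them.  Apply the pentagon_iteration update:
    #   p <- p + dt * sum_k (area_k - target_k)/anorm * (centroid_k - p)
    base_bot = [np.array([0.0, 0.0]), np.array([2.0, 0.0])]
    base_top = [np.array([0.0, 2.0]), np.array([2.0, 2.0])]
    targets = (1.2, 0.8)            # feasible: the two areas always sum to 2
    p = np.array([1.0, 0.5])
    dt, anorm = 0.5, 1.0

    for it in range(10000):
        tris = [base_bot + [p], base_top + [p]]
        diffs = [(poly_area(t) - tgt) / anorm for t, tgt in zip(tris, targets)]
        if sum(abs(d) for d in diffs) < 1e-12:
            break
        p = p + dt * sum(d * (np.mean(t, axis=0) - p) for d, t in zip(diffs, tris))

    print(it, p)    # p_y relaxes to 1.2 in a few dozen iterations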
+ type(cartesian2d_t), intent(inout) :: sq1(4), sq2(4), pent(5) + real(r8), intent(in) :: asq1, asq2, apent, anorm + integer, intent(in) :: faceno + + type(cartesian3D_t) :: sq1_3d(4), sq2_3d(4), pent_3d(5) + real(r8) :: isq1, isq2, ipent, diff1, diff2, diffp, err + real(r8), parameter :: dt = .5_r8 + real(r8), parameter :: tol_pentagon_iteration = 1.0e-10_r8 + type(cartesian2d_t) :: sq1com, sq2com, pentcom, ds1, ds2 + integer :: i, iter + integer, parameter :: iter_max = 10000 + + ! compute center of mass: + sq1com%x = sum(sq1(:)%x)/4 + sq1com%y = sum(sq1(:)%y)/4 + sq2com%x = sum(sq2(:)%x)/4 + sq2com%y = sum(sq2(:)%y)/4 + pentcom%x = sum(pent(:)%x)/5 + pentcom%y = sum(pent(:)%y)/5 + + do i = 1, 4 + sq1_3d(i)=cubedsphere2cart(sq1(i),faceno ) + sq2_3d(i)=cubedsphere2cart(sq2(i),faceno ) + pent_3d(i)=cubedsphere2cart(pent(i),faceno ) + end do + pent_3d(5)=cubedsphere2cart(pent(5),faceno ) + + do iter = 1, iter_max + isq1 = SurfArea(sq1_3d,4) + isq2 = SurfArea(sq2_3d,4) + ipent = SurfArea(pent_3d,5) + + ! d/dt sq1(1) = (area(sq1)-asq1) * [ com(sq1)-sq1(1) ] + ! +(area(sq2)-asq2) * [ com(sq2)-sq1(1) ] + ! +(area(pent)-apent) * [ com(pent)-sq1(1) ] + ! + diff1 = (isq1-asq1)/anorm + diff2 = (isq2-asq2)/anorm + diffp = (ipent-apent)/anorm + + err = abs(diff1) + abs(diff2) + abs(diffp) + if (err < tol_pentagon_iteration) exit + if (mod(iter,1000) == 0) then + write(iulog, '(i5,3e18.5)') iter, err + call shr_sys_flush(iulog) + end if + + ds1%x = diff1* ( sq1com%x - sq1(1)%x ) + ds1%y = diff1* ( sq1com%y - sq1(1)%y ) + ds1%x = ds1%x + diffp* ( pentcom%x - sq1(1)%x ) + ds1%y = ds1%y + diffp* ( pentcom%y - sq1(1)%y ) + + ds2%x = diff2* ( sq2com%x - sq2(1)%x ) + ds2%y = diff2* ( sq2com%y - sq2(1)%y ) + ds2%x = ds2%x + diffp* ( pentcom%x - sq2(1)%x ) + ds2%y = ds2%y + diffp* ( pentcom%y - sq2(1)%y ) + + sq1(1)%x = sq1(1)%x + dt*ds1%x + sq1(1)%y = sq1(1)%y + dt*ds1%y + sq2(1)%x = sq2(1)%x + dt*ds2%x + sq2(1)%y = sq2(1)%y + dt*ds2%y + pent(4)=sq2(1) + pent(5)=sq1(1) + sq1_3d(1)=cubedsphere2cart(sq1(1),faceno ) + sq2_3d(1)=cubedsphere2cart(sq2(1),faceno ) + pent_3d(4)=sq2_3d(1) + pent_3d(5)=sq1_3d(1) + end do + if (iter >= iter_max) then + write(iulog, *) 'pentagon iteration did not converge err=', err + call shr_sys_flush(iulog) + end if + end subroutine pentagon_iteration + + subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) + use edge_mod, only: freeedgebuffer + use element_mod, only: element_t,element_coordinates + use hybrid_mod, only: hybrid_t + + use quadrature_mod, only: quadrature_t, gausslobatto + use dimensions_mod, only: nlev + use cube_mod, only: convert_gbl_index + use coordinate_systems_mod, only: cart2cubedsphere_failsafe, cart2cubedsphere + use coordinate_systems_mod, only: cube_face_number_from_sphere + + integer, intent(in) :: nets,nete + type(element_t), intent(in), target :: elem(:) + type(hybrid_t), intent(in) :: hybrid + + type(cartesian2d_t) :: cartp_com(np,np) ! 
center of mass + type(cartesian2d_t) :: cartp_nm1(0:np,0:np) + real(r8) :: delx_k,dely_k,sum_dbg,r + integer :: i,j,ie,k,kptr,gllpts,nvert,k2,ie1,je1,face_no,kinsert + integer :: iter,iter_max,i1,j1 + real(r8) :: diff(np,np),diffy(np-1,np-1),diffx(np-1,np-1) + real(r8) :: dx,dy,a1(nets:nete),a2(nets:nete),d1(nets:nete),d1mid(nets:nete) + real(r8) :: d2,d1_global,d1_global_mid,sphere1,sphere2,diff2,diff3 + real(r8) :: diff23,diff32,diff33,diff22 + real(r8) :: gllnm1(0:np) !was longdouble_kind in HOMME + type(cartesian2d_t) :: corner,start,endd,cv_loc_2d(4,np,np),cvnew_loc_2d(4,np,np) + type(cartesian3D_t) :: cart,cv_loc_3d(nv_max,np,np) + type(cartesian3D_t) :: temp3d(nv_max) + type(cartesian2d_t) :: cartp2d(np,np) + type(cartesian2d_t) :: x1,x2,x3,x + type(cartesian2d_t) :: sq1(4),sq2(4),pent(5) + type(cartesian3D_t) :: x1_3d,x2_3d,x3_3d + type(quadrature_t) :: gll + type(cartesian2d_t) :: dir,dirsum + type(spherical_polar_t) :: polar_tmp(0:np,0:np) + real(r8) :: rvert,area1,area2,ave,lat(4),lon(4) + real(r8) :: s,ds,triarea,triarea_target + real(r8) :: xp1,xm1,yp1,ym1,sumdiff + real(r8) :: tiny = 1e-11_r8,norm + real(r8) :: tol = 2.e-11_r8 ! convergece outer iteration + real(r8) :: tol_pentagons = 1.e-13_r8 ! convergece pentagon iteration + + ! area difference to trigger pentagons. + ! if it is too small, we will have pentagons with 1 very short edges + ! accuracy of surfarea() with very thin triangles seems poor (1e-11) + ! ne=30 1e-3: add 648 pentagons. area ratio: 1.003 + ! ne=30 1e-4: add 696 pentagons. area ratio: 1.000004102 + ! ne=30 1e-5: add 696 pentagons. area ratio: 1.000004102 + ! ne=240 1e-4: add 5688/ 345600 pentagons, area ratio: 1.0004 + ! ne=240 1e-5: add 5736/ 345600 pentagons, area ratio: 1.000000078 + real(r8) :: tol_use_pentagons=1.0e-5_r8 + logical :: Debug=.FALSE.,keep + + integer :: face1,face2,found,ie_max,movex,movey,moved,ii,kmax,kk + integer :: nskip,npent + integer :: nskipie(nets:nete), npentie(nets:nete) + type(cartesian2d_t) :: vert1_2d, vert_2d,vert2_2d + type(cartesian3D_t) :: vert1,vert2,vert_inserted(7) + + kmax=4 + + gll = gausslobatto(np) + ! mid point rule: + do i=1,np-1 + gllnm1(i) = ( gll%points(i) + gll%points(i+1) ) /2 + end do + ! check that gll(i) < gllnm1(i) < gll(i+1) + do i=1,np-1 + if (gll%points(i) > gllnm1(i) .or. gllnm1(i) > gll%points(i+1)) then + call endrun("InitControlVolumes_gll: CV and GLL points not interleaved") + end if + end do + gllnm1(0)=-1 + gllnm1(np)=1 + + ! MNL: dx and dy are no longer part of element_t + ! but they are easily computed for the + ! uniform case + dx = pi/(2.0d0*dble(ne)) + dy = dx + + ! intialize local element dual grid, local element areas + + do ie=nets,nete + + call convert_gbl_index(elem(ie)%vertex%number,ie1,je1,face_no) + start%x=-pi/4 + ie1*dx + start%y=-pi/4 + je1*dy + endd%x =start%x + dx + endd%y =start%y + dy + cartp_nm1(0:np,0:np) = element_coordinates(start,endd,gllnm1) + cvlist(ie)%cartp_dual = cartp_nm1 + + ! compute true area of element and SEM area + a1(ie) = SurfArea_dxdy(dx,dy,elem(ie)%cartp(1,1)) + a2(ie) = sum(elem(ie)%spheremp(:,:)) + do i=1,np + do j=1,np + ! 
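The dual grid assembled here starts from the midpoints of adjacent GLL nodes, with the element boundary closing the first and last volume; the iteration that follows then nudges those dual points until each control-volume area matches the corresponding GLL weight (spheremp), up to the a1/a2 normalization. A 1-D numpy illustration of that starting point, using the exact np=4 Gauss-Lobatto nodes and weights in place of the gausslobatto() call:

    import numpy as np

    gll_pts = np.array([-1.0, -1.0/np.sqrt(5.0), 1.0/np.sqrt(5.0), 1.0])
    gll_wts = np.array([1.0/6.0, 5.0/6.0, 5.0/6.0, 1.0/6.0])

    # Control-volume edges at midpoints of adjacent GLL nodes, closed by the
    # element boundary: the gllnm1 array above.
    dual = np.concatenate(([-1.0], 0.5*(gll_pts[:-1] + gll_pts[1:]), [1.0]))

    # The interleaving check: gll(i) < dual(i) < gll(i+1) for interior edges.
    assert np.all(gll_pts[:-1] < dual[1:-1]) and np.all(dual[1:-1] < gll_pts[1:])

    # 1-D control-volume widths do not equal the GLL weights pointwise, which
    # is the mismatch the 2-D iteration below is designed to remove.
    widths = np.diff(dual)
    print(widths)                          # approx. [0.276, 0.724, 0.724, 0.276]
    print(gll_wts)                         # [0.167, 0.833, 0.833, 0.167]
    print(widths.sum(), gll_wts.sum())     # both 2.0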
(gnomonic coordinate lines only), more accurate + delx_k = cartp_nm1(i,j-1)%x - cartp_nm1(i-1,j-1)%x + dely_k = cartp_nm1(i-1,j)%y - cartp_nm1(i-1,j-1)%y + cvlist(ie)%vol(i,j) = SurfArea_dxdy(delx_k,dely_k,cartp_nm1(i-1,j-1)) + end do + end do + global_shared_buf(ie,1) = a1(ie) + global_shared_buf(ie,2) = a2(ie) + end do + call wrap_repro_sum(nvars=2, comm=hybrid%par%comm) + sphere1 = global_shared_sum(1) + sphere2 = global_shared_sum(2) + + ! construct the global CV grid and global CV areas from the + ! local dual grid (cvlist()%cart_dual) and local areas (cvlist()%vol) + call construct_cv_gll(elem,hybrid,nets,nete) + + iter_max=2000 + if (iter_max>0) then + ! areas computed from eleemnts on boundaries are from hexagons and pentagons + ! compute new areas where all CVs are squares or triangles + do ie=nets,nete + do i=1,np + do j=1,np + ! ifort bug if we try this: + ! area2 = surfarea(cvlist(ie)%vert(1:4,i,j),cvlist(ie)%nvert(i,j)) + cv_loc_3d(:,i,j)=cvlist(ie)%vert(:,i,j) + area2 = surfarea(cv_loc_3d(:,i,j),cvlist(ie)%nvert(i,j)) + cvlist(ie)%totvol(i,j)=area2 + end do + end do + end do + end if + ! iteration over cvlist(ie)%totvol + d1_global=0 + do iter=1,iter_max + ie_max=-1 + do ie=nets,nete + ! we want at each point, the gll_area = true_area + ! but sum(gll_area) = a2 and sum(true_area)=a1 + ! so normalize so that: gll_area/a2 = true_area/a1, or gll_area = area*a2/a1 + + ! requires more iterations, but the total volume within an + ! element is always correct + diff(:,:) = ( cvlist(ie)%vol(:,:) - elem(ie)%spheremp(:,:)*a1(ie)/a2(ie) ) + sumdiff=sum( cvlist(ie)%vol(:,:)) - a1(ie) + diff(:,:) = diff(:,:)/(a1(ie)/(np*np)) + + + + ! set boundary values (actually not used) + cartp_nm1 = cvlist(ie)%cartp_dual(0:np,0:np) + ! convert 9 cv corners in this element into cart_nm1 cubed-sphere coordiantes + do i=1,np-1 + do j=1,np-1 + cartp_nm1(i,j) = cart2cubedsphere( cvlist(ie)%vert(3,i,j),elem(ie)%FaceNum ) + end do + end do + ! compute center of mass of control volumes: + ! todo: move points towards GLL points, not center of mass + ! center of mass could send up a feedback with CV points! + do i=1,np + do j=1,np + cart%x = sum( cvlist(ie)%vert(:,i,j)%x )/abs(cvlist(ie)%nvert(i,j)) + cart%y = sum( cvlist(ie)%vert(:,i,j)%y )/abs(cvlist(ie)%nvert(i,j)) + cart%z = sum( cvlist(ie)%vert(:,i,j)%z )/abs(cvlist(ie)%nvert(i,j)) + cartp_com(i,j) = cart2cubedsphere( cart,elem(ie)%FaceNum ) + end do + end do + d2=0 + do i=1,np-1 + do j=1,np-1 + dirsum%x=0 + dirsum%y=0 + movex=1 + movey=1 + moved=0 + + do i1=0,1 + do j1=0,1 + ! keep=.true. : .85/1.05 + ! corners only: .93/1.07 + ! corners and edges: .89/1.11 + keep=.false. + ! corner volumes + if (i==1 .and. j==1) then + if (i1==0 .and. j1==0) keep=.true. + moved=1 + else if (i==np-1 .and. j==1) then + if (i1==1 .and. j1==0) keep=.true. + moved=-1 + else if (i==1 .and. j==np-1) then + if (i1==0 .and. j1==1) keep=.true. + moved=-1 + else if (i==np-1 .and. j==np-1) then + if (i1==1 .and. j1==1) keep=.true. + moved=1 + ! edge volumes + + + else if (i==1) then + if (i1==0) keep=.true. + else if (i==np-1) then + if (i1==1) keep=.true. + else if (j==1) then + if (j1==0) keep=.true. + else if (j==np-1) then + if (j1==1) keep=.true. + else + keep=.true. + end if + if (keep) then + ! error weighted direction towards center of mass of area + ! move towards grid point + dir%x = (elem(ie)%cartp(i+i1,j+j1)%x - cartp_nm1(i,j)%x )*(abs(diff(i+i1,j+j1))) + dir%y = (elem(ie)%cartp(i+i1,j+j1)%y - cartp_nm1(i,j)%y )*(abs(diff(i+i1,j+j1))) + if (moved==1) then + ! 
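At the four element-corner dual points the update is constrained to slide along the element diagonal, so the error-weighted displacement is collapsed onto (1,1)/sqrt(2) or (-1,1)/sqrt(2) in the lines that follow. A small numpy mirror of that restriction; the result is parallel to the orthogonal projection onto the diagonal, scaled by sqrt(2):

    import numpy as np

    def restrict_to_diagonal(d, sense=+1):
        """Collapse displacement d=(dx,dy) onto the (1,1) diagonal (sense=+1)
        or the (-1,1) diagonal (sense=-1), as the corner update above does."""
        dx, dy = d
        if sense == +1:
            c = dx/np.sqrt(2.0) + dy/np.sqrt(2.0)
            return np.array([c, c])
        c = -dx/np.sqrt(2.0) + dy/np.sqrt(2.0)
        return np.array([-c, c])

    print(restrict_to_diagonal(np.array([0.3, 0.1])))   # [0.283, 0.283]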
project onto (1,1)/sqrt(2) + dir%x = dir%x/sqrt(2d0) + dir%y/sqrt(2d0) + dir%y = dir%x + end if + if (moved==-1) then + ! project onto (-1,1)/sqrt(2) + dir%y = -dir%x/sqrt(2d0) + dir%y/sqrt(2d0) + dir%x = -dir%y + end if + + + if ( diff(i+i1,j+j1) > 0 ) then + ! this volume is too big, so move cv point towards grid center + ! weighted by length error + dirsum%x = dirsum%x + movex*dir%x + dirsum%y = dirsum%y + movey*dir%y + else + dirsum%x = dirsum%x - movex*dir%x + dirsum%y = dirsum%y - movey*dir%y + end if + end if + end do + end do + d2 = d2 + dirsum%x**2 + dirsum%y**2 + cartp_nm1(i,j)%x = cartp_nm1(i,j)%x + 0.25_r8*dirsum%x + cartp_nm1(i,j)%y = cartp_nm1(i,j)%y + 0.25_r8*dirsum%y + + end do + end do + cvlist(ie)%cartp_dual(0:np,0:np) = cartp_nm1 + d2=sqrt(d2) + + d1(ie)=sqrt(sum(diff**2)) + + d1mid(ie)=d1(ie) + ! ignore center cv's: + diff(2:3,2:3)=0 + d1mid(ie)=sqrt(sum(diff**2)) + + end do ! ie loop + dx=maxval(d1) + d1_global = ParallelMax(dx,hybrid) + dx=maxval(d1mid) + d1_global_mid = ParallelMax(dx,hybrid) + if (mod(iter-1,250).eq.0) then + if (hybrid%masterthread) write(iulog, *) iter,"max d1=",d1_global,d1_global_mid + end if + ! compute new global CV (cvlist(ie)%vert from cvlist(ie)%cartp_dual). + ! cvlist()%totarea incorrect since local volumes not computed above + call construct_cv_gll(elem,hybrid,nets,nete) + + ! update totvol (area of multi-element cv) + do ie=nets,nete + do i=1,np + do j=1,np + ! ifort bug if we try this: + ! area2 = surfarea(cvlist(ie)%vert(1:4,i,j),cvlist(ie)%nvert(i,j)) + cv_loc_3d(:,i,j)=cvlist(ie)%vert(:,i,j) + area2 = surfarea(cv_loc_3d(:,i,j),cvlist(ie)%nvert(i,j)) + cvlist(ie)%totvol(i,j) = area2 + if (isnan(area2)) then + write(iulog, *) 'ie,i,j',ie,i,j + write(iulog, *) cvlist(ie)%nvert(i,j) + write(iulog, *) cv_loc_3d(1,i,j) + write(iulog, *) cv_loc_3d(2,i,j) + write(iulog, *) cv_loc_3d(3,i,j) + write(iulog, *) cv_loc_3d(4,i,j) + call shr_sys_flush(iulog) + call endrun('InitControlVolumes_gll: area = NaN') + end if + end do + end do + end do + + ! update %vol (local control volume within each element) + do ie=nets,nete + cartp2d = elem(ie)%cartp + do i=1,np + do j=1,np + ! ifort bug if we try this: + ! area2 = surfarea(cvlist(ie)%vert(1:4,i,j),cvlist(ie)%nvert(i,j)) + + do ii=1,4 + ! + ! if we do not use _failsafe version of cart2cubedsphere code will fail with "-debug" + ! + cv_loc_2d(ii,i,j) = cart2cubedsphere_failsafe( cvlist(ie)%vert(ii,i,j),elem(ie)%FaceNum ) + end do + if (i==1 .and. j==1) then + cv_loc_2d(1,i,j)=cartp2d(i,j) + end if + if (i==np .and. j==1) then + cv_loc_2d(2,i,j)=cartp2d(i,j) + end if + if (i==1 .and. j==np) then + cv_loc_2d(4,i,j)=cartp2d(i,j) + end if + if (i==np .and. j==np) then + cv_loc_2d(3,i,j)=cartp2d(i,j) + end if + + + cvnew_loc_2d(:,i,j)=cv_loc_2d(:,i,j) + + ! + ! 4NW <- 3NE + ! | ^ + ! v | + ! 1SW -> 2SE + if (i==1) then + ! replace points with x< elem(ie)%vert(i,j)%x + if (cv_loc_2d(1,i,j)%x < cartp2d(i,j)%x) then + cvnew_loc_2d(1,i,j) = find_intersect(& + cv_loc_2d(1,i,j), cv_loc_2d(2,i,j),& + elem(ie)%cartp(i,1),elem(ie)%cartp(i,np)) + end if + if (cv_loc_2d(4,i,j)%x < cartp2d(i,j)%x) then + cvnew_loc_2d(4,i,j) = find_intersect(& + cv_loc_2d(4,i,j), cv_loc_2d(3,i,j),& + elem(ie)%cartp(i,1),elem(ie)%cartp(i,np)) + end if + end if + + if (i==np) then + ! 
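find_intersect, used in this clipping step to pull control-volume corners back onto the element boundary, reduces to a plain two-segment intersection once the equal-angle coordinates have been mapped to gnomonic ones with tan(). A plane-geometry sketch of that 2x2 solve (coordinate conversions omitted; the function name below is my own):

    import numpy as np

    def segment_intersection(x1, x2, y1, y2):
        """Intersection of segment x1->x2 with segment y1->y2 in the plane,
        via the same 2x2 linear solve as find_intersect (which additionally
        converts to gnomonic coordinates before solving and back afterwards)."""
        x1, x2, y1, y2 = map(np.asarray, (x1, x2, y1, y2))
        # Solve x1 + (x2-x1)*s1 = y1 + (y2-y1)*s2 for (s1, s2).
        A = np.column_stack((x2 - x1, -(y2 - y1)))
        s1, s2 = np.linalg.solve(A, y1 - x1)
        if not (0.0 <= s1 <= 1.0):
            raise ValueError("segments do not intersect within the first segment")
        return x1 + (x2 - x1) * s1

    # The diagonals of the unit square cross at (0.5, 0.5).
    print(segment_intersection((0, 0), (1, 1), (1, 0), (0, 1)))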
replace points with x> elem(ie)%vert(i,j)%x + if (cv_loc_2d(2,i,j)%x > cartp2d(i,j)%x) then + cvnew_loc_2d(2,i,j) = find_intersect(& + cv_loc_2d(1,i,j), cv_loc_2d(2,i,j),& + elem(ie)%cartp(i,1),elem(ie)%cartp(i,np)) + end if + if (cv_loc_2d(3,i,j)%x > cartp2d(i,j)%x) then + cvnew_loc_2d(3,i,j) = find_intersect(& + cv_loc_2d(4,i,j), cv_loc_2d(3,i,j),& + elem(ie)%cartp(i,1),elem(ie)%cartp(i,np)) + end if + end if + ! + ! 4NW <- 3NE + ! | ^ + ! v | + ! 1SW -> 2SE + if (j==1) then + ! replace points with y < elem(ie)%vert(i,j)%y + if (cv_loc_2d(1,i,j)%y < cartp2d(i,j)%y) then + cvnew_loc_2d(1,i,j) = find_intersect(& + cv_loc_2d(1,i,j), cv_loc_2d(4,i,j),& + elem(ie)%cartp(1,j),elem(ie)%cartp(np,j)) + end if + if (cv_loc_2d(2,i,j)%y < cartp2d(i,j)%y) then + cvnew_loc_2d(2,i,j) = find_intersect(& + cv_loc_2d(2,i,j), cv_loc_2d(3,i,j),& + elem(ie)%cartp(1,j),elem(ie)%cartp(np,j)) + end if + end if + if (j==np) then + ! replace points with y > elem(ie)%vert(i,j)%y + if (cv_loc_2d(4,i,j)%y > cartp2d(i,j)%y) then + cvnew_loc_2d(4,i,j) = find_intersect(& + cv_loc_2d(1,i,j), cv_loc_2d(4,i,j),& + elem(ie)%cartp(1,j),elem(ie)%cartp(np,j)) + end if + if (cv_loc_2d(3,i,j)%y > cartp2d(i,j)%y) then + cvnew_loc_2d(3,i,j) = find_intersect(& + cv_loc_2d(2,i,j), cv_loc_2d(3,i,j),& + elem(ie)%cartp(1,j),elem(ie)%cartp(np,j)) + end if + end if + do ii=1,4 + cv_loc_3d(ii,i,j)=cubedsphere2cart(cvnew_loc_2d(ii,i,j),elem(ie)%FaceNum ) + end do + area2 = surfarea(cv_loc_3d(:,i,j),4) + cvlist(ie)%vol(i,j) = area2 + if (isnan(area2)) then + write(iulog, *) 'ie,i,j',ie,i,j + write(iulog, *) cvlist(ie)%nvert(i,j) + write(iulog, *) cv_loc_3d(1,i,j) + write(iulog, *) cv_loc_3d(2,i,j) + write(iulog, *) cv_loc_3d(3,i,j) + write(iulog, *) cv_loc_3d(4,i,j) + call shr_sys_flush(iulog) + call endrun('InitControlVolumes_gll: area = NaN') + end if + end do + end do + end do +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if ( d1_global > 10.0_r8 .or. d1_global_mid < tol) then + if (hybrid%masterthread) then + write(iulog, *) 'first iteration stopping:' + write(iulog, *) iter, "max error=", d1_global_mid + call shr_sys_flush(iulog) + end if + exit + end if + end do ! iteration loop + + kmax=5 + + nskip=0 + npent=0 + nskipie(:) = 0 + npentie(:) = 0 + do ie=nets,nete + diff = ( cvlist(ie)%vol(:,:) - elem(ie)%spheremp(:,:)*a1(ie)/a2(ie) ) + if ( maxval(abs(diff(2:3,2:3)))/a1(ie) > tol_use_pentagons ) then + npent=npent+1 + npentie(ie) = npentie(ie) + 1 + ! + ! 4NW <- 3NE + ! | ^ + ! v | 23 33 + ! 1SW -> 2SE 22 32 + if (diff(2,2)>0 .and. 
diff(3,3)>0) then + x1 = cart2cubedsphere( cvlist(ie)%vert(3,2,2),elem(ie)%FaceNum ) + x2 = cart2cubedsphere( cvlist(ie)%vert(1,2,2),elem(ie)%FaceNum ) + s = .99_r8 + x3%x = x2%x + (x1%x-x2%x)*s + x3%y = x2%y + (x1%y-x2%y)*s + + sq1(1) = x3 + sq1(2) = cart2cubedsphere( cvlist(ie)%vert(4,2,2),elem(ie)%FaceNum ) + sq1(3) = cart2cubedsphere( cvlist(ie)%vert(1,2,2),elem(ie)%FaceNum ) + sq1(4) = cart2cubedsphere( cvlist(ie)%vert(2,2,2),elem(ie)%FaceNum ) + + x2 = cart2cubedsphere( cvlist(ie)%vert(3,3,3),elem(ie)%FaceNum ) + s = .99_r8 + x3%x = x2%x + (x1%x-x2%x)*s + x3%y = x2%y + (x1%y-x2%y)*s + + sq2(1) = x3 + sq2(2) = cart2cubedsphere( cvlist(ie)%vert(2,3,3),elem(ie)%FaceNum ) + sq2(3) = cart2cubedsphere( cvlist(ie)%vert(3,3,3),elem(ie)%FaceNum ) + sq2(4) = cart2cubedsphere( cvlist(ie)%vert(4,3,3),elem(ie)%FaceNum ) + + pent(1) = cart2cubedsphere( cvlist(ie)%vert(1,3,2),elem(ie)%FaceNum ) + pent(2) = cart2cubedsphere( cvlist(ie)%vert(2,3,2),elem(ie)%FaceNum ) + pent(3) = cart2cubedsphere( cvlist(ie)%vert(3,3,2),elem(ie)%FaceNum ) + pent(4) = sq2(1) + pent(5) = sq1(1) + + call pentagon_iteration(sq1,sq2,pent,& + elem(ie)%spheremp(2,2)*a1(ie)/a2(ie), & + elem(ie)%spheremp(3,3)*a1(ie)/a2(ie), & + elem(ie)%spheremp(3,2)*a1(ie)/a2(ie),elem(ie)%FaceNum,a1(ie)) + + x2_3d=cubedsphere2cart(sq1(1),elem(ie)%FaceNum ) + x3_3d=cubedsphere2cart(sq2(1),elem(ie)%FaceNum ) + + cvlist(ie)%vert(3,2,2)=x2_3d + cvlist(ie)%vert(1,3,3)=x3_3d + + cvlist(ie)%vert(5,2,3)=cvlist(ie)%vert(4,2,3) + cvlist(ie)%vert(4,2,3)=cvlist(ie)%vert(3,2,3) + cvlist(ie)%vert(2,2,3)=x2_3d + cvlist(ie)%vert(3,2,3)=x3_3d + + cvlist(ie)%vert(5,3,2)=x2_3d + cvlist(ie)%vert(4,3,2)=x3_3d + + cvlist(ie)%nvert(2,3)=sign(5,cvlist(ie)%nvert(2,3)) + cvlist(ie)%nvert(3,2)=sign(5,cvlist(ie)%nvert(3,2)) + else if (diff(2,3) >0 .and. diff(3,2)>0) then + ! + ! 4NW <- 3NE + ! | ^ + ! v | 23 33 + ! 
1SW -> 2SE 22 32 + x1 = cart2cubedsphere( cvlist(ie)%vert(2,2,3),elem(ie)%FaceNum ) + x2 = cart2cubedsphere( cvlist(ie)%vert(4,2,3),elem(ie)%FaceNum ) + s = .99_r8 + x3%x = x2%x + (x1%x-x2%x)*s + x3%y = x2%y + (x1%y-x2%y)*s + + sq1(1) = x3 + sq1(2) = cart2cubedsphere( cvlist(ie)%vert(3,2,3),elem(ie)%FaceNum ) + sq1(3) = cart2cubedsphere( cvlist(ie)%vert(4,2,3),elem(ie)%FaceNum ) + sq1(4) = cart2cubedsphere( cvlist(ie)%vert(1,2,3),elem(ie)%FaceNum ) + + x2 = cart2cubedsphere( cvlist(ie)%vert(2,3,2),elem(ie)%FaceNum ) + s = .99_r8 + x3%x = x2%x + (x1%x-x2%x)*s + x3%y = x2%y + (x1%y-x2%y)*s + + sq2(1) = x3 + sq2(2) = cart2cubedsphere( cvlist(ie)%vert(1,3,2),elem(ie)%FaceNum ) + sq2(3) = cart2cubedsphere( cvlist(ie)%vert(2,3,2),elem(ie)%FaceNum ) + sq2(4) = cart2cubedsphere( cvlist(ie)%vert(3,3,2),elem(ie)%FaceNum ) + + pent(1) = cart2cubedsphere( cvlist(ie)%vert(4,2,2),elem(ie)%FaceNum ) + pent(2) = cart2cubedsphere( cvlist(ie)%vert(1,2,2),elem(ie)%FaceNum ) + pent(3) = cart2cubedsphere( cvlist(ie)%vert(2,2,2),elem(ie)%FaceNum ) + pent(4) = sq2(1) + pent(5) = sq1(1) + + call pentagon_iteration(sq1,sq2,pent,& + elem(ie)%spheremp(2,3)*a1(ie)/a2(ie), & + elem(ie)%spheremp(3,2)*a1(ie)/a2(ie), & + elem(ie)%spheremp(2,2)*a1(ie)/a2(ie),elem(ie)%FaceNum,a1(ie)) + + x2_3d=cubedsphere2cart(sq1(1),elem(ie)%FaceNum ) + x3_3d=cubedsphere2cart(sq2(1),elem(ie)%FaceNum ) + + cvlist(ie)%vert(2,2,3)=x2_3d + + cvlist(ie)%vert(4,3,2)=x3_3d + + cvlist(ie)%vert(5,2,2)=cvlist(ie)%vert(4,2,2) + cvlist(ie)%vert(3,2,2)=x3_3d + cvlist(ie)%vert(4,2,2)=x2_3d + + + cvlist(ie)%vert(1,3,3)=x3_3d + cvlist(ie)%vert(5,3,3)=x2_3d + + cvlist(ie)%nvert(3,3)=sign(5,cvlist(ie)%nvert(3,3)) + cvlist(ie)%nvert(2,2)=sign(5,cvlist(ie)%nvert(2,2)) + else + if (hybrid%masterthread) then + write(iulog, *) ie,'bad type' + call shr_sys_flush(iulog) + end if + call endrun('InitControlVolumes_gll: bad type') + end if + ! recompute areas: + do i=2,3 + do j=2,3 + nvert=abs(cvlist(ie)%nvert(i,j)) + temp3d(1:nvert)=cvlist(ie)%vert(1:nvert,i,j) + cvlist(ie)%vol(i,j)=surfarea(temp3d,nvert) + cvlist(ie)%totvol(i,j)=cvlist(ie)%vol(i,j) + end do + end do + else + !write(iulog, *) 'skipping pentagon procedure ie=',ie + !write(iulog, *) 'maxval diff: ',maxval(abs(diff(2:3,2:3)))/a1(ie) + nskip=nskip+1 + nskipie(ie) = nskipie(ie) + 1 + end if + global_shared_buf(ie,1) = nskipie(ie) + global_shared_buf(ie,2) = npentie(ie) + end do + call wrap_repro_sum(nvars=2, comm=hybrid%par%comm) + nskip = global_shared_sum(1) + npent = global_shared_sum(2) + if (hybrid%masterthread) then + write(*,'(a,i7,a,i7)') "no. elements where pentagons were added: ",npent,"/",npent+nskip + end if + + ! compute output needed for SCRIP: lat/lon coordinates, and for the + ! control volume with only 3 corners, repeat the last point to make a + ! degenerate quad. + do ie=nets,nete + do j=1,np + do i=1,np + cvlist(ie)%vert_latlon(:,i,j)%lat = 0._r8 + cvlist(ie)%vert_latlon(:,i,j)%lon = 0._r8 + do k = 1, kmax + rvert = cvlist(ie)%vert(k,i,j)%x**2+cvlist(ie)%vert(k,i,j)%y**2+cvlist(ie)%vert(k,i,j)%z**2 + if(rvert > 0.9_r8) then + cvlist(ie)%vert_latlon(k,i,j) = change_coordinates(cvlist(ie)%vert(k,i,j)) + else + ! coordinates = 0, this corner was not set above because this point + ! only has 3 cells (corner point) pick either neighbor to make a degenerate quad + k2 = k - 1 + if (k2 == 0) then + k2 = 2 ! 
can only happen for corner point with data in 2,3,4 + end if + cvlist(ie)%vert_latlon(k,i,j) = change_coordinates(cvlist(ie)%vert(k2,i,j)) + cvlist(ie)%vert(k,i,j) = cvlist(ie)%vert(k2,i,j) + end if + end do + end do + end do + end do + ! Release memory + if(hybrid%masterthread) then + call freeedgebuffer(edge1) + end if + + initialized=.true. + end subroutine InitControlVolumes_gll + + subroutine construct_cv_gll(elem,hybrid,nets,nete) + ! + ! construct global dual grid from local element dual grid cvlist(ie)%cartp_dual(:,:) + ! all control volumes will be squares or triangles (at cube corners) + ! + ! 10/2009: added option to make hexagon control volumes at cube edges and corners + ! + use bndry_mod, only: bndry_exchange + use element_mod, only: element_t + use hybrid_mod, only: hybrid_t + use edge_mod, only: edgeVpack, edgeVunpack, edgeVunpackVert + + type(element_t), intent(in), target :: elem(:) + type(hybrid_t), intent(in) :: hybrid + integer, intent(in) :: nets,nete + ! local + integer :: i,j,k,ie,kptr,nvert,ie2 + logical :: corner + real(r8) :: test(np,np,1),vertpack(np,np,3),rvert + type(cartesian2d_t) :: vert(4) + type(cartesian2d_t) :: cartp_nm1(0:np,0:np) + + test(:,:,:) = 0 + + do ie=nets,nete + ! now construct the dual grid + + cartp_nm1 = cvlist(ie)%cartp_dual + + do j=1,np + do i=1,np + cvlist(ie)%vert(:,i,j)%x = 0_r8 + cvlist(ie)%vert(:,i,j)%y = 0_r8 + cvlist(ie)%vert(:,i,j)%z = 0_r8 + end do + end do + + ! interior + + do j=2,np-1 + do i=2,np-1 + + ! internal vertex on Cubed sphere + ! Here is the order: + ! + ! 4NW <- 3NE + ! | ^ + ! v | + ! 1SW -> 2SE + vert(1)%x = cartp_nm1(i-1,j-1)%x + vert(1)%y = cartp_nm1(i-1,j-1)%y + vert(2)%x = cartp_nm1(i ,j-1)%x + vert(2)%y = cartp_nm1(i ,j-1)%y + vert(3)%x = cartp_nm1(i ,j )%x + vert(3)%y = cartp_nm1(i ,j )%y + vert(4)%x = cartp_nm1(i-1,j )%x + vert(4)%y = cartp_nm1(i-1,j )%y + + do k=1,4 + cvlist(ie)%vert(k,i,j) = cubedsphere2cart(vert(k),elem(ie)%FaceNum) + end do + cvlist(ie)%nvert(i,j) = 4 + + end do + end do + + ! Compute everything on the edges and then sum + do i=2,np-1 + j=1 + ! + ! 4NW <- 3NE + ! | ^ + ! v | + ! 1SW -> 2SE + ! + ! + ! only pack top two nodes. + ! 
leave other data zero, filled in by edgeexchange + cvlist(ie)%vert(4,i,j)%x = cvlist(ie)%vert(1,i,j+1)%x + cvlist(ie)%vert(4,i,j)%y = cvlist(ie)%vert(1,i,j+1)%y + cvlist(ie)%vert(4,i,j)%z = cvlist(ie)%vert(1,i,j+1)%z + cvlist(ie)%vert(3,i,j)%x = cvlist(ie)%vert(2,i,j+1)%x + cvlist(ie)%vert(3,i,j)%y = cvlist(ie)%vert(2,i,j+1)%y + cvlist(ie)%vert(3,i,j)%z = cvlist(ie)%vert(2,i,j+1)%z + + + j=np + + cvlist(ie)%vert(1,i,j)%x = cvlist(ie)%vert(4,i,j-1)%x + cvlist(ie)%vert(1,i,j)%y = cvlist(ie)%vert(4,i,j-1)%y + cvlist(ie)%vert(1,i,j)%z = cvlist(ie)%vert(4,i,j-1)%z + cvlist(ie)%vert(2,i,j)%x = cvlist(ie)%vert(3,i,j-1)%x + cvlist(ie)%vert(2,i,j)%y = cvlist(ie)%vert(3,i,j-1)%y + cvlist(ie)%vert(2,i,j)%z = cvlist(ie)%vert(3,i,j-1)%z + + end do + + do j=2,np-1 + i=1 + + cvlist(ie)%vert(2,i,j)%x = cvlist(ie)%vert(1,i+1,j)%x + cvlist(ie)%vert(2,i,j)%y = cvlist(ie)%vert(1,i+1,j)%y + cvlist(ie)%vert(2,i,j)%z = cvlist(ie)%vert(1,i+1,j)%z + cvlist(ie)%vert(3,i,j)%x = cvlist(ie)%vert(4,i+1,j)%x + cvlist(ie)%vert(3,i,j)%y = cvlist(ie)%vert(4,i+1,j)%y + cvlist(ie)%vert(3,i,j)%z = cvlist(ie)%vert(4,i+1,j)%z + + i=np + + cvlist(ie)%vert(4,i,j)%x = cvlist(ie)%vert(3,i-1,j)%x + cvlist(ie)%vert(4,i,j)%y = cvlist(ie)%vert(3,i-1,j)%y + cvlist(ie)%vert(4,i,j)%z = cvlist(ie)%vert(3,i-1,j)%z + cvlist(ie)%vert(1,i,j)%x = cvlist(ie)%vert(2,i-1,j)%x + cvlist(ie)%vert(1,i,j)%y = cvlist(ie)%vert(2,i-1,j)%y + cvlist(ie)%vert(1,i,j)%z = cvlist(ie)%vert(2,i-1,j)%z + + end do + + ! Corners + ! SW + cvlist(ie)%vert(3,1,1)%x = cvlist(ie)%vert(1,2,2)%x + cvlist(ie)%vert(3,1,1)%y = cvlist(ie)%vert(1,2,2)%y + cvlist(ie)%vert(3,1,1)%z = cvlist(ie)%vert(1,2,2)%z + + ! SE + cvlist(ie)%vert(4,np,1)%x = cvlist(ie)%vert(2,np-1,2)%x + cvlist(ie)%vert(4,np,1)%y = cvlist(ie)%vert(2,np-1,2)%y + cvlist(ie)%vert(4,np,1)%z = cvlist(ie)%vert(2,np-1,2)%z + + ! NE + cvlist(ie)%vert(1,np,np)%x = cvlist(ie)%vert(3,np-1,np-1)%x + cvlist(ie)%vert(1,np,np)%y = cvlist(ie)%vert(3,np-1,np-1)%y + cvlist(ie)%vert(1,np,np)%z = cvlist(ie)%vert(3,np-1,np-1)%z + + ! NW + cvlist(ie)%vert(2,1,np)%x = cvlist(ie)%vert(4,2,np-1)%x + cvlist(ie)%vert(2,1,np)%y = cvlist(ie)%vert(4,2,np-1)%y + cvlist(ie)%vert(2,1,np)%z = cvlist(ie)%vert(4,2,np-1)%z + + kptr=0 + test(:,:,1) = cvlist(ie)%vol(:,:) + call edgeVpack(edge1,test(1,1,1),1,kptr,ie) + + cvlist(ie)%invvol(:,:) = cvlist(ie)%vol(:,:) + + end do ! loop over NE + + call bndry_exchange(hybrid,edge1,location='construct_cv_gll #1') + + do ie=nets,nete + kptr=0 + call edgeVunpack(edge1, cvlist(ie)%invvol(1,1),1, kptr, ie) + cvlist(ie)%totvol(:,:)=cvlist(ie)%invvol(:,:) + cvlist(ie)%invvol(:,:)=1.0_r8/cvlist(ie)%invvol(:,:) + end do + + ! Create the polygon at the edges of the element + + + if(.NOT.(MODULO(np,2)==0)) then + call endrun("surfaces_mod: NV odd not implemented") + end if + vertpack = 0 + do ie=nets,nete + ! Special messed up copy + ! + !ASC should be replaced by a edgepack + ! S+N + do i=1,np/2 + j=1 + vertpack(i,j,1) = cvlist(ie)%vert(3,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(3,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(3,i,j)%z + j=np + vertpack(i,j,1) = cvlist(ie)%vert(2,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(2,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(2,i,j)%z + end do + + do i=np/2+1,np + j=1 + vertpack(i,j,1) = cvlist(ie)%vert(4,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(4,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(4,i,j)%z + j=np + vertpack(i,j,1) = cvlist(ie)%vert(1,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(1,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(1,i,j)%z + end do + + ! 
E+W + do j=2,np/2 + i=1 + vertpack(i,j,1) = cvlist(ie)%vert(3,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(3,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(3,i,j)%z + i=np + vertpack(i,j,1) = cvlist(ie)%vert(4,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(4,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(4,i,j)%z + end do + + do j=np/2+1,np-1 + i=1 + vertpack(i,j,1) = cvlist(ie)%vert(2,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(2,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(2,i,j)%z + i=np + vertpack(i,j,1) = cvlist(ie)%vert(1,i,j)%x + vertpack(i,j,2) = cvlist(ie)%vert(1,i,j)%y + vertpack(i,j,3) = cvlist(ie)%vert(1,i,j)%z + end do + + do j=2,np-1 + do i=2,np-1 + vertpack(i,j,1) =0_r8 + vertpack(i,j,2) =0_r8 + vertpack(i,j,3) =0_r8 + end do + end do + + kptr=0 + call edgeVpack(edge1,vertpack,3,kptr,ie) + end do + + call bndry_exchange(hybrid,edge1,location='construct_cv_gll #2') + + do ie=nets,nete + kptr=0 + call edgeVunpackVert(edge1, cvlist(ie)%vert,ie) + ! Count and orient vert array + ! nvert is an integer: -4,-3,3,4 + ! Positive: 1->2->3->4 is counter clockwise on the sphere + ! Negative: clockwise orientation + do j=1,np + do i=1,np + nvert=0 + do k=1,4 + rvert = cvlist(ie)%vert(k,i,j)%x**2+cvlist(ie)%vert(k,i,j)%y**2+cvlist(ie)%vert(k,i,j)%z**2 + if(rvert>0.9_r8)nvert=nvert+1 + end do + if(.NOT.Orientation(cvlist(ie)%vert(:,i,j),elem(ie)%FaceNum))nvert=-nvert + cvlist(ie)%nvert(i,j) = nvert + corner = ( ((i==1) .and. (j==1)) .or. & + ((i==1) .and. (j==np)) .or. & + ((i==np) .and. (j==1)) .or. & + ((i==np) .and. (j==np)) ) + if (abs(nvert)/=4) then + if (abs(nvert)/=3) then + write(iulog, *) 'i,j,nvert=',i,j,nvert + call shr_sys_flush(iulog) + call endrun('construct_cv_gll: bad value of nvert') + end if + if (.not. corner) then + write(iulog, *) 'non-corner node with only 3 verticies' + write(iulog, *) 'ie,i,j,nvert,corner=',ie,i,j,nvert,corner + write(iulog, *) cvlist(ie)%vert(1,i,j)%x + write(iulog, *) cvlist(ie)%vert(2,i,j)%x + write(iulog, *) cvlist(ie)%vert(3,i,j)%x + write(iulog, *) cvlist(ie)%vert(4,i,j)%x + !write(iulog, *) 'dual:' + !do ie2=nets,nete + ! write(iulog, *) ie2,maxval(cvlist(ie2)%cartp_dual(:,:)%x) + ! write(iulog, *) ie2,maxval(cvlist(ie2)%cartp_dual(:,:)%y) + !end do + call shr_sys_flush(iulog) + call endrun('construct_cv_gll: corner point should have nvert=3') + end if + ! nvert=3. we are at a cube corner. One of the control volume + ! nodes from the 'missing' corner element should be all zeros: + if (cvlist(ie)%vert(1,i,j)%x==0) then + ! ok + else if (cvlist(ie)%vert(2,i,j)%x==0) then + ! ok + else if (cvlist(ie)%vert(3,i,j)%x==0) then + ! ok + else if (cvlist(ie)%vert(4,i,j)%x==0) then + ! ok + else + write(iulog, *) 'cube corner node with 4 neighbors' + write(iulog, *) 'ie,i,j,nvert,corner=',ie,i,j,nvert,corner + write(iulog, *) cvlist(ie)%vert(1,i,j)%x + write(iulog, *) cvlist(ie)%vert(2,i,j)%x + write(iulog, *) cvlist(ie)%vert(3,i,j)%x + write(iulog, *) cvlist(ie)%vert(4,i,j)%x + call shr_sys_flush(iulog) + call endrun('construct_cv_gll: control volume at cube corner should be a triangle') + end if + + end if + end do + end do + end do + end subroutine construct_cv_gll + + logical function Orientation(v, FaceNum) result(orient) + + type(cartesian3d_t), intent(in) :: v(3) + integer, intent(in) :: FaceNum + + type(cartesian3D_t) :: v12, v23 + real(r8) :: test, cart(3,3) + + orient = .FALSE. 
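! default; possibly reset below
+    ! Orientation-test sketch (a general note, not a spec of this code):
+    ! for consecutive vertices v1, v2, v3 of a spherical polygon, the sign
+    ! of the projection of (v2-v1) x (v3-v2) onto an outward direction
+    ! tells whether the vertices run counterclockwise (positive) when seen
+    ! from outside the sphere.  Only the polar faces (5 and 6) are tested
+    ! below; the equatorial faces are taken as counterclockwise already.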
+ + if ((FaceNum == 5).OR.(FaceNum == 6)) then + + cart(1,1) = v(1)%x + cart(2,1) = v(1)%y + cart(3,1) = v(1)%z + + cart(1,2) = v(2)%x + cart(2,2) = v(2)%y + cart(3,2) = v(2)%z + + cart(1,3) = v(3)%x + cart(2,3) = v(3)%y + cart(3,3) = v(3)%z + + v12%x = cart(1,2) - cart(1,1) + v12%y = cart(2,2) - cart(2,1) + v12%z = cart(3,2) - cart(3,1) + + v23%x = cart(1,3) - cart(1,2) + v23%y = cart(2,3) - cart(2,2) + v23%z = cart(3,3) - cart(3,2) + + test = (v12%y*v23%z - v12%z*v23%y)*v12%x & + - (v12%x*v23%z - v12%z*v23%x)*v12%y & + + (v12%x*v23%y - v12%y*v23%x)*v12%z + + if (test > 0_r8)then + orient=.TRUE. + end if + + else + orient=.TRUE. + end if + + end function Orientation + + subroutine VerifVolumes(elem, hybrid,nets,nete) + use hybrid_mod, only: hybrid_t + use element_mod, only: element_t + + type(element_t), intent(in) :: elem(:) + integer, intent(in) :: nets,nete + type(hybrid_t), intent(in) :: hybrid + + real(r8) :: psum,ptot,Vol_tmp(1),corr,maxelem_variation + real(r8) :: vol(np,np,nets:nete),r,rmin,rmax,a1,a2,locmin,locmax,emin,emax,dx,dy + integer :: i,j,ie,kptr,face + + real(r8), pointer :: locvol(:,:) + + dx = pi/(2.0d0*dble(ne)) + dy = dx + + if(.not. initialized) then + call endrun('VerifyVolumes: Attempt to use volumes prior to initializing') + end if + rmin=2 + rmax=0 + maxelem_variation=0 + do ie=nets,nete + locvol => GetVolume(ie) + locmin = minval(locvol(:,:)*elem(ie)%rspheremp(:,:)) + locmax = maxval(locvol(:,:)*elem(ie)%rspheremp(:,:)) + rmin = min(rmin,locmin) + rmax = max(rmax,locmax) + + if (locmax > 1.01_r8) then + write(iulog, *) 'locmin(:,i)=',ie,locvol(1,1),1/elem(ie)%rspheremp(1,1) + end if + + + if (locmax-locmin > maxelem_variation) then + maxelem_variation = locmax-locmin + emin=locmin + emax=locmax + end if + end do + rmin = ParallelMin(rmin,hybrid) + rmax = ParallelMax(rmax,hybrid) + if(hybrid%masterthread) then + write(iulog,'(a,2e14.7)') "Min/max ratio between spherical and GLL area:",rmin,rmax + end if + if (maxelem_variation == ParallelMax(maxelem_variation,hybrid) ) then + write(iulog,'(a,2e14.7)') "Min/max ratio element with largest variation:",emin,emax + end if + call shr_sys_flush(iulog) + + rmin=2 + rmax=0 + do ie=nets,nete + a1 = SurfArea_dxdy(dx,dy,elem(ie)%cartp(1,1)) + a2 = sum(elem(ie)%spheremp(:,:)) + r=a1/a2 + rmin = min(r,rmin) + rmax = max(r,rmax) + end do + rmin = ParallelMin(rmin,hybrid) + rmax = ParallelMax(rmax,hybrid) + if(hybrid%masterthread) then + write(*,'(a,2f12.9)') "Min/max ratio spherical and GLL element area:",rmin,rmax + end if + + do ie=nets,nete + global_shared_buf(ie,1:6) = 0.d0 + face = elem(ie)%FaceNum + locvol => GetVolumeLocal(ie) + do j=1,np + do i=1,np + global_shared_buf(ie,face) = global_shared_buf(ie,face) + locvol(i,j) + end do + end do + end do + call wrap_repro_sum(nvars=6, comm=hybrid%par%comm) + + ptot=0_r8 + do face=1,6 + red_sum%buf(1) = global_shared_sum(face) + psum = red_sum%buf(1) + + ptot = ptot + psum + + if(hybrid%masterthread) then + write(*,'(a,i2,a,2e23.15)') "cube face:",face," : SURFACE FV =",& + 6_r8*psum/(4_r8 * pi), & + 6_r8*psum/(4_r8 * pi)-1 + end if + end do + + if(hybrid%masterthread) then + write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * pi) + end if + + end subroutine VerifVolumes + +end module comp_gll_ctr_vol diff --git a/src/dynamics/se/dycore/control_mod.F90 b/src/dynamics/se/dycore/control_mod.F90 new file mode 100644 index 00000000..d5fc4abe --- /dev/null +++ b/src/dynamics/se/dycore/control_mod.F90 @@ -0,0 +1,122 @@ +! 
This module contains constants and namelist variables used through out the model +! to avoid circular dependancies please do not 'use' any further modules here. +! +module control_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, public, parameter :: MAX_STRING_LEN=240 + integer, public, parameter :: MAX_FILE_LEN=240 +! character(len=MAX_STRING_LEN) , public :: integration ! time integration (only one currently supported is "explicit") + +!shallow water advection tests: +!kmass points to a level with density. other levels contain test tracers + + integer, public :: tstep_type= 0 ! 0 = leapfrog + ! 1 = RK (foward-in-time) + integer, public :: rk_stage_user = 0 ! number of RK stages to use + integer, public :: ftype = 2 ! Forcing Type + integer, public :: ftype_conserve = 1 !conserve momentum (dp*u) + integer, public :: statediag_numtrac = 3 + + integer, public :: qsplit = 1 ! ratio of dynamics tsteps to tracer tsteps + integer, public :: rsplit =-1 ! for vertically lagrangian dynamics, apply remap + ! every rsplit tracer timesteps + logical, public :: variable_nsplit=.false. + + integer, public :: phys_dyn_cp = 0 !=0; no thermal energy scaling of T increment + !=1; scale increment for cp consistency between dynamics and physics + + logical, public :: refined_mesh + + integer, public :: vert_remap_q_alg = 10 + + + integer, public :: cubed_sphere_map = -1 ! -1 = chosen at run time + ! 0 = equi-angle Gnomonic (default) + ! 1 = equi-spaced Gnomonic (not yet coded) + ! 2 = element-local projection (for var-res) + ! 3 = parametric (not yet coded) + +!tolerance to define smth small, was introduced for lim 8 in 2d and 3d + real (kind=r8), public, parameter :: tol_limiter=1.0e-13_r8 + + integer , public :: limiter_option = 0 + + integer , public :: partmethod ! partition methods + character(len=MAX_STRING_LEN) , public :: topology ! options: "cube" is supported + integer , public :: tasknum + integer , public :: remapfreq ! remap frequency of synopsis of system state (steps) + character(len=MAX_STRING_LEN) :: remap_type ! selected remapping option + integer , public :: statefreq ! output frequency of synopsis of system state (steps) + integer , public :: runtype + integer , public :: timerdetail + integer , public :: numnodes + integer , public :: multilevel + + character(len=MAX_STRING_LEN) , public :: columnpackage + + integer , public :: maxits ! max iterations of solver + real (kind=r8), public :: tol ! solver tolerance (convergence criteria) + + integer , public :: fine_ne = -1 ! set for refined exodus meshes (variable viscosity) + real (kind=r8), public :: max_hypervis_courant = 1d99 ! upper bound for Courant number + ! (only used for variable viscosity, recommend 1.9 in namelist) + real (kind=r8), public :: nu = 7.0D5 ! viscosity (momentum equ) + real (kind=r8), public :: nu_div = -1 ! viscsoity (momentum equ, div component) + real (kind=r8), public :: nu_s = -1 ! default = nu T equ. viscosity + real (kind=r8), public :: nu_q = -1 ! default = nu tracer viscosity + real (kind=r8), public :: nu_p = 0.0D5 ! default = 0 ps equ. viscosity + real (kind=r8), public :: nu_top = 0.0D5 ! top-of-the-model viscosity + integer, public :: hypervis_subcycle=1 ! number of subcycles for hyper viscsosity timestep + integer, public :: hypervis_subcycle_sponge=1 ! number of subcycles for hyper viscsosity timestep in sponge + integer, public :: hypervis_subcycle_q=1 ! number of subcycles for hyper viscsosity timestep on TRACERS + integer, public :: psurf_vis = 0 ! 0 = use laplace on eta surfaces + ! 
1 = use (approx.) laplace on p surfaces + + real (kind=r8), public :: hypervis_power=0 ! if not 0, use variable hyperviscosity based on element area + real (kind=r8), public :: hypervis_scaling=0 ! use tensor hyperviscosity + +! +!three types of hyper viscosity are supported right now: +! (1) const hv: nu * del^2 del^2 +! (2) scalar hv: nu(lat,lon) * del^2 del^2 +! (3) tensor hv, nu * ( \div * tensor * \grad ) * del^2 +! +! (1) default: hypervis_power=0, hypervis_scaling=0 +! (2) Original version for var-res grids. (M. Levy) +! scalar coefficient within each element +! hypervisc_scaling=0 +! set hypervis_power>0 and set fine_ne, max_hypervis_courant +! (3) tensor HV var-res grids +! tensor within each element: +! set hypervis_scaling > 0 (typical values would be 3.2 or 4.0) +! hypervis_power=0 +! (\div * tensor * \grad) operator uses cartesian laplace +! + + real (kind=r8), public :: initial_global_ave_dry_ps = 0._r8 ! scale dry surface pressure to initial_global_ave_dry_ps + + integer, public, parameter :: west = 1 + integer, public, parameter :: east = 2 + integer, public, parameter :: south = 3 + integer, public, parameter :: north = 4 + + integer, public, parameter :: swest = 5 + integer, public, parameter :: seast = 6 + integer, public, parameter :: nwest = 7 + integer, public, parameter :: neast = 8 + + ! + ! parameters for sponge layer Rayleigh damping + ! + real(r8), public :: raytau0 + real(r8), public :: raykrange + integer, public :: rayk0 + ! + ! molecular diffusion + ! + real(r8), public :: molecular_diff = -1.0_r8 + + integer, public :: vert_remap_uvTq_alg, vert_remap_tracer_alg + +end module control_mod diff --git a/src/dynamics/se/dycore/coordinate_systems_mod.F90 b/src/dynamics/se/dycore/coordinate_systems_mod.F90 new file mode 100644 index 00000000..b5a845ac --- /dev/null +++ b/src/dynamics/se/dycore/coordinate_systems_mod.F90 @@ -0,0 +1,919 @@ +module coordinate_systems_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use physconst, only: pi + use cam_abortutils, only: endrun + +! WARNING: When using this class be sure that you know if the +! cubic coordinates are on the unit cube or the [-\pi/4,\pi/4] cube +! and if the spherical longitude is in [0,2\pi] or [-\pi,\pi] + implicit none + private + + real(kind=r8), public, parameter :: DIST_THRESHOLD= 1.0e-9_r8 + real(kind=r8), parameter :: one=1.0_r8 + real(kind=r8), parameter :: two=2.0_r8 + + type, public :: cartesian2D_t + real(r8) :: x ! x coordinate + real(r8) :: y ! y coordinate + end type cartesian2D_t + + type, public :: cartesian3D_t + real(r8) :: x ! x coordinate + real(r8) :: y ! y coordinate + real(r8) :: z ! z coordinate + end type cartesian3D_t + + type, public :: spherical_polar_t + real(r8) :: r ! radius + real(r8) :: lon ! longitude + real(r8) :: lat ! latitude + end type spherical_polar_t + + + interface assignment ( = ) + module procedure copy_cart2d + module procedure copy_spherical_polar + end interface + + interface operator( == ) + module procedure eq_cart2d + end interface + + interface distance + module procedure distance_cart2D + module procedure distance_cart2D_v + module procedure distance_cart3D + module procedure distance_cart3D_v + end interface + + interface change_coordinates + module procedure spherical_to_cart_v + module procedure spherical_to_cart + module procedure cart_to_spherical_v + module procedure cart_to_spherical + module procedure aray_to_spherical + end interface + + + ! ========================================== + ! Public Interfaces + ! 
========================================== + + public :: sphere_tri_area + public :: surfareaxy + public :: distance + public :: change_coordinates + public :: cart2cubedsphere ! (x,y,z) -> equal-angle (x,y) + public :: cart2cubedsphere_failsafe + public :: spherical_to_cart ! (lat,lon) -> (x,y,z) + public :: projectpoint ! equal-angle (x,y) -> (lat,lon) + ! should be called cubedsphere2spherical + public :: cubedsphere2cart ! equal-angle (x,y) -> (x,y,z) + public :: sphere2cubedsphere ! (lat,lon) -> equal-angle (x,y) + public :: cube_face_number_from_cart + public :: cube_face_number_from_sphere + +! CE + public :: cart2cubedspherexy ! (x,y,z) -> gnomonic (x,y) + public :: cart2spherical ! gnominic (x,y) -> (lat,lon) + + private :: copy_cart2d + private :: copy_spherical_polar + private :: eq_cart2d + private :: distance_cart2D + private :: distance_cart2D_v + private :: distance_cart3D + private :: distance_cart3D_v + private :: spherical_to_cart_v + !private :: spherical_to_cart + private :: cart_to_spherical_v + private :: cart_to_spherical + private :: aray_to_spherical + +contains + + ! ============================================ + ! copy_cart2d: + ! + ! Overload assignment operator for cartesian2D_t + ! ============================================ + + subroutine copy_cart2d(cart2,cart1) + + type(cartesian2D_t), intent(out) :: cart2 + type(cartesian2D_t), intent(in) :: cart1 + cart2%x=cart1%x + cart2%y=cart1%y + end subroutine copy_cart2d + + ! ============================================ + ! copy_spherical_polar: + ! + ! Overload assignment operator for spherical_polar_t + ! ============================================ + + pure subroutine copy_spherical_polar(sph2, sph1) + + type(spherical_polar_t), intent(out) :: sph2 + type(spherical_polar_t), intent(in) :: sph1 + sph2%r = sph1%r + sph2%lat = sph1%lat + sph2%lon = sph1%lon + end subroutine copy_spherical_polar + + ! ============================================ + ! eq_cart2d: + ! + ! Overload == operator for cartesian2D_t + ! ============================================ + + pure function eq_cart2d(cart2,cart1) result(is_same) + + type(cartesian2D_t), intent(in) :: cart2 + type(cartesian2D_t), intent(in) :: cart1 + + logical :: is_same + + if (distance(cart1,cart2)= DIST_THRESHOLD) then + + if ( abs(abs(sphere%lat)-PI/2) >= DIST_THRESHOLD ) then + sphere%lon=ATAN2(cart%y,cart%x) + if (sphere%lon<0) then + sphere%lon=sphere%lon + 2*PI + end if + end if + + end function cart_to_spherical + + pure function aray_to_spherical(coordinates) result (sphere) + implicit none + real(kind=r8), intent(in) :: coordinates(3) + type(spherical_polar_t) :: sphere + type(cartesian3D_t) :: cart + cart%x = coordinates(1) + cart%y = coordinates(2) + cart%z = coordinates(3) + sphere = cart_to_spherical(cart) + end function aray_to_spherical + + + pure function cart_to_spherical_v(cart) result (sphere) + + type(cartesian3D_t), intent(in) :: cart(:) + type(spherical_polar_t) :: sphere(SIZE(cart)) + + integer :: i + forall (i=1:SIZE(cart)) sphere(i) = cart_to_spherical(cart(i)) + end function cart_to_spherical_v + + + + + function unit_face_based_cube_to_unit_sphere(cart, face_no) result(sphere) + +! Note: Output spherical longitude is [-pi,pi] + +! Project from a UNIT cube to a UNIT sphere. ie, the lenght of the cube edge is 2. +! Face 1 of the cube touches the sphere at longitude, latitude (0,0). The negative +! x axis is negative longitude (ie. going west is negative), the positive x axis +! is increasing longitude. 
Face 1 maps the Face 1 to the lat,lon on the sphere: +! [-1,1] x [-1,1] => [-\pi/4,\pi/4] x [-\pi/4, \pi/4] + +! Face 2 continues with increasing longitude (ie to the east of Face 1). +! The left edge of Face 2 (negative x) is the right edge of Face 1 (positive x) +! The latitude is the same as Face 1, but the longitude increases: +! [-1,1] x [-1,1] => [\pi/4, 3\pi/4] x [-\pi/4, \pi/4] + +! Face 3 continues with increasing longitude (ie to the east of Face 2). +! Face 3 is like Face 1, but the x coordinates are reversed, ie. decreasing x +! is increasing longitude: +! [-1,1] x [-1,1] = [-1,0] x [-1,1] U [0,1] x [-1,1] => +! [3\pi/4,\pi] x [-\pi, -3\pi/4] + +! Face 4 finally connects Face 3 to Face 1. Like Face 2, but wtih opposite x +! [-1,1] x [-1,1] => [-3\pi/4, -\pi/4] x [-\pi/4, \pi/4] + +! Face 5 is along the bottom edges of Faces 1,2,3,and 4 so the latitude goes from +! -\pi/4 to -\pi/2. The tricky part is lining up the longitude. The zero longitude +! must line up with the center of Face 1. ATAN2(x,1) = 0 => x = 0. +! So the (0,1) point on Face 5 is the zero longitude on the sphere. The top edge of +! Face 5 is the bottom edge of Face 1. +! ATAN(x,0) = \pi/2 => x = 1, so the right edge of Face 5 is the bottom of Face 2. +! Continueing, the bottom edge of 5 is the bottom of 3. Left of 5 is bottom of 4. + +! Face 6 is along the top edges of Faces 1,2,3 and 4 so the latitude goes from +! \pi/4 to \pi/2. The zero longitude must line up with the center of Face 1. +! This is just like Face 5, but the y axis is reversed. So the bottom edge of Face 6 +! is the top edge of Face 1. The right edge of Face 6 is the top of Face 2. The +! top of 6 the top of 3 and the left of 6 the top of 4. + + type (cartesian2d_t), intent(in) :: cart ! On face_no of a unit cube + integer, intent(in) :: face_no + + type (spherical_polar_t) :: sphere + + integer i,j + real(kind=r8) :: r!, l_inf + +! MNL: removing check that points are on the unit cube because we allow +! spherical grids to map beyond the extent of the cube (though we probably +! should still have an upper bound for how far past the edge the element lies) +! l_inf = MAX(ABS(cart%x), ABS(cart%y)) +! if (1.01 < l_inf) then +! call endrun('unit_face_based_cube_to_unit_sphere: Input not on unit cube.') +! end if + + sphere%r=one + r = SQRT( one + (cart%x)**2 + (cart%y)**2) + select case (face_no) + case (1) + sphere%lat=ASIN((cart%y)/r) + sphere%lon=ATAN2(cart%x,one) + case (2) + sphere%lat=ASIN((cart%y)/r) + sphere%lon=ATAN2(one,-cart%x) + case (3) + sphere%lat=ASIN((cart%y)/r) + sphere%lon=ATAN2(-cart%x,-one) + case (4) + sphere%lat=ASIN((cart%y)/r) + sphere%lon=ATAN2(-one,cart%x) + case (5) + if (ABS(cart%y) > DIST_THRESHOLD .or. ABS(cart%x) > DIST_THRESHOLD ) then + sphere%lon=ATAN2(cart%x, cart%y ) + else + sphere%lon= 0.0_r8 ! longitude is meaningless at south pole set to 0.0 + end if + sphere%lat=ASIN(-one/r) + case (6) + if (ABS(cart%y) > DIST_THRESHOLD .or. ABS(cart%x) > DIST_THRESHOLD ) then + sphere%lon = ATAN2(cart%x, -cart%y) + else + sphere%lon= 0.0_r8 ! longitude is meaningless at north pole set to 0.0 + end if + sphere%lat=ASIN(one/r) + case default + call endrun('unit_face_based_cube_to_unit_sphere: Face number not 1 to 6.') + end select + + if (sphere%lon < 0.0_r8) then + sphere%lon=sphere%lon + two*PI + end if + + end function unit_face_based_cube_to_unit_sphere + + function cart2spherical(x,y, face_no) result(sphere) +! IMPORTANT: INPUT ARE the REAL cartesian from the cube sphere +! Note: Output spherical longitude is [-pi,pi] + +! 
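+! A quick worked check of the face-1 branch below (assuming gnomonic
+! inputs x = y = 1, i.e. the NE cube corner of face 1): r = sqrt(3),
+! so lat = asin(1/sqrt(3)) ~ 35.26 degrees and lon = atan2(1,1) = pi/4,
+! which is the direction of the cube corner (1,1,1)/sqrt(3).
+! 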
Project from a UNIT cube to a UNIT sphere. ie, the lenght of the cube edge is 2. +! Face 1 of the cube touches the sphere at longitude, latitude (0,0). The negative +! x axis is negative longitude (ie. going west is negative), the positive x axis +! is increasing longitude. Face 1 maps the Face 1 to the lat,lon on the sphere: +! [-1,1] x [-1,1] => [-\pi/4,\pi/4] x [-\pi/4, \pi/4] + +! Face 2 continues with increasing longitude (ie to the east of Face 1). +! The left edge of Face 2 (negative x) is the right edge of Face 1 (positive x) +! The latitude is the same as Face 1, but the longitude increases: +! [-1,1] x [-1,1] => [\pi/4, 3\pi/4] x [-\pi/4, \pi/4] + +! Face 3 continues with increasing longitude (ie to the east of Face 2). +! Face 3 is like Face 1, but the x coordinates are reversed, ie. decreasing x +! is increasing longitude: +! [-1,1] x [-1,1] = [-1,0] x [-1,1] U [0,1] x [-1,1] => +! [3\pi/4,\pi] x [-\pi, -3\pi/4] + +! Face 4 finally connects Face 3 to Face 1. Like Face 2, but wtih opposite x +! [-1,1] x [-1,1] => [-3\pi/4, -\pi/4] x [-\pi/4, \pi/4] + +! Face 5 is along the bottom edges of Faces 1,2,3,and 4 so the latitude goes from +! -\pi/4 to -\pi/2. The tricky part is lining up the longitude. The zero longitude +! must line up with the center of Face 1. ATAN2(x,1) = 0 => x = 0. +! So the (0,1) point on Face 5 is the zero longitude on the sphere. The top edge of +! Face 5 is the bottom edge of Face 1. +! ATAN(x,0) = \pi/2 => x = 1, so the right edge of Face 5 is the bottom of Face 2. +! Continueing, the bottom edge of 5 is the bottom of 3. Left of 5 is bottom of 4. + +! Face 6 is along the top edges of Faces 1,2,3 and 4 so the latitude goes from +! \pi/4 to \pi/2. The zero longitude must line up with the center of Face 1. +! This is just like Face 5, but the y axis is reversed. So the bottom edge of Face 6 +! is the top edge of Face 1. The right edge of Face 6 is the top of Face 2. The +! top of 6 the top of 3 and the left of 6 the top of 4. + + implicit none + real(kind=r8), intent(in) :: x,y ! On face_no of a unit cube + integer, intent(in) :: face_no + + type (spherical_polar_t) :: sphere + + integer i,j + real(kind=r8) :: r!, l_inf + +! MNL: removing check that points are on the unit cube because we allow +! spherical grids to map beyond the extent of the cube (though we probably +! should still have an upper bound for how far past the edge the element lies) +! l_inf = MAX(ABS(cart%x), ABS(cart%y)) +! if (1.01 < l_inf) then +! call endrun('unit_face_based_cube_to_unit_sphere: Input not on unit cube.') +! end if + + sphere%r=one + r = SQRT( one + x**2 + y**2) + select case (face_no) + case (1) + sphere%lat=ASIN(y/r) + sphere%lon=ATAN2(x,one) + case (2) + sphere%lat=ASIN(y/r) + sphere%lon=ATAN2(one,-x) + case (3) + sphere%lat=ASIN(y/r) + sphere%lon=ATAN2(-x,-one) + case (4) + sphere%lat=ASIN(y/r) + sphere%lon=ATAN2(-one,x) + case (5) + if (ABS(y) > DIST_THRESHOLD .or. ABS(x) > DIST_THRESHOLD ) then + sphere%lon=ATAN2(x, y ) + else + sphere%lon= 0.0_r8 ! longitude is meaningless at south pole set to 0.0 + end if + sphere%lat=ASIN(-one/r) + case (6) + if (ABS(y) > DIST_THRESHOLD .or. ABS(x) > DIST_THRESHOLD ) then + sphere%lon = ATAN2(x, -y) + else + sphere%lon= 0.0_r8 ! 
longitude is meaningless at north pole set to 0.0 + end if + sphere%lat=ASIN(one/r) + case default + call endrun('unit_face_based_cube_to_unit_sphere: Face number not 1 to 6.') + end select + + if (sphere%lon < 0.0_r8) then + sphere%lon=sphere%lon + two*PI + end if + + end function cart2spherical + + + + + + + + +! Note: Output spherical longitude is [-pi,pi] + function projectpoint(cartin, face_no) result(sphere) + +! Projection from a [-pi/4, \pi/4] sized cube. +! This will be checked because unit_face_based_cube_to_unit_sphere checks the ranges. +! See unit_face_based_cube_to_unit_sphere for documentation. + + implicit none + type (cartesian2d_t), intent(in) :: cartin + integer, intent(in) :: face_no + type (spherical_polar_t) :: sphere + type (cartesian2d_t) :: cart + + !ASC This is X and Y and not xhi eta ... + + cart%x = TAN(cartin%x) + cart%y = TAN(cartin%y) + + sphere = unit_face_based_cube_to_unit_sphere(cart, face_no) + + end function projectpoint + + ! takes a 2D point on a face of the cube of size [-\pi/4, \pi/4] and projects it + ! onto a 3D point on a cube of size [-1,1] in R^3 + function cubedsphere2cart(cartin, face_no) result(cart) + implicit none + type (cartesian2d_t), intent(in) :: cartin ! assumed to be cartesian coordinates of cube + integer, intent(in) :: face_no + + type(cartesian3D_t) :: cart + + cart = spherical_to_cart(projectpoint(cartin, face_no)) + + end function cubedsphere2cart + + + ! onto a cube of size [-\pi/2,\pi/2] in R^3 + ! the spherical longitude can be either in [0,2\pi] or [-\pi,\pi] + pure function sphere2cubedsphere (sphere, face_no) result(cart) + implicit none + type(spherical_polar_t), intent(in) :: sphere + integer, intent(in) :: face_no + + type(cartesian2d_t) :: cart + real(kind=r8) :: xp,yp + real(kind=r8) :: lat,lon + real(kind=r8) :: twopi, pi2, pi3, pi4 + + lat = sphere%lat + lon = sphere%lon + + twopi = 2.0_r8 * pi + pi2 = pi * 0.5_r8 + pi3 = pi * 1.5_r8 + pi4 = pi * 0.25_r8 + + select case (face_no) + case (1) + xp = lon + if (pi < lon) xp = lon - twopi !if lon in [0,2\pi] + yp = atan(tan(lat)/cos(xp)) + case (2) + xp = lon - pi2 + yp = atan(tan(lat)/cos(xp)) + case (3) + xp = lon - pi + if (lon < 0) xp = lon + pi !if lon in [0,2\pi] + yp = atan(tan(lat)/cos(xp)) + case (4) + xp = lon - pi3 + if (lon < 0) xp = lon + pi2 !if lon in [0,2\pi] + yp = atan(tan(lat)/cos(xp)) + case (5) + xp = atan(-sin(lon)/tan(lat)) + yp = atan(-cos(lon)/tan(lat)) + case (6) + xp = atan( sin(lon)/tan(lat)) + yp = atan(-cos(lon)/tan(lat)) + end select + + ! coordinates on the cube: + cart%x = xp + cart%y = yp + + end function sphere2cubedsphere + +! Go from an arbitrary sized cube in 3D +! to a [-\pi/4,\pi/4] sized cube with (face,2d) coordinates. +! +! Z +! | +! | +! | +! | +! ---------------Y +! / +! / +! / +! / +! X +! +! NOTE: Face 1 => X positive constant face of cube +! Face 2 => Y positive constant face of cube +! Face 3 => X negative constant face of cube +! Face 4 => Y negative constant face of cube +! Face 5 => Z negative constant face of cube +! 
Face 6 => Z positive constant face of cube + pure function cart2cubedsphere(cart3D, face_no) result(cart) + + implicit none + type(cartesian3D_t),intent(in) :: cart3d + integer, intent(in) :: face_no + type (cartesian2d_t) :: cart + + real(kind=r8) :: x,y + + select case (face_no) + case (1) + x = cart3D%y/cart3D%x + y = cart3D%z/cart3D%x + case (2) + x = -cart3D%x/cart3D%y + y = cart3D%z/cart3D%y + case (3) + x = cart3D%y/cart3D%x + y = -cart3D%z/cart3D%x + case (4) + x = -cart3D%x/cart3D%y + y = -cart3D%z/cart3D%y + case (5) + x = -cart3D%y/cart3D%z + y = -cart3D%x/cart3D%z + case (6) + x = cart3D%y/cart3D%z + y = -cart3D%x/cart3D%z + end select + cart%x = ATAN(x) + cart%y = ATAN(y) + end function cart2cubedsphere + + function cart2cubedsphere_failsafe(cart3D, face_no) result(cart) + implicit none + type(cartesian3D_t),intent(in) :: cart3d + integer, intent(in) :: face_no + type (cartesian2d_t) :: cart + + real(kind=r8) :: x,y + + select case (face_no) + case (1) + if (abs(cart3D%x) < 1.E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = cart3D%y/cart3D%x + y = cart3D%z/cart3D%x + case (2) + if (abs(cart3D%y)<1.0E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = -cart3D%x/cart3D%y + y = cart3D%z/cart3D%y + case (3) + if (abs(cart3D%x)<1.0E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = cart3D%y/cart3D%x + y = -cart3D%z/cart3D%x + case (4) + if (abs(cart3D%y)<1.0E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = -cart3D%x/cart3D%y + y = -cart3D%z/cart3D%y + case (5) + if (abs(cart3D%z)<1.0E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = -cart3D%y/cart3D%z + y = -cart3D%x/cart3D%z + case (6) + if (abs(cart3D%z)<1.0E-13_r8) then + cart%x=9.0E9_r8 + cart%y=9.0E9_r8 + return + end if + x = cart3D%y/cart3D%z + y = -cart3D%x/cart3D%z + case default + write(*,*) "face_no out out range ",face_no + end select + cart%x = ATAN(x) + cart%y = ATAN(y) + end function cart2cubedsphere_failsafe + + + +! This function divides three dimentional space up into +! six sectors. These sectors are then considered as the +! faces of the cube. It should work for any (x,y,z) coordinate +! if on a sphere or on a cube. + pure function cube_face_number_from_cart(cart) result(face_no) + + implicit none + type(cartesian3D_t),intent(in) :: cart + integer :: face_no + + real(r8) :: x,y,z + x=cart%x + y=cart%y + z=cart%z + +! Divide the X-Y plane into for quadrants of +! [-\pi/2,\pi/2], [\pi/2,3\pi/2], ..... +! based on the lines X=Y and X=-Y. This divides +! 3D space up into four sections. Doing the same +! for the XZ and YZ planes divides space into six +! sections. Can also be thought of as conic sections +! in the L_infinity norm. + + if (y-x) then ! x>0, Face 1,5 or 6 + if (z>x) then + face_no=6 ! north pole + else if (z<-x) then + face_no=5 ! south pole + else + face_no = 1 + endif + else if (y>x .and. y<-x) then ! x<0 + if (z>-x) then + face_no=6 ! north pole + else if (zx .and. y>-x) then ! y>0 + if (z>y) then + face_no=6 ! north pole + else if (z<-y) then + face_no = 5 ! south pole + else + face_no=2 + endif + else if (y-y) then + face_no=6 ! north pole + else if (z pi/4,pi/4 + ! this formula gives 2 so normalize by 4pi/6 / 2 = pi/3 + ! 
use implementation where the nodes a counterclockwise (not as in the paper) + a1 = acos(-sin(atan(x1))*sin(atan(y1))) + a2 =-acos(-sin(atan(x2))*sin(atan(y1))) + a3 = acos(-sin(atan(x2))*sin(atan(y2))) + a4 =-acos(-sin(atan(x1))*sin(atan(y2))) + area = (a1+a2+a3+a4) + return +end function surfareaxy + + + +end module coordinate_systems_mod diff --git a/src/dynamics/se/dycore/cube_mod.F90 b/src/dynamics/se/dycore/cube_mod.F90 new file mode 100644 index 00000000..e467fc42 --- /dev/null +++ b/src/dynamics/se/dycore/cube_mod.F90 @@ -0,0 +1,2332 @@ +module cube_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use coordinate_systems_mod, only: spherical_polar_t, cartesian3D_t, cartesian2d_t, & + projectpoint, cubedsphere2cart, spherical_to_cart, sphere_tri_area,dist_threshold, & + change_coordinates + + use physconst, only: pi, rearth + use control_mod, only: hypervis_scaling, cubed_sphere_map + use cam_abortutils, only: endrun + + implicit none + private + + integer,public, parameter :: nfaces = 6 ! number of faces on the cube + integer,public, parameter :: nInnerElemEdge = 8 ! number of edges for an interior element + integer,public, parameter :: nCornerElemEdge = 4 ! number of corner elements + + real(kind=r8), public, parameter :: cube_xstart = -0.25_R8*PI + real(kind=r8), public, parameter :: cube_xend = 0.25_R8*PI + real(kind=r8), public, parameter :: cube_ystart = -0.25_R8*PI + real(kind=r8), public, parameter :: cube_yend = 0.25_R8*PI + + + type, public :: face_t + type (spherical_polar_t) :: sphere0 ! tangent point of face on sphere + type (spherical_polar_t) :: sw ! sw corner of face on sphere + type (spherical_polar_t) :: se ! se corner of face on sphere + type (spherical_polar_t) :: ne ! ne corner of face on sphere + type (spherical_polar_t) :: nw ! nw corner of face on sphere + type (cartesian3D_t) :: P0 + type (cartesian3D_t) :: X0 + type (cartesian3D_t) :: Y0 + integer :: number + integer :: padding ! pad the struct + end type face_t + + type, public :: cube_face_coord_t + real(r8) :: x ! x coordinate + real(r8) :: y ! y coordinate + type (face_t), pointer :: face ! face + end type cube_face_coord_t + + ! ========================================== + ! Public Interfaces + ! ========================================== + + public :: CubeTopology + + ! Rotate the North Pole: used for JW baroclinic test case + ! Settings this only changes Coriolis. + ! User must also rotate initial condition + real (kind=r8), public :: rotate_grid = 0 + + ! =============================== + ! Public methods for cube + ! =============================== + + public :: cube_init_atomic + public :: convert_gbl_index + public :: vmap,dmap + public :: covariant_rot + public :: contravariant_rot + public :: set_corner_coordinates + public :: assign_node_numbers_to_elem + + + public :: CubeEdgeCount + public :: CubeElemCount + public :: CubeSetupEdgeIndex + public :: rotation_init_atomic + public :: ref2sphere + + ! =============================== + ! Private methods + ! =============================== + private :: coordinates_atomic + private :: metric_atomic + private :: coreolis_init_atomic + +contains + + ! ======================================= + ! cube_init_atomic: + ! + ! Initialize element descriptors for + ! cube sphere case for each element ... + ! 
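+  ! In outline: set the face number from elem%vertex, compute the GLL
+  ! point coordinates on the sphere, build the metric terms (D, Dinv,
+  ! met, metinv, metdet), and initialize the Coriolis term.  The element
+  ! corners (elem%corners) and elem%vertex are assumed to have been set
+  ! by the caller.
+  ! 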
======================================= + subroutine cube_init_atomic(elem, gll_points, alpha_in) + use element_mod, only : element_t + use dimensions_mod, only : np + + type (element_t), intent(inout) :: elem + real(r8), intent(in) :: gll_points(np) + real(r8), optional, intent(in) :: alpha_in + + real(r8) :: alpha + + if (present(alpha_in)) then + alpha = alpha_in + else + alpha = 1.0_r8 + end if + + elem%FaceNum=elem%vertex%face_number + call coordinates_atomic(elem,gll_points) + + call metric_atomic(elem, gll_points, alpha) + + call coreolis_init_atomic(elem) + elem%desc%use_rotation= 0 + + end subroutine cube_init_atomic + + ! ======================================= + ! coordinates_atomic: + ! + ! Initialize element coordinates for + ! cube-sphere case ... (atomic) + ! + ! ======================================= + + subroutine coordinates_atomic(elem, gll_points) + use element_mod, only: element_t, element_var_coordinates + use dimensions_mod, only: np + + type(element_t), intent(inout) :: elem + real(r8), intent(in) :: gll_points(np) + + real(r8) :: area1,area2 + type (cartesian3d_t) :: quad(4) + integer :: face_no,i,j + + face_no = elem%vertex%face_number + ! compute the corners in Cartesian coordinates + do i=1,4 + elem%corners3D(i)=cubedsphere2cart(elem%corners(i),face_no) + enddo + + ! ========================================= + ! compute lat/lon coordinates of each GLL point + ! ========================================= + do i=1,np + do j=1,np + elem%spherep(i,j)=ref2sphere(gll_points(i),gll_points(j),elem%corners3D,cubed_sphere_map,elem%corners,elem%facenum) + enddo + enddo + + ! also compute the [-pi/2,pi/2] cubed sphere coordinates: + elem%cartp=element_var_coordinates(elem%corners,gll_points) + + ! Matrix describing vector conversion to cartesian + ! Zonal direction + elem%vec_sphere2cart(:,:,1,1) = -SIN(elem%spherep(:,:)%lon) + elem%vec_sphere2cart(:,:,2,1) = COS(elem%spherep(:,:)%lon) + elem%vec_sphere2cart(:,:,3,1) = 0.0_r8 + ! Meridional direction + elem%vec_sphere2cart(:,:,1,2) = -SIN(elem%spherep(:,:)%lat)*COS(elem%spherep(:,:)%lon) + elem%vec_sphere2cart(:,:,2,2) = -SIN(elem%spherep(:,:)%lat)*SIN(elem%spherep(:,:)%lon) + elem%vec_sphere2cart(:,:,3,2) = COS(elem%spherep(:,:)%lat) + + end subroutine coordinates_atomic + + ! elem_jacobians: + ! + ! Calculate Jacobian associated with mapping + ! from arbitrary quadrilateral to [-1,1]^2 + ! along with its inverse and determinant + ! ========================================== + + subroutine elem_jacobians(coords, unif2quadmap) + + use dimensions_mod, only : np + type (cartesian2D_t), dimension(np,np), intent(in) :: coords + ! unif2quadmap is the bilinear map from [-1,1]^2 -> arbitrary quadrilateral + real (kind=r8), dimension(4,2), intent(out) :: unif2quadmap + integer :: ii,jj + + unif2quadmap(1,1)=(coords(1,1)%x+coords(np,1)%x+coords(np,np)%x+coords(1,np)%x)/4.0_r8 + unif2quadmap(1,2)=(coords(1,1)%y+coords(np,1)%y+coords(np,np)%y+coords(1,np)%y)/4.0_r8 + unif2quadmap(2,1)=(-coords(1,1)%x+coords(np,1)%x+coords(np,np)%x-coords(1,np)%x)/4.0_r8 + unif2quadmap(2,2)=(-coords(1,1)%y+coords(np,1)%y+coords(np,np)%y-coords(1,np)%y)/4.0_r8 + unif2quadmap(3,1)=(-coords(1,1)%x-coords(np,1)%x+coords(np,np)%x+coords(1,np)%x)/4.0_r8 + unif2quadmap(3,2)=(-coords(1,1)%y-coords(np,1)%y+coords(np,np)%y+coords(1,np)%y)/4.0_r8 + unif2quadmap(4,1)=(coords(1,1)%x-coords(np,1)%x+coords(np,np)%x-coords(1,np)%x)/4.0_r8 + unif2quadmap(4,2)=(coords(1,1)%y-coords(np,1)%y+coords(np,np)%y-coords(1,np)%y)/4.0_r8 + + end subroutine elem_jacobians + + ! 
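+  ! What u2qmap encodes (a sketch, assuming the bilinear map is written
+  ! as p(a,b) = c1 + c2*a + c3*b + c4*a*b with a,b in [-1,1]): row k of
+  ! unif2quadmap above holds (ck_x, ck_y), built from the corner values,
+  ! and the Jacobian assembled later in dmap_equiangular is
+  !   Jp = [ c2_x + c4_x*b   c3_x + c4_x*a ]
+  !        [ c2_y + c4_y*b   c3_y + c4_y*a ]
+  ! 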
========================================= + ! metric_atomic: + ! + ! Initialize cube-sphere metric terms: + ! equal angular elements (atomic) + ! initialize: + ! metdet, rmetdet (analytic) = detD, 1/detD + ! met (analytic) D^t D (symmetric) + ! metdet (analytic) = detD + ! metinv (analytic) Dinv Dinv^t (symmetic) + ! D (from subroutine vmap) + ! Dinv (computed directly from D) + ! + ! ucontra = Dinv * u = metinv * ucov + ! ucov = D^t * u = met * ucontra + ! + ! we also compute DE = D*E, where + ! E = eigenvectors of metinv as a basis metinv = E LAMBDA E^t + ! + ! ueig = E^t ucov = E^t D^t u = (DE)^t u + ! + ! + ! so if we want to tweak the mapping by a factor alpha (so he weights add up to 4pi, for example) + ! we take: + ! NEW OLD + ! D = sqrt(alpha) D and then rederive all quantities. + ! detD = alpha detD + ! + ! where alpha = 4pi/SEMarea, SEMarea = global sum elem(ie)%mv(i,j)*elem(ie)%metdet(i,j) + ! + ! ========================================= + + subroutine metric_atomic(elem,gll_points,alpha) + use element_mod, only: element_t + use dimensions_mod, only: np + use physconst, only: ra + + type (element_t), intent(inout) :: elem + real(r8), intent(in) :: alpha + real(r8), intent(in) :: gll_points(np) + + ! Local variables + integer ii + integer i,j,nn + integer iptr + + real (kind=r8) :: r ! distance from origin for point on cube tangent to unit sphere + + real (kind=r8) :: const, norm + real (kind=r8) :: detD ! determinant of vector field mapping matrix. + + real (kind=r8) :: x1 ! 1st cube face coordinate + real (kind=r8) :: x2 ! 2nd cube face coordinate + real (kind=r8) :: tmpD(2,2) + real (kind=r8) :: M(2,2),E(2,2),eig(2),DE(2,2),DEL(2,2),V(2,2), nu1, nu2, lamStar1, lamStar2 + integer :: imaxM(2) + real (kind=r8) :: l1, l2, sc,min_svd,max_svd,max_normDinv + + + ! ============================================== + ! Initialize differential mapping operator + ! to and from vector fields on the sphere to + ! contravariant vector fields on the cube + ! i.e. dM/dx^i in Sadourney (1972) and it's + ! inverse + ! ============================================== + + ! MNL: Calculate Jacobians of bilinear map from cubed-sphere to ref element + if (cubed_sphere_map==0) then + call elem_jacobians(elem%cartp, elem%u2qmap) + endif + + max_svd = 0.0_r8 + max_normDinv = 0.0_r8 + min_svd = 1d99 + do j=1,np + do i=1,np + x1=gll_points(i) + x2=gll_points(j) + call Dmap(elem%D(i,j,:,:),x1,x2,elem%corners3D,cubed_sphere_map,elem%corners,elem%u2qmap,elem%facenum) + + + ! Numerical metric tensor based on analytic D: met = D^T times D + ! (D maps between sphere and reference element) + elem%met(i,j,1,1) = elem%D(i,j,1,1)*elem%D(i,j,1,1) + & + elem%D(i,j,2,1)*elem%D(i,j,2,1) + elem%met(i,j,1,2) = elem%D(i,j,1,1)*elem%D(i,j,1,2) + & + elem%D(i,j,2,1)*elem%D(i,j,2,2) + elem%met(i,j,2,1) = elem%D(i,j,1,1)*elem%D(i,j,1,2) + & + elem%D(i,j,2,1)*elem%D(i,j,2,2) + elem%met(i,j,2,2) = elem%D(i,j,1,2)*elem%D(i,j,1,2) + & + elem%D(i,j,2,2)*elem%D(i,j,2,2) + + ! compute D^-1... + ! compute determinant of D mapping matrix... if not zero compute inverse + + detD = elem%D(i,j,1,1)*elem%D(i,j,2,2) - elem%D(i,j,1,2)*elem%D(i,j,2,1) + + elem%Dinv(i,j,1,1) = elem%D(i,j,2,2)/detD + elem%Dinv(i,j,1,2) = -elem%D(i,j,1,2)/detD + elem%Dinv(i,j,2,1) = -elem%D(i,j,2,1)/detD + elem%Dinv(i,j,2,2) = elem%D(i,j,1,1)/detD + + ! L2 norm = sqrt max eigenvalue of metinv + ! = 1/sqrt(min eigenvalue of met) + ! l1 and l2 are eigenvalues of met + ! 
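+        ! Reminder: for a symmetric 2x2 matrix M the eigenvalues are
+        !   ( M11 + M22 +/- sqrt( (M11-M22)**2 + 4*M12*M21 ) ) / 2,
+        ! which is the discriminant form used for l1 and l2 below.
+        ! 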
(should both be positive, l1 > l2) + l1 = (elem%met(i,j,1,1) + elem%met(i,j,2,2) + sqrt(4.0_r8*elem%met(i,j,1,2)*elem%met(i,j,2,1) + & + (elem%met(i,j,1,1) - elem%met(i,j,2,2))**2))/2.0_r8 + l2 = (elem%met(i,j,1,1) + elem%met(i,j,2,2) - sqrt(4.0_r8*elem%met(i,j,1,2)*elem%met(i,j,2,1) + & + (elem%met(i,j,1,1) - elem%met(i,j,2,2))**2))/2.0_r8 + ! Max L2 norm of Dinv is sqrt of max eigenvalue of metinv + ! max eigenvalue of metinv is 1/min eigenvalue of met + norm = 1.0_r8/sqrt(min(abs(l1),abs(l2))) + max_svd = max(norm, max_svd) + ! Min L2 norm of Dinv is sqrt of min eigenvalue of metinv + ! min eigenvalue of metinv is 1/max eigenvalue of met + norm = 1.0_r8/sqrt(max(abs(l1),abs(l2))) + min_svd = min(norm, min_svd) + + ! some kind of pseudo-norm of Dinv + ! C = 1/sqrt(2) sqrt( |g^x|^2 + |g^y|^2 + 2*|g^x dot g^y|) + ! = 1/sqrt(2) sqrt( |g_x|^2 + |g_y|^2 + 2*|g_x dot g_y|) / J + ! g^x = Dinv(:,1) g_x = D(1,:) + ! g^y = Dinv(:,2) g_y = D(2,:) + norm = (2*abs(sum(elem%Dinv(i,j,:,1)*elem%Dinv(i,j,:,2))) + sum(elem%Dinv(i,j,:,1)**2) + sum(elem%Dinv(i,j,:,2)**2)) + norm = sqrt(norm) +! norm = (2*abs(sum(elem%D(1,:,i,j)*elem%D(2,:,i,j))) + sum(elem%D(1,:,i,j)**2) + sum(elem%D(2,:,i,j)**2)) +! norm = sqrt(norm)/detD + max_normDinv = max(norm,max_normDinv) + + + ! Need inverse of met if not calculated analytically + elem%metdet(i,j) = abs(detD) + elem%rmetdet(i,j) = 1.0_R8/abs(detD) + + elem%metinv(i,j,1,1) = elem%met(i,j,2,2)/(detD*detD) + elem%metinv(i,j,1,2) = -elem%met(i,j,1,2)/(detD*detD) + elem%metinv(i,j,2,1) = -elem%met(i,j,2,1)/(detD*detD) + elem%metinv(i,j,2,2) = elem%met(i,j,1,1)/(detD*detD) + + ! matricies for tensor hyper-viscosity + ! compute eigenvectors of metinv (probably same as computed above) + M = elem%metinv(i,j,:,:) + + eig(1) = (M(1,1) + M(2,2) + sqrt(4.0_r8*M(1,2)*M(2,1) + & + (M(1,1) - M(2,2))**2))/2.0_r8 + eig(2) = (M(1,1) + M(2,2) - sqrt(4.0_r8*M(1,2)*M(2,1) + & + (M(1,1) - M(2,2))**2))/2.0_r8 + + ! use DE to store M - Lambda, to compute eigenvectors + DE=M + DE(1,1)=DE(1,1)-eig(1) + DE(2,2)=DE(2,2)-eig(1) + + imaxM = maxloc(abs(DE)) + if (maxval(abs(DE))==0) then + E(1,1)=1; E(2,1)=0; + elseif ( imaxM(1)==1 .and. imaxM(2)==1 ) then + E(2,1)=1; E(1,1) = -DE(2,1)/DE(1,1) + else if ( imaxM(1)==1 .and. imaxM(2)==2 ) then + E(2,1)=1; E(1,1) = -DE(2,2)/DE(1,2) + else if ( imaxM(1)==2 .and. imaxM(2)==1 ) then + E(1,1)=1; E(2,1) = -DE(1,1)/DE(2,1) + else if ( imaxM(1)==2 .and. imaxM(2)==2 ) then + E(1,1)=1; E(2,1) = -DE(1,2)/DE(2,2) + else + call endrun('Impossible error in cube_mod.F90::metric_atomic()') + endif + + ! the other eigenvector is orthgonal: + E(1,2)=-E(2,1) + E(2,2)= E(1,1) + +!normalize columns + E(:,1)=E(:,1)/sqrt(sum(E(:,1)*E(:,1))); + E(:,2)=E(:,2)/sqrt(sum(E(:,2)*E(:,2))); + + +! OBTAINING TENSOR FOR HV: + +! Instead of the traditional scalar Laplace operator \grad \cdot \grad +! we introduce \grad \cdot V \grad +! where V = D E LAM LAM^* E^T D^T. +! Recall (metric_tensor)^{-1}=(D^T D)^{-1} = E LAM E^T. +! Here, LAM = diag( 4/((np-1)dx)^2 , 4/((np-1)dy)^2 ) = diag( 4/(dx_elem)^2, 4/(dy_elem)^2 ) +! Note that metric tensors and LAM correspondingly are quantities on a unit sphere. + +! This motivates us to use V = D E LAM LAM^* E^T D^T +! where LAM^* = diag( nu1, nu2 ) where nu1, nu2 are HV coefficients scaled like (dx)^{hv_scaling/2}, (dy)^{hv_scaling/2}. +! (Halves in powers come from the fact that HV consists of two Laplace iterations.) + +! Originally, we took LAM^* = diag( +! 1/(eig(1)**(hypervis_scaling/4.0_r8))*(rearth**(hypervis_scaling/2.0_r8)) +! 
1/(eig(2)**(hypervis_scaling/4.0_r8))*(rearth**(hypervis_scaling/2.0_r8)) ) = +! = diag( lamStar1, lamStar2) +! \simeq ((np-1)*dx_sphere / 2 )^hv_scaling/2 = SQRT(OPERATOR_HV) +! because 1/eig(...) \simeq (dx_on_unit_sphere)^2 . +! Introducing the notation OPERATOR = lamStar^2 is useful for conversion formulas. + +! This leads to the following conversion formula: nu_const is nu used for traditional HV on uniform grids +! nu_tensor = nu_const * OPERATOR_HV^{-1}, so +! nu_tensor = nu_const *((np-1)*dx_sphere / 2 )^{ - hv_scaling} or +! nu_tensor = nu_const *(2/( (np-1) * dx_sphere) )^{hv_scaling} . +! dx_sphere = 2\pi *rearth/(np-1)/4/NE +! [nu_tensor] = [meter]^{4-hp_scaling}/[sec] + +! (1) Later developments: +! Apply tensor V only at the second Laplace iteration. Thus, LAM^* should be scaled as (dx)^{hv_scaling}, (dy)^{hv_scaling}, +! see this code below: +! DEL(1:2,1) = (lamStar1**2) *eig(1)*DE(1:2,1) +! DEL(1:2,2) = (lamStar2**2) *eig(2)*DE(1:2,2) + +! (2) Later developments: +! Bringing [nu_tensor] to 1/[sec]: +! lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) +! lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) +! OPERATOR_HV = ( (np-1)*dx_unif_sphere / 2 )^{hv_scaling} * rearth^4 +! Conversion formula: +! nu_tensor = nu_const * OPERATOR_HV^{-1}, so +! nu_tensor = nu_const *( 2*rearth /((np-1)*dx))^{hv_scaling} * rearth^{-4.0}. + +! For the baseline coefficient nu=1e15 for NE30, +! nu_tensor=7e-8 (BUT RUN TWICE AS SMALL VALUE FOR NOW) for hv_scaling=3.2 +! and +! nu_tensor=1.3e-6 for hv_scaling=4.0. + + +!matrix D*E + DE(1,1)=sum(elem%D(i,j,1,:)*E(:,1)) + DE(1,2)=sum(elem%D(i,j,1,:)*E(:,2)) + DE(2,1)=sum(elem%D(i,j,2,:)*E(:,1)) + DE(2,2)=sum(elem%D(i,j,2,:)*E(:,2)) + + lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) + lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) + +!matrix (DE) * Lam^* * Lam , tensor HV when V is applied at each Laplace calculation +! DEL(1:2,1) = lamStar1*eig(1)*DE(1:2,1) +! DEL(1:2,2) = lamStar2*eig(2)*DE(1:2,2) + +!matrix (DE) * (Lam^*)^2 * Lam, tensor HV when V is applied only once, at the last Laplace calculation +!will only work with hyperviscosity, not viscosity + DEL(1:2,1) = (lamStar1**2) *eig(1)*DE(1:2,1) + DEL(1:2,2) = (lamStar2**2) *eig(2)*DE(1:2,2) + +!matrix (DE) * Lam^* * Lam *E^t *D^t or (DE) * (Lam^*)^2 * Lam *E^t *D^t + V(1,1)=sum(DEL(1,:)*DE(1,:)) + V(1,2)=sum(DEL(1,:)*DE(2,:)) + V(2,1)=sum(DEL(2,:)*DE(1,:)) + V(2,2)=sum(DEL(2,:)*DE(2,:)) + + elem%tensorVisc(i,j,:,:)=V(:,:) + + end do + end do + +! see Paul Ullrich writeup: +! max_normDinv might be a tighter bound than max_svd for deformed elements +! max_svd >= max_normDinv/sqrt(2), with equality holding if |g^x| = |g^y| +! elem%normDinv=max_normDinv/sqrt(2) + + ! this norm is consistent with length scales defined below: + elem%normDinv=max_svd + + + ! compute element length scales, based on SVDs, in km: + elem%dx_short = 1.0_r8/(max_svd*0.5_r8*dble(np-1)*ra*1000.0_r8) + elem%dx_long = 1.0_r8/(min_svd*0.5_r8*dble(np-1)*ra*1000.0_r8) + + ! optional noramlization: + elem%D = elem%D * sqrt(alpha) + elem%Dinv = elem%Dinv / sqrt(alpha) + elem%metdet = elem%metdet * alpha + elem%rmetdet = elem%rmetdet / alpha + elem%met = elem%met * alpha + elem%metinv = elem%metinv / alpha + + end subroutine metric_atomic + + + ! ======================================== + ! covariant_rot: + ! + ! 2 x 2 matrix multiply: Db^T * Da^-T + ! for edge rotations: maps face a to face b + ! + ! 
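+  ! Derivation sketch: if u is the vector in the spherical basis and
+  ! w_a = Da^T u are its covariant components on face a, then
+  !   w_b = Db^T u = Db^T Da^-T w_a,
+  ! and the entries below are this product written out using the
+  ! cofactor form of Da^-1 (hence the division by detDa).
+  ! 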
======================================== + + function covariant_rot(Da,Db) result(R) + + real (kind=r8) :: Da(2,2) + real (kind=r8) :: Db(2,2) + real (kind=r8) :: R(2,2) + + real (kind=r8) :: detDa + + detDa = Da(2,2)*Da(1,1) - Da(1,2)*Da(2,1) + + R(1,1)=(Da(2,2)*Db(1,1) - Da(1,2)*Db(2,1))/detDa + R(1,2)=(Da(1,1)*Db(2,1) - Da(2,1)*Db(1,1))/detDa + R(2,1)=(Da(2,2)*Db(1,2) - Da(1,2)*Db(2,2))/detDa + R(2,2)=(Da(1,1)*Db(2,2) - Da(2,1)*Db(1,2))/detDa + + end function covariant_rot + + ! ======================================== + ! contravariant_rot: + ! + ! 2 x 2 matrix multiply: Db^-1 * Da + ! that maps a contravariant vector field + ! from an edge of cube face a to a contiguous + ! edge of cube face b. + ! + ! ======================================== + + function contravariant_rot(Da,Db) result(R) + + real(kind=r8), intent(in) :: Da(2,2) + real(kind=r8), intent(in) :: Db(2,2) + real(kind=r8) :: R(2,2) + + real(kind=r8) :: detDb + + detDb = Db(2,2)*Db(1,1) - Db(1,2)*Db(2,1) + + R(1,1)=(Da(1,1)*Db(2,2) - Da(2,1)*Db(1,2))/detDb + R(1,2)=(Da(1,2)*Db(2,2) - Da(2,2)*Db(1,2))/detDb + R(2,1)=(Da(2,1)*Db(1,1) - Da(1,1)*Db(2,1))/detDb + R(2,2)=(Da(2,2)*Db(1,1) - Da(1,2)*Db(2,1))/detDb + + end function contravariant_rot + + ! ======================================================== + ! Dmap: + ! + ! Initialize mapping that tranforms contravariant + ! vector fields on the reference element onto vector fields on + ! the sphere. + ! ======================================================== + subroutine Dmap(D, a,b, corners3D, ref_map, corners, u2qmap, facenum) + real (kind=r8), intent(out) :: D(2,2) + real (kind=r8), intent(in) :: a,b + type (cartesian3D_t) :: corners3D(4) !x,y,z coords of element corners + integer :: ref_map + ! only needed for ref_map=0,1 + type (cartesian2D_t),optional :: corners(4) ! gnomonic coords of element corners + real (kind=r8),optional :: u2qmap(4,2) + integer,optional :: facenum + + + + if (ref_map==0) then + if (.not. present ( corners ) ) & + call endrun('Dmap(): missing arguments for equiangular map') + call dmap_equiangular(D,a,b,corners,u2qmap,facenum) + else if (ref_map==1) then + call endrun('equi-distance gnomonic map not yet implemented') + else if (ref_map==2) then + call dmap_elementlocal(D,a,b,corners3D) + else + call endrun('bad value of ref_map') + endif + end subroutine Dmap + + + + ! ======================================================== + ! Dmap: + ! + ! Equiangular Gnomonic Projection + ! Composition of equiangular Gnomonic projection to cubed-sphere face, + ! followd by bilinear map to reference element + ! ======================================================== + subroutine dmap_equiangular(D, a,b, corners,u2qmap,facenum ) + use dimensions_mod, only : np + real (kind=r8), intent(out) :: D(2,2) + real (kind=r8), intent(in) :: a,b + real (kind=r8) :: u2qmap(4,2) + type (cartesian2D_t) :: corners(4) ! gnomonic coords of element corners + integer :: facenum + ! local + real (kind=r8) :: tmpD(2,2), Jp(2,2),x1,x2,pi,pj,qi,qj + real (kind=r8), dimension(4,2) :: unif2quadmap + +#if 0 + ! we shoud get rid of elem%u2qmap() and routine cube_mod.F90::elem_jacobian() + ! and replace with this code below: + ! 
but this produces roundoff level changes + !unif2quadmap(1,1)=(elem%cartp(1,1)%x+elem%cartp(np,1)%x+elem%cartp(np,np)%x+elem%cartp(1,np)%x)/4.0_r8 + !unif2quadmap(1,2)=(elem%cartp(1,1)%y+elem%cartp(np,1)%y+elem%cartp(np,np)%y+elem%cartp(1,np)%y)/4.0_r8 + unif2quadmap(2,1)=(-elem%cartp(1,1)%x+elem%cartp(np,1)%x+elem%cartp(np,np)%x-elem%cartp(1,np)%x)/4.0_r8 + unif2quadmap(2,2)=(-elem%cartp(1,1)%y+elem%cartp(np,1)%y+elem%cartp(np,np)%y-elem%cartp(1,np)%y)/4.0_r8 + unif2quadmap(3,1)=(-elem%cartp(1,1)%x-elem%cartp(np,1)%x+elem%cartp(np,np)%x+elem%cartp(1,np)%x)/4.0_r8 + unif2quadmap(3,2)=(-elem%cartp(1,1)%y-elem%cartp(np,1)%y+elem%cartp(np,np)%y+elem%cartp(1,np)%y)/4.0_r8 + unif2quadmap(4,1)=(elem%cartp(1,1)%x-elem%cartp(np,1)%x+elem%cartp(np,np)%x-elem%cartp(1,np)%x)/4.0_r8 + unif2quadmap(4,2)=(elem%cartp(1,1)%y-elem%cartp(np,1)%y+elem%cartp(np,np)%y-elem%cartp(1,np)%y)/4.0_r8 + Jp(1,1) = unif2quadmap(2,1) + unif2quadmap(4,1)*b + Jp(1,2) = unif2quadmap(3,1) + unif2quadmap(4,1)*a + Jp(2,1) = unif2quadmap(2,2) + unif2quadmap(4,2)*b + Jp(2,2) = unif2quadmap(3,2) + unif2quadmap(4,2)*a +#else + ! input (a,b) shold be a point in the reference element [-1,1] + ! compute Jp(a,b) + Jp(1,1) = u2qmap(2,1) + u2qmap(4,1)*b + Jp(1,2) = u2qmap(3,1) + u2qmap(4,1)*a + Jp(2,1) = u2qmap(2,2) + u2qmap(4,2)*b + Jp(2,2) = u2qmap(3,2) + u2qmap(4,2)*a +#endif + + ! map (a,b) to the [-pi/2,pi/2] equi angular cube face: x1,x2 + ! a = gp%points(i) + ! b = gp%points(j) + pi = (1-a)/2 + pj = (1-b)/2 + qi = (1+a)/2 + qj = (1+b)/2 + x1 = pi*pj*corners(1)%x & + + qi*pj*corners(2)%x & + + qi*qj*corners(3)%x & + + pi*qj*corners(4)%x + x2 = pi*pj*corners(1)%y & + + qi*pj*corners(2)%y & + + qi*qj*corners(3)%y & + + pi*qj*corners(4)%y + + call vmap(tmpD,x1,x2,facenum) + + ! Include map from element -> ref element in D + D(1,1) = tmpD(1,1)*Jp(1,1) + tmpD(1,2)*Jp(2,1) + D(1,2) = tmpD(1,1)*Jp(1,2) + tmpD(1,2)*Jp(2,2) + D(2,1) = tmpD(2,1)*Jp(1,1) + tmpD(2,2)*Jp(2,1) + D(2,2) = tmpD(2,1)*Jp(1,2) + tmpD(2,2)*Jp(2,2) + end subroutine dmap_equiangular + + + + ! ======================================================== + ! vmap: + ! + ! Initialize mapping that tranforms contravariant + ! vector fields on the cube onto vector fields on + ! the sphere. This follows Taylor's D matrix + ! + ! | cos(theta)dlambda/dx1 cos(theta)dlambda/dx2 | + ! D = | | + ! | dtheta/dx1 dtheta/dx2 | + ! + ! ======================================================== + + subroutine vmap(D, x1, x2, face_no) + real(kind=r8), intent(inout) :: D(2,2) + real(kind=r8), intent(in) :: x1 + real(kind=r8), intent(in) :: x2 + integer, intent(in) :: face_no + + ! Local variables + + real (kind=r8) :: poledist ! SQRT(TAN(x1)**2 +TAN(x2)**2) + real (kind=r8) :: r ! distance from cube point to center of sphere + + real (kind=r8) :: D11 + real (kind=r8) :: D12 + real (kind=r8) :: D21 + real (kind=r8) :: D22 + character(len=64) :: errmsg + + r = SQRT(1.0_r8 + TAN(x1)**2 + TAN(x2)**2) + + if (face_no >= 1 .and. face_no <= 4) then + + D11 = 1.0_r8 / (r * COS(x1)) + D12 = 0.0_r8 + D21 = -TAN(x1)*TAN(x2) / (COS(x1)*r*r) + D22 = 1.0_r8 / (r*r*COS(x1)*COS(x2)*COS(x2)) + + D(1,1) = D11 + D(1,2) = D12 + D(2,1) = D21 + D(2,2) = D22 + + + else if (face_no == 6) then + poledist = SQRT( TAN(x1)**2 + TAN(x2)**2) + if (poledist <= DIST_THRESHOLD) then + + ! we set the D transform to the identity matrix + ! which works ONLY for swtc1, phi starting at + ! 3*PI/2... 
assumes lon at pole == 0 + + D(1,1) = 1.0_r8 + D(1,2) = 0.0_r8 + D(2,1) = 0.0_r8 + D(2,2) = 1.0_r8 + + else + + D11 = -TAN(x2)/(poledist*COS(x1)*COS(x1)*r) + D12 = TAN(x1)/(poledist*COS(x2)*COS(x2)*r) + D21 = -TAN(x1)/(poledist*COS(x1)*COS(x1)*r*r) + D22 = -TAN(x2)/(poledist*COS(x2)*COS(x2)*r*r) + + D(1,1) = D11 + D(1,2) = D12 + D(2,1) = D21 + D(2,2) = D22 + + end if + else if (face_no == 5) then + poledist = SQRT( TAN(x1)**2 + TAN(x2)**2) + if (poledist <= DIST_THRESHOLD) then + + ! we set the D transform to the identity matrix + ! which works ONLY for swtc1, phi starting at + ! 3*PI/2... assumes lon at pole == 0, i.e. very specific + + D(1,1) = 1.0_r8 + D(1,2) = 0.0_r8 + D(2,1) = 0.0_r8 + D(2,2) = 1.0_r8 + + else + + D11 = TAN(x2)/(poledist*COS(x1)*COS(x1)*r) + D12 = -TAN(x1)/(poledist*COS(x2)*COS(x2)*r) + D21 = TAN(x1)/(poledist*COS(x1)*COS(x1)*r*r) + D22 = TAN(x2)/(poledist*COS(x2)*COS(x2)*r*r) + + D(1,1) = D11 + D(1,2) = D12 + D(2,1) = D21 + D(2,2) = D22 + + end if + else + write(errmsg, '(a,i0)') 'VMAP: Bad face number, ',face_no + call endrun(errmsg) + end if + + end subroutine vmap + + + + + ! ======================================================== + ! Dmap: + ! + ! Initialize mapping that tranforms contravariant + ! vector fields on the reference element onto vector fields on + ! the sphere. + ! For Gnomonic, followed by bilinear, this code uses the old vmap() + ! for unstructured grids, this code uses the parametric map that + ! maps quads on the sphere directly to the reference element + ! ======================================================== + subroutine dmap_elementlocal(D, a,b, corners3D) + use element_mod, only : element_t + + type (element_t) :: elem + real (kind=r8), intent(out) :: D(2,2) + real (kind=r8), intent(in) :: a,b + type (cartesian3d_t) :: corners3D(4) + + type (spherical_polar_t) :: sphere + + real(kind=r8) :: c(3,4), q(4), xx(3), r, lam, th, dd(4,2) + real(kind=r8) :: sinlam, sinth, coslam, costh + real(kind=r8) :: D1(2,3), D2(3,3), D3(3,2), D4(3,2) + integer :: i,j + + sphere = ref2sphere(a,b,corners3D,2) ! use element local map, ref_map=2 + + c(1,1)=corners3D(1)%x; c(2,1)=corners3D(1)%y; c(3,1)=corners3D(1)%z; + c(1,2)=corners3D(2)%x; c(2,2)=corners3D(2)%y; c(3,2)=corners3D(2)%z; + c(1,3)=corners3D(3)%x; c(2,3)=corners3D(3)%y; c(3,3)=corners3D(3)%z; + c(1,4)=corners3D(4)%x; c(2,4)=corners3D(4)%y; c(3,4)=corners3D(4)%z; + + q(1)=(1-a)*(1-b); q(2)=(1+a)*(1-b); q(3)=(1+a)*(1+b); q(4)=(1-a)*(1+b); + q=q/4.0_r8; + + do i=1,3 + xx(i)=sum(c(i,:)*q(:)) + enddo + + r=sqrt(xx(1)**2+xx(2)**2+xx(3)**2) + + lam=sphere%lon; th=sphere%lat; + sinlam=sin(lam); sinth=sin(th); + coslam=cos(lam); costh=cos(th); + + D1(1,1)=-sinlam; D1(1,2)=coslam; D1(1,3)=0.0_r8; + D1(2,1)=0.0_r8; D1(2,2)=0.0_r8; D1(2,3)=1.0_r8; + + D2(1,1)=(sinlam**2)*(costh**2)+sinth**2; D2(1,2)=-sinlam*coslam*(costh**2); D2(1,3)=-coslam*sinth*costh; + D2(2,1)=-sinlam*coslam*(costh**2); D2(2,2)=(coslam**2)*(costh**2)+sinth**2; D2(2,3)=-sinlam*sinth*costh; + D2(3,1)=-coslam*sinth; D2(3,2)=-sinlam*sinth; D2(3,3)=costh; + + dd(1,1)=-1+b; dd(1,2)=-1+a; + dd(2,1)=1-b; dd(2,2)=-1-a; + dd(3,1)=1+b; dd(3,2)=1+a; + dd(4,1)=-1-b; dd(4,2)=1-a; + + dd=dd/4.0_r8 + + do i=1,3 + do j=1,2 + D3(i,j)=sum(c(i,:)*dd(:,j)) + enddo + enddo + + do i=1,3 + do j=1,2 + D4(i,j)=sum(D2(i,:)*D3(:,j)) + enddo + enddo + + do i=1,2 + do j=1,2 + D(i,j)=sum(D1(i,:)*D4(:,j)) + enddo + enddo + + D=D/r + end subroutine dmap_elementlocal + + + + + + ! ======================================== + ! coreolis_init_atomic: + ! + ! 
Initialize coreolis term ... + ! + ! ======================================== + + subroutine coreolis_init_atomic(elem) + use element_mod, only: element_t + use dimensions_mod, only: np + use physconst, only: omega + + type (element_t) :: elem + + ! Local variables + + integer :: i,j + real (kind=r8) :: lat,lon,rangle + + rangle = rotate_grid * PI / 180._r8 + do j=1,np + do i=1,np + if ( rotate_grid /= 0) then + lat = elem%spherep(i,j)%lat + lon = elem%spherep(i,j)%lon + elem%fcor(i,j)= 2*omega* & + (-cos(lon)*cos(lat)*sin(rangle) + sin(lat)*cos(rangle)) + else + elem%fcor(i,j) = 2.0_r8*omega*SIN(elem%spherep(i,j)%lat) + endif + end do + end do + + end subroutine coreolis_init_atomic + + ! ========================================= + ! rotation_init_atomic: + ! + ! Initialize cube rotation terms resulting + ! from changing cube face coordinate systems + ! + ! ========================================= + + + subroutine rotation_init_atomic(elem, rot_type) + use element_mod, only : element_t + use dimensions_mod, only : np + use control_mod, only : north, south, east, west, neast, seast, swest, nwest + + type (element_t) :: elem + character(len=*) rot_type + + ! ======================================= + ! Local variables + ! ======================================= + + integer :: myface_no ! current element face number + integer :: nbrface_no ! neighbor element face number + integer :: inbr + integer :: nrot,irot + integer :: ii,i,j,k + integer :: ir,jr + integer :: start, cnt + + real (kind=r8) :: Dloc(2,2,np) + real (kind=r8) :: Drem(2,2,np) + real (kind=r8) :: x1,x2 + + + myface_no = elem%vertex%face_number + + nrot = 0 + + do inbr=1,8 + cnt = elem%vertex%nbrs_ptr(inbr+1) - elem%vertex%nbrs_ptr(inbr) + start = elem%vertex%nbrs_ptr(inbr) + + do k = 0, cnt-1 + nbrface_no = elem%vertex%nbrs_face(start+k) + if (myface_no /= nbrface_no) nrot=nrot+1 + end do + + end do + + if(associated(elem%desc%rot)) then + if (size(elem%desc%rot) > 0) then + ! deallocate(elem%desc%rot) + NULLIFY(elem%desc%rot) + endif + end if + + ! ===================================================== + ! If there are neighbors on other cube faces, allocate + ! an array of rotation matrix structs. + ! ===================================================== + + if (nrot > 0) then + allocate(elem%desc%rot(nrot)) + elem%desc%use_rotation=1 + irot=0 + + do inbr=1,8 + cnt = elem%vertex%nbrs_ptr(inbr+1) - elem%vertex%nbrs_ptr(inbr) + start = elem%vertex%nbrs_ptr(inbr) + + do k= 0, cnt-1 + + nbrface_no = elem%vertex%nbrs_face(start+k) + ! The cube edge (myface_no,nbrface_no) and inbr defines + ! a unique rotation given by (D^-1) on myface_no x (D) on nbrface_no + + if (myface_no /= nbrface_no .and. elem%vertex%nbrs(start+k) /= -1 ) then + irot=irot+1 + + if (inbr <= 4) then + allocate(elem%desc%rot(irot)%R(2,2,np)) ! edge + else + allocate(elem%desc%rot(irot)%R(2,2,1 )) ! corner + end if + ! Initialize Dloc and Drem for no-rotation possibilities + Dloc(1,1,:) = 1.0_r8 + Dloc(1,2,:) = 0.0_r8 + Dloc(2,1,:) = 0.0_r8 + Dloc(2,2,:) = 1.0_r8 + Drem(1,1,:) = 1.0_r8 + Drem(1,2,:) = 0.0_r8 + Drem(2,1,:) = 0.0_r8 + Drem(2,2,:) = 1.0_r8 + + ! must compute Dloc on my face, Drem on neighbor face, + ! for each point on edge or corner. + + ! ==================================== + ! Equatorial belt east/west neighbors + ! ==================================== + + if (nbrface_no <= 4 .and. 
myface_no <= 4) then + + if (inbr == west) then + do j=1,np + x1 = elem%cartp(1,j)%x + x2 = elem%cartp(1,j)%y + call Vmap(Dloc(1,1,j), x1,x2,myface_no) + call Vmap(Drem(1,1,j),-x1,x2,nbrface_no) + end do + else if (inbr == east) then + do j=1,np + x1 = elem%cartp(np,j)%x + x2 = elem%cartp(np,j)%y + call Vmap(Dloc(1,1,j), x1,x2,myface_no) + call Vmap(Drem(1,1,j),-x1,x2,nbrface_no) + end do + else if (inbr == swest ) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + else if (inbr == nwest ) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(1,1,1), x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + else if (inbr == seast ) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1), x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + else if (inbr == neast ) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1), x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + end if + + end if + + ! Northern Neighbors of Equatorial Belt + + if ( myface_no <= 4 .and. nbrface_no == 6 ) then + if (inbr == north) then + do i=1,np + ir=np+1-i + x1 = elem%cartp(i,np)%x + x2 = elem%cartp(i,np)%y + if ( myface_no == 1) then + call Vmap(Dloc(1,1,i), x1,x2,myface_no) + call Vmap(Drem(1,1,i),x1,-x2,nbrface_no) + end if + if ( myface_no == 2) then + call Vmap(Dloc(1,1,i),x1,x2,myface_no) + call Vmap(Drem(1,1,i),x2,x1,nbrface_no) + + end if + if ( myface_no == 3) then + call Vmap(Dloc(1,1,ir), x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x1,x2,nbrface_no) + end if + if ( myface_no == 4) then + call Vmap(Dloc(1,1,ir), x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x2,-x1,nbrface_no) + end if + end do + else if (inbr == nwest) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(1,1,1), x1,x2,myface_no) + if ( myface_no == 1) call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + if ( myface_no == 2) call Vmap(Drem(1,1,1),x2, x1,nbrface_no) + if ( myface_no == 3) call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + if ( myface_no == 4) call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + else if (inbr == neast) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + if ( myface_no == 1) call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + if ( myface_no == 2) call Vmap(Drem(1,1,1),x2, x1,nbrface_no) + if ( myface_no == 3) call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + if ( myface_no == 4) call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + end if + + end if + + ! Southern Neighbors of Equatorial Belt + + if ( myface_no <= 4 .and. 
nbrface_no == 5 ) then + if (inbr == south) then + do i=1,np + ir=np+1-i + x1 = elem%cartp(i,1)%x + x2 = elem%cartp(i,1)%y + if ( myface_no == 1) then + call Vmap(Dloc(1,1,i), x1, x2,myface_no) + call Vmap(Drem(1,1,i), x1,-x2,nbrface_no) + end if + if ( myface_no == 2) then + call Vmap(Dloc(1,1,ir),x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x2,-x1,nbrface_no) + end if + if ( myface_no == 3) then + call Vmap(Dloc(1,1,ir), x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x1,x2,nbrface_no) + end if + if ( myface_no == 4) then + call Vmap(Dloc(1,1,i), x1,x2,myface_no) + call Vmap(Drem(1,1,i), x2,x1,nbrface_no) + end if + end do + else if (inbr == swest) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + + + if ( myface_no == 1) call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + if ( myface_no == 2) call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + if ( myface_no == 3) call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + if ( myface_no == 4) call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + + else if (inbr == seast) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + if ( myface_no == 1) call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + if ( myface_no == 2) call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + if ( myface_no == 3) call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + if ( myface_no == 4) call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + end if + + end if + + ! Neighbors of Northern Capping Face Number 6 + + if ( myface_no == 6 ) then + if (nbrface_no == 1) then + if (inbr == south) then + do i=1,np + x1 = elem%cartp(i,1)%x + x2 = elem%cartp(i,1)%y + call Vmap(Dloc(1,1,i),x1,x2,myface_no) + call Vmap(Drem(1,1,i),x1,-x2,nbrface_no) + end do + else if (inbr == swest) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + else if (inbr == seast) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + end if + else if (nbrface_no == 2) then + if (inbr == east) then + do j=1,np + x1 = elem%cartp(np,j)%x + x2 = elem%cartp(np,j)%y + call Vmap(Dloc(1,1,j),x1,x2,myface_no) + call Vmap(Drem(1,1,j),x2,x1,nbrface_no) + end do + else if (inbr == seast) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + else if (inbr == neast) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + end if + else if (nbrface_no == 3) then + if (inbr == north) then + do i=1,np + ir =np+1-i + x1 = elem%cartp(i,np)%x + x2 = elem%cartp(i,np)%y + call Vmap(Dloc(1,1,ir),x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x1,x2,nbrface_no) + end do + else if (inbr == nwest) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + else if (inbr == neast) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + end if + else if (nbrface_no == 4) then + if (inbr == west) then + do j=1,np + jr=np+1-j + x1 = elem%cartp(1,j)%x + x2 = elem%cartp(1,j)%y + call Vmap(Dloc(1,1,jr), x1, x2,myface_no ) + call Vmap(Drem(1,1,jr),-x2,-x1,nbrface_no) + end do + else if (inbr == swest) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call 
Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + else if (inbr == nwest) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + end if + end if + end if + + ! Neighbors of South Capping Face Number 5 + + if ( myface_no == 5 ) then + if (nbrface_no == 1) then + if (inbr == north) then + do i=1,np + x1 = elem%cartp(i,np)%x + x2 = elem%cartp(i,np)%y + call Vmap(Dloc(1,1,i),x1,x2,myface_no) + call Vmap(Drem(1,1,i),x1,-x2,nbrface_no) + end do + else if (inbr == nwest) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(:,:,1),x1,x2,myface_no) + call Vmap(Drem(:,:,1),x1,-x2,nbrface_no) + else if (inbr == neast) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x1,-x2,nbrface_no) + end if + else if (nbrface_no == 2) then + if (inbr == east) then + do j=1,np + jr=np+1-j + x1 = elem%cartp(np,j)%x + x2 = elem%cartp(np,j)%y + call Vmap(Dloc(1,1,jr),x1, x2,myface_no) + call Vmap(Drem(1,1,jr),-x2,-x1,nbrface_no) + end do + else if (inbr == seast) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + else if (inbr == neast) then + x1 = elem%cartp(np,np)%x + x2 = elem%cartp(np,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x2,-x1,nbrface_no) + end if + else if (nbrface_no == 3) then + if (inbr == south) then + do i=1,np + ir=np+1-i + x1 = elem%cartp(i,1)%x + x2 = elem%cartp(i,1)%y + call Vmap(Dloc(1,1,ir),x1,x2,myface_no) + call Vmap(Drem(1,1,ir),-x1,x2,nbrface_no) + end do + else if (inbr == swest) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + else if (inbr == seast) then + x1 = elem%cartp(np,1)%x + x2 = elem%cartp(np,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),-x1,x2,nbrface_no) + end if + else if (nbrface_no == 4) then + if (inbr == west) then + do j=1,np + x1 = elem%cartp(1,j)%x + x2 = elem%cartp(1,j)%y + call Vmap(Dloc(1,1,j),x1,x2,myface_no) + call Vmap(Drem(1,1,j),x2,x1,nbrface_no) + end do + else if (inbr == swest) then + x1 = elem%cartp(1,1)%x + x2 = elem%cartp(1,1)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + else if (inbr == nwest) then + x1 = elem%cartp(1,np)%x + x2 = elem%cartp(1,np)%y + call Vmap(Dloc(1,1,1),x1,x2,myface_no) + call Vmap(Drem(1,1,1),x2,x1,nbrface_no) + end if + end if + end if + + elem%desc%rot(irot)%nbr = inbr + if (rot_type == "covariant") then + do i=1,SIZE(elem%desc%rot(irot)%R(:,:,:),3) + elem%desc%rot(irot)%R(:,:,i)=covariant_rot(Dloc(:,:,i),Drem(:,:,i)) + end do + else if (rot_type == "contravariant") then + do i=1,SIZE(elem%desc%rot(irot)%R(:,:,:),3) + elem%desc%rot(irot)%R(:,:,i)=contravariant_rot(Dloc(:,:,i),Drem(:,:,i)) + end do + end if + + end if ! end of a unique rotation + end do !k loop over neighbors in that direction + end do !inbr loop + end if !nrot > 0 + + end subroutine rotation_init_atomic + + + subroutine set_corner_coordinates(elem) + use element_mod, only : element_t + use dimensions_mod, only : ne + + type (element_t) :: elem + + ! Local variables + integer i,ie,je,face_no,nn + real (kind=r8) :: dx,dy, startx, starty + + if (0==ne) call endrun('Error in set_corner_coordinates: ne is zero') + + ! ======================================== + ! compute cube face coordinates of element + ! 
========================================= + + call convert_gbl_index(elem%vertex%number,ie,je,face_no) + + elem%vertex%face_number = face_no + dx = (cube_xend-cube_xstart)/ne + dy = (cube_yend-cube_ystart)/ne + + startx = cube_xstart+ie*dx + starty = cube_ystart+je*dy + + elem%corners(1)%x = startx + elem%corners(1)%y = starty + elem%corners(2)%x = startx+dx + elem%corners(2)%y = starty + elem%corners(3)%x = startx+dx + elem%corners(3)%y = starty+dy + elem%corners(4)%x = startx + elem%corners(4)%y = starty+dy + +#if 0 + do i=1,4 + elem%node_multiplicity(i) = 4 + end do + ie = ie + 1 + je = je + 1 + if (ie == 1 .and. je == 1) then + elem%node_multiplicity(1) = 3 + else if (ie == ne .and. je == 1) then + elem%node_multiplicity(2) = 3 + else if (ie == ne .and. je == ne) then + elem%node_multiplicity(3) = 3 + else if (ie == 1 .and. je == ne) then + elem%node_multiplicity(4) = 3 + end if +#endif + end subroutine set_corner_coordinates + + + subroutine assign_node_numbers_to_elem(elements, GridVertex) + use dimensions_mod, only : ne + use element_mod, only : element_t + use control_mod, only : north, south, east, west, neast, seast, swest, nwest + use gridgraph_mod, only : GridVertex_t + implicit none + type (element_t), intent(inout) :: elements(:) + type (GridVertex_t), intent(in) :: GridVertex(:) + + type (GridVertex_t) :: vertex + integer :: connectivity(6*ne*ne, 4) + integer :: nn(4), en(4) + integer el, i, n, direction + integer current_node_num, tot_ne + integer :: start, cnt + + current_node_num = 0 + tot_ne = 6*ne*ne + + if (0==ne) call endrun('Error in assign_node_numbers_to_elem: ne is zero') + if (tot_ne /= SIZE(GridVertex)) call endrun('Error in assign_node_numbers_to_elem: GridVertex not correct length') + + connectivity = 0 + + do el = 1,tot_ne + vertex = GridVertex(el) + en = 0 + do direction = 1,8 + cnt = vertex%nbrs_ptr(direction+1) - vertex%nbrs_ptr(direction) + start = vertex%nbrs_ptr(direction) + + do i=0, cnt-1 + n = vertex%nbrs(start+i) + if (n /= -1) then + nn = connectivity(n,:) + select case (direction) + case (north) + if (nn(1)/=0) en(4) = nn(1) + if (nn(2)/=0) en(3) = nn(2) + case (south) + if (nn(4)/=0) en(1) = nn(4) + if (nn(3)/=0) en(2) = nn(3) + case (east) + if (nn(1)/=0) en(2) = nn(1) + if (nn(4)/=0) en(3) = nn(4) + case (west) + if (nn(2)/=0) en(1) = nn(2) + if (nn(3)/=0) en(4) = nn(3) + case (neast) + if (nn(1)/=0) en(3) = nn(1) + case (seast) + if (nn(4)/=0) en(2) = nn(4) + case (swest) + if (nn(3)/=0) en(1) = nn(3) + case (nwest) + if (nn(2)/=0) en(4) = nn(2) + end select + end if + end do + end do !direction + + do i=1,4 + if (en(i) == 0) then + current_node_num = current_node_num + 1 + en(i) = current_node_num + end if + end do + connectivity(el,:) = en + end do + + if (current_node_num /= (6*ne*ne+2)) then + call endrun('Error in assignment of node numbers: Failed Euler test') + end if +! do el = 1,SIZE(elements) +! elements(el)%node_numbers = connectivity(elements(el)%vertex%number, :) +! end do + end subroutine assign_node_numbers_to_elem + + + ! ================================================ + ! convert_gbl_index: + ! + ! Convert global element index to cube index + ! ================================================ + + subroutine convert_gbl_index(number,ie,je,face_no) + use dimensions_mod, only : ne + integer, intent(in) :: number + integer, intent(out) :: ie,je,face_no + + if (0==ne) call endrun('Error in cube_mod:convert_gbl_index: ne is zero') + + ! 
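+    ! Worked check (illustrative, taking ne=30): number=1800 decodes to
+    ! face_no = 1799/900 + 1 = 2, ie = modulo(1799,30) = 29, je = 1799/30 - 30 = 29,
+    ! and re-encoding gives 1 + 29 + 30*29 + 900*(2-1) = 1800. The assignments below are the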
inverse of the function: number = 1 + ie + ne*je + ne*ne*(face_no-1) + face_no=((number-1)/(ne*ne))+1 + ie=MODULO(number-1,ne) + je=(number-1)/ne - (face_no-1)*ne + + end subroutine convert_gbl_index + + subroutine CubeTopology(GridEdge, GridVertex) + use gridgraph_mod, only: GridEdge_t, GridVertex_t, initgridedge + use gridgraph_mod, only: allocate_gridvertex_nbrs, deallocate_gridvertex_nbrs + use dimensions_mod, only: np, ne + use spacecurve_mod, only: IsFactorable, genspacecurve + use control_mod, only: north, south, east, west, neast, seast, swest, nwest + !----------------------- + + ! Since GridVertex fields must be allocated before calling this, it + ! must be intent(inout). +!og: is 'target' here necessary? +!GridEdge : changed its 'out' attribute to 'inout' + type (GridEdge_t), intent(inout),target :: GridEdge(:) + type (GridVertex_t), intent(inout),target :: GridVertex(:) + + + integer,allocatable :: Mesh(:,:) + integer,allocatable :: Mesh2(:,:),Mesh2_map(:,:,:),sfcij(:,:) + type (GridVertex_t),allocatable :: GridElem(:,:,:) + integer :: i,j,k,ll,number,irev,ne2,i2,j2,sfc_index + integer :: EdgeWgtP,CornerWgt + integer :: ielem, nedge + integer :: offset, ierr, loc + logical, allocatable :: nbrs_used(:,:,:,:) + + + if (0==ne) call endrun('Error in CubeTopology: ne is zero') + + allocate(GridElem(ne,ne,nfaces),stat=ierr) + do k = 1, nfaces + do j = 1, ne + do i = 1, ne + call allocate_gridvertex_nbrs(GridElem(i,j,k)) + end do + end do + end do + + if(ierr/=0) then + call endrun('error in allocation of GridElem structure') + end if + + allocate(nbrs_used(ne,ne,nfaces,8)) + nbrs_used = .false. + + + number=1 + EdgeWgtP = np + CornerWgt = 1 + do k=1,nfaces + do j=1,ne + do i=1,ne + ! ==================================== + ! Number elements + ! ==================================== + GridElem(i,j,k)%nbrs(:)=0 + GridElem(i,j,k)%nbrs_wgt(:)=0 + GridElem(i,j,k)%nbrs_ptr(:)=0 + GridElem(i,j,k)%nbrs_wgt_ghost(:)=1 ! always this value + GridElem(i,j,k)%SpaceCurve=0 + GridElem(i,j,k)%number=number + number=number+1 + + end do + end do + end do + + allocate(Mesh(ne,ne)) + if(IsFactorable(ne)) then + call GenspaceCurve(Mesh) + else + ! find the smallest ne2 which is a power of 2 and ne2>ne + ne2 = 2**ceiling(log(real(ne)) / log(2.0_r8)) + if (ne2 < ne) then + call endrun('Fatal SFC error') + end if + + allocate(Mesh2(ne2,ne2)) + allocate(Mesh2_map(ne2,ne2,2)) + allocate(sfcij(0:ne2*ne2,2)) + + call GenspaceCurve(Mesh2) ! SFC partition for ne2 + + ! associate every element on the ne x ne mesh (Mesh) + ! with its closest element on the ne2 x ne2 mesh (Mesh2) + ! Store this as a map from Mesh2 -> Mesh in Mesh2_map. + ! elements in Mesh2 which are not mapped get assigned a value of 0 + Mesh2_map=0 + do j=1,ne + do i=1,ne + ! map this element to an (i2,j2) element + ! [ (i-.5)/ne , (j-.5)/ne ] = [ (i2-.5)/ne2 , (j2-.5)/ne2 ] + i2=nint( ((i-0.5_r8)/ne)*ne2 + 0.5_r8 ) + j2=nint( ((j-0.5_r8)/ne)*ne2 + 0.5_r8 ) + if (i2<1) i2=1 + if (i2>ne2) i2=ne2 + if (j2<1) j2=1 + if (j2>ne2) j2=ne2 + Mesh2_map(i2,j2,1)=i + Mesh2_map(i2,j2,2)=j + enddo + enddo + + ! create a reverse index array for Mesh2 + ! k = Mesh2(i,j) + ! (i,j) = (sfcij(k,1),sfci(k,2)) + do j=1,ne2 + do i=1,ne2 + k=Mesh2(i,j) + sfcij(k,1)=i + sfcij(k,2)=j + enddo + enddo + + ! generate a SFC for Mesh with the same ordering as the + ! elements in Mesh2 which map to Mesh. + sfc_index=0 + do k=0,ne2*ne2-1 + i2=sfcij(k,1) + j2=sfcij(k,2) + i=Mesh2_map(i2,j2,1) + j=Mesh2_map(i2,j2,2) + if (i/=0) then + ! 
(i2,j2) element maps to (i,j) element + Mesh(i,j)=sfc_index + sfc_index=sfc_index+1 + endif + enddo + + deallocate(Mesh2) + deallocate(Mesh2_map) + deallocate(sfcij) + endif + + ! ------------------------------------------- + ! Setup the space-filling curve for face 1 + ! ------------------------------------------- + offset=0 + do j=1,ne + do i=1,ne + GridElem(i,j,1)%SpaceCurve = offset + Mesh(i,ne-j+1) + enddo + enddo + + ! ------------------------------------------- + ! Setup the space-filling curve for face 2 + ! ------------------------------------------- + offset = offset + ne*ne + do j=1,ne + do i=1,ne + GridElem(i,j,2)%SpaceCurve = offset + Mesh(i,ne-j+1) + enddo + enddo + + ! ------------------------------------------- + ! Setup the space-filling curve for face 6 + ! ------------------------------------------- + offset = offset + ne*ne + do j=1,ne + do i=1,ne + GridElem(i,j,6)%SpaceCurve = offset + Mesh(ne-i+1,ne-j+1) + enddo + enddo + + ! ------------------------------------------- + ! Setup the space-filling curve for face 4 + ! ------------------------------------------- + offset = offset + ne*ne + do j=1,ne + do i=1,ne + GridElem(i,j,4)%SpaceCurve = offset + Mesh(ne-j+1,i) + enddo + enddo + + ! ------------------------------------------- + ! Setup the space-filling curve for face 5 + ! ------------------------------------------- + offset = offset + ne*ne + do j=1,ne + do i=1,ne + GridElem(i,j,5)%SpaceCurve = offset + Mesh(i,j) + enddo + enddo + + + ! ------------------------------------------- + ! Setup the space-filling curve for face 3 + ! ------------------------------------------- + offset = offset + ne*ne + do j=1,ne + do i=1,ne + GridElem(i,j,3)%SpaceCurve = offset + Mesh(i,j) + enddo + enddo + + ! ================== + ! face interiors + ! ================== + do k=1,6 + ! setup SOUTH, WEST, SW neighbors + do j=2,ne + do i=2,ne + nbrs_used(i,j,k,west) = .true. + nbrs_used(i,j,k,south) = .true. + nbrs_used(i,j,k,swest) = .true. + + + GridElem(i,j,k)%nbrs(west) = GridElem(i-1,j,k)%number + GridElem(i,j,k)%nbrs_face(west) = k + GridElem(i,j,k)%nbrs_wgt(west) = EdgeWgtP + GridElem(i,j,k)%nbrs(south) = GridElem(i,j-1,k)%number + GridElem(i,j,k)%nbrs_face(south) = k + GridElem(i,j,k)%nbrs_wgt(south) = EdgeWgtP + GridElem(i,j,k)%nbrs(swest) = GridElem(i-1,j-1,k)%number + GridElem(i,j,k)%nbrs_face(swest) = k + GridElem(i,j,k)%nbrs_wgt(swest) = CornerWgt + end do + end do + + ! setup EAST, NORTH, NE neighbors + do j=1,ne-1 + do i=1,ne-1 + nbrs_used(i,j,k,east) = .true. + nbrs_used(i,j,k,north) = .true. + nbrs_used(i,j,k,neast) = .true. + + GridElem(i,j,k)%nbrs(east) = GridElem(i+1,j,k)%number + GridElem(i,j,k)%nbrs_face(east) = k + GridElem(i,j,k)%nbrs_wgt(east) = EdgeWgtP + GridElem(i,j,k)%nbrs(north) = GridElem(i,j+1,k)%number + GridElem(i,j,k)%nbrs_face(north) = k + GridElem(i,j,k)%nbrs_wgt(north) = EdgeWgtP + GridElem(i,j,k)%nbrs(neast) = GridElem(i+1,j+1,k)%number + GridElem(i,j,k)%nbrs_face(neast) = k + GridElem(i,j,k)%nbrs_wgt(neast) = CornerWgt + end do + end do + + ! Setup the remaining SOUTH, EAST, and SE neighbors + do j=2,ne + do i=1,ne-1 + nbrs_used(i,j,k,south) = .true. + nbrs_used(i,j,k,east) = .true. + nbrs_used(i,j,k,seast) = .true. 
+ + + + GridElem(i,j,k)%nbrs(south) = GridElem(i,j-1,k)%number + GridElem(i,j,k)%nbrs_face(south) = k + GridElem(i,j,k)%nbrs_wgt(south) = EdgeWgtP + GridElem(i,j,k)%nbrs(east) = GridElem(i+1,j,k)%number + GridElem(i,j,k)%nbrs_face(east) = k + GridElem(i,j,k)%nbrs_wgt(east) = EdgeWgtP + GridElem(i,j,k)%nbrs(seast) = GridElem(i+1,j-1,k)%number + GridElem(i,j,k)%nbrs_face(seast) = k + GridElem(i,j,k)%nbrs_wgt(seast) = CornerWgt + enddo + enddo + + ! Setup the remaining NORTH, WEST, and NW neighbors + do j=1,ne-1 + do i=2,ne + nbrs_used(i,j,k,north) = .true. + nbrs_used(i,j,k,west) = .true. + nbrs_used(i,j,k,nwest) = .true. + + + + GridElem(i,j,k)%nbrs(north) = GridElem(i,j+1,k)%number + GridElem(i,j,k)%nbrs_face(north) = k + GridElem(i,j,k)%nbrs_wgt(north) = EdgeWgtP + GridElem(i,j,k)%nbrs(west) = GridElem(i-1,j,k)%number + GridElem(i,j,k)%nbrs_face(west) = k + GridElem(i,j,k)%nbrs_wgt(west) = EdgeWgtP + GridElem(i,j,k)%nbrs(nwest) = GridElem(i-1,j+1,k)%number + GridElem(i,j,k)%nbrs_face(nwest) = k + GridElem(i,j,k)%nbrs_wgt(nwest) = CornerWgt + enddo + enddo + end do + + ! ====================== + ! west/east "belt" edges + ! ====================== + + do k=1,4 + do j=1,ne + nbrs_used(1,j,k,west) = .true. + nbrs_used(ne,j,k,east) = .true. + + + GridElem(1 ,j,k)%nbrs(west) = GridElem(ne,j,MODULO(2+k,4)+1)%number + GridElem(1 ,j,k)%nbrs_face(west) = MODULO(2+k,4)+1 + GridElem(1 ,j,k)%nbrs_wgt(west) = EdgeWgtP + GridElem(ne,j,k)%nbrs(east) = GridElem(1 ,j,MODULO(k ,4)+1)%number + GridElem(ne,j,k)%nbrs_face(east) = MODULO(k ,4)+1 + GridElem(ne,j,k)%nbrs_wgt(east) = EdgeWgtP + + ! Special rules for corner 'edges' + if( j /= 1) then + nbrs_used(1,j,k,swest) = .true. + nbrs_used(ne,j,k,seast) = .true. + + + GridElem(1 ,j,k)%nbrs(swest) = GridElem(ne,j-1,MODULO(2+k,4)+1)%number + GridElem(1 ,j,k)%nbrs_face(swest) = MODULO(2+k,4)+1 + GridElem(1 ,j,k)%nbrs_wgt(swest) = CornerWgt + GridElem(ne,j,k)%nbrs(seast) = GridElem(1 ,j-1,MODULO(k ,4)+1)%number + GridElem(ne,j,k)%nbrs_face(seast) = MODULO(k ,4)+1 + GridElem(ne,j,k)%nbrs_wgt(seast) = CornerWgt + endif + if( j /= ne) then + nbrs_used(1,j,k,nwest) = .true. + nbrs_used(ne,j,k,neast) = .true. + + + GridElem(1 ,j,k)%nbrs(nwest) = GridElem(ne,j+1,MODULO(2+k,4)+1)%number + GridElem(1 ,j,k)%nbrs_face(nwest) = MODULO(2+k,4)+1 + GridElem(1 ,j,k)%nbrs_wgt(nwest) = CornerWgt + GridElem(ne,j,k)%nbrs(neast) = GridElem(1 ,j+1,MODULO(k ,4)+1)%number + GridElem(ne,j,k)%nbrs_face(neast) = MODULO(k ,4)+1 + GridElem(ne,j,k)%nbrs_wgt(neast) = CornerWgt + endif + end do + end do + + + ! ================================== + ! south edge of 1 / north edge of 5 + ! ================================== + + do i=1,ne + nbrs_used(i,1,1,south) = .true. + nbrs_used(i,ne,5,north) = .true. + + GridElem(i,1 ,1)%nbrs(south) = GridElem(i,ne,5)%number + GridElem(i,1 ,1)%nbrs_face(south) = 5 + GridElem(i,1 ,1)%nbrs_wgt(south) = EdgeWgtP + GridElem(i,ne,5)%nbrs(north) = GridElem(i,1 ,1)%number + GridElem(i,ne,5)%nbrs_face(north) = 1 + GridElem(i,ne,5)%nbrs_wgt(north) = EdgeWgtP + + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,1,1,swest) = .true. + nbrs_used(i,ne,5,nwest) = .true. + + GridElem(i,1 ,1)%nbrs(swest) = GridElem(i-1,ne,5)%number + GridElem(i,1 ,1)%nbrs_face(swest) = 5 + GridElem(i,1 ,1)%nbrs_wgt(swest) = CornerWgt + GridElem(i,ne,5)%nbrs(nwest) = GridElem(i-1,1 ,1)%number + GridElem(i,ne,5)%nbrs_face(nwest) = 1 + GridElem(i,ne,5)%nbrs_wgt(nwest) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,1,1,seast) = .true. + nbrs_used(i,ne,5,neast) = .true. 
+ + GridElem(i,1 ,1)%nbrs(seast) = GridElem(i+1,ne,5)%number + GridElem(i,1 ,1)%nbrs_face(seast) = 5 + GridElem(i,1 ,1)%nbrs_wgt(seast) = CornerWgt + GridElem(i,ne,5)%nbrs(neast) = GridElem(i+1,1 ,1)%number + GridElem(i,ne,5)%nbrs_face(neast) = 1 + GridElem(i,ne,5)%nbrs_wgt(neast) = CornerWgt + endif + + end do + + ! ================================== + ! south edge of 2 / east edge of 5 + ! ================================== + + do i=1,ne + irev=ne+1-i + nbrs_used(i,1,2,south) = .true. + nbrs_used(ne,i,5,east) = .true. + + + GridElem(i,1 ,2)%nbrs(south) = GridElem(ne,irev,5)%number + GridElem(i,1 ,2)%nbrs_face(south) = 5 + GridElem(i,1 ,2)%nbrs_wgt(south) = EdgeWgtP + GridElem(ne,i,5)%nbrs(east) = GridElem(irev,1 ,2)%number + GridElem(ne,i,5)%nbrs_face(east) = 2 + GridElem(ne,i,5)%nbrs_wgt(east) = EdgeWgtP + + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,1,2,swest) = .true. + nbrs_used(ne,i,5,seast) = .true. + + + GridElem(i,1 ,2)%nbrs(swest) = GridElem(ne,irev+1,5)%number + GridElem(i,1 ,2)%nbrs_face(swest) = 5 + GridElem(i,1 ,2)%nbrs_wgt(swest) = CornerWgt + GridElem(ne,i,5)%nbrs(seast) = GridElem(irev+1,1 ,2)%number + GridElem(ne,i,5)%nbrs_face(seast) = 2 + GridElem(ne,i,5)%nbrs_wgt(seast) = CornerWgt + endif + if(i /= ne) then + nbrs_used(i,1,2,seast) = .true. + nbrs_used(ne,i,5,neast) = .true. + + + GridElem(i,1 ,2)%nbrs(seast) = GridElem(ne,irev-1,5)%number + GridElem(i,1 ,2)%nbrs_face(seast) = 5 + GridElem(i,1 ,2)%nbrs_wgt(seast) = CornerWgt + GridElem(ne,i,5)%nbrs(neast) = GridElem(irev-1,1 ,2)%number + GridElem(ne,i,5)%nbrs_face(neast) = 2 + GridElem(ne,i,5)%nbrs_wgt(neast) = CornerWgt + endif + enddo + ! ================================== + ! south edge of 3 / south edge of 5 + ! ================================== + + do i=1,ne + irev=ne+1-i + nbrs_used(i,1,3,south) = .true. + nbrs_used(i,1,5,south) = .true. + + GridElem(i,1,3)%nbrs(south) = GridElem(irev,1,5)%number + GridElem(i,1,3)%nbrs_face(south) = 5 + GridElem(i,1,3)%nbrs_wgt(south) = EdgeWgtP + GridElem(i,1,5)%nbrs(south) = GridElem(irev,1,3)%number + GridElem(i,1,5)%nbrs_face(south) = 3 + GridElem(i,1,5)%nbrs_wgt(south) = EdgeWgtP + + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,1,3,swest) = .true. + nbrs_used(i,1,5,swest) = .true. + + + GridElem(i,1,3)%nbrs(swest) = GridElem(irev+1,1,5)%number + GridElem(i,1,3)%nbrs_face(swest) = 5 + GridElem(i,1,3)%nbrs_wgt(swest) = CornerWgt + GridElem(i,1,5)%nbrs(swest) = GridElem(irev+1,1,3)%number + GridElem(i,1,5)%nbrs_face(swest) = 3 + GridElem(i,1,5)%nbrs_wgt(swest) = CornerWgt + endif + if(i /= ne) then + nbrs_used(i,1,3,seast) = .true. + nbrs_used(i,1,5,seast) = .true. + + GridElem(i,1,3)%nbrs(seast) = GridElem(irev-1,1,5)%number + GridElem(i,1,3)%nbrs_face(seast) = 5 + GridElem(i,1,3)%nbrs_wgt(seast) = CornerWgt + GridElem(i,1,5)%nbrs(seast) = GridElem(irev-1,1,3)%number + GridElem(i,1,5)%nbrs_face(seast) = 3 + GridElem(i,1,5)%nbrs_wgt(seast) = CornerWgt + endif + end do + + ! ================================== + ! south edge of 4 / west edge of 5 + ! ================================== + + do i=1,ne + irev=ne+1-i + nbrs_used(i,1,4,south) = .true. + nbrs_used(1,i,5,west) = .true. + + GridElem(i,1,4)%nbrs(south) = GridElem(1,i,5)%number + GridElem(i,1,4)%nbrs_face(south) = 5 + GridElem(i,1,4)%nbrs_wgt(south) = EdgeWgtP + GridElem(1,i,5)%nbrs(west) = GridElem(i,1,4)%number + GridElem(1,i,5)%nbrs_face(west) = 4 + GridElem(1,i,5)%nbrs_wgt(west) = EdgeWgtP + ! 
Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,1,4,swest) = .true. + nbrs_used(1,i,5,swest) = .true. + + GridElem(i,1,4)%nbrs(swest) = GridElem(1,i-1,5)%number + GridElem(i,1,4)%nbrs_face(swest) = 5 + GridElem(i,1,4)%nbrs_wgt(swest) = CornerWgt + GridElem(1,i,5)%nbrs(swest) = GridElem(i-1,1,4)%number + GridElem(1,i,5)%nbrs_face(swest) = 4 + GridElem(1,i,5)%nbrs_wgt(swest) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,1,4,seast) = .true. + nbrs_used(1,i,5,nwest) = .true. + + GridElem(i,1,4)%nbrs(seast) = GridElem(1,i+1,5)%number + GridElem(i,1,4)%nbrs_face(seast) = 5 + GridElem(i,1,4)%nbrs_wgt(seast) = CornerWgt + GridElem(1,i,5)%nbrs(nwest) = GridElem(i+1,1,4)%number + GridElem(1,i,5)%nbrs_face(nwest) = 4 + GridElem(1,i,5)%nbrs_wgt(nwest) = CornerWgt + endif + end do + + ! ================================== + ! north edge of 1 / south edge of 6 + ! ================================== + + do i=1,ne + nbrs_used(i,ne,1,north) = .true. + nbrs_used(i,1,6,south) = .true. + + + GridElem(i,ne,1)%nbrs(north) = GridElem(i,1 ,6)%number + GridElem(i,ne,1)%nbrs_face(north) = 6 + GridElem(i,ne,1)%nbrs_wgt(north) = EdgeWgtP + GridElem(i,1 ,6)%nbrs(south) = GridElem(i,ne,1)%number + GridElem(i,1 ,6)%nbrs_face(south) = 1 + GridElem(i,1 ,6)%nbrs_wgt(south) = EdgeWgtP + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,ne,1,nwest) = .true. + nbrs_used(i,1,6,swest) = .true. + + GridElem(i,ne,1)%nbrs(nwest) = GridElem(i-1,1 ,6)%number + GridElem(i,ne,1)%nbrs_face(nwest) = 6 + GridElem(i,ne,1)%nbrs_wgt(nwest) = CornerWgt + GridElem(i,1 ,6)%nbrs(swest) = GridElem(i-1,ne,1)%number + GridElem(i,1 ,6)%nbrs_face(swest) = 1 + GridElem(i,1 ,6)%nbrs_wgt(swest) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,ne,1,neast) = .true. + nbrs_used(i,1,6,seast) = .true. + + + GridElem(i,ne,1)%nbrs(neast) = GridElem(i+1,1 ,6)%number + GridElem(i,ne,1)%nbrs_face(neast) = 6 + GridElem(i,ne,1)%nbrs_wgt(neast) = CornerWgt + GridElem(i,1 ,6)%nbrs(seast) = GridElem(i+1,ne,1)%number + GridElem(i,1 ,6)%nbrs_face(seast) = 1 + GridElem(i,1 ,6)%nbrs_wgt(seast) = CornerWgt + endif + end do + + ! ================================== + ! north edge of 2 / east edge of 6 + ! ================================== + + do i=1,ne + nbrs_used(i,ne,2,north) = .true. + nbrs_used(ne,i,6,east ) = .true. + + GridElem(i,ne,2)%nbrs(north) = GridElem(ne,i,6)%number + GridElem(i,ne,2)%nbrs_face(north) = 6 + GridElem(i,ne,2)%nbrs_wgt(north) = EdgeWgtP + GridElem(ne,i,6)%nbrs(east) = GridElem(i,ne,2)%number + GridElem(ne,i,6)%nbrs_face(east) = 2 + GridElem(ne,i,6)%nbrs_wgt(east) = EdgeWgtP + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,ne,2,nwest) = .true. + nbrs_used(ne,i,6,seast) = .true. + + GridElem(i,ne,2)%nbrs(nwest) = GridElem(ne,i-1,6)%number + GridElem(i,ne,2)%nbrs_face(nwest) = 6 + GridElem(i,ne,2)%nbrs_wgt(nwest) = CornerWgt + GridElem(ne,i,6)%nbrs(seast) = GridElem(i-1,ne,2)%number + GridElem(ne,i,6)%nbrs_face(seast) = 2 + GridElem(ne,i,6)%nbrs_wgt(seast) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,ne,2,neast) = .true. + nbrs_used(ne,i,6,neast) = .true. + + + GridElem(i,ne,2)%nbrs(neast) = GridElem(ne,i+1,6)%number + GridElem(i,ne,2)%nbrs_face(neast) = 6 + GridElem(i,ne,2)%nbrs_wgt(neast) = CornerWgt + GridElem(ne,i,6)%nbrs(neast) = GridElem(i+1,ne,2)%number + GridElem(ne,i,6)%nbrs_face(neast) = 2 + GridElem(ne,i,6)%nbrs_wgt(neast) = CornerWgt + endif + end do + + ! =================================== + ! north edge of 3 / north edge of 6 + ! 
=================================== + + do i=1,ne + irev=ne+1-i + nbrs_used(i,ne,3,north) = .true. + nbrs_used(i,ne,6,north) = .true. + + GridElem(i,ne,3)%nbrs(north) = GridElem(irev,ne,6)%number + GridElem(i,ne,3)%nbrs_face(north) = 6 + GridElem(i,ne,3)%nbrs_wgt(north) = EdgeWgtP + GridElem(i,ne,6)%nbrs(north) = GridElem(irev,ne,3)%number + GridElem(i,ne,6)%nbrs_face(north) = 3 + GridElem(i,ne,6)%nbrs_wgt(north) = EdgeWgtP + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,ne,3,nwest) = .true. + nbrs_used(i,ne,6,nwest) = .true. + + GridElem(i,ne,3)%nbrs(nwest) = GridElem(irev+1,ne,6)%number + GridElem(i,ne,3)%nbrs_face(nwest) = 6 + GridElem(i,ne,3)%nbrs_wgt(nwest) = CornerWgt + GridElem(i,ne,6)%nbrs(nwest) = GridElem(irev+1,ne,3)%number + GridElem(i,ne,6)%nbrs_face(nwest) = 3 + GridElem(i,ne,6)%nbrs_wgt(nwest) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,ne,3,neast) = .true. + nbrs_used(i,ne,6,neast) = .true. + + GridElem(i,ne,3)%nbrs(neast) = GridElem(irev-1,ne,6)%number + GridElem(i,ne,3)%nbrs_face(neast) = 6 + GridElem(i,ne,3)%nbrs_wgt(neast) = CornerWgt + GridElem(i,ne,6)%nbrs(neast) = GridElem(irev-1,ne,3)%number + GridElem(i,ne,6)%nbrs_face(neast) = 3 + GridElem(i,ne,6)%nbrs_wgt(neast) = CornerWgt + endif + end do + + ! =================================== + ! north edge of 4 / west edge of 6 + ! =================================== + + do i=1,ne + irev=ne+1-i + nbrs_used(i,ne,4,north) = .true. + nbrs_used(1,i,6,west) = .true. + + GridElem(i,ne,4)%nbrs(north) = GridElem(1,irev,6)%number + GridElem(i,ne,4)%nbrs_face(north) = 6 + GridElem(i,ne,4)%nbrs_wgt(north) = EdgeWgtP + GridElem(1,i,6)%nbrs(west) = GridElem(irev,ne,4)%number + GridElem(1,i,6)%nbrs_face(west) = 4 + GridElem(1,i,6)%nbrs_wgt(west) = EdgeWgtP + ! Special rules for corner 'edges' + if( i /= 1) then + nbrs_used(i,ne,4,nwest) = .true. + nbrs_used(1,i,6,swest) = .true. + + GridElem(i,ne,4)%nbrs(nwest) = GridElem(1,irev+1,6)%number + GridElem(i,ne,4)%nbrs_face(nwest) = 6 + GridElem(i,ne,4)%nbrs_wgt(nwest) = CornerWgt + GridElem(1,i,6)%nbrs(swest) = GridElem(irev+1,ne,4)%number + GridElem(1,i,6)%nbrs_face(swest) = 4 + GridElem(1,i,6)%nbrs_wgt(swest) = CornerWgt + endif + if( i /= ne) then + nbrs_used(i,ne,4,neast) = .true. + nbrs_used(1,i,6,nwest) = .true. + + GridElem(i,ne,4)%nbrs(neast) = GridElem(1,irev-1,6)%number + GridElem(i,ne,4)%nbrs_face(neast) = 6 + GridElem(i,ne,4)%nbrs_wgt(neast) = CornerWgt + GridElem(1,i,6)%nbrs(nwest) = GridElem(irev-1,ne,4)%number + GridElem(1,i,6)%nbrs_face(nwest) = 4 + GridElem(1,i,6)%nbrs_wgt(nwest) = CornerWgt + endif + end do + + + ielem = 1 ! 
Element counter + do k=1,6 + do j=1,ne + do i=1,ne + GridVertex(ielem)%nbrs_ptr(1) = 1 + do ll=1,8 + loc = GridVertex(ielem)%nbrs_ptr(ll) + if (nbrs_used(i,j,k,ll)) then + GridVertex(ielem)%nbrs(loc) = GridElem(i,j,k)%nbrs(ll) + GridVertex(ielem)%nbrs_face(loc) = GridElem(i,j,k)%nbrs_face(ll) + GridVertex(ielem)%nbrs_wgt(loc) = GridElem(i,j,k)%nbrs_wgt(ll) + GridVertex(ielem)%nbrs_wgt_ghost(loc) = GridElem(i,j,k)%nbrs_wgt_ghost(ll) + + GridVertex(ielem)%nbrs_ptr(ll+1) = GridVertex(ielem)%nbrs_ptr(ll)+1 + else + GridVertex(ielem)%nbrs_ptr(ll+1) = GridVertex(ielem)%nbrs_ptr(ll) + end if + end do + GridVertex(ielem)%number = GridElem(i,j,k)%number + GridVertex(ielem)%processor_number = 0 + GridVertex(ielem)%SpaceCurve = GridElem(i,j,k)%SpaceCurve + ielem=ielem+1 + end do + end do + end do + + DEALLOCATE(Mesh) + do k = 1, nfaces + do j = 1, ne + do i = 1, ne + call deallocate_gridvertex_nbrs(GridElem(i,j,k)) + end do + end do + end do + DEALLOCATE(GridElem) + DEALLOCATE(nbrs_used) + + ! ======================================= + ! Generate cube graph... + ! ======================================= + + ! ============================================ + ! Setup the Grid edges (topology independent) + ! ============================================ + call initgridedge(GridEdge,GridVertex) + + ! ============================================ + ! Setup the Grid edge Indirect addresses + ! (topology dependent) + ! ============================================ + nedge = SIZE(GridEdge) + do i=1,nedge + call CubeSetupEdgeIndex(GridEdge(i)) + enddo + + end subroutine CubeTopology + + ! =================================================================== + ! CubeEdgeCount: + ! + ! Determine the number of Grid Edges + ! + ! =================================================================== + + function CubeEdgeCount() result(nedge) + use dimensions_mod, only : ne + implicit none + integer :: nedge + + if (0==ne) call endrun('Error in CubeEdgeCount: ne is zero') + nedge = nfaces*(ne*ne*nInnerElemEdge - nCornerElemEdge) + + end function CubeEdgeCount + + ! =================================================================== + ! CubeElemCount: + ! + ! Determine the number of Grid Elem + ! + ! =================================================================== + + function CubeElemCount() result(nelem) + + use dimensions_mod, only : ne + + implicit none + integer :: nelem + if (0==ne) call endrun('Error in CubeElemCount: ne is zero') + + nelem = nfaces*ne*ne + end function CubeElemCount + + subroutine CubeSetupEdgeIndex(Edge) + use gridgraph_mod, only : gridedge_t + use dimensions_mod, only : np + use control_mod, only : north, south, east, west, neast, seast, swest, nwest + type (GridEdge_t),target :: Edge + + integer :: np0,sFace,dFace + logical :: reverse + integer,allocatable :: forwardV(:), forwardP(:) + integer,allocatable :: backwardV(:), backwardP(:) + + sFace = Edge%tail_face + dFace = Edge%head_face + ! Do not reverse the indices + reverse=.FALSE. + + ! Under special conditions use index reversal + if( (SFace == south .AND. dFace == east) & + .OR. (sFace == east .AND. dFace == south) & + .OR. (sFace == north .AND. dFace == west) & + .OR. (sFace == west .AND. dFace == north) & + .OR. (sFace == south .AND. dFace == south) & + .OR. (sFace == north .AND. dFace == north) & + .OR. (sFace == east .AND. dFace == east ) & + .OR. (sFace == west .AND. dFace == west ) ) then + reverse=.TRUE. + Edge%reverse=.TRUE. + endif + + + end subroutine CubeSetupEdgeIndex + +! +! 
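+! A property worth noting for the element-local map defined further below
+! (ref2sphere_elementlocal): at a reference corner such as (a,b)=(-1,-1) the
+! bilinear weights reduce to q=(1,0,0,0), so the mapped point is exactly
+! corners3D(1), which already lies on the unit sphere. Along an edge, say b=-1,
+! only the two shared corners enter, so a shared edge maps onto the same
+! great-circle arc from either neighboring element; this is the conservation
+! requirement described just below for the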
HOMME mapping from sphere (or other manifold) to reference element +! one should be able to add any mapping here. For each new map, +! an associated dmap() routine (which computes the map derivative matrix) +! must also be written +! Note that for conservation, the parameterization of element edges must be +! identical for adjacent elements. (this is violated with HOMME's default +! equi-angular cubed-sphere mapping for non-cubed sphere grids, hence the +! need for a new map) +! + function ref2sphere(a,b, corners3D, ref_map, corners, facenum) result(sphere) + real(kind=r8) :: a,b + type (spherical_polar_t) :: sphere + type (cartesian3d_t) :: corners3D(4) + integer :: ref_map + ! only needed for gnominic maps + type (cartesian2d_t), optional :: corners(4) + integer, optional :: facenum + + + if (ref_map==0) then + if (.not. present(corners) ) & + call endrun('ref2sphere(): missing arguments for equiangular map') + sphere = ref2sphere_equiangular(a,b,corners,facenum) + elseif (ref_map==1) then + call endrun('gnomonic map not yet coded') + elseif (ref_map==2) then + sphere = ref2sphere_elementlocal(a,b,corners3D) + else + call endrun('ref2sphere(): bad value of ref_map') + endif + end function ref2sphere + +! +! map a point in the referece element to the sphere +! + function ref2sphere_equiangular(a,b, corners, face_no) result(sphere) + implicit none + real(kind=r8) :: a,b + integer,intent(in) :: face_no + type (spherical_polar_t) :: sphere + type (cartesian2d_t) :: corners(4) + ! local + real(kind=r8) :: pi,pj,qi,qj + type (cartesian2d_t) :: cart + + ! map (a,b) to the [-pi/2,pi/2] equi angular cube face: x1,x2 + ! a = gp%points(i) + ! b = gp%points(j) + pi = (1-a)/2 + pj = (1-b)/2 + qi = (1+a)/2 + qj = (1+b)/2 + cart%x = pi*pj*corners(1)%x & + + qi*pj*corners(2)%x & + + qi*qj*corners(3)%x & + + pi*qj*corners(4)%x + cart%y = pi*pj*corners(1)%y & + + qi*pj*corners(2)%y & + + qi*qj*corners(3)%y & + + pi*qj*corners(4)%y + ! map from [pi/2,pi/2] equ angular cube face to sphere: + sphere=projectpoint(cart,face_no) + + end function ref2sphere_equiangular + +!----------------------------------------------------------------------------------------- +! ELEMENT LOCAL MAP (DOES NOT USE CUBE FACES) +! unlike gnomonic equiangular map, this map will map all straight lines to +! great circle arcs +! +! map a point in the referece element to the quad on the sphere by a +! general map, without using faces the map works this way: first, fix +! a coordinate (say, X). Map 4 corners of the ref element (corners are +! (-1,-1),(-1,1),(1,1), and (1,-1)) into 4 X-components of the quad in +! physical space via a bilinear map. Do so for Y and Z components as +! well. It produces a map: Ref element (\xi, \eta) ---> A quad in XYZ +! (ess, a piece of a twisted plane) with vertices of our target quad. though +! the quad lies in a plane and not on the sphere manifold, its +! vertices belong to the sphere (by initial conditions). The last step +! is to utilize a map (X,Y,X) --> (X,Y,Z)/SQRT(X**2+Y**2+Z**2) to +! project the quad to the unit sphere. +! ----------------------------------------------------------------------------------------- + function ref2sphere_elementlocal(a,b, corners3D) result(sphere) + use element_mod, only : element_t + implicit none + real(kind=r8) :: a,b + type (cartesian3d_t) :: corners3D(4) + type (spherical_polar_t) :: sphere + real(kind=r8) :: q(4) ! 
local + + q(1)=(1-a)*(1-b); q(2)=(1+a)*(1-b); q(3)=(1+a)*(1+b); q(4)=(1-a)*(1+b); + q=q/4.0_r8; + sphere = ref2sphere_elementlocal_q(q,corners3D) + end function ref2sphere_elementlocal + + function ref2sphere_elementlocal_q(q, corners) result(sphere) + implicit none + real(kind=r8) :: q(4) + type (spherical_polar_t) :: sphere + type (cartesian3d_t) :: corners(4) + ! local + type (cartesian3d_t) :: cart + real(kind=r8) :: c(3,4), xx(3), r + integer :: i + +!3D corners fo the quad + c(1,1)=corners(1)%x; c(2,1)=corners(1)%y; c(3,1)=corners(1)%z; + c(1,2)=corners(2)%x; c(2,2)=corners(2)%y; c(3,2)=corners(2)%z; + c(1,3)=corners(3)%x; c(2,3)=corners(3)%y; c(3,3)=corners(3)%z; + c(1,4)=corners(4)%x; c(2,4)=corners(4)%y; c(3,4)=corners(4)%z; + +!physical point on a plane (sliced), not yet on the sphere + do i=1,3 + xx(i)=sum(c(i,:)*q(:)) + end do + +!distance from the plane point to the origin + r = sqrt(xx(1)**2+xx(2)**2+xx(3)**2) + +!projecting the plane point to the sphere + cart%x=xx(1)/r; cart%y=xx(2)/r; cart%z=xx(3)/r; + +!XYZ coords of the point to lon/lat + sphere=change_coordinates(cart) + + end function ref2sphere_elementlocal_q + +end module cube_mod diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 new file mode 100644 index 00000000..682afeae --- /dev/null +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -0,0 +1,2525 @@ +module derivative_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_abortutils, only: endrun + use dimensions_mod, only : np, nc, npdg, nelemd, nlev + use quadrature_mod, only : quadrature_t, gauss, gausslobatto,legendre, jacobi + ! needed for spherical differential operators: + use physconst, only: ra + use element_mod, only : element_t + use control_mod, only : hypervis_scaling, hypervis_power + use perf_mod, only : t_startf, t_stopf + +implicit none +private + + type, public :: derivative_t + real (kind=r8) :: Dvv(np,np) + real (kind=r8) :: Dvv_diag(np,np) + real (kind=r8) :: Dvv_twt(np,np) + real (kind=r8) :: Mvv_twt(np,np) ! diagonal matrix of GLL weights + real (kind=r8) :: Mfvm(np,nc+1) + real (kind=r8) :: Cfvm(np,nc) + real (kind=r8) :: legdg(np,np) + end type derivative_t + + type, public :: derivative_stag_t + real (kind=r8) :: D(np,np) + real (kind=r8) :: M(np,np) + real (kind=r8) :: Dpv(np,np) + real (kind=r8) :: D_twt(np,np) + real (kind=r8) :: M_twt(np,np) + real (kind=r8) :: M_t(np,np) + end type derivative_stag_t + + real (kind=r8), allocatable :: integration_matrix(:,:) + real (kind=r8), allocatable :: integration_matrix_physgrid(:,:) + real (kind=r8), allocatable :: boundary_interp_matrix(:,:,:) + +! ====================================== +! Public Interfaces +! 
====================================== + + public :: subcell_integration + public :: subcell_dss_fluxes + public :: subcell_div_fluxes + public :: subcell_Laplace_fluxes + public :: allocate_subcell_integration_matrix_cslam !for consistent se-cslam algorithm + public :: allocate_subcell_integration_matrix_physgrid !for integration se basis functions over physgrid control volumes + + public :: derivinit + + public :: gradient + public :: gradient_wk + public :: vorticity + public :: divergence + + public :: interpolate_gll2fvm_corners + public :: interpolate_gll2fvm_points + public :: remap_phys2gll + + + interface divergence + module procedure divergence_nonstag + module procedure divergence_stag + end interface + + interface gradient + module procedure gradient_str_nonstag + module procedure gradient_str_stag + end interface + + interface gradient_wk + module procedure gradient_wk_nonstag + module procedure gradient_wk_stag + end interface + + public :: v2pinit + + private :: dmatinit + private :: dvvinit + private :: dpvinit + +! these routines compute spherical differential operators as opposed to +! the gnomonic coordinate operators above. Vectors (input or output) +! are always expressed in lat-lon coordinates +! +! note that weak derivatives (integrated by parts form) can be defined using +! contra or co-variant test functions, so +! + public :: gradient_sphere + public :: gradient_sphere_wk_testcov + public :: gradient_sphere_wk_testcontra ! only used for debugging + public :: ugradv_sphere + public :: vorticity_sphere + public :: vorticity_sphere_diag + public :: divergence_sphere + public :: curl_sphere + public :: curl_sphere_wk_testcov + public :: divergence_sphere_wk + public :: laplace_sphere_wk + public :: vlaplace_sphere_wk + public :: vlaplace_sphere_wk_mol + public :: element_boundary_integral + public :: edge_flux_u_cg + public :: gll_to_dgmodal + public :: dgmodal_to_gll + + public :: limiter_optim_iter_full + +contains + +! ========================================== +! derivinit: +! +! Initialize the matrices for taking +! derivatives and interpolating +! ========================================== + + subroutine derivinit(deriv,fvm_corners, fvm_points) + type (derivative_t) :: deriv + real (kind=r8),optional :: fvm_corners(nc+1) + real (kind=r8),optional :: fvm_points(nc) + + ! Local variables + type (quadrature_t) :: gp ! Quadrature points and weights on pressure grid + + real (kind=r8) :: dmat(np,np) + real (kind=r8) :: dpv(np,np) + real (kind=r8) :: v2p(np,np) + real (kind=r8) :: p2v(np,np) + real (kind=r8) :: dvv(np,np) + real (kind=r8) :: dvv_diag(np,np) + real (kind=r8) :: v2v(np,np) + real (kind=r8) :: xnorm + integer i,j + + ! ============================================ + ! initialize matrices in r8 precision + ! and transfer results into r8 + ! floating point precision + ! ============================================ + + gp=gausslobatto(np) + + ! Legendre polynomials of degree npdg-1, on the np GLL grid: + if (npdg>np) call endrun( 'FATAL ERROR: npdg>np') + if (npdg>0 .and. npdgp +! derivative matrix (dmat) +! ======================================= + + subroutine dmatinit(dmat) + + real (kind=r8) :: dmat(np,np) + + ! Local variables + + type (quadrature_t) :: gll + type (quadrature_t) :: gs + + integer i,j + real(kind=r8) fact,f1,f2 + real(kind=r8) func0,func1 + real(kind=r8) dis,c0,c1 + + real(kind=r8) :: leg(np,np) + real(kind=r8) :: jac(0:np-1) + real(kind=r8) :: djac(0:np-1) + + c0 = 0.0_r8 + c1 = 1.0_r8 + + gll= gausslobatto(np) + gs = gauss(np) + + ! 
============================================================= + ! Compute Legendre polynomials on Gauss-Lobatto grid (velocity) + ! ============================================================= + + do i=1,np + leg(:,i) = legendre(gll%points(i),np-1) + end do + + ! ================================================================ + ! Derivatives of velocity cardinal functions on pressure grid + ! d(i,j) = D(j,i) = D' (D-transpose) since D(i,j) = dh_j(x_i)/dx + ! ================================================================ + + fact = np*(np-1) + + do j=1,np + call jacobi(np-1,gs%points(j),c0,c0,jac(0:np-1),djac(0:np-1)) + func0 = jac(np-1) + func1 = djac(np-1) + f1 = fact*func0 + f2 = (c1 - gs%points(j))*(c1 + gs%points(j)) * func1 + do i = 1, np + if ( gs%points(j) /= gll%points(i) ) then + dis = gs%points(j) - gll%points(i) + dmat(i,j) = func0 / ( leg(np,i)*dis ) + f2 / (fact*leg(np,i)*dis*dis) + else + dmat(i,j) = c0 + endif + end do + end do + + deallocate(gll%points) + deallocate(gll%weights) + + deallocate(gs%points) + deallocate(gs%weights) + +end subroutine dmatinit + +! ======================================= +! dpvinit: +! +! Compute rectangular p->v +! derivative matrix (dmat) +! for strong gradients +! ======================================= + +subroutine dpvinit(dmat) + +real (kind=r8) :: dmat(np,np) + +! Local variables + +type (quadrature_t) :: gll +type (quadrature_t) :: gs + +integer i,j +real(kind=r8) dis,c0,c1 + +real(kind=r8) :: legv(0:np,np) +real(kind=r8) :: dlegv(0:np,np) + +real(kind=r8) :: leg(0:np) +real(kind=r8) :: dleg(0:np) + +c0 = 0.0_r8 +c1 = 1.0_r8 + +gll= gausslobatto(np) +gs = gauss(np) + +! ============================================================= +! Compute Legendre polynomials on Gauss-Lobatto grid (velocity) +! ============================================================= + +do i=1,np +call jacobi(np,gll%points(i),c0,c0,legv(0:np,i),dlegv(0:np,i)) +end do + +! ================================================================ +! Derivatives of velocity cardinal functions on pressure grid +! d(i,j) = D(j,i) = D' (D-transpose) since D(i,j) = dh_j(x_i)/dx +! ================================================================ + + do j=1,np + call jacobi(np,gs%points(j),c0,c0,leg(0:np),dleg(0:np)) + do i = 1, np + if ( gs%points(j) /= gll%points(i) ) then + dis = gll%points(i) - gs%points(j) + dmat(j,i) = dlegv(np,i)/( dleg(np)*dis ) - legv(np,i)/ (dleg(np)*dis*dis) + else + dmat(j,i) = c0 + endif + end do + end do + + deallocate(gll%points) + deallocate(gll%weights) + + deallocate(gs%points) + deallocate(gs%weights) + + end subroutine dpvinit + +! ======================================= +! v2pinit: +! Compute interpolation matrix from gll(1:n1) -> gs(1:n2) +! ======================================= + subroutine v2pinit(v2p,gll,gs,n1,n2) + integer :: n1,n2 + real(kind=r8) :: v2p(n1,n2) + real(kind=r8) :: v2p_new(n1,n2) + real(kind=r8) :: gll(n1),gs(n2) + ! Local variables + + integer i,j,k,m,l + real(kind=r8) fact,f1, sum + real(kind=r8) func0,func1 + + real(kind=r8) :: leg(n1,n1) + real(kind=r8) :: jac(0:n1-1) + real(kind=r8) :: djac(0:n1-1) + real(kind=r8) :: c0,c1 + + type(quadrature_t) :: gll_pts + real(kind=r8) :: leg_out(n1,n2) + real(kind=r8) :: gamma(n1) + + c0 = 0.0_r8 + c1 = 1.0_r8 + + ! ============================================================== + ! Compute Legendre polynomials on Gauss-Lobatto grid (velocity) + ! 
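+   ! The matrix assembled below acts as an interpolation operator: for a field
+   ! f sampled at the GLL points gll(1:n1), the value at the output point gs(j)
+   ! is recovered as
+   !    f(gs(j)) ~= sum_{l=1,n1} v2p(l,j) * f(gll(l)),
+   ! i.e. f is expanded in Legendre polynomials (normalization factors gamma)
+   ! and the expansion is re-evaluated at the output points.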
============================================================== + + fact = -n1*(n1-1) + do i=1,n1 + leg(:,i) = legendre(gll(i),n1-1) + leg(n1,i) = fact * leg(n1,i) + end do + + ! =================================================== + ! Velocity cardinal functions on pressure grid + ! =================================================== + ! NEW VERSION, with no division by (gs(j)-gll(i)): + + ! compute legendre polynomials at output points: + gll_pts = gausslobatto(n1) + + fact = -n1*(n1-1) + do i=1,n2 + leg_out(:,i) = legendre(gs(i),n1-1) + leg_out(n1,i) = fact * leg_out(n1,i) + end do + + + ! compute gamma: (normalization factor for inv(leg) + do m=1,n1 + gamma(m)=0 + do i=1,n1 + gamma(m)=gamma(m)+leg(m,i)*leg(m,i)*gll_pts%weights(i) + enddo + gamma(m)=1/gamma(m) + enddo + + ! compute product of leg_out * inv(leg): + do j=1,n2 ! this should be fvm points + do l=1,n1 ! this should be GLL points + sum=0 + do k=1,n1 ! number of polynomials = number of GLL points + sum=sum + leg_out(k,j)*gamma(k)*leg(k,l) + enddo + v2p_new(l,j) = gll_pts%weights(l)*sum + enddo + enddo + deallocate(gll_pts%points) + deallocate(gll_pts%weights) + + v2p=v2p_new + end subroutine v2pinit + + + +! ======================================= +! dvvinit: +! +! Compute rectangular v->v +! derivative matrix (dvv) +! ======================================= + + subroutine dvvinit(dvv,gll) + + real(kind=r8) :: dvv(np,np) + type (quadrature_t) :: gll + + ! Local variables + + real(kind=r8) :: leg(np,np) + real(kind=r8) :: c0,c1,c4 + + integer i,j + + c0 = 0.0_r8 + c1 = 1.0_r8 + c4 = 4.0_r8 + + do i=1,np + leg(:,i) = legendre(gll%points(i),np-1) + end do + + dvv(:,:) = c0 + do j=1,np + do i=1,j-1 + dvv(j,i) = (c1/(gll%points(i)-gll%points(j)))*leg(np,i)/leg(np,j) + end do + dvv(j,j) = c0 + do i=j+1,np + dvv(j,i) = (c1/(gll%points(i)-gll%points(j)))*leg(np,i)/leg(np,j) + end do + end do + + + dvv(np,np) = + np*(np-1)/c4 + dvv(1,1) = - np*(np-1)/c4 + + end subroutine dvvinit + +! ================================================ +! divergence_stag: +! +! Compute divergence (maps v grid -> p grid) +! ================================================ + + subroutine divergence_stag(v,deriv,div) + + real(kind=r8), intent(in) :: v(np,np,2) + type (derivative_stag_t), intent(in) :: deriv + real(kind=r8), intent(out) :: div(np,np) + + ! Local + + integer i + integer j + integer l + + real(kind=r8) sumx00 + real(kind=r8) sumy00 + + real(kind=r8) :: vtemp(np,np,2) + + + do j=1,np + do l=1,np + + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%D(i,l )*v(i,j ,1) + sumy00 = sumy00 + deriv%M(i,l )*v(i,j ,2) + enddo + vtemp(j ,l ,1) = sumx00 + vtemp(j ,l ,2) = sumy00 + enddo + enddo + do j=1,np + do i=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%M(l,j )*vtemp(l,i ,1) + sumy00 = sumy00 + deriv%D(l,j )*vtemp(l,i ,2) + enddo + div(i ,j ) = sumx00 + sumy00 + + enddo + enddo + + end subroutine divergence_stag + +! ================================================ +! divergence_nonstag: +! +! Compute divergence (maps v->v) +! ================================================ + + subroutine divergence_nonstag(v,deriv,div) + + real(kind=r8), intent(in) :: v(np,np,2) + type (derivative_t), intent(in) :: deriv + + real(kind=r8), intent(out) :: div(np,np) + + ! 
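+    ! The nested loops below apply the GLL derivative matrix Dvv in
+    ! tensor-product form:
+    !    du/dx at (l,j) = sum_i Dvv(i,l)*v(i,j,1)
+    !    dv/dy at (j,l) = sum_i Dvv(i,l)*v(j,i,2)
+    ! so the divergence costs O(np**3) operations per field.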
Local + + integer i + integer j + integer l + + real(kind=r8) :: dudx00 + real(kind=r8) :: dvdy00 + + real(kind=r8) :: vvtemp(np,np) + + do j=1,np + do l=1,np + dudx00=0.0d0 + dvdy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dudx00 = dudx00 + deriv%Dvv(i,l )*v(i,j ,1) + dvdy00 = dvdy00 + deriv%Dvv(i,l )*v(j ,i,2) + end do + + div(l ,j ) = dudx00 + vvtemp(j ,l ) = dvdy00 + end do + end do + do j=1,np + do i=1,np + div(i,j)=div(i,j)+vvtemp(i,j) + end do + end do + + end subroutine divergence_nonstag + +! ================================================ +! gradient_wk_stag: +! +! Compute the weak form gradient: +! maps scalar field on the pressure grid to the +! velocity grid +! ================================================ + + function gradient_wk_stag(p,deriv) result(dp) + + type (derivative_stag_t), intent(in) :: deriv + real(kind=r8), intent(in) :: p(np,np) + + real(kind=r8) :: dp(np,np,2) + + ! Local + + integer i + integer j + integer l + + real(kind=r8) sumx00,sumx01 + real(kind=r8) sumy00,sumy01 + + real(kind=r8) :: vtempt(np,np,2) + + do j=1,np + do l=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%D_twt(i,l )*p(i,j ) + sumy00 = sumy00 + deriv%M_twt(i,l )*p(i,j ) + enddo + vtempt(j ,l ,1) = sumx00 + vtempt(j ,l ,2) = sumy00 + enddo + enddo + do j=1,np + do i=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%M_twt(l,j )*vtempt(l,i ,1) + sumy00 = sumy00 + deriv%D_twt(l,j )*vtempt(l,i ,2) + enddo + dp(i ,j ,1) = sumx00 + dp(i ,j ,2) = sumy00 + enddo + enddo + + + end function gradient_wk_stag + +! ================================================ +! gradient_wk_nonstag: +! +! Compute the weak form gradient: +! maps scalar field on the Gauss-Lobatto grid to the +! weak gradient on the Gauss-Lobbatto grid +! ================================================ + + function gradient_wk_nonstag(p,deriv) result(dp) + + type (derivative_t), intent(in) :: deriv + real(kind=r8), intent(in) :: p(np,np) + + real(kind=r8) :: dp(np,np,2) + + ! Local + + integer i + integer j + integer l + + real(kind=r8) sumx00 + real(kind=r8) sumy00 + + real(kind=r8) :: vvtempt(np,np,2) + + do j=1,np + do l=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%Dvv_twt(i,l )*p(i,j ) + sumy00 = sumy00 + deriv%Mvv_twt(i,l )*p(i,j ) + end do + vvtempt(j ,l ,1) = sumx00 + vvtempt(j ,l ,2) = sumy00 + end do + end do + + do j=1,np + do i=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%Mvv_twt(l,j )*vvtempt(l,i ,1) + sumy00 = sumy00 + deriv%Dvv_twt(l,j )*vvtempt(l,i ,2) + end do + dp(i ,j ,1) = sumx00 + dp(i ,j ,2) = sumy00 + end do + end do + end function gradient_wk_nonstag + +! ================================================ +! gradient_str_stag: +! +! Compute the *strong* form gradient: +! maps scalar field on the pressure grid to the +! velocity grid +! ================================================ + + subroutine gradient_str_stag(p,deriv,dp) + + type (derivative_stag_t), intent(in) :: deriv + real(kind=r8), intent(in) :: p(np,np) + + real(kind=r8), intent(out) :: dp(np,np,2) + + ! 
Local + + integer i + integer j + integer l + + real(kind=r8) sumx00 + real(kind=r8) sumy00 + + real(kind=r8) :: vtempt(np,np,2) + do j=1,np + do l=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%Dpv(i,l )*p(i,j ) + sumy00 = sumy00 + deriv%M_t(i,l )*p(i,j ) + enddo + vtempt(j ,l ,1) = sumx00 + vtempt(j ,l ,2) = sumy00 + enddo + enddo + do j=1,np + do i=1,np + sumx00=0.0d0 + sumy00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%M_t(l,j )*vtempt(l,i ,1) + sumy00 = sumy00 + deriv%Dpv(l,j )*vtempt(l,i ,2) + enddo + dp(i ,j ,1) = sumx00 + dp(i ,j ,2) = sumy00 + enddo + enddo + + end subroutine gradient_str_stag + +! ================================================ +! gradient_str_nonstag: +! +! Compute the *strong* gradient on the velocity grid +! of a scalar field on the velocity grid +! ================================================ + + subroutine gradient_str_nonstag(s,deriv,ds) + + type (derivative_t), intent(in) :: deriv + real(kind=r8), intent(in) :: s(np,np) + real(kind=r8), intent(out) :: ds(np,np,2) + + integer i + integer j + integer l + real(kind=r8) :: dsdx00,dsdx01 + real(kind=r8) :: dsdy00,dsdy01 + do j=1,np + do l=1,np + dsdx00=0.0d0 + dsdy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) + dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) + end do + ds(l ,j ,1) = dsdx00 + ds(j ,l ,2) = dsdy00 + end do + end do + end subroutine gradient_str_nonstag + +! ================================================ +! vorticity: +! +! Compute the vorticity of the velocity field on the +! velocity grid +! ================================================ + + subroutine vorticity(v,deriv,vort) + + type (derivative_t), intent(in) :: deriv + real(kind=r8), intent(in) :: v(np,np,2) + + real(kind=r8), intent(out) :: vort(np,np) + + integer i + integer j + integer l + + real(kind=r8) :: dvdx00,dvdx01 + real(kind=r8) :: dudy00,dudy01 + + real(kind=r8) :: vvtemp(np,np) + do j=1,np + do l=1,np + dudy00=0.0d0 + dvdx00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dvdx00 = dvdx00 + deriv%Dvv(i,l )*v(i,j ,2) + dudy00 = dudy00 + deriv%Dvv(i,l )*v(j ,i,1) + enddo + vort(l ,j ) = dvdx00 + vvtemp(j ,l ) = dudy00 + enddo + enddo + do j=1,np + do i=1,np + vort(i,j)=vort(i,j)-vvtemp(i,j) + end do + end do + + end subroutine vorticity + +! ================================================ +! interpolate_gll2fvm_points: +! +! shape funtion interpolation from data on GLL grid to cellcenters on physics grid +! Author: Christoph Erath +! ================================================ + function interpolate_gll2fvm_points(v,deriv) result(p) + + real(kind=r8), intent(in) :: v(np,np) + type (derivative_t) :: deriv + real(kind=r8) :: p(nc,nc) + + ! Local + integer i + integer j + integer l + + real(kind=r8) sumx00,sumx01 + real(kind=r8) sumx10,sumx11 + real(kind=r8) vtemp(np,nc) + + do j=1,np + do l=1,nc + sumx00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%Cfvm(i,l )*v(i,j ) + enddo + vtemp(j ,l) = sumx00 + enddo + enddo + do j=1,nc + do i=1,nc + sumx00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%Cfvm(l,j )*vtemp(l,i) + enddo + p(i ,j ) = sumx00 + enddo + enddo + end function interpolate_gll2fvm_points +! ================================================ +! interpolate_gll2fvm_corners: +! +! shape funtion interpolation from data on GLL grid to physics grid +! +! 
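+! A call of the form
+!    p(1:nc+1,1:nc+1) = interpolate_gll2fvm_corners(v, deriv)
+! evaluates the GLL shape functions at the nc+1 corner points in each
+! direction using the precomputed deriv%Mfvm matrix, applied as a tensor
+! product exactly as in interpolate_gll2fvm_points above.
+!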
================================================ + function interpolate_gll2fvm_corners(v,deriv) result(p) + + real(kind=r8), intent(in) :: v(np,np) + type (derivative_t), intent(in) :: deriv + real(kind=r8) :: p(nc+1,nc+1) + + ! Local + integer i + integer j + integer l + + real(kind=r8) sumx00,sumx01 + real(kind=r8) sumx10,sumx11 + real(kind=r8) vtemp(np,nc+1) + + do j=1,np + do l=1,nc+1 + sumx00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + sumx00 = sumx00 + deriv%Mfvm(i,l )*v(i,j ) + enddo + vtemp(j ,l) = sumx00 + enddo + enddo + do j=1,nc+1 + do i=1,nc+1 + sumx00=0.0d0 +!DIR$ UNROLL(NP) + do l=1,np + sumx00 = sumx00 + deriv%Mfvm(l,j )*vtemp(l,i) + enddo + p(i ,j ) = sumx00 + enddo + enddo + end function interpolate_gll2fvm_corners + + +! ================================================ +! remap_phys2gll: +! +! interpolate to an equally spaced (in reference element coordinate system) +! "physics" grid to the GLL grid +! +! 1st order, monotone, conservative +! MT initial version 2013 +! ================================================ + function remap_phys2gll(pin,nphys) result(pout) + integer :: nphys + real(kind=r8), intent(in) :: pin(nphys*nphys) + real(kind=r8) :: pout(np,np) + + ! Local + integer, save :: nphys_init=0 + integer, save :: nintersect + real(kind=r8),save,pointer :: acell(:) ! arrivial cell index of i'th intersection + real(kind=r8),save,pointer :: dcell(:) ! departure cell index of i'th intersection + real(kind=r8),save,pointer :: delta(:) ! length of i'th intersection + real(kind=r8),save,pointer :: delta_a(:) ! length of arrival cells + integer in_i,in_j,ia,ja,id,jd,count,i,j + logical :: found + + real(kind=r8) :: tol = 1.0e-13_r8 + real(kind=r8) :: weight,x1,x2,dx + real(kind=r8) :: gll_edges(np+1),phys_edges(nphys+1) + type(quadrature_t) :: gll_pts + if (nphys_init/=nphys) then + ! setup (most be done on masterthread only) since all data is static + ! MT: move barrier inside if loop - we dont want a barrier every regular call +!OMP BARRIER +!OMP MASTER + nphys_init=nphys + ! find number of intersections + nintersect = np+nphys-1 ! max number of possible intersections + allocate(acell(nintersect)) + allocate(dcell(nintersect)) + allocate(delta(nintersect)) + allocate(delta_a(np)) + + ! compute phys grid cell edges on [-1,1] + do i=1,nphys+1 + dx = 2d0/nphys + phys_edges(i)=-1 + (i-1)*dx + enddo + + ! compute GLL cell edges on [-1,1] + gll_pts = gausslobatto(np) + gll_edges(1)=-1 + do i=2,np + gll_edges(i) = gll_edges(i-1) + gll_pts%weights(i-1) + enddo + gll_edges(np+1)=1 + delta_a=gll_pts%weights + deallocate(gll_pts%points) + deallocate(gll_pts%weights) + + count=0 + x1=-1 + do while ( abs(x1-1) > tol ) + ! find point x2 closet to x1 and x2>x1: + x2 = 1.1_r8 + do ia=2,np+1 + if (gll_edges(ia)>x1) then + if ( ( gll_edges(ia)-x1) < (x2-x1) ) then + x2=gll_edges(ia) + endif + endif + enddo + do id=2,nphys+1 + if (phys_edges(id)>x1) then + if ( ( phys_edges(id)-x1) < (x2-x1) ) then + x2=phys_edges(id) + endif + endif + enddo + print *,'x2=',x2 + if (x2>1+tol) call endrun('ERROR: did not find next intersection point') + if (x2<=x1) call endrun('ERROR: next intersection point did not advance') + count=count+1 + if (count>nintersect) call endrun('ERROR: search failuer: nintersect was too small') + delta(count)=x2-x1 + + found=.false. + do ia=1,np + if (gll_edges(ia) <= x1+tol .and. x2-tol <= gll_edges(ia+1)) then + found=.true. + acell(count)=ia + endif + enddo + if (.not. found) call endrun('ERROR: interval search problem') + + found=.false. 
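+          ! Illustration (not executed): for np=4 and nphys=3 the GLL cell
+          ! edges are -1, -5/6, 0, 5/6, 1 and the phys-grid edges are
+          ! -1, -1/3, 1/3, 1, giving np+nphys-1 = 6 intersection intervals
+          ! with breakpoints -1, -5/6, -1/3, 0, 1/3, 5/6, 1; acell/dcell
+          ! record the GLL cell and phys cell containing each interval.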
+ do id=1,nphys + if (phys_edges(id) <= x1+tol .and. x2-tol <= phys_edges(id+1)) then + found=.true. + dcell(count)=id + endif + enddo + if (.not. found) call endrun('ERROR: interval search problem') + x1=x2 + enddo + ! reset to actual number of intersections + nintersect=count +!OMP END MASTER +!OMP BARRIER + endif + + pout=0 + do in_i = 1,nintersect + do in_j = 1,nintersect + ia = acell(in_i) + ja = acell(in_j) + id = dcell(in_i) + jd = dcell(in_j) + ! mass in intersection region: value*area_intersect + ! value_arrival = value*area_intersect/area_arrival + weight = ( delta(in_i)*delta(in_j) ) / ( delta_a(ia)*delta_a(ja)) + ! accumulate contribution from each intersection region: + pout(ia,ja) = pout(ia,ja) + weight*pin(id+(jd-1)*nphys) + enddo + enddo + + end function remap_phys2gll + +!---------------------------------------------------------------- + +!DIR$ ATTRIBUTES FORCEINLINE :: gradient_sphere + subroutine gradient_sphere(s,deriv,Dinv,ds) +! +! input s: scalar +! output ds: spherical gradient of s, lat-lon coordinates +! + + type (derivative_t), intent(in) :: deriv + real(kind=r8), intent(in), dimension(np,np,2,2) :: Dinv + real(kind=r8), intent(in) :: s(np,np) + real(kind=r8), intent(out) :: ds(np,np,2) + + integer i + integer j + integer l + + real(kind=r8) :: dsdx00, dsdy00 + real(kind=r8) :: v1(np,np),v2(np,np) + + do j=1,np + do l=1,np + dsdx00=0.0d0 + dsdy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) + dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) + end do + v1(l ,j ) = dsdx00*ra + v2(j ,l ) = dsdy00*ra + end do + end do + ! convert covarient to latlon + do j=1,np + do i=1,np + ds(i,j,1)=Dinv(i,j,1,1)*v1(i,j) + Dinv(i,j,2,1)*v2(i,j) + ds(i,j,2)=Dinv(i,j,1,2)*v1(i,j) + Dinv(i,j,2,2)*v2(i,j) + enddo + enddo + + end subroutine gradient_sphere + + + function curl_sphere_wk_testcov(s,deriv,elem) result(ds) +! +! integrated-by-parts gradient, w.r.t. COVARIANT test functions +! input s: scalar (assumed to be s*khat) +! output ds: weak curl, lat/lon coordinates +! +! starting with: +! PHIcov1 = (PHI,0) covariant vector +! PHIcov2 = (0,PHI) covariant vector +! +! ds1 = integral[ PHIcov1 dot curl(s*khat) ] +! ds2 = integral[ PHIcov2 dot curl(s*khat) ] +! integrate by parts: +! ds1 = integral[ vor(PHIcov1) * s ] +! ds2 = integral[ vor(PHIcov1) * s ] +! +! PHIcov1 = (PHI^mn,0) +! PHIcov2 = (0,PHI^mn) +! vorticity() acts on covariant vectors: +! ds1 = sum wij g s_ij 1/g ( (PHIcov1_2)_x - (PHIcov1_1)_y ) +! = -sum wij s_ij d/dy (PHI^mn ) +! for d/dy component, only sum over i=m +! = -sum w_mj s_mj d( PHI^n)(j) +! j +! +! ds2 = sum wij g s_ij 1/g ( (PHIcov2_2)_x - (PHIcov2_1)_y ) +! = +sum wij s_ij d/dx (PHI^mn ) +! for d/dx component, only sum over j=n +! = +sum w_in s_in d( PHI^m)(i) +! i +! + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: s(np,np) + + real(kind=r8) :: ds(np,np,2) + + integer i,j,l,m,n + real(kind=r8) :: dscontra(np,np,2) + + dscontra=0 + do n=1,np + do m=1,np +!DIR$ UNROLL(NP) + do j=1,np + ! phi(n)_y sum over second index, 1st index fixed at m + dscontra(m,n,1)=dscontra(m,n,1)-(elem%mp(m,j)*s(m,j)*deriv%Dvv(n,j) )*ra + ! phi(m)_x sum over first index, second index fixed at n + dscontra(m,n,2)=dscontra(m,n,2)+(elem%mp(j,n)*s(j,n)*deriv%Dvv(m,j) )*ra + enddo + enddo + enddo + + ! 
convert contra -> latlon + do j=1,np + do i=1,np + ds(i,j,1)=(elem%D(i,j,1,1)*dscontra(i,j,1) + elem%D(i,j,1,2)*dscontra(i,j,2)) + ds(i,j,2)=(elem%D(i,j,2,1)*dscontra(i,j,1) + elem%D(i,j,2,2)*dscontra(i,j,2)) + enddo + enddo + end function curl_sphere_wk_testcov + + + function gradient_sphere_wk_testcov(s,deriv,elem) result(ds) +! +! integrated-by-parts gradient, w.r.t. COVARIANT test functions +! input s: scalar +! output ds: weak gradient, lat/lon coordinates +! ds = - integral[ div(PHIcov) s ] +! +! PHIcov1 = (PHI^mn,0) +! PHIcov2 = (0,PHI^mn) +! div() acts on contra components, so convert test function to contra: +! PHIcontra1 = metinv PHIcov1 = (a^mn,b^mn)*PHI^mn +! a = metinv(1,1) b=metinv(2,1) +! +! ds1 = sum wij g s_ij 1/g ( g a PHI^mn)_x + ( g b PHI^mn)_y ) +! = sum wij s_ij ag(m,n) d/dx( PHI^mn ) + bg(m,n) d/dy( PHI^mn) +! i,j +! for d/dx component, only sum over j=n +! = sum w_in s_in ag(m,n) d( PHI^m)(i) +! i +! for d/dy component, only sum over i=m +! = sum w_mj s_mj bg(m,n) d( PHI^n)(j) +! j +! +! +! This formula is identical to gradient_sphere_wk_testcontra, except that +! g(m,n) is replaced by a(m,n)*g(m,n) +! and we have two terms for each componet of ds +! +! + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: s(np,np) + + real(kind=r8) :: ds(np,np,2) + + integer i,j,l,m,n + real(kind=r8) :: dscontra(np,np,2) + + + dscontra=0 + do n=1,np + do m=1,np +!DIR$ UNROLL(NP) + do j=1,np + dscontra(m,n,1)=dscontra(m,n,1)-(& + (elem%mp(j,n)*elem%metinv(m,n,1,1)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& + (elem%mp(m,j)*elem%metinv(m,n,2,1)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & + ) *ra + + dscontra(m,n,2)=dscontra(m,n,2)-(& + (elem%mp(j,n)*elem%metinv(m,n,1,2)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& + (elem%mp(m,j)*elem%metinv(m,n,2,2)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & + ) *ra + enddo + enddo + enddo + ! convert contra -> latlon + do j=1,np + do i=1,np + ds(i,j,1)=(elem%D(i,j,1,1)*dscontra(i,j,1) + elem%D(i,j,1,2)*dscontra(i,j,2)) + ds(i,j,2)=(elem%D(i,j,2,1)*dscontra(i,j,1) + elem%D(i,j,2,2)*dscontra(i,j,2)) + enddo + enddo + + end function gradient_sphere_wk_testcov + + + function gradient_sphere_wk_testcontra(s,deriv,elem) result(ds) +! +! integrated-by-parts gradient, w.r.t. CONTRA test functions +! input s: scalar +! output ds: weak gradient, lat/lon coordinates +! +! integral[ div(phivec) s ] = sum spheremp()* divergence_sphere(phivec) *s +! ds1 = above formual with phivec=(PHI,0) in CONTRA coordinates +! ds2 = above formual with phivec=(0,PHI) in CONTRA coordinates +! +! PHI = (phi,0) +! s1 = sum w_ij s_ij g_ij 1/g_ij ( g_ij PHI^mn )x +! = sum w_ij s_ij g_mn dx(PHI^mn)_ij +! ij +! because x derivative is zero for j<>n, only have to sum over j=n +! s1(m,n) = sum w_i,n g_mn dx(PHI^m)_i,n s_i,n +! i +! + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: s(np,np) + + real(kind=r8) :: ds(np,np,2) + + integer i,j,l,m,n + real(kind=r8) :: dscov(np,np,2) + + dscov=0 + do n=1,np + do m=1,np +!DIR$ UNROLL(NP) + do j=1,np + ! phi(m)_x sum over first index, second index fixed at n + dscov(m,n,1)=dscov(m,n,1)-(elem%mp(j,n)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) )*ra + ! phi(n)_y sum over second index, 1st index fixed at m + dscov(m,n,2)=dscov(m,n,2)-(elem%mp(m,j)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) )*ra + enddo + enddo + enddo + + ! 
convert covariant -> latlon + ds(:,:,1)=elem%Dinv(:,:,1,1)*dscov(:,:,1) + elem%Dinv(:,:,2,1)*dscov(:,:,2) + ds(:,:,2)=elem%Dinv(:,:,1,2)*dscov(:,:,1) + elem%Dinv(:,:,2,2)*dscov(:,:,2) + + end function gradient_sphere_wk_testcontra + + function ugradv_sphere(u,v,deriv,elem) result(ugradv) +! +! input: vectors u and v (latlon coordinates) +! output: vector [ u dot grad ] v (latlon coordinates) +! + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: u(np,np,2) + real(kind=r8), intent(in) :: v(np,np,2) + + real(kind=r8) :: ugradv(np,np,2) + real(kind=r8) :: dum_cart(np,np,3) + real(kind=r8) :: temp(np,np,2) + + integer :: component + + ! latlon -> cartesian + do component=1,3 + ! Summing along the third dimension is a sum over components for each point. + ! (This is just a faster way of doing a dot product for each grid point, + ! since reindexing the inputs to use the intrinsic effectively would be + ! just asking for trouble.) + dum_cart(:,:,component)=sum( elem%vec_sphere2cart(:,:,component,:)*v(:,:,:) ,3) + end do + + ! Do ugradv on the cartesian components. + do component=1,3 + ! Dot u with the gradient of each component + call gradient_sphere(dum_cart(:,:,component),deriv,elem%Dinv,temp) + dum_cart(:,:,component) = sum( u(:,:,:) * temp,3) + enddo + + ! cartesian -> latlon + do component=1,2 + ! vec_sphere2cart is its own pseudoinverse. + ugradv(:,:,component) = sum(dum_cart(:,:,:)*elem%vec_sphere2cart(:,:,:,component), 3) + end do + + end function ugradv_sphere + + + + function curl_sphere(s,deriv,elem) result(ds) +! +! input s: scalar (assumed to be s khat) +! output curl(s khat) vector in lat-lon coordinates +! +! This subroutine can be used to compute divergence free velocity fields, +! since div(ds)=0 +! +! first compute: +! curl(s khat) = (1/jacobian) ( ds/dy, -ds/dx ) in contra-variant coordinates +! then map to lat-lon +! + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: s(np,np) + + real(kind=r8) :: ds(np,np,2) + + integer i + integer j + integer l + + real(kind=r8) :: dsdx00 + real(kind=r8) :: dsdy00 + real(kind=r8) :: v1(np,np),v2(np,np) + + do j=1,np + do l=1,np + dsdx00=0.0d0 + dsdy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) + dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) + end do + v2(l ,j ) = -dsdx00*ra + v1(j ,l ) = dsdy00*ra + end do + end do + ! convert contra -> latlon *and* divide by jacobian + do j=1,np + do i=1,np + ds(i,j,1)=(elem%D(i,j,1,1)*v1(i,j) + elem%D(i,j,1,2)*v2(i,j))/elem%metdet(i,j) + ds(i,j,2)= (elem%D(i,j,2,1)*v1(i,j) + elem%D(i,j,2,2)*v2(i,j))/elem%metdet(i,j) + enddo + enddo + + end function curl_sphere + + +!-------------------------------------------------------------------------- + + + + subroutine divergence_sphere_wk(v,deriv,elem,div) +! +! input: v = velocity in lat-lon coordinates +! ouput: div(v) spherical divergence of v, integrated by parts +! +! Computes -< grad(psi) dot v > +! (the integrated by parts version of < psi div(v) > ) +! +! note: after DSS, divergence_sphere() and divergence_sphere_wk() +! are identical to roundoff, as theory predicts. +! + real(kind=r8), intent(in) :: v(np,np,2) ! in lat-lon coordinates + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8),intent(out) :: div(np,np) + + ! 
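+  ! Note: laplace_sphere_wk further below is assembled as gradient_sphere()
+  ! followed by this weak divergence, which is why grad(s) there does not
+  ! need to be made C0 first.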
Local + + integer i,j,m,n + + real(kind=r8) :: vtemp(np,np,2) + real(kind=r8) :: ggtemp(np,np,2) + real(kind=r8) :: gtemp(np,np,2) + real(kind=r8) :: psi(np,np) + real(kind=r8) :: xtmp + + ! latlon- > contra + do j=1,np + do i=1,np + vtemp(i,j,1)=(elem%Dinv(i,j,1,1)*v(i,j,1) + elem%Dinv(i,j,1,2)*v(i,j,2)) + vtemp(i,j,2)=(elem%Dinv(i,j,2,1)*v(i,j,1) + elem%Dinv(i,j,2,2)*v(i,j,2)) + enddo + enddo + + do n=1,np + do m=1,np + + div(m,n)=0 +!DIR$ UNROLL(NP) + do j=1,np + div(m,n)=div(m,n)-(elem%spheremp(j,n)*vtemp(j,n,1)*deriv%Dvv(m,j) & + +elem%spheremp(m,j)*vtemp(m,j,2)*deriv%Dvv(n,j)) & + * ra + enddo + + end do + end do + + end subroutine divergence_sphere_wk + + + + function element_boundary_integral(v,deriv,elem) result(result) +! +! input: v = velocity in lat-lon coordinates +! ouput: result(i,j) = contour integral of PHI_ij * v dot normal +! where PHI_ij = cardinal function at i,j GLL point +! +! this routine is used just to check spectral element integration by parts identities +! + real(kind=r8), intent(in) :: v(np,np,2) ! in lat-lon coordinates + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8) :: result(np,np) + + ! Local + real(kind=r8) :: ucontra(np,np,2) ! in lat-lon coordinates + integer i,j + + ! latlon->contra + do j=1,np + do i=1,np + ucontra(i,j,1)=(elem%Dinv(i,j,1,1)*v(i,j,1) + elem%Dinv(i,j,1,2)*v(i,j,2)) + ucontra(i,j,2)=(elem%Dinv(i,j,2,1)*v(i,j,1) + elem%Dinv(i,j,2,2)*v(i,j,2)) + enddo + enddo + + ! note: GLL weights weight(i) = Mvv_twt(i,i) + result=0 + j=1 + do i=1,np + result(i,j)=result(i,j)-deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra + enddo + + j=np + do i=1,np + result(i,j)=result(i,j)+deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra + enddo + + i=1 + do j=1,np + result(i,j)=result(i,j)-deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra + enddo + + i=np + do j=1,np + result(i,j)=result(i,j)+deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra + enddo + end function element_boundary_integral + + + + function edge_flux_u_cg( v,p,pedges, deriv, elem, u_is_contra) result(result) +! +! +! input: v = velocity in contra or lat-lon coordinates (CONTINUIOUS) +! p = scalar on this element +! pedges = scalar edge data from neighbor elements +! +! ouput: result(i,j) = contour integral of PHI_ij * pstar * v dot normal +! where PHI_ij = cardinal function at i,j GLL point +! pstar = centered or other flux +! + real(kind=r8), intent(in) :: v(np,np,2) + real(kind=r8), intent(in) :: p(np,np) + real(kind=r8), intent(in) :: pedges(0:np+1,0:np+1) + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8) :: result(np,np) + logical :: u_is_contra + + ! Local + real(kind=r8) :: ucontra(np,np,2) ! in lat-lon coordinates + real(kind=r8) :: flux,pstar + integer i,j + + + result=0 + + + if (u_is_contra) then + ucontra=v + else + ! latlon->contra + do j=1,np + do i=1,np + ucontra(i,j,1)=(elem%Dinv(i,j,1,1)*v(i,j,1) + elem%Dinv(i,j,1,2)*v(i,j,2)) + ucontra(i,j,2)=(elem%Dinv(i,j,2,1)*v(i,j,1) + elem%Dinv(i,j,2,2)*v(i,j,2)) + enddo + enddo + endif + ! 
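+  ! Upwind flux: on each edge pstar is taken from this element when the
+  ! contravariant normal velocity points outward, and from the neighbor edge
+  ! data (pedges) when it points inward; e.g. on the j=1 edge, ucontra(i,j,2)>0
+  ! is inflow and pstar comes from pedges(i,0).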
upwind + do i=1,np + j=1 + pstar=p(i,j) + if (ucontra(i,j,2)>0) pstar=pedges(i,0) + flux = -pstar*ucontra(i,j,2)*( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) + result(i,j)=result(i,j)+flux + + j=np + pstar=p(i,j) + if (ucontra(i,j,2)<0) pstar=pedges(i,np+1) + flux = pstar*ucontra(i,j,2)* ( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) + result(i,j)=result(i,j)+flux + enddo + + do j=1,np + i=1 + pstar=p(i,j) + if (ucontra(i,j,1)>0) pstar=pedges(0,j) + flux = -pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) + result(i,j)=result(i,j)+flux + + i=np + pstar=p(i,j) + if (ucontra(i,j,1)<0) pstar=pedges(np+1,j) + flux = pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) + result(i,j)=result(i,j)+flux + end do + + end function edge_flux_u_cg + + +!DIR$ ATTRIBUTES FORCEINLINE :: vorticity_sphere + subroutine vorticity_sphere(v,deriv,elem,vort) +! +! input: v = velocity in lat-lon coordinates +! ouput: spherical vorticity of v +! + + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: v(np,np,2) + + real(kind=r8), intent(out) :: vort(np,np) + + integer i + integer j + integer l + + real(kind=r8) :: dvdx00,dudy00 + real(kind=r8) :: vco(np,np,2) + real(kind=r8) :: vtemp(np,np) + + ! convert to covariant form + do j=1,np + do i=1,np + vco(i,j,1)=(elem%D(i,j,1,1)*v(i,j,1) + elem%D(i,j,2,1)*v(i,j,2)) + vco(i,j,2)=(elem%D(i,j,1,2)*v(i,j,1) + elem%D(i,j,2,2)*v(i,j,2)) + enddo + enddo + + do j=1,np + do l=1,np + + dudy00=0.0d0 + dvdx00=0.0d0 + +!DIR$ UNROLL(NP) + do i=1,np + dvdx00 = dvdx00 + deriv%Dvv(i,l )*vco(i,j ,2) + dudy00 = dudy00 + deriv%Dvv(i,l )*vco(j ,i,1) + enddo + + vort(l ,j ) = dvdx00 + vtemp(j ,l ) = dudy00 + enddo + enddo + + do j=1,np + do i=1,np + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) + end do + end do + + end subroutine vorticity_sphere + + function vorticity_sphere_diag(v,deriv,elem) result(vort) + ! + ! input: v = velocity in lat-lon coordinates + ! ouput: diagonal component of spherical vorticity of v + ! + + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(in) :: v(np,np,2) + + real(kind=r8) :: vort(np,np) + + integer i + integer j + integer l + + real(kind=r8) :: dvdx00,dudy00 + real(kind=r8) :: vco(np,np,2) + real(kind=r8) :: vtemp(np,np) + real(kind=r8) :: rdx + real(kind=r8) :: rdy + + ! convert to covariant form + + do j=1,np + do i=1,np + vco(i,j,1)=(elem%D(i,j,1,1)*v(i,j,1) + elem%D(i,j,2,1)*v(i,j,2)) + vco(i,j,2)=(elem%D(i,j,1,2)*v(i,j,1) + elem%D(i,j,2,2)*v(i,j,2)) + enddo + enddo + + + do j=1,np + do l=1,np + dudy00=0.0d0 + dvdx00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dvdx00 = dvdx00 + deriv%Dvv_diag(i,l)*vco(i,j ,2) + dudy00 = dudy00 + deriv%Dvv_diag(i,l)*vco(j ,i,1) + enddo + vort(l ,j) = dvdx00 + vtemp(j ,l) = dudy00 + enddo + enddo + + do j=1,np + do i=1,np + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) + end do + end do + + end function vorticity_sphere_diag + +!DIR$ ATTRIBUTES FORCEINLINE :: divergence_sphere + subroutine divergence_sphere(v,deriv,elem,div) +! +! input: v = velocity in lat-lon coordinates +! ouput: div(v) spherical divergence of v +! + + + real(kind=r8), intent(in) :: v(np,np,2) ! in lat-lon coordinates + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8), intent(out) :: div(np,np) + + ! Local + + integer i + integer j + integer l + + real(kind=r8) :: dudx00 + real(kind=r8) :: dvdy00 + real(kind=r8) :: gv(np,np,2),vvtemp(np,np) + + ! 
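+  ! In effect this evaluates
+  !    div(v) = (1/metdet) * [ d(metdet*u^1)/dx + d(metdet*u^2)/dy ] * ra,
+  ! where (u^1,u^2) are the contravariant components obtained from the
+  ! lat-lon input via elem%Dinv.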
convert to contra variant form and multiply by g + do j=1,np + do i=1,np + gv(i,j,1)=elem%metdet(i,j)*(elem%Dinv(i,j,1,1)*v(i,j,1) + elem%Dinv(i,j,1,2)*v(i,j,2)) + gv(i,j,2)=elem%metdet(i,j)*(elem%Dinv(i,j,2,1)*v(i,j,1) + elem%Dinv(i,j,2,2)*v(i,j,2)) + enddo + enddo + + ! compute d/dx and d/dy + do j=1,np + do l=1,np + dudx00=0.0d0 + dvdy00=0.0d0 +!DIR$ UNROLL(NP) + do i=1,np + dudx00 = dudx00 + deriv%Dvv(i,l )*gv(i,j ,1) + dvdy00 = dvdy00 + deriv%Dvv(i,l )*gv(j ,i,2) + end do + div(l ,j ) = dudx00 + vvtemp(j ,l ) = dvdy00 + end do + end do + + do j=1,np + do i=1,np + div(i,j)=(div(i,j)+vvtemp(i,j))*(elem%rmetdet(i,j)*ra) + end do + end do + + end subroutine divergence_sphere + + +!DIR$ ATTRIBUTES FORCEINLINE :: laplace_sphere_wk + subroutine laplace_sphere_wk(s,deriv,elem,laplace,var_coef,mol_nu) +! +! input: s = scalar +! ouput: -< grad(PHI), grad(s) > = weak divergence of grad(s) +! note: for this form of the operator, grad(s) does not need to be made C0 +! + real(kind=r8), intent(in) :: s(np,np) + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + real(kind=r8) :: laplace(np,np) + logical, intent(in) :: var_coef + real(kind=r8), intent(in), optional :: mol_nu(np,np) !variable nu (e.g. molecular diffusion) + + real(kind=r8) :: laplace2(np,np) + integer i,j + + ! Local + real(kind=r8) :: grads(np,np,2), oldgrads(np,np,2) + + call gradient_sphere(s,deriv,elem%Dinv,grads) + ! + ! molecular diffusion coefficient + ! + if (present(mol_nu)) then + if (var_coef) & + call endrun('ERROR: this option is only for non-grid dependent variable viscosity') + grads(:,:,1) = grads(:,:,1)*mol_nu(:,:) + grads(:,:,2) = grads(:,:,2)*mol_nu(:,:) + end if + + if (var_coef) then + if (hypervis_power/=0 ) then + ! scalar viscosity with variable coefficient + grads(:,:,1) = grads(:,:,1)*elem%variable_hyperviscosity(:,:) + grads(:,:,2) = grads(:,:,2)*elem%variable_hyperviscosity(:,:) + else if (hypervis_scaling /=0 ) then + ! tensor hv, (3) + oldgrads=grads + do j=1,np + do i=1,np + grads(i,j,1) = oldgrads(i,j,1)*elem%tensorVisc(i,j,1,1) + & + oldgrads(i,j,2)*elem%tensorVisc(i,j,1,2) + grads(i,j,2) = oldgrads(i,j,1)*elem%tensorVisc(i,j,2,1) + & + oldgrads(i,j,2)*elem%tensorVisc(i,j,2,2) + end do + end do + else + ! do nothing: constant coefficient viscosity + endif + endif + + ! note: divergnece_sphere and divergence_sphere_wk are identical *after* bndry_exchange + ! if input is C_0. Here input is not C_0, so we should use divergence_sphere_wk(). + ! laplace=divergence_sphere_wk(grads,deriv,elem) + call divergence_sphere_wk(grads,deriv,elem,laplace) + + end subroutine laplace_sphere_wk + +!DIR$ ATTRIBUTES FORCEINLINE :: vlaplace_sphere_wk + subroutine vlaplace_sphere_wk(v,deriv,elem,undamprrcart,laplace,var_coef,nu_ratio) +! +! input: v = vector in lat-lon coordinates +! ouput: weak laplacian of v, in lat-lon coordinates +! +! logic: +! tensorHV: requires cartesian +! nu_div/=nu: requires contra formulatino +! +! One combination NOT supported: tensorHV and nu_div/=nu then abort +! + real(kind=r8), intent(in) :: v(np,np,2) + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + logical, intent(in) :: undamprrcart + real(kind=r8), intent(out) :: laplace(np,np,2) + logical, optional, intent(in) :: var_coef + real(kind=r8), optional, intent(in) :: nu_ratio + + + if (hypervis_scaling/=0 .and. var_coef) then + ! 
tensorHV is turned on - requires cartesian formulation + if (present(nu_ratio)) then + if (nu_ratio /= 1._r8) then + call endrun('ERROR: tensorHV can not be used with nu_div/=nu') + endif + endif + laplace=vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) + else + ! all other cases, use contra formulation: + laplace=vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) + endif + + end subroutine vlaplace_sphere_wk + ! + ! version of vlaplace_sphere_wk for molecular diffusion + ! + subroutine vlaplace_sphere_wk_mol(v,deriv,elem,undamprrcart,mol_nu,laplace) + ! + ! input: v = vector in lat-lon coordinates + ! ouput: weak laplacian of v, in lat-lon coordinates + ! + real(kind=r8), intent(in) :: v(np,np,2) + type (derivative_t), intent(in):: deriv + type (element_t), intent(in) :: elem + logical, intent(in) :: undamprrcart + real(kind=r8), intent(in) :: mol_nu(np,np) + real(kind=r8), intent(out) :: laplace(np,np,2) + + real(kind=r8) :: vor(np,np),div(np,np) + + integer :: n,m + + call divergence_sphere(v,deriv,elem,div) + call vorticity_sphere(v,deriv,elem,vor) + + div = div*mol_nu(:,:) + vor = vor*mol_nu(:,:) + + laplace = gradient_sphere_wk_testcov(div,deriv,elem) - & + curl_sphere_wk_testcov(vor,deriv,elem) + + if (undamprrcart) then + do n=1,np + do m=1,np + ! add in correction so we dont damp rigid rotation + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + enddo + enddo + end if + + + end subroutine vlaplace_sphere_wk_mol + + + + function vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) result(laplace) +! +! input: v = vector in lat-lon coordinates +! ouput: weak laplacian of v, in lat-lon coordinates + + real(kind=r8), intent(in) :: v(np,np,2) + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + logical, intent(in) :: var_coef + logical, intent(in) :: undamprrcart + real(kind=r8) :: laplace(np,np,2) + ! Local + + integer component + real(kind=r8) :: dum_cart(np,np,3) + real(kind=r8) :: dum_cart2(np,np) + + + ! latlon -> cartesian + do component=1,3 + dum_cart2(:,:) = elem%vec_sphere2cart(:,:,component,1)*v(:,:,1) + & + elem%vec_sphere2cart(:,:,component,2)*v(:,:,2) + ! Do laplace on cartesian comps + call laplace_sphere_wk(dum_cart2,deriv,elem,dum_cart(:,:,component),var_coef) + enddo + + ! cartesian -> latlon + do component=1,2 + ! vec_sphere2cart is its own pseudoinverse. + laplace(:,:,component) = dum_cart(:,:,1)*elem%vec_sphere2cart(:,:,1,component) + & + dum_cart(:,:,2)*elem%vec_sphere2cart(:,:,2,component) + & + dum_cart(:,:,3)*elem%vec_sphere2cart(:,:,3,component) + end do + + if (undamprrcart) then + ! add in correction so we dont damp rigid rotation + laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(ra**2) + laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(ra**2) + end if + + end function vlaplace_sphere_wk_cartesian + + + + function vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) result(laplace) +! +! input: v = vector in lat-lon coordinates +! ouput: weak laplacian of v, in lat-lon coordinates +! +! du/dt = laplace(u) = grad(div) - curl(vor) +! < PHI du/dt > = < PHI laplace(u) > PHI = covariant, u = contravariant +! = < PHI grad(div) > - < PHI curl(vor) > +! = grad_wk(div) - curl_wk(vor) +! 
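+! When undamprrcart is set, the term added at the end (+2*spheremp*v*ra**2)
+! offsets the fact that the vector Laplacian of a rigid-rotation flow on the
+! sphere is -2*v/radius**2, so rigid rotation itself is not damped.
+!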
+ real(kind=r8), intent(in) :: v(np,np,2) + logical, intent(in) :: var_coef + type (derivative_t), intent(in) :: deriv + type (element_t), intent(in) :: elem + logical, intent(in) :: undamprrcart + real(kind=r8), optional, intent(in) :: nu_ratio + + real(kind=r8) :: laplace(np,np,2) + + ! Local + + integer i,j,l,m,n + real(kind=r8) :: vor(np,np),div(np,np) + real(kind=r8) :: v1,v2,div1,div2,vor1,vor2,phi_x,phi_y + + call divergence_sphere(v,deriv,elem,div) + call vorticity_sphere(v,deriv,elem,vor) + + if (var_coef .and. hypervis_power/=0 ) then + ! scalar viscosity with variable coefficient + div = div*elem%variable_hyperviscosity(:,:) + vor = vor*elem%variable_hyperviscosity(:,:) + endif + + if (present(nu_ratio)) div = nu_ratio*div + + laplace = gradient_sphere_wk_testcov(div,deriv,elem) - & + curl_sphere_wk_testcov(vor,deriv,elem) + + if (undamprrcart) then + do n=1,np + do m=1,np + ! add in correction so we dont damp rigid rotation + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + enddo + enddo + end if + end function vlaplace_sphere_wk_contra + + function gll_to_dgmodal(p,deriv) result(phat) +! +! input: v = velocity in lat-lon coordinates +! ouput: phat = Legendre coefficients +! +! Computes < g dot p > = SUM g(i,j) p(i,j) w(i) w(j) +! (the quadrature approximation on the *reference element* of the integral of p against +! all Legendre polynomials up to degree npdg +! +! for npdg < np, this routine gives the (exact) modal expansion of p/spheremp() +! + real(kind=r8), intent(in) :: p(np,np) + type (derivative_t), intent(in) :: deriv + real(kind=r8) :: phat(npdg,npdg) + + ! Local + integer i,j,m,n + real(kind=r8) :: A(np,npdg) + A=0 + phat=0 + + ! N^3 tensor product formulation: + do m=1,npdg + do j=1,np +!DIR$ UNROLL(NP) + do i=1,np + A(j,m)=A(j,m)+( p(i,j)*deriv%Mvv_twt(i,i)*deriv%Mvv_twt(j,j) )*deriv%legdg(m,i) + enddo + enddo + enddo + + do n=1,npdg + do m=1,npdg +!DIR$ UNROLL(NP) + do j=1,np + phat(m,n)=phat(m,n)+A(j,m)*deriv%legdg(n,j) + enddo + enddo + enddo + + end function + + function dgmodal_to_gll(phat,deriv) result(p) + ! + ! input: phat = coefficients of Legendre expansion + ! ouput: p = sum expansion to evaluate phat at GLL points + ! + real(kind=r8) :: p(np,np) + type (derivative_t), intent(in) :: deriv + real(kind=r8) :: phat(npdg,npdg) + ! Local + integer i,j,m,n + real(kind=r8) :: A(npdg,np) + + p(:,:)=0 + ! tensor product version + A=0 + do i=1,np + do n=1,npdg + do m=1,npdg + A(n,i)=A(n,i)+phat(m,n)*deriv%legdg(m,i) + enddo + enddo + enddo + do j=1,np + do i=1,np + do n=1,npdg + p(i,j) = p(i,j)+A(n,i)*deriv%legdg(n,j) + enddo + enddo + enddo + + end function dgmodal_to_gll + + subroutine subcell_dss_fluxes(dss, p, n, metdet, C, fluxes) + + integer , intent(in) :: p + integer , intent(in) :: n + real (kind=r8), intent(in) :: dss (p,p) + real (kind=r8), intent(in) :: metdet (p,p) + real (kind=r8), intent(in) :: C (2,2,2) + + real (kind=r8) :: fluxes (n,n,4) + + real (kind=r8) :: Bp(p,p) + real (kind=r8) :: Tp(p,p) + real (kind=r8) :: Lp(p,p) + real (kind=r8) :: Rp(p,p) + + real (kind=r8) :: B(n,n) + real (kind=r8) :: T(n,n) + real (kind=r8) :: L(n,n) + real (kind=r8) :: R(n,n) + + integer :: i,j + + fluxes = 0 + + Bp = 0 + Tp = 0 + Rp = 0 + Lp = 0 + + Bp(:,1) = dss(:,1) ! bottom + Tp(:,p) = dss(:,p) ! top + Rp(p,:) = dss(p,:) ! right + Lp(1,:) = dss(1,:) ! 
left + + Bp(1,1) = C(1,1,2) + Lp(1,1) = C(1,1,1) + Bp(p,1) = C(2,1,2) + Rp(p,1) = C(2,1,1) + + Tp(1,p) = C(1,2,2) + Lp(1,p) = C(1,2,1) + Tp(p,p) = C(2,2,2) + Rp(p,p) = C(2,2,1) + + + call subcell_integration(Bp, p, n, metdet,B) + call subcell_integration(Tp, p, n, metdet,T) + call subcell_integration(Lp, p, n, metdet,L) + call subcell_integration(Rp, p, n, metdet,R) + + do i = 1,n + do j = 1,n + if (1 [a,b] + ! all the GLL points by y = (a/2)(1-x) + (b/2)(1+x) + do i=1,intervals + a = -one + (i-one)*two/intervals + b = -one + i *two/intervals + sub_gll(i,:) = (a+b)/two + gll%points(:)/intervals + end do + + ! Now to interpolate from the values at the input GLL + ! points to the sub-GLL points. Do this by Lagrange + ! interpolation. The jth Lagrange interpolating polynomial + ! for points x_i is + ! \prod_{i\ne j} (x-x_i)/(x_j-x_i) + ! These are then multiplied by the sampled values y_i + ! and summed. + + ! Save some time by pre-computing the denominitor. I think + ! this is OK since all the points are of order 1 so should + ! be well behaved. + do n = 1,np + x_j = gll%points(n) + x = one + do m = 1,np + if (m.ne.n) then + x_i = gll%points(m) + x = x * (x_j-x_i) + endif + end do + legrange_div(n)= x + end do + do i=1,intervals + do n=1,np + x = sub_gll(i,n) + do j = 1,np + y = one + do m = 1,np + if (m.ne.j) then + x_i = gll%points(m) + y = y * (x-x_i) + end if + end do + Lagrange_interp(i,n,j) = y/legrange_div(j) + end do + end do + end do + + ! Integration is the GLL weights times Jacobians times + ! the interpolated values: + ! w^t I Y I^t w + ! where + ! w is GLL weights and Jacobians, + ! I is the Lagrange_interp matrix, and + ! Y is the coefficient matrix, sampled_val. + ! This can be written J Y J^t where + ! J = w^t I + ! J is integration_matrix + do i=1,intervals + integration_matrix(i,:) = MATMUL(gll%weights(:),Lagrange_interp(i,:,:)) + end do + + ! There is still the Jacobian to consider. We are + ! integrating over [a,b] x [c,d] where + ! |b-a| = |d-c| = 2/Intervals + ! Multiply the weights appropriately given that + ! they are defined for a 2x2 square + integration_matrix = integration_matrix/intervals + + boundary_interp_matrix(:,1,:) = Lagrange_interp(:,1,:) + boundary_interp_matrix(:,2,:) = Lagrange_interp(:,np,:) + end subroutine allocate_subcell_integration_matrix_cslam + + subroutine allocate_subcell_integration_matrix_physgrid(np, intervals) + !----------------- + !----------------- + use quadrature_mod, only : gausslobatto, quadrature_t + + implicit none + + integer , intent(in) :: np + integer , intent(in) :: intervals + real (kind=r8) :: values(intervals,intervals) + + + real(kind=r8), parameter :: zero = 0.0_r8, one=1.0_r8, two=2.0_r8 + + + real (kind=r8) :: sub_gll (intervals,np) + + real (kind=r8) :: Lagrange_interp(intervals,np,np) + type (quadrature_t) :: gll + + real (kind=r8) :: legrange_div(np) + real (kind=r8) :: a,b,x,y, x_j, x_i + real (kind=r8) :: r(1) + integer i,j,n,m + + if (ALLOCATED(integration_matrix_physgrid)) deallocate(integration_matrix_physgrid) + allocate(integration_matrix_physgrid(intervals,np)) + + gll = gausslobatto(np) + + ! The GLL (Gauss-Lobatto-Legendre) points are from [-1,1], + ! we have a bunch of sub-intervals defined by intervals that + ! go from [a,b] so we need to linearly map [-1,1] -> [a,b] + ! all the GLL points by y = (a/2)(1-x) + (b/2)(1+x) + do i=1,intervals + a = -one + (i-one)*two/intervals + b = -one + i *two/intervals + sub_gll(i,:) = (a+b)/two + gll%points(:)/intervals + end do + + ! 
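+    ! As in the cslam version above, row i of the resulting matrix integrates
+    ! the GLL interpolant over sub-interval i of the reference element, so a
+    ! 2D sub-cell integral is obtained as J * Y * transpose(J) with
+    ! J = integration_matrix_physgrid and Y the np x np field of GLL values.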
Now to interpolate from the values at the input GLL + ! points to the sub-GLL points. Do this by Lagrange + ! interpolation. The jth Lagrange interpolating polynomial + ! for points x_i is + ! \prod_{i\ne j} (x-x_i)/(x_j-x_i) + ! These are then multiplied by the sampled values y_i + ! and summed. + + ! Save some time by pre-computing the denominitor. I think + ! this is OK since all the points are of order 1 so should + ! be well behaved. + do n = 1,np + x_j = gll%points(n) + x = one + do m = 1,np + if (m.ne.n) then + x_i = gll%points(m) + x = x * (x_j-x_i) + endif + end do + legrange_div(n)= x + end do + do i=1,intervals + do n=1,np + x = sub_gll(i,n) + do j = 1,np + y = one + do m = 1,np + if (m.ne.j) then + x_i = gll%points(m) + y = y * (x-x_i) + end if + end do + Lagrange_interp(i,n,j) = y/legrange_div(j) + end do + end do + end do + do i=1,intervals + integration_matrix_physgrid(i,:) = MATMUL(gll%weights(:),Lagrange_interp(i,:,:)) + end do + integration_matrix_physgrid = integration_matrix_physgrid/intervals + end subroutine allocate_subcell_integration_matrix_physgrid + + + + subroutine limiter_optim_iter_full(ptens,sphweights,minp,maxp,dpmass,kbeg,kend) + ! + !The idea here is the following: We need to find a grid field which is closest + !to the initial field (in terms of weighted sum), but satisfies the min/max constraints. + !So, first we find values which do not satisfy constraints and bring these values + !to a closest constraint. This way we introduce some mass change (addmass), + !so, we redistribute addmass in the way that l2 error is smallest. + !This redistribution might violate constraints thus, we do a few iterations. + ! + ! O. Guba ~2012 Documented in Guba, Taylor & St-Cyr, JCP 2014 + ! I. Demeshko & M. Taylor 7/2015: Removed indirect addressing. + ! N. Lopez & M. Taylor 8/2015: Mass redistributon tweak which is better at + ! linear coorelation preservation + ! + use dimensions_mod, only : np, np, nlev + + real (kind=r8), dimension(nlev), intent(inout) :: minp, maxp + real (kind=r8), dimension(np*np,nlev), intent(inout) :: ptens + real (kind=r8), dimension(np*np,nlev), intent(in), optional :: dpmass + real (kind=r8), dimension(np*np), intent(in) :: sphweights + integer, intent(in) :: kbeg, kend + + real (kind=r8), dimension(np,np) :: ptens_mass + integer k1, k, i, j, iter, weightsnum + real (kind=r8) :: addmass, weightssum, mass, sumc + real (kind=r8) :: x(np*np),c(np*np) + integer :: maxiter = np*np-1 + real (kind=r8) :: tol_limiter = 5.0e-14_r8 + + do k = kbeg, kend + + do k1=1,np*np + c(k1)=sphweights(k1)*dpmass(k1,k) + x(k1)=ptens(k1,k)/dpmass(k1,k) + enddo + + sumc=sum(c) + if (sumc <= 0 ) CYCLE ! this should never happen, but if it does, dont limit + mass=sum(c*x) + + + + ! relax constraints to ensure limiter has a solution: + ! This is only needed if runnign with the SSP CFL>1 or + ! 
due to roundoff errors + if( mass < minp(k)*sumc ) then + minp(k) = mass / sumc + endif + if( mass > maxp(k)*sumc ) then + maxp(k) = mass / sumc + endif + + + + do iter=1,maxiter + + addmass=0.0d0 + + do k1=1,np*np + if((x(k1)>maxp(k))) then + addmass=addmass+(x(k1)-maxp(k))*c(k1) + x(k1)=maxp(k) + endif + if((x(k1)0)then + do k1=1,np*np + if(x(k1)minp(k))then + weightssum=weightssum+c(k1) + endif + enddo + do k1=1,np*np + if(x(k1)>minp(k))then + x(k1)=x(k1)+addmass/weightssum + endif + enddo + endif + + + enddo!end of iteration + + do k1=1,np*np + ptens(k1,k)=x(k1) + enddo + + enddo + + do k = kbeg, kend + do k1=1,np*np + ptens(k1,k)=ptens(k1,k)*dpmass(k1,k) + enddo + enddo + + end subroutine limiter_optim_iter_full + + + + + +end module derivative_mod diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 new file mode 100644 index 00000000..a012c761 --- /dev/null +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -0,0 +1,141 @@ +module dimensions_mod + use shr_kind_mod, only: r8=>shr_kind_r8 +#ifdef FVM_TRACERS + use constituents, only: ntrac_d=>pcnst ! _EXTERNAL +#else + use constituents, only: qsize_d=>pcnst ! _EXTERNAL +#endif + + implicit none + private + +! set MAX number of tracers. actual number of tracers is a run time argument +#ifdef FVM_TRACERS + integer, parameter :: qsize_d =10 ! SE tracers (currently SE supports 10 condensate loading tracers) +#else + integer, parameter :: ntrac_d = 0 ! No fvm tracers if CSLAM is off +#endif + + ! + ! The variables below hold indices of water vapor and condensate loading tracers as well as + ! associated heat capacities (initialized in dyn_init): + ! + ! qsize_condensate_loading_idx = index of water tracers included in condensate loading according to CAM physics + ! qsize_condensate_loading_idx_gll = index of water tracers included in condensate loading terms for SE tracers + ! + ! Note that when running without CSLAM then + ! + ! qsize_condensate_loading_idx_gll = qsize_condensate_loading_idx + ! + ! but when running with CSLAM then SE tracers are only the water tracers included in the condensate loading + ! + character(len=16), allocatable, public :: cnst_name_gll(:) ! constituent names for SE tracers + character(len=128), allocatable, public :: cnst_longname_gll(:) ! long name of SE tracers + ! + !moist cp in energy conversion term + ! + ! .false.: force dycore to use cpd (cp dry) instead of moist cp + ! .true. : use moist cp in dycore + ! + logical , public :: lcp_moist = .true. + + integer, parameter, public :: np = NP + integer, parameter, public :: nc = 3 !cslam resolution + integer , public :: fv_nphys !physics-grid resolution - the "MAX" is so that the code compiles with NC=0 + + integer :: ntrac = 0 !ntrac is set in dyn_comp + integer :: qsize = 0 !qsize is set in dyn_comp + ! + ! hyperviscosity is applied on approximate pressure levels + ! Similar to CAM-EUL; see CAM5 scietific documentation (Note TN-486), equation (3.09), page 58. + ! + logical, public :: hypervis_dynamic_ref_state = .false. + ! fvm dimensions: + logical, public :: lprint!for debugging + integer, parameter, public :: ngpc=3 !number of Gausspoints for the fvm integral approximation !phl change from 4 + integer, parameter, public :: irecons_tracer=6!=1 is PCoM, =3 is PLM, =6 is PPM for tracer reconstruction + integer, public :: irecons_tracer_lev(PLEV) + integer, parameter, public :: nhe=1 !Max. 
Courant number + integer, parameter, public :: nhr=2 !halo width needed for reconstruction - phl + integer, parameter, public :: nht=nhe+nhr !total halo width where reconstruction is needed (nht<=nc) - phl + integer, parameter, public :: ns=3!quadratic halo interpolation - recommended setting for nc=3 + !nhc determines width of halo exchanged with neighboring elements + integer, parameter, public :: nhc = nhr+(nhe-1)+(ns-MOD(ns,2))/2 + !(different from halo needed for elements on edges and corners + integer, parameter, public :: lbc = 1-nhc + integer, parameter, public :: ubc = nc+nhc + logical, public :: large_Courant_incr + + integer, public :: kmin_jet,kmax_jet !min and max level index for the jet + integer, public :: fvm_supercycling + integer, public :: fvm_supercycling_jet + + integer, allocatable, public :: kord_tr(:), kord_tr_cslam(:) + + real(r8), public :: nu_scale_top(PLEV)! scaling of del2 viscosity in sopnge layer (initialized in dyn_comp) + real(r8), public :: nu_lev(PLEV) + real(r8), public :: otau(PLEV) + integer, public :: ksponge_end ! sponge is active k=1,ksponge_end + real(r8), public :: nu_div_lev(PLEV) = 1.0_r8 ! scaling of viscosity in sponge layer + ! (set in prim_state; if applicable) + real(r8), public :: kmvis_ref(PLEV) !reference profiles for molecular diffusion + real(r8), public :: kmcnd_ref(PLEV) !reference profiles for molecular diffusion + real(r8), public :: rho_ref(PLEV) !reference profiles for rho + real(r8), public :: km_sponge_factor(PLEV) !scaling for molecular diffusion (when used as sponge) + real(r8), public :: kmvisi_ref(PLEV+1) !reference profiles for molecular diffusion + real(r8), public :: kmcndi_ref(PLEV+1) !reference profiles for molecular diffusion + real(r8), public :: rhoi_ref(PLEV+1) !reference profiles for rho + + + integer, public :: nhc_phys + integer, public :: nhe_phys + integer, public :: nhr_phys + integer, public :: ns_phys + + integer, public :: npdg = 0 ! dg degree for hybrid cg/dg element 0=disabled + + integer, parameter, public :: npsq = np*np + integer, parameter, public :: nlev=PLEV + integer, parameter, public :: nlevp=nlev+1 + + +! params for a mesh +! integer, public, parameter :: max_elements_attached_to_node = 7 +! integer, public, parameter :: s_nv = 2*max_elements_attached_to_node + + !default for non-refined mesh (note that these are *not* parameters now) + integer, public :: max_elements_attached_to_node = 4 + integer, public :: s_nv = 6 + integer, public :: max_corner_elem = 1 !max_elements_attached_to_node-3 + integer, public :: max_neigh_edges = 8 !4 + 4*max_corner_elem + + public :: qsize,qsize_d,ntrac_d,ntrac + + integer, public :: ne + integer, public :: nelem ! total number of elements + integer, public :: nelemd ! number of elements per MPI task + integer, public :: nelemdmax ! max number of elements on any MPI task + integer, public :: nPhysProc ! This is the number of physics processors/ per dynamics processor + integer, public :: nnodes,npart,nmpi_per_node + integer, public :: GlobalUniqueCols + + public :: set_mesh_dimensions + +contains + + subroutine set_mesh_dimensions() + + ! new "params" + max_elements_attached_to_node = 7 ! 
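+    ! For example, with 7 elements attached to a node the assignments below
+    ! give s_nv = 2*7 = 14, max_corner_elem = 7-3 = 4 and
+    ! max_neigh_edges = 4 + 4*4 = 20.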
variable resolution + s_nv = 2*max_elements_attached_to_node + + !recalculate these + max_corner_elem = max_elements_attached_to_node-3 + max_neigh_edges = 4 + 4*max_corner_elem + + + end subroutine set_mesh_dimensions + + +end module dimensions_mod + diff --git a/src/dynamics/se/dycore/dof_mod.F90 b/src/dynamics/se/dycore/dof_mod.F90 new file mode 100644 index 00000000..4b33c278 --- /dev/null +++ b/src/dynamics/se/dycore/dof_mod.F90 @@ -0,0 +1,402 @@ +module dof_mod + use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use dimensions_mod, only: np, npsq, nelem, nelemd + use quadrature_mod, only: quadrature_t + use element_mod, only: element_t,index_t + use spmd_utils, only: mpi_integer + use parallel_mod, only: parallel_t + use edge_mod, only: initedgebuffer,freeedgebuffer, & + longedgevpack, longedgevunpackmin + use edgetype_mod, only: longedgebuffer_t + use bndry_mod, only: bndry_exchange +implicit none +private + ! public data + ! public subroutines + public :: global_dof + public :: UniquePoints + public :: PutUniquePoints + public :: UniqueNcolsP + public :: UniqueCoords + public :: CreateUniqueIndex + public :: SetElemOffset + public :: CreateMetaData + + interface UniquePoints + module procedure UniquePoints2D + module procedure UniquePoints3D + module procedure UniquePoints4D + end interface + interface PutUniquePoints + module procedure PutUniquePoints2D + module procedure PutUniquePoints3D + module procedure PutUniquePoints4D + end interface + + +contains + + subroutine genLocalDof(ig,npts,ldof) + + integer, intent(in) :: ig + integer, intent(in) :: npts + integer, intent(inout) :: ldof(:,:) + + integer :: i,j,npts2 + + + npts2=npts*npts + do j=1,npts + do i=1,npts + ldof(i,j) = (ig-1)*npts2 + (j-1)*npts + i + enddo + enddo + + end subroutine genLocalDOF + +! =========================================== +! global_dof +! +! Compute the global degree of freedom for each element... +! =========================================== + + subroutine global_dof(par,elem) + + type (parallel_t),intent(in) :: par + type (element_t) :: elem(:) + + type (LongEdgeBuffer_t) :: edge + + real(kind=r8) da ! area element + + type (quadrature_t) :: gp + + integer :: ldofP(np,np,nelemd) + + integer ii + integer i,j,ig,ie + integer kptr + integer iptr + + ! =================== + ! begin code + ! =================== + call initEdgeBuffer(edge,1) + + ! ================================================= + ! mass matrix on the velocity grid + ! ================================================= + + + do ie=1,nelemd + ig = elem(ie)%vertex%number + call genLocalDOF(ig,np,ldofP(:,:,ie)) + + kptr=0 + call LongEdgeVpack(edge,ldofP(:,:,ie),1,kptr,elem(ie)%desc) + end do + + ! ============================== + ! Insert boundary exchange here + ! ============================== + + call bndry_exchange(par,edge) + + do ie=1,nelemd + ! we should unpack directly into elem(ie)%gdofV, but we dont have + ! a VunpackMIN that takes integer*8. gdofV integer*8 means + ! more than 2G grid points. + kptr=0 + call LongEdgeVunpackMIN(edge,ldofP(:,:,ie),1,kptr,elem(ie)%desc) + elem(ie)%gdofP(:,:)=ldofP(:,:,ie) + end do +!$OMP BARRIER + call FreeEdgeBuffer(edge) + + end subroutine global_dof + + + subroutine UniquePoints2D(idxUnique,src,dest) + type (index_t) :: idxUnique + real (kind=r8) :: src(:,:) + real (kind=r8) :: dest(:) + + integer :: i,j,ii + + + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + dest(ii)=src(i,j) + enddo + + end subroutine UniquePoints2D + +! 
putUniquePoints first zeros out the destination array, then fills the unique points of the +! array with values from src. A boundary communication should then be called to fill in the +! redundent points of the array + + subroutine putUniquePoints2D(idxUnique,src,dest) + type (index_t) :: idxUnique + real (kind=r8),intent(in) :: src(:) + real (kind=r8),intent(out) :: dest(:,:) + + integer :: i,j,ii + + dest=0.0D0 + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + dest(i,j)=src(ii) + enddo + + end subroutine putUniquePoints2D + + subroutine UniqueNcolsP(elem,idxUnique,cid) + use element_mod, only : GetColumnIdP, element_t + type (element_t), intent(in) :: elem + type (index_t), intent(in) :: idxUnique + integer,intent(out) :: cid(:) + integer :: i,j,ii + + + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + cid(ii)=GetColumnIdP(elem,i,j) + enddo + + end subroutine UniqueNcolsP + + + subroutine UniqueCoords(idxUnique,src,lat,lon) + + use coordinate_systems_mod, only : spherical_polar_t + type (index_t), intent(in) :: idxUnique + + type (spherical_polar_t) :: src(:,:) + real (kind=r8), intent(out) :: lat(:) + real (kind=r8), intent(out) :: lon(:) + + integer :: i,j,ii + + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + lat(ii)=src(i,j)%lat + lon(ii)=src(i,j)%lon + enddo + + end subroutine UniqueCoords + + subroutine UniquePoints3D(idxUnique,nlyr,src,dest) + type (index_t) :: idxUnique + integer :: nlyr + real (kind=r8) :: src(:,:,:) + real (kind=r8) :: dest(:,:) + + integer :: i,j,k,ii + + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + do k=1,nlyr + dest(ii,k)=src(i,j,k) + enddo + enddo + + end subroutine UniquePoints3D + subroutine UniquePoints4D(idxUnique,d3,d4,src,dest) + type (index_t) :: idxUnique + integer :: d3,d4 + real (kind=r8) :: src(:,:,:,:) + real (kind=r8) :: dest(:,:,:) + + integer :: i,j,k,n,ii + + do n=1,d4 + do k=1,d3 + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + dest(ii,k,n)=src(i,j,k,n) + enddo + end do + enddo + + end subroutine UniquePoints4D + +! putUniquePoints first zeros out the destination array, then fills the unique points of the +! array with values from src. A boundary communication should then be called to fill in the +! 
redundent points of the array + + subroutine putUniquePoints3D(idxUnique,nlyr,src,dest) + type (index_t) :: idxUnique + integer :: nlyr + real (kind=r8),intent(in) :: src(:,:) + real (kind=r8),intent(out) :: dest(:,:,:) + + integer :: i,j,k,ii + + dest=0.0D0 + do k=1,nlyr + do ii=1,idxUnique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + dest(i,j,k)=src(ii,k) + enddo + enddo + + end subroutine putUniquePoints3D + + subroutine putUniquePoints4D(idxUnique,d3,d4,src,dest) + type (index_t) :: idxUnique + integer :: d3,d4 + real (kind=r8),intent(in) :: src(:,:,:) + real (kind=r8),intent(out) :: dest(:,:,:,:) + + integer :: i,j,k,n,ii + + dest=0.0D0 + do n=1,d4 + do k=1,d3 + do ii=1,idxunique%NumUniquePts + i=idxUnique%ia(ii) + j=idxUnique%ja(ii) + dest(i,j,k,n)=src(ii,k,n) + enddo + enddo + end do + end subroutine putUniquePoints4D + + subroutine SetElemOffset(par,elem,GlobalUniqueColsP) + use spmd_utils, only : mpi_sum + + type (parallel_t) :: par + type (element_t) :: elem(:) + integer, intent(out) :: GlobalUniqueColsP + + integer, allocatable :: numElemP(:),numElem2P(:) + integer, allocatable :: numElemV(:),numElem2V(:) + integer, allocatable :: gOffset(:) + + integer :: ie, ig, nprocs, ierr + logical, parameter :: Debug = .FALSE. + + nprocs = par%nprocs + allocate(numElemP(nelem)) + allocate(numElem2P(nelem)) + allocate(gOffset(nelem)) + numElemP=0;numElem2P=0;gOffset=0 + + do ie = 1, nelemd + ig = elem(ie)%GlobalId + numElemP(ig) = elem(ie)%idxP%NumUniquePts + end do + call MPI_Allreduce(numElemP,numElem2P,nelem,MPI_INTEGER,MPI_SUM,par%comm,ierr) + + gOffset(1)=1 + do ig = 2, nelem + gOffset(ig) = gOffset(ig-1)+numElem2P(ig-1) + end do + do ie = 1, nelemd + ig = elem(ie)%GlobalId + elem(ie)%idxP%UniquePtOffset=gOffset(ig) + end do + GlobalUniqueColsP = gOffset(nelem)+numElem2P(nelem)-1 + + deallocate(numElemP) + deallocate(numElem2P) + deallocate(gOffset) + end subroutine SetElemOffset + + subroutine CreateUniqueIndex(ig,gdof,idx) + + integer :: ig + type (index_t) :: idx + integer(i8) :: gdof(:,:) + + integer, allocatable :: ldof(:,:) + integer :: i,j,ii,npts + + + npts = size(gdof,dim=1) + allocate(ldof(npts,npts)) + ! ==================== + ! Form the local DOF + ! ==================== + call genLocalDOF(ig,npts,ldof) + + ii=1 + + do j=1,npts + do i=1,npts + ! ========================== + ! check for point ownership + ! ========================== + if(gdof(i,j) .eq. 
ldof(i,j)) then + idx%ia(ii) = i + idx%ja(ii) = j + ii=ii+1 + endif + enddo + enddo + + idx%NumUniquePts=ii-1 + deallocate(ldof) + + end subroutine CreateUniqueIndex + + + subroutine CreateMetaData(par,elem,subelement_corners, fdofp) + type (parallel_t), intent(in) :: par + type (element_t), target :: elem(:) + + integer, optional, intent(out) :: subelement_corners((np-1)*(np-1)*nelemd,4) + integer, optional :: fdofp(np,np,nelemd) + + type (index_t), pointer :: idx + type (LongEdgeBuffer_t) :: edge + integer :: i, j, ii, ie, base + integer(i8), pointer :: gdof(:,:) + integer :: fdofp_local(np,np,nelemd) + + call initEdgeBuffer(edge,1) + fdofp_local=0 + + do ie=1,nelemd + idx => elem(ie)%idxP + do ii=1,idx%NumUniquePts + i=idx%ia(ii) + j=idx%ja(ii) + + fdofp_local(i,j,ie) = -(idx%UniquePtoffset+ii-1) + end do + call LongEdgeVpack(edge,fdofp_local(:,:,ie),1,0,elem(ie)%desc) + end do + call bndry_exchange(par,edge) + do ie=1,nelemd + base = (ie-1)*(np-1)*(np-1) + call LongEdgeVunpackMIN(edge,fdofp_local(:,:,ie),1,0,elem(ie)%desc) + if(present(subelement_corners)) then + ii=0 + do j=1,np-1 + do i=1,np-1 + ii=ii+1 + subelement_corners(base+ii,1) = -fdofp_local(i,j,ie) + subelement_corners(base+ii,2) = -fdofp_local(i,j+1,ie) + subelement_corners(base+ii,3) = -fdofp_local(i+1,j+1,ie) + subelement_corners(base+ii,4) = -fdofp_local(i+1,j,ie) + end do + end do + end if + end do + if(present(fdofp)) then + fdofp=-fdofp_local + end if + + + + end subroutine CreateMetaData + +end module dof_mod diff --git a/src/dynamics/se/dycore/edge_mod.F90 b/src/dynamics/se/dycore/edge_mod.F90 new file mode 100644 index 00000000..7fa1e146 --- /dev/null +++ b/src/dynamics/se/dycore/edge_mod.F90 @@ -0,0 +1,2629 @@ +module edge_mod + + use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use dimensions_mod, only: max_neigh_edges, nelemd + use perf_mod, only: t_startf, t_stopf, t_adj_detailf ! _EXTERNAL + use thread_mod, only: max_num_threads, omp_get_num_threads, omp_get_thread_num + use coordinate_systems_mod, only: cartesian3D_t + use schedtype_mod, only: cycle_t, schedule_t, pgindex_t, schedule, HME_Ordinal,HME_Cardinal + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + use parallel_mod, only: parallel_t, & + MAX_ACTIVE_MSG, HME_status_size, BNDRY_TAG_BASE, HME_BNDRY_A2A, HME_BNDRY_P2P, & + HME_BNDRY_A2AO + use edgetype_mod, only: edgedescriptor_t, edgebuffer_t, & + Longedgebuffer_t, initedgebuffer_callid, Ghostbuffer3D_t + use element_mod, only: element_t + use gbarrier_mod, only: gbarrier_init, gbarrier_delete + use spmd_utils, only: mpi_real8, mpi_integer, mpi_info_null, mpi_success + + implicit none + private + save + + ! 8-byte Integer routines + public :: LongEdgeVpack, LongEdgeVunpackMIN + + ! 8-byte Real routines + public :: zeroEdgeBuffer + + interface initEdgeBuffer + module procedure initEdgeBuffer_r8 + module procedure initEdgeBuffer_i8 + end interface + interface initEdgeSBuffer + module procedure initEdgeSbuffer_r8 + end interface + interface freeEdgeBuffer + module procedure freeEdgeBuffer_r8 + module procedure freeEdgeBuffer_i8 + end interface + interface freeGhostBuffer + module procedure freeGhostBuffer_r8 + end interface + + public :: initEdgeBuffer + public :: initEdgeSBuffer + public :: freeEdgeBuffer + + public :: initGhostBuffer + public :: ghostpack, ghostunpack + public :: freeGhostBuffer + + !--------------------------------------------------------- + ! 
Pack/unpack routines that use the New format Edge buffer + !--------------------------------------------------------- + + public :: edgeVpack, edgeVunpack + public :: edgeVunpackMIN, edgeVunpackMAX + public :: edgeDGVpack, edgeDGVunpack + public :: edgeVunpackVert + + + public :: initGhostBuffer3D + public :: FreeGhostBuffer3D + public :: ghostVpack3D, ghostVunpack3D + + !---------------------------------------------------------------- + ! Pack/unpack routines that communicate a fixed number values + ! per element. This is used to communicate MIN/MAX values from + ! neighboring elemeents + !---------------------------------------------------------------- + interface edgeSpack + module procedure edgeSpack_r8 + end interface + public :: edgeSpack + public :: edgeSunpackMIN, edgeSunpackMAX + + logical, private :: threadsafe=.true. + + real(kind=r8), parameter, public :: edgeDefaultVal = 1.11e+100_r8 + +! NOTE ON ELEMENT ORIENTATION +! +! Element orientation: index V(i,j) +! +! (1,np) NWEST (np,np) NEAST +! +! (1,1) SWEST (np,1) SEAST +! +! +! for the edge neighbors: +! we set the "reverse" flag if two elements who share an edge use a +! reverse orientation. The data is reversed during the *pack* stage +! For corner neighbors: +! for edge buffers, there is no orientation because two corner neighbors +! only share a single point. +! For ghost cell data, there is a again two posible orientations. For +! this case, we set the "reverse" flag if the corner element is using +! the reverse orientation. In this case, the data is reversed during the +! *unpack* stage (not sure why) +! +! The edge orientation is set at startup. The corner orientation is computed +! at run time, via the call to compute_ghost_corner_orientation() +! This routine only works for meshes with at most 1 corner element. It's +! not called and the corner orientation flag is not set for unstructured meshes + +! +! Christoph Erath +! pack/unpack partial element of data of size (nx,nx) with user specifed halo size nh +! user specifies the sizes when creating the buffer +! buffer has 1 extra dimension (as compared to subroutines above) for multiple tracers +! input/output arrays are cartesian, and thus assume at most 1 element at each corner +! hence currently only supports cube-sphere grids. +! +! +! routines which including element edge data +! (used for FVM arrays where edge data is not shared by neighboring elements) +! these routines pack/unpack element data with user specified halo size + + ! Wrap pointer so we can make an array of them. 
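For context: Fortran does not allow an array of pointers to be declared directly, which is why the small wrapper type below exists. A minimal sketch of how such a wrapper is typically used, assuming the wrap_ptr type and r8 kind from this module; the target arrays fld_a and fld_b and their shapes are illustrative placeholders, not names from this patch:

    real (kind=r8), target :: fld_a(4,4), fld_b(4,4)   ! illustrative targets only
    type (wrap_ptr)        :: ptrs(0:1)                ! array of wrapped pointers

    ptrs(0)%ptr => fld_a          ! each array element carries its own pointer
    ptrs(1)%ptr => fld_b
    ptrs(1)%ptr(1,1) = 0.0_r8     ! dereference through the wrapped pointer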
+ type :: wrap_ptr + real (kind=r8), dimension(:,:), pointer :: ptr => null() + end type wrap_ptr + + type(wrap_ptr) :: edgebuff_ptrs(0:1) + +contains + + subroutine initEdgeSBuffer_r8(par,edge,elem,nlyr,bndry_type, nthreads) + type (parallel_t), intent(in) :: par + type (EdgeBuffer_t), target, intent(out) :: edge + type (element_t), intent(in) :: elem(:) + integer, intent(in) :: nlyr + integer , optional, intent(in) :: bndry_type + integer, optional, intent(in) :: nthreads + + + call initEdgeBuffer(par,edge,elem,nlyr,bndry_type=bndry_type, & + nthreads=nthreads,CardinalLength=1,OrdinalLength=1) + + end subroutine initEdgeSBuffer_r8 + + subroutine initGhostBuffer(par,edge,elem,nlyr,ndepth, npoints,bndry_type,nthreads) + + type (parallel_t), intent(in) :: par + type (Edgebuffer_t), target, intent(out) :: edge + type (element_t), intent(in) :: elem(:) + integer,intent(in) :: nlyr,ndepth, npoints + integer , optional, intent(in) :: bndry_type + integer, optional, intent(in) :: nthreads + + call initEdgeBuffer(par,edge,elem,nlyr,bndry_type=bndry_type, & + nthreads=nthreads,CardinalLength=ndepth*npoints,OrdinalLength=ndepth*ndepth) + ! set some parameters need to support deep halos + edge%ndepth = ndepth + edge%npoints = npoints + edge%lb = 1 - edge%ndepth + edge%ub = edge%npoints + edge%ndepth + + end subroutine initGhostBuffer + + + + subroutine zeroEdgeBuffer(edge) + + type (EdgeBuffer_t), intent(inout) :: edge + integer :: i + + do i=1,edge%nbuf + edge%buf(i) = 0.0d0 + edge%receive(i) = 0.0d0 + enddo + + end subroutine zeroEdgeBuffer + + ! ========================================= + ! initEdgeBuffer: + ! + ! create an Real based communication buffer + ! ========================================= + subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLength,OrdinalLength) + use dimensions_mod, only: np, nelemd, max_corner_elem + use schedtype_mod, only: cycle_t, schedule_t, schedule + use mpi, only: MPI_VERSION + + type (parallel_t), intent(in) :: par + type (EdgeBuffer_t), target, intent(out) :: edge + type (element_t), intent(in) :: elem(:) + integer, intent(in) :: nlyr + integer, optional, intent(in) :: bndry_type + integer, optional, intent(in) :: nthreads + integer, optional, intent(in) :: CardinalLength + integer, optional, intent(in) :: OrdinalLength + + ! Notes about the buf_ptr/receive_ptr options: + ! + ! If an EdgeBuffer_t object is initialized from pre-existing storage + ! (i.e. buf_ptr is provided and not null), it must *not* be freed, + ! and must not be used if the underlying storage has been deallocated. + ! + ! All these restrictions also applied to the old newbuf and newreceive + ! options. + + ! Workaround for NAG bug. + ! NAG 5.3.1 dies if you use pointer bounds remapping to set + ! a pointer that is also a component. So remap to temporary, + ! then use that to set component pointer. + + ! 
Local variables + integer :: nbuf,ith + integer :: nSendCycles, nRecvCycles + integer :: icycle, ierr + integer :: ie, i + integer :: edgeid,elemid + integer :: ptr,llen,moveLength, mLen, tlen + type (Cycle_t), pointer :: pCycle + type (Schedule_t), pointer :: pSchedule + integer :: dest, source, length, tag, iptr + integer :: nlen, ithr + + integer :: len, lenP,lenS + integer :: j,jj,il,mesgid, dst0,src0 + integer :: moveptr + integer :: nbuf2,ilm1,iem1,lenm1 + integer,allocatable :: putmap2(:,:),getmap2(:,:) + integer,allocatable :: scounts(:), rcounts(:) + integer,allocatable :: sdispls(:), rdispls(:) + integer :: nInter, nIntra + integer :: icInter, icIntra + + integer :: maxnsend + integer :: tmpnMesg + integer :: wintmpnMesg, wintmpDest, wintmpDisp + integer(kind=i8) :: winsize + integer :: win + integer :: sizeofreal + integer, allocatable :: tmpDest(:),tmpDisp(:) + integer :: nFull + integer :: disp, one + integer :: errorcode,errorlen + integer :: CardinalLen, OrdinalLen + character(len=80) :: errorstring + character(len=80), parameter :: subname='initedgeBuffer' + + if(present(bndry_type)) then + if ( MPI_VERSION >= 3 ) then + edge%bndry_type = bndry_type + else + edge%bndry_type = HME_BNDRY_P2P + endif + else + edge%bndry_type = HME_BNDRY_P2P + endif + + ! Set the length of the cardinal and ordinal message lengths + if(present(CardinalLength)) then + CardinalLen = CardinalLength + else + CardinalLen = np + endif + if(present(OrdinalLength)) then + OrdinalLen = OrdinalLength + else + OrdinalLen = 1 + endif + +! DO NOT REMOVE THIS NEXT BARRIER +! MT: This initial barrier fixes a long standing issue with Intel compilers on +! two different platforms. Without this barrier, edge buffers initialized from +! within the threaded region would not work in a reproducable way with certain +! thread combinations. I cant explain why, but this fixes that issue on Edison +!$OMP BARRIER + + if (nlyr==0) return ! tracer code might call initedgebuffer() with zero tracers + + +!$OMP MASTER + ! + ! Keep a counter of how many times initedgebuffer is called. + ! This is used to assign a unique message ID for the boundary exchange + ! + initedgebuffer_callid=initedgebuffer_callid+1 + edge%id = initedgebuffer_callid + edge%tag = BNDRY_TAG_BASE + MODULO(edge%id, MAX_ACTIVE_MSG) + + allocate(edge%putmap(max_neigh_edges,nelemd)) + allocate(edge%getmap(max_neigh_edges,nelemd)) + allocate(edge%reverse(max_neigh_edges,nelemd)) + + edge%putmap(:,:)=-1 + edge%getmap(:,:)=-1 + + allocate(putmap2(max_neigh_edges,nelemd)) + allocate(getmap2(max_neigh_edges,nelemd)) + putmap2(:,:)=-1 + getmap2(:,:)=-1 + do ie=1,nelemd + do i=1,max_neigh_edges + edge%reverse(i,ie) = elem(ie)%desc%reverse(i) + enddo + enddo + + pSchedule => Schedule(1) + nSendCycles = pSchedule%nSendCycles + nRecvCycles = pSchedule%nRecvCycles + nInter = pSchedule%nInter + nIntra = pSchedule%nIntra + nFull = nInter+nIntra + + edge%nInter=nInter + edge%nIntra=nIntra + + if(nInter>0) then + allocate(edge%rcountsInter(nInter),edge%rdisplsInter(nInter)) + allocate(edge%scountsInter(nInter),edge%sdisplsInter(nInter)) + endif + if(nIntra>0) then + allocate(edge%rcountsIntra(nIntra),edge%rdisplsIntra(nIntra)) + allocate(edge%scountsIntra(nIntra),edge%sdisplsIntra(nIntra)) + endif + + if (nSendCycles>0) then + allocate(edge%scountsFull(nSendCycles),edge%sdisplsFull(nSendCycles)) + allocate(edge%Srequest(nSendCycles)) + edge%scountsFull(:) = 0 + endif + ! + ! Setup the data-structures for the sends + ! 
+ j = 1 + icycle = 1 + dst0 = pSchedule%pIndx(j)%mesgid + il = pSchedule%pIndx(j)%edgeid + ie = pSchedule%pIndx(j)%elemid + len = CalcSegmentLength(pSchedule%pIndx(j),CardinalLen,OrdinalLen,nlyr) + edge%putmap(il,ie) = 0 + if(nSendCycles>0) then + edge%sdisplsFull(icycle) = edge%putmap(il,ie) + edge%scountsFull(icycle) = len + endif + ilm1 = il + iem1 = ie + lenm1 = len + + do j=2,SIZE(pSchedule%pIndx) + il = pSchedule%pIndx(j)%edgeid + ie = pSchedule%pIndx(j)%elemid + mesgid = pSchedule%pIndx(j)%mesgid + if(il>0 .and. ie >0) then + len = CalcSegmentLength(pSchedule%pIndx(j),CardinalLen,OrdinalLen,nlyr) + edge%putmap(il,ie) = edge%putmap(ilm1,iem1)+lenm1 + if(mesgid .ne. par%rank) then ! don't enter if this is a move cycle where (mesgid == par%rank) + if(mesgid .ne. dst0) then + icycle=icycle+1 + if (nSendCycles>0) edge%sdisplsFull(icycle) = edge%putmap(il,ie) + dst0=mesgid + endif + if (nSendCycles>0) edge%scountsFull(icycle) = edge%scountsFull(icycle)+len + endif + ilm1=il + iem1=ie + lenm1=len + endif + enddo + + icInter=0 + icIntra=0 + do icycle=1,nSendCycles + if(pSchedule%SendCycle(icycle)%onNode .eqv. .FALSE.) then + icInter=icInter+1 + edge%sdisplsInter(icInter)=edge%sdisplsFull(icycle) + edge%scountsInter(icInter)=edge%scountsFull(icycle) + else + icIntra=icIntra+1 + edge%sdisplsIntra(icIntra)=edge%sdisplsFull(icycle) + edge%scountsIntra(icIntra)=edge%scountsFull(icycle) + endif + enddo + + if (nRecvCycles>0) then + allocate(edge%rcountsFull(nRecvCycles),edge%rdisplsFull(nRecvCycles)) + allocate(edge%getDisplsFull(nRecvCycles),edge%putDisplsFull(nRecvCycles)) + edge%rcountsFull(:) = 0 + ! allocate the MPI Send/Recv request handles + allocate(edge%Rrequest(nRecvCycles)) + allocate(edge%status(HME_status_size,nRecvCycles)) + endif + + ! + ! Setup the data-structures for the receives + ! + j = 1 + icycle = 1 + src0 = pSchedule%gIndx(j)%mesgid + il = pSchedule%gIndx(j)%edgeid + ie = pSchedule%gIndx(j)%elemid + len = CalcSegmentLength(pSchedule%gIndx(j),CardinalLen,OrdinalLen,nlyr) + edge%getmap(il,ie) = 0 + if (nRecvCycles>0) then + edge%rdisplsFull(icycle) = edge%getmap(il,ie) + edge%rcountsFull(icycle) = len + endif + ilm1=il + iem1=ie + lenm1=len + + do j=2,SIZE(pSchedule%gIndx) + il = pSchedule%gIndx(j)%edgeid + ie = pSchedule%gIndx(j)%elemid + mesgid = pSchedule%gIndx(j)%mesgid + if(il>0 .and. ie >0) then + len = CalcSegmentLength(pSchedule%gIndx(j),CardinalLen,OrdinalLen,nlyr) + edge%getmap(il,ie) = edge%getmap(ilm1,iem1)+lenm1 + if(mesgid .ne. par%rank) then ! don't enter if this is a move cycle where (mesgid == par%rank) + if(mesgid .ne. src0) then + if (nRecvCycles>0) edge%rdisplsFull(icycle+1) = edge%getmap(il,ie) + icycle=icycle+1 + src0=mesgid + endif + if (nRecvCycles>0) edge%rcountsFull(icycle) = edge%rcountsFull(icycle)+len + endif + ilm1=il + iem1=ie + lenm1=len + endif + enddo + + + ! + ! populate the Inter and Intra node communication data-structures + ! + icInter=0 + icIntra=0 + do icycle=1,nRecvCycles + if(pSchedule%RecvCycle(icycle)%onNode .eqv. .FALSE.) then + icInter=icInter+1 + edge%rdisplsInter(icInter)=edge%rdisplsFull(icycle) + edge%rcountsInter(icInter)=edge%rcountsFull(icycle) + else + icIntra=icIntra+1 + edge%rdisplsIntra(icIntra)=edge%rdisplsFull(icycle) + edge%rcountsIntra(icIntra)=edge%rcountsFull(icycle) + endif + enddo + + + ! Setup the data-structures for the on process moves + ! Note that this assumes that the data to move is at + ! the end of the message buffer. 
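For context, the put/get maps and the per-cycle counts and displacements assembled above are what the pack/exchange/unpack sequence consumes at run time. A minimal sketch of that sequence follows; it assumes the real(r8) bndry_exchange entry point mirrors the (par, edge) call used with the integer buffer in global_dof (dof_mod), and uses a placeholder field fld together with par, elem, nlev and the loop index ie as in the surrounding code:

    type (EdgeBuffer_t) :: edge
    real (kind=r8)      :: fld(np,np,nlev,nelemd)        ! placeholder field, one slab per element

    call initEdgeBuffer(par, edge, elem, nlev)           ! builds the maps, counts and displacements above
    do ie = 1, nelemd
      call edgeVpack(edge, fld(:,:,:,ie), nlev, 0, ie)   ! copy element edge data into edge%buf
    end do
    call bndry_exchange(par, edge)                       ! MPI transfer into edge%receive
    do ie = 1, nelemd
      call edgeVunpack(edge, fld(:,:,:,ie), nlev, 0, ie) ! accumulate neighbor contributions into fld
    end do
    call FreeEdgeBuffer(edge)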
+ if(nRecvCycles>0) then + moveptr = edge%rdisplsFull(nRecvCycles)+edge%rcountsFull(nRecvCycles)+1 + else + moveptr = 1 + endif + moveLength = 0 + do j=1,SIZE(pSchedule%gIndx) + il = pSchedule%gIndx(j)%edgeid + ie = pSchedule%gIndx(j)%elemid + mesgid = pSchedule%gIndx(j)%mesgid + if(mesgid == par%rank) then + len = CalcSegmentLength(pSchedule%gIndx(j),CardinalLen,OrdinalLen,nlyr) + moveLength = moveLength + len + endif + enddo + + ! decompose the move data between the available threads + if(max_num_threads<=0) then + nlen = 1 + else + if(present(nthreads)) then + if (nthreads > 0) then + nlen = nthreads + else + nlen = max_num_threads + end if + else + nlen = 1 + end if + end if + call gbarrier_init(edge%gbarrier, nlen) + + allocate(edge%moveLength(nlen)) + allocate(edge%movePtr(nlen)) + + if (nlen > 1) then + ! the master thread performs no data movement because it is busy with the + ! MPI messaging + edge%moveLength(1) = -1 + edge%movePtr(1) = 0 + + ! Calculate the length of the local copy in bndy_exchange + llen = ceiling(real(moveLength,kind=r8)/real(nlen-1,kind=r8)) + iptr = moveptr + mLen = 0 + do i=2,nlen + if( (mLen+llen) <= moveLength) then + tlen = llen + else + tlen = moveLength - mLen + endif + edge%moveLength(i) = tlen + edge%movePtr(i) = iptr + iptr = iptr + tlen + mLen = mLen + tLen + enddo + else + edge%moveLength(1) = moveLength + edge%movePtr(1) = moveptr + endif + + ! Set the maximum length of the message buffer + nbuf = movePtr+moveLength + + edge%nlyr=nlyr + edge%nbuf=nbuf + + allocate(edge%receive(nbuf)) + allocate(edge%buf(nbuf)) + +21 format('RANK: ',i2, A,8(i6)) + +!$OMP END MASTER +! MT: This next barrier is also needed - threads cannot start using edge() +! until MASTER is done initializing it +!$OMP BARRIER + + end subroutine initEdgeBuffer_r8 + + integer function CalcSegmentLength(pgIndx,CardinalLength,OrdinalLength,nlyr) result(len) + + type(pgindex_t) :: pgIndx + integer, intent(in) :: CardinalLength,OrdinalLength + integer, intent(in) :: nlyr + + integer :: rem + integer, parameter :: alignment=1 ! align on word boundaries +! integer, parameter :: alignment=2 ! align on 2 word boundaries +! integer, parameter :: alignment=8 ! align on 8 word boundaries + + select case(pgIndx%edgeType) + CASE(HME_Cardinal) + len = nlyr*CardinalLength + CASE(HME_Ordinal) + len = nlyr*OrdinalLength + end select + + rem = MODULO(len,alignment) + if(rem .ne. 0) then + len = len + (alignment-rem) + endif + + end function calcSegmentLength + + ! ========================================= + ! initEdgeBuffer: + ! + ! create an Integer based communication buffer + ! ========================================= + subroutine initEdgeBuffer_i8(edge,nlyr) + use dimensions_mod, only : np, nelemd, max_corner_elem + + integer, intent(in) :: nlyr + type (LongEdgeBuffer_t), intent(out) :: edge + + ! Local variables + integer :: nbuf + + ! sanity check for threading + if (omp_get_num_threads()>1) then + call endrun('ERROR: initEdgeBuffer must be called before threaded reagion') + endif + + nbuf=4*(np+max_corner_elem)*nelemd + edge%nlyr=nlyr + edge%nbuf=nbuf + allocate(edge%buf(nlyr,nbuf)) + edge%buf(:,:)=0 + + allocate(edge%receive(nlyr,nbuf)) + edge%receive(:,:)=0 + + end subroutine initEdgeBuffer_i8 + ! ========================================= + ! edgeDGVpack: + ! + ! Pack edges of v into buf for DG stencil + ! 
========================================= + subroutine edgeDGVpack(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np + + type (EdgeBuffer_t) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(in) :: v(np,np,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! ========================================= + ! This code is just a wrapper call the + ! normal oldedgeVpack + ! ========================================= + call edgeVpack(edge,v,vlyr,kptr,ielem) + + end subroutine edgeDGVpack + + subroutine FreeGhostBuffer_r8(edge) + type (EdgeBuffer_t), intent(inout) :: edge + call FreeEdgeBuffer_r8(edge) + end subroutine FreeGhostBuffer_r8 + ! =========================================== + ! FreeEdgeBuffer: + ! + ! Freed an edge communication buffer + ! ========================================= + subroutine FreeEdgeBuffer_r8(edge) + + type (EdgeBuffer_t),intent(inout) :: edge + +!$OMP BARRIER +!$OMP MASTER + if(allocated(edge%buf)) deallocate(edge%buf) + if(allocated(edge%receive)) deallocate(edge%receive) + if(associated(edge%putmap)) deallocate(edge%putmap) + if(associated(edge%getmap)) deallocate(edge%getmap) + if(associated(edge%reverse)) deallocate(edge%reverse) + if(associated(edge%moveLength)) deallocate(edge%moveLength) + if(associated(edge%movePtr)) deallocate(edge%movePtr) + + ! All MPI communications + if(associated(edge%rcountsFull)) deallocate(edge%rcountsFull) + if(associated(edge%scountsFull)) deallocate(edge%scountsFull) + if(associated(edge%sdisplsFull)) deallocate(edge%sdisplsFull) + if(associated(edge%rdisplsFull)) deallocate(edge%rdisplsFull) + + ! Intra-node MPI Communication + if(edge%nIntra>0) then + if(associated(edge%rcountsIntra)) deallocate(edge%rcountsIntra) + if(associated(edge%scountsIntra)) deallocate(edge%scountsIntra) + if(associated(edge%sdisplsIntra)) deallocate(edge%sdisplsIntra) + if(associated(edge%rdisplsIntra)) deallocate(edge%rdisplsIntra) + endif + + ! Inter-node MPI Communication + if(edge%nInter>0) then + if(associated(edge%rcountsInter)) deallocate(edge%rcountsInter) + if(associated(edge%scountsInter)) deallocate(edge%scountsInter) + if(associated(edge%sdisplsInter)) deallocate(edge%sdisplsInter) + if(associated(edge%rdisplsInter)) deallocate(edge%rdisplsInter) + endif + if(allocated(edge%rRequest)) deallocate(edge%rRequest) + if(allocated(edge%sRequest)) deallocate(edge%sRequest) + if(allocated(edge%status)) deallocate(edge%status) + call gbarrier_delete(edge%gbarrier) + +!$OMP END MASTER + + end subroutine FreeEdgeBuffer_r8 + + ! =========================================== + ! FreeEdgeBuffer: + ! + ! Freed an edge communication buffer + ! ========================================= + subroutine FreeEdgeBuffer_i8(edge) + + type (LongEdgeBuffer_t),intent(inout) :: edge + + edge%nbuf=0 + edge%nlyr=0 + deallocate(edge%buf) + deallocate(edge%receive) + + end subroutine FreeEdgeBuffer_i8 + + ! ========================================= + ! + !> @brief Pack edges of v into an edge buffer for boundary exchange. + ! + !> This subroutine packs for one or more vertical layers into an edge + !! buffer. If the buffer associated with edge is not large enough to + !! hold all vertical layers you intent to pack, the method will + !! halt the program with a call to endrum(). + !! @param[in] edge Edge Buffer into which the data will be packed. + !! This buffer must be previously allocated with initEdgeBuffer(). + !! @param[in] v The data to be packed. + !! @param[in] vlyr Number of vertical level coming into the subroutine + !! 
for packing for input v. + !! @param[in] kptr Vertical pointer to the place in the edge buffer where + !! data will be located. + ! ========================================= + subroutine edgeVpack(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(in) :: v(np,np,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local variables + integer :: i,k,ir,ll,iptr + integer :: is,ie,in,iw,edgeptr + + is = edge%putmap(south,ielem) + ie = edge%putmap(east,ielem) + in = edge%putmap(north,ielem) + iw = edge%putmap(west,ielem) + if (edge%nlyr < (kptr+vlyr) ) then + print *,'edge%nlyr = ',edge%nlyr + print *,'kptr+vlyr = ',kptr+vlyr + call endrun('edgeVpack: Buffer overflow: size of the vertical dimension must be increased!') + endif + +!dir$ ivdep + do k=1,vlyr + iptr = np*(kptr+k-1) + do i=1,np + edge%buf(iptr+ie+i) = v(np ,i ,k) ! East + edge%buf(iptr+is+i) = v(i ,1 ,k) ! South + edge%buf(iptr+in+i) = v(i ,np,k) ! North + edge%buf(iptr+iw+i) = v(1 ,i ,k) ! West + enddo + enddo + + ! This is really kludgy way to setup the index reversals + ! But since it is so a rare event not real need to spend time optimizing + + if(edge%reverse(south,ielem)) then + do k=1,vlyr + iptr = np*(kptr+k-1)+is + do i=1,np + ir = np-i+1 + edge%buf(iptr+ir)=v(i,1,k) + enddo + enddo + endif + + if(edge%reverse(east,ielem)) then + do k=1,vlyr + iptr=np*(kptr+k-1)+ie + do i=1,np + ir = np-i+1 + edge%buf(iptr+ir)=v(np,i,k) + enddo + enddo + endif + + if(edge%reverse(north,ielem)) then + do k=1,vlyr + iptr=np*(kptr+k-1)+in + do i=1,np + ir = np-i+1 + edge%buf(iptr+ir)=v(i,np,k) + enddo + enddo + endif + + if(edge%reverse(west,ielem)) then + do k=1,vlyr + iptr=np*(kptr+k-1)+iw + do i=1,np + ir = np-i+1 + edge%buf(iptr+ir)=v(1,i,k) + enddo + enddo + endif + +! SWEST + do ll=swest,swest+max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr = edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + if (iptr > size(edge%buf)) then + write(6, *) 'ERROR SW: ',size(edge%buf),iptr,edge%putmap(ll,ielem) + call endrun('pointer bounds ERROR SW') + end if + edge%buf(iptr) = v(1, 1, k) + end do + end if + end do + +! SEAST + do ll=swest+max_corner_elem,swest+2*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr = edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + if (iptr > size(edge%buf)) then + write(6, *) 'ERROR SE: ',size(edge%buf),iptr,edge%putmap(ll,ielem) + call endrun('pointer bounds ERROR SE') + end if + edge%buf(iptr)=v(np, 1, k) + end do + end if + end do + +! NEAST + do ll=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr = edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + if (iptr > size(edge%buf)) then + write(6, *) 'ERROR NE: ',size(edge%buf),iptr,edge%putmap(ll,ielem) + call endrun('pointer bounds ERROR NE') + end if + edge%buf(iptr) = v(np, np, k) + end do + end if + end do + +! 
NWEST + do ll=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr = edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + if (iptr > size(edge%buf)) then + write(6, *) 'ERROR NW: ',size(edge%buf),iptr,edge%putmap(ll,ielem) + call endrun('pointer bounds ERROR NW') + end if + edge%buf(iptr) = v(1, np, k) + end do + end if + end do + + end subroutine edgeVpack + + subroutine edgeSpack_r8(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(in) :: v(vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local variables + integer :: i,k,ir,ll,iptr + integer :: is,ie,in,iw,edgeptr + real (kind=r8) :: tmp + + is = edge%putmap(south,ielem) + ie = edge%putmap(east,ielem) + in = edge%putmap(north,ielem) + iw = edge%putmap(west,ielem) + if (edge%nlyr < (kptr+vlyr) ) then + call endrun('edgeSpack: Buffer overflow: size of the vertical dimension must be increased!') + endif + + do k=1,vlyr + iptr = kptr+k-1 + edge%buf(iptr+ie+1) = v(k) ! East + edge%buf(iptr+is+1) = v(k) ! South + edge%buf(iptr+in+1) = v(k) ! North + edge%buf(iptr+iw+1) = v(k) ! West + enddo + +! SWEST + do ll=swest,swest+max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr=edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + edge%buf(iptr)=v(k) + end do + end if + end do + +! SEAST + do ll=swest+max_corner_elem,swest+2*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr=edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + edge%buf(iptr)=v(k) + end do + end if + end do + +! NEAST + do ll=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr=edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + edge%buf(iptr)=v(k) + end do + end if + end do + +! NWEST + do ll=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if (edge%putmap(ll,ielem) /= -1) then + edgeptr=edge%putmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + edge%buf(iptr)=v(k) + end do + end if + end do + + end subroutine edgeSpack_r8 + + ! ========================================= + ! LongEdgeVpack: + ! + ! Pack edges of v into buf... + ! ========================================= + subroutine LongEdgeVpack(edge,v,vlyr,kptr,desc) + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + use dimensions_mod, only: np, max_corner_elem + + type (LongEdgeBuffer_t) :: edge + integer, intent(in) :: vlyr + integer , intent(in) :: v(np,np,vlyr) + integer, intent(in) :: kptr + type (EdgeDescriptor_t), intent(in) :: desc + + ! Local variables + logical, parameter :: UseUnroll = .TRUE. + integer :: i,k,ir,l + integer :: is,ie,in,iw + + if(.not. threadsafe) then +!$OMP BARRIER + threadsafe=.true. + end if + + is = desc%putmapP(south) + ie = desc%putmapP(east) + in = desc%putmapP(north) + iw = desc%putmapP(west) + + if(MODULO(np,2) == 0 .and. 
UseUnroll) then + do k=1,vlyr + do i=1,np,2 + edge%buf(kptr+k,is+i) = v(i ,1 ,k) + edge%buf(kptr+k,is+i+1) = v(i+1,1 ,k) + edge%buf(kptr+k,ie+i) = v(np ,i ,k) + edge%buf(kptr+k,ie+i+1) = v(np ,i+1 ,k) + edge%buf(kptr+k,in+i) = v(i ,np,k) + edge%buf(kptr+k,in+i+1) = v(i+1 ,np,k) + edge%buf(kptr+k,iw+i) = v(1 ,i ,k) + edge%buf(kptr+k,iw+i+1) = v(1 ,i+1 ,k) + + enddo + end do + else + do k=1,vlyr + do i=1,np + edge%buf(kptr+k,is+i) = v(i ,1 ,k) + edge%buf(kptr+k,ie+i) = v(np ,i ,k) + edge%buf(kptr+k,in+i) = v(i ,np,k) + edge%buf(kptr+k,iw+i) = v(1 ,i ,k) + enddo + end do + + endif + + + ! This is really kludgy way to setup the index reversals + ! But since it is so a rare event not real need to spend time optimizing + + if(desc%reverse(south)) then + is = desc%putmapP(south) + do k=1,vlyr + do i=1,np + ir = np-i+1 + edge%buf(kptr+k,is+ir)=v(i,1,k) + enddo + enddo + endif + + if(desc%reverse(east)) then + ie = desc%putmapP(east) + do k=1,vlyr + do i=1,np + ir = np-i+1 + edge%buf(kptr+k,ie+ir)=v(np,i,k) + enddo + enddo + endif + + if(desc%reverse(north)) then + in = desc%putmapP(north) + do k=1,vlyr + do i=1,np + ir = np-i+1 + edge%buf(kptr+k,in+ir)=v(i,np,k) + enddo + enddo + endif + + if(desc%reverse(west)) then + iw = desc%putmapP(west) + do k=1,vlyr + do i=1,np + ir = np-i+1 + edge%buf(kptr+k,iw+ir)=v(1,i,k) + enddo + enddo + endif + +! SWEST + do l=swest,swest+max_corner_elem-1 + if (desc%putmapP(l) /= -1) then + do k=1,vlyr + edge%buf(kptr+k,desc%putmapP(l)+1)=v(1 ,1 ,k) + end do + end if + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if (desc%putmapP(l) /= -1) then + do k=1,vlyr + edge%buf(kptr+k,desc%putmapP(l)+1)=v(np ,1 ,k) + end do + end if + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if (desc%putmapP(l) /= -1) then + do k=1,vlyr + edge%buf(kptr+k,desc%putmapP(l)+1)=v(np ,np,k) + end do + end if + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if (desc%putmapP(l) /= -1) then + do k=1,vlyr + edge%buf(kptr+k,desc%putmapP(l)+1)=v(1 ,np,k) + end do + end if + end do + + end subroutine LongEdgeVpack + + subroutine edgeVunpack(edge,v,vlyr,kptr,ielem,rank) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(np,np,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + integer, optional, intent(in) :: rank + + ! Local + integer :: i,k,ll,iptr + integer :: is,ie,in,iw,edgeptr + integer :: ise,isw,ine,inw + integer :: ks,ke,kblock + logical :: done + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + isw=edge%getmap(swest,ielem) + ise=edge%getmap(seast,ielem) + inw=edge%getmap(nwest,ielem) + ine=edge%getmap(neast,ielem) + + !DIR$ IVDEP + do k=1,vlyr + iptr=np*(kptr+k-1) + do i=1,np + v(np ,i ,k) = v(np ,i ,k)+edge%receive(iptr+i+ie) ! East + v(i ,1 ,k) = v(i ,1 ,k)+edge%receive(iptr+i+is) ! South + v(i ,np ,k) = v(i ,np ,k)+edge%receive(iptr+i+in) ! North + v(1 ,i ,k) = v(1 ,i ,k)+edge%receive(iptr+i+iw) ! West + enddo + enddo + +! SWEST + do ll=swest,swest+max_corner_elem-1 + if(edge%getmap(ll,ielem) /= -1) then + edgeptr=edge%getmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(1 ,1 ,k)=v(1 ,1 ,k)+edge%receive(iptr) + enddo + endif + end do + +! 
SEAST + do ll=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(edge%getmap(ll,ielem) /= -1) then + edgeptr=edge%getmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(np ,1 ,k)=v(np,1 ,k)+edge%receive(iptr) + enddo + endif + end do + +! NEAST + do ll=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(edge%getmap(ll,ielem) /= -1) then + edgeptr=edge%getmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(np ,np,k)=v(np,np,k)+edge%receive(iptr) + enddo + endif + end do + +! NWEST + do ll=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(edge%getmap(ll,ielem) /= -1) then + edgeptr=edge%getmap(ll,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(1 ,np,k)=v(1 ,np,k)+edge%receive(iptr) + enddo + endif + end do + + + end subroutine edgeVunpack +! + subroutine edgeVunpackVert(edge,v,ielem) + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + use dimensions_mod, only: np, max_corner_elem, ne + use coordinate_systems_mod, only: cartesian3D_t + + type (EdgeBuffer_t), intent(inout) :: edge + type (cartesian3D_t), intent(inout) :: v(:,:,:) + integer, intent(in) :: ielem + + ! Local + logical, parameter :: UseUnroll = .TRUE. + integer :: i,k,l, nce + integer :: is,ie,in,iw,ine,inw,isw,ise + + threadsafe=.false. + + if (max_corner_elem.ne.1 .and. ne==0) then + ! MNL: this is used to construct the dual grid on the cube, + ! currently only supported for the uniform grid. If + ! this is desired on a refined grid, a little bit of + ! work will be required. + call endrun("edgeVunpackVert should not be called with unstructured meshes") + end if + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + + + ! N+S + do i=1,np/2 + ! North + v(3,i ,np)%x = edge%receive(in+i) + v(3,i ,np)%y = edge%receive(np+in+i) + v(3,i ,np)%z = edge%receive(2*np+in+i) + + ! South + v(2,i ,1)%x = edge%receive(is+i) + v(2,i ,1)%y = edge%receive(np+is+i) + v(2,i ,1)%z = edge%receive(2*np+is+i) + enddo + + do i=np/2+1,np + ! North + v(4,i ,np)%x = edge%receive(in+i) + v(4,i ,np)%y = edge%receive(np+in+i) + v(4,i ,np)%z = edge%receive(2*np+in+i) + ! South + v(1,i ,1)%x = edge%receive(is+i) + v(1,i ,1)%y = edge%receive(np+is+i) + v(1,i ,1)%z = edge%receive(2*np+is+i) + enddo + + do i=1,np/2 + ! East + v(3,np,i)%x = edge%receive(ie+i) + v(3,np,i)%y = edge%receive(np+ie+i) + v(3,np,i)%z = edge%receive(2*np+ie+i) + ! West + v(4,1,i)%x = edge%receive(iw+i) + v(4,1,i)%y = edge%receive(np+iw+i) + v(4,1,i)%z = edge%receive(2*np+iw+i) + end do + + do i=np/2+1,np + ! East + v(2,np,i)%x = edge%receive(ie+i) + v(2,np,i)%y = edge%receive(np+ie+i) + v(2,np,i)%z = edge%receive(2*np+ie+i) + ! West + v(1,1,i)%x = edge%receive(iw+i) + v(1,1,i)%y = edge%receive(np+iw+i) + v(1,1,i)%z = edge%receive(2*np+iw+i) + end do + +! SWEST + nce = max_corner_elem + do l=swest,swest+max_corner_elem-1 + ! find the one active corner, then exist + isw=edge%getmap(l,ielem) + if(isw /= -1) then + v(1,1,1)%x=edge%receive(isw+1) + v(1,1,1)%y=edge%receive(nce+isw+1) + v(1,1,1)%z=edge%receive(2*nce+isw+1) + exit + else + v(1,1,1)%x=0_r8 + v(1,1,1)%y=0_r8 + v(1,1,1)%z=0_r8 + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + ! find the one active corner, then exist + ise=edge%getmap(l,ielem) + if(ise /= -1) then + v(2,np,1)%x=edge%receive(ise+1) + v(2,np,1)%y=edge%receive(nce+ise+1) + v(2,np,1)%z=edge%receive(2*nce+ise+1) + exit + else + v(2,np,1)%x=0_r8 + v(2,np,1)%y=0_r8 + v(2,np,1)%z=0_r8 + endif + end do + +! 
NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + ! find the one active corner, then exist + ine=edge%getmap(l,ielem) + if(ine /= -1) then + v(3,np,np)%x=edge%receive(ine+1) + v(3,np,np)%y=edge%receive(nce+ine+1) + v(3,np,np)%z=edge%receive(2*nce+ine+1) + exit + else + v(3,np,np)%x=0_r8 + v(3,np,np)%y=0_r8 + v(3,np,np)%z=0_r8 + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + ! find the one active corner, then exist + inw = edge%getmap(l,ielem) + if(inw/= -1) then + v(4,1,np)%x=edge%receive(inw+1) + v(4,1,np)%y=edge%receive(nce+inw+1) + v(4,1,np)%z=edge%receive(2*nce+inw+1) + exit + else + v(4,1,np)%x=0_r8 + v(4,1,np)%y=0_r8 + v(4,1,np)%z=0_r8 + endif + end do + + ! Fill the missing vertex info + + do i=2,np/2 + ! North + v(4,i ,np)%x = v(3,i-1 ,np)%x + v(4,i ,np)%y = v(3,i-1 ,np)%y + v(4,i ,np)%z = v(3,i-1 ,np)%z + ! South + v(1,i ,1)%x = v(2,i-1 ,1)%x + v(1,i ,1)%y = v(2,i-1 ,1)%y + v(1,i ,1)%z = v(2,i-1 ,1)%z + enddo + + do i=np/2+1,np-1 + ! North + v(3,i ,np)%x = v(4,i+1 ,np)%x + v(3,i ,np)%y = v(4,i+1 ,np)%y + v(3,i ,np)%z = v(4,i+1 ,np)%z + ! South + v(2,i ,1)%x = v(1,i+1 ,1)%x + v(2,i ,1)%y = v(1,i+1 ,1)%y + v(2,i ,1)%z = v(1,i+1 ,1)%z + enddo + + do i=2,np/2 + ! East + v(2,np,i)%x = v(3,np,i-1)%x + v(2,np,i)%y = v(3,np,i-1)%y + v(2,np,i)%z = v(3,np,i-1)%z + ! West + v(1,1,i)%x = v(4,1,i-1)%x + v(1,1,i)%y = v(4,1,i-1)%y + v(1,1,i)%z = v(4,1,i-1)%z + end do + + do i=np/2+1,np-1 + ! East + v(3,np,i)%x = v(2,np,i+1)%x + v(3,np,i)%y = v(2,np,i+1)%y + v(3,np,i)%z = v(2,np,i+1)%z + ! West + v(4,1,i)%x = v(1,1,i+1)%x + v(4,1,i)%y = v(1,1,i+1)%y + v(4,1,i)%z = v(1,1,i+1)%z + end do + + end subroutine edgeVunpackVert + ! ======================================== + ! edgeDGVunpack: + ! + ! Unpack edges from edge buffer into v... + ! ======================================== + + subroutine edgeDGVunpack(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(0:np+1,0:np+1,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local + integer :: i,k,iptr + integer :: is,ie,in,iw + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + do k=1,vlyr + iptr=np*(kptr+k-1) + do i=1,np + v(i ,0 ,k)=edge%receive(iptr+is+i) + v(np+1,i ,k)=edge%receive(iptr+ie+i) + v(i ,np+1,k)=edge%receive(iptr+in+i) + v(0 ,i ,k)=edge%receive(iptr+iw+i) + end do + end do + + i = swest + if(edge%getmap(i,ielem) /= -1) then + do k=1,vlyr + iptr=(kptr+k-1) + v(0,0,k) = edge%receive(iptr+edge%getmap(i,ielem)+1) + end do + end if + i = swest+max_corner_elem + if(edge%getmap(i,ielem) /= -1) then + do k=1,vlyr + iptr=(kptr+k-1) + v(np+1,0,k) = edge%receive(iptr+edge%getmap(i,ielem)+1) + end do + end if + i = swest+3*max_corner_elem + if(edge%getmap(i,ielem) /= -1) then + do k=1,vlyr + iptr=(kptr+k-1) + v(np+1,np+1,k) = edge%receive(iptr+edge%getmap(i,ielem)+1) + end do + end if + i = swest+2*max_corner_elem + if(edge%getmap(i,ielem) /= -1) then + do k=1,vlyr + iptr=(kptr+k-1) + v(0,np+1,k) = edge%receive(iptr+edge%getmap(i,ielem)+1) + end do + end if + + end subroutine edgeDGVunpack + + ! ======================================== + ! edgeVunpackMIN/MAX: + ! + ! Finds the Min/Max edges from edge buffer into v... + ! 
======================================== + subroutine edgeVunpackMAX(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(np,np,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local + integer :: i,k,l,iptr + integer :: is,ie,in,iw + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + do k=1,vlyr + iptr=np*(kptr+k-1) + do i=1,np + v(np ,i ,k) = MAX(v(np ,i ,k),edge%receive(iptr+ie+i )) + v(i ,1 ,k) = MAX(v(i ,1 ,k),edge%receive(iptr+is+i )) + v(i ,np ,k) = MAX(v(i ,np ,k),edge%receive(iptr+in+i )) + v(1 ,i ,k) = MAX(v(1 ,i ,k),edge%receive(iptr+iw+i )) + end do + end do + +! SWEST + do l=swest,swest+max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + do k=1,vlyr + v(1 ,1 ,k)=MAX(v(1 ,1 ,k),edge%receive((kptr+k-1)+edge%getmap(l,ielem)+1)) + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + do k=1,vlyr + v(np ,1 ,k)=MAX(v(np,1 ,k),edge%receive((kptr+k-1)+edge%getmap(l,ielem)+1)) + enddo + endif + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + do k=1,vlyr + v(np ,np,k)=MAX(v(np,np,k),edge%receive((kptr+k-1)+edge%getmap(l,ielem)+1)) + enddo + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + do k=1,vlyr + v(1 ,np,k)=MAX(v(1 ,np,k),edge%receive((kptr+k-1)+edge%getmap(l,ielem)+1)) + enddo + endif + end do + + end subroutine edgeVunpackMAX + + subroutine edgeSunpackMAX(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local + integer :: i,k,l,iptr + integer :: is,ie,in,iw,edgeptr + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + do k=1,vlyr + iptr=(kptr+k-1) + v(k) = MAX(v(k),edge%receive(iptr+is+1),edge%receive(iptr+ie+1),edge%receive(iptr+in+1),edge%receive(iptr+iw+1)) + end do + +! SWEST + do l=swest,swest+max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MAX(v(k),edge%receive(iptr)) + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MAX(v(k),edge%receive(iptr)) + enddo + endif + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MAX(v(k),edge%receive(iptr)) + enddo + endif + end do + +! 
NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MAX(v(k),edge%receive(iptr)) + enddo + endif + end do + + end subroutine edgeSunpackMAX + + subroutine edgeSunpackMIN(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local + integer :: i,k,l,iptr + integer :: is,ie,in,iw,edgeptr + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + do k=1,vlyr + iptr=(kptr+k-1) + v(k) = MIN(v(k),edge%receive(iptr+is+1),edge%receive(iptr+ie+1),edge%receive(iptr+in+1),edge%receive(iptr+iw+1)) + end do + +! SWEST + do l=swest,swest+max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MiN(v(k),edge%receive(iptr)) + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MIN(v(k),edge%receive(iptr)) + enddo + endif + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MIN(v(k),edge%receive(iptr)) + enddo + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr = edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr = (kptr+k-1)+edgeptr + v(k)=MIN(v(k),edge%receive(iptr)) + enddo + endif + end do + + end subroutine edgeSunpackMIN + + subroutine edgeVunpackMIN(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only: np, max_corner_elem + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + + type (EdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + real (kind=r8), intent(inout) :: v(np,np,vlyr) + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + ! Local + integer :: i,k,l,iptr + integer :: is,ie,in,iw,edgeptr + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + do k=1,vlyr + iptr = np*(kptr+k-1) + do i=1,np + v(np ,i ,k) = MIN(v(np ,i ,k),edge%receive(iptr+ie+i )) + v(i ,1 ,k) = MIN(v(i ,1 ,k),edge%receive(iptr+is+i )) + v(i ,np ,k) = MIN(v(i ,np ,k),edge%receive(iptr+in+i )) + v(1 ,i ,k) = MIN(v(1 ,i ,k),edge%receive(iptr+iw+i )) + end do + end do + +! SWEST + do l=swest,swest+max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr=edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr=(kptr+k-1)+edgeptr + v(1 ,1 ,k)=MIN(v(1 ,1 ,k),edge%receive(iptr)) + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr=edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr=(kptr+k-1)+edgeptr + v(np ,1 ,k)=MIN(v(np,1 ,k),edge%receive(iptr)) + enddo + endif + end do + +! 
NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr=edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr=(kptr+k-1)+edgeptr + v(np ,np,k)=MIN(v(np,np,k),edge%receive(iptr)) + enddo + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(edge%getmap(l,ielem) /= -1) then + edgeptr=edge%getmap(l,ielem)+1 + do k=1,vlyr + iptr=(kptr+k-1)+edgeptr + v(1 ,np,k)=MIN(v(1 ,np,k),edge%receive(iptr)) + enddo + endif + end do + + end subroutine edgeVunpackMIN + + ! ======================================== + ! LongEdgeVunpackMIN: + ! + ! Finds the Min edges from edge buffer into v... + ! ======================================== + subroutine LongEdgeVunpackMIN(edge,v,vlyr,kptr,desc) + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + use dimensions_mod, only: np, max_corner_elem + + type (LongEdgeBuffer_t), intent(in) :: edge + integer, intent(in) :: vlyr + integer , intent(inout) :: v(np,np,vlyr) + integer, intent(in) :: kptr + type (EdgeDescriptor_t), intent(in) :: desc + + ! Local + + integer :: i,k,l + integer :: is,ie,in,iw + + threadsafe=.false. + + is=desc%getmapP(south) + ie=desc%getmapP(east) + in=desc%getmapP(north) + iw=desc%getmapP(west) + do k=1,vlyr + do i=1,np + v(i ,1 ,k) = MIN(v(i ,1 ,k),edge%buf(kptr+k,is+i )) + v(np ,i ,k) = MIN(v(np ,i ,k),edge%buf(kptr+k,ie+i )) + v(i ,np ,k) = MIN(v(i ,np ,k),edge%buf(kptr+k,in+i )) + v(1 ,i ,k) = MIN(v(1 ,i ,k),edge%buf(kptr+k,iw+i )) + end do + end do + +! SWEST + do l=swest,swest+max_corner_elem-1 + if(desc%getmapP(l) /= -1) then + do k=1,vlyr + v(1 ,1 ,k)=MIN(v(1 ,1 ,k),edge%buf(kptr+k,desc%getmapP(l)+1)) + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if(desc%getmapP(l) /= -1) then + do k=1,vlyr + v(np ,1 ,k)=MIN(v(np,1 ,k),edge%buf(kptr+k,desc%getmapP(l)+1)) + enddo + endif + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if(desc%getmapP(l) /= -1) then + do k=1,vlyr + v(np ,np,k)=MIN(v(np,np,k),edge%buf(kptr+k,desc%getmapP(l)+1)) + enddo + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if(desc%getmapP(l) /= -1) then + do k=1,vlyr + v(1 ,np,k)=MIN(v(1 ,np,k),edge%buf(kptr+k,desc%getmapP(l)+1)) + enddo + endif + end do + + end subroutine LongEdgeVunpackMIN + + +subroutine ghostpack(edge,v,vlyr,kptr,ielem) + + use dimensions_mod, only : max_corner_elem + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + use edgetype_mod, only : EdgeDescriptor_t + + implicit none + + type (Edgebuffer_t) :: edge + integer, intent(in) :: vlyr + integer, intent(in) :: kptr + + real (kind=r8),intent(in) :: v(edge%lb:edge%ub,edge%lb:edge%ub,vlyr) + integer, intent(in) :: ielem + + ! 
Local variables + integer :: i,j,k,ir,l,itr,ktmp + + integer :: is,ie,in,iw,isw,ise,inw,ine + integer :: nhc, npoints + integer :: edgeptr,iptr + + is = edge%putmap(south,ielem) + ie = edge%putmap(east,ielem) + in = edge%putmap(north,ielem) + iw = edge%putmap(west,ielem) + if (edge%nlyr < (kptr+vlyr) ) then + print *,'edge%nlyr = ',edge%nlyr + print *,'kptr+vlyr = ',kptr+vlyr + call endrun('ghostpack: Buffer overflow: size of the vertical dimension must be increased!') + endif + + + nhc = edge%ndepth + npoints = edge%npoints + + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + edge%buf(iptr+is+i) = v(i ,j ,k) + edge%buf(iptr+ie+i) = v(npoints-j+1 ,i ,k) + edge%buf(iptr+in+i) = v(i ,npoints-j+1,k) + edge%buf(iptr+iw+i) = v(j ,i ,k) + enddo + end do + end do + + + ! This is really kludgy way to setup the index reversals + ! But since it is so a rare event not real need to spend time optimizing + ! Check if the edge orientation of the recieving element is different + ! if it is, swap the order of data in the edge + if(edge%reverse(south,ielem)) then + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + ir = npoints-i+1 + edge%buf(iptr+is+i)=v(ir,j,k) + enddo + enddo + enddo + endif + + if(edge%reverse(east,ielem)) then + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + ir = npoints-i+1 + edge%buf(iptr+ie+i)=v(npoints-j+1,ir,k) + enddo + enddo + enddo + endif + + if(edge%reverse(north,ielem)) then + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + ir = npoints-i+1 + edge%buf(iptr+in+i)=v(ir,npoints-j+1,k) + enddo + enddo + enddo + endif + + if(edge%reverse(west,ielem)) then + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + ir = npoints-i+1 + edge%buf(iptr+iw+i)=v(j,ir,k) + enddo + enddo + enddo + endif + + + ! corners. this is difficult because we dont know the orientaton + ! of the corners, and this which (i,j) dimension maps to which dimension +! SWEST + do l=swest,swest+max_corner_elem-1 + if (edge%putmap(l,ielem) /= -1) then + isw = edge%putmap(l,ielem) + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + edge%buf(iptr+isw+i)=v(i ,j ,k) + enddo + end do + end do + end if + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if (edge%putmap(l,ielem) /= -1) then + ise = edge%putmap(l,ielem) + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + edge%buf(iptr+ise+i)=v(npoints-i+1 ,j ,k) + enddo + end do + end do + end if + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if (edge%putmap(l,ielem) /= -1) then + ine = edge%putmap(l,ielem) + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + edge%buf(iptr+ine+i)=v(npoints-i+1,npoints-j+1,k) + enddo + enddo + end do + end if + end do + +! 
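+  ! Corner packing pattern (summarizing the four corner loops): each corner
+  ! section holds an nhc x nhc block per level taken from the element corner
+  ! facing that neighbor:
+  !   SWEST -> v(i, j, k)                       SEAST -> v(npoints-i+1, j, k)
+  !   NEAST -> v(npoints-i+1, npoints-j+1, k)   NWEST -> v(i, npoints-j+1, k)
+  ! A putmap value of -1 means no neighbor occupies that corner slot, so
+  ! nothing is packed for it.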
NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if (edge%putmap(l,ielem) /= -1) then + inw = edge%putmap(l,ielem) + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + edge%buf(iptr+inw+i)=v(i ,npoints-j+1,k) + enddo + end do + end do + end if + end do + +end subroutine ghostpack + +subroutine ghostunpack(edge,v,vlyr,kptr,ielem) + use dimensions_mod, only : max_corner_elem + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + type (Edgebuffer_t), intent(in) :: edge + + integer, intent(in) :: vlyr + integer, intent(in) :: kptr + integer, intent(in) :: ielem + + real (kind=r8), intent(inout) :: v(edge%lb:edge%ub,edge%lb:edge%ub,vlyr) + + + ! Local + logical, parameter :: UseUnroll = .TRUE. + integer :: i,j,k,l,itr, ktmp + integer :: is,ie,in,iw,isw,ise,inw,ine + integer :: nhc,npoints,iptr + logical :: reverse + + threadsafe=.false. + + is=edge%getmap(south,ielem) + ie=edge%getmap(east,ielem) + in=edge%getmap(north,ielem) + iw=edge%getmap(west,ielem) + + nhc = edge%ndepth + npoints = edge%npoints + + ! example for north buffer + ! first row ('edge') goes in v(:,np+1,k) + ! 2nd row ('edge') goes in v(:,np+2,k) + ! etc... + !DIR$ IVDEP + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = npoints*(ktmp + j - 1) + do i=1,npoints + v(i ,1-j ,k) = edge%receive(iptr+is+i) ! South + v(npoints+j ,i ,k) = edge%receive(iptr+ie+i) ! East + v(i ,npoints+j ,k) = edge%receive(iptr+in+i) ! North + v(1-j ,i ,k) = edge%receive(iptr+iw+i) ! West + end do + end do + end do + + +! SWEST + do l=swest,swest+max_corner_elem-1 + isw = edge%getmap(l,ielem) + if(isw /= -1) then + ! note the following is the the correct meaning of reverse in this code. + ! It is best described as a transponse operation + if (edge%reverse(l,ielem)) then + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + v(1-j,1-i,k)=edge%receive(iptr+isw+i) + enddo + enddo + enddo + else + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do i=1,nhc + iptr = nhc*(ktmp + i - 1) + do j=1,nhc + v(1-j,1-i,k)=edge%receive(iptr+isw+j) + enddo + enddo + enddo + endif + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i,1-j,k)=edgeDefaultVal + enddo + enddo + enddo + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + ise = edge%getmap(l,ielem) + if(ise /= -1) then + if (edge%reverse(l,ielem)) then + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do i=1,nhc + iptr = nhc*(ktmp + i - 1) + do j=1,nhc + v(npoints+i,1-j,k)=edge%receive(iptr+ise+j) + enddo + enddo + enddo + else + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + v(npoints+i ,1-j ,k)=edge%receive(iptr+ise+i) + enddo + enddo + enddo + endif + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(npoints+i,1-j,k)=edgeDefaultVal + enddo + enddo + enddo + endif + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + ine = edge%getmap(l,ielem) + if(ine /= -1) then + if (edge%reverse(l,ielem)) then + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + do i=1,nhc + iptr = nhc*(ktmp + i - 1) + v(npoints+i ,npoints+j,k)=edge%receive(iptr+ine+j) + enddo + enddo + enddo + else + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + v(npoints+i ,npoints+j,k)=edge%receive(iptr+ine+i) + enddo + enddo + enddo + endif + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(npoints+i,npoints+j,k)=edgeDefaultVal + enddo + enddo + enddo + endif + end do + +! 
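+  ! Halo placement (summarizing this routine): for j = 1..nhc the edge data is
+  ! unpacked into
+  !   v(i, 1-j, k)        (south)    v(npoints+j, i, k)  (east)
+  !   v(i, npoints+j, k)  (north)    v(1-j, i, k)        (west)
+  ! and the four corner loops fill the nhc x nhc halo corners.  A corner slot
+  ! with no neighbor (getmap == -1) is filled with edgeDefaultVal, and when
+  ! reverse(l,ielem) is set the corner block is copied transposed.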
NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + inw = edge%getmap(l,ielem) + if(inw /= -1) then + if (edge%reverse(l,ielem)) then + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do i=1,nhc + iptr = nhc*(ktmp + i - 1) + do j=1,nhc + v(1-i ,npoints+j,k)=edge%receive(iptr+inw+j) + enddo + enddo + enddo + else + do k=1,vlyr + ktmp = nhc*(kptr+k-1) + do j=1,nhc + iptr = nhc*(ktmp + j - 1) + do i=1,nhc + v(1-i ,npoints+j,k)=edge%receive(iptr+inw+i) + enddo + enddo + enddo + endif + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i,npoints+j,k)=edgeDefaultVal + enddo + enddo + enddo + endif + end do + +end subroutine ghostunpack + + ! ========================================= + ! initGhostBuffer3d: + ! Author: James Overfelt + ! create an Real based communication buffer + ! npoints is the number of points on one side + ! nhc is the deep of the ghost/halo zone + ! ========================================= + subroutine initGhostBuffer3d(ghost,nlyr,np,nhc_in) + + implicit none + integer,intent(in) :: nlyr, np + integer,intent(in),optional :: nhc_in + type (Ghostbuffer3d_t),intent(out) :: ghost + + ! Local variables + + integer :: nbuf,nhc,i + + ! sanity check for threading + if (omp_get_num_threads()>1) then + call endrun('ERROR: initGhostBuffer must be called before threaded region') + endif + + if (present(nhc_in)) then + nhc=nhc_in + else + nhc = np-1 + endif + + nbuf=max_neigh_edges*nelemd + + ghost%nlyr = nlyr + ghost%nhc = nhc + ghost%np = np + ghost%nbuf = nbuf + ghost%elem_size = np*(nhc+1) + allocate(ghost%buf (np,(nhc+1),nlyr,nbuf)) + allocate(ghost%receive(np,(nhc+1),nlyr,nbuf)) + ghost%buf=0 + ghost%receive=0 + + end subroutine initGhostBuffer3d + + ! ================================================================================= + ! GHOSTVPACK3D + ! AUTHOR: James Overfelt (from a subroutine of Christoph Erath, ghostvpack2D) + ! Pack edges of v into an ghost buffer for boundary exchange. + ! + ! This subroutine packs for many vertical layers into an ghost + ! buffer. + ! If the buffer associated with edge is not large enough to + ! hold all vertical layers you intent to pack, the method will + ! halt the program with a call to endrun(). + ! INPUT: + ! - ghost Buffer into which the data will be packed. + ! This buffer must be previously allocated with initGhostBuffer(). + ! - v The data to be packed. + ! - nhc deep of ghost/halo zone + ! - npoints number of points on on side + ! - kptr Vertical pointer to the place in the edge buffer where + ! data will be located. + ! ================================================================================= + subroutine ghostVpack3d(ghost, v, vlyr, kptr, desc) + use dimensions_mod, only : max_corner_elem + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + use edgetype_mod, only : edgedescriptor_t, ghostbuffer3d_t + implicit none + + type (Ghostbuffer3d_t) :: ghost + integer, intent(in) :: kptr,vlyr + real (kind=r8),intent(in) :: v(ghost%np, ghost%np, vlyr) + type (EdgeDescriptor_t),intent(in) :: desc + + integer :: nhc, np + + ! Local variables + integer :: i,j,k,ir,l,e + + integer :: is,ie,in,iw + + if(.not. threadsafe) then +!$OMP BARRIER + threadsafe=.true. + end if + ! Example convenction for buffer to the north: + ! buf(:,,:,i,e) + ! each "edge" is a row of data (i=1,np) in the element + ! north most row of data goes into e=1 + ! next row of data goes into e=2 + ! .... + ! south most row of data goes into e=np + ! We need to pack this way to preserve the orientation + ! 
so the data can be unpacked correctly + + ! note: we think of buf as dimensioned buf(k,is,i,e) + ! but this array is flatted to: buf(k,is+(i-1)+(e-1)*np) + ! + nhc = ghost%nhc + np = ghost%np + is = desc%putmapP_ghost(south) + ie = desc%putmapP_ghost(east) + in = desc%putmapP_ghost(north) + iw = desc%putmapP_ghost(west) + + do k=1,vlyr + do j=1,nhc + do i=1,np + ghost%buf(i,j,kptr+k,is) = v(i, j+1 , k) + ghost%buf(i,j,kptr+k,ie) = v(np-j, i , k) + ghost%buf(i,j,kptr+k,in) = v(i, np-j , k) + ghost%buf(i,j,kptr+k,iw) = v(j+1, i , k) + enddo + end do + end do + ! This is really kludgy way to setup the index reversals + ! But since it is so a rare event not real need to spend time optimizing + ! Check if the edge orientation of the recieving element is different + ! if it is, swap the order of data in the edge + if(desc%reverse(south)) then + do k=1,vlyr + do j=1,nhc + do i=1,np + ir = np-i+1 + ghost%buf(ir, j, kptr+k, is)=v(i, j+1, k) + enddo + enddo + enddo + endif + + if(desc%reverse(east)) then + do k=1,vlyr + do j=1,nhc + do i=1,np + ir = np-i+1 + ghost%buf(ir, j, kptr+k, ie)=v(np-j, i, k) + enddo + enddo + enddo + endif + + if(desc%reverse(north)) then + do k=1,vlyr + do j=1,nhc + do i=1,np + ir = np-i+1 + ghost%buf(ir, j, kptr+k, in)=v(i, np-j, k) + enddo + enddo + enddo + endif + + if(desc%reverse(west)) then + do k=1,vlyr + do j=1,nhc + do i=1,np + ir = np-i+1 + ghost%buf(ir, j, kptr+k, iw)=v(j+1, i, k) + enddo + enddo + enddo + endif + + ! corners. this is difficult because we dont know the orientaton + ! of the corners, and this which (i,j) dimension maps to which dimension +! SWEST + do l=swest, swest+max_corner_elem-1 + if (desc%putmapP_ghost(l) /= -1) then + do k=1,vlyr + do j=1,nhc+1 + do i=1,nhc+1 + ghost%buf(i, j, kptr+k, desc%putmapP_ghost(l))=v(i, j, k) + enddo + enddo + enddo + end if + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + if (desc%putmapP_ghost(l) /= -1) then + do k=1,vlyr + do j=1,nhc+1 + do i=1,nhc+1 + ghost%buf(i, j, kptr+k, desc%putmapP_ghost(l))=v(np-i+1, j, k) + enddo + enddo + enddo + end if + end do + +! NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + if (desc%putmapP_ghost(l) /= -1) then + do k=1,vlyr + do j=1,nhc+1 + do i=1,nhc+1 + ghost%buf(i, j, kptr+k,desc%putmapP_ghost(l))=v(np-i+1, np-j+1, k) + enddo + enddo + enddo + end if + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + if (desc%putmapP_ghost(l) /= -1) then + do k=1,vlyr + do j=1,nhc+1 + do i=1,nhc+1 + ghost%buf(i, j, kptr+k,desc%putmapP_ghost(l))=v(i, np-j+1, k) + enddo + enddo + enddo + end if + end do + end subroutine ghostVpack3d + + ! ================================================================================= + ! GHOSTVUNPACK3D + ! AUTHOR: James Overfelt (from a subroutine of Christoph Erath, + ! ghostVunpack2d) + ! Unpack ghost points from ghost buffer into v... + ! It is for cartesian points (v is only two dimensional). + ! INPUT SAME arguments as for GHOSTVPACK + ! 
================================================================================= + + subroutine ghostVunpack3d(g, v, vlyr, kptr, desc, sw, se, nw, ne, mult) + use dimensions_mod, only : max_corner_elem + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + use edgetype_mod, only : edgedescriptor_t, ghostbuffer3d_t + implicit none + type (Ghostbuffer3d_t), intent(in) :: g + + integer, intent(in) :: kptr,vlyr + real (kind=r8), intent(inout) :: v (1-g%nhc : g%np+g%nhc, 1-g%nhc : g%np+g%nhc, vlyr) + integer, intent(out) :: mult(5:8) + real (kind=r8), intent(out) :: sw(1-g%nhc : 1, 1-g%nhc : 1, vlyr, max_corner_elem-1) + real (kind=r8), intent(out) :: se( g%np : g%np+g%nhc, 1-g%nhc : 1, vlyr, max_corner_elem-1) + real (kind=r8), intent(out) :: ne( g%np : g%np+g%nhc, g%np : g%np+g%nhc, vlyr, max_corner_elem-1) + real (kind=r8), intent(out) :: nw(1-g%nhc : 1, g%np : g%np+g%nhc, vlyr, max_corner_elem-1) + type (EdgeDescriptor_t) :: desc + + integer :: nhc, np + + ! Local + logical, parameter :: UseUnroll = .TRUE. + integer :: i,j,k,l + integer :: is,ie,in,iw,ic + logical :: reverse + + threadsafe=.false. + + nhc = g%nhc + np = g%np + + is=desc%getmapP_ghost(south) + ie=desc%getmapP_ghost(east) + in=desc%getmapP_ghost(north) + iw=desc%getmapP_ghost(west) + +! fill in optional values with edgeDefaultVal + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i, 1-j, k)=edgeDefaultVal + v(np+i , 1-j, k)=edgeDefaultVal + v(np+i, np+j, k)=edgeDefaultVal + v(1-i , np+j, k)=edgeDefaultVal + enddo + enddo + enddo + + ! example for north buffer + ! first row ('edge') goes in v(:,np+1) + ! 2nd row ('edge') goes in v(:,np+2) + ! etc... + + do k=1,vlyr + do j=1,nhc + do i=1,np + v(i , 1-j , k) = g%buf(i,j,kptr+k,is ) + v(np+j , i , k) = g%buf(i,j,kptr+k,ie ) + v(i , np+j, k) = g%buf(i,j,kptr+k,in ) + v(1-j , i , k) = g%buf(i,j,kptr+k,iw ) + end do + end do + end do + + ! four sides are always just one + mult(swest) = 0 + mult(seast) = 0 + mult(neast) = 0 + mult(nwest) = 0 + + + +! SWEST + do l=swest, swest+max_corner_elem-1 + ic = desc%getmapP_ghost(l) + if(ic /= -1) then + reverse=desc%reverse(l) + if (mult(swest) .eq. 0) then + if (reverse) then + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i, 1-j, k)=g%buf(j+1, i+1, kptr+k, ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i,1-j,k)=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + else + if (reverse) then + do k=1,vlyr + do j=0,nhc + do i=0,nhc + sw(1-i,1-j,k,mult(swest))=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=0,nhc + do i=0,nhc + sw(1-i,1-j,k,mult(swest))=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + endif + mult(swest) = mult(swest) + 1 + endif + end do + +! SEAST + do l=swest+max_corner_elem,swest+2*max_corner_elem-1 + ic = desc%getmapP_ghost(l) + if(ic /= -1) then + reverse=desc%reverse(l) + if (mult(seast) .eq. 0) then + if (reverse) then + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(np+i,1-j,k)=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(np+i ,1-j,k)=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + else + if (reverse) then + do k=1,vlyr + do j=0,nhc + do i=0,nhc + se(np+i,1-j,k,mult(seast))=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=0,nhc + do i=0,nhc + se(np+i ,1-j,k,mult(seast))=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + endif + mult(seast) = mult(seast) + 1 + endif + end do + + +! 
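+  ! mult(:) bookkeeping (summarizing the corner loops): mult(corner) counts the
+  ! neighbor elements that contributed at that corner.  The first contribution
+  ! is unpacked directly into the halo corner of v; any further contributions
+  ! are returned in the sw/se/ne/nw arrays (entries 1 .. mult(corner)-1), which
+  ! is why those arrays are dimensioned with max_corner_elem-1.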
NEAST + do l=swest+3*max_corner_elem,swest+4*max_corner_elem-1 + ic = desc%getmapP_ghost(l) + if(ic /= -1) then + reverse=desc%reverse(l) + if (mult(neast) .eq. 0) then + if (reverse) then + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(np+i ,np+j,k)=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(np+i ,np+j,k)=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + else + if (reverse) then + do k=1,vlyr + do j=0,nhc + do i=0,nhc + ne(np+i ,np+j,k,mult(neast))=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=0,nhc + do i=0,nhc + ne(np+i ,np+j,k,mult(neast))=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + endif + mult(neast) = mult(neast) + 1 + endif + end do + +! NWEST + do l=swest+2*max_corner_elem,swest+3*max_corner_elem-1 + ic = desc%getmapP_ghost(l) + if(ic /= -1) then + reverse=desc%reverse(l) + if (mult(nwest) .eq. 0) then + if (reverse) then + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i ,np+j,k)=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=1,nhc + do i=1,nhc + v(1-i ,np+j,k)=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + else + if (reverse) then + do k=1,vlyr + do j=0,nhc + do i=0,nhc + nw(1-i ,np+j,k,mult(nwest))=g%buf(j+1,i+1,kptr+k,ic) + enddo + enddo + enddo + else + do k=1,vlyr + do j=0,nhc + do i=0,nhc + nw(1-i ,np+j,k,mult(nwest))=g%buf(i+1,j+1,kptr+k,ic) + enddo + enddo + enddo + endif + endif + mult(nwest) = mult(nwest) + 1 + endif + end do + + end subroutine ghostVunpack3d + + subroutine FreeGhostBuffer3D(buffer) + use edgetype_mod, only : ghostbuffer3d_t + implicit none + type (Ghostbuffer3d_t),intent(inout) :: buffer + +!$OMP BARRIER +!$OMP MASTER + buffer%nbuf=0 + buffer%nlyr=0 + deallocate(buffer%buf) + deallocate(buffer%receive) +!$OMP END MASTER + + end subroutine FreeGhostBuffer3D + + +End module edge_mod diff --git a/src/dynamics/se/dycore/edgetype_mod.F90 b/src/dynamics/se/dycore/edgetype_mod.F90 new file mode 100644 index 00000000..4cfe1202 --- /dev/null +++ b/src/dynamics/se/dycore/edgetype_mod.F90 @@ -0,0 +1,94 @@ +module edgetype_mod + + use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use coordinate_systems_mod, only : cartesian3D_t + use gbarriertype_mod, only : gbarrier_t + + implicit none + private + save + + integer, public :: initedgebuffer_callid = 0 + + type, public :: rotation_t + integer :: nbr ! nbr direction: north south east west + integer :: reverse ! 0 = do not reverse order + ! 1 = reverse order + real (kind=r8), pointer :: R(:,:,:) => null() ! rotation matrix + end type rotation_t + + type, public :: EdgeDescriptor_t + integer :: use_rotation + integer :: padding + integer, pointer :: putmapP(:) => null() + integer, pointer :: getmapP(:) => null() + integer, pointer :: putmapP_ghost(:) => null() + integer, pointer :: getmapP_ghost(:) => null() + integer, pointer :: putmapS(:) => null() + integer, pointer :: getmapS(:) => null() + integer, pointer :: globalID(:) => null() + integer, pointer :: loc2buf(:) => null() + type(cartesian3D_t), pointer :: neigh_corners(:,:) => null() + integer :: actual_neigh_edges + logical, pointer :: reverse(:) => null() + type (rotation_t), pointer :: rot(:) => null() ! Identifies list of edges + ! 
that must be rotated, and how + end type EdgeDescriptor_t + + type, public :: EdgeBuffer_t + real (kind=r8), allocatable :: buf(:) + real (kind=r8), allocatable :: receive(:) + integer, pointer :: putmap(:,:) => null() + integer, pointer :: getmap(:,:) => null() + logical, pointer :: reverse(:,:) => null() + integer, pointer :: moveLength(:) => null() + integer, pointer :: movePtr(:) => null() + integer, pointer :: rcountsFull(:) => null() + integer, pointer :: scountsFull(:) => null() + integer, pointer :: sdisplsFull(:) => null() + integer, pointer :: rdisplsFull(:) => null() + integer, pointer :: rcountsInter(:) => null() + integer, pointer :: scountsInter(:) => null() + integer, pointer :: sdisplsInter(:) => null() + integer, pointer :: rdisplsInter(:) => null() + integer, pointer :: rcountsIntra(:) => null() + integer, pointer :: scountsIntra(:) => null() + integer, pointer :: sdisplsIntra(:) => null() + integer, pointer :: rdisplsIntra(:) => null() + integer, pointer :: getDisplsFull(:) => null() + integer, pointer :: putDisplsFull(:) => null() + integer, allocatable :: Rrequest(:),Srequest(:) + integer, allocatable :: status(:,:) + type (gbarrier_t) :: gbarrier + integer :: nlyr ! Number of layers + integer :: nbuf ! total size of message passing buffer, includes vertical levels + integer :: ndepth ! Depth of halo + integer :: npoints ! length of edge + integer :: lb,ub ! lower and upper bound of arrays + integer :: nInter, nIntra + integer :: id + integer :: bndry_type + integer :: tag + integer :: win + integer(kind=i8) :: winsize + end type EdgeBuffer_t + + type, public :: LongEdgeBuffer_t + integer :: nlyr + integer :: nbuf + integer, pointer :: buf(:,:) => null() + integer, pointer :: receive(:,:) => null() + end type LongEdgeBuffer_t + + type, public :: GhostBuffer3D_t + real (kind=r8), dimension(:,:,:,:), pointer :: buf => null() + real (kind=r8), dimension(:,:,:,:), pointer :: receive => null() + integer :: nlyr ! Number of layers + integer :: nhc ! Number of layers of ghost cells + integer :: np ! Number of points in a cell + integer :: nbuf ! size of the horizontal dimension of the buffers. + integer :: elem_size ! size of 2D array (first two dimensions of buf()) + end type GhostBuffer3D_t + + +end module edgetype_mod diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 new file mode 100644 index 00000000..422799b8 --- /dev/null +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -0,0 +1,377 @@ +module element_mod + + use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use coordinate_systems_mod, only: spherical_polar_t, cartesian2D_t, cartesian3D_t, distance + use dimensions_mod, only: np, nc, npsq, nlev, nlevp, qsize_d, max_neigh_edges,ntrac_d + use edgetype_mod, only: edgedescriptor_t + use gridgraph_mod, only: gridvertex_t + + implicit none + private + integer, public, parameter :: timelevels = 3 + + +! =========== PRIMITIVE-EQUATION DATA-STRUCTURES ===================== + + type, public :: elem_state_t + + ! prognostic variables for preqx solver + + ! prognostics must match those in prim_restart_mod.F90 + ! vertically-lagrangian code advects dp3d instead of ps + ! tracers Q, Qdp always use 2 level time scheme + + real (kind=r8) :: v (np,np,2,nlev,timelevels) ! velocity + real (kind=r8) :: T (np,np,nlev,timelevels) ! temperature + real (kind=r8) :: dp3d (np,np,nlev,timelevels) ! dry delta p on levels + real (kind=r8) :: psdry (np,np) ! dry surface pressure + real (kind=r8) :: phis (np,np) ! 
surface geopotential (prescribed) + real (kind=r8) :: Qdp (np,np,nlev,qsize_d,2) ! Tracer mass + + end type elem_state_t + + !___________________________________________________________________ + type, public :: derived_state_t + ! + ! storage for subcycling tracers/dynamics + ! + real (kind=r8) :: vn0 (np,np,2,nlev) ! velocity for SE tracer advection + real (kind=r8) :: dpdiss_biharmonic(np,np,nlev) ! mean dp dissipation tendency, if nu_p>0 + real (kind=r8) :: dpdiss_ave(np,np,nlev) ! mean dp used to compute psdiss_tens + + ! diagnostics for explicit timestep + real (kind=r8) :: phi(np,np,nlev) ! geopotential + real (kind=r8) :: omega(np,np,nlev) ! vertical velocity + + ! semi-implicit diagnostics: computed in explict-component, reused in Helmholtz-component. + real (kind=r8) :: zeta(np,np,nlev) ! relative vorticity + real (kind=r8) :: div(np,np,nlev,timelevels) ! divergence + + ! tracer advection fields used for consistency and limiters + real (kind=r8) :: dp(np,np,nlev) ! for dp_tracers at physics timestep + real (kind=r8) :: divdp(np,np,nlev) ! divergence of dp + real (kind=r8) :: divdp_proj(np,np,nlev) ! DSSed divdp + real (kind=r8) :: mass(MAX(qsize_d,ntrac_d)+9) ! total tracer mass for diagnostics + + ! forcing terms for CAM + real (kind=r8) :: FQ(np,np,nlev,qsize_d) ! tracer forcing + real (kind=r8) :: FM(np,np,2,nlev) ! momentum forcing + real (kind=r8) :: FDP(np,np,nlev) ! save full updated dp right after physics + real (kind=r8) :: FT(np,np,nlev) ! temperature forcing + real (kind=r8) :: etadot_prescribed(np,np,nlevp) ! prescribed vertical tendency + real (kind=r8) :: u_met(np,np,nlev) ! zonal component of prescribed meteorology winds + real (kind=r8) :: dudt_met(np,np,nlev) ! rate of change of zonal component of prescribed meteorology winds + real (kind=r8) :: v_met(np,np,nlev) ! meridional component of prescribed meteorology winds + real (kind=r8) :: dvdt_met(np,np,nlev) ! rate of change of meridional component of prescribed meteorology winds + real (kind=r8) :: T_met(np,np,nlev) ! prescribed meteorology temperature + real (kind=r8) :: dTdt_met(np,np,nlev) ! rate of change of prescribed meteorology temperature + real (kind=r8) :: ps_met(np,np) ! surface pressure of prescribed meteorology + real (kind=r8) :: dpsdt_met(np,np) ! rate of change of surface pressure of prescribed meteorology + real (kind=r8) :: nudge_factor(np,np,nlev) ! nudging factor (prescribed) + real (kind=r8) :: Utnd(npsq,nlev) ! accumulated U tendency due to nudging towards prescribed met + real (kind=r8) :: Vtnd(npsq,nlev) ! accumulated V tendency due to nudging towards prescribed met + real (kind=r8) :: Ttnd(npsq,nlev) ! accumulated T tendency due to nudging towards prescribed met + + real (kind=r8) :: pecnd(np,np,nlev) ! pressure perturbation from condensate + + end type derived_state_t + + !___________________________________________________________________ + type, public :: elem_accum_t + + + ! the "4" timelevels represents data computed at: + ! 1 t-.5 + ! 2 t+.5 after dynamics + ! 3 t+.5 after forcing + ! 4 t+.5 after Robert + ! after calling TimeLevelUpdate, all times above decrease by 1.0 + + + end type elem_accum_t + + +! ============= DATA-STRUCTURES COMMON TO ALL SOLVERS ================ + + type, public :: index_t + integer :: ia(npsq),ja(npsq) + integer :: is,ie + integer :: NumUniquePts + integer :: UniquePtOffset + end type index_t + + !___________________________________________________________________ + type, public :: element_t + integer :: LocalId + integer :: GlobalId + + ! 
Coordinate values of element points + type (spherical_polar_t) :: spherep(np,np) ! Spherical coords of GLL points + + ! Equ-angular gnomonic projection coordinates + type (cartesian2D_t) :: cartp(np,np) ! gnomonic coords of GLL points + type (cartesian2D_t) :: corners(4) ! gnomonic coords of element corners + real (kind=r8) :: u2qmap(4,2) ! bilinear map from ref element to quad in cubedsphere coordinates + ! SHOULD BE REMOVED + ! 3D cartesian coordinates + type (cartesian3D_t) :: corners3D(4) + + ! Element diagnostics + real (kind=r8) :: area ! Area of element + real (kind=r8) :: normDinv ! some type of norm of Dinv used for CFL + real (kind=r8) :: dx_short ! short length scale in km + real (kind=r8) :: dx_long ! long length scale in km + + real (kind=r8) :: variable_hyperviscosity(np,np) ! hyperviscosity based on above + real (kind=r8) :: hv_courant ! hyperviscosity courant number + real (kind=r8) :: tensorVisc(np,np,2,2) !og, matrix V for tensor viscosity + + ! Edge connectivity information +! integer :: node_numbers(4) +! integer :: node_multiplicity(4) ! number of elements sharing corner node + + type (GridVertex_t) :: vertex ! element grid vertex information + type (EdgeDescriptor_t) :: desc + + type (elem_state_t) :: state + + type (derived_state_t) :: derived + ! Metric terms + real (kind=r8) :: met(np,np,2,2) ! metric tensor on velocity and pressure grid + real (kind=r8) :: metinv(np,np,2,2) ! metric tensor on velocity and pressure grid + real (kind=r8) :: metdet(np,np) ! g = SQRT(det(g_ij)) on velocity and pressure grid + real (kind=r8) :: rmetdet(np,np) ! 1/metdet on velocity pressure grid + real (kind=r8) :: D(np,np,2,2) ! Map covariant field on cube to vector field on the sphere + real (kind=r8) :: Dinv(np,np,2,2) ! Map vector field on the sphere to covariant v on cube + + + ! Mass flux across the sides of each sub-element. + ! The storage is redundent since the mass across shared sides + ! must be equal in magnitude and opposite in sign. + ! The layout is like: + ! -------------------------------------------------------------- + ! ^| (1,4,3) | | | (4,4,3) | + ! || | | | | + ! ||(1,4,4) | | |(4,4,4) | + ! || (1,4,2)| | | (4,4,2)| + ! || | | | | + ! || (1,4,1) | | | (4,4,1) | + ! |--------------------------------------------------------------- + ! S| | | | | + ! e| | | | | + ! c| | | | | + ! o| | | | | + ! n| | | | | + ! d| | | | | + ! --------------------------------------------------------------- + ! C| | | | | + ! o| | | | | + ! o| | | | | + ! r| | | | | + ! d| | | | | + ! i| | | | | + ! n--------------------------------------------------------------- + ! a| (1,1,3) | | | (4,1,3) | + ! t| | | |(4,1,4) | + ! e|(1,1,4) | | | | + ! | (1,1,2)| | | (4,1,2)| + ! | | | | | + ! | (1,1,1) | | | (4,1,1) | + ! --------------------------------------------------------------- + ! First Coordinate -------> + real (kind=r8) :: sub_elem_mass_flux(nc,nc,4,nlev) + + ! Convert vector fields from spherical to rectangular components + ! The transpose of this operation is its pseudoinverse. + real (kind=r8) :: vec_sphere2cart(np,np,3,2) + + ! Mass matrix terms for an element on a cube face + real (kind=r8) :: mp(np,np) ! mass matrix on v and p grid + real (kind=r8) :: rmp(np,np) ! inverse mass matrix on v and p grid + + ! Mass matrix terms for an element on the sphere + ! This mass matrix is used when solving the equations in weak form + ! with the natural (surface area of the sphere) inner product + real (kind=r8) :: spheremp(np,np) ! 
mass matrix on v and p grid + real (kind=r8) :: rspheremp(np,np) ! inverse mass matrix on v and p grid + + integer(i8) :: gdofP(np,np) ! global degree of freedom (P-grid) + + real (kind=r8) :: fcor(np,np) ! Coreolis term + + type (index_t) :: idxP + type (index_t),pointer :: idxV + integer :: FaceNum + + ! force element_t to be a multiple of 8 bytes. + ! on BGP, code will crash (signal 7, or signal 15) if 8 byte alignment is off + ! check core file for: + ! core.63:Generated by interrupt..(Alignment Exception DEAR=0xa1ef671c ESR=0x01800000 CCR0=0x4800a002) + integer :: dummy + end type element_t + + !___________________________________________________________________ + public :: element_coordinates + public :: element_var_coordinates + public :: element_var_coordinates3D + public :: GetColumnIdP,GetColumnIdV + public :: allocate_element_desc + public :: PrintElem + +contains + + subroutine PrintElem(arr) + + real(kind=r8) :: arr(:,:) + integer :: i,j + + do j=np,1,-1 + write(6,*) (arr(i,j), i=1,np) + enddo + + end subroutine PrintElem +! ===================== ELEMENT_MOD METHODS ========================== + + function GetColumnIdP(elem,i,j) result(col_id) + + ! Get unique identifier for a Physics column on the P-grid + + type(element_t), intent(in) :: elem + integer, intent(in) :: i,j + integer :: col_id + col_id = elem%gdofP(i,j) + end function GetColumnIdP + + !___________________________________________________________________ + function GetColumnIdV(elem,i,j) result(col_id) + + ! Get unique identifier for a Physics column on the V-grid + + type(element_t), intent(in) :: elem + integer, intent(in) :: i,j + integer :: col_id + col_id = elem%gdofP(i,j) + end function GetColumnIdV + + !___________________________________________________________________ + function element_coordinates(start,end,points) result(cart) + + ! 
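+    ! The construction below is the affine map from the 1-D reference points
+    ! (points(:), presumably the GLL nodes on [-1,1]) onto the rectangle with
+    ! corners start and end:
+    !   cart(i,j)%x = 0.5*(start%x + end%x) + 0.5*(end%x - start%x)*points(i)
+    ! and likewise for y with points(j).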
Initialize 2D rectilinear element colocation points + + type (cartesian2D_t), intent(in) :: start + type (cartesian2D_t), intent(in) :: end + real(r8), intent(in) :: points(:) + type (cartesian2D_t) :: cart(SIZE(points),SIZE(points)) + + type (cartesian2D_t) :: length, centroid + real(r8) :: y + integer :: i,j + + length%x = 0.50D0*(end%x-start%x) + length%y = 0.50D0*(end%y-start%y) + centroid%x = 0.50D0*(end%x+start%x) + centroid%y = 0.50D0*(end%y+start%y) + do j=1,SIZE(points) + y = centroid%y + length%y*points(j) + do i=1,SIZE(points) + cart(i,j)%x = centroid%x + length%x*points(i) + cart(i,j)%y = y + end do + end do + end function element_coordinates + + !___________________________________________________________________ + function element_var_coordinates(c,points) result(cart) + + type (cartesian2D_t), intent(in) :: c(4) + real(r8), intent(in) :: points(:) + type (cartesian2D_t) :: cart(SIZE(points),SIZE(points)) + + real(r8) :: p(size(points)) + real(r8) :: q(size(points)) + integer :: i,j + + p(:) = (1.0D0-points(:))/2.0D0 + q(:) = (1.0D0+points(:))/2.0D0 + + do j=1,SIZE(points) + do i=1,SIZE(points) + cart(i,j)%x = p(i)*p(j)*c(1)%x & + + q(i)*p(j)*c(2)%x & + + q(i)*q(j)*c(3)%x & + + p(i)*q(j)*c(4)%x + cart(i,j)%y = p(i)*p(j)*c(1)%y & + + q(i)*p(j)*c(2)%y & + + q(i)*q(j)*c(3)%y & + + p(i)*q(j)*c(4)%y + end do + end do + end function element_var_coordinates + + !___________________________________________________________________ + function element_var_coordinates3d(c,points) result(cart) + + type(cartesian3D_t), intent(in) :: c(4) + real(r8), intent(in) :: points(:) + + type(cartesian3D_t) :: cart(SIZE(points),SIZE(points)) + + real(r8) :: p(size(points)) + real(r8) :: q(size(points)), r + integer :: i,j + + p(:) = (1.0D0-points(:))/2.0D0 + q(:) = (1.0D0+points(:))/2.0D0 + + do j=1,SIZE(points) + do i=1,SIZE(points) + cart(i,j)%x = p(i)*p(j)*c(1)%x & + + q(i)*p(j)*c(2)%x & + + q(i)*q(j)*c(3)%x & + + p(i)*q(j)*c(4)%x + cart(i,j)%y = p(i)*p(j)*c(1)%y & + + q(i)*p(j)*c(2)%y & + + q(i)*q(j)*c(3)%y & + + p(i)*q(j)*c(4)%y + cart(i,j)%z = p(i)*p(j)*c(1)%z & + + q(i)*p(j)*c(2)%z & + + q(i)*q(j)*c(3)%z & + + p(i)*q(j)*c(4)%z + + ! project back to sphere: + r = distance(cart(i,j)) + cart(i,j)%x = cart(i,j)%x/r + cart(i,j)%y = cart(i,j)%y/r + cart(i,j)%z = cart(i,j)%z/r + end do + end do + end function element_var_coordinates3d + + !___________________________________________________________________ + subroutine allocate_element_desc(elem) + + type (element_t), intent(inout) :: elem(:) + integer :: num, j,i + + num = SIZE(elem) + + do j=1,num + allocate(elem(j)%desc%putmapP(max_neigh_edges)) + allocate(elem(j)%desc%getmapP(max_neigh_edges)) + allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges)) + allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges)) + allocate(elem(j)%desc%putmapS(max_neigh_edges)) + allocate(elem(j)%desc%getmapS(max_neigh_edges)) + allocate(elem(j)%desc%reverse(max_neigh_edges)) + allocate(elem(j)%desc%globalID(max_neigh_edges)) + allocate(elem(j)%desc%loc2buf(max_neigh_edges)) + do i=1,max_neigh_edges + elem(j)%desc%loc2buf(i)=i + elem(j)%desc%globalID(i)=-1 + enddo + + end do + end subroutine allocate_element_desc + + +end module element_mod diff --git a/src/dynamics/se/dycore/fv_mapz.F90 b/src/dynamics/se/dycore/fv_mapz.F90 new file mode 100644 index 00000000..aa6ec968 --- /dev/null +++ b/src/dynamics/se/dycore/fv_mapz.F90 @@ -0,0 +1,1658 @@ + +!************************************************************************************** +! +! 
fv_mapz contains vertical remapping algorithms that come from the FV3 dycore. +! They have been minimally modified for use in CAM. +! +! The following license statement is from the original code. +! +!************************************************************************************** + +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** +module fv_mapz + + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_abortutils, only: endrun + + implicit none + + public map_scalar, map1_ppm, mapn_tracer + + real(kind=r8), parameter:: r3 = 1._r8/3._r8, r23 = 2._r8/3._r8, r12 = 1._r8/12._r8 +contains + + subroutine map_scalar( km, pe1, q1, qs, & + kn, pe2, q2, i1, i2, & + j, ibeg, iend, jbeg, jend, iv, kord, q_min) + ! iv=1 + integer, intent(in) :: i1 !< Starting longitude + integer, intent(in) :: i2 !< Finishing longitude + integer, intent(in) :: iv !< Mode: 0 == constituents 1 == temp 2 == remap temp with cs scheme + integer, intent(in) :: kord !< Method order + integer, intent(in) :: j !< Current latitude + integer, intent(in) :: ibeg, iend, jbeg, jend + integer, intent(in) :: km !< Original vertical dimension + integer, intent(in) :: kn !< Target vertical dimension + real(kind=r8), intent(in) :: qs(i1:i2) !< bottom BC + real(kind=r8), intent(in) :: pe1(i1:i2,km+1) !< pressure at layer edges from model top to bottom surface in the original vertical coordinate + real(kind=r8), intent(in) :: pe2(i1:i2,kn+1) !< pressure at layer edges from model top to bottom surface in the new vertical coordinate + real(kind=r8), intent(in) :: q1(ibeg:iend,jbeg:jend,km) !< Field input + ! INPUT/OUTPUT PARAMETERS: + real(kind=r8), intent(inout):: q2(ibeg:iend,jbeg:jend,kn) !< Field output + real(kind=r8), intent(in):: q_min + + ! DESCRIPTION: + ! IV = 0: constituents + ! pe1: pressure at layer edges (from model top to bottom surface) + ! in the original vertical coordinate + ! pe2: pressure at layer edges (from model top to bottom surface) + ! in the new vertical coordinate + ! LOCAL VARIABLES: + real(kind=r8) dp1(i1:i2,km) + real(kind=r8) q4(4,i1:i2,km) + real(kind=r8) pl, pr, qsum, dp, esl + integer i, k, l, m, k0 + + do k=1,km + do i=i1,i2 + dp1(i,k) = pe1(i,k+1) - pe1(i,k) + q4(1,i,k) = q1(i,j,k) + enddo + enddo + + ! Compute vertical subgrid distribution + if ( kord >7 ) then + call scalar_profile( qs, q4, dp1, km, i1, i2, iv, kord, q_min ) + else + call ppm_profile( q4, dp1, km, i1, i2, iv, kord ) + endif + + do i=i1,i2 + k0 = 1 + do 555 k=1,kn + do l=k0,km + ! locate the top edge: pe2(i,k) + if( pe2(i,k) >= pe1(i,l) .and. pe2(i,k) <= pe1(i,l+1) ) then + pl = (pe2(i,k)-pe1(i,l)) / dp1(i,l) + if( pe2(i,k+1) <= pe1(i,l+1) ) then + ! 
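+                 ! With q4(2,i,l)=AL, q4(3,i,l)=AR and q4(4,i,l)=A6, the
+                 ! reconstruction in old layer l is
+                 !    f(s) = AL + s*((AR-AL) + A6*(1-s)),  0 <= s <= 1,
+                 ! and pl, pr are the new layer's edges in this normalized
+                 ! coordinate.  q2 below is the exact mean of f over [pl,pr]:
+                 !    AL + 0.5*(AR-AL+A6)*(pl+pr) - A6*(pr*(pr+pl)+pl**2)/3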
entire new grid is within the original grid + pr = (pe2(i,k+1)-pe1(i,l)) / dp1(i,l) + q2(i,j,k) = q4(2,i,l) + 0.5_r8*(q4(4,i,l)+q4(3,i,l)-q4(2,i,l)) & + *(pr+pl)-q4(4,i,l)*r3*(pr*(pr+pl)+pl**2) + k0 = l + goto 555 + else + ! Fractional area... + qsum = (pe1(i,l+1)-pe2(i,k))*(q4(2,i,l)+0.5_r8*(q4(4,i,l)+ & + q4(3,i,l)-q4(2,i,l))*(1._r8+pl)-q4(4,i,l)* & + (r3*(1._r8+pl*(1._r8+pl)))) + do m=l+1,km + ! locate the bottom edge: pe2(i,k+1) + if( pe2(i,k+1) > pe1(i,m+1) ) then + ! Whole layer + qsum = qsum + dp1(i,m)*q4(1,i,m) + else + dp = pe2(i,k+1)-pe1(i,m) + esl = dp / dp1(i,m) + qsum = qsum + dp*(q4(2,i,m)+0.5_r8*esl* & + (q4(3,i,m)-q4(2,i,m)+q4(4,i,m)*(1._r8-r23*esl))) + k0 = m + goto 123 + endif + enddo + goto 123 + endif + endif + enddo +123 q2(i,j,k) = qsum / ( pe2(i,k+1) - pe2(i,k) ) +555 continue + enddo + end subroutine map_scalar + + + subroutine mapn_tracer(nq, km, pe1, pe2, q1, dp2, kord, j, & + i1, i2, isd, ied, jsd, jed, q_min, fill) + ! INPUT PARAMETERS: + integer, intent(in):: km !< vertical dimension + integer, intent(in):: j, nq, i1, i2 + integer, intent(in):: isd, ied, jsd, jed + integer, intent(in):: kord(nq) + real(kind=r8), intent(in):: pe1(i1:i2,km+1) !< pressure at layer edges from model top to bottom surface in the original vertical coordinate + real(kind=r8), intent(in):: pe2(i1:i2,km+1) !< pressure at layer edges from model top to bottom surface in the new vertical coordinate + real(kind=r8), intent(in):: dp2(i1:i2,km) + real(kind=r8), intent(in):: q_min + logical, intent(in):: fill + real(kind=r8), intent(inout):: q1(isd:ied,jsd:jed,km,nq) ! Field input + ! LOCAL VARIABLES: + real(kind=r8):: q4(4,i1:i2,km,nq) + real(kind=r8):: q2(i1:i2,km,nq) !< Field output + real(kind=r8):: qsum(nq) + real(kind=r8):: dp1(i1:i2,km) + real(kind=r8):: qs(i1:i2) + real(kind=r8):: pl, pr, dp, esl, fac1, fac2 + integer:: i, k, l, m, k0, iq + + do k=1,km + do i=i1,i2 + dp1(i,k) = pe1(i,k+1) - pe1(i,k) + enddo + enddo + + do iq=1,nq + do k=1,km + do i=i1,i2 + q4(1,i,k,iq) = q1(i,j,k,iq) + enddo + enddo + call scalar_profile( qs, q4(1,i1,1,iq), dp1, km, i1, i2, 0, kord(iq), q_min ) + enddo + ! Mapping + do 1000 i=i1,i2 + k0 = 1 + do 555 k=1,km + do 100 l=k0,km + ! locate the top edge: pe2(i,k) + if(pe2(i,k) >= pe1(i,l) .and. pe2(i,k) <= pe1(i,l+1)) then + pl = (pe2(i,k)-pe1(i,l)) / dp1(i,l) + if(pe2(i,k+1) <= pe1(i,l+1)) then + ! entire new grid is within the original grid + pr = (pe2(i,k+1)-pe1(i,l)) / dp1(i,l) + fac1 = pr + pl + fac2 = r3*(pr*fac1 + pl*pl) + fac1 = 0.5_r8*fac1 + do iq=1,nq + q2(i,k,iq) = q4(2,i,l,iq) + (q4(4,i,l,iq)+q4(3,i,l,iq)-q4(2,i,l,iq))*fac1 & + - q4(4,i,l,iq)*fac2 + enddo + k0 = l + goto 555 + else + ! Fractional area... + dp = pe1(i,l+1) - pe2(i,k) + fac1 = 1.0_r8 + pl + fac2 = r3*(1.0_r8+pl*fac1) + fac1 = 0.5_r8*fac1 + do iq=1,nq + qsum(iq) = dp*(q4(2,i,l,iq) + (q4(4,i,l,iq)+ & + q4(3,i,l,iq) - q4(2,i,l,iq))*fac1 - q4(4,i,l,iq)*fac2) + enddo + do m=l+1,km + ! locate the bottom edge: pe2(i,k+1) + if(pe2(i,k+1) > pe1(i,m+1) ) then + ! Whole layer.. 
+ do iq=1,nq + qsum(iq) = qsum(iq) + dp1(i,m)*q4(1,i,m,iq) + enddo + else + dp = pe2(i,k+1)-pe1(i,m) + esl = dp / dp1(i,m) + fac1 = 0.5_r8*esl + fac2 = 1.0_r8-r23*esl + do iq=1,nq + qsum(iq) = qsum(iq) + dp*( q4(2,i,m,iq) + fac1*( & + q4(3,i,m,iq)-q4(2,i,m,iq)+q4(4,i,m,iq)*fac2 ) ) + enddo + k0 = m + goto 123 + endif + enddo + goto 123 + endif + endif +100 continue +123 continue + do iq=1,nq + q2(i,k,iq) = qsum(iq) / dp2(i,k) + enddo +555 continue +1000 continue + + if (fill) call fillz(i2-i1+1, km, nq, q2, dp2) + + do iq=1,nq + ! if (fill) call fillz(i2-i1+1, km, 1, q2(i1,1,iq), dp2) + do k=1,km + do i=i1,i2 + q1(i,j,k,iq) = q2(i,k,iq) + enddo + enddo + enddo + + end subroutine mapn_tracer + + + subroutine map1_ppm( km, pe1, q1, qs, & + kn, pe2, q2, i1, i2, & + j, ibeg, iend, jbeg, jend, iv, kord) + integer, intent(in) :: i1 !< Starting longitude + integer, intent(in) :: i2 !< Finishing longitude + integer, intent(in) :: iv !< Mode: 0 == constituents 1 == ??? 2 == remap temp with cs scheme + integer, intent(in) :: kord !< Method order + integer, intent(in) :: j !< Current latitude + integer, intent(in) :: ibeg, iend, jbeg, jend + integer, intent(in) :: km !< Original vertical dimension + integer, intent(in) :: kn !< Target vertical dimension + real(kind=r8), intent(in) :: qs(i1:i2) !< bottom BC + real(kind=r8), intent(in) :: pe1(i1:i2,km+1) !< pressure at layer edges from model top to bottom surface in the original vertical coordinate + real(kind=r8), intent(in) :: pe2(i1:i2,kn+1) !< pressure at layer edges from model top to bottom surface in the new vertical coordinate + real(kind=r8), intent(in) :: q1(ibeg:iend,jbeg:jend,km) !< Field input + ! INPUT/OUTPUT PARAMETERS: + real(kind=r8), intent(inout):: q2(ibeg:iend,jbeg:jend,kn) !< Field output + + ! DESCRIPTION: + ! IV = 0: constituents + ! pe1: pressure at layer edges (from model top to bottom surface) + ! in the original vertical coordinate + ! pe2: pressure at layer edges (from model top to bottom surface) + ! in the new vertical coordinate + + ! LOCAL VARIABLES: + real(kind=r8) dp1(i1:i2,km) + real(kind=r8) q4(4,i1:i2,km) + real(kind=r8) pl, pr, qsum, dp, esl + integer i, k, l, m, k0 + + do k=1,km + do i=i1,i2 + dp1(i,k) = pe1(i,k+1) - pe1(i,k) + q4(1,i,k) = q1(i,j,k) + enddo + enddo + + ! Compute vertical subgrid distribution + if ( kord >7 ) then + call cs_profile( qs, q4, dp1, km, i1, i2, iv, kord ) + else + call ppm_profile( q4, dp1, km, i1, i2, iv, kord ) + endif + + do i=i1,i2 + k0 = 1 + do 555 k=1,kn + do l=k0,km + ! locate the top edge: pe2(i,k) + if( pe2(i,k) >= pe1(i,l) .and. pe2(i,k) <= pe1(i,l+1) ) then + pl = (pe2(i,k)-pe1(i,l)) / dp1(i,l) + if( pe2(i,k+1) <= pe1(i,l+1) ) then + ! entire new grid is within the original grid + pr = (pe2(i,k+1)-pe1(i,l)) / dp1(i,l) + q2(i,j,k) = q4(2,i,l) + 0.5_r8*(q4(4,i,l)+q4(3,i,l)-q4(2,i,l)) & + *(pr+pl)-q4(4,i,l)*r3*(pr*(pr+pl)+pl**2) + k0 = l + goto 555 + else + ! Fractional area... + qsum = (pe1(i,l+1)-pe2(i,k))*(q4(2,i,l)+0.5_r8*(q4(4,i,l)+ & + q4(3,i,l)-q4(2,i,l))*(1.0_r8+pl)-q4(4,i,l)* & + (r3*(1.0_r8+pl*(1.0_r8+pl)))) + do m=l+1,km + ! locate the bottom edge: pe2(i,k+1) + if( pe2(i,k+1) > pe1(i,m+1) ) then + ! 
Whole layer + qsum = qsum + dp1(i,m)*q4(1,i,m) + else + dp = pe2(i,k+1)-pe1(i,m) + esl = dp / dp1(i,m) + qsum = qsum + dp*(q4(2,i,m)+0.5_r8*esl* & + (q4(3,i,m)-q4(2,i,m)+q4(4,i,m)*(1.0_r8-r23*esl))) + k0 = m + goto 123 + endif + enddo + goto 123 + endif + endif + enddo +123 q2(i,j,k) = qsum / ( pe2(i,k+1) - pe2(i,k) ) +555 continue + enddo + + end subroutine map1_ppm + + subroutine ppm_profile(a4, delp, km, i1, i2, iv, kord) + + ! INPUT PARAMETERS: + integer, intent(in):: iv !< iv =-1: winds iv = 0: positive definite scalars iv = 1: others iv = 2: temp (if remap_t) and w (iv=-2) + integer, intent(in):: i1 !< Starting longitude + integer, intent(in):: i2 !< Finishing longitude + integer, intent(in):: km !< Vertical dimension + integer, intent(in):: kord !< Order (or more accurately method no.): + ! + real(kind=r8) , intent(in):: delp(i1:i2,km) !< Layer pressure thickness + + ! !INPUT/OUTPUT PARAMETERS: + real(kind=r8) , intent(inout):: a4(4,i1:i2,km) !< Interpolated values + + ! DESCRIPTION: + ! + ! Perform the piecewise parabolic reconstruction + ! + ! !REVISION HISTORY: + ! S.-J. Lin revised at GFDL 2007 + !----------------------------------------------------------------------- + ! local arrays: + real(kind=r8) dc(i1:i2,km) + real(kind=r8) h2(i1:i2,km) + real(kind=r8) delq(i1:i2,km) + real(kind=r8) df2(i1:i2,km) + real(kind=r8) d4(i1:i2,km) + + ! local scalars: + integer i, k, km1, lmt, it + real(kind=r8) fac + real(kind=r8) a1, a2, c1, c2, c3, d1, d2 + real(kind=r8) qm, dq, lac, qmp, pmp + + km1 = km - 1 + it = i2 - i1 + 1 + + do k=2,km + do i=i1,i2 + delq(i,k-1) = a4(1,i,k) - a4(1,i,k-1) + d4(i,k ) = delp(i,k-1) + delp(i,k) + enddo + enddo + + do k=2,km1 + do i=i1,i2 + c1 = (delp(i,k-1)+0.5_r8*delp(i,k))/d4(i,k+1) + c2 = (delp(i,k+1)+0.5_r8*delp(i,k))/d4(i,k) + df2(i,k) = delp(i,k)*(c1*delq(i,k) + c2*delq(i,k-1)) / & + (d4(i,k)+delp(i,k+1)) + dc(i,k) = sign( min(abs(df2(i,k)), & + max(a4(1,i,k-1),a4(1,i,k),a4(1,i,k+1))-a4(1,i,k), & + a4(1,i,k)-min(a4(1,i,k-1),a4(1,i,k),a4(1,i,k+1))), df2(i,k) ) + enddo + enddo + + !----------------------------------------------------------- + ! 4th order interpolation of the provisional cell edge value + !----------------------------------------------------------- + + do k=3,km1 + do i=i1,i2 + c1 = delq(i,k-1)*delp(i,k-1) / d4(i,k) + a1 = d4(i,k-1) / (d4(i,k) + delp(i,k-1)) + a2 = d4(i,k+1) / (d4(i,k) + delp(i,k)) + a4(2,i,k) = a4(1,i,k-1) + c1 + 2.0_r8/(d4(i,k-1)+d4(i,k+1)) * & + ( delp(i,k)*(c1*(a1 - a2)+a2*dc(i,k-1)) - & + delp(i,k-1)*a1*dc(i,k ) ) + enddo + enddo + + ! if(km>8 .and. kord>4) call steepz(i1, i2, km, a4, df2, dc, delq, delp, d4) + + ! Area preserving cubic with 2nd deriv. = 0 at the boundaries + ! Top + do i=i1,i2 + d1 = delp(i,1) + d2 = delp(i,2) + qm = (d2*a4(1,i,1)+d1*a4(1,i,2)) / (d1+d2) + dq = 2.0_r8*(a4(1,i,2)-a4(1,i,1)) / (d1+d2) + c1 = 4.0_r8*(a4(2,i,3)-qm-d2*dq) / ( d2*(2.0_r8*d2*d2+d1*(d2+3.0_r8*d1)) ) + c3 = dq - 0.5_r8*c1*(d2*(5.0_r8*d1+d2)-3.0_r8*d1*d1) + a4(2,i,2) = qm - 0.25_r8*c1*d1*d2*(d2+3.0_r8*d1) + ! Top edge: + !------------------------------------------------------- + a4(2,i,1) = d1*(2.0_r8*c1*d1**2-c3) + a4(2,i,2) + !------------------------------------------------------- + ! a4(2,i,1) = (12./7.)*a4(1,i,1)-(13./14.)*a4(1,i,2)+(3./14.)*a4(1,i,3) + !------------------------------------------------------- + ! No over- and undershoot condition + a4(2,i,2) = max( a4(2,i,2), min(a4(1,i,1), a4(1,i,2)) ) + a4(2,i,2) = min( a4(2,i,2), max(a4(1,i,1), a4(1,i,2)) ) + dc(i,1) = 0.5_r8*(a4(2,i,2) - a4(1,i,1)) + enddo + + ! 
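+    ! The iv-dependent fix-ups below follow the mode convention in the argument
+    ! description (iv=-1: winds, iv=0: positive-definite scalars, iv=2/-2:
+    ! temperature and w): iv=0 clips the two top edge values at zero, iv=-1
+    ! zeroes the top edge when its sign disagrees with the cell mean, and
+    ! |iv|=2 sets both edges of the top layer equal to the cell mean.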
Enforce monotonicity within the top layer + + if( iv==0 ) then + do i=i1,i2 + a4(2,i,1) = max(0.0_r8, a4(2,i,1)) + a4(2,i,2) = max(0.0_r8, a4(2,i,2)) + enddo + elseif( iv==-1 ) then + do i=i1,i2 + if ( a4(2,i,1)*a4(1,i,1) <= 0.0_r8 ) a4(2,i,1) = 0.0_r8 + enddo + elseif( abs(iv)==2 ) then + do i=i1,i2 + a4(2,i,1) = a4(1,i,1) + a4(3,i,1) = a4(1,i,1) + enddo + endif + + ! Bottom + ! Area preserving cubic with 2nd deriv. = 0 at the surface + do i=i1,i2 + d1 = delp(i,km) + d2 = delp(i,km1) + qm = (d2*a4(1,i,km)+d1*a4(1,i,km1)) / (d1+d2) + dq = 2.0_r8*(a4(1,i,km1)-a4(1,i,km)) / (d1+d2) + c1 = (a4(2,i,km1)-qm-d2*dq) / (d2*(2.0_r8*d2*d2+d1*(d2+3.0_r8*d1))) + c3 = dq - 2.0_r8*c1*(d2*(5._r8*d1+d2)-3.0_r8*d1*d1) + a4(2,i,km) = qm - c1*d1*d2*(d2+3.0_r8*d1) + ! Bottom edge: + !----------------------------------------------------- + a4(3,i,km) = d1*(8.0_r8*c1*d1**2-c3) + a4(2,i,km) + ! dc(i,km) = 0.5*(a4(3,i,km) - a4(1,i,km)) + !----------------------------------------------------- + ! a4(3,i,km) = (12./7.)*a4(1,i,km)-(13./14.)*a4(1,i,km-1)+(3./14.)*a4(1,i,km-2) + ! No over- and under-shoot condition + a4(2,i,km) = max( a4(2,i,km), min(a4(1,i,km), a4(1,i,km1)) ) + a4(2,i,km) = min( a4(2,i,km), max(a4(1,i,km), a4(1,i,km1)) ) + dc(i,km) = 0.5_r8*(a4(1,i,km) - a4(2,i,km)) + enddo + + + ! Enforce constraint on the "slope" at the surface + +#ifdef BOT_MONO + do i=i1,i2 + a4(4,i,km) = 0 + if( a4(3,i,km) * a4(1,i,km) <= 0.0_r8 ) a4(3,i,km) = 0.0_r8 + d1 = a4(1,i,km) - a4(2,i,km) + d2 = a4(3,i,km) - a4(1,i,km) + if ( d1*d2 < 0.0_r8 ) then + a4(2,i,km) = a4(1,i,km) + a4(3,i,km) = a4(1,i,km) + else + dq = sign(min(abs(d1),abs(d2),0.5_r8*abs(delq(i,km-1))), d1) + a4(2,i,km) = a4(1,i,km) - dq + a4(3,i,km) = a4(1,i,km) + dq + endif + enddo +#else + if( iv==0 ) then + do i=i1,i2 + a4(2,i,km) = max(0.0_r8,a4(2,i,km)) + a4(3,i,km) = max(0.0_r8,a4(3,i,km)) + enddo + elseif( iv<0 ) then + do i=i1,i2 + if( a4(1,i,km)*a4(3,i,km) <= 0.0_r8 ) a4(3,i,km) = 0.0_r8 + enddo + endif +#endif + + do k=1,km1 + do i=i1,i2 + a4(3,i,k) = a4(2,i,k+1) + enddo + enddo + + !----------------------------------------------------------- + ! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) + !----------------------------------------------------------- + ! Top 2 and bottom 2 layers always use monotonic mapping + do k=1,2 + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + call ppm_limiters(dc(i1,k), a4(1,i1,k), it, 0) + enddo + + if(kord >= 7) then + !----------------------- + ! Huynh's 2nd constraint + !----------------------- + do k=2,km1 + do i=i1,i2 + ! Method#1 + ! h2(i,k) = delq(i,k) - delq(i,k-1) + ! Method#2 - better + h2(i,k) = 2.0_r8*(dc(i,k+1)/delp(i,k+1) - dc(i,k-1)/delp(i,k-1)) & + / ( delp(i,k)+0.5_r8*(delp(i,k-1)+delp(i,k+1)) ) & + * delp(i,k)**2 + ! Method#3 +!!! h2(i,k) = dc(i,k+1) - dc(i,k-1) + enddo + enddo + + fac = 1.5_r8 ! original quasi-monotone + + do k=3,km-2 + do i=i1,i2 + ! Right edges + ! qmp = a4(1,i,k) + 2.0*delq(i,k-1) + ! lac = a4(1,i,k) + fac*h2(i,k-1) + 0.5*delq(i,k-1) + ! + pmp = 2.0_r8*dc(i,k) + qmp = a4(1,i,k) + pmp + lac = a4(1,i,k) + fac*h2(i,k-1) + dc(i,k) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), qmp, lac)), & + max(a4(1,i,k), qmp, lac) ) + ! Left edges + ! qmp = a4(1,i,k) - 2.0*delq(i,k) + ! lac = a4(1,i,k) + fac*h2(i,k+1) - 0.5*delq(i,k) + ! + qmp = a4(1,i,k) - pmp + lac = a4(1,i,k) + fac*h2(i,k+1) - dc(i,k) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), qmp, lac)), & + max(a4(1,i,k), qmp, lac)) + !------------- + ! 
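+            ! For f(s) = AL + s*((AR-AL) + A6*(1-s)) the mean over 0 <= s <= 1
+            ! is (AL+AR)/2 + A6/6, so preserving the cell average a4(1,i,k)
+            ! requires A6 = 3*(2*a4(1,i,k) - (AL+AR)); this is what is
+            ! recomputed below after the edge values have been limited.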
Recompute A6 + !------------- + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + ! Additional constraint to ensure positivity when kord=7 + if (iv == 0 .and. kord >= 6 ) & + call ppm_limiters(dc(i1,k), a4(1,i1,k), it, 2) + enddo + + else + + lmt = kord - 3 + lmt = max(0, lmt) + if (iv == 0) lmt = min(2, lmt) + + do k=3,km-2 + if( kord /= 4) then + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + endif + if(kord/=6) call ppm_limiters(dc(i1,k), a4(1,i1,k), it, lmt) + enddo + endif + + do k=km1,km + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + call ppm_limiters(dc(i1,k), a4(1,i1,k), it, 0) + enddo + + end subroutine ppm_profile + + subroutine ppm_limiters(dm, a4, itot, lmt) + + ! INPUT PARAMETERS: + real(kind=r8) , intent(in):: dm(*) !< Linear slope + integer, intent(in) :: itot !< Total Longitudes + integer, intent(in) :: lmt !< 0: Standard PPM constraint 1: Improved full monotonicity constraint + !< (Lin) 2: Positive definite constraint + !< 3: do nothing (return immediately) + ! INPUT/OUTPUT PARAMETERS: + real(kind=r8) , intent(inout) :: a4(4,*) !< PPM array AA <-- a4(1,i) AL <-- a4(2,i) AR <-- a4(3,i) A6 <-- a4(4,i) + ! LOCAL VARIABLES: + real(kind=r8) qmp + real(kind=r8) da1, da2, a6da + real(kind=r8) fmin + integer i + + ! Developer: S.-J. Lin + + if ( lmt == 3 ) return + + if(lmt == 0) then + ! Standard PPM constraint + do i=1,itot + if(dm(i) == 0.0_r8) then + a4(2,i) = a4(1,i) + a4(3,i) = a4(1,i) + a4(4,i) = 0.0_r8 + else + da1 = a4(3,i) - a4(2,i) + da2 = da1**2 + a6da = a4(4,i)*da1 + if(a6da < -da2) then + a4(4,i) = 3.0_r8*(a4(2,i)-a4(1,i)) + a4(3,i) = a4(2,i) - a4(4,i) + elseif(a6da > da2) then + a4(4,i) = 3.0_r8*(a4(3,i)-a4(1,i)) + a4(2,i) = a4(3,i) - a4(4,i) + endif + endif + enddo + + elseif (lmt == 1) then + + ! Improved full monotonicity constraint (Lin 2004) + ! Note: no need to provide first guess of A6 <-- a4(4,i) + do i=1, itot + qmp = 2.0_r8*dm(i) + a4(2,i) = a4(1,i)-sign(min(abs(qmp),abs(a4(2,i)-a4(1,i))), qmp) + a4(3,i) = a4(1,i)+sign(min(abs(qmp),abs(a4(3,i)-a4(1,i))), qmp) + a4(4,i) = 3.0_r8*( 2.0_r8*a4(1,i) - (a4(2,i)+a4(3,i)) ) + enddo + + elseif (lmt == 2) then + + ! Positive definite constraint + do i=1,itot + if( abs(a4(3,i)-a4(2,i)) < -a4(4,i) ) then + fmin = a4(1,i)+0.25_r8*(a4(3,i)-a4(2,i))**2/a4(4,i)+a4(4,i)*r12 + if( fmin < 0.0_r8 ) then + if(a4(1,i) a4(2,i)) then + a4(4,i) = 3.0_r8*(a4(2,i)-a4(1,i)) + a4(3,i) = a4(2,i) - a4(4,i) + else + a4(4,i) = 3.0_r8*(a4(3,i)-a4(1,i)) + a4(2,i) = a4(3,i) - a4(4,i) + endif + endif + endif + enddo + + endif + + end subroutine ppm_limiters + + + subroutine scalar_profile(qs, a4, delp, km, i1, i2, iv, kord, qmin) + ! Optimized vertical profile reconstruction: + ! Latest: Apr 2008 S.-J. 
Lin, NOAA/GFDL + integer, intent(in):: i1, i2 + integer, intent(in):: km !< vertical dimension + integer, intent(in):: iv !< iv =-1: winds iv = 0: positive definite scalars iv = 1: others + integer, intent(in):: kord + real(kind=r8), intent(in) :: qs(i1:i2) + real(kind=r8), intent(in) :: delp(i1:i2,km) !< Layer pressure thickness + real(kind=r8), intent(inout):: a4(4,i1:i2,km) !< Interpolated values + real(kind=r8), intent(in):: qmin + !----------------------------------------------------------------------- + logical, dimension(i1:i2,km):: extm, ext5, ext6 + real(kind=r8) gam(i1:i2,km) + real(kind=r8) q(i1:i2,km+1) + real(kind=r8) d4(i1:i2) + real(kind=r8) bet, a_bot, grat + real(kind=r8) pmp_1, lac_1, pmp_2, lac_2, x0, x1 + integer i, k, im + + if ( iv .eq. -2 ) then + do i=i1,i2 + gam(i,2) = 0.5_r8 + q(i,1) = 1.5_r8*a4(1,i,1) + enddo + do k=2,km-1 + do i=i1, i2 + grat = delp(i,k-1) / delp(i,k) + bet = 2.0_r8 + grat + grat - gam(i,k) + q(i,k) = (3.0_r8*(a4(1,i,k-1)+a4(1,i,k)) - q(i,k-1))/bet + gam(i,k+1) = grat / bet + enddo + enddo + do i=i1,i2 + grat = delp(i,km-1) / delp(i,km) + q(i,km) = (3.0_r8*(a4(1,i,km-1)+a4(1,i,km)) - grat*qs(i) - q(i,km-1)) / & + (2.0_r8 + grat + grat - gam(i,km)) + q(i,km+1) = qs(i) + enddo + do k=km-1,1,-1 + do i=i1,i2 + q(i,k) = q(i,k) - gam(i,k+1)*q(i,k+1) + enddo + enddo + else + do i=i1,i2 + grat = delp(i,2) / delp(i,1) ! grid ratio + bet = grat*(grat+0.5_r8) + q(i,1) = ( (grat+grat)*(grat+1.0_r8)*a4(1,i,1) + a4(1,i,2) ) / bet + gam(i,1) = ( 1.0_r8 + grat*(grat+1.5_r8) ) / bet + enddo + + do k=2,km + do i=i1,i2 + d4(i) = delp(i,k-1) / delp(i,k) + bet = 2.0_r8 + d4(i) + d4(i) - gam(i,k-1) + q(i,k) = ( 3.0_r8*(a4(1,i,k-1)+d4(i)*a4(1,i,k)) - q(i,k-1) )/bet + gam(i,k) = d4(i) / bet + enddo + enddo + + do i=i1,i2 + a_bot = 1.0_r8 + d4(i)*(d4(i)+1.5_r8) + q(i,km+1) = (2.0_r8*d4(i)*(d4(i)+1.0_r8)*a4(1,i,km)+a4(1,i,km-1)-a_bot*q(i,km)) & + / ( d4(i)*(d4(i)+0.5_r8) - a_bot*gam(i,km) ) + enddo + + do k=km,1,-1 + do i=i1,i2 + q(i,k) = q(i,k) - gam(i,k)*q(i,k+1) + enddo + enddo + endif + + !----- Perfectly linear scheme -------------------------------- + if ( abs(kord) > 16 ) then + do k=1,km + do i=i1,i2 + a4(2,i,k) = q(i,k ) + a4(3,i,k) = q(i,k+1) + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + enddo + return + endif + !----- Perfectly linear scheme -------------------------------- + !------------------ + ! Apply constraints + !------------------ + im = i2 - i1 + 1 + + ! Apply *large-scale* constraints + do i=i1,i2 + q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) + q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) + enddo + + do k=2,km + do i=i1,i2 + gam(i,k) = a4(1,i,k) - a4(1,i,k-1) + enddo + enddo + + ! Interior: + do k=3,km-1 + do i=i1,i2 + if ( gam(i,k-1)*gam(i,k+1)>0.0_r8 ) then + ! Apply large-scale constraint to ALL fields if not local max/min + q(i,k) = min( q(i,k), max(a4(1,i,k-1),a4(1,i,k)) ) + q(i,k) = max( q(i,k), min(a4(1,i,k-1),a4(1,i,k)) ) + else + if ( gam(i,k-1) > 0.0_r8 ) then + ! There exists a local max + q(i,k) = max(q(i,k), min(a4(1,i,k-1),a4(1,i,k))) + else + ! There exists a local min + q(i,k) = min(q(i,k), max(a4(1,i,k-1),a4(1,i,k))) + if ( iv==0 ) q(i,k) = max(0.0_r8, q(i,k)) + endif + endif + enddo + enddo + + ! Bottom: + do i=i1,i2 + q(i,km) = min( q(i,km), max(a4(1,i,km-1), a4(1,i,km)) ) + q(i,km) = max( q(i,km), min(a4(1,i,km-1), a4(1,i,km)) ) + enddo + + do k=1,km + do i=i1,i2 + a4(2,i,k) = q(i,k ) + a4(3,i,k) = q(i,k+1) + enddo + enddo + + do k=1,km + if ( k==1 .or. 
k==km ) then
+         do i=i1,i2
+            extm(i,k) = (a4(2,i,k)-a4(1,i,k)) * (a4(3,i,k)-a4(1,i,k)) > 0.0_r8
+         enddo
+      else
+         do i=i1,i2
+            extm(i,k) = gam(i,k)*gam(i,k+1) < 0.0_r8
+         enddo
+      endif
+      if ( abs(kord) > 9 ) then
+         do i=i1,i2
+            x0 = 2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))
+            x1 = abs(a4(2,i,k)-a4(3,i,k))
+            a4(4,i,k) = 3.0_r8*x0
+            ext5(i,k) = abs(x0) > x1
+            ext6(i,k) = abs(a4(4,i,k)) > x1
+         enddo
+      endif
+   enddo
+
+   !---------------------------
+   ! Apply subgrid constraints:
+   !---------------------------
+   ! f(s) = AL + s*[(AR-AL) + A6*(1-s)]   ( 0 <= s <= 1 )
+   ! Top 2 and bottom 2 layers always use monotonic mapping
+
+   if ( iv==0 ) then
+      do i=i1,i2
+         a4(2,i,1) = max(0.0_r8, a4(2,i,1))
+      enddo
+   elseif ( iv==-1 ) then
+      do i=i1,i2
+         if ( a4(2,i,1)*a4(1,i,1) <= 0.0_r8 ) a4(2,i,1) = 0.0_r8
+      enddo
+   elseif ( iv==2 ) then
+      do i=i1,i2
+         a4(2,i,1) = a4(1,i,1)
+         a4(3,i,1) = a4(1,i,1)
+         a4(4,i,1) = 0.0_r8
+      enddo
+   endif
+
+   if ( iv/=2 ) then
+      do i=i1,i2
+         a4(4,i,1) = 3.0_r8*(2.0_r8*a4(1,i,1) - (a4(2,i,1)+a4(3,i,1)))
+      enddo
+      call cs_limiters(im, extm(i1,1), a4(1,i1,1), 1)
+   endif
+
+   ! k=2
+   do i=i1,i2
+      a4(4,i,2) = 3.0_r8*(2.0_r8*a4(1,i,2) - (a4(2,i,2)+a4(3,i,2)))
+   enddo
+   call cs_limiters(im, extm(i1,2), a4(1,i1,2), 2)
+
+   !-------------------------------------
+   ! Huynh's 2nd constraint for interior:
+   !-------------------------------------
+   do k=3,km-2
+      if ( abs(kord)<9 ) then
+         do i=i1,i2
+            ! Left edges
+            pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1)
+            lac_1 = pmp_1 + 1.5_r8*gam(i,k+2)
+            a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), &
+                 max(a4(1,i,k), pmp_1, lac_1) )
+            ! Right edges
+            pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k)
+            lac_2 = pmp_2 - 1.5_r8*gam(i,k-1)
+            a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), &
+                 max(a4(1,i,k), pmp_2, lac_2) )
+
+            a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k)))
+         enddo
+
+      elseif ( abs(kord)==9 ) then
+         do i=i1,i2
+            if ( extm(i,k) .and. extm(i,k-1) ) then
+               ! grid-scale 2-delta-z wave detected
+               a4(2,i,k) = a4(1,i,k)
+               a4(3,i,k) = a4(1,i,k)
+               a4(4,i,k) = 0.0_r8
+            else if ( extm(i,k) .and. extm(i,k+1) ) then
+               ! grid-scale 2-delta-z wave detected
+               a4(2,i,k) = a4(1,i,k)
+               a4(3,i,k) = a4(1,i,k)
+               a4(4,i,k) = 0.0_r8
+            else if ( extm(i,k) .and. a4(1,i,k) < qmin ) then
+               ! grid-scale 2-delta-z wave detected
+               a4(2,i,k) = a4(1,i,k)
+               a4(3,i,k) = a4(1,i,k)
+               a4(4,i,k) = 0.0_r8
+            else
+               a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k)))
+               ! Check within the smooth region if subgrid profile is non-monotonic
+               if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then
+                  pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1)
+                  lac_1 = pmp_1 + 1.5_r8*gam(i,k+2)
+                  a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), &
+                       max(a4(1,i,k), pmp_1, lac_1) )
+                  pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k)
+                  lac_2 = pmp_2 - 1.5_r8*gam(i,k-1)
+                  a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), &
+                       max(a4(1,i,k), pmp_2, lac_2) )
+                  a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k)))
+               endif
+            endif
+         enddo
+      elseif ( abs(kord)==10 ) then
+         do i=i1,i2
+            if( ext5(i,k) ) then
+               if( ext5(i,k-1) .or. ext5(i,k+1) ) then
+                  a4(2,i,k) = a4(1,i,k)
+                  a4(3,i,k) = a4(1,i,k)
+               elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then
+                  pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1)
+                  lac_1 = pmp_1 + 1.5_r8*gam(i,k+2)
+                  a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), &
+                       max(a4(1,i,k), pmp_1, lac_1) )
+                  pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k)
+                  lac_2 = pmp_2 - 1.5_r8*gam(i,k-1)
+                  a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), &
+                       max(a4(1,i,k), pmp_2, lac_2) )
+               endif
+            elseif( ext6(i,k) ) then
+               if( ext5(i,k-1) .or.
ext5(i,k+1) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + elseif ( abs(kord)==12 ) then + do i=i1,i2 + if( extm(i,k) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0.0_r8 + else ! not a local extremum + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + ! Check within the smooth region if subgrid profile is non-monotonic + if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + endif + endif + enddo + elseif ( abs(kord)==13 ) then + do i=i1,i2 + if( ext6(i,k) ) then + if ( ext6(i,k-1) .and. ext6(i,k+1) ) then + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + endif + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + elseif ( abs(kord)==14 ) then + + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + + elseif ( abs(kord)==15 ) then ! Revised abs(kord)=9 scheme + do i=i1,i2 + if ( ext5(i,k) .and. ext5(i,k-1) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + else if ( ext5(i,k) .and. ext5(i,k+1) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + else if ( ext5(i,k) .and. a4(1,i,k) 16 ) then + do k=1,km + do i=i1,i2 + a4(2,i,k) = q(i,k ) + a4(3,i,k) = q(i,k+1) + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + enddo + return + endif + !----- Perfectly linear scheme -------------------------------- + + !------------------ + ! Apply constraints + !------------------ + im = i2 - i1 + 1 + + ! Apply *large-scale* constraints + do i=i1,i2 + q(i,2) = min( q(i,2), max(a4(1,i,1), a4(1,i,2)) ) + q(i,2) = max( q(i,2), min(a4(1,i,1), a4(1,i,2)) ) + enddo + + do k=2,km + do i=i1,i2 + gam(i,k) = a4(1,i,k) - a4(1,i,k-1) + enddo + enddo + + ! Interior: + do k=3,km-1 + do i=i1,i2 + if ( gam(i,k-1)*gam(i,k+1)>0.0_r8 ) then + ! Apply large-scale constraint to ALL fields if not local max/min + q(i,k) = min( q(i,k), max(a4(1,i,k-1),a4(1,i,k)) ) + q(i,k) = max( q(i,k), min(a4(1,i,k-1),a4(1,i,k)) ) + else + if ( gam(i,k-1) > 0.0_r8 ) then + ! There exists a local max + q(i,k) = max(q(i,k), min(a4(1,i,k-1),a4(1,i,k))) + else + ! There exists a local min + q(i,k) = min(q(i,k), max(a4(1,i,k-1),a4(1,i,k))) + if ( iv==0 ) q(i,k) = max(0.0_r8, q(i,k)) + endif + endif + enddo + enddo + + ! Bottom: + do i=i1,i2 + q(i,km) = min( q(i,km), max(a4(1,i,km-1), a4(1,i,km)) ) + q(i,km) = max( q(i,km), min(a4(1,i,km-1), a4(1,i,km)) ) + enddo + + do k=1,km + do i=i1,i2 + a4(2,i,k) = q(i,k ) + a4(3,i,k) = q(i,k+1) + enddo + enddo + + do k=1,km + if ( k==1 .or. 
k==km ) then + do i=i1,i2 + extm(i,k) = (a4(2,i,k)-a4(1,i,k)) * (a4(3,i,k)-a4(1,i,k)) > 0.0_r8 + enddo + else + do i=i1,i2 + extm(i,k) = gam(i,k)*gam(i,k+1) < 0.0_r8 + enddo + endif + if ( abs(kord) > 9 ) then + do i=i1,i2 + x0 = 2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k)) + x1 = abs(a4(2,i,k)-a4(3,i,k)) + a4(4,i,k) = 3.0_r8*x0 + ext5(i,k) = abs(x0) > x1 + ext6(i,k) = abs(a4(4,i,k)) > x1 + enddo + endif + enddo + + !--------------------------- + ! Apply subgrid constraints: + !--------------------------- + ! f(s) = AL + s*[(AR-AL) + A6*(1-s)] ( 0 <= s <= 1 ) + ! Top 2 and bottom 2 layers always use monotonic mapping + + if ( iv==0 ) then + do i=i1,i2 + a4(2,i,1) = max(0.0_r8, a4(2,i,1)) + enddo + elseif ( iv==-1 ) then + do i=i1,i2 + if ( a4(2,i,1)*a4(1,i,1) <= 0.0_r8 ) a4(2,i,1) = 0.0_r8 + enddo + elseif ( iv==2 ) then + do i=i1,i2 + a4(2,i,1) = a4(1,i,1) + a4(3,i,1) = a4(1,i,1) + a4(4,i,1) = 0.0_r8 + enddo + endif + + if ( iv/=2 ) then + do i=i1,i2 + a4(4,i,1) = 3.0_r8*(2.0_r8*a4(1,i,1) - (a4(2,i,1)+a4(3,i,1))) + enddo + call cs_limiters(im, extm(i1,1), a4(1,i1,1), 1) + endif + + do i=i1,i2 + a4(4,i,2) = 3.0_r8*(2.0_r8*a4(1,i,2) - (a4(2,i,2)+a4(3,i,2))) + enddo + call cs_limiters(im, extm(i1,2), a4(1,i1,2), 2) + + !------------------------------------- + ! Huynh's 2nd constraint for interior: + !------------------------------------- + do k=3,km-2 + if ( abs(kord)<9 ) then + do i=i1,i2 + ! Left edges + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + ! Right edges + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + + elseif ( abs(kord)==9 ) then + do i=i1,i2 + if ( extm(i,k) .and. extm(i,k-1) ) then ! c90_mp122 + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0.0_r8 + else if ( extm(i,k) .and. extm(i,k+1) ) then ! c90_mp122 + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0.0_r8 + else + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + ! Check within the smooth region if subgrid profile is non-monotonic + if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + endif + endif + enddo + elseif ( abs(kord)==10 ) then + do i=i1,i2 + if( ext5(i,k) ) then + if( ext5(i,k-1) .or. ext5(i,k+1) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif + elseif( ext6(i,k) ) then + if( ext5(i,k-1) .or. 
ext5(i,k+1) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + elseif ( abs(kord)==12 ) then + do i=i1,i2 + if( extm(i,k) ) then + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0.0_r8 + else ! not a local extremum + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + ! Check within the smooth region if subgrid profile is non-monotonic + if( abs(a4(4,i,k)) > abs(a4(2,i,k)-a4(3,i,k)) ) then + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + a4(4,i,k) = 6.0_r8*a4(1,i,k) - 3.0_r8*(a4(2,i,k)+a4(3,i,k)) + endif + endif + enddo + elseif ( abs(kord)==13 ) then + do i=i1,i2 + if( ext6(i,k) ) then + if ( ext6(i,k-1) .and. ext6(i,k+1) ) then + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + endif + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + elseif ( abs(kord)==14 ) then + + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + + elseif ( abs(kord)==15 ) then ! revised kord=9 scehem + do i=i1,i2 + if ( ext5(i,k) ) then ! c90_mp122 + if ( ext5(i,k-1) .or. ext5(i,k+1) ) then ! c90_mp122 + ! grid-scale 2-delta-z wave detected + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + endif + elseif( ext6(i,k) ) then + ! Check within the smooth region if subgrid profile is non-monotonic + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + elseif ( abs(kord)==16 ) then + do i=i1,i2 + if( ext5(i,k) ) then + if ( ext5(i,k-1) .or. ext5(i,k+1) ) then + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + elseif ( ext6(i,k-1) .or. ext6(i,k+1) ) then + ! Left edges + pmp_1 = a4(1,i,k) - 2.0_r8*gam(i,k+1) + lac_1 = pmp_1 + 1.5_r8*gam(i,k+2) + a4(2,i,k) = min(max(a4(2,i,k), min(a4(1,i,k), pmp_1, lac_1)), & + max(a4(1,i,k), pmp_1, lac_1) ) + ! Right edges + pmp_2 = a4(1,i,k) + 2.0_r8*gam(i,k) + lac_2 = pmp_2 - 1.5_r8*gam(i,k-1) + a4(3,i,k) = min(max(a4(3,i,k), min(a4(1,i,k), pmp_2, lac_2)), & + max(a4(1,i,k), pmp_2, lac_2) ) + endif + endif + enddo + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + else ! kord = 11 + do i=i1,i2 + if ( ext5(i,k) .and. (ext5(i,k-1) .or. ext5(i,k+1)) ) then + ! Noisy region: + a4(2,i,k) = a4(1,i,k) + a4(3,i,k) = a4(1,i,k) + a4(4,i,k) = 0.0_r8 + else + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + endif + enddo + endif + + ! 
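! --------------------------------------------------------------------------
! Editor's note (summary comment, not part of the original patch): the
! abs(kord) switch above selects how strongly the subgrid parabola is
! limited in the interior levels (k=3..km-2):
!   abs(kord) < 9   : bound both edge values with the pmp/lac (Huynh) limits
!   abs(kord) = 9,15: flatten cells flagged as grid-scale 2-delta-z waves
!                     (cell and a neighbour both extrema); otherwise limit
!                     only where the parabola is non-monotone
!   abs(kord) = 10,16: use the ext5/ext6 flags so that only "noisy" cells
!                     are flattened or edge-limited
!   abs(kord) = 12  : flatten any local extremum, limit non-monotone cells
!   abs(kord) = 13,14: little or no edge limiting (A6 is just recomputed)
!   otherwise (11)  : flatten cells where the cell and a neighbour are both
!                     strong extrema
! Every branch ends by recomputing A6 = 3*(2*AA - (AL+AR)); for positive
! definite scalars (iv==0) cs_limiters is then called with its
! positive-definite option, as in the line that follows.
! --------------------------------------------------------------------------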
Additional constraint to ensure positivity + if ( iv==0 ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 0) + + enddo ! k-loop + + !---------------------------------- + ! Bottom layer subgrid constraints: + !---------------------------------- + if ( iv==0 ) then + do i=i1,i2 + a4(3,i,km) = max(0.0_r8, a4(3,i,km)) + enddo + elseif ( iv .eq. -1 ) then + do i=i1,i2 + if ( a4(3,i,km)*a4(1,i,km) <= 0.0_r8 ) a4(3,i,km) = 0.0_r8 + enddo + endif + + do k=km-1,km + do i=i1,i2 + a4(4,i,k) = 3.0_r8*(2.0_r8*a4(1,i,k) - (a4(2,i,k)+a4(3,i,k))) + enddo + if(k==(km-1)) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 2) + if(k== km ) call cs_limiters(im, extm(i1,k), a4(1,i1,k), 1) + enddo + + end subroutine cs_profile + + subroutine cs_limiters(im, extm, a4, iv) + integer, intent(in) :: im + integer, intent(in) :: iv + logical, intent(in) :: extm(im) + real(kind=r8) , intent(inout) :: a4(4,im) !< PPM array + ! LOCAL VARIABLES: + real(kind=r8) da1, da2, a6da + integer i + + if ( iv==0 ) then + ! Positive definite constraint + do i=1,im + if( a4(1,i)<=0.0_r8) then + a4(2,i) = a4(1,i) + a4(3,i) = a4(1,i) + a4(4,i) = 0.0_r8 + else + if( abs(a4(3,i)-a4(2,i)) < -a4(4,i) ) then + if( (a4(1,i)+0.25_r8*(a4(3,i)-a4(2,i))**2/a4(4,i)+a4(4,i)*r12) < 0.0_r8 ) then + ! local minimum is negative + if( a4(1,i) a4(2,i) ) then + a4(4,i) = 3.0_r8*(a4(2,i)-a4(1,i)) + a4(3,i) = a4(2,i) - a4(4,i) + else + a4(4,i) = 3.0_r8*(a4(3,i)-a4(1,i)) + a4(2,i) = a4(3,i) - a4(4,i) + endif + endif + endif + endif + enddo + elseif ( iv==1 ) then + do i=1,im + if( (a4(1,i)-a4(2,i))*(a4(1,i)-a4(3,i))>=0.0_r8 ) then + a4(2,i) = a4(1,i) + a4(3,i) = a4(1,i) + a4(4,i) = 0.0_r8 + else + da1 = a4(3,i) - a4(2,i) + da2 = da1**2 + a6da = a4(4,i)*da1 + if(a6da < -da2) then + a4(4,i) = 3.0_r8*(a4(2,i)-a4(1,i)) + a4(3,i) = a4(2,i) - a4(4,i) + elseif(a6da > da2) then + a4(4,i) = 3.0_r8*(a4(3,i)-a4(1,i)) + a4(2,i) = a4(3,i) - a4(4,i) + endif + endif + enddo + else + ! Standard PPM constraint + do i=1,im + if( extm(i) ) then + a4(2,i) = a4(1,i) + a4(3,i) = a4(1,i) + a4(4,i) = 0.0_r8 + else + da1 = a4(3,i) - a4(2,i) + da2 = da1**2 + a6da = a4(4,i)*da1 + if(a6da < -da2) then + a4(4,i) = 3.0_r8*(a4(2,i)-a4(1,i)) + a4(3,i) = a4(2,i) - a4(4,i) + elseif(a6da > da2) then + a4(4,i) = 3.0_r8*(a4(3,i)-a4(1,i)) + a4(2,i) = a4(3,i) - a4(4,i) + endif + endif + enddo + endif + end subroutine cs_limiters + + + subroutine fillz(im, km, nq, q, dp) + integer, intent(in):: im !< No. of longitudes + integer, intent(in):: km !< No. of levels + integer, intent(in):: nq !< Total number of tracers + real(kind=r8) , intent(in):: dp(im,km) !< pressure thickness + real(kind=r8) , intent(inout) :: q(im,km,nq) !< tracer mixing ratio + ! LOCAL VARIABLES: + logical:: zfix(im) + real(kind=r8) :: dm(km) + integer i, k, ic, k1 + real(kind=r8) qup, qly, dup, dq, sum0, sum1, fac + + do ic=1,nq +#ifdef DEV_GFS_PHYS + ! Bottom up: + do k=km,2,-1 + k1 = k-1 + do i=1,im + if( q(i,k,ic) < 0.0_r8 ) then + q(i,k1,ic) = q(i,k1,ic) + q(i,k,ic)*dp(i,k)/dp(i,k1) + q(i,k ,ic) = 0.0_r8 + endif + enddo + enddo + ! Top down: + do k=1,km-1 + k1 = k+1 + do i=1,im + if( q(i,k,ic) < 0.0_r8 ) then + q(i,k1,ic) = q(i,k1,ic) + q(i,k,ic)*dp(i,k)/dp(i,k1) + q(i,k ,ic) = 0.0_r8 + endif + enddo + enddo +#else + ! Top layer + do i=1,im + if( q(i,1,ic) < 0.0_r8 ) then + q(i,2,ic) = q(i,2,ic) + q(i,1,ic)*dp(i,1)/dp(i,2) + q(i,1,ic) = 0.0_r8 + endif + enddo + + ! Interior + zfix(:) = .false. + do k=2,km-1 + do i=1,im + if( q(i,k,ic) < 0.0_r8 ) then + zfix(i) = .true. + if ( q(i,k-1,ic) > 0.0_r8 ) then + ! 
Borrow from above + dq = min ( q(i,k-1,ic)*dp(i,k-1), -q(i,k,ic)*dp(i,k) ) + q(i,k-1,ic) = q(i,k-1,ic) - dq/dp(i,k-1) + q(i,k ,ic) = q(i,k ,ic) + dq/dp(i,k ) + endif + if ( q(i,k,ic)<0.0_r8 .and. q(i,k+1,ic)>0.0_r8 ) then + ! Borrow from below: + dq = min ( q(i,k+1,ic)*dp(i,k+1), -q(i,k,ic)*dp(i,k) ) + q(i,k+1,ic) = q(i,k+1,ic) - dq/dp(i,k+1) + q(i,k ,ic) = q(i,k ,ic) + dq/dp(i,k ) + endif + endif + enddo + enddo + + ! Bottom layer + k = km + do i=1,im + if( q(i,k,ic)<0.0_r8 .and. q(i,k-1,ic)>0.0_r8) then + zfix(i) = .true. + ! Borrow from above + qup = q(i,k-1,ic)*dp(i,k-1) + qly = -q(i,k ,ic)*dp(i,k ) + dup = min(qly, qup) + q(i,k-1,ic) = q(i,k-1,ic) - dup/dp(i,k-1) + q(i,k, ic) = q(i,k, ic) + dup/dp(i,k ) + endif + enddo + + ! Perform final check and non-local fix if needed + do i=1,im + if ( zfix(i) ) then + sum0 = 0.0_r8 + do k=2,km + dm(k) = q(i,k,ic)*dp(i,k) + sum0 = sum0 + dm(k) + enddo + + if ( sum0 > 0.0_r8 ) then + sum1 = 0.0_r8 + do k=2,km + sum1 = sum1 + max(0.0_r8, dm(k)) + enddo + fac = sum0 / sum1 + do k=2,km + q(i,k,ic) = max(0.0_r8, fac*dm(k)/dp(i,k)) + enddo + endif + + endif + enddo +#endif + + enddo + end subroutine fillz + end module fv_mapz diff --git a/src/dynamics/se/dycore/fvm_analytic_mod.F90 b/src/dynamics/se/dycore/fvm_analytic_mod.F90 new file mode 100644 index 00000000..2fc43829 --- /dev/null +++ b/src/dynamics/se/dycore/fvm_analytic_mod.F90 @@ -0,0 +1,1214 @@ +!MODULE FVM_ANALYTIC_MOD--------------------------------------------CE-for FVM! +! AUTHOR: CHRISTOPH ERATH, 17.October 2011 ! +! This module contains all analytical terms for fvm ! +!-----------------------------------------------------------------------------! +module fvm_analytic_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + use cam_abortutils, only: endrun + + implicit none + private + + public :: get_high_order_weights_over_areas, compute_reconstruct_matrix + public :: compute_halo_vars, init_flux_orient + public :: I_00, I_10, I_01, I_20, I_02, I_11, gauss_points + public :: F_00, F_10, F_01, F_20, F_02, F_11 + public :: create_interpolation_points, compute_basic_coordinate_vars + +CONTAINS + + subroutine compute_basic_coordinate_vars(elem,& + nc,irecons,dalpha,dbeta,vtx_cart,center_cart,area_sphere,spherecentroid) + use coordinate_systems_mod, only: cart2spherical + use element_mod, only: element_t + use coordinate_systems_mod, only: spherical_polar_t + + type (element_t), intent(in ) :: elem + integer, intent(in) :: nc,irecons + + real (kind=r8), intent(out) :: dalpha, dbeta + real (kind=r8), intent(out) :: vtx_cart (4,2,nc,nc) + real (kind=r8), intent(out) :: area_sphere(nc,nc) + real (kind=r8), intent(out) :: spherecentroid(irecons-1,nc,nc) + type (spherical_polar_t), intent(out) :: center_cart(nc,nc) ! 
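! --------------------------------------------------------------------------
! Editor's note (illustrative sketch, not part of the patch): the loops in
! this constructor build an equiangular gnomonic grid: the nc+1 cell edges
! are equally spaced in the central angles (alpha,beta) and tan() maps them
! onto the cube-face (gnomonic) plane.  The minimal standalone program below
! reproduces that edge construction for a full panel, assuming (for
! illustration only) that the panel corners sit at +/- pi/4, and checks that
! the gnomonic edges then span [-1,1].
! --------------------------------------------------------------------------
program equiangular_edges_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  integer, parameter :: nc = 3
  real(kind=r8), parameter :: pi = 3.14159265358979323846_r8
  real(kind=r8) :: dalpha, acartx(nc+1)
  integer :: i
  dalpha = (pi/2.0_r8)/nc                        ! a panel spans pi/2 in angle
  do i = 1, nc+1
     acartx(i) = tan(-pi/4.0_r8 + (i-1)*dalpha)  ! gnomonic edge coordinate
  end do
  print *, 'gnomonic edges:', acartx             ! first ~ -1, last ~ +1
end program equiangular_edges_sketch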
Spherical coordinates of fvm grid + + integer :: i,j + real (kind=r8) :: centerx,centery + real (kind=r8) :: acartx(nc+1), acarty(nc+1) + + dalpha=abs(elem%corners(1)%x-elem%corners(2)%x)/nc + dbeta =abs(elem%corners(1)%y-elem%corners(4)%y)/nc + + do i=1,nc+1 + acartx(i) = tan(elem%corners(1)%x+(i-1)*dalpha) + acarty(i) = tan(elem%corners(1)%y+(i-1)*dbeta) + end do + + do j=1,nc + do i=1,nc + centerx = tan(elem%corners(1)%x+(i-0.5_r8)*dalpha) + centery = tan(elem%corners(1)%y+(j-0.5_r8)*dbeta) + center_cart(i,j) = cart2spherical(centerx,centery,elem%FaceNum) + enddo + enddo + + vtx_cart = -9D9 + do j=1,nc + do i=1,nc + vtx_cart(1,1,i,j) = acartx(i ) + vtx_cart(1,2,i,j) = acarty(j ) + + vtx_cart(2,1,i,j) = acartx(i+1) + vtx_cart(2,2,i,j) = acarty(j ) + + vtx_cart(3,1,i,j) = acartx(i+1) + vtx_cart(3,2,i,j) = acarty(j+1) + + vtx_cart(4,1,i,j) = acartx(i ) + vtx_cart(4,2,i,j) = acarty(j+1) + end do + end do + ! compute area and centroid for the interior and halo zone of interior elements + call moment_onsphere(nc,irecons,area_sphere,vtx_cart,.true.,spherecentroid) + end subroutine compute_basic_coordinate_vars + + subroutine compute_halo_vars(faceno,cubeboundary,nc,nhc,nhe,& + jx_min,jx_max,jy_min,jy_max,flux_orient, ifct, rot_matrix) + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + + integer, intent(in) :: faceno,nc,nhc,nhe,cubeboundary + + integer, intent(out) :: jx_min(3),jx_max(3),jy_min(3),jy_max(3) + real (kind=r8), intent(out) :: flux_orient(2, 1-nhc:nc+nhc,1-nhc:nc+nhc) + integer, intent(out) :: ifct (1-nhc:nc+nhc,1-nhc:nc+nhc) + integer, intent(out) :: rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + + integer :: i,j + integer :: rot90_matrix(2,2) + integer :: ishft + + + jx_min(2) = 0; jx_max(2) = -1; jy_min(2) = 0; jy_max(2) = -1 + jx_min(3) = 0; jx_max(3) = -1; jy_min(3) = 0; jy_max(3) = -1 + + select case (cubeboundary) + case (0) + jx_min(1)=1-nhe; jx_max(1)=nc+1+nhe; jy_min(1)=1-nhe; jy_max(1)=nc+1+nhe + case (west) + jx_min(1)=1 ; jx_max(1)=nc+1+nhe; jy_min(1)=1-nhe; jy_max(1)=nc+1+nhe + jx_min(2)=1-nhe; jx_max(2)=1 ; jy_min(2)=1-nhe; jy_max(2)=nc+1+nhe + case(east) + jx_min(1)=1-nhe; jx_max(1)=nc+1 ; jy_min(1)=1-nhe; jy_max(1)=nc+1+nhe + jx_min(2)=nc+1 ; jx_max(2)=nc+1+nhe; jy_min(2)=1-nhe; jy_max(2)=nc+1+nhe + case(north) + jx_min(1)=1-nhe; jx_max(1)=nc+1+nhe; jy_min(1)=1-nhe; jy_max(1)=nc+1 + jx_min(2)=1-nhe; jx_max(2)=nc+1+nhe; jy_min(2)=nc+1 ; jy_max(2)=nc+1+nhe + case(south) + jx_min(1)=1-nhe; jx_max(1)=nc+1+nhe; jy_min(1)=1 ; jy_max(1)=nc+1+nhe + jx_min(2)=1-nhe; jx_max(2)=nc+1+nhe; jy_min(2)=1-nhe; jy_max(2)=1 + case(swest) + jx_min(1)=1 ; jx_max(1)=nc+1+nhe; jy_min(1)=1 ; jy_max(1)=nc+1+nhe + jx_min(2)=1 ; jx_max(2)=nc+1+nhe; jy_min(2)=1-nhe; jy_max(2)=1 + jx_min(3)=1-nhe; jx_max(3)=1 ; jy_min(3)=1 ; jy_max(3)=nc+1+nhe + case(seast) + jx_min(1)=1-nhe; jx_max(1)=nc+1 ; jy_min(1)=1 ; jy_max(1)=nc+1+nhe + jx_min(2)=1-nhe; jx_max(2)=nc+1 ; jy_min(2)=1-nhe; jy_max(2)=1 + jx_min(3)=nc+1 ; jx_max(3)=nc+1+nhe; jy_min(3)=1 ; jy_max(3)=nc+1+nhe + case(neast) + jx_min(1)=1-nhe; jx_max(1)=nc+1 ; jy_min(1)=1-nhe; jy_max(1)=nc+1 + jx_min(2)=1-nhe; jx_max(2)=nc+1 ; jy_min(2)=nc+1 ; jy_max(2)=nc+1+nhe + jx_min(3)=nc+1 ; jx_max(3)=nc+1+nhe; jy_min(3)=1-nhe; jy_max(3)=nc+1 + case(nwest) + jx_min(1)=1 ; jx_max(1)=nc+1+nhe; jy_min(1)=1-nhe; jy_max(1)=nc+1 + jx_min(2)=1 ; jx_max(2)=nc+1+nhe; jy_min(2)=nc+1 ; jy_max(2)=nc+1+nhe + jx_min(3)=1-nhe; jx_max(3)=1 ; jy_min(3)=1-nhe; jy_max(3)=nc+1 + + case default + print *, 'Fatal Error in fvm_line_integrals_mod.F90.' 
+ call endrun('Selected case for cubeboundary does not exists!') + end select + ! + ! init location of flux-sides + ! + call init_flux_orient(flux_orient,ifct,nc,nhc,cubeboundary,faceno) + rot_matrix(1,1,:,:) = 1; rot_matrix(1,2,:,:) = 0; + rot_matrix(2,1,:,:) = 0; rot_matrix(2,2,:,:) = 1; + + if (cubeboundary>0) then + ! + ! clockwise 90 rotation of vectors + ! + rot90_matrix(1,1) = 0; rot90_matrix(2,1) = -1; + rot90_matrix(1,2) = 1; rot90_matrix(2,2) = 0; + do j=1-nhc,nc+nhc + do i=1-nhc,nc+nhc + do ishft=1,4-nint(flux_orient(2,i,j)) + rot_matrix(:,:,i,j) = MATMUL(rot90_matrix,rot_matrix(:,:,i,j)) + end do + enddo + enddo + end if + end subroutine compute_halo_vars + + + ! ----------------------------------------------------------------------------------! + !SUBROUTINE MOMENT_ONSPHERE-----------------------------------------------CE-for FVM! + ! AUTHOR: CHRISTOPH ERATH, 20.July 2011 ! + ! DESCRIPTION: Compute area and centroids/moments via line integrals ! + ! ! + ! INPUT: x ... x cartesian coordinats of the arrival grid on the cube ! + ! y ... y cartesian coordinats of the arrival grid on the cube ! + ! ... cell boundaries in x and y directions ! + ! INPUT/OUTPUT: ! + ! area ... area of cells on the sphere ! + ! centroid ... x,y,x^2,y^2,xy ! + !-----------------------------------------------------------------------------------! + subroutine moment_onsphere(nc,irecons,area,vtx_cart,lanalytic,spherecentroid) + use dimensions_mod, only: ngpc + + integer, intent(in) :: nc,irecons + real (kind=r8), dimension(nc,nc) , intent(out) :: area + real (kind=r8), dimension(irecons-1,nc,nc), intent(out) :: spherecentroid + real (kind=r8), dimension(4,2,nc,nc) , intent(in) :: vtx_cart + logical, optional, intent(in) :: lanalytic + integer :: i,j + ! + ! variables for call to get_high_order_weights_over_areas + ! + integer, parameter :: num_area=1, num_seg_max=2 + REAL(KIND=r8), dimension(2,num_seg_max,num_area) :: xx, dxx + integer , dimension(num_area ), parameter :: num_seg=2 + REAL(KIND=r8), dimension(irecons,num_area):: weights + real (kind=r8), dimension(nc+1) :: x, y + + + real (kind=r8), dimension(ngpc):: gsweights, gspts + ! + ! initialize quadrature weights for get_high_order_weights_over_areas + ! + call gauss_points(ngpc,gsweights,gspts) !set gauss points/weights + gspts = 0.5_r8*(gspts+1.0_r8) !shift location so in [0:1] instead of [-1:1] + + x(1:nc) = vtx_cart(1,1,1:nc,1 ) + y(1:nc) = vtx_cart(1,2,1 ,1:nc) + x(nc+1) = vtx_cart(2,1, nc,1 ) + y(nc+1) = vtx_cart(3,2,1 ,nc ) + + select case (irecons) + case(1) + if (present(lanalytic)) then + do j=1,nc + do i=1,nc + area(i,j) = (I_00(x(i+1),y(j+1)) - I_00(x(i),y(j+1)) + & + I_00(x(i),y(j)) - I_00(x(i+1),y(j))) + end do + end do + else + call endrun("non-analytic moments not coded for irecons=1") + end if + + case(3) + if (present(lanalytic)) then + do j=1,nc + do i=1,nc + area(i,j) = (I_00(x(i+1),y(j+1)) - I_00(x(i),y(j+1)) + & + I_00(x(i),y(j)) - I_00(x(i+1),y(j))) + ! Compute centroids via line integrals + spherecentroid(1,i,j) = (I_10(x(i+1),y(j+1)) - I_10(x(i),y(j+1)) + & + I_10(x(i),y(j)) - I_10(x(i+1),y(j))) / area(i,j) + spherecentroid(2,i,j) = (I_01(x(i+1),y(j+1)) - I_01(x(i),y(j+1)) + & + I_01(x(i),y(j)) - I_01(x(i+1),y(j))) / area(i,j) + end do + end do + else + call endrun("non-analytic moments not coded for irecons=3") + end if + + + case(6) + if (present(lanalytic)) then + do j=1,nc + do i=1,nc + ! 
area(i,j) = surfareaxy(x(i),x(i+1),y(j),y(j+1)) + area(i,j) = (I_00(x(i+1),y(j+1)) - I_00(x(i),y(j+1)) + & + I_00(x(i),y(j)) - I_00(x(i+1),y(j))) + ! Compute centroids via line integrals + spherecentroid(1,i,j) = (I_10(x(i+1),y(j+1)) - I_10(x(i),y(j+1)) + & + I_10(x(i),y(j)) - I_10(x(i+1),y(j))) / area(i,j) + spherecentroid(2,i,j) = (I_01(x(i+1),y(j+1)) - I_01(x(i),y(j+1)) + & + I_01(x(i),y(j)) - I_01(x(i+1),y(j))) / area(i,j) + ! TAN(alpha)^2 component + spherecentroid(3,i,j) = (I_20(x(i+1),y(j+1)) - I_20(x(i),y(j+1)) + & + I_20(x(i),y(j)) - I_20(x(i+1),y(j))) / area(i,j) + ! TAN(beta)^2 component + spherecentroid(4,i,j) = (I_02(x(i+1),y(j+1)) - I_02(x(i),y(j+1)) + & + I_02(x(i),y(j)) - I_02(x(i+1),y(j))) / area(i,j) + ! TAN(alpha) TAN(beta) component + spherecentroid(5,i,j) = (I_11(x(i+1),y(j+1)) - I_11(x(i),y(j+1)) + & + I_11(x(i),y(j)) - I_11(x(i+1),y(j))) / area(i,j) + end do + end do + else + do j=1,nc + do i=1,nc + + xx (1,1,1) = x(i) ; xx (2,1,1) = y(j+1); + dxx(1,1,1) = x(i+1)-x(i); dxx(2,1,1) = 0.0_r8 ; + + xx (1,2,1) = x(i+1) ; xx (2,2,1) = y(j) ; + dxx(1,2,1) = x(i)-x(i+1); dxx(2,2,1) = 0.0_r8 ; + + call get_high_order_weights_over_areas(xx,dxx,num_seg,num_seg_max,num_area,weights,ngpc,gsweights,gspts,irecons) + + area(i,j) = weights(1,1) + + spherecentroid(1:5,i,j) = weights(2:6,1)/area(i,j) + end do + end do + end if + case default + call endrun('SUBROUTINE moment_on_sphere: irecons out of range') + end select + end subroutine moment_onsphere + + + ! ----------------------------------------------------------------------------------! + !SUBROUTINES I_00, I_01, I_20, I_02, I11----------------------------------CE-for FVM! + ! AUTHOR: CHRISTOPH ERATH, 17.October 2011 ! + ! DESCRIPTION: calculates the exact integrals ! + ! ! + ! CALLS: none ! + ! INPUT: x ... x coordinate of the evaluation point (Cartesian on the cube) ! + ! y ... y coordinate of the evaluation point (Cartesian on the cube) ! + ! OUTPUT: I_00, I_01, I_20, I_02, I11 ! + !-----------------------------------------------------------------------------------! + function I_00(x,y) + implicit none + real (kind=r8) :: I_00 + real (kind=r8), intent(in) :: x,y + + I_00 = ATAN(x*y/SQRT(1.0_r8+x*x+y*y)) + end function I_00 + + function I_10(x,y) + implicit none + real (kind=r8) :: I_10 + real (kind=r8), intent(in) :: x,y + real (kind=r8) :: tmp + + ! tmp = ATAN(x) + ! I_10 = -ASINH(y*COS(tmp)) + tmp = y*COS(ATAN(x)) + I_10 = -log(tmp+sqrt(tmp*tmp+1)) + end function I_10 + + + function I_01(x,y) + implicit none + real (kind=r8) :: I_01 + real (kind=r8), intent(in) :: x,y + real (kind=r8) :: tmp + + ! I_01 = -ASINH(x/SQRT(1+y*y)) + tmp=x/SQRT(1+y*y) + I_01 = -log(tmp+sqrt(tmp*tmp+1)) + end function I_01 + + function I_20(x,y) + implicit none + real (kind=r8) :: I_20 + real (kind=r8), intent(in) :: x,y + real (kind=r8) :: tmp,tmp1 + + tmp = 1.0_r8+y*y + tmp1=x/SQRT(tmp) + I_20 = y*log(tmp1+sqrt(tmp1*tmp1+1))+ACOS(x*y/(SQRT((1.0_r8+x*x)*tmp))) + end function I_20 + + function I_02(x,y) + implicit none + real (kind=r8) :: I_02 + real (kind=r8), intent(in) :: x,y + real (kind=r8) :: tmp,tmp1 + + ! tmp=1.0_r8+x*x + ! I_02 = x*ASINH(y/SQRT(tmp))+ACOS(x*y/SQRT(tmp*(1+y*y))) + tmp=1.0_r8+x*x + tmp1=y/SQRT(tmp) + + I_02 = x*log(tmp1+sqrt(tmp1*tmp1+1))+ACOS(x*y/SQRT(tmp*(1+y*y))) + + end function I_02 + + function I_11(x,y) + implicit none + real (kind=r8) :: I_11 + real (kind=r8), intent(in) :: x,y + + I_11 = -SQRT(1+x*x+y*y) + end function I_11 + !END SUBROUTINES I_00, I_01, I_20, I_02, I11------------------------------CE-for FVM! 
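! --------------------------------------------------------------------------
! Editor's note (illustrative sketch, not part of the patch): I_00..I_11
! above are exact indefinite line integrals on the gnomonic plane; cell
! areas and centroid moments are obtained from corner differences, e.g.
!     area = I_00(x2,y2) - I_00(x1,y2) + I_00(x1,y1) - I_00(x2,y1).
! The I_10/I_01/I_02 functions avoid a non-standard ASINH intrinsic via the
! identity asinh(t) = log(t + sqrt(t*t + 1)).  The standalone check below
! applies the corner-difference formula to a full panel (x,y in [-1,1]) and
! compares the result with the expected panel area 2*pi/3 (one sixth of the
! unit sphere).
! --------------------------------------------------------------------------
program panel_area_check
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(kind=r8), parameter :: pi = 3.14159265358979323846_r8
  real(kind=r8) :: area
  area = i00(1.0_r8,1.0_r8) - i00(-1.0_r8,1.0_r8) &
       + i00(-1.0_r8,-1.0_r8) - i00(1.0_r8,-1.0_r8)
  print *, 'panel area      =', area
  print *, 'expected 2*pi/3 =', 2.0_r8*pi/3.0_r8
contains
  pure function i00(x,y) result(val)   ! same integrand as I_00 above
    real(kind=r8), intent(in) :: x, y
    real(kind=r8) :: val
    val = atan(x*y/sqrt(1.0_r8 + x*x + y*y))
  end function i00
end program panel_area_check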
+ + + real (kind=r8) function F_00(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y + ! + x = x_in + y = y_in + F_00 =y/((1.0_r8+x*x)*SQRT(1.0_r8+x*x+y*y)) + end function F_00 + + real (kind=r8) function F_10(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y + + x = x_in + y = y_in + + F_10 =x*y/((1.0_r8+x*x)*SQRT(1.0_r8+x*x+y*y)) + end function F_10 + + real (kind=r8) function F_01(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y + + x = x_in + y = y_in + + F_01 =-1.0_r8/(SQRT(1.0_r8+x*x+y*y)) + end function F_01 + + real (kind=r8) function F_20(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y + + x = x_in + y = y_in + + F_20 =x*x*y/((1.0_r8+x*x)*SQRT(1.0_r8+x*x+y*y)) + end function F_20 + + real (kind=r8) function F_02(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y,alpha,tmp + + x = x_in + y = y_in + + alpha = ATAN(x) +! F_02 =-y/SQRT(1.0_r8+x*x+y*y)+ASINH(y*COS(alpha)) + tmp=y*COS(alpha) + F_02 =-y/SQRT(1.0_r8+x*x+y*y)+log(tmp+sqrt(tmp*tmp+1)) + + ! + ! cos(alpha) = 1/sqrt(1+x*x) + ! + end function F_02 + + real (kind=r8) function F_11(x_in,y_in) + implicit none + real (kind=r8), intent(in) :: x_in,y_in + real (kind=r8) :: x,y + + x = x_in + y = y_in + + F_11 =-x/(SQRT(1.0_r8+x*x+y*y)) + end function F_11 + + + + ! + ! matrix version of reconstruct_cubic_onface + ! + subroutine compute_reconstruct_matrix(nc,nhe,nhc,irecons,dalpha,dbeta,spherecentroid,vtx_cart,& + centroid_stretch,vertex_recons_weights,recons_metrics,recons_metrics_integral) + implicit none + integer , intent(in) :: nc,nhe,irecons,nhc + real (kind=r8), intent(in) :: dalpha,dbeta + real (kind=r8), dimension(irecons-1,1-nhc:nc+nhc,1-nhc:nc+nhc), intent(in) :: spherecentroid + real (kind=r8), dimension(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) , intent(in) :: vtx_cart + + real (kind=r8), dimension(7,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(out):: centroid_stretch + real (kind=r8), dimension(4,1:irecons-1,1-nhe:nc+nhe,1-nhe:nc+nhe), intent(out):: vertex_recons_weights + real (kind=r8), dimension(3,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(out):: recons_metrics + real (kind=r8), dimension(3,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(out):: recons_metrics_integral + + ! + integer :: i, j, count, m, n + real (kind=r8) :: coef,tmp,cartx,carty + ! + ! pre-compute variables for reconstruction + ! + select case (irecons) + case(3) + do j= 1-nhe,nc+nhe + do i=1-nhe,nc+nhe + count = 1 + do n = j, j+1 + do m = i, i+1 + cartx = vtx_cart(count,1,i,j); carty = vtx_cart(count,2,i,j); + + vertex_recons_weights(count,1,i,j) = cartx - spherecentroid(1,i,j) + vertex_recons_weights(count,2,i,j) = carty - spherecentroid(2,i,j) + + count=count+1 + end do + enddo + end do + end do + call endrun("recons_metrics and recons_metrics_integral not initialize") + ! + ! for reconstruction + ! + do j= 1-nhe,nc+nhe + do i=1-nhe,nc+nhe + ! + !*************** + !* dfdx * + !*************** + ! + coef = 1.0_r8/(12.0_r8 * dalpha) !finite difference coefficient + coef = coef /( 1.0_r8 + spherecentroid(1,i,j)**2) !stretching coefficient + + centroid_stretch(1,i,j) = coef + ! + !*************** + !* dfdy * + !*************** + ! 
+ coef = 1.0_r8/(12.0_r8 * dbeta) !finite difference coefficient + coef = coef /( 1.0_r8 + spherecentroid(2,i,j)**2) !stretching coefficient + + centroid_stretch(2,i,j) = coef + end do + end do + case(6) + do j= 1-nhe,nc+nhe + do i=1-nhe,nc+nhe + do count=1,4 + cartx = vtx_cart(count,1,i,j); carty = vtx_cart(count,2,i,j); + + vertex_recons_weights(count,1,i,j) = cartx - spherecentroid(1,i,j) + vertex_recons_weights(count,2,i,j) = carty - spherecentroid(2,i,j) + + vertex_recons_weights(count,3,i,j) = (spherecentroid(1,i,j)**2 - & + spherecentroid(3,i,j)) + & + (cartx - spherecentroid(1,i,j))**2 + vertex_recons_weights(count,4,i,j) = (spherecentroid(2,i,j)**2 - & + spherecentroid(4,i,j)) + & + (carty - spherecentroid(2,i,j))**2 + + vertex_recons_weights(count,5,i,j) = (cartx - spherecentroid(1,i,j))* & + (carty - spherecentroid(2,i,j))+ & + (spherecentroid(1,i,j) * & + spherecentroid(2,i,j) - & + spherecentroid(5,i,j)) + end do + end do + end do + + do j= 1-nhe,nc+nhe + do i=1-nhe,nc+nhe + recons_metrics(1,i,j) = spherecentroid(1,i,j)**2 -spherecentroid(3,i,j) + recons_metrics(2,i,j) = spherecentroid(2,i,j)**2 -spherecentroid(4,i,j) + recons_metrics(3,i,j) = spherecentroid(1,i,j)*spherecentroid(2,i,j)-& + spherecentroid(5,i,j) + + recons_metrics_integral(1,i,j) = & + 2.0_r8*spherecentroid(1,i,j)**2 -spherecentroid(3,i,j) + recons_metrics_integral(2,i,j) = & + 2.0_r8*spherecentroid(2,i,j)**2 -spherecentroid(4,i,j) + recons_metrics_integral(3,i,j) = & + 2.0_r8*spherecentroid(1,i,j)*spherecentroid(2,i,j)-& + spherecentroid(5,i,j) + end do + end do + + + + ! + ! pre-compute variables for reconstruction + ! + do j= 1-nhe,nc+nhe + do i=1-nhe,nc+nhe + ! + !*************** + !* dfdx * + !*************** + ! + coef = 1.0_r8/(12.0_r8 * dalpha) !finite difference coefficient + coef = coef /( 1.0_r8 + spherecentroid(1,i,j)**2) !stretching coefficient + + centroid_stretch(1,i,j) = coef + ! + !*************** + !* dfdy * + !*************** + ! + coef = 1.0_r8/(12.0_r8 * dbeta) !finite difference coefficient + coef = coef /( 1.0_r8 + spherecentroid(2,i,j)**2) !stretching coefficient + + centroid_stretch(2,i,j) = coef + + !***************** + !* d2fdx2 * + !***************** + ! + coef = 1.0_r8 / (12.0_r8 * dalpha**2) !finite difference coefficient + ! + ! stretching coefficient part 2 + ! recons(3,i,j) = (a * recons(1,i,j)+ recons(3,i,j))*b + ! + tmp = 0.5_r8/((1.0_r8 + spherecentroid(1,i,j)**2)**2) + + centroid_stretch(3,i,j) = coef*tmp + centroid_stretch(6,i,j) = -spherecentroid(1,i,j)/(1.0_r8 + spherecentroid(1,i,j)**2) + + ! + !***************** + !* d2fdy2 * + !***************** + ! + ! + coef = 1.0_r8 / (12.0_r8 * dbeta**2) !finite difference coefficient + ! + ! stretching coefficient part 2 + ! + ! recons(4,i,j) = (a * recons(1,i,j)+ recons(4,i,j))*b + ! + tmp =0.5_r8/((1.0_r8 + spherecentroid(2,i,j)**2)**2) + + centroid_stretch(4,i,j) = coef*tmp + centroid_stretch(7,i,j) = -spherecentroid(2,i,j)/(1.0_r8 + spherecentroid(2,i,j)**2) + ! + !***************** + !* d2fdxdy * + !***************** + ! + ! 
+ coef = 1.0_r8 / (4.0_r8 * dalpha * dbeta) !finite difference coefficient + coef = coef / ((1.0_r8 + spherecentroid(1,i,j)**2) * & + (1.0_r8 + spherecentroid(2,i,j)**2)) !stretching coefficient + + centroid_stretch(5,i,j) = coef + enddo + enddo + case default + call endrun('SUBROUTINE compute_reconstruct_matrix: irecons out of range') + end select + end subroutine compute_reconstruct_matrix + + + subroutine get_high_order_weights_over_areas(x,dx,num_seg,num_seg_max,num_area,weights,ngpc,gsweights, gspts,irecons) + implicit none + integer , intent(in) :: num_area, num_seg_max, irecons + REAL(KIND=r8), dimension(2,num_seg_max,num_area ), intent(inout) :: x, dx + integer , intent(in) :: ngpc + integer , dimension(num_area ), intent(in) :: num_seg + REAL(KIND=r8), dimension(irecons,num_area), intent(out) :: weights + + real (kind=r8), dimension(ngpc,num_seg_max ) :: xq,yq !quadrature points along line segments + real (kind=r8), dimension(ngpc,num_seg_max,irecons) :: F !potentials + real (kind=r8), dimension( irecons) :: weights_area + real (kind=r8), dimension(ngpc,num_seg_max) :: xq2, yrh, rho, tmp !intermediate variables for optimization + REAL(KIND=r8) , dimension(ngpc,num_seg_max) :: xq2ir, xq2i, rhoi !intermediate variables for optimization + + integer :: iseg,iarea,i,j,k + + real (kind=r8), dimension(ngpc) :: gsweights, gspts + + weights(1:irecons,1:num_area) = 0.0_r8 !may not be necessary dbgxxx + do iarea=1,num_area + do iseg=1,num_seg(iarea) + xq(:,iseg) = x(1,iseg,iarea)+dx(1,iseg,iarea)*gspts(:) + yq(:,iseg) = x(2,iseg,iarea)+dx(2,iseg,iarea)*gspts(:) + end do + ! + ! potentials (equation's 23-28 in CSLAM paper; Lauritzen et al., 2010): + ! + ! (Rory Kelly optimization) + ! + do j=1,num_seg(iarea) +!DIR$ SIMD + do i=1,ngpc + xq2(i,j) = xq(i,j)*xq(i,j) + xq2i(i,j) = 1.0_r8/(1.0_r8+xq2(i,j)) + xq2ir(i,j) = SQRT(xq2i(i,j)) + rho(i,j) = SQRT(1.0_r8+xq2(i,j)+yq(i,j)*yq(i,j)) + rhoi(i,j) = 1.0_r8/rho(i,j) + yrh(i,j) = yq(i,j)*rhoi(i,j) + tmp(i,j) = yq(i,j)*xq2ir(i,j) + F(i,j,1) = yrh(i,j)*xq2i(i,j) !F_00 !F_00 + F(i,j,2) = xq(i,j)*yrh(i,j)*xq2i(i,j) !F_10 !F_10 + F(i,j,3) = -1.0_r8*rhoi(i,j) !F_01 !F_01 + F(i,j,4) = xq2(i,j)*yrh(i,j)*xq2i(i,j) !F_20 !F_20 + F(i,j,6) = -xq(i,j)*rhoi(i,j) !F_11 !F_11 + enddo + ! + ! take F(i,j,5) out of loop above since it prevents vectorization + ! + do i=1,ngpc + F(i,j,5) = -yq(i,j)*rhoi(i,j)+log(tmp(i,j)+rho(i,j)*xq2ir(i,j)) !F_02 !F_02 + end do + enddo + weights_area = 0.0_r8 + do k=1,irecons + do iseg=1,num_seg(iarea) + weights_area(k) = weights_area(k) + sum(gsweights(:)*F(:,iseg,k))*0.5_r8*dx(1,iseg,iarea) + end do + end do + weights(1:irecons,iarea) = weights_area(1:irecons) + end do + end subroutine get_high_order_weights_over_areas + + + !******************************************************************************** + ! + ! Gauss-Legendre quadrature + ! + ! Tabulated values + ! + !******************************************************************************** + subroutine gauss_points(n,weights,points) + implicit none + integer, intent(in ) :: n + real (kind=r8), dimension(:), intent(out) :: weights, points !dimension(n) + + select case (n) + ! CASE(1) + ! abscissae(1) = 0.0_r8 + ! 
weights(1) = 2.0_r8 + case(2) + points(1) = -sqrt(1.0_r8/3.0_r8) + points(2) = sqrt(1.0_r8/3.0_r8) + weights(1) = 1.0_r8 + weights(2) = 1.0_r8 + case(3) + points(1) = -0.774596669241483377035853079956_r8 + points(2) = 0.0_r8 + points(3) = 0.774596669241483377035853079956_r8 + weights(1) = 0.555555555555555555555555555556_r8 + weights(2) = 0.888888888888888888888888888889_r8 + weights(3) = 0.555555555555555555555555555556_r8 + case(4) + points(1) = -0.861136311594052575223946488893_r8 + points(2) = -0.339981043584856264802665659103_r8 + points(3) = 0.339981043584856264802665659103_r8 + points(4) = 0.861136311594052575223946488893_r8 + weights(1) = 0.347854845137453857373063949222_r8 + weights(2) = 0.652145154862546142626936050778_r8 + weights(3) = 0.652145154862546142626936050778_r8 + weights(4) = 0.347854845137453857373063949222_r8 + case(5) + points(1) = -(1.0_r8/3.0_r8)*sqrt(5.0_r8+2.0_r8*sqrt(10.0_r8/7.0_r8)) + points(2) = -(1.0_r8/3.0_r8)*sqrt(5.0_r8-2.0_r8*sqrt(10.0_r8/7.0_r8)) + points(3) = 0.0_r8 + points(4) = (1.0_r8/3.0_r8)*sqrt(5.0_r8-2.0_r8*sqrt(10.0_r8/7.0_r8)) + points(5) = (1.0_r8/3.0_r8)*sqrt(5.0_r8+2.0_r8*sqrt(10.0_r8/7.0_r8)) + weights(1) = (322.0_r8-13.0_r8*sqrt(70.0_r8))/900.0_r8 + weights(2) = (322.0_r8+13.0_r8*sqrt(70.0_r8))/900.0_r8 + weights(3) = 128.0_r8/225.0_r8 + weights(4) = (322.0_r8+13.0_r8*sqrt(70.0_r8))/900.0_r8 + weights(5) = (322.0_r8-13.0_r8*sqrt(70.0_r8))/900.0_r8 + case default + call endrun('SUBROUTINE gauss_points: n out of range in (00) then + + ! + ! cshift (permute) value needed to be applied to vertex number so that they match orientation + ! of the interior of the panel + ! + ! + ib = cubeboundary + if (faceno==2) then + if (ib==north.or.ib==nwest.or.ib==neast) flux_orient(2,1-nhc:nc+nhc,nc+1 :nc+nhc) = 1 + if (ib==south.or.ib==swest.or.ib==seast) flux_orient(2,1-nhc:nc+nhc,1-nhc:0 ) = 3 + end if + if (faceno==3) then + if (ib==north.or.ib==nwest.or.ib==neast) flux_orient (2,1-nhc:nc+nhc,nc+1 :nc+nhc) = 2 + if (ib==south.or.ib==swest.or.ib==seast) flux_orient (2,1-nhc:nc+nhc,1-nhc:0 ) = 2 + end if + if (faceno==4) then + if (ib==north.or.ib==nwest.or.ib==neast) flux_orient (2,1-nhc:nc+nhc,nc+1 :nc+nhc) = 3 + if (ib==south.or.ib==swest.or.ib==seast) flux_orient (2,1-nhc:nc+nhc,1-nhc:0 ) = 1 + end if + if (faceno==5) then + if (ib==south.or.ib==swest.or.ib==seast) flux_orient (2,1-nhc:nc+nhc,1-nhc:0 ) = 2 + if (ib== west.or.ib==swest.or.ib==nwest) flux_orient (2,1-nhc:0 ,1-nhc:nc+nhc) = 3 + if (ib== east.or.ib==seast.or.ib==neast) flux_orient (2, nc+1:nc+nhc,1-nhc:nc+nhc) = 1 + end if + + if (faceno==6) then + if (ib==north.or.ib==nwest.or.ib==neast ) flux_orient (2,1-nhc:nc+nhc,nc+1 :nc+nhc) = 2 + if (ib==west .or.ib==swest.or.ib==nwest ) flux_orient (2,1-nhc:0 ,1-nhc:nc+nhc) = 1 + if (ib==east .or.ib==seast.or.ib==neast ) flux_orient (2,nc+1:nc+nhc,1-nhc:nc+nhc) = 3 + end if + ! + ! non-existent cells in physical space + ! + if (cubeboundary==nwest) then + flux_orient(2,1-nhc:0 ,nc+1 :nc+nhc) = 0 + ifct ( 1-nhc:0 ,nc+1 :nc+nhc) = 0 + else if (cubeboundary==swest) then + flux_orient (2,1-nhc:0 ,1-nhc:0 ) = 0 + ifct ( 1-nhc:0 ,1-nhc:0 ) = 0 + else if (cubeboundary==neast) then + flux_orient (2,nc+1 :nc+nhc,nc+1 :nc+nhc) = 0 + ifct ( nc+1 :nc+nhc,nc+1 :nc+nhc) = 0 + else if (cubeboundary==seast) then + flux_orient (2,nc+1 :nc+nhc,1-nhc:0 ) = 0 + ifct ( nc+1 :nc+nhc,1-nhc:0 ) = 0 + end if + end if + + end subroutine init_flux_orient + +! +! +! + +! ----------------------------------------------------------------------------------! 
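! --------------------------------------------------------------------------
! Editor's note (illustrative sketch, not part of the patch): gauss_points
! above returns tabulated Gauss-Legendre abscissae and weights on [-1,1];
! an n-point rule integrates polynomials up to degree 2n-1 exactly.  Callers
! in this patch shift the nodes to [0,1] via x -> 0.5*(x+1), with the 0.5
! appearing as the factor applied to the segment length.  A minimal
! standalone check of both forms:
! --------------------------------------------------------------------------
program gauss_rule_check
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(kind=r8) :: pts(3), wts(3), q_ref, q_shift
  pts = (/ -sqrt(0.6_r8), 0.0_r8, sqrt(0.6_r8) /)         ! 3-point nodes on [-1,1]
  wts = (/ 5.0_r8/9.0_r8, 8.0_r8/9.0_r8, 5.0_r8/9.0_r8 /)
  ! integral of x**4 over [-1,1] is 2/5; a 3-point rule (degree 5) is exact
  q_ref = sum(wts*pts**4)
  print *, 'int x^4 on [-1,1]:', q_ref, ' (exact 0.4)'
  ! same rule shifted to [0,1]: nodes 0.5*(x+1), weights scaled by 0.5
  q_shift = sum(0.5_r8*wts*(0.5_r8*(pts+1.0_r8))**4)
  print *, 'int x^4 on [0,1]: ', q_shift, ' (exact 0.2)'
end program gauss_rule_check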
+!SUBROUTINE CREATE_INTERPOLATIION_POINTS----------------------------------CE-for FVM! +! AUTHOR: CHRISTOPH ERATH, 17.October 2011 ! +! DESCRIPTION: for elements, which share a cube edge, we have to do some ! +! interpolation on different cubic faces, also in the halo region: ! +! because we also need the reconstruction coefficients in the halo zone, ! +! which is basically calculated twice, on the original cell of an element ! +! on face A and on a cell in the halo region of an element of face B ! +! The crux is, that the interpolation has to be the same to ensure ! +! conservation of the scheme ! +! SYMMETRY of the CUBE is used for calucaltion the interpolation_point ! +! ! +! CALLS: interpolation_point ! +! INPUT/OUTPUT: ! +! elem ... element structure from HOMME ! +! fvm ... structure ! +!-----------------------------------------------------------------------------------! + subroutine create_interpolation_points(elem,& + nc,nhc,nhr,ns,nh,cubeboundary,& + dalpha,dbeta,ibase,halo_interp_weight) + use element_mod , only: element_t + use coordinate_systems_mod, only: cartesian2D_t + use control_mod , only: north, south, east, west, neast, nwest, seast, swest + use cube_mod , only: cube_xstart, cube_xend, cube_ystart, cube_yend + + implicit none + type (element_t), intent(in) :: elem + + integer , intent(in) :: nc,nhc,nhr,ns,nh,cubeboundary + integer , intent(out) :: ibase(1-nh:nc+nh,1:nhr,2) + real (kind=r8), intent(out) :: halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) + ! + ! pre-compute weight/index matrices + ! + integer :: imin,imax,jmin,jmax,iinterp + real (kind=r8), intent(in) :: dalpha,dbeta + + real (kind=r8), dimension(1-nhc:nc+nhc) :: gnomxstart, gnomxend, gnomystart, gnomyend + integer :: i, halo, ida, ide, iref1 + type (cartesian2D_t) :: tmpgnom + real (kind=r8) :: interp(1-nh:nc+nh,1:nhr,2) + integer ::ibaseref + integer :: ibase_tmp(1-nh:nc+nh,1:nhr,2) + + ibase = 99999 !dbg + halo_interp_weight(:,:,:,:) = 9.99E9_r8 !dbg + + ! element is not on a corner, but shares a cube edge (call of subroutine) + if(cubeboundary <= 4) then + gnomxstart(1-nhc)=elem%corners(1)%x-(nhc-0.5_r8)*dalpha + gnomystart(1-nhc)=elem%corners(1)%y-(nhc-0.5_r8)*dbeta + do i=2-nhc,nc+nhc + gnomxstart(i)=gnomxstart(i-1)+dalpha + gnomystart(i)=gnomystart(i-1)+dbeta + end do + ida=1-nhc !lower bound + ide=nc+nhc !upper bound + select case (cubeboundary) + !INTERIOR element + case(0) + ! nothing to do! + !CASE WEST + case(west) + do halo=1,nhr +! iref1=ida + tmpgnom%x=cube_xstart-(halo-0.5_r8)*dalpha + do i=halo-nh,nc+nh-(halo-1) !see fvm_reconstruction to understand these boundaries + iref1=ida + tmpgnom%y=gnomystart(i) + call interpolation_point(nc,ns,tmpgnom,gnomystart,1,4,1,interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + + !CASE EAST + case(east) + ! east zone + do halo=1,nhr + iref1=ida + tmpgnom%x=cube_xend+(halo-0.5_r8)*dalpha + do i=halo-nh,nc+nh-(halo-1) + tmpgnom%y=gnomystart(i) + call interpolation_point(nc,ns,tmpgnom,gnomystart,1,2,1,interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + + !CASE NORTH + case(north) + ! north zone + do halo=1,nhr + tmpgnom%y=cube_yend+(halo-0.5_r8)*dbeta + iref1=ida + do i=halo-nh,nc+nh-(halo-1) + tmpgnom%x=gnomxstart(i) + ! + ! dbg - change to interp(i,halo,1) instead of interp(i,halo,2) + ! so that I can get rid of iinterp = 1 in fvm_reconstruction_mod + ! 
+ call interpolation_point(nc,ns,tmpgnom,gnomxstart,1,6,0,interp(i,halo,2),& + ida,ide,iref1,ibase_tmp(i,halo,2)) + end do + end do + !CASE SOUTH + case(south) + !south zone + do halo=1,nhr + iref1=ida + tmpgnom%y=cube_ystart-(halo-0.5_r8)*dbeta + do i=halo-nh,nc+nh-(halo-1) + tmpgnom%x=gnomxstart(i) + call interpolation_point(nc,ns,tmpgnom,gnomxstart,1,5,0,interp(i,halo,2),& + ida,ide,iref1,ibase_tmp(i,halo,2)) + end do + end do + + ! + !THIS CASE SHOULD NOT HAPPEN! + case default + print *,'Fatal Error in first select statement:' + call endrun('fvm_reconstruction_mod.F90 subroutine fillhalo_cubic!' ) + end select + !CORNER TREATMENT + else + gnomxstart(1-nhc)=cube_xstart-(nhc-0.5_r8)*dalpha + gnomxend(nc+nhc)=cube_xend+(nhc-0.5_r8)*dalpha + gnomystart(1-nhc)=cube_ystart-(nhc-0.5_r8)*dbeta + gnomyend(nc+nhc)=cube_yend+(nhc-0.5_r8)*dbeta + do i=2-nhc,nc+nhc + gnomxstart(i)=gnomxstart(i-1)+dalpha + gnomxend(nc+1-i)=gnomxend(nc+2-i)-dalpha + gnomystart(i)=gnomystart(i-1)+dbeta + gnomyend(nc+1-i)=gnomyend(nc+2-i)-dbeta + end do + + select case (cubeboundary) + !CASE SOUTH WEST + case(swest) + ! west zone + do halo=1,nhr + tmpgnom%x=cube_xstart-(halo-0.5_r8)*dalpha + ida=1 + ide=nc+nc + iref1=ida + do i=0,nc+nh-(halo-1) + tmpgnom%y=gnomystart(i) + call interpolation_point(nc,ns,tmpgnom,gnomystart,1,4,1,interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + !CASE SOUTH EAST + case(seast) + ! east zone + do halo=1,nhr + tmpgnom%x=cube_xend+(halo-0.5_r8)*dalpha + ida=1 + ide=nc+nc + iref1=ida + do i=0,nc+nh-(halo-1) + tmpgnom%y=gnomystart(i) + call interpolation_point(nc,ns,tmpgnom,gnomystart,1,2,1, interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + !CASE NORTH EAST + case(neast) + ! east zone + do halo=1,nhr + tmpgnom%x=cube_xend+(halo-0.5_r8)*dalpha + ida=1-nc + ide=nc + iref1=ida + do i=halo-nh,nc+1 + tmpgnom%y=gnomyend(i) + call interpolation_point(nc,ns,tmpgnom,gnomyend,1,2,1, interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + !CASE NORTH WEST + case(nwest) + ! west zone + do halo=1,2 + tmpgnom%x=cube_xstart-(halo-0.5_r8)*dalpha + ida=1-nc + ide=nc + iref1=ida + do i=halo-nh,nc+1 + tmpgnom%y=gnomyend(i) + call interpolation_point(nc,ns,tmpgnom,gnomyend,1,4,1, interp(i,halo,1),& + ida,ide,iref1,ibase_tmp(i,halo,1)) + end do + end do + !THIS CASE SHOULD NOT HAPPEN! + case default + print *,'Fatal Error in second select statement:' + call endrun('fvm_reconstruction_mod.F90 subroutine create_interpolationpoint!') + end select + endif + + !************************** + ! + ! compute haloe weights and indices + ! + if (cubeboundary>0) then + if (cubeboundary<5) then + ! + ! element is located at a panel side but is not a corner element + ! (west,east,south,north) = (1,2,3,4) + ! + if (cubeboundary==west .or.cubeboundary==east ) then + iinterp = 1 + end if + if (cubeboundary==north.or.cubeboundary==south) iinterp = 2 + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref=ibase_tmp(i,halo,iinterp) + ibase(i,halo,1) = ibaseref + call get_equispace_weights(dbeta, interp(i,halo,iinterp),& + halo_interp_weight(:,i,halo,1),ns) + end do + end do + else + ! + ! element is located at a cube corner + ! (swest,seast,nwest,neast)=(5,6,7,8) + ! 
+ do halo=1,nhr + if (cubeboundary==swest .or.cubeboundary==seast) then + imin = 0 ; imax = nc+nh-(halo-1); + jmin = halo-nh; jmax = nc+1; + else + jmin = 0 ; jmax = nc+nh-(halo-1); + imin = halo-nh; imax = nc+1; + end if + do i=imin,imax + ibaseref=ibase_tmp(i,halo,1) + ibase(i,halo,1) = ibaseref + call get_equispace_weights(dbeta, interp(i,halo,1),halo_interp_weight(:,i,halo,1),ns) + end do + ! + ! reverse weights/indices for fotherpanel (see details on reconstruct_matrix) + ! + halo_interp_weight(1:ns,jmin:jmax,halo,2) = halo_interp_weight(ns:1:-1,imax:imin:-1,halo,1) + ibase (jmin:jmax,halo ,2) = nc+1-(ns-1)-ibase(imax:imin:-1,halo ,1) + end do + end if + + end if + + +end subroutine create_interpolation_points + + + + +!END SUBROUTINE CREATE_INTERPOLATION_POINTS-------------------------------CE-for FVM! + + + +! ----------------------------------------------------------------------------------! +!SUBROUTINE INTERPOLATION_POINT-------------------------------------------CE-for FVM! +! AUTHOR: CHRISTOPH ERATH, 14.November 2011 ! +! DESCRIPTION: calculates the interpolation point on from face 1 in face 2 in ! +! alpha/beta coordinates, only 1D ! +! ! +! CALLS: cubedsphere2cart, cart2cubedsphere ! +! INPUT: gnom... 1D coordinates ! +! gnom1d... 1d coordinates ! +! face1... orginal face ! +! face2... target face (where the interpolation has to be done) ! +! xy ... 0 for alpha coordinate, any other for beta ! +! except.which type, interior, left edge (-1), right edge (1) ! +! point... interpolation point ! +! ida ... begin of interpval ! +! ide ... end of interpval ! + + +! INPUT/OUTPUT/RETURN: ! +! iref ... where we start the search, is also an OUTPUT, so we know for the ! +! next point where to start ! +!-----------------------------------------------------------------------------------! + ! +! DESCRIPTION: searchs where the interpolation point has to be (iref), two values ! +! of interpval on the left and on the right, except if we are out of range ! +! which is indicated through ia and ie, respectively ! +! It is a 1D interpolation, use alpha/beta coordinates!!! ! +! ! +! CALLS: cubic_equispace_interp ! +! INPUT: iref ... where we start the search, is also an OUTPUT, so we know for the ! +! next point where to start ! +! ibaseref ... startindex of the four tracer value for the reconstruction ! +! point ... provides the difference of the interpolation point to use it ! +! directly in CUBIC_EQUISPACE_INTERP ! +!-----------------------------------------------------------------------------------! 
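! --------------------------------------------------------------------------
! Editor's note (illustrative sketch, not part of the patch): the routines
! that follow locate an interpolation point inside a 1-D, equally spaced
! coordinate array and clamp the leftmost index of an ns-point stencil so
! the stencil never leaves the valid index range (the extrapolation case
! near panel corners).  This standalone sketch assumes an even ns,
! mirroring the ns-even branch of interpolation_point below.
! --------------------------------------------------------------------------
program stencil_base_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  integer, parameter :: ida = 1, ide = 10, ns = 4
  real(kind=r8) :: gnom1d(ida:ide), point
  integer :: i, iref, ibaseref
  gnom1d = (/ (real(i,r8), i = ida, ide) /)   ! equally spaced source coordinates
  point  = 9.7_r8                             ! point near the right boundary
  ! find the first coordinate to the right of the point (same search as below)
  iref = ida
  do while (point > gnom1d(iref))
     iref = iref + 1
     if (iref > ide) then
        iref = ide                            ! extrapolation case
        exit
     end if
  end do
  ! leftmost stencil index, clamped so [ibaseref, ibaseref+ns-1] stays in range
  ibaseref = min(max(iref - ns/2, ida), ide - (ns-1))
  print *, 'iref =', iref, ' ibaseref =', ibaseref   ! expect 10 and 7
end program stencil_base_sketch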
+function get_gno_point(gnom,face1,face2,xy) result(point) + use coordinate_systems_mod, only : cubedsphere2cart, cart2cubedsphere, & + cartesian2D_t,cartesian3D_t + implicit none + type (cartesian2D_t), intent(in) :: gnom + integer, intent(in) :: face1, face2, xy + real (kind=r8) :: point + + type(cartesian3D_t) :: tmpcart3d + type (cartesian2D_t) :: tmpgnom + + tmpcart3d=cubedsphere2cart(gnom,face1) + tmpgnom=cart2cubedsphere(tmpcart3d,face2) + if(xy==0) then + point=tmpgnom%x + else + point=tmpgnom%y + end if +end function get_gno_point + +subroutine interpolation_point(nc,ns,gnom,gnom1d,face1,face2,xy,point,ida,ide,iref,ibaseref) + use coordinate_systems_mod, only : cartesian2D_t + implicit none + integer , intent(in) :: nc,ns + type (cartesian2D_t), intent(in) :: gnom + real (kind=r8), dimension(1-nc:), intent(in) :: gnom1d !dimension(1-nhc:nc+nhc) + integer, intent(in) :: face1, face2, xy + integer,intent(in) :: ida, ide + integer,intent(inout) :: iref,ibaseref + real (kind=r8), intent(inout) :: point + +! type(cartesian3D_t) :: tmpcart3d +! type (cartesian2D_t) :: tmpgnom + + point = get_gno_point(gnom,face1,face2,xy) + +! tmpcart3d=cubedsphere2cart(gnom,face1) +! tmpgnom=cart2cubedsphere(tmpcart3d,face2) +! if(xy==0) then +! point=tmpgnom%x +! else +! point=tmpgnom%y +! end if + ! + ! in which cell is interpolation point located? gno(iref) is location of point to the right that is closest + ! + ! |----------|---------|------x---|----------|------|------ + ! gno(iref-1) gno(iref) + ! + iref=ida + do while (point>gnom1d(iref)) + iref = iref + 1 + if (iref>ide+1) then + call endrun("error in search - ABORT; probably invalid ns-nc combination") + end if + if (iref>ide) then +! write(*,*) "extrapolation in interpolation_point",iref,ide + iref=ide + exit + endif + end do + ! + ! this routine works for ns=1 and ns even + ! + if (MOD(ns,2)==1) then + iref = max(iref,ida+1)!make sure gnom1d does not go out of bounds for extrapolation + if (gnom1d(iref)-point>point-gnom1d(iref-1)) iref=iref-1 + iref=iref-((ns-1)/2) + ibaseref = min(max(iref,ida),ide-(ns-1))!extrapolation + point=point-gnom1d(ibaseref) + else if (MOD(ns, 2)==0) then + ! + ! this code is only coded for ns even + ! + ! ibaseref is the left most index used for 1D interpolation + ! (hence iref = iref-ns/2 except near corners) + ! + iref = iref-ns/2 + ibaseref = min(max(iref,ida),ide-(ns-1)) + point=point-gnom1d(ibaseref) + end if +end subroutine interpolation_point +!END SUBROUTINE INTERPOLATION_POINT---------------------------------------CE-for FVM! +! ---------------------------------------------------------------------! +! ! +! Precompute weights for Lagrange interpolation ! +! for equi-distant source grid values ! +! ! +!----------------------------------------------------------------------! + +subroutine get_equispace_weights(dx, x, w,ns) + ! + ! Coordinate system for Lagrange interpolation: + ! + ! |------|------|------|------| + ! 0 dx 2*dx 3*dx ns*dx + ! + implicit none + real (kind=r8),intent(in) :: dx ! spacing of points, alpha/beta + real (kind=r8),intent(in) :: x ! X coordinate where interpolation is to be applied + real (kind=r8),dimension(:),intent(out) :: w ! dimension(ns) + integer ,intent(in) :: ns + ! + integer :: j,k + ! + ! use Lagrange interpolation formulae, e.g.,: + ! + ! http://mathworld.wolfram.com/LagrangeInterpolatingPolynomial.html + ! 
+ w = 1.0_r8 + if (ns.ne.1) then + do j=1,ns + do k=1,ns + if (k.ne.j) then + w(j)=w(j)*(x-dble(k-1)*dx)/(dble(j-1)*dx-dble(k-1)*dx) + end if + end do + end do + end if +end subroutine get_equispace_weights + +end module fvm_analytic_mod diff --git a/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 b/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 new file mode 100644 index 00000000..ede1f440 --- /dev/null +++ b/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 @@ -0,0 +1,2031 @@ +module fvm_consistent_se_cslam + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: nc, nhe, nlev, ntrac, np, nhr, nhc, ngpc, ns, nht + use dimensions_mod, only: irecons_tracer + use dimensions_mod, only: kmin_jet,kmax_jet + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + + use time_mod, only: timelevel_t + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use hybrid_mod, only: hybrid_t, config_thread_region, get_loop_ranges, threadOwnsVertLevel + use perf_mod, only: t_startf, t_stopf + implicit none + private + save + + real (kind=r8),parameter , private :: eps=1.0e-14_r8 + public :: run_consistent_se_cslam +contains + ! + !************************************************************************************** + ! + ! Consistent CSLAM-SE algorithm documented in + ! + ! Lauritzen et al. (2017): CAM-SE-CSLAM: Consistent finite-volume transport with + ! spectral-element dynamics. Mon. Wea. Rev. + ! + ! + !************************************************************************************** + ! + subroutine run_consistent_se_cslam(elem,fvm,hybrid,dt_fvm,tl,nets,nete,hvcoord,& + ghostbufQnhc,ghostBufQ1, ghostBufFlux,kminp,kmaxp) + ! --------------------------------------------------------------------------------- + use fvm_mod , only: fill_halo_fvm + use fvm_reconstruction_mod, only: reconstruction + use fvm_analytic_mod , only: gauss_points + use edge_mod , only: ghostpack, ghostunpack + use edgetype_mod , only: edgebuffer_t + use bndry_mod , only: ghost_exchange + use hybvcoord_mod , only: hvcoord_t + use constituents , only: qmin + use dimensions_mod , only: large_Courant_incr,irecons_tracer_lev + use thread_mod , only: vert_num_threads, omp_set_nested + implicit none + type (element_t) , intent(inout) :: elem(:) + type (fvm_struct) , intent(inout) :: fvm(:) + type (hybrid_t) , intent(in) :: hybrid ! distributed parallel structure (shared) + type (TimeLevel_t) , intent(in) :: tl ! time level struct + type (hvcoord_t) , intent(in) :: hvcoord + integer , intent(in) :: nets ! starting thread element number (private) + integer , intent(in) :: nete ! ending thread element number (private) + real (kind=r8) , intent(in) :: dt_fvm + type (EdgeBuffer_t) , intent(inout) :: ghostbufQnhc,ghostBufQ1, ghostBufFlux + integer , intent(in) :: kminp,kmaxp + + !high-order air density reconstruction + real (kind=r8) :: ctracer(irecons_tracer,1-nhe:nc+nhe,1-nhe:nc+nhe,ntrac) + real (kind=r8) :: inv_dp_area(nc,nc) + type (hybrid_t) :: hybridnew + + real (kind=r8), dimension(ngpc) :: gsweights, gspts + + logical :: llimiter(ntrac) + integer :: i,j,k,ie,itr,kptr,q + integer :: kmin_jet_local,kmax_jet_local + integer :: kmin,kmax + integer :: ir + integer :: kblk ! total number of vertical levels per thread + integer :: klev ! total number of vertical levels in the JET region + integer :: region_num_threads + logical :: inJetCall + logical :: ActiveJetThread + + + llimiter = .true. + + inJetCall = .false. + if(((kminp .ne. 1) .or. (kmaxp .ne. nlev)) .and. 
vert_num_threads>1) then + write(iulog,*)'WARNING: deactivating vertical threading for JET region call' + inJetCall = .true. + region_num_threads = 1 + else + region_num_threads = vert_num_threads + endif + + call omp_set_nested(.true.) + !$OMP PARALLEL NUM_THREADS(region_num_threads), DEFAULT(SHARED), & + !$OMP PRIVATE(hybridnew,kblk,ie,k,kmin,gspts,inv_dp_area,itr), & + !$OMP PRIVATE(kmin_jet_local,kmax,kmax_jet_local,kptr,q,ctracer,ActiveJetThread) + call gauss_points(ngpc,gsweights,gspts) !set gauss points/weights + gspts = 0.5_r8*(gspts+1.0_r8) !shift location so in [0:1] instead of [-1:1] + + if(inJetCall) then + ! =============================================================================== + ! if this is the reduced Jet region call then do not thread over the vertical.... + ! Just use the number of vertical levels that were passed into subroutine + ! =============================================================================== + hybridnew = config_thread_region(hybrid,'serial') + kmin = kminp + kmax = kmaxp + else + hybridnew = config_thread_region(hybrid,'vertical') + call get_loop_ranges(hybridnew,kbeg=kmin,kend=kmax) + endif + + kblk = kmax-kmin+1 + !call t_startf('fvm:before_Qnhc') + do ie=nets,nete + do k=kmin,kmax + elem(ie)%sub_elem_mass_flux(:,:,:,k) = dt_fvm*elem(ie)%sub_elem_mass_flux(:,:,:,k)*fvm(ie)%dp_ref_inverse(k) + fvm(ie)%dp_fvm(1:nc,1:nc,k) = fvm(ie)%dp_fvm (1:nc,1:nc,k)*fvm(ie)%dp_ref_inverse(k) + end do + kptr = kmin-1 + call ghostpack(ghostbufQnhc,fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,kmin:kmax) ,kblk, kptr,ie) + do q=1,ntrac + kptr = kptr + nlev + call ghostpack(ghostbufQnhc,fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,kmin:kmax,q),kblk,kptr,ie) + enddo + end do + !call t_stopf('fvm:before_Qnhc') + !call t_startf('fvm:ghost_exchange:Qnhc') + call ghost_exchange(hybridnew,ghostbufQnhc,location='ghostbufQnhc') + !call t_stopf('fvm:ghost_exchange:Qnhc') + !call t_startf('fvm:orthogonal_swept_areas') + do ie=nets,nete + do k=kmin,kmax + fvm(ie)%se_flux (1:nc,1:nc,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) + end do + kptr = kmin-1 + call ghostunpack(ghostbufQnhc, fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,kmin:kmax) , kblk ,kptr,ie) + do q=1,ntrac + kptr = kptr + nlev + call ghostunpack(ghostbufQnhc, fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,kmin:kmax,q),kblk,kptr,ie) + enddo + do k=kmin,kmax + call compute_displacements_for_swept_areas (fvm(ie),fvm(ie)%dp_fvm(:,:,k),k,gsweights,gspts) + end do + kptr = 4*(kmin-1) + call ghostpack(ghostBufFlux, fvm(ie)%se_flux(:,:,:,kmin:kmax),4*kblk,kptr,ie) + end do + + call ghost_exchange(hybridnew,ghostBufFlux,location='ghostBufFlux') + + do ie=nets,nete + kptr = 4*(kmin-1) + call ghostunpack(ghostBufFlux, fvm(ie)%se_flux(:,:,:,kmin:kmax),4*kblk,kptr,ie) + do k=kmin,kmax + call ghost_flux_unpack(fvm(ie),fvm(ie)%se_flux(:,:,:,k)) + end do + enddo + + !call t_stopf('fvm:orthogonal_swept_areas') + do ie=nets,nete + do k=kmin,kmax + !call t_startf('fvm:tracers_reconstruct') + call reconstruction(fvm(ie)%c(:,:,:,:),nlev,k,& + ctracer(:,:,:,:),irecons_tracer,llimiter,ntrac,& + nc,nhe,nhr,nhc,nht,ns,nhr+(nhe-1),& + fvm(ie)%jx_min,fvm(ie)%jx_max,fvm(ie)%jy_min,fvm(ie)%jy_max,& + fvm(ie)%cubeboundary,fvm(ie)%halo_interp_weight,fvm(ie)%ibase,& + fvm(ie)%spherecentroid(:,1-nhe:nc+nhe,1-nhe:nc+nhe),& + fvm(ie)%recons_metrics,fvm(ie)%recons_metrics_integral,& + fvm(ie)%rot_matrix,fvm(ie)%centroid_stretch,& + fvm(ie)%vertex_recons_weights,fvm(ie)%vtx_cart,& + irecons_tracer_lev(k)) + !call t_stopf('fvm:tracers_reconstruct') + !call 
t_startf('fvm:swept_flux') + call swept_flux(elem(ie),fvm(ie),k,ctracer,irecons_tracer_lev(k),gsweights,gspts) + !call t_stopf('fvm:swept_flux') + end do + end do + ! + !*************************************** + ! + ! Large Courant number increment + ! + !*************************************** + ! + ! In the jet region the effective Courant number + ! in the cslam trajectory algorithm can be > 1 + ! (by up to 20%) in CAM + ! + ! We limit the trajectories to < 1 but in this step + ! we do a piecewise constant update for the + ! amount of mass for which the Courant number is >1 + ! + ! + if (large_Courant_incr) then + !call t_startf('fvm:fill_halo_fvm:large_Courant') + !if (kmin_jetkmax) then + ! call endrun('ERROR: kmax_jet must be .le. kmax passed to run_consistent_se_cslam') + !end if + ! Determine the extent of the JET that is owned by this thread + ActiveJetThread = threadOwnsVertLevel(hybridnew,kmin_jet) .or. threadOwnsVertLevel(hybridnew,kmax_jet) + kmin_jet_local = max(kmin_jet,kmin) + kmax_jet_local = min(kmax_jet,kmax) + klev = kmax_jet-kmin_jet+1 + call fill_halo_fvm(ghostbufQ1,elem,fvm,hybridnew,nets,nete,1,kmin_jet_local,kmax_jet_local,klev,active=ActiveJetThread) + !call t_stopf('fvm:fill_halo_fvm:large_Courant') + !call t_startf('fvm:large_Courant_number_increment') + if(ActiveJetThread) then + do k=kmin_jet_local,kmax_jet_local !1,nlev + do ie=nets,nete + call large_courant_number_increment(fvm(ie),k) + end do + end do + endif + !call t_stopf('fvm:large_Courant_number_increment') + end if + + !call t_startf('fvm:end_of_reconstruct_subroutine') + do k=kmin,kmax + ! + ! convert to mixing ratio + ! + do ie=nets,nete + do j=1,nc + do i=1,nc + inv_dp_area(i,j) = 1.0_r8/fvm(ie)%dp_fvm(i,j,k) + end do + end do + + do itr=1,ntrac + do j=1,nc + do i=1,nc + ! convert to mixing ratio + fvm(ie)%c(i,j,k,itr) = fvm(ie)%c(i,j,k,itr)*inv_dp_area(i,j) + ! remove round-off undershoots + fvm(ie)%c(i,j,k,itr) = MAX(fvm(ie)%c(i,j,k,itr),qmin(itr)) + end do + end do + end do + ! + ! convert to dp and scale back dp + ! + fvm(ie)%dp_fvm(1:nc,1:nc,k) = fvm(ie)%dp_fvm(1:nc,1:nc,k)*fvm(ie)%dp_ref(k)*fvm(ie)%inv_area_sphere +#ifdef waccm_debug + do j=1,nc + do i=1,nc + fvm(ie)%CSLAM_gamma(i,j,k,1) = MAXVAL(fvm(ie)%CSLAM_gamma(i,j,k,:)) + end do + end do +#endif + elem(ie)%sub_elem_mass_flux(:,:,:,k)=0 + end do + end do + !call t_stopf('fvm:end_of_reconstruct_subroutine') + !$OMP END PARALLEL + call omp_set_nested(.false.) 
+ end subroutine run_consistent_se_cslam + + subroutine swept_flux(elem,fvm,ilev,ctracer,irecons_tracer_actual,gsweights,gspts) + use fvm_analytic_mod , only: get_high_order_weights_over_areas + use dimensions_mod, only : kmin_jet,kmax_jet + implicit none + type (element_t) , intent(in) :: elem + type (fvm_struct), intent(inout):: fvm + integer , intent(in) :: ilev, irecons_tracer_actual + real (kind=r8), intent(inout) :: ctracer(irecons_tracer,1-nhe:nc+nhe,1-nhe:nc+nhe,ntrac) + real (kind=r8), dimension(ngpc), intent(in) :: gsweights, gspts + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides) :: base_vtx + integer , dimension(2,num_area, imin:imax,imin:imax,num_sides) :: idx + real (kind=r8) , dimension(imin:imax,imin:imax,num_sides) :: mass_flux_se + real (kind=r8) , dimension(irecons_tracer,num_area) :: weights + real (kind=r8) :: gamma + integer :: i,j,iside,iarea,iw + + integer, parameter :: num_seg_max=5 + REAL(KIND=r8), dimension(2,num_seg_max,num_area) :: x, dx, x_static, dx_static + integer , dimension(num_area) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) :: x_start, dgam_vec + REAL(KIND=r8) :: gamma_max, displ_first_guess + + REAL(KIND=r8) :: flux,flux_tracer(ntrac) + + REAL(KIND=r8), dimension(num_area) :: dp_area + + real (kind=r8) :: dp(1-nhc:nc+nhc,1-nhc:nc+nhc) + + logical :: tl1,tl2,tr1,tr2 + + integer, dimension(4), parameter :: imin_side = (/1 ,0 ,1 ,1 /) + integer, dimension(4), parameter :: imax_side = (/nc ,nc ,nc ,nc+1/) + integer, dimension(4), parameter :: jmin_side = (/1 ,1 ,0 ,1 /) + integer, dimension(4), parameter :: jmax_side = (/nc+1,nc ,nc ,nc /) + + integer :: iseg, iseg_tmp,flowcase,ii,jj,itr + + call define_swept_areas(fvm,ilev,displ,base_vec,base_vtx,idx) + + mass_flux_se(1:nc,1:nc,1:4) = -elem%sub_elem_mass_flux(1:nc,1:nc,1:4,ilev) + mass_flux_se(0 ,1:nc,2 ) = elem%sub_elem_mass_flux(1 ,1:nc,4 ,ilev) + mass_flux_se(nc+1,1:nc,4 ) = elem%sub_elem_mass_flux(nc ,1:nc,2 ,ilev) + mass_flux_se(1:nc,0 ,3 ) = elem%sub_elem_mass_flux(1:nc,1 ,1 ,ilev) + mass_flux_se(1:nc,nc+1,1 ) = elem%sub_elem_mass_flux(1:nc,nc ,3 ,ilev) + ! + ! prepare for air/tracer update + ! +! dp = fvm%dp_fvm(1-nhe:nc+nhe,1-nhe:nc+nhe,ilev) + dp = fvm%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,ilev) + fvm%dp_fvm(1:nc,1:nc,ilev) = fvm%dp_fvm(1:nc,1:nc,ilev)*fvm%area_sphere + do itr=1,ntrac + fvm%c(1:nc,1:nc,ilev,itr) = fvm%c(1:nc,1:nc,ilev,itr)*fvm%dp_fvm(1:nc,1:nc,ilev) + do iw=1,irecons_tracer_actual + ctracer(iw,1-nhe:nc+nhe,1-nhe:nc+nhe,itr)=ctracer(iw,1-nhe:nc+nhe,1-nhe:nc+nhe,itr)*& + dp(1-nhe:nc+nhe,1-nhe:nc+nhe) + end do + end do + + do iside=1,4 + do j=jmin_side(iside),jmax_side(iside) + do i=imin_side(iside),imax_side(iside) + !DO NOT USE MASS_FLUX_SE AS THRESHOLD - THRESHOLD CONDITION MUST BE CONSISTENT WITH + !THE ONE USED IN DEFINE_SWEPT_AREAS +! if (mass_flux_se(i,j,iside)>eps) then + if (fvm%se_flux(i,j,iside,ilev)>eps) then + ! + ! || || + ! tl1 || || tr1 + ! || || + ! ============================= + ! || || + ! tl2 || || tr2 + ! || || + ! 
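+ ! The logicals below classify where the departure points of the flux-side end points
+ ! fall among the quadrants sketched above, based on the sign of the neighboring
+ ! displacements set in define_swept_areas; the resulting tl1/tl2/tr1/tr2 combination
+ ! selects which define_area* routines are used to bound the swept (flux) area.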
+ tl1 = displ(3,i,j,iside)<0.0_r8.and.displ(6,i,j,iside).ge.0.0_r8 !departure point in tl1 quadrant + tl2 = displ(6,i,j,iside)<0.0_r8.and.displ(7,i,j,iside) >0.0_r8 !departure point in tl2 quadrant + tr1 = displ(2,i,j,iside)<0.0_r8.and.displ(4,i,j,iside).ge.0.0_r8 !departure point in tr1 quadrant + tr2 = displ(4,i,j,iside)<0.0_r8.and.displ(5,i,j,iside) >0.0_r8 !departure point in tr2 quadrant + + ! + ! pathological cases + ! + ! | || || || || + ! | ||-----------|| ||-----------|| + ! | || || || || + ! ================================ ================================= + ! || || | || || + ! ---------|| || ------|--|| || + ! || || | || || + ! + ! tl1=tl1.or.tl2 + ! tr1=tr1.or.tr2 + ! tl1=displ(3,i,j,iside)<0.0_r8.and..not.(tl1.and.tl2) + ! tr1=displ(2,i,j,iside)<0.0_r8.and..not.(tr1.and.tr2) + + num_seg=-1; num_seg_static=-1 !initialization + if (.not.tl1.and..not.tl2.and..not.tr1.and..not.tr2) then + flowcase=0 + ! + ! || || || || || || + ! || * * || || *----------* |*----------* || + ! || / \ || || / || || \ || + ! ||/ \|| ||/ || || \|| + ! ============================= ============================= ============================= + ! || || || || || || + ! + ! + call define_area3_center (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec,fvm%se_flux(i,j,iside,ilev),displ_first_guess) + + gamma=1.0_r8!fvm%se_flux(i,j,iside,ilev) + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else + if (tl1.and.tr1) then + flowcase=1 + ! + ! + ! tl1 || || tr1 || || || || + ! *--||-------------||--* *--||-------------|| ||-------------||--* + ! \ || || / \ || ||\ /|| || / + ! \|| ||/ \|| || \ / || ||/ + ! ============================= =========================*=== ==*========================== + ! || || || || || || + ! + call define_area2 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static,& + num_seg, num_seg_static,x_start, dgam_vec,displ_first_guess) + call define_area3_left_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static,& + num_seg, num_seg_static,x_start, dgam_vec) + call define_area4 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static,& + num_seg, num_seg_static,x_start, dgam_vec) + gamma=1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tl1.and..not.tr1.and..not.tr2) then + flowcase=2 + ! + ! || || || || || || + ! *--||----------* || /||----------* || *--||-------------* + ! \ || \ || / || \ || \ || || + ! \|| \|| / || \|| \|| || + ! ============================= ==*========================== ============================= + ! || || || || || || + ! + call define_area2 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, num_seg_static,& + x_start, dgam_vec,displ_first_guess) + call define_area3_left(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, num_seg_static,& + x_start, dgam_vec) + gamma=1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tr1.and..not.tl1.and..not.tl2) then !displ(3).ge.0.0_r8) then + flowcase=3 + ! + ! || *----------||--* || *----------||\ *-------------||--* + ! || / || / || / || \ || || / + ! ||/ ||/ ||/ || \ || ||/ + ! ============================= ==========================*== ============================= + ! || || || || || || + ! || || || || || || + ! || || || || || || + ! 
+ call define_area3_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static, x_start, dgam_vec) + call define_area4 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static, x_start, dgam_vec,displ_first_guess) + gamma=1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tl2.and..not.tr1.and..not.tr2) then !displ(2).ge.0.0_r8) then + flowcase=4 + ! + ! ||----------* || ||-------------* + ! /|| \ || /|| || + ! / || \|| / || || + ! ===/========================= ===/========================= + ! | /|| || | /|| || + ! |/ || || |/ || || + ! * || || * || || + ! + call define_area1_area2(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + call define_area3_left (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,& + x_start, dgam_vec,displ_first_guess) + gamma = 1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tr2.and..not.tl1.and..not.tl2) then !displ(3).ge.0.0_r8) then + flowcase=5 + ! case(5) + ! + ! + ! || *-----2----|| + ! || /1 3||\ + ! ||/ 4 || \ + ! ============================= + ! || ||\ | + ! || || \| + ! || || * + ! + call define_area3_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + call define_area4_area5(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec,displ_first_guess) + gamma=1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tl2.and.tr1.and..not.tr2) then + flowcase=6 + ! case(6) + ! + ! + ! ||-------------||--* + ! /|| || / + ! / || ||/ + ! ===/========================= + ! | /|| || + ! |/ || || + ! * || || + ! + ! + call define_area1_area2 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + call define_area3_left_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + call define_area4 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec,displ_first_guess) + + gamma=1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tr2.and.tl1.and..not.tl2) then + flowcase=7 + ! case(7) + ! + ! + ! *--||-------------|| + ! \ || ||\ + ! \|| || \ + ! ============================= + ! || ||\ | + ! || || \| + ! || || * + ! + ! + call define_area2 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec,displ_first_guess) + call define_area3_left_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + call define_area4_area5 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,& + num_seg_static,x_start, dgam_vec) + gamma = 1.0_r8 + gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess + else if (tl2.and.tr2) then + flowcase=8 + ! case(8) + ! + ! + ! ||-------------|| + ! /|| ||\ + ! / || || \ + ! ============================= + ! | /|| ||\ | + ! |/ || || \| + ! * || || * + ! + ! + ! + ! + ! 
+ call define_area1_area2 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,&
+ num_seg_static,x_start, dgam_vec)
+ call define_area3_left_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,&
+ num_seg_static,x_start, dgam_vec)
+ call define_area4_area5 (i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg,&
+ num_seg_static,x_start, dgam_vec,displ_first_guess)
+ gamma = 1.0_r8
+ gamma_max = fvm%displ_max(i,j,iside)/displ_first_guess
+ else
+ call endrun('ERROR - unknown flow case')
+ end if
+ end if
+ !
+ ! iterate to get flux area
+ !
+ !call t_startf('fvm:swept_area:get_gamma')
+ do iarea=1,num_area
+ dp_area(iarea) = dp(idx(1,iarea,i,j,iside),idx(2,iarea,i,j,iside))
+ end do
+ call get_flux_segments_area_iterate(x,x_static,dx_static,dx,x_start,dgam_vec,num_seg,num_seg_static,&
+ num_seg_max,num_area,dp_area,flowcase,gamma,mass_flux_se(i,j,iside),0.0_r8,gamma_max, &
+ gsweights,gspts,ilev)
+ !call t_stopf('fvm:swept_area:get_gamma')
+ !
+ ! pack segments for high-order weights computation
+ !
+ do iarea=1,num_area
+ do iseg=1,num_seg_static(iarea)
+ iseg_tmp=num_seg(iarea)+iseg
+ x (:,iseg_tmp,iarea) = x_static (:,iseg,iarea)
+ dx(:,iseg_tmp,iarea) = dx_static(:,iseg,iarea)
+ end do
+ num_seg(iarea)=num_seg(iarea)+MAX(0,num_seg_static(iarea))
+ end do
+ !
+ ! compute higher-order weights
+ !
+ !call t_startf('fvm:swept_area:get_high_order_w')
+ call get_high_order_weights_over_areas(x,dx,num_seg,num_seg_max,num_area,weights,ngpc,&
+ gsweights, gspts,irecons_tracer)
+ !call t_stopf('fvm:swept_area:get_high_order_w')
+ !
+ !**************************************************
+ !
+ ! remap air and tracers
+ !
+ !**************************************************
+ !
+ !call t_startf('fvm:swept_area:remap')
+ flux=0.0_r8; flux_tracer=0.0_r8
+ do iarea=1,num_area
+ if (num_seg(iarea)>0) then
+ ii=idx(1,iarea,i,j,iside); jj=idx(2,iarea,i,j,iside)
+ flux=flux+weights(1,iarea)*dp(ii,jj)
+ do itr=1,ntrac
+ do iw=1,irecons_tracer_actual
+ flux_tracer(itr) = flux_tracer(itr)+weights(iw,iarea)*ctracer(iw,ii,jj,itr)
+ end do
+ end do
+ end if
+ end do
+ fvm%se_flux(i,j,iside,ilev) = mass_flux_se(i,j,iside)-flux
+ if (fvm%se_flux(i,j,iside,ilev)>1.0E-13_r8.and.(ilev<kmin_jet.or.ilev>kmax_jet)) then
+ write(iulog,*) "CN excess flux outside of prescribed jet region"
+ write(iulog,*) "Increase jet region with kmin_jet and kmax_jet ",&
+ ilev,fvm%se_flux(i,j,iside,ilev),mass_flux_se(i,j,iside),flux,flowcase,&
+ kmin_jet,kmax_jet
+ call endrun('ERROR in CSLAM: local Courant number is > 1; Increase kmin_jet/kmax_jet?')
+ end if
+
+ fvm%dp_fvm(i ,j ,ilev ) = fvm%dp_fvm(i ,j ,ilev )-flux
+ fvm% c(i ,j ,ilev,1:ntrac) = fvm% c(i ,j ,ilev,1:ntrac)-flux_tracer(1:ntrac)
+ !
+ ! update flux in nearest neighbor cells
+ !
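+ ! side 1 borders cell (i,j-1), side 2 borders (i+1,j), side 3 borders (i,j+1) and
+ ! side 4 borders (i-1,j); the mass and tracer amounts removed from cell (i,j) above
+ ! are added to that neighbor, so the update is locally conservative.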
+ if (iside==1) then + fvm%dp_fvm(i,j-1,ilev ) = fvm%dp_fvm(i,j-1,ilev )+flux + fvm% c(i,j-1,ilev,1:ntrac) = fvm% c(i,j-1,ilev,1:ntrac)+flux_tracer(1:ntrac) + end if + if (iside==2) then + fvm%dp_fvm(i+1,j,ilev ) = fvm%dp_fvm(i+1,j,ilev )+flux + fvm% c(i+1,j,ilev,1:ntrac) = fvm% c(i+1,j,ilev,1:ntrac)+flux_tracer(1:ntrac) + end if + if (iside==3) then + fvm%dp_fvm(i,j+1,ilev ) = fvm%dp_fvm(i,j+1,ilev )+flux + fvm% c(i,j+1,ilev,1:ntrac) = fvm% c(i,j+1,ilev,1:ntrac)+flux_tracer(1:ntrac) + end if + if (iside==4) then + fvm%dp_fvm(i-1,j,ilev ) = fvm%dp_fvm(i-1,j,ilev )+flux + fvm% c(i-1,j,ilev,1:ntrac) = fvm% c(i-1,j,ilev,1:ntrac)+flux_tracer(1:ntrac) + end if + !call t_stopf('fvm:swept_area:remap') + end if + end do + end do + end do + end subroutine swept_flux + + + subroutine large_courant_number_increment(fvm,ilev) + implicit none + type (fvm_struct), intent(inout):: fvm + integer , intent(in) :: ilev + + integer, parameter :: num_sides=4, imin= 0, imax=nc+1 + + integer, dimension(4), parameter :: imin_side = (/1 ,0 ,1 ,1 /) + integer, dimension(4), parameter :: imax_side = (/nc ,nc ,nc ,nc+1/) + integer, dimension(4), parameter :: jmin_side = (/1 ,1 ,0 ,1 /) + integer, dimension(4), parameter :: jmax_side = (/nc+1,nc ,nc ,nc /) + + integer :: i,j,iside,itr + real (kind=r8) :: flux,flux_tracer(ntrac) + real (kind=r8), dimension(0:nc+1,0:nc+1) :: inv_dp_area + real (kind=r8), dimension(0:nc+1,0:nc+1,ntrac):: c_tmp + + inv_dp_area=1.0_r8/fvm%dp_fvm(0:nc+1,0:nc+1,ilev) + c_tmp = fvm%c(0:nc+1,0:nc+1,ilev,1:ntrac) + do iside=1,4 + do j=jmin_side(iside),jmax_side(iside) + do i=imin_side(iside),imax_side(iside) + if (fvm%se_flux(i,j,iside,ilev)>eps) then + flux = fvm%se_flux(i,j,iside,ilev) +#ifdef waccm_debug + if (i>0.and.j>0.and.i | | --2-->| |--1--> | + ! -4----------3- /\ -4----------3- -4----------3- -4----------3- || + ! | | /||\ |\\\\\\\\\\| || | |\\\\\\| |\\\\\\| | + ! | --2--> | || dv(1) |\\\\\\\\\\| || | |\\\\\\| |\\\\\\| | + ! |----------| || |----------| || dv(3) | |\\\\\\| |\\\\\\| | + ! |\\\\\\\\\\| || | <--2--- | \||/ | |\\\\\\| |\\\\\\| | + ! |\\\\\\\\\\| || | | \/ | |\\\\\\| |\\\\\\| | + ! -1----------2- -1----------2- -1----------2- -1----------2- + ! | <--1-- | | | | <--1--| |<--2-- + ! + ! / \ + ! line-integral <========== =========> + ! from vertex 2 \ dv(2) dv(4)/ + ! to 1 + ! + ! Note vertical + ! lines have + ! zero line- + ! integral! + ! 
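+ ! compute_displacements_for_swept_areas: for each cell side, iterate the scaling gamma
+ ! of the side displacement until the area swept by the displaced side, times the
+ ! cell-mean air density, matches the spectral-element side mass flux; the converged
+ ! displacement is stored back in fvm%se_flux for use by define_swept_areas/swept_flux.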
+ integer :: i,j,iside,ix + integer, parameter :: num_area=1, num_seg_max=2 + REAL(KIND=r8), dimension(2,num_seg_max,num_area,4,nc,nc) :: x_static, dx_static + REAL(KIND=r8), dimension(2,num_seg_max,num_area,4,nc,nc) :: x, dx + REAL(KIND=r8), dimension(2,num_seg_max,num_area) :: x_tmp, dx_tmp + integer , dimension( num_area,4 ) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8, 4,nc,nc) :: x_start, dgam_vec + REAL(KIND=r8), dimension(num_area) :: dp_area + integer, dimension(4) :: flowcase + REAL(KIND=r8) :: gamma(4), flux_se + + num_seg_static(1,1) = 1; num_seg(1,1) = 1; flowcase(1) = -1 + num_seg_static(1,2) = 0; num_seg(1,2) = 2; flowcase(2) = -2 + num_seg_static(1,3) = 1; num_seg(1,3) = 1; flowcase(3) = -1 + num_seg_static(1,4) = 0; num_seg(1,4) = 2; flowcase(4) = -4 + + do j=1,nc + do i=1,nc + do ix=1,2 + iside=1; + x_static (ix,1,1,iside,i,j) = fvm%vtx_cart(2,ix,i,j) + dx_static(ix,1,1,iside,i,j) = fvm%vtx_cart(1,ix,i,j)-fvm%vtx_cart(2,ix,i,j) + x_start (ix,1, iside,i,j) = fvm%vtx_cart(1,ix,i,j) + x_start (ix,2, iside,i,j) = fvm%vtx_cart(2,ix,i,j) + dgam_vec (ix,1, iside,i,j) = fvm%vtx_cart(4,ix,i,j)-fvm%vtx_cart(1,ix,i,j) + ! + ! compute first guess + ! + gamma(iside) = 0.5_r8 + x (ix,1,1,iside,i,j) = x_start(ix,1,iside,i,j)+gamma(iside)*dgam_vec(ix,1,iside,i,j) + dx (ix,1,1,iside,i,j) = -dx_static(ix,1,1,iside,i,j) + ! + ! side 2 + ! + iside=2; + x_start (ix,1, iside,i,j) = fvm%vtx_cart(2,ix,i,j) + x_start (ix,2, iside,i,j) = fvm%vtx_cart(3,ix,i,j) + dgam_vec (ix,1, iside,i,j) = fvm%vtx_cart(1,ix,i,j)-fvm%vtx_cart(2,ix,i,j) + x (ix,1,1,iside,i,j) = x_start(ix,1,iside,i,j) + ! + ! compute first guess - gamma=1 + ! + gamma(iside) = 0.5_r8 + dx (ix,1,1,iside,i,j) = gamma(iside)*dgam_vec (ix,1, iside,i,j) + x (ix,2,1,iside,i,j) = x_start(ix,2,iside,i,j)+gamma(iside)*dgam_vec(ix,1,iside,i,j) + dx (ix,2,1,iside,i,j) = -gamma(iside)*dgam_vec (ix,1, iside,i,j) + ! + ! side 3 + ! + iside=3; + x_static (ix,1,1,iside,i,j) = fvm%vtx_cart(4,ix,i,j) + dx_static(ix,1,1,iside,i,j) = fvm%vtx_cart(3,ix,i,j)-fvm%vtx_cart(4,ix,i,j) + x_start (ix,1, iside,i,j) = fvm%vtx_cart(3,ix,i,j) + x_start (ix,2, iside,i,j) = fvm%vtx_cart(4,ix,i,j) + dgam_vec (ix,1, iside,i,j) = fvm%vtx_cart(2,ix,i,j)-fvm%vtx_cart(3,ix,i,j) + ! + ! compute first guess - gamma(iside)=1 + ! + gamma(iside) = 0.5_r8 + x (ix,1,1,iside,i,j) = x_start(ix,1,iside,i,j)+gamma(iside)*dgam_vec(ix,1,iside,i,j) + dx (ix,1,1,iside,i,j) = -dx_static(ix,1,1,iside,i,j) + ! + ! side 4 + ! + iside=4; + x_start (ix,1, iside,i,j) = fvm%vtx_cart(1,ix,i,j) + x_start (ix,2, iside,i,j) = fvm%vtx_cart(4,ix,i,j) + dgam_vec (ix,1, iside,i,j) = fvm%vtx_cart(2,ix,i,j)-fvm%vtx_cart(1,ix,i,j) + x (ix,2,1,iside,i,j) = x_start(ix,2,iside,i,j) + ! + ! compute first guess - gamma(iside)=1 + ! + gamma(iside) = 0.5_r8 + dx (ix,2,1,iside,i,j) = gamma(iside)*dgam_vec (ix,1, iside,i,j) + x (ix,1,1,iside,i,j) = x_start(ix,1,iside,i,j)+gamma(iside)*dgam_vec(ix,1,iside,i,j) + dx (ix,1,1,iside,i,j) = -gamma(iside)*dgam_vec (ix,1, iside,i,j) + end do + end do + end do + +! do k=1,nlev + do j=1,nc + do i=1,nc + dp_area = cair(i,j) + do iside=1,4 + flux_se = -fvm%se_flux(i,j,iside,k) + if (flux_se>eps) then + gamma(iside)=0.5_r8 + ! + ! this copying is necessary since get_flux_segments_area_iterate change x and dx + ! 
+ x_tmp (:,1:num_seg(1,iside),:)=x (:,1:num_seg(1,iside),:,iside,i,j) + dx_tmp(:,1:num_seg(1,iside),:)=dx(:,1:num_seg(1,iside),:,iside,i,j) + call get_flux_segments_area_iterate(& + x_tmp(:,:,:),x_static(:,:,:,iside,i,j),dx_static(:,:,:,iside,i,j),dx_tmp(:,:,:),& + x_start(:,:,iside,i,j),dgam_vec(:,:,iside,i,j),num_seg(:,iside),num_seg_static(:,iside),& + num_seg_max,num_area,dp_area,flowcase(iside),gamma(iside),flux_se,0.0_r8,1.0_r8, & + gsweights,gspts,k) + fvm%se_flux(i,j,iside,k) = ABS(SUM(gamma(iside)*dgam_vec(:,1,iside,i,j))) +#ifdef waccm_debug + fvm%CSLAM_gamma(i,j,k,iside) = gamma(iside) +#endif + if (gamma(iside)>1_r8) then + if (.not.large_Courant_incr) then + write(iulog,*) 'ERROR in CSLAM: local Courant number is >1: gamma=',gamma(iside),' k=',k + call endrun('ERROR in CSLAM: local Courant number is > 1; set namelist se_large_Courant_incr=.true. ') + endif + gamma(iside)=1.0_r8-eps + end if + else + fvm%se_flux(i,j,iside,k) = 0.0_r8 +#ifdef waccm_debug + fvm%CSLAM_gamma(i,j,k,iside) = 0.0_r8 +#endif + end if + enddo + end do + end do +! end do + end subroutine compute_displacements_for_swept_areas + + + + subroutine get_flux_segments_area_iterate(x,x_static,dx_static,dx,x_start,dgam_vec,num_seg,num_seg_static,& + num_seg_max,num_area,c,flow_case,gamma,flux,gamma_min,gamma_max,gsweights,gspts,ilev) + implicit none + integer , intent(in) :: num_area, num_seg_max + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(in) :: x_static, dx_static + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx + integer , dimension(num_area ), intent(in) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) , intent(in) :: x_start, dgam_vec + REAL(KIND=r8) , intent(inout) :: gamma + REAL(KIND=r8) , intent(in) :: flux,gamma_min,gamma_max + integer , intent(in) :: flow_case,ilev + + real (kind=r8), dimension(num_area) , intent(in) :: c + real (kind=r8), dimension(ngpc) , intent(in) :: gsweights, gspts + + real (kind=r8) :: flux_static + real (kind=r8) :: weight_area(num_area), xtmp(2), xtmp2(2) + real (kind=r8) :: gamma1, gamma2, gamma3, dgamma, f1, f2 + + real (kind=r8), dimension( ngpc ) :: xq,yq + real (kind=r8), dimension( ngpc,1) :: F !linear + + real (kind=r8) :: xq2,xq2i, rho, rhoi, yrh, w_static(num_area) + + integer :: iseg,iarea,iter,ipt + integer, parameter :: iter_max=40 + logical :: lexit_after_one_more_iteration + + lexit_after_one_more_iteration = .false. + ! + ! compute static line-integrals (not necessary to recompute them for every iteration) + ! + flux_static = 0.0_r8 + w_static = 0.0_r8 + weight_area = 0.0_r8 + do iarea=1,num_area + do iseg=1,num_seg_static(iarea) + +!rck vector directive needed here +!DIR$ SIMD + do ipt=1,ngpc + xq(ipt) = x_static(1,iseg,iarea)+dx_static(1,iseg,iarea)*gspts(ipt)! create quadrature point locations + yq(ipt) = x_static(2,iseg,iarea)+dx_static(2,iseg,iarea)*gspts(ipt) + F(ipt,1) = yq(ipt)/(SQRT(1.0_r8+xq(ipt)*xq(ipt) + yq(ipt)*yq(ipt))*(1.0_r8+xq(ipt)*xq(ipt)))! potential ! potential + enddo + weight_area(iarea) = weight_area(iarea)+sum(gsweights(:)*F(:,1))*0.5_r8*dx_static(1,iseg,iarea) !integral + end do + w_static(iarea)= weight_area(iarea) + flux_static = flux_static+weight_area(iarea)*c(iarea) !add to swept flux + end do + ! + ! initilization + ! + gamma1=0.0_r8; f1=-flux ! zero flux guess 1 + ! + ! compute flux integrals of first guess passed to subroutine + ! 
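+ ! The Gauss-Legendre quadrature above (and again below) evaluates line integrals of the
+ ! potential F(x,y) = y/((1+x^2)*sqrt(1+x^2+y^2)); since dF/dy is the spherical area
+ ! element in gnomonic coordinates, summing these line integrals around a closed boundary
+ ! yields the (signed) spherical area of the enclosed region, so weight_area(iarea)
+ ! accumulates the area of each overlap region and weight_area*c its mass contribution.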
+ gamma2=gamma + f2 = 0.0_r8 + weight_area=w_static + do iarea=1,num_area + do iseg=1,num_seg(iarea) +!rck vector directive needed here +!DIR$ SIMD + do ipt=1,ngpc + xq(ipt) = x(1,iseg,iarea)+dx(1,iseg,iarea)*gspts(ipt)! create quadrature point locations + yq(ipt) = x(2,iseg,iarea)+dx(2,iseg,iarea)*gspts(ipt) + xq2 = xq(ipt)*xq(ipt) + xq2i = 1.0_r8/(1.0_r8+xq2) + rho = SQRT(1.0_r8+xq2+yq(ipt)*yq(ipt)) + rhoi = 1.0_r8/rho + yrh = yq(ipt)*rhoi + F(ipt,1) = yrh*xq2i + enddo + weight_area(iarea) = weight_area(iarea)+sum(gsweights(:)*F(:,1))*0.5_r8*dx(1,iseg,iarea)! integral + end do + f2 = f2+weight_area(iarea)*c(iarea) + end do + f2 = f2-flux !integral error + iter=0 + if (abs(f2-f1)gamma_max) then + lexit_after_one_more_iteration=.true. + gamma=gamma_max + gamma3=gamma_max + else + exit + end if + else + ! + ! Newton increment + ! + if (abs(f2-f1)eps and abs(f1)>eps but abs(f2-f1)eps) then + gamma3 = gamma2-dgamma; + else + ! + ! dgamma set to minimum displacement to avoid f2-f1=0 + ! + gamma3=gamma2-SIGN(1.0_r8,dgamma)*eps + end if + gamma3=MAX(gamma3,gamma_min) + ! + ! prepare for next iteration + ! + gamma1 = gamma2; f1 = f2; gamma2 = gamma3; + endif + end do + if (iter>iter_max) write(iulog,*) "WARNING: iteration not converged",& + ABS(f2),flux,gamma1,gamma2,gamma3,ilev + end subroutine get_flux_segments_area_iterate + + subroutine define_swept_areas(fvm,ilev,displ,base_vec,base_vtx,idx) + use control_mod, only : neast, nwest, seast, swest + implicit none + type (fvm_struct), intent(inout) :: fvm + integer , intent(in) :: ilev + + + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides), intent(out) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(out) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(out) :: base_vtx + integer , dimension(2,num_area, imin:imax,imin:imax,num_sides), intent(out) :: idx + + real (kind=r8) :: flux_sum (0:nc+1,0:nc+1,2) + integer :: degenerate (1:nc+1,1:nc+1 ) + integer :: circular_flow(1:nc+1,1:nc+1 ) + integer :: illcond (1:nc+1,1:nc+1) + integer :: ib,i,j,sgn, iside, iarea + + ! + ! set where reconstruction function is as a function of area and side + ! 
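+ ! For each of the five potential overlap areas and each flux side, idx_shift/idy_shift
+ ! give the (i,j) offset of the cell whose reconstruction (and air density) is integrated
+ ! over that area; the resulting indices are stored in idx and looked up in swept_flux.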
+ integer, dimension(num_area*4), parameter :: idx_shift_tmp = (/-1,-1, 0, 1, 1,& !iside=1 + 1, 0, 0, 0, 1,& !iside=2 + 1, 1, 0,-1,-1,& !iside=3 + -1, 0, 0, 0,-1/) !iside=4 + + integer, dimension(num_area*4), parameter :: idy_shift_tmp = (/-1, 0, 0, 0,-1,& !iside=1 + -1,-1, 0, 1, 1,& !iside=2 + 1, 0, 0, 0, 1,& !iside=3 + 1, 1, 0,-1,-1/) !iside=4 + + integer, dimension(num_area,4), parameter :: idx_shift = RESHAPE(idx_shift_tmp,(/num_area,4/)) + integer, dimension(num_area,4), parameter :: idy_shift = RESHAPE(idy_shift_tmp,(/num_area,4/)) + + integer, dimension(4), parameter :: iside_m1 = (/4,1,2,3/) + integer, dimension(4), parameter :: iside_p1 = (/2,3,4,1/) + integer, dimension(4), parameter :: iside_p2 = (/3,4,1,2/) + integer, dimension(4), parameter :: iside_p3 = (/4,1,2,3/) + + integer, dimension(4), parameter :: imin_side = (/1 ,0 ,1 ,1 /) + integer, dimension(4), parameter :: imax_side = (/nc ,nc ,nc ,nc+1/) + integer, dimension(4), parameter :: jmin_side = (/1 ,1 ,0 ,1 /) + integer, dimension(4), parameter :: jmax_side = (/nc+1,nc ,nc ,nc /) + + + + integer :: iur,jur,ilr,jlr,iul,jul,ill,jll + + ib = fvm%cubeboundary + flux_sum(0:nc+1,1:nc+1,1) = fvm%se_flux(0:nc+1,0:nc ,3,ilev)-fvm%se_flux(0:nc+1,1:nc+1,1,ilev) + flux_sum(1:nc+1,0:nc+1,2) = fvm%se_flux(0:nc ,0:nc+1,2,ilev)-fvm%se_flux(1:nc+1,0:nc+1,4,ilev) + + ! + ! Degenerate case ("two departure points") + ! + ! || | || no change in this situation || no change in this situation + ! || | || || + ! ||-------- ||---------- ||---------- + ! || | || || + ! ======================= ======================= ===================== + ! | || | || || + ! -----|---|| ------|---|| ---------|| + ! | || | || || + ! | || | || || + ! + ! + where (flux_sum(0:nc,1:nc+1,1)*flux_sum(1:nc+1,1:nc+1,1)<0.0_r8.and.flux_sum(1:nc+1,0:nc,2)*flux_sum(1:nc+1,1:nc+1,2)<0.0_r8) + degenerate(:,:) = 0 + elsewhere + degenerate(:,:) = 1 + end where + + if (ib>0) then + if (ib==swest) degenerate(1 ,1 ) = 1 + if (ib==nwest) degenerate(1 ,nc+1) = 1 + if (ib==neast) degenerate(nc+1,nc+1) = 1 + if (ib==seast) degenerate(nc+1,1 ) = 1 + end if + + do j=1,nc+1 + do i=1,nc+1 + do sgn=-1,1,2 + if (& + sgn*flux_sum(i-1,j,1)<0.0_r8.and.sgn*flux_sum(i,j-1,2)>0.0_r8.and.& + sgn*flux_sum(i ,j,1)>0.0_r8.and.sgn*flux_sum(i,j ,2)<0.0_r8) then + circular_flow(i,j) = 0 + else + circular_flow(i,j) = 1 + end if + end do + end do + end do + ! + ! wrap around corners + ! 
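+ ! At cube-panel corner elements one of the neighboring side fluxes does not exist, so
+ ! flux_sum and the circular-flow test are recomputed at the corner cell from the side
+ ! fluxes that are available (one case per corner: nwest, swest, neast, seast).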
+ if (ib==nwest) then + flux_sum(0,nc+1,1) = fvm%se_flux(0,nc,3,ilev)-fvm%se_flux(1,nc+1,4,ilev) + flux_sum(1,nc+1,2) = fvm%se_flux(0,nc,3,ilev)-fvm%se_flux(1,nc+1,4,ilev) + + i=1;j=nc+1; + circular_flow(i,j) = 1 + do sgn=-1,1,2 + if (& + sgn*flux_sum(i,j-1,2)>0.0_r8.and.& + sgn*flux_sum(i ,j,1)>0.0_r8.and.sgn*flux_sum(i,j ,2)<0.0_r8) then + circular_flow(i,j) = 0 + end if + end do + else if (ib==swest) then + flux_sum(0,1,1) = fvm%se_flux(1,0,4,ilev)-fvm%se_flux(0,1,1,ilev) + flux_sum(1,0,2) = fvm%se_flux(0,1,1,ilev)-fvm%se_flux(1,0,4,ilev) + i=1;j=1; + circular_flow(i,j) = 1 + do sgn=-1,1,2 + if (& + sgn*flux_sum(i-1,j,1)<0.0_r8.and.& + sgn*flux_sum(i ,j,1)>0.0_r8.and.sgn*flux_sum(i,j ,2)<0.0_r8) then + circular_flow(i,j) = 0 + end if + end do + else if (ib==neast) then + flux_sum(nc+1,nc+1,1) = fvm%se_flux(nc+1,nc,3,ilev)-fvm%se_flux(nc,nc+1,2,ilev) + flux_sum(nc+1,nc+1,2) = fvm%se_flux(nc,nc+1,2,ilev)-fvm%se_flux(nc+1,nc,3,ilev) + i=nc+1;j=nc+1; + circular_flow(i,j) = 1 + do sgn=-1,1,2 + if (& + sgn*flux_sum(i-1,j,1)<0.0_r8.and.sgn*flux_sum(i,j-1,2)>0.0_r8.and.& + sgn*flux_sum(i,j ,2)<0.0_r8) then + circular_flow(i,j) = 0 + end if + end do + else if (ib==seast) then + flux_sum(nc+1,1 ,1) = fvm%se_flux(nc,0,2,ilev)-fvm%se_flux(nc+1,1,1,ilev) + flux_sum(nc+1,0 ,2) = fvm%se_flux(nc,0,2,ilev)-fvm%se_flux(nc+1,1,1,ilev) + i=nc+1;j=1; + circular_flow(i,j) = 1 + do sgn=-1,1,2 + if (& + sgn*flux_sum(i-1,j,1)<0.0_r8.and.sgn*flux_sum(i,j-1,2)>0.0_r8.and.& + sgn*flux_sum(i,j ,2)<0.0_r8) then + circular_flow(i,j) = 0 + end if + end do + end if + illcond = circular_flow*degenerate + ! + ! + ! + ! + do iside=1,4 + do j=jmin_side(iside),jmax_side(iside) + do i=imin_side(iside),imax_side(iside) + if (fvm%se_flux(i,j,iside,ilev)>eps) then + iur = i+idx_shift(4,iside); jur = j+idy_shift(4,iside) !(i,j) index of upper right quadrant + ilr = i+idx_shift(5,iside); jlr = j+idy_shift(5,iside) !(i,j) index of lower left quadrant + iul = i+idx_shift(2,iside); jul = j+idy_shift(2,iside) !(i,j) index of upper right quadrant + ill = i+idx_shift(1,iside); jll = j+idy_shift(1,iside) !(i,j) index of lower left quadrant + + !iside=1 + if (iside==1) then + displ(0,i,j,iside) = -flux_sum (i ,j ,1)*illcond(i,j) !center left + displ(1,i,j,iside) = -flux_sum (i ,j ,1)*illcond(i+1,j) !center right + displ(2,i,j,iside) = flux_sum (i+1,j ,2)*illcond(i+1,j) !c2 + displ(3,i,j,iside) = -flux_sum (i ,j ,2)*illcond(i ,j) !c3 + displ(4,i,j,iside) = -flux_sum (i+1,j ,1)*illcond(i+1,j) !r1 + displ(5,i,j,iside) = -flux_sum (i+1,j-1,2)*illcond(i+1,j) !r2 + displ(6,i,j,iside) = -flux_sum (i-1,j ,1)*illcond(i ,j) !l1 + displ(7,i,j,iside) = flux_sum (i ,j-1,2)*illcond(i ,j) !l2 + + end if + if (iside==2) then + !iside=2 + displ(0,i,j,iside) = flux_sum (i+1,j ,2)*illcond(i+1,j ) !center left + displ(1,i,j,iside) = flux_sum (i+1,j ,2)*illcond(i+1,j+1) !center right + displ(2,i,j,iside) = flux_sum (i ,j+1,1)*illcond(i+1,j+1) !c2 + displ(3,i,j,iside) = -flux_sum (i ,j ,1)*illcond(i+1,j ) !c3 + displ(4,i,j,iside) = flux_sum (i+1,j+1,2)*illcond(i+1,j+1) !r1 + displ(5,i,j,iside) = -flux_sum (i+1,j+1,1)*illcond(i+1,j+1) !r2 + displ(6,i,j,iside) = flux_sum (i+1,j-1,2)*illcond(i+1,j) !l1 + displ(7,i,j,iside) = flux_sum (i+1,j ,1)*illcond(i+1,j) !l2 + end if + !iside=3 + if (iside==3) then + displ(0,i,j,iside) = flux_sum (i ,j+1,1)*illcond(i+1,j+1) !center left + displ(1,i,j,iside) = flux_sum (i ,j+1,1)*illcond(i ,j+1) !center right + displ(2,i,j,iside) = -flux_sum (i ,j ,2)*illcond(i ,j+1) !c2 + displ(3,i,j,iside) = flux_sum (i+1,j 
,2)*illcond(i+1,j+1) !c3 + displ(4,i,j,iside) = flux_sum (i-1,j+1,1)*illcond(i ,j+1) !r1 + displ(5,i,j,iside) = flux_sum (i ,j+1,2)*illcond(i ,j+1) !r2 + displ(6,i,j,iside) = flux_sum (i+1,j+1,1)*illcond(i+1,j+1) !l1 + displ(7,i,j,iside) = -flux_sum (i+1,j+1,2)*illcond(i+1,j+1) !l2 + end if + if (iside==4) then + !iside=4 + displ(0,i,j,iside) = -flux_sum (i ,j ,2)*illcond(i ,j+1) !center left + displ(1,i,j,iside) = -flux_sum (i ,j ,2)*illcond(i ,j ) !center right + displ(2,i,j,iside) = -flux_sum (i ,j ,1)*illcond(i ,j ) !c2 + displ(3,i,j,iside) = flux_sum (i ,j+1,1)*illcond(i ,j+1) !c3 + displ(4,i,j,iside) = -flux_sum (i ,j-1,2)*illcond(i ,j ) !r1 + displ(5,i,j,iside) = flux_sum (i-1,j ,1)*illcond(i ,j ) !r2 + displ(6,i,j,iside) = -flux_sum (i ,j+1,2)*illcond(i ,j+1) !l1 + displ(7,i,j,iside) = -flux_sum (i-1,j+1,1)*illcond(i ,j+1) !l2 + end if + + base_vtx(:,1,i,j,iside) = fvm%vtx_cart(iside,:,i ,j ) !vertex center left + base_vtx(:,2,i,j,iside) = fvm%vtx_cart(iside_p1(iside),:,i ,j ) !vertex center right + base_vtx(:,3,i,j,iside) = fvm%vtx_cart(iside,:,iur,jur ) !vertex upper right + base_vtx(:,4,i,j,iside) = fvm%vtx_cart(iside_p3(iside),:,ilr,jlr) !vertex lower right + base_vtx(:,5,i,j,iside) = fvm%vtx_cart(iside_p1(iside),:,iul,jul) !vertex upper left + base_vtx(:,6,i,j,iside) = fvm%vtx_cart(iside_p2(iside),:,ill,jll) !vertex lower left + + base_vec(:, 1,i,j,iside) = fvm%flux_vec (:,i ,j ,iside ) !vector center + base_vec(:, 2,i,j,iside) = fvm%flux_vec (:,i ,j ,iside_p1(iside)) !vector center right + base_vec(:, 3,i,j,iside) = fvm%flux_vec (:,i ,j ,iside_p3(iside)) !vector center left + base_vec(:, 4,i,j,iside) = fvm%flux_vec (:,iur,jur,iside ) !vector upper right 1 + base_vec(:, 5,i,j,iside) = fvm%flux_vec (:,iur,jur,iside_p3(iside)) !vector upper right 2 + base_vec(:, 6,i,j,iside) = fvm%flux_vec (:,ilr,jlr,iside_p3(iside)) !vector lower right 1 + base_vec(:, 7,i,j,iside) = fvm%flux_vec (:,ilr,jlr,iside_p2(iside)) !vector lower right 2 + base_vec(:, 8,i,j,iside) = fvm%flux_vec (:,iul,jul,iside ) !vector upper left 1 + base_vec(:, 9,i,j,iside) = fvm%flux_vec (:,iul,jul,iside_p1(iside)) !vector upper left 2 + base_vec(:,10,i,j,iside) = fvm%flux_vec (:,ill,jll,iside_p1(iside)) !vector lower left 1 + base_vec(:,11,i,j,iside) = fvm%flux_vec (:,ill,jll,iside_p2(iside)) !vector lower left 2 + + do iarea=1,5 + idx(1,iarea,i,j,iside) = i+idx_shift(iarea,iside) + idx(2,iarea,i,j,iside) = j+idy_shift(iarea,iside) + end do + else + displ(:,i,j,iside) = 9D99!for debugging + end if + end do + end do + end do + ! + ! wrap around corners here + ! + + end subroutine define_swept_areas + + + ! + ! Notation conventions used in define_area subroutines + ! + ! + ! + ! ^ ||---> ^ <---|| ^ + ! /|\ || 3 /|\ 2 || /|\ + ! | 6 || 1 | || | 4 + ! | || | || | + ! ================================= + ! || || + ! || || + ! 7 || || 5 + ! <---|| ||---> + ! 
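+ ! Each define_area* routine below assembles the boundary segments of one (or two) of the
+ ! overlap areas: x_static/dx_static hold segments that do not depend on the iteration
+ ! parameter, while x/dx together with x_start/dgam_vec describe segments that are
+ ! rescaled by gamma in get_flux_segments_area_iterate until the integrated mass matches
+ ! the spectral-element side flux.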
+ + subroutine define_area1_area2(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, num_seg_static,& + x_start, dgam_vec) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + integer, parameter :: num_seg_max=5 + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer , dimension(num_area) , intent(inout) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) , intent(inout):: x_start, dgam_vec + + + real (kind=r8) , dimension(2,3) :: xdep !departure points + real (kind=r8) :: gamma + integer :: iarea + + + REAL(KIND=r8) :: xtmp(2),xtmp2(2) + ! + ! + ! ||----- || + ! /|| || + ! / || || + ! ===X========================= + ! | /|| || + ! |/ || || + ! * || || + ! + ! + ! crossing X + if (SUM(ABS(base_vec(:,9,i,j,iside))).NE.0) then + gamma = displ(0,i,j,iside)*displ(7,i,j,iside)/(displ(0,i,j,iside)-displ(6,i,j,iside)) +! gamma = MAX(MIN(gamma,displ(7,i,j,iside),-displ(3,i,j,iside)),0.0_r8)!MWR manuscript + gamma = MAX(MIN(gamma,displ(7,i,j,iside),-0.25_r8*displ(3,i,j,iside)),0.0_r8) + else + ! + ! corner case + ! + gamma=displ(0,i,j,iside) + end if + + + xdep (:,1) = base_vtx(:, 6,i,j,iside)+displ(7,i,j,iside)*base_vec(:,10,i,j,iside)-displ(6,i,j,iside)*base_vec(:,11,i,j,iside) + x_start (:,1) = base_vtx(:, 6,i,j,iside) + dgam_vec(:,1) = base_vec(:,10,i,j,iside)*gamma + + xdep(:,2) = base_vtx(:,2,i,j,iside)+displ(1,i,j,iside)*base_vec(:, 1,i,j,iside)+displ(2,i,j,iside)*base_vec(:, 2,i,j,iside) + + iarea = 1 + num_seg (iarea) = 2 + num_seg_static(iarea) = 1 + + x_static (:,1,iarea) = base_vtx(:,6,i,j,iside) !static + dx_static(:,1,iarea) = xdep(:,1)-x_static(:,1,iarea) !static + + xtmp(: ) = x_start(:,1)+dgam_vec(:,1) + x (:,1,iarea) = xdep(:,1) !static + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic + + x (:,2,iarea) = xtmp(:) !dynamic + dx(:,2,iarea) = x_static(:,1,iarea)-xtmp(:) !dynamic + ! + ! + ! 
+ iarea = 2 + num_seg (iarea) = 3 + + x_start (:,2) = base_vtx(:,5,i,j,iside) + dgam_vec(:,2) = base_vec(:,9,i,j,iside)*gamma + xtmp (: ) = x_start(:,2)+dgam_vec(:,2) + + x_start (:,3) = base_vtx(:,5,i,j,iside) + dgam_vec(:,3) = base_vec(:,8,i,j,iside)*displ(0,i,j,iside) + xtmp2 (: ) = x_start(:,3)+dgam_vec(:,3) + + x (:,1,iarea) = base_vtx(:,5,i,j,iside) !static + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic + + x (:,2,iarea) = xtmp (:) !dynamic + dx(:,2,iarea) = xtmp2(:)-xtmp(:) !dynamic + + x (:,3,iarea) = xtmp2(:) !dynamic + dx(:,3,iarea) = x(:,1,iarea)-xtmp2(:) !dynamic + end subroutine define_area1_area2 + + + subroutine define_area2(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, num_seg_static,x_start, dgam_vec,& + displ_first_guess) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + integer, parameter :: num_seg_max=5 + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer , dimension(num_area) , intent(inout) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) , intent(inout):: x_start, dgam_vec + + + real (kind=r8) , dimension(2,3) :: xdep !departure points + real (kind=r8), optional, intent(out) :: displ_first_guess + real (kind=r8) :: gamma + integer :: iarea + + + REAL(KIND=r8) :: xtmp(2) + ! *: xdep(:,1) + ! x: xtmp + ! + ! 2 || || + ! *--x || + ! 1\3||1 || + ! \|| || + ! ============================= + ! || || + ! + ! + ! compute departure points (xdep(1) is left; xdep(3) is right and xdep(2) is midway + ! 
+ xdep(:,1) = base_vtx(:,5,i,j,iside)+& + MAX(0.0_r8,displ(6,i,j,iside))*base_vec(:,8,i,j,iside)-displ(3,i,j,iside)*base_vec(:,9,i,j,iside) + x_start (:,1) = base_vtx(:,5,i,j,iside) + gamma = displ(0,i,j,iside) + dgam_vec(:,1) = base_vec(:,8,i,j,iside)*gamma + if (present(displ_first_guess)) displ_first_guess = gamma + + iarea = 2 + num_seg (iarea) = 2 + num_seg_static(iarea) = 1 + + x_static (:,1,iarea) = base_vtx(:,5,i,j,iside) !static - line 1 + dx_static(:,1,iarea) = xdep(:,1)-x_static(:,1,iarea) !static - line 1 + + xtmp (: ) = x_start(:,1)+dgam_vec(:,1) + x (:,1,iarea) = xdep(:,1) !static - line 2 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 2 + + x (:,2,iarea) = xtmp(:) !dynamic - line 3 + dx (:,2,iarea) = x_static(:,1,iarea)-xtmp(:) !dynamic - line 3 + end subroutine define_area2 + + + subroutine define_area3_left(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, & + num_seg, num_seg_static,x_start, dgam_vec,displ_first_guess) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + integer, parameter :: num_seg_max=5 + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer , dimension(num_area) , intent(inout) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) , intent(inout):: x_start, dgam_vec + real (kind=r8), optional, intent(out) :: displ_first_guess + + real (kind=r8) , dimension(2,3) :: xdep !departure points + real (kind=r8) :: gamma + integer :: iarea + + + REAL(KIND=r8) :: xtmp(2) + + ! iarea = 3 + !------------------------------------------------------------------------------------------- + ! + ! xtmp xdep(2) + ! |x-----2------* || + ! || \ || + ! |1 3 || + ! || \|| + ! ===========4============== + ! + ! 
+ xdep(:,2) = base_vtx(:,2,i,j,iside)+displ(1,i,j,iside)*base_vec(:,1,i,j,iside)& + +MAX(0.0_r8,displ(2,i,j,iside))*base_vec(:,2,i,j,iside) + x_start (:,4) = base_vtx(:,1,i,j,iside) + gamma = displ(0,i,j,iside) + dgam_vec(:,4) = base_vec(:,1,i,j,iside)*gamma + xtmp (: ) = x_start(:,4)+dgam_vec(:,4) + + if (present(displ_first_guess)) displ_first_guess = gamma + + iarea = 3 + num_seg (iarea) = 2 + num_seg_static(iarea) = 2 + + x_static (:,1,iarea) = xdep(:,2) !static - line 3 + dx_static(:,1,iarea) = base_vtx(:,2,i,j,iside)-xdep(:,2) !static - line 3 + + x_static (:,2,iarea) = base_vtx(:,2,i,j,iside) !static - line 4 + dx_static(:,2,iarea) = base_vtx(:,1,i,j,iside)-base_vtx(:,2,i,j,iside) !static - line 4 + + x (:,1,iarea) = base_vtx(:,1,i,j,iside) !static - line 1 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 1 + + x (:,2,iarea) = xtmp(:) !dynamic -line 2 + dx (:,2,iarea) = x_static(:,1,iarea)-xtmp(:) !dynamic - line 2 + end subroutine define_area3_left + + subroutine define_area3_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static,x_start, dgam_vec) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + real (kind=r8) , dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8) , dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8) , dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + integer, parameter :: num_seg_max=5 + REAL(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer , dimension(num_area) , intent(inout) :: num_seg, num_seg_static + REAL(KIND=r8), dimension(2,8) , intent(inout):: x_start, dgam_vec + + + real (kind=r8) , dimension(2,3) :: xdep !departure points + real (kind=r8) :: gamma + integer :: iarea + + REAL(KIND=r8) :: xtmp(2) + ! + ! + ! || *-----2----||\ + ! || /1 3|| \ + ! ||/ 4 || + ! ============================= + ! || || + ! || || + ! || || + ! 
+ xdep(:,1) = base_vtx(:,1,i,j,iside)+displ(0,i,j,iside)*base_vec(:,1,i,j,iside)& + +MAX(0.0_r8,displ(3,i,j,iside))*base_vec(:,3,i,j,iside) + x_start (:,5) = base_vtx(:,2,i,j,iside) + gamma = displ(1,i,j,iside) + dgam_vec(:,5) = base_vec(:,1,i,j,iside)*gamma + xtmp (: ) = x_start(:,5)+dgam_vec(:,5) + + iarea = 3 + num_seg (iarea) = 2 + num_seg_static(iarea) = 2 + + x_static (:,1,iarea) = base_vtx(:,1,i,j,iside) !static - line 1 + dx_static(:,1,iarea) = xdep(:,1)-base_vtx(:,1,i,j,iside) !static - line 1 + + x_static (:,2,iarea) = base_vtx(:,2,i,j,iside) !static - line 4 + dx_static(:,2,iarea) = base_vtx(:,1,i,j,iside)-base_vtx(:,2,i,j,iside) !static - line 4 + + x (:,1,iarea) = xdep(:,1) !static - line 2 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 2 + + x (:,2,iarea) = xtmp(:) !dynamic -line 2 + dx (:,2,iarea) = x_static(:,2,iarea)-xtmp(:) !dynamic - line 2 + end subroutine define_area3_right + + + subroutine define_area3_left_right(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static,x_start, dgam_vec) + implicit none + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + integer, parameter :: num_seg_max=5 + integer, intent(in) :: i,j,iside + real (kind=r8), dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout):: displ + integer (kind=r8), dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout):: base_vec + real (kind=r8), dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout):: base_vtx + real(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout):: x, dx, x_static, dx_static + integer, dimension(num_area), intent(inout):: num_seg, num_seg_static + real(KIND=r8), dimension(2,8), intent(inout):: x_start, dgam_vec + + real (kind=r8) :: gamma + integer :: iarea + real(KIND=r8) :: xtmp(2),xtmp2(2) + ! + ! ||-------------|| + ! /|| ||\ + ! || || + ! ============================= + ! || || + ! || || + ! || || + ! 
+ x_start (:,4) = base_vtx(:,1,i,j,iside) + x_start (:,5) = base_vtx(:,2,i,j,iside) + gamma = displ(0,i,j,iside) + dgam_vec(:,4) = base_vec(:,1,i,j,iside)*gamma + dgam_vec(:,5) = base_vec(:,1,i,j,iside)*gamma + xtmp (: ) = x_start(:,4)+dgam_vec(:,4) + xtmp2 (: ) = x_start(:,5)+dgam_vec(:,5) + + iarea = 3 + num_seg (iarea) = 3 + num_seg_static(iarea) = 1 + + x_static (:,1,iarea) = base_vtx(:,2,i,j,iside) !static + dx_static(:,1,iarea) = base_vtx(:,1,i,j,iside)-base_vtx(:,2,i,j,iside) !static + + x (:,1,iarea) = base_vtx(:,1,i,j,iside) !static + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic + + x (:,2,iarea) = xtmp (:) !dynamic + dx (:,2,iarea) = xtmp2(:)-xtmp(:) !dynamic + + x (:,3,iarea) = xtmp2(:) !dynamic + dx (:,3,iarea) = x_start(:,5)-xtmp2(:) !dynamic + end subroutine define_area3_left_right + + subroutine define_area4_area5(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static,x_start, dgam_vec,displ_first_guess) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + integer, parameter :: num_seg_max=5 + real (kind=r8), dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8), dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8), dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + real(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer, dimension(num_area), intent(inout) :: num_seg, num_seg_static + real(KIND=r8), dimension(2,8), intent(inout) :: x_start, dgam_vec + real(KIND=r8), optional, intent(out) :: displ_first_guess + + + real (kind=r8) , dimension(2,3) :: xdep !departure points + real (kind=r8) :: gamma + integer :: iarea + + real(KIND=r8) :: xtmp(2),xtmp2(2) + ! + ! || --------|| + ! || ||\ + ! || || \ + ! ============================= + ! || ||\ | + ! || || \| + ! || || * + ! + ! + ! iarea = 4 + ! + iarea = 4 + num_seg (iarea) = 3 + + if (SUM(ABS(base_vec(:,5,i,j,iside))).NE.0) then + gamma = displ(1,i,j,iside)*displ(5,i,j,iside)/(displ(1,i,j,iside)-displ(4,i,j,iside)) +! gamma = MAX(MIN(gamma,displ(5,i,j,iside),-displ(2,i,j,iside)),0.0_r8)!MWR manuscript + gamma = MAX(MIN(gamma,displ(5,i,j,iside),-0.25_r8*displ(2,i,j,iside)),0.0_r8) + else + ! + ! corner case + ! + gamma = displ(1,i,j,iside) + end if + + if (present(displ_first_guess)) displ_first_guess = displ(1,i,j,iside) + + x_start (:,6) = base_vtx(:,3,i,j,iside) + dgam_vec(:,6) = base_vec(:,4,i,j,iside)*displ(1,i,j,iside) + xtmp (: ) = x_start(:,6)+dgam_vec(:,6) + x_start (:,7) = base_vtx(:,3,i,j,iside) + dgam_vec(:,7) = base_vec(:,5,i,j,iside)*gamma + xtmp2 (: ) = x_start(:,7)+dgam_vec(:,7) + + x (:,1,iarea) = base_vtx(:,3,i,j,iside)!static -line 1 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 1 + + x (:,2,iarea) = xtmp(:) !dynamic -line 2 + dx (:,2,iarea) = xtmp2(:)-xtmp(:) !dynamic - line 2 + + x (:,3,iarea) = xtmp2(:) !static -line 1 + dx (:,3,iarea) = x(:,1,iarea)-xtmp2(:) !dynamic - line 1 + ! + !iarea = 5 + ! 
+ xdep(:,1) = base_vtx(:,4,i,j,iside)+displ(5,i,j,iside)*base_vec(:,6,i,j,iside)& + -displ(4,i,j,iside)*base_vec(:,7,i,j,iside) + x_start (:,8) = base_vtx(:,4,i,j,iside) + dgam_vec(:,8) = base_vec(:,6,i,j,iside)*gamma + xtmp (: ) = x_start(:,8)+dgam_vec(:,8) + + iarea = 5 + num_seg (iarea) = 2 + num_seg_static(iarea) = 1 + + x (:,1,iarea) = base_vtx(:,4,i,j,iside)!static -line 1 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 1 + + x_static (:,1,iarea) = xdep(:,1) !static - line 1 + dx_static(:,1,iarea) = x(:,1,iarea)-x_static(:,1,iarea) !static - line 1 + + x (:,2,iarea) = xtmp(:) !dynamic -line 2 + dx (:,2,iarea) = x_static(:,1,iarea)-xtmp(:) !dynamic - line 2 + end subroutine define_area4_area5 + + + subroutine define_area4(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, & + num_seg_static,x_start, dgam_vec,displ_first_guess) + implicit none + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + integer, parameter :: num_seg_max=5 + integer, intent(in) :: i,j,iside + real (kind=r8), dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8), dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8), dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + + real(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer, dimension(num_area) , intent(inout) :: num_seg, num_seg_static + real(KIND=r8), dimension(2,8) , intent(inout) :: x_start, dgam_vec + real(KIND=r8), optional, intent(out) :: displ_first_guess + + + + real (kind=r8), dimension(2,3) :: xdep !departure points + real (kind=r8) :: gamma + integer :: iarea + real(KIND=r8) :: xtmp(2) + + iarea = 4 + num_seg (iarea) = 2 + num_seg_static(iarea) = 1 + + xdep(:,1) = base_vtx(:,3,i,j,iside)+MAX(0.0_r8,displ(4,i,j,iside))*base_vec(:,4,i,j,iside)& + -displ(2,i,j,iside)*base_vec(:,5,i,j,iside) + x_start (:,6) = base_vtx(:,3,i,j,iside) + gamma = displ(1,i,j,iside) + dgam_vec(:,6) = base_vec(:,4,i,j,iside)*gamma + xtmp (: ) = x_start(:,6)+dgam_vec(:,6) + + if (present(displ_first_guess)) displ_first_guess = gamma + + x_static (:,1,iarea) = xdep(:,1) !static + dx_static(:,1,iarea) = base_vtx(:,3,i,j,iside)-xdep(:,1) !static + + x (:,1,iarea) = base_vtx(:,3,i,j,iside) !static - line 2 + dx (:,1,iarea) = xtmp(:)-x(:,1,iarea) !dynamic - line 2 + + x (:,2,iarea) = xtmp(:) !dynamic -line 2 + dx (:,2,iarea) = x_static(:,1,iarea)-xtmp(:) !dynamic - line 2 + end subroutine define_area4 + + subroutine define_area3_center(i,j,iside,displ,base_vec,base_vtx,x, dx, x_static, dx_static, num_seg, num_seg_static,& + x_start, dgam_vec,se_flux_center,displ_first_guess) + implicit none + integer, intent(in) :: i,j,iside + integer, parameter :: num_area=5, num_sides=4, imin= 0, imax=nc+1 + integer, parameter :: num_seg_max=5 + real (kind=r8), dimension(0:7 , imin:imax,imin:imax,num_sides), intent(inout) :: displ + integer (kind=r8), dimension(1:2,11 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vec + real (kind=r8), dimension(1:2, 6 , imin:imax,imin:imax,num_sides), intent(inout) :: base_vtx + + real(KIND=r8), dimension(2,num_seg_max,num_area), intent(inout) :: x, dx, x_static, dx_static + integer, dimension(num_area), intent(inout) :: num_seg, num_seg_static + real(KIND=r8), dimension(2,8), intent(inout) :: x_start, dgam_vec + real(KIND=r8) , intent(in ) :: se_flux_center + real(KIND=r8), optional, intent(out) :: displ_first_guess + + real (kind=r8) , dimension(2,3) :: xdep 
!departure points + real (kind=r8) :: gamma + integer :: iarea + ! + ! xdep(2) + ! ______X______ + ! || / \ || + ! || *--/ \--* || + ! || /xdep(1) xdep(3)\ || + ! ||/ \|| + ! ======================================== + ! || || + ! + ! + ! compute departure points (xdep(1) is left; xdep(3) is right and xdep(2) is midway + ! + + xdep(:,1) = base_vtx(:,1,i,j,iside)+& + displ(0,i,j,iside)*base_vec(:,1,i,j,iside)+displ(3,i,j,iside)*base_vec(:,3,i,j,iside) + xdep(:,3) = base_vtx(:,2,i,j,iside)+& + displ(1,i,j,iside)*base_vec(:,1,i,j,iside)+displ(2,i,j,iside)*base_vec(:,2,i,j,iside) + xdep(:,2) = 0.5_r8*(xdep(:,1)+xdep(:,3)) + + gamma= se_flux_center + x_start(:,1) = ABS(base_vec(:,3,i,j,iside))*((xdep(:,2)-base_vtx(:,1,i,j,iside)))+& + base_vtx(:,1,i,j,iside) !xdep(2) - midway between departure points projected to side 1 + + dgam_vec(:,1) = gamma*base_vec(:,1,i,j,iside) + + if (present(displ_first_guess)) displ_first_guess = gamma + + xdep(:,2) = x_start(:,1)+dgam_vec(:,1) + iarea = 3 + num_seg (iarea) = 2 + num_seg_static(iarea) = 3 + + ! ______X______ + ! || 2 / \ 3 || + ! || *--/ \--* || + ! || / \ || + ! ||/ 1 5 4\|| + ! ======================================== + ! || || + ! + x_static (:,1,iarea) = base_vtx(:,1,i,j,iside) !static - line 1 + dx_static(:,1,iarea) = xdep(:,1)-x_static(:,1,iarea) !static - line 1 + + x (:,1,iarea) = xdep(:,1) !static - line 2 + dx (:,1,iarea) = xdep(:,2)-x(:,1,iarea) !dynamic - line 2 + + x (:,2,iarea) = xdep(:,2) !dynamic - line 3 + dx (:,2,iarea) = xdep(:,3)-x(:,2,iarea) !dynamic - line 3 + + x_static (:,2,iarea) = xdep(:,3) !static - line 4 + dx_static(:,2,iarea) = base_vtx(:,2,i,j,iside)-x_static(:,2,iarea)!static - line 4 + + x_static (:,3,iarea) = base_vtx(:,2,i,j,iside) !static - line 5 + dx_static(:,3,iarea) = base_vtx(:,1,i,j,iside)-base_vtx(:,2,i,j,iside) !static - line 5 + + end subroutine define_area3_center +end module fvm_consistent_se_cslam diff --git a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 new file mode 100644 index 00000000..c1b3c6fc --- /dev/null +++ b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 @@ -0,0 +1,311 @@ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +!MODULE FVM_CONTROL_VOLUME_MOD---------------------------------------------CE-for FVM +! AUTHOR: Christoph Erath, 11.June 2011 ! +! This module contains everything to initialize the arrival. It also provides the ! +! interpolation points for the reconstruction (projection from one face to another ! +! when the element is on the cube edge) ! +! It also intialize the start values, see also fvm_analytic ! +!-----------------------------------------------------------------------------------! +module fvm_control_volume_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use coordinate_systems_mod, only: spherical_polar_t + use element_mod, only: element_t + use dimensions_mod, only: nc, nhe, nlev, ntrac_d, qsize_d,ne, np, nhr, ns, nhc + use dimensions_mod, only: fv_nphys, nhe_phys, nhr_phys, ns_phys, nhc_phys,fv_nphys + use dimensions_mod, only: irecons_tracer + use cam_abortutils, only: endrun + + implicit none + private + integer, parameter, private:: nh = nhr+(nhe-1) ! = 2 (nhr=2; nhe=1) + ! = 3 (nhr=2; nhe=2) + + type, public :: fvm_struct + ! 
fvm tracer mixing ratio: (kg/kg) + real (kind=r8) :: c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d) + real (kind=r8) :: se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev) + + real (kind=r8) :: dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev) + real (kind=r8) :: dp_ref(nlev) + real (kind=r8) :: dp_ref_inverse(nlev) + real (kind=r8) :: psc(nc,nc) + + real (kind=r8) :: inv_area_sphere(nc,nc) ! inverse area_sphere + real (kind=r8) :: inv_se_area_sphere(nc,nc) ! inverse area_sphere + + integer :: faceno !face number + ! number of south,....,swest and 0 for interior element + integer :: cubeboundary + +#ifdef waccm_debug + real (kind=r8) :: CSLAM_gamma(nc,nc,nlev,4) +#endif + real (kind=r8) :: displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4) + integer :: flux_vec (2,1-nhc:nc+nhc,1-nhc:nc+nhc,4) + ! + ! + ! cartesian location of vertices for flux sides + ! + ! x-coordinate of vertex 1: vtx_cart(1,1i,j,1,1) = fvm%acartx(i) + ! y-coordinate of vertex 1: vtx_cart(1,2,i,j,2,1) = fvm%acarty(j) + ! + ! x-coordinate of vertex 2: vtx_cart(2,1,i,j) = fvm%acartx(i+1) + ! y-coordinate of vertex 2: vtx_cart(2,2,i,j) = fvm%acarty(j ) + ! + ! x-coordinate of vertex 3: vtx_cart(3,1,i,j) = fvm%acartx(i+1) + ! y-coordinate of vertex 3: vtx_cart(3,2,i,j) = fvm%acarty(j+1) + ! + ! x-coordinate of vertex 4: vtx_cart(4,1,i,j) = fvm%acartx(i ) + ! y-coordinate of vertex 4: vtx_cart(4,2,i,j) = fvm%acarty(j+1) + ! + real (kind=r8) :: vtx_cart (4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + ! + ! flux_orient(1,i,j) = panel on which control volume (i,j) is located + ! flux_orient(2,i,j) = cshift value for vertex permutation + ! + real (kind=r8) :: flux_orient(2 ,1-nhc:nc+nhc,1-nhc:nc+nhc) + ! + ! i,j: indicator function for non-existent cells (0 for corner halo and 1 elsewhere) + ! + integer :: ifct (1-nhc:nc+nhc,1-nhc:nc+nhc) + integer :: rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + ! + real (kind=r8) :: dalpha, dbeta ! central-angle for gnomonic coordinates + type (spherical_polar_t) :: center_cart(nc,nc) ! center of fvm cell in gnomonic coordinates + real (kind=r8) :: area_sphere(nc,nc) ! spherical area of fvm cell + real (kind=r8) :: spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc) ! centroids + ! + ! pre-computed metric terms (for efficiency) + ! + ! recons_metrics(1,:,:) = spherecentroid(1,:,:)**2 -spherecentroid(3,:,:) + ! recons_metrics(2,:,:) = spherecentroid(2,:,:)**2 -spherecentroid(4,:,:) + ! recons_metrics(3,:,:) = spherecentroid(1,:,:)*spherecentroid(2,:,:)-spherecentroid(5,:,:) + ! + real (kind=r8) :: recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + ! + ! recons_metrics_integral(1,:,:) = 2.0_r8*spherecentroid(1,:,:)**2 -spherecentroid(3,:,:) + ! recons_metrics_integral(2,:,:) = 2.0_r8*spherecentroid(2,:,:)**2 -spherecentroid(4,:,:) + ! recons_metrics_integral(3,:,:) = 2.0_r8*spherecentroid(1,:,:)*spherecentroid(2,:,:)-spherecentroid(5,:,:) + ! + real (kind=r8) :: recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + ! + integer :: jx_min(3), jx_max(3), jy_min(3), jy_max(3) !bounds for computation + + ! provide fixed interpolation points with respect to the arrival grid for + ! reconstruction + integer :: ibase(1-nh:nc+nh,1:nhr,2) + real (kind=r8) :: halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) + real (kind=r8) :: centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe) !for finite-difference reconstruction + ! + ! pre-compute weights for reconstruction at cell vertices + ! + ! ! Evaluate constant order terms + ! value = fcube(a,b) + & + ! ! Evaluate linear order terms + ! recons(1,a,b) * (cartx - centroid(1,a,b)) + & + ! 
recons(2,a,b) * (carty - centroid(2,a,b)) + & + ! ! Evaluate second order terms + ! recons(3,a,b) * (centroid(1,a,b)**2 - centroid(3,a,b)) + & + ! recons(4,a,b) * (centroid(2,a,b)**2 - centroid(4,a,b)) + & + ! recons(5,a,b) * (centroid(1,a,b) * centroid(2,a,b) - centroid(5,a,b)) + & + ! + ! recons(3,a,b) * (cartx - centroid(1,a,b))**2 + & + ! recons(4,a,b) * (carty - centroid(2,a,b))**2 + & + ! recons(5,a,b) * (cartx - centroid(1,a,b)) * (carty - centroid(2,a,b)) + ! + real (kind=r8) :: vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe) + ! + ! for mapping fvm2dyn + ! + real (kind=r8) :: norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc) + ! + !****************************************** + ! + ! separate physics grid variables + ! + !****************************************** + ! + real (kind=r8) , allocatable :: phis_physgrid(:,:) + real (kind=r8) , allocatable :: vtx_cart_physgrid(:,:,:,:) + real (kind=r8) , allocatable :: flux_orient_physgrid(:,:,:) + integer , allocatable :: ifct_physgrid(:,:) + integer , allocatable :: rot_matrix_physgrid(:,:,:,:) + real (kind=r8) , allocatable :: spherecentroid_physgrid(:,:,:) + real (kind=r8) , allocatable :: recons_metrics_physgrid(:,:,:) + real (kind=r8) , allocatable :: recons_metrics_integral_physgrid(:,:,:) + ! centroid_stretch_physgrid for finite-difference reconstruction + real (kind=r8) , allocatable :: centroid_stretch_physgrid (:,:,:) + real (kind=r8) :: dalpha_physgrid, dbeta_physgrid ! central-angle for gnomonic coordinates + type (spherical_polar_t) , allocatable :: center_cart_physgrid(:,:) ! center of fvm cell in gnomonic coordinates + real (kind=r8) , allocatable :: area_sphere_physgrid(:,:) ! spherical area of fvm cell + integer :: jx_min_physgrid(3), jx_max_physgrid(3) !bounds for computation + integer :: jy_min_physgrid(3), jy_max_physgrid(3) !bounds for computation + integer , allocatable :: ibase_physgrid(:,:,:) + real (kind=r8) , allocatable :: halo_interp_weight_physgrid(:,:,:,:) + real (kind=r8) , allocatable :: vertex_recons_weights_physgrid(:,:,:,:) + + real (kind=r8) , allocatable :: norm_elem_coord_physgrid(:,:,:) + real (kind=r8) , allocatable :: Dinv_physgrid(:,:,:,:) + + real (kind=r8) , allocatable :: fc(:,:,:,:) + real (kind=r8) , allocatable :: fc_phys(:,:,:,:) + real (kind=r8) , allocatable :: ft(:,:,:) + real (kind=r8) , allocatable :: fm(:,:,:,:) + real (kind=r8) , allocatable :: dp_phys(:,:,:) + end type fvm_struct + + public :: fvm_mesh, fvm_set_cubeboundary, allocate_physgrid_vars + + + real (kind=r8),parameter, public :: bignum = 1.0E20_r8 + +contains + subroutine fvm_set_cubeboundary(elem, fvm) + implicit none + type (element_t) , intent(in) :: elem + type (fvm_struct), intent(inout) :: fvm + + logical :: corner + integer :: j, mynbr_cnt, mystart + integer :: nbrsface(8)! store the neighbours in north, south + + fvm%faceno=elem%FaceNum + ! write the neighbors in the structure + fvm%cubeboundary=0 + corner=.FALSE. + do j=1,8 + mynbr_cnt = elem%vertex%nbrs_ptr(j+1) - elem%vertex%nbrs_ptr(j) !length of neighbor location + mystart = elem%vertex%nbrs_ptr(j) + !NOTE: assuming that we do not have multiple corner neighbors (so not a refined mesh) + if (mynbr_cnt > 0 ) then + nbrsface(j)=elem%vertex%nbrs_face(mystart) + ! note that if the element lies on a corner, it will be at j=5,6,7,8 + if ((nbrsface(j) /= fvm%faceno) .AND. (j<5)) then + fvm%cubeboundary=j + endif + else ! corner on the cube + if (.NOT. corner) then + nbrsface(j)=-1 + fvm%cubeboundary=j + corner=.TRUE. + else + if ( ne == 0 ) then + ! 
dont check this condition. note that we call this code + ! generate phys grid template files, so we need to be able + ! to call create_ari() to create the subcells even though + ! cslam cant run with the unstructed ne=0 case + else + print *,'Error in fvm_CONTROL_VOLUME_MOD - Subroutine fvm_MESH_ARI: ' + call endrun('Do not allow one element per face for fvm, please increase ne!') + endif + endif + end if + end do + end subroutine fvm_set_cubeboundary + + subroutine fvm_mesh(elem, fvm) + use fvm_analytic_mod, only : compute_halo_vars + use fvm_analytic_mod, only : create_interpolation_points + use derivative_mod , only : subcell_integration + + implicit none + type (element_t), intent(in) :: elem + type (fvm_struct), intent(inout) :: fvm + integer :: i,j + real (kind=r8) :: tmp(np,np) + ! + ! initialize metric and related terms on panel + ! + call compute_halo_vars(& !input + fvm%faceno,fvm%cubeboundary,nc,nhc,nhe, & !input + fvm%jx_min,fvm%jx_max,fvm%jy_min,fvm%jy_max,&!output + fvm%flux_orient,fvm%ifct,fvm%rot_matrix) !output + do j=1,nc + do i=1,nc + fvm%norm_elem_coord(1,i,j) = elem%corners(1)%x+(i-0.5_r8)*fvm%dalpha + fvm%norm_elem_coord(2,i,j) = elem%corners(1)%y+(j-0.5_r8)*fvm%dalpha + end do + end do + + ! + ! overwrite areas for consistency with SE areas (that are O(10E-5) incorrect) + ! +! tmp = 1.0_r8 +! call subcell_integration(tmp, np, nc, elem%metdet,fvm%area_sphere) + ! + ! do the same for physics grid + ! + call compute_halo_vars(& + fvm%faceno,fvm%cubeboundary,fv_nphys,nhc_phys,nhe_phys,& + fvm%jx_min_physgrid,fvm%jx_max_physgrid,fvm%jy_min_physgrid,fvm%jy_max_physgrid,& + fvm%flux_orient_physgrid,fvm%ifct_physgrid,fvm%rot_matrix_physgrid) + do j=1,fv_nphys + do i=1,fv_nphys + fvm%norm_elem_coord_physgrid(1,i,j) = elem%corners(1)%x+(i-0.5_r8)*fvm%dalpha_physgrid + fvm%norm_elem_coord_physgrid(2,i,j) = elem%corners(1)%y+(j-0.5_r8)*fvm%dalpha_physgrid + end do + end do + ! + ! initialize halo interpolation variables + ! 
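+    ! (the two calls below pre-compute, once per element, the 1d interpolation
+    !  stencils and weights used when reconstructing halo values: first for the
+    !  fvm grid (nc cells, halo width nhc) and then for the physics grid
+    !  (fv_nphys cells, halo width nhc_phys))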
+ call create_interpolation_points(elem,& + nc,nhc,nhr,ns,nh,fvm%cubeboundary,& + fvm%dalpha,fvm%dbeta,fvm%ibase,fvm%halo_interp_weight) + call create_interpolation_points(elem,& + fv_nphys,nhc_phys,nhr_phys,ns_phys,nhr_phys,fvm%cubeboundary,& + fvm%dalpha_physgrid,fvm%dbeta_physgrid,fvm%ibase_physgrid,fvm%halo_interp_weight_physgrid) + end subroutine fvm_mesh + + + subroutine allocate_physgrid_vars(fvm,par) + use cam_logfile , only : iulog + use parallel_mod , only : parallel_t + use dimensions_mod, only : nelemd + type (fvm_struct), intent(inout) :: fvm(:) + type (parallel_t), intent(in) :: par + integer :: ie + + nhc_phys = fv_nphys + nhe_phys = 0 + nhr_phys = 2 + ns_phys = MAX(fv_nphys,2) + + if(par%masterproc) then + write(iulog,*)"allocating physgrid grid vars" + write(iulog,*)"fv_nphys,nhc_phys,nhe_phys,nhr_phys,ns_phys = ",& + fv_nphys,nhc_phys,nhe_phys,nhr_phys,ns_phys + end if + + do ie=1,nelemd + allocate(fvm(ie)%phis_physgrid (fv_nphys,fv_nphys)) + allocate(fvm(ie)%vtx_cart_physgrid (4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + allocate(fvm(ie)%flux_orient_physgrid (2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + allocate(fvm(ie)%ifct_physgrid (1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + allocate(fvm(ie)%rot_matrix_physgrid (2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + + allocate(fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,& + 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + allocate(fvm(ie)%recons_metrics_physgrid (3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) + allocate(fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) + allocate(fvm(ie)%centroid_stretch_physgrid (7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) + allocate(fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys)) + allocate(fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys)) + allocate(fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)) + allocate(fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)) + allocate(fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,& + 1-nhe_phys:fv_nphys+nhe_phys)) + + allocate(fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys )) + allocate(fvm(ie)%Dinv_physgrid ( 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2)) + + allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d))) + allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d))) + allocate(fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)) + allocate(fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev)) + allocate(fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)) + end do + end subroutine allocate_physgrid_vars +end module fvm_control_volume_mod diff --git a/src/dynamics/se/dycore/fvm_mapping.F90 b/src/dynamics/se/dycore/fvm_mapping.F90 new file mode 100644 index 00000000..9ff11819 --- /dev/null +++ b/src/dynamics/se/dycore/fvm_mapping.F90 @@ -0,0 +1,1276 @@ +! +! pg3->GLL and GLL->pg3 mapping algorithm described in: +! +! Adam R. Herrington, Peter H. Lauritzen, Mark A. Taylor, Steve Goldhaber, Brian Eaton, Kevin A Reed and Paul A. Ullrich, 2018: +! Physics-dynamics coupling with element-based high-order Galerkin methods: quasi equal-area physics grid: +! Mon. Wea. Rev., DOI:MWR-D-18-0136.1 +! 
+! pg2->pg3 mapping algorithm described in: +! +! Adam R. Herrington, Peter H. Lauritzen, Kevin A Reed, Steve Goldhaber, and Brian Eaton, 2019: +! Exploring a lower resolution physics grid in CAM-SE-CSLAM. J. Adv. Model. Earth Syst. +! +!#define PCoM !replace PPM with PCoM for mass variables for fvm2phys and phys2fvm +!#define skip_high_order_fq_map !do mass and correlation preserving phys2fvm mapping but no high-order pre-mapping of fq +#define mass_fix +module fvm_mapping + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: irecons_tracer + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use perf_mod, only: t_startf, t_stopf + + implicit none + private + + public :: phys2dyn_forcings_fvm, dyn2phys, dyn2phys_vector, dyn2phys_all_vars,dyn2fvm_mass_vars + public :: phys2dyn,fvm2dyn,dyn2fvm + save + integer :: save_max_overlap + real(kind=r8), allocatable, dimension(:,:,:,:,:) :: save_air_mass_overlap + real(kind=r8), allocatable, dimension(:,:,:,:,:,:) :: save_q_overlap + real(kind=r8), allocatable, dimension(:,:,:,:,:) :: save_q_phys + real(kind=r8), allocatable, dimension(:,:,:,:) :: save_dp_phys + real(kind=r8), allocatable, dimension(:,:,:,:) :: save_overlap_area + integer , allocatable, dimension(:,:,:,:,:) :: save_overlap_idx + integer , allocatable, dimension(:,:,:,:) :: save_num_overlap +contains + ! + ! map all mass variables from gll to fvm + ! + subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_qdp) + use dimensions_mod, only: np, nc,nlev + use dimensions_mod, only: fv_nphys, nhc_phys,ntrac,nhc,ksponge_end, nu_scale_top + use hybrid_mod, only: hybrid_t + use cam_abortutils, only: endrun + use physconst, only: thermodynamic_active_species_num, thermodynamic_active_species_idx + type (element_t), intent(inout):: elem(:) + type(fvm_struct), intent(inout):: fvm(:) + + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + logical, intent(in) :: no_cslam + integer, intent(in) :: nets, nete, tl_f, tl_qdp + + integer :: ie,i,j,k,m_cnst,nq + real (kind=r8), dimension(:,:,:,:,:) , allocatable :: fld_phys, fld_gll, fld_fvm + real (kind=r8), allocatable, dimension(:,:,:,:,:) :: qgll + real (kind=r8) :: element_ave + ! + ! for tensor product Lagrange interpolation + ! + integer :: nflds + logical, allocatable :: llimiter(:) + + allocate(qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)) + + do ie=nets,nete + do nq=1,thermodynamic_active_species_num + qgll(:,:,:,nq,ie) = elem(ie)%state%Qdp(:,:,:,nq,tl_qdp)/elem(ie)%state%dp3d(:,:,:,tl_f) + end do + end do + + if (no_cslam) then + call endrun("phys2dyn_forcings_fvm: no cslam case: NOT SUPPORTED") + else if (nc.ne.fv_nphys) then + ! + !*********************************************************** + ! + ! using cslam and different resolution physics grid + ! + !*********************************************************** + ! + call t_startf('p2d-pg2:copying') + nflds = 4+ntrac + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) + allocate(fld_gll(np,np,nlev,3,nets:nete)) + allocate(llimiter(nflds)) + fld_phys = -9.99E99_r8!xxx necessary? + + llimiter = .false. + + do ie=nets,nete + ! + ! pack fields that need to be interpolated + ! 
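+          ! (slot layout of fld_phys, matching nflds = 4+ntrac:
+          !    1         = ft      (temperature forcing)
+          !    2,3       = fm      (the two momentum forcing components)
+          !    4         = dp_phys (layer thickness)
+          !    5:4+ntrac = fc_phys (tracer forcings))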
+ fld_phys(1:fv_nphys,1:fv_nphys,:,1,ie) = fvm(ie)%ft(1:fv_nphys,1:fv_nphys,:) + fld_phys(1:fv_nphys,1:fv_nphys,:,2,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,1,:) + fld_phys(1:fv_nphys,1:fv_nphys,:,3,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,2,:) + fld_phys(1:fv_nphys,1:fv_nphys,:,4,ie) = fvm(ie)%dp_phys(1:fv_nphys,1:fv_nphys,:) + do m_cnst=1,ntrac + fld_phys(1:fv_nphys,1:fv_nphys,:,4+m_cnst,ie) = & + fvm(ie)%fc_phys(1:fv_nphys,1:fv_nphys,:,m_cnst) + end do + end do + call t_stopf('p2d-pg2:copying') + call t_startf('p2d-pg2:fill_halo_phys') + call fill_halo_phys(fld_phys,hybrid,nets,nete,nlev,nflds) + ! + ! do mapping of fu,fv,ft + ! + call phys2dyn(hybrid,elem,fld_phys(:,:,:,1:3,:),fld_gll(:,:,:,1:3,:),nets,nete,nlev,3,fvm,llimiter(1:3),2,.true.) + do ie=nets,nete + elem(ie)%derived%fT(:,:,:) = fld_gll(:,:,:,1,ie) + elem(ie)%derived%fM(:,:,1,:) = fld_gll(:,:,:,2,ie) + elem(ie)%derived%fM(:,:,2,:) = fld_gll(:,:,:,3,ie) + end do + call t_stopf('p2d-pg2:fill_halo_phys') + + deallocate(fld_gll) + ! + ! map fq from phys to fvm + ! + call t_startf('p2d-pg2:phys2fvm') + + do ie=nets,nete + do k=1,nlev + call phys2fvm(ie,k,fvm(ie),& + fld_phys(:,:,k,5:4+ntrac,ie),fvm(ie)%fc(:,:,k,1:ntrac),ntrac) + end do + end do + call t_stopf('p2d-pg2:phys2fvm') + + ! + ! overwrite SE Q with cslam Q + ! + nflds = thermodynamic_active_species_num + allocate(fld_gll(np,np,nlev,nflds,nets:nete)) + allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)) + do ie=nets,nete + ! + ! compute cslam updated Q value + do m_cnst=1,thermodynamic_active_species_num + fld_fvm(1:nc,1:nc,:,m_cnst,ie) = fvm(ie)%c(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))+& + fvm(ie)%fc(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))/fvm(ie)%dp_fvm(1:nc,1:nc,:) + enddo + end do + call t_startf('p2d-pg2:fvm2dyn') + llimiter(1:nflds) = .false. + call fvm2dyn(fld_fvm,fld_gll(:,:,:,1:nflds,:),hybrid,nets,nete,nlev,nflds,fvm,llimiter(1:nflds)) + call t_stopf('p2d-pg2:fvm2dyn') + ! + ! fld_gll now holds q cslam value on gll grid + ! + ! convert fld_gll to increment (q_new-q_old) + ! + do ie=nets,nete + do m_cnst=1,thermodynamic_active_species_num + elem(ie)%derived%fq(:,:,:,m_cnst) =& + fld_gll(:,:,:,m_cnst,ie)-qgll(:,:,:,m_cnst,ie) + end do + end do + deallocate(fld_fvm) + !deallocate arrays allocated in dyn2phys_all_vars + deallocate(save_air_mass_overlap,save_q_phys,save_q_overlap,& + save_overlap_area,save_num_overlap,save_overlap_idx,save_dp_phys) + else + ! + ! + !***************************************************************************************** + ! + ! using cslam with same physics grid resolution as cslam resolution + ! + !***************************************************************************************** + ! + ! nflds is ft, fu, fv, + thermo species + nflds = 3+thermodynamic_active_species_num + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) + allocate(fld_gll(np,np,nlev,nflds,nets:nete)) + allocate(llimiter(nflds)) + llimiter(1:nflds) = .false. + do ie=nets,nete + ! + ! pack fields that need to be interpolated + ! + fld_phys(1:fv_nphys,1:fv_nphys,:,1,ie) = fvm(ie)%ft(1:fv_nphys,1:fv_nphys,:) + fld_phys(1:fv_nphys,1:fv_nphys,:,2,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,1,:) + fld_phys(1:fv_nphys,1:fv_nphys,:,3,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,2,:) + ! + ! compute cslam mixing ratio with physics update + ! 
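+          ! (fc_phys is applied here as a mixing-ratio increment, so c + fc_phys is
+          !  the physics-updated CSLAM mixing ratio; the conversion of fc_phys to a
+          !  mass increment via dp_fvm happens further below when fvm%fc is set)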
+ do m_cnst=1,thermodynamic_active_species_num + do k=1,nlev + fld_phys(1:fv_nphys,1:fv_nphys,k,m_cnst+3,ie) = & + fvm(ie)%c(1:fv_nphys,1:fv_nphys,k,thermodynamic_active_species_idx(m_cnst))+& + fvm(ie)%fc_phys(1:fv_nphys,1:fv_nphys,k,thermodynamic_active_species_idx(m_cnst)) + end do + end do + end do + ! + ! do mapping + ! + call phys2dyn(hybrid,elem,fld_phys,fld_gll,nets,nete,nlev,nflds,fvm,llimiter,2) + do ie=nets,nete + elem(ie)%derived%fT(:,:,:) = fld_gll(:,:,:,1,ie) + elem(ie)%derived%fM(:,:,1,:) = fld_gll(:,:,:,2,ie) + elem(ie)%derived%fM(:,:,2,:) = fld_gll(:,:,:,3,ie) + end do + do ie=nets,nete + do m_cnst=1,thermodynamic_active_species_num + ! + ! convert fq so that it will effectively overwrite SE q with CSLAM q + ! + elem(ie)%derived%fq(:,:,:,m_cnst) = fld_gll(:,:,:,m_cnst+3,ie)-& + qgll(:,:,:,m_cnst,ie) + end do + do m_cnst = 1,ntrac + fvm(ie)%fc(1:nc,1:nc,:,m_cnst) = fvm(ie)%fc_phys(1:nc,1:nc,:,m_cnst)*fvm(ie)%dp_fvm(1:nc,1:nc,:) + end do + end do + end if + deallocate(fld_phys,llimiter,fld_gll,qgll) + end subroutine phys2dyn_forcings_fvm + + subroutine fvm2dyn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimiter) + use dimensions_mod, only: np, nhc, nc + use hybrid_mod , only: hybrid_t + use bndry_mod , only: ghost_exchange + use edge_mod , only: ghostpack,ghostunpack + use fvm_mod , only: ghostBufQnhc_s + ! + integer , intent(in) :: nets,nete,num_flds,numlev + real (kind=r8), intent(inout) :: fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,numlev,num_flds,nets:nete) + real (kind=r8), intent(out) :: fld_gll(np,np,numlev,num_flds,nets:nete) + type (hybrid_t) , intent(in) :: hybrid + type(fvm_struct) , intent(in) :: fvm(nets:nete) + logical , intent(in) :: llimiter(num_flds) + integer :: ie, iwidth + ! + !********************************************* + ! + ! halo exchange + ! + !********************************************* + ! + do ie=nets,nete + call ghostpack(ghostBufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + call ghost_exchange(hybrid,ghostbufQnhc_s,location='fvm2dyn') + do ie=nets,nete + call ghostunpack(ghostbufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + ! + ! mapping + ! + iwidth=2 +! iwidth=1 !low-order mapping + do ie=nets,nete + call tensor_lagrange_interp(fvm(ie)%cubeboundary,np,nc,nhc,numlev,num_flds,fld_fvm(:,:,:,:,ie),& + fld_gll(:,:,:,:,ie),llimiter,iwidth,fvm(ie)%norm_elem_coord) + end do + end subroutine fvm2dyn + + + subroutine fill_halo_phys(fld_phys,hybrid,nets,nete,num_lev,num_flds) + use dimensions_mod, only: nhc_phys, fv_nphys + use hybrid_mod , only: hybrid_t + use bndry_mod , only: ghost_exchange + use edge_mod , only: ghostpack, ghostunpack + use fvm_mod , only: ghostBufPG_s + + integer , intent(in) :: nets,nete,num_lev,num_flds + real (kind=r8), intent(inout) :: fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,num_lev,num_flds, & + nets:nete) + type (hybrid_t) , intent(in) :: hybrid ! distributed parallel structure (shared) + + integer :: ie + ! + !********************************************* + ! + ! halo exchange + ! + !********************************************* + ! + call t_startf('fvm:fill_halo_phys') + do ie=nets,nete + call ghostpack(ghostBufPG_s, fld_phys(:,:,:,:,ie),num_lev*num_flds,0,ie) + end do + + call ghost_exchange(hybrid,ghostBufPG_s,location='fill_halo_phys') + + do ie=nets,nete + call ghostunpack(ghostBufPG_s, fld_phys(:,:,:,:,ie),num_lev*num_flds,0,ie) + end do + ! + call t_stopf('fvm:fill_halo_phys') + end subroutine fill_halo_phys + ! + ! 
must call fill_halo_phys before calling this subroutine + ! + subroutine phys2dyn(hybrid,elem,fld_phys,fld_gll,nets,nete,num_lev,num_flds,fvm,llimiter,istart_vector,halo_filled) + use dimensions_mod, only: np, nhc_phys, fv_nphys + use hybrid_mod, only : hybrid_t + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + integer , intent(in) :: nets,nete,num_flds,num_lev + real (kind=r8), intent(inout) :: fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,num_lev,num_flds, & + nets:nete) + real (kind=r8), intent(out) :: fld_gll(np,np,num_lev,num_flds,nets:nete) + type (element_t) , intent(inout) :: elem(:) + type(fvm_struct) , intent(in) :: fvm(:) + integer, optional , intent(in) :: istart_vector + logical , intent(in) :: llimiter(num_flds) + logical, optional , intent(in) :: halo_filled + + integer :: i, j, ie, k, iwidth + real (kind=r8) :: v1,v2 + + if (present(halo_filled)) then + if (.not.halo_filled) call fill_halo_phys(fld_phys,hybrid,nets,nete,num_lev,num_flds) + else + call fill_halo_phys(fld_phys,hybrid,nets,nete,num_lev,num_flds) + end if + if (present(istart_vector)) then + do ie=nets,nete + do k=1,num_lev + do j=1-nhc_phys,fv_nphys+nhc_phys + do i=1-nhc_phys,fv_nphys+nhc_phys + ! + ! convert lat-lon vectors to contra-variant gnomonic + ! + v1 = fld_phys(i,j,k,istart_vector ,ie) + v2 = fld_phys(i,j,k,istart_vector+1,ie) + fld_phys(i,j,k,istart_vector ,ie)=fvm(ie)%Dinv_physgrid(i,j,1,1)*v1 + fvm(ie)%Dinv_physgrid(i,j,1,2)*v2 + fld_phys(i,j,k,istart_vector+1,ie)=fvm(ie)%Dinv_physgrid(i,j,2,1)*v1 + fvm(ie)%Dinv_physgrid(i,j,2,2)*v2 + end do + end do + end do + end do + end if + ! + ! mapping + ! + iwidth=2 +! iwidth=1 + if (fv_nphys==1) iwidth=1 + do ie=nets,nete + call tensor_lagrange_interp(fvm(ie)%cubeboundary,np,fv_nphys,nhc_phys,num_lev,num_flds,fld_phys(:,:,:,:,ie),& + fld_gll(:,:,:,:,ie),llimiter,iwidth,fvm(ie)%norm_elem_coord_physgrid) + end do + + if (present(istart_vector)) then + ! + ! convert contra-variant to lat-lon + ! + do ie=nets,nete + do k=1,num_lev + do j=1,np + do i=1,np + v1 = fld_gll(i,j,k,istart_vector ,ie) + v2 = fld_gll(i,j,k,istart_vector+1,ie) + fld_gll(i,j,k,istart_vector ,ie) = elem(ie)%D(i,j,1,1)*v1 + elem(ie)%D(i,j,1,2)*v2 + fld_gll(i,j,k,istart_vector+1,ie) = elem(ie)%D(i,j,2,1)*v1 + elem(ie)%D(i,j,2,2)*v2 + end do + end do + end do + end do + end if + end subroutine phys2dyn + ! + ! map all mass variables from gll to fvm + ! 
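+  ! (ps, dp and tracer mass are integrated over the fvm control volumes with
+  !  subcell_integration; tracers are mapped as q*dp and converted back to a
+  !  mixing ratio via the pre-computed inverse of area*dp, with a simple
+  !  min/max limiter based on the gll values of q)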
+ subroutine dyn2fvm_mass_vars(dp_gll,ps_gll,q_gll,& + dp_fvm,ps_fvm,q_fvm,num_trac,metdet,inv_area) + use dimensions_mod, only: np, nc,nlev + integer, intent(in) :: num_trac + real (kind=r8), dimension(np,np,nlev) , intent(in) :: dp_gll + real (kind=r8), dimension(np,np,nlev,num_trac), intent(in) :: q_gll + real (kind=r8), dimension(np,np) , intent(in) :: ps_gll + + + real (kind=r8), dimension(nc,nc,nlev) , intent(inout) :: dp_fvm + real (kind=r8), dimension(nc,nc,nlev,num_trac), intent(inout) :: q_fvm + real (kind=r8), dimension(nc,nc) , intent(inout) :: ps_fvm + real (kind=r8), dimension(nc,nc) , intent(out) :: inv_area + + real (kind=r8), intent(in) :: metdet(np,np) + + real (kind=r8) :: se_area_sphere(nc,nc), tmp(np,np) + real (kind=r8) :: inv_darea_dp_fvm(nc,nc) + integer :: k,m_cnst + + tmp = 1.0_r8 + se_area_sphere = dyn2fvm(tmp,metdet) + inv_area = 1.0_r8/se_area_sphere + + ps_fvm(:,:) = dyn2fvm(ps_gll,metdet,inv_area) + do k=1,nlev + dp_fvm(:,:,k) = dyn2fvm(dp_gll(:,:,k),metdet,inv_area) + inv_darea_dp_fvm = inv_area/dp_fvm(:,:,k) + do m_cnst=1,num_trac + q_fvm(:,:,k,m_cnst) = & + dyn2fvm(q_gll(:,:,k,m_cnst)*dp_gll(:,:,k),metdet,& + inv_darea_dp_fvm,q_gll(:,:,k,m_cnst)) + end do + end do + end subroutine dyn2fvm_mass_vars + + ! + ! this subroutine assumes that the fvm halo has already been filled + ! (if nc/=fv_nphys) + ! + + subroutine dyn2phys_all_vars(nets,nete,elem,fvm,& + num_trac,ptop,tl,& + dp3d_phys,ps_phys,q_phys,T_phys,omega_phys,phis_phys) + use dimensions_mod, only: np, nc,nlev,fv_nphys + use dp_mapping, only: nphys_pts + use element_mod, only: element_t + integer, intent(in) :: nets,nete,num_trac,tl + + type(fvm_struct), dimension(nets:nete), intent(inout):: fvm + type(element_t), dimension(nets:nete), intent(in) :: elem + + real (kind=r8), intent(in) :: ptop + + real (kind=r8), dimension(nphys_pts,nets:nete) , intent(out) :: ps_phys,phis_phys + real (kind=r8), dimension(nphys_pts,nlev,nets:nete) , intent(out) :: dp3d_phys,T_phys,omega_phys + real (kind=r8), dimension(nphys_pts,nlev,num_trac,nets:nete) , intent(out) :: q_phys + + + real (kind=r8) :: tmp(np,np) + real (kind=r8), dimension(fv_nphys,fv_nphys) :: inv_area,inv_darea_dp_phys,dp3d_tmp + real (kind=r8), dimension(fv_nphys,fv_nphys,num_trac) :: q_phys_tmp + real (kind=r8), dimension(nc,nc) :: inv_darea_dp_fvm + integer :: k,m_cnst,ie + + + + !OMP BARRIER OMP MASTER needed + if (nc.ne.fv_nphys) then + save_max_overlap = 4 !max number of mass overlap areas between phys and fvm grids + allocate(save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete)) + allocate(save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete)) + allocate(save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete)) + allocate(save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete)) + allocate(save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete)) + allocate(save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete)) + save_num_overlap = 0 + allocate(save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete)) + end if + + do ie=nets,nete + tmp = 1.0_r8 + inv_area = 1.0_r8/dyn2phys(tmp,elem(ie)%metdet(:,:)) + phis_phys(:,ie) = RESHAPE(fvm(ie)%phis_physgrid,SHAPE(phis_phys(:,ie))) + ps_phys(:,ie) = ptop + if (nc.ne.fv_nphys) then + tmp = 1.0_r8 + do k=1,nlev + inv_darea_dp_fvm = dyn2fvm(elem(ie)%state%dp3d(:,:,k,tl),elem(ie)%metdet(:,:)) + inv_darea_dp_fvm = 1.0_r8/inv_darea_dp_fvm + + T_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%state%T(:,:,k,tl),elem(ie)%metdet(:,:),inv_area),SHAPE(T_phys(:,k,ie))) + 
Omega_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%derived%omega(:,:,k),elem(ie)%metdet(:,:),inv_area), & + SHAPE(Omega_phys(:,k,ie))) + call fvm2phys(ie,k,fvm(ie),fvm(ie)%c(:,:,k,:),q_phys_tmp,num_trac) + dp3d_phys(:,k,ie) = RESHAPE(save_dp_phys(:,:,k,ie),SHAPE(dp3d_phys(:,k,ie))) + ps_phys(:,ie) = ps_phys(:,ie)+RESHAPE(save_dp_phys(:,:,k,ie),SHAPE(ps_phys(:,ie))) + do m_cnst=1,num_trac + q_phys(:,k,m_cnst,ie) = RESHAPE(q_phys_tmp(:,:,m_cnst),SHAPE(q_phys(:,k,m_cnst,ie))) + end do + end do + else + do k=1,nlev + dp3d_tmp = dyn2phys(elem(ie)%state%dp3d(:,:,k,tl),elem(ie)%metdet(:,:),inv_area) + inv_darea_dp_phys = inv_area/dp3d_tmp + T_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%state%T(:,:,k,tl)*elem(ie)%state%dp3d(:,:,k,tl),elem(ie)%metdet(:,:),& + inv_darea_dp_phys),SHAPE(T_phys(:,k,ie))) + Omega_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%derived%OMEGA(:,:,k),elem(ie)%metdet(:,:),inv_area), & + SHAPE(Omega_phys(:,k,ie))) + ! + ! no mapping needed - just copy fields into physics structure + ! + dp3d_phys(:,k,ie) = RESHAPE(fvm(ie)%dp_fvm(1:nc,1:nc,k),SHAPE(dp3d_phys(:,k,ie))) + ps_phys(:,ie) = ps_phys(:,ie)+RESHAPE(fvm(ie)%dp_fvm(1:nc,1:nc,k),SHAPE(ps_phys(:,ie))) + do m_cnst=1,num_trac + q_phys(:,k,m_cnst,ie) = RESHAPE(fvm(ie)%c(1:nc,1:nc,k,m_cnst),SHAPE(q_phys(:,k,m_cnst,ie))) + end do + end do + end if + end do + end subroutine dyn2phys_all_vars + + + function dyn2phys(qdp_gll,metdet,inv_dp_darea_phys) result(qdp_phys) + use dimensions_mod, only: np, nc, fv_nphys + use derivative_mod, only: subcell_integration + real (kind=r8), intent(in) :: qdp_gll(np,np) + real (kind=r8) :: qdp_phys(fv_nphys,fv_nphys) + real (kind=r8), intent(in) :: metdet(np,np) + real (kind=r8), intent(in), optional :: inv_dp_darea_phys(fv_nphys,fv_nphys) + + call subcell_integration(qdp_gll(:,:), np, fv_nphys, metdet,qdp_phys,nc.ne.fv_nphys) + if (present(inv_dp_darea_phys)) & + qdp_phys = qdp_phys*inv_dp_darea_phys ! convert qdp to q + end function dyn2phys + + + function dyn2fvm(qdp_gll,metdet,inv_dp_darea_phys,q_gll) result(qdp_phys) + use dimensions_mod, only: np, nc + use derivative_mod, only: subcell_integration + real (kind=r8), intent(in) :: qdp_gll(np,np) + real (kind=r8), intent(in) :: metdet(np,np) + real (kind=r8), intent(in), optional :: inv_dp_darea_phys(nc,nc) + real (kind=r8), intent(in), optional :: q_gll(np,np) + real (kind=r8) :: qdp_phys(nc,nc), min_val, max_val + integer :: i,j + + call subcell_integration(qdp_gll(:,:), np, nc, metdet,qdp_phys) + if (present(inv_dp_darea_phys)) then + ! + ! convert qdp to q + ! + qdp_phys = qdp_phys*inv_dp_darea_phys + ! + ! simple limiter + ! + if (present(q_gll)) then + min_val = minval(q_gll) + max_val = maxval(q_gll) + do j = 1, nc + do i = 1, nc + ! + ! simple limiter: only coded for nc=3 and np4 + ! + qdp_phys(i,j) = max(min_val,min(max_val,qdp_phys(i,j))) + end do + end do + end if + end if + end function dyn2fvm + + function dyn2phys_vector(v_gll,elem) result(v_phys) + use dimensions_mod, only: np, nlev, fv_nphys + use interpolate_mod,only: interpdata_t,interpolate_2d,interpolate_t + use cube_mod ,only: dmap + use control_mod ,only: cubed_sphere_map + + type (interpdata_t):: interpdata + type (element_t), intent(in) :: elem + type (interpolate_t) , target :: interp_p + real (kind=r8), intent(in) :: v_gll(np,np,2,nlev) + real (kind=r8) :: v_phys(fv_nphys*fv_nphys,2,nlev) + + integer :: i,j,k + + ! Local variables + real (kind=r8) :: fld_contra(np,np,2,nlev) ! vector field + + real (kind=r8) :: v1,v2 + real (kind=r8) :: D(2,2,fv_nphys*fv_nphys) ! 
derivative of gnomonic mapping + ! + ! this could be done at initialization and does not need to be repeated + ! + call setup_interpdata_for_gll_to_phys_vec_mapping(interpdata, interp_p) + ! convert to contra + do k=1,nlev + do j=1,np + do i=1,np + ! latlon->contra + fld_contra(i,j,1,k) = elem%Dinv(i,j,1,1)*v_gll(i,j,1,k) + elem%Dinv(i,j,1,2)*v_gll(i,j,2,k) + fld_contra(i,j,2,k) = elem%Dinv(i,j,2,1)*v_gll(i,j,1,k) + elem%Dinv(i,j,2,2)*v_gll(i,j,2,k) + enddo + enddo + end do + + do k=1,nlev + do i=1,interpdata%n_interp + v_phys(i,1,k)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,1,k),interp_p,np) + v_phys(i,2,k)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,2,k),interp_p,np) + end do + end do + do i=1,interpdata%n_interp + ! convert fld from contra->latlon + call dmap(D(:,:,i),interpdata%interp_xy(i)%x,interpdata%interp_xy(i)%y,& + elem%corners3D,cubed_sphere_map,elem%corners,elem%u2qmap,elem%facenum) + end do + do k=1,nlev + do i=1,interpdata%n_interp + ! convert fld from contra->latlon + v1 = v_phys(i,1,k) + v2 = v_phys(i,2,k) + + v_phys(i,1,k)=D(1,1,i)*v1 + D(1,2,i)*v2 + v_phys(i,2,k)=D(2,1,i)*v1 + D(2,2,i)*v2 + end do + end do + end function dyn2phys_vector + + subroutine setup_interpdata_for_gll_to_phys_vec_mapping(interpdata,interp_p) + ! + ! initialize interpolation data structures to interpolate to phys grid + ! using interpolate_mod subroutines + ! + use interpolate_mod, only: interpolate_t, interpdata_t, interpolate_create + use dimensions_mod, only : np + use quadrature_mod, only : quadrature_t, gausslobatto + use dimensions_mod, only : fv_nphys + type (interpdata_t) , intent(out) :: interpdata + type (interpolate_t) , intent(out), target :: interp_p + + ! local + type (quadrature_t) :: gp_quadrature + integer i,j,ioff,ngrid + real (kind=r8) :: dx + + ngrid = fv_nphys*fv_nphys + interpdata%n_interp=ngrid + ! + ! initialize interpolation stuff related to basis functions + ! + gp_quadrature = gausslobatto(np) + call interpolate_create(gp_quadrature,interp_p) + allocate(interpdata%interp_xy(ngrid)) + allocate(interpdata%ilat(ngrid) ) + allocate(interpdata%ilon(ngrid) ) + ! + !WARNING: THIS CODE INTERFERES WITH LAT-LON OUTPUT + ! OF REGULAR SE IF nc>0 + ! 
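+    ! (interpolation targets are the physics-grid cell centers in the element
+    !  reference coordinates: x = -1 + (i-0.5)*dx and y = -1 + (j-0.5)*dx,
+    !  with dx = 2/fv_nphys)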
+ ioff=1 + dx = 2.0_r8/dble(fv_nphys) + do j=1,fv_nphys + do i=1,fv_nphys + interpdata%interp_xy(ioff)%x = -1_r8+(i-0.5_r8)*dx + interpdata%interp_xy(ioff)%y = -1_r8+(j-0.5_r8)*dx + interpdata%ilon(ioff) = i + interpdata%ilat(ioff) = j + ioff=ioff+1 + enddo + enddo + end subroutine setup_interpdata_for_gll_to_phys_vec_mapping + + + function lagrange_1d(src_grid,src_val,ngrid,dst_point,iwidth) result(val) + integer , intent(in) :: ngrid,iwidth + real (kind=r8), intent(in) :: src_grid(ngrid), src_val(ngrid) + real (kind=r8) :: val + + real (kind=r8), intent(in) :: dst_point + + integer :: iref, j,k + real (kind=r8) :: w(ngrid) + + if (dst_point.LE.src_grid(1)) then + iref=1 + else + iref=1 + do while (dst_point>src_grid(iref)) + iref = iref + 1 + if (iref>ngrid) then + exit + end if + end do + iref=iref-1 + end if + + iref=MIN(MAX(iref,iwidth),ngrid-iwidth) + + w = 1.0_r8 + do j=iref-(iwidth-1),iref+iwidth + do k=iref-(iwidth-1),iref+iwidth + if (k.ne.j) then + w(j)=w(j)*(dst_point-src_grid(k))/(src_grid(j)-src_grid(k)) + end if + end do + end do + + val=0.0_r8 + do j=iref-(iwidth-1),iref+iwidth + val=val+w(j)*src_val(j) + end do + end function lagrange_1d + + subroutine tensor_lagrange_interp(cubeboundary,np,nc,nhc,num_lev,nflds,psi,interp_value,llimiter,iwidth,norm_elem_coord) + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + implicit none + + integer , intent(in) :: cubeboundary,nc, np, iwidth,nhc,num_lev,nflds + logical , intent(in) :: llimiter(nflds) !apply limiter + real (kind=r8), intent(inout) :: psi(1-nhc:nc+nhc,1-nhc:nc+nhc,num_lev,nflds) !fvm grid values with filled halo + real (kind=r8), intent(out) :: interp_value(np,np,num_lev,nflds) !interpolated field + real (kind=r8), intent(in) :: norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc) + integer :: which_nc_cell(np) + + real (kind=r8):: dx,gll_points(np) + real (kind=r8):: nc_points(1-nc:nc+nc) + + real (kind=r8):: value(1-iwidth:nc+iwidth) + real (kind=r8):: val_tmp(1-nhc:nc+nhc,1-nhc:nc+nhc) + + real (kind=r8):: min_value(np,np,num_lev,nflds), max_value(np,np,num_lev,nflds) + + integer :: imin(1-nhc:nc+nhc), imax(1-nhc:nc+nhc) + integer :: k,i,j,isearch,igll,jgll,jrow,h,irow,itr + + gll_points(1) = -1.0_r8 + gll_points(2) = -sqrt(1.0_r8/5.0_r8) + gll_points(3) = sqrt(1.0_r8/5.0_r8) + gll_points(4) = 1.0_r8 + + dx = 2_r8/dble(nc) + do k=1-nc,2*nc + nc_points(k) = -1.0_r8+dx*0.5_r8+dble(k-1)*dx + end do + ! + ! find fvm point surrounding gll points for simple limiter + ! 
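+    ! (which_nc_cell(k) records, for each gll point, the index of the bracketing
+    !  fvm cell; when llimiter is set, the four surrounding cell averages at
+    !  which_nc_cell and which_nc_cell+1 in each direction give the min/max
+    !  bounds applied to the interpolant at the end of this routine)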
+ do k=1,np + do isearch=0,nc+1 + if (nc_points(isearch)4) then + h=1 + select case(cubeboundary) + case (nwest) + psi(0,nc+h ,:,itr) = psi(1-h,nc ,:,itr) + psi(1-h,nc+1,:,itr) = psi(1 ,nc+h,:,itr) + case (swest) + psi(1-h,0,:,itr) = psi(1,1-h,:,itr) + psi(0,1-h,:,itr) = psi(1-h,1,:,itr) + case (seast) + psi(nc+h,0,:,itr) = psi(nc,1-h,:,itr) + psi(nc+1,1-h,:,itr) = psi(nc+h,1,:,itr) + case (neast) + psi(nc+h,nc+1,:,itr) = psi(nc,nc+h,:,itr) + psi(nc+1,nc+h,:,itr) = psi(nc+h,nc,:,itr) + end select + end if + do k=1,num_lev + do j=1,np + do i=1,np + max_value(i,j,k,itr) = max(& + psi(which_nc_cell(i) ,which_nc_cell(j) ,k,itr),& + psi(which_nc_cell(i)+1,which_nc_cell(j) ,k,itr),& + psi(which_nc_cell(i) ,which_nc_cell(j)+1,k,itr),& + psi(which_nc_cell(i)+1,which_nc_cell(j)+1,k,itr) & + ) + min_value(i,j,k,itr) = min(& + psi(which_nc_cell(i) ,which_nc_cell(j) ,k,itr),& + psi(which_nc_cell(i)+1,which_nc_cell(j) ,k,itr),& + psi(which_nc_cell(i) ,which_nc_cell(j)+1,k,itr),& + psi(which_nc_cell(i)+1,which_nc_cell(j)+1,k,itr) & + ) + end do + end do + end do + end if + end do + + imin=1-nhc + imax=nc+nhc + ! + ! special corner treatment + ! + if (cubeboundary==swest) then + do itr=1,nflds + do k=1,num_lev + do jrow=1,nc+iwidth + ! + ! cubic along constant x (i=irow) in west halo to fvm points in halo + ! + do irow=1-iwidth,0 + val_tmp(irow,jrow) = lagrange_1d(norm_elem_coord(2,irow,1:nc+nhc),psi(irow,1:nc+nhc,k,itr),nc+nhc,& + norm_elem_coord(2,1,jrow),iwidth) + end do + end do + psi(1-iwidth:0,1:nc+iwidth,k,itr) = val_tmp(1-iwidth:0,1:nc+iwidth) + enddo + end do + imin(1-nhc:0) = 1 + end if + if (cubeboundary==nwest) then + do itr=1,nflds + do k=1,num_lev + do jrow=1-iwidth,nc + ! + ! cubic along constant x (i=irow) in west halo to fvm points in halo + ! + do irow=1-iwidth,0 + val_tmp(irow,jrow) = lagrange_1d(norm_elem_coord(2,irow,1-nhc:nc),psi(irow,1-nhc:nc,k,itr),nc+nhc,& + norm_elem_coord(2,1,jrow),iwidth) + end do + end do + psi(1-iwidth:0,1-iwidth:nc,k,itr) = val_tmp(1-iwidth:0,1-iwidth:nc) + end do + end do + imin(nc+1:nc+nhc) = 1 + end if + + if (cubeboundary==seast) then + do itr=1,nflds + do k=1,num_lev + do jrow=1,nc+iwidth + value=0.0_r8 + ! + ! cubic along constant y in ease halo to fvm points in halo + ! + do irow=nc+1,nc+iwidth + val_tmp(irow,jrow) = lagrange_1d(norm_elem_coord(2,irow,1:nc+nhc),psi(irow,1:nc+nhc,k,itr),nc+nhc,& + norm_elem_coord(2,1,jrow),iwidth) + end do + end do + psi(nc+1:nc+iwidth,1:nc+iwidth,k,itr) = val_tmp(nc+1:nc+iwidth,1:nc+iwidth) + end do + end do + imax(1-nhc:0) = nc + end if + + if (cubeboundary==neast) then + do itr=1,nflds + do k=1,num_lev + do jrow=1-iwidth,nc + ! + ! cubic along constant y in ease halo to fvm points in halo + ! + do irow=nc+1,nc+iwidth + val_tmp(irow,jrow) = lagrange_1d(norm_elem_coord(2,irow,1-nhc:nc),psi(irow,1-nhc:nc,k,itr),nc+nhc,& + norm_elem_coord(2,1,jrow),iwidth) + end do + end do + psi(nc+1:nc+iwidth,1-iwidth:nc,k,itr) = val_tmp(nc+1:nc+iwidth,1-iwidth:nc) + end do + end do + imax(nc+1:nc+nhc) = nc + end if + ! + ! mapping + ! + ! + if (cubeboundary==0.or.cubeboundary==north.or.cubeboundary==south.or.& + cubeboundary==swest.or.cubeboundary==nwest.or.& + cubeboundary==seast.or.cubeboundary==neast) then + do itr=1,nflds + do k=1,num_lev + do igll=1,np + ! + ! cubic along constant y (j=jrow) + ! 
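+                ! (tensor-product interpolation: a 1d lagrange interpolant of
+                !  width 2*iwidth in x along each row jrow, evaluated at
+                !  gll_points(igll), followed by a 1d interpolant in y through
+                !  those values at each gll_points(jgll))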
+ do jrow=1-iwidth,nc+iwidth + value(jrow) = lagrange_1d(norm_elem_coord(1,imin(jrow):imax(jrow),jrow),psi(imin(jrow):imax(jrow),jrow,k,itr),& + imax(jrow)-imin(jrow)+1,gll_points(igll),iwidth) + end do + do jgll=1,np + interp_value(igll,jgll,k,itr) = lagrange_1d(norm_elem_coord(2,1,1-iwidth:nc+iwidth),value,nc+2*iwidth,& + gll_points(jgll),iwidth) + end do + end do + end do + end do + else if (cubeboundary==east.or.cubeboundary==west) then + do itr=1,nflds + do k=1,num_lev + do jgll=1,np + ! + ! cubic along constant x (i=irow) + ! + do irow=1-iwidth,nc+iwidth + value(irow) = lagrange_1d(norm_elem_coord(2,irow,1-nhc:nc+nhc),psi(irow,1-nhc:nc+nhc,k,itr),nc+2*nhc,& + gll_points(jgll),iwidth) + end do + do igll=1,np + interp_value(igll,jgll,k,itr) = lagrange_1d(norm_elem_coord(1,1-iwidth:nc+iwidth,1),value,nc+2*iwidth,& + gll_points(igll),iwidth) + end do + end do + end do + end do + end if + do itr=1,nflds + if (llimiter(itr)) then + do k=1,num_lev + do j=1,np + do i=1,np + interp_value(i,j,k,itr)=max(min_value(i,j,k,itr),min(max_value(i,j,k,itr),interp_value(i,j,k,itr))) + end do + enddo + end do + end if + end do + end subroutine tensor_lagrange_interp + + subroutine fvm2phys(ie,k,fvm,q_fvm,q_phys,num_trac) + use dimensions_mod, only: nc,nhc,fv_nphys + ! + ! weights must be initialized in fvm2phys_init before using these functions + ! + type(fvm_struct) , intent(inout) :: fvm + integer , intent(in) :: ie,k + integer , intent(in) :: num_trac + + real (kind=r8), intent(inout) :: q_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,num_trac) + real (kind=r8), intent(out) :: q_phys(fv_nphys,fv_nphys,num_trac) + + real (kind=r8) :: recons (irecons_tracer,1:nc,1:nc,1) + + integer :: jx,jy + + call get_dp_overlap_save(ie,k,fvm,recons) + do jy=1,fv_nphys + do jx=1,fv_nphys + save_dp_phys(jx,jy,k,ie) = SUM(save_air_mass_overlap(1:save_num_overlap(jx,jy,k,ie),jx,jy,k,ie)) + end do + end do + call get_q_overlap_save(ie,k,fvm,q_fvm,num_trac,q_phys) + save_dp_phys(:,:,k,ie) = save_dp_phys(:,:,k,ie)/fvm%area_sphere_physgrid + end subroutine fvm2phys + + + subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) + use dimensions_mod, only: nhc_phys,fv_nphys,nc + integer , intent(in) :: ie,k + type(fvm_struct) , intent(inout) :: fvm + integer , intent(in) :: num_trac + real (kind=r8), intent(inout) :: fq_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,num_trac) + real (kind=r8), intent(out) :: fqdp_fvm (nc,nc,num_trac) + + + integer :: h,jx,jy,jdx,jdy,m_cnst + + real(kind=r8), dimension(fv_nphys,fv_nphys) :: phys_cdp_max, phys_cdp_min + + integer :: num + real(kind=r8) :: tmp,sum_dq_min,sum_dq_max,fq + + real(kind=r8) :: mass_phys(fv_nphys,fv_nphys) + real(kind=r8) :: min_patch,max_patch,gamma + real (kind=r8):: q_prev,mass_forcing,mass_forcing_phys + + real(kind=r8), allocatable, dimension(:,:,:) :: dq_min_overlap,dq_max_overlap + real(kind=r8), allocatable, dimension(:,:,:) :: dq_overlap + real(kind=r8), allocatable, dimension(:,:,:) :: fq_phys_overlap + + allocate(dq_min_overlap (save_max_overlap,fv_nphys,fv_nphys)) + allocate(dq_max_overlap (save_max_overlap,fv_nphys,fv_nphys)) + allocate(dq_overlap (save_max_overlap,fv_nphys,fv_nphys)) + allocate(fq_phys_overlap (save_max_overlap,fv_nphys,fv_nphys)) + + do m_cnst=1,num_trac + fqdp_fvm(:,:,m_cnst) = 0.0_r8 + call get_fq_overlap(ie,k,fvm,& + fq_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,m_cnst),save_max_overlap,& + fq_phys_overlap,1) + mass_phys(1:fv_nphys,1:fv_nphys) = fq_phys(1:fv_nphys,1:fv_nphys,m_cnst)*& + 
(save_dp_phys(1:fv_nphys,1:fv_nphys,k,ie)*fvm%area_sphere_physgrid) + + min_patch = MINVAL(fvm%c(0:nc+1,0:nc+1,k,m_cnst)) + max_patch = MAXVAL(fvm%c(0:nc+1,0:nc+1,k,m_cnst)) + do jy=1,fv_nphys + do jx=1,fv_nphys + num = save_num_overlap(jx,jy,k,ie) +#ifdef debug_coupling + save_q_overlap(:,jx,jy,k,m_cnst,ie) = 0.0_r8 + save_q_phys(jx,jy,k,m_cnst,ie) = 0.0_r8 + tmp = save_q_phys(jx,jy,k,m_cnst,ie)+fq_phys(jx,jy,m_cnst) !updated physics grid mixing ratio + phys_cdp_max(jx,jy)= MAX(max_patch,tmp) + phys_cdp_min(jx,jy)= MIN(min_patch,tmp) +#else + tmp = save_q_phys(jx,jy,k,m_cnst,ie)+fq_phys(jx,jy,m_cnst) !updated physics grid mixing ratio + phys_cdp_max(jx,jy)= MAX(MAX(MAXVAL(save_q_overlap(1:num,jx,jy,k,m_cnst,ie)),max_patch),tmp) + phys_cdp_min(jx,jy)= MIN(MIN(MINVAL(save_q_overlap(1:num,jx,jy,k,m_cnst,ie)),min_patch),tmp) +#endif + ! + ! add high-order fq change when it does not violate monotonicity + ! + mass_forcing_phys = 0.0_r8 + do h=1,num + jdx = save_overlap_idx(1,h,jx,jy,ie); jdy = save_overlap_idx(2,h,jx,jy,ie) + q_prev = save_q_overlap(h,jx,jy,k,m_cnst,ie) +#ifndef skip_high_order_fq_map + save_q_overlap(h,jx,jy,k,m_cnst,ie) = save_q_overlap(h,jx,jy,k,m_cnst,ie)+fq_phys_overlap(h,jx,jy) + save_q_overlap(h,jx,jy,k,m_cnst,ie) = MIN(save_q_overlap(h,jx,jy,k,m_cnst,ie),phys_cdp_max(jx,jy)) + save_q_overlap(h,jx,jy,k,m_cnst,ie) = MAX(save_q_overlap(h,jx,jy,k,m_cnst,ie),phys_cdp_min(jx,jy)) + mass_forcing = (save_q_overlap(h,jx,jy,k,m_cnst,ie)-q_prev)*save_air_mass_overlap(h,jx,jy,k,ie) + mass_forcing_phys = mass_forcing_phys + mass_forcing + fqdp_fvm(jdx,jdy,m_cnst) = fqdp_fvm(jdx,jdy,m_cnst)+mass_forcing +#endif + ! + ! prepare for mass fixing algorithm + ! + dq_min_overlap(h,jx,jy) = save_q_overlap(h,jx,jy,k,m_cnst,ie)-phys_cdp_min(jx,jy) + dq_max_overlap (h,jx,jy) = save_q_overlap(h,jx,jy,k,m_cnst,ie)-phys_cdp_max(jx,jy) + end do + mass_phys(jx,jy) = mass_phys(jx,jy) -mass_forcing_phys + end do + end do + ! + ! let physics mass tendency remove excess mass (as defined above) first proportional to how much is availabe + ! +#ifdef mass_fix + do jy=1,fv_nphys + do jx=1,fv_nphys + ! + ! total mass change from physics on physics grid + ! + num = save_num_overlap(jx,jy,k,ie) + fq = mass_phys(jx,jy)/(fvm%area_sphere_physgrid(jx,jy)*save_dp_phys(jx,jy,k,ie)) + if (fq<0.0_r8) then + sum_dq_min = SUM(dq_min_overlap(1:num,jx,jy)*save_air_mass_overlap(1:num,jx,jy,k,ie)) + if (sum_dq_min>1.0E-14_r8) then + gamma=mass_phys(jx,jy)/sum_dq_min + do h=1,num + jdx = save_overlap_idx(1,h,jx,jy,ie); jdy = save_overlap_idx(2,h,jx,jy,ie) + fqdp_fvm(jdx,jdy,m_cnst) = fqdp_fvm(jdx,jdy,m_cnst)& + +gamma*dq_min_overlap(h,jx,jy)*save_air_mass_overlap(h,jx,jy,k,ie) + end do + end if + end if + + if (fq>0.0_r8) then + sum_dq_max = SUM(dq_max_overlap(1:num,jx,jy)*save_air_mass_overlap(1:num,jx,jy,k,ie)) + if (sum_dq_max<-1.0E-14_r8) then + gamma=mass_phys(jx,jy)/sum_dq_max + do h=1,num + jdx = save_overlap_idx(1,h,jx,jy,ie); jdy = save_overlap_idx(2,h,jx,jy,ie) + fqdp_fvm(jdx,jdy,m_cnst) = fqdp_fvm(jdx,jdy,m_cnst)& + +gamma*dq_max_overlap(h,jx,jy)*save_air_mass_overlap(h,jx,jy,k,ie) + end do + end if + end if + end do + end do +#endif + ! + ! convert to mass per unit area + ! + fqdp_fvm(:,:,m_cnst) = fqdp_fvm(:,:,m_cnst)*fvm%inv_area_sphere(:,:) + end do + deallocate(dq_min_overlap) + deallocate(dq_max_overlap) + deallocate(fq_phys_overlap) + end subroutine phys2fvm + + + subroutine get_dp_overlap_save(ie,k,fvm,recons) + use dimensions_mod, only: nc,nhr,nhc + ! + ! 
weights must be initialized in fvm2phys_init before using these functions + ! + use dp_mapping, only: weights_all_fvm2phys, weights_eul_index_all_fvm2phys + use dp_mapping, only: weights_lgr_index_all_fvm2phys, jall_fvm2phys + ! + ! setting nhe=0 because we do not need reconstruction outside of element + ! + integer, parameter :: nh = nhr!+(nhe-1) ! = 2 (nhr=2; nhe_local=1),! = 3 (nhr=2; nhe_local=2) + + type(fvm_struct) , intent(inout):: fvm + integer , intent(in) :: ie, k + + real (kind=r8) , intent(out) :: recons (irecons_tracer,nc,nc) + logical :: llimiter(1) + integer :: h,jx,jy,jdx,jdy,idx + llimiter=.false. + call get_fvm_recons(fvm,fvm%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,k),recons,1,llimiter) + + do h=1,jall_fvm2phys(ie) + jx = weights_lgr_index_all_fvm2phys(h,1,ie); jy = weights_lgr_index_all_fvm2phys(h,2,ie) + jdx = weights_eul_index_all_fvm2phys(h,1,ie); jdy = weights_eul_index_all_fvm2phys(h,2,ie) + save_num_overlap(jx,jy,k,ie) = save_num_overlap(jx,jy,k,ie)+1!could be pre-computed + idx = save_num_overlap(jx,jy,k,ie) + save_overlap_idx(1,idx,jx,jy,ie) = jdx; save_overlap_idx(2,idx,jx,jy,ie) = jdy; + save_overlap_area(idx,jx,jy,ie) = weights_all_fvm2phys(h,1,ie) + save_air_mass_overlap(idx,jx,jy,k,ie) = SUM(weights_all_fvm2phys(h,:,ie)*recons(:,jdx,jdy)) +#ifdef PCoM + save_air_mass_overlap(idx,jx,jy,k,ie) = fvm%dp_fvm(jdx,jdy,k)*weights_all_fvm2phys(h,1,ie)!PCoM +#endif + end do + + end subroutine get_dp_overlap_save + + + subroutine get_fq_overlap(ie,k,fvm,fq_phys,max_overlap,fq_phys_overlap,num_trac) + use dimensions_mod, only: fv_nphys, nhc_phys, nc + use dp_mapping, only: weights_lgr_index_all_fvm2phys, jall_fvm2phys + use dp_mapping, only: weights_eul_index_all_fvm2phys + use dp_mapping, only: weights_lgr_index_all_phys2fvm, weights_eul_index_all_phys2fvm,jall_phys2fvm + use dp_mapping, only: weights_all_phys2fvm + + integer , intent(in) :: ie,k + type(fvm_struct) , intent(in) :: fvm + integer , intent(in) :: num_trac, max_overlap + real(kind=r8), dimension(max_overlap,fv_nphys,fv_nphys,num_trac),intent(out) :: fq_phys_overlap + + real (kind=r8), intent(inout) :: fq_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,num_trac) + real (kind=r8) :: recons_q (irecons_tracer,fv_nphys,fv_nphys,num_trac) + integer :: num_overlap(fv_nphys,fv_nphys) + logical :: llimiter_q(num_trac) + integer :: h,jx,jy,m_cnst,jdx,jdy + real (kind=r8) :: dp_tmp + integer :: idx,jxx,jyy,jdxx,jdyy,hh + real(kind=r8) :: weights_all_phys2fvm_local((nc+fv_nphys)**2,irecons_tracer) + ! + ! could be pre-computed + ! + do h=1,jall_fvm2phys(ie) + jx = weights_lgr_index_all_fvm2phys(h,1,ie) + jy = weights_lgr_index_all_fvm2phys(h,2,ie) + jdx = weights_eul_index_all_fvm2phys(h,1,ie) + jdy = weights_eul_index_all_fvm2phys(h,2,ie) + do hh=1,jall_phys2fvm(ie) + jxx = weights_lgr_index_all_phys2fvm(hh,1,ie) + jyy = weights_lgr_index_all_phys2fvm(hh,2,ie) + jdxx = weights_eul_index_all_phys2fvm(hh,1,ie) + jdyy = weights_eul_index_all_phys2fvm(hh,2,ie) + if (jx==jdxx.and.jy==jdyy.and.jdx==jxx.and.jdy==jyy) then + weights_all_phys2fvm_local(h,:) = weights_all_phys2fvm(hh,:,ie) + exit + end if + end do + end do + + llimiter_q=.false. + call get_physgrid_recons(fvm,fq_phys,recons_q,num_trac,llimiter_q) + ! + ! q-dp coupling as described in equation (55) in Appendinx B of + ! Nair and Lauritzen, 2010: A Class of Deformational Flow Test Cases for Linear Transport Problems on the Sphere. + ! J. Comput. Phys.: Vol. 229, Issue 23, pp. 8868-8887, DOI:10.1016/j.jcp.2010.08.014. + ! 
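+    ! (for each overlap area the forcing computed below is the air-mass-weighted
+    !  blend of the high-order physics-grid reconstruction of fq and its cell mean:
+    !    fq_ovl = ( dp_fvm*sum(w*recons_q) + fq_phys*(m_ovl - dp_fvm*w(1)) ) / m_ovl
+    !  with m_ovl = save_air_mass_overlap and w the phys2fvm overlap weights)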
+ num_overlap = 0 + do h=1,jall_fvm2phys(ie) + jx = weights_lgr_index_all_fvm2phys(h,1,ie) + jy = weights_lgr_index_all_fvm2phys(h,2,ie) + jdx = weights_eul_index_all_fvm2phys(h,1,ie) + jdy = weights_eul_index_all_fvm2phys(h,2,ie) + num_overlap(jx,jy) = num_overlap(jx,jy)+1 + idx = num_overlap(jx,jy) + dp_tmp = save_air_mass_overlap(idx,jx,jy,k,ie)-fvm%dp_fvm(jdx,jdy,k)*weights_all_phys2fvm_local(h,1) + do m_cnst=1,num_trac + fq_phys_overlap(idx,jx,jy,m_cnst) = & + (fvm%dp_fvm(jdx,jdy,k)*SUM(weights_all_phys2fvm_local(h,:)*recons_q(:,jx,jy,m_cnst))+& + fq_phys(jx,jy,m_cnst)*dp_tmp)/save_air_mass_overlap(idx,jx,jy,k,ie) + end do + end do + end subroutine get_fq_overlap + + subroutine get_physgrid_recons(fvm,field_phys,recons_phys,num_trac,llimiter) + use dimensions_mod, only: fv_nphys,nhr_phys,nhc_phys,ns_phys + use fvm_reconstruction_mod, only: reconstruction + type(fvm_struct), intent(in) :: fvm + integer, intent(in) :: num_trac + real (kind=r8), intent(inout) :: field_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,num_trac) + real (kind=r8), intent(out) :: recons_phys(irecons_tracer,1:fv_nphys,1:fv_nphys,num_trac) + logical, intent(in) :: llimiter(num_trac) + + integer, dimension(3) :: jx_min_local, jx_max_local, jy_min_local, jy_max_local + + jx_min_local(1) = 1 ; jx_max_local(1) = fv_nphys+1 + jy_min_local(1) = 1 ; jy_max_local(1) = fv_nphys+1 + jx_min_local(2) = 0 ; jx_max_local(2) = -1 + jy_min_local(2) = 0 ; jy_max_local(2) = -1 + jx_min_local(3) = 0 ; jx_max_local(3) = -1 + jy_min_local(3) = 0 ; jy_max_local(3) = -1 + + call reconstruction(field_phys,1,1,recons_phys,irecons_tracer,llimiter,num_trac,& + fv_nphys,0,nhr_phys,nhc_phys,nhr_phys,ns_phys,nhr_phys,& + jx_min_local,jx_max_local,jy_min_local,jy_max_local,& + fvm%cubeboundary,fvm%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,:),& + fvm%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,:),& + fvm%spherecentroid_physgrid(:,1:fv_nphys,1:fv_nphys),& + fvm%recons_metrics_physgrid(:,1:fv_nphys,1:fv_nphys),& + fvm%recons_metrics_integral_physgrid(:,1:fv_nphys,1:fv_nphys) ,& + fvm%rot_matrix_physgrid,& + fvm%centroid_stretch_physgrid(1:7,1:fv_nphys,1:fv_nphys),& + fvm%vertex_recons_weights_physgrid(:,1:irecons_tracer-1,1:fv_nphys,1:fv_nphys),& + fvm%vtx_cart_physgrid(:,:,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + end subroutine get_physgrid_recons + + subroutine get_fvm_recons(fvm,field_fvm,recons_fvm,num_trac,llimiter) + use dimensions_mod, only: nc,nhr,nhc,ns + use fvm_reconstruction_mod, only: reconstruction + + type(fvm_struct), intent(in) :: fvm + integer, intent(in) :: num_trac + logical, intent(in) :: llimiter(num_trac) + real (kind=r8), intent(inout):: field_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,1,num_trac) + real (kind=r8), intent(out) :: recons_fvm(irecons_tracer,nc,nc,num_trac) + integer :: jx_min_local(3), jx_max_local(3), jy_min_local(3), jy_max_local(3) + + jx_min_local(1) = 1 ; jx_max_local(1) = nc+1 + jy_min_local(1) = 1 ; jy_max_local(1) = nc+1 + jx_min_local(2) = 0 ; jx_max_local(2) = -1 + jy_min_local(2) = 0 ; jy_max_local(2) = -1 + jx_min_local(3) = 0 ; jx_max_local(3) = -1 + jy_min_local(3) = 0 ; jy_max_local(3) = -1 + + call reconstruction(field_fvm,1,1,recons_fvm,irecons_tracer,& + llimiter,num_trac,nc,0,nhr,nhc,nhr,ns,nhr,& + jx_min_local,jx_max_local,jy_min_local,jy_max_local,& + fvm%cubeboundary,fvm%halo_interp_weight(1:ns,1-nhr:nc+nhr,1:nhr,:),fvm%ibase(1-nhr:nc+nhr,1:nhr,:),& + fvm%spherecentroid(:,1:nc,1:nc),& + 
fvm%recons_metrics(:,1:nc,1:nc),& + fvm%recons_metrics_integral(:,1:nc,1:nc) ,& + fvm%rot_matrix,fvm%centroid_stretch(1:7,1:nc,1:nc),& + fvm%vertex_recons_weights(:,1:irecons_tracer-1,1:nc,1:nc),& + fvm%vtx_cart(:,:,1-nhc:nc+nhc,1-nhc:nc+nhc)) + end subroutine get_fvm_recons + + subroutine get_q_overlap_save(ie,k,fvm,q_fvm,num_trac,q_phys) + use dimensions_mod, only: nc,nhr,nhc,fv_nphys + ! + ! weights must be initialized in fvm2phys_init before using these functions + ! + use dp_mapping, only: weights_all_fvm2phys, weights_eul_index_all_fvm2phys + use dp_mapping, only: weights_lgr_index_all_fvm2phys, jall_fvm2phys + ! + ! setting nhe=0 because we do not need reconstruction outside of element + ! + integer, parameter :: nhe_local=0 + integer, parameter :: nh = nhr!+(nhe-1) ! = 2 (nhr=2; nhe_local=1),! = 3 (nhr=2; nhe_local=2) + + type(fvm_struct) , intent(inout) :: fvm + integer , intent(in) :: ie, k + integer , intent(in) :: num_trac + + real(kind=r8), dimension(1-nhc:nc+nhc,1-nhc:nc+nhc,1:num_trac) :: q_fvm + real(kind=r8), dimension(fv_nphys,fv_nphys, num_trac), intent(out):: q_phys + + real (kind=r8) :: recons_q (irecons_tracer,1:nc,1:nc,num_trac) + logical :: llimiter_q(num_trac) + integer :: h,jx,jy,jdx,jdy,m_cnst,idx + real (kind=r8) :: dp_tmp, dp_fvm_tmp, tmp + real (kind=r8) :: dp_phys_inv(fv_nphys,fv_nphys) + integer, dimension(fv_nphys,fv_nphys):: num_overlap + + llimiter_q=.true. + call get_fvm_recons(fvm,q_fvm,recons_q,num_trac,llimiter_q) + num_overlap(:,:) = 0 + q_phys = 0.0_r8 + do h=1,jall_fvm2phys(ie) + jx = weights_lgr_index_all_fvm2phys(h,1,ie); jy = weights_lgr_index_all_fvm2phys(h,2,ie) + jdx = weights_eul_index_all_fvm2phys(h,1,ie); jdy = weights_eul_index_all_fvm2phys(h,2,ie) + + num_overlap(jx,jy) = num_overlap(jx,jy)+1 + idx = num_overlap(jx,jy) + + dp_fvm_tmp = fvm%dp_fvm(jdx,jdy,k) + dp_tmp = save_air_mass_overlap(idx,jx,jy,k,ie)-dp_fvm_tmp*weights_all_fvm2phys(h,1,ie) +#ifdef PCoM + dp_tmp = save_air_mass_overlap(idx,jx,jy,k,ie) +#endif + do m_cnst=1,num_trac + tmp = dp_fvm_tmp*SUM(weights_all_fvm2phys(h,:,ie)*recons_q(:,jdx,jdy,m_cnst))+q_fvm(jdx,jdy,m_cnst)*dp_tmp +#ifdef PCoM + tmp = dp_fvm_tmp*weights_all_fvm2phys(h,1,ie)*q_fvm(jdx,jdy,m_cnst) +#endif + save_q_overlap(idx,jx,jy,k,m_cnst,ie) = tmp/save_air_mass_overlap(idx,jx,jy,k,ie) + q_phys(jx,jy,m_cnst) = q_phys(jx,jy,m_cnst)+tmp + end do + end do + ! + ! q_phys holds mass - convert to mixing ratio + ! + dp_phys_inv = 1.0_r8/save_dp_phys(:,:,k,ie)!*fvm%area_sphere_physgrid) + do m_cnst=1,num_trac + q_phys(:,:,m_cnst) = q_phys(:,:,m_cnst)*dp_phys_inv + save_q_phys(:,:,k,m_cnst,ie) = q_phys(:,:,m_cnst) + end do + end subroutine get_q_overlap_save + + +end module fvm_mapping diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 new file mode 100644 index 00000000..b8426b7c --- /dev/null +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -0,0 +1,953 @@ +#define FVM_TIMERS .FALSE. +!-----------------------------------------------------------------------------! +!MODULE FVM_MOD-----------------------------------------------------CE-for FVM! +! FVM_MOD File for the fvm project in HOMME ! +! Author: Christoph Erath ! +! Date: 25.January 2011 ! +! MAIN module to run fvm on HOMME ! +! 14.November 2011: reorganisation done ! +! 7.Februar 2012: cslam_run and cslam_runair ! +!-----------------------------------------------------------------------------! 
+ +module fvm_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use edge_mod, only: initghostbuffer, freeghostbuffer, ghostpack, ghostunpack + use edgetype_mod, only: edgebuffer_t + use bndry_mod, only: ghost_exchange + use thread_mod, only: horz_num_threads, vert_num_threads + + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use hybrid_mod, only: hybrid_t + + implicit none + private + save + + type (EdgeBuffer_t) :: edgeveloc + type (EdgeBuffer_t), public :: ghostBufQnhc_s + type (EdgeBuffer_t), public :: ghostBufQnhc_vh + type (EdgeBuffer_t), public :: ghostBufQnhc_h + type (EdgeBuffer_t), public :: ghostBufQ1_h + type (EdgeBuffer_t), public :: ghostBufQ1_vh +! type (EdgeBuffer_t), private :: ghostBufFlux_h + type (EdgeBuffer_t), public :: ghostBufFlux_vh + type (EdgeBuffer_t), public :: ghostBufQnhcJet_h + type (EdgeBuffer_t), public :: ghostBufFluxJet_h + type (EdgeBuffer_t), public :: ghostBufPG_s + + interface fill_halo_fvm + module procedure fill_halo_fvm_noprealloc + module procedure fill_halo_fvm_prealloc + end interface + + + public :: edgeveloc, fvm_init1,fvm_init2, fill_halo_fvm, fvm_pg_init,fvm_init3,fill_halo_and_extend_panel + +contains + + subroutine fill_halo_fvm_noprealloc(elem,fvm,hybrid,nets,nete,ndepth,kmin,kmax,ksize) + use perf_mod, only : t_startf, t_stopf ! _EXTERNAL + use dimensions_mod, only: nc, ntrac, nlev + implicit none + type (element_t),intent(inout) :: elem(:) + type (fvm_struct),intent(inout) :: fvm(:) + type (hybrid_t),intent(in) :: hybrid + + type (edgeBuffer_t) :: cellghostbuf + + integer,intent(in) :: nets,nete + integer,intent(in) :: ndepth ! depth of halo + integer,intent(in) :: kmin,kmax ! min and max vertical level + integer,intent(in) :: ksize ! the total number of vertical + + integer :: ie,i1,i2,kblk,kptr,q + ! + ! + + if(kmin .ne. 1 .or. kmax .ne. nlev) then + print *,'WARNING: fill_halo_fvm_noprealloc does not support the passing of non-contigous arrays' + print *,'WARNING: incorrect answers are likely' + endif + if(FVM_TIMERS) call t_startf('FVM:initbuf') + i1=1-ndepth + i2=nc+ndepth + kblk = kmax-kmin+1 + call initghostbuffer(hybrid%par,cellghostbuf,elem,kblk*(ntrac+1),ndepth,nc) + if(FVM_TIMERS) call t_stopf('FVM:initbuf') + if(FVM_TIMERS) call t_startf('FVM:pack') + do ie=nets,nete + kptr = kmin-1 + call ghostpack(cellghostbuf, fvm(ie)%dp_fvm(i1:i2,i1:i2,kmin:kmax),kblk, kptr,ie) + do q=1,ntrac + kptr = kptr + ksize + call ghostpack(cellghostbuf, fvm(ie)%c(i1:i2,i1:i2,kmin:kmax,q) ,kblk,kptr,ie) + enddo + end do + if(FVM_TIMERS) call t_stopf('FVM:pack') + if(FVM_TIMERS) call t_startf('FVM:Communication') + call ghost_exchange(hybrid,cellghostbuf,location='fill_halo_fvm_noprealloc') + if(FVM_TIMERS) call t_stopf('FVM:Communication') + !-----------------------------------------------------------------------------------! + if(FVM_TIMERS) call t_startf('FVM:Unpack') + do ie=nets,nete + kptr = kmin-1 + call ghostunpack(cellghostbuf, fvm(ie)%dp_fvm(i1:i2,i1:i2,kmin:kmax),kblk ,kptr,ie) + do q=1,ntrac + kptr = kptr + ksize + call ghostunpack(cellghostbuf, fvm(ie)%c(i1:i2,i1:i2,kmin:kmax,:), kblk,kptr,ie) + enddo + enddo + if(FVM_TIMERS) call t_stopf('FVM:Unpack') + if(FVM_TIMERS) call t_startf('FVM:freebuf') + call freeghostbuffer(cellghostbuf) + if(FVM_TIMERS) call t_stopf('FVM:freebuf') + end subroutine fill_halo_fvm_noprealloc + +subroutine fill_halo_fvm_prealloc(cellghostbuf,elem,fvm,hybrid,nets,nete,ndepth,kmin,kmax,ksize,active) + use perf_mod, only : t_startf, t_stopf ! 
_EXTERNAL + use dimensions_mod, only: nc, ntrac, nlev + implicit none + type (EdgeBuffer_t), intent(inout) :: cellghostbuf + type (element_t),intent(inout) :: elem(:) + type (fvm_struct),intent(inout) :: fvm(:) + type (hybrid_t),intent(in) :: hybrid + + + integer,intent(in) :: nets,nete + integer,intent(in) :: ndepth ! depth of halo + integer,intent(in) :: kmin,kmax ! min and max vertical level + integer,intent(in) :: ksize ! the total number of vertical + logical, optional :: active ! indicates if te current thread is active + integer :: ie,i1,i2,kblk,q,kptr + ! + ! + logical :: lactive + + if(present(active)) then + lactive = active + else + lactive = .true. + endif +! call t_startf('FVM:initbuf') + i1=1-ndepth + i2=nc+ndepth + kblk = kmax-kmin+1 + if(FVM_TIMERS) call t_startf('FVM:pack') + if(lactive) then + do ie=nets,nete + kptr = kmin-1 + call ghostpack(cellghostbuf, fvm(ie)%dp_fvm(i1:i2,i1:i2,kmin:kmax),kblk, kptr,ie) + do q=1, ntrac + kptr = kptr + ksize + call ghostpack(cellghostbuf, fvm(ie)%c(i1:i2,i1:i2,kmin:kmax,q) ,kblk,kptr,ie) + enddo + end do + endif + if(FVM_TIMERS) call t_stopf('FVM:pack') + if(FVM_TIMERS) call t_startf('FVM:Communication') + call ghost_exchange(hybrid,cellghostbuf,location='fill_halo_fvm_prealloc') + if(FVM_TIMERS) call t_stopf('FVM:Communication') + !-----------------------------------------------------------------------------------! + if(FVM_TIMERS) call t_startf('FVM:Unpack') + if(lactive) then + do ie=nets,nete + kptr = kmin-1 + call ghostunpack(cellghostbuf, fvm(ie)%dp_fvm(i1:i2,i1:i2,kmin:kmax),kblk, kptr,ie) + do q=1, ntrac + kptr = kptr + ksize + call ghostunpack(cellghostbuf, fvm(ie)%c(i1:i2,i1:i2,kmin:kmax,q), kblk,kptr,ie) + enddo + enddo + endif + if(FVM_TIMERS) call t_stopf('FVM:Unpack') + + end subroutine fill_halo_fvm_prealloc + + subroutine PrintArray(i1,i2,array) + ! debug routine potentially called from any MPI rank + integer :: i1,i2 + real(kind=r8) :: array(i1:i2,i1:i2) + integer :: sz,i,ub + + sz = size(array,dim=1) + + if (sz == 9) then + do i=i2,i1,-1 + write(6,9) array(-2,i),array(-1,i), array(0,i), & + array( 1,i), array(2,i), array(3,i), & + array( 4,i), array(5,i), array(6,i) + enddo + endif + + 9 format('|',9(f10.1,'|')) + + + end subroutine + + + subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, ndepth,numlev,num_flds,lfill_halo,lextend_panel) + use hybrid_mod, only: hybrid_t + use edge_mod, only: initghostbuffer, freeghostbuffer, ghostpack, ghostunpack + + use fvm_reconstruction_mod, only: extend_panel_interpolate + use cam_abortutils, only: endrun + use dimensions_mod, only: fv_nphys,nhr,nhr_phys,nhc,nhc_phys,ns,ns_phys,nhe_phys,nc + use perf_mod, only : t_startf, t_stopf ! _EXTERNAL + + integer , intent(in) :: nets,nete,nphys,ndepth,numlev,num_flds,nhcc + real (kind=r8) , intent(inout) :: fld(1-nhcc:nphys+nhcc,1-nhcc:nphys+nhcc,numlev,num_flds,nets:nete) + type (hybrid_t) , intent(in) :: hybrid ! distributed parallel structure (shared) + type (element_t) , intent(inout) :: elem(:) + type(fvm_struct) , intent(in) :: fvm(:) + logical , intent(in) :: lfill_halo,lextend_panel +! real (kind=r8) , allocatable :: ftmp(:,:) +! real (kind=r8) :: ftmp(1-nhcc:nphys+nhcc,1-nhcc:nphys+nhcc,numlev,num_flds,nets:nete) + real (kind=r8), allocatable :: fld_tmp(:,:) + + integer :: ie,k,itr,nht_phys,nh_phys + type (edgeBuffer_t) :: cellghostbuf + + if (lfill_halo) then + ! + !********************************************* + ! + ! halo exchange + ! + !********************************************* + ! 
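+ ! The exchange below proceeds in four steps: allocate a ghost buffer sized for
+ ! numlev*num_flds fields with a halo of width nhcc, pack fld for each owned
+ ! element, perform the boundary exchange, then unpack and free the buffer.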
+ call t_startf('fill_halo_and_extend_panel initbuffer') + call initghostbuffer(hybrid%par,cellghostbuf,elem,numlev*num_flds,nhcc,nphys) + call t_stopf('fill_halo_and_extend_panel initbuffer') + do ie=nets,nete + call ghostpack(cellghostbuf, fld(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + call ghost_exchange(hybrid,cellghostbuf,location='fill_halo_and_extend_panel') + do ie=nets,nete + call ghostunpack(cellghostbuf, fld(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + call freeghostbuffer(cellghostbuf) + end if + if (lextend_panel) then + ! + !********************************************* + ! + ! extend panel + ! + !********************************************* + ! + if (nphys==fv_nphys) then + if (ndepth>nhr_phys) & + call endrun("fill_halo_and_extend_panel: ndepth>nhr_phys") + nht_phys = nhe_phys+nhr_phys + nh_phys = nhr_phys + allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)) + do ie=nets,nete + do itr=1,num_flds + do k=1,numlev + call extend_panel_interpolate(fv_nphys,nhc_phys,nhr_phys,nht_phys,ns_phys,nh_phys,& + fld(:,:,k,itr,ie),fvm(ie)%cubeboundary,& + fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nh_phys:fv_nphys+nh_phys,1:nhr_phys,:),& + fvm(ie)%ibase_physgrid(1-nh_phys:fv_nphys+nh_phys,1:nhr_phys,:),& + fld_tmp) + fld(1-ndepth:nphys+ndepth,1-ndepth:nphys+ndepth,k,itr,ie) = fld_tmp(1-ndepth:nphys+ndepth,1-ndepth:nphys+ndepth) + end do + end do + end do + deallocate(fld_tmp) + else if (nphys==nc) then + if (ndepth>nhr) & + call endrun("fill_halo_and_extend_panel: ndepth>nhr") + nhe_phys= 0 + nht_phys= nhe_phys+nhr + nh_phys = nhr + allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)) + do ie=nets,nete + do itr=1,num_flds + do k=1,numlev + call extend_panel_interpolate(nc,nhc,nhr,nht_phys,ns,nh_phys,& + fld(:,:,k,itr,ie),fvm(ie)%cubeboundary,& + fvm(ie)%halo_interp_weight(1:ns,1-nh_phys:nc+nh_phys,1:nhr,:),& + fvm(ie)%ibase(1-nh_phys:nc+nh_phys,1:nhr,:),& + fld_tmp) + fld(1-ndepth:nphys+ndepth,1-ndepth:nphys+ndepth,k,itr,ie) = fld_tmp(1-ndepth:nphys+ndepth,1-ndepth:nphys+ndepth) + end do + end do + end do + deallocate(fld_tmp) + else + call endrun("fill_halo_and_extend_panel: resolution not supported") + end if + end if + end subroutine fill_halo_and_extend_panel + + + ! initialize global buffers shared by all threads + subroutine fvm_init1(par,elem) + use parallel_mod, only: parallel_t + use cam_abortutils, only: endrun + use cam_logfile, only: iulog + use control_mod, only: rsplit + use dimensions_mod, only: qsize, qsize_d + use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet + use dimensions_mod, only: nc,nhe, nhc, nlev,ntrac, ntrac_d,ns, nhr + use dimensions_mod, only: large_Courant_incr + use dimensions_mod, only: kmin_jet,kmax_jet + + type (parallel_t) :: par + type (element_t),intent(inout) :: elem(:) + ! + if (ntrac>0) then + if (par%masterproc) then + write(iulog,*) " " + write(iulog,*) "|-----------------------------------------|" + write(iulog,*) "| FVM tracer transport scheme information |" + write(iulog,*) "|-----------------------------------------|" + write(iulog,*) " " + end if + if (ntrac>0) then + if (par%masterproc) then + write(iulog,*) "Running consistent SE-CSLAM, Lauritzen et al. (2017, MWR)." + write(iulog,*) "CSLAM = Conservative Semi-LAgrangian Multi-tracer scheme" + write(iulog,*) "Lauritzen et al., (2010), J. Comput. Phys." + write(iulog,*) " " + end if + end if + ! + ! PARAMETER ERROR CHECKING + ! 
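+ ! The checks below call endrun if: kmin_jet > kmax_jet, ntrac > ntrac_d,
+ ! rsplit is not a multiple of fvm_supercycling (or fvm_supercycling_jet)
+ ! while SE tracers are active, or large_Courant_incr is requested together
+ ! with level-dependent supercycling (fvm_supercycling /= fvm_supercycling_jet).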
+ if (kmin_jet>kmax_jet) & + call endrun("PARAMETER ERROR for fvm: kmin_jet must be < kmax_jet") + if (ntrac>ntrac_d) & + call endrun("PARAMETER ERROR for fvm: ntrac > ntrac_d") + + if (qsize>0.and.mod(rsplit,fvm_supercycling).ne.0) then + if (par%masterproc) then + write(iulog,*)'cannot supercycle fvm tracers with respect to se tracers' + write(iulog,*)'with this choice of rsplit =',rsplit + write(iulog,*)'rsplit must be a multiple of fvm_supercycling=',fvm_supercycling + end if + call endrun("PARAMETER ERROR for fvm: mod(rsplit,fvm_supercycling)<>0") + endif + + if (qsize>0.and.mod(rsplit,fvm_supercycling_jet).ne.0) then + if (par%masterproc) then + write(iulog,*)'cannot supercycle fvm tracers with respect to se tracers' + write(iulog,*)'with this choice of rsplit =',rsplit + write(iulog,*)'rsplit must be a multiple of fvm_supercycling_jet=',fvm_supercycling_jet + end if + call endrun("PARAMETER ERROR for fvm: mod(rsplit,fvm_supercycling_jet)<>0") + endif + + if (large_Courant_incr.and.(fvm_supercycling.ne.fvm_supercycling_jet)) then + if (par%masterproc) then + write(iulog,*)'Large Courant number increment requires no level dependent supercycling' + write(iulog,*)'i.e. fvm_supercycling must be equal to fvm_supercycling_jet' + end if + call endrun("PARAMETER ERROR for fvm: large_courant_incr requires fvm_supercycling=fvm_supercycling_jet") + endif + + if (par%masterproc) then + write(iulog,*) " " + write(iulog,*) "Done Tracer transport scheme information " + write(iulog,*) " " + end if + + + if (par%masterproc) write(iulog,*) "fvm resolution is nc*nc in each element: nc = ",nc + if (par%masterproc) write(iulog,*)'ntrac,ntrac_d=',ntrac,ntrac_d + if (par%masterproc) write(iulog,*)'qsize,qsize_d=',qsize,qsize_d + + if (nc.ne.3) then + if (par%masterproc) then + write(iulog,*) "Only nc==3 is supported for CSLAM" + endif + call endrun("PARAMETER ERRROR for fvm: only nc=3 supported for CSLAM") + end if + + if (par%masterproc) then + write(iulog,*) " " + if (ns==1) then + write(iulog,*) "ns==1: using no interpolation for mapping cell averages values across edges" + write(iulog,*) "Note: this is not a recommended setting - large errors at panel edges!" + else if (ns==2) then + write(iulog,*) "ns==2: using linear interpolation for mapping cell averages values across edges" + write(iulog,*) "Note that ns=4 is default CSLAM setting used in Lauritzen et al. (2010)" + write(iulog,*) "so this option is slightly less accurate (but the stencil is smaller near panel edges!)" + + else if (ns==3) then + write(iulog,*) "ns==3: using quadratic interpolation for mapping cell averages values across edges" + write(iulog,*) "Note that ns=4 is default CSLAM setting used in Lauritzen et al. (2010)" + write(iulog,*) "so this option is slightly less accurate (but the stencil is smaller near panel edges!)" + else if (ns==4) then + write(iulog,*) "ns==4: using cubic interpolation for mapping cell averages values across edges" + write(iulog,*) "This is default CSLAM setting used in Lauritzen et al. (2010)" + else + write(iulog,*) "Not a tested value for ns but it should work! You choose ns = ",ns + end if + + ! if (ns.NE.3) then + ! write(*,*) "In fvm_reconstruction_mod function matmul_w has been hard-coded for ns=3 for performance" + ! write(*,*) "Revert to general code - outcommented above" + ! call endrun("stopping") + ! 
end if + end if + + if (MOD(ns,2)==0.and.nhr+(nhe-1)+ns/2>nc+nc) then + write(iulog,*) "to run this combination of ns and nhr you need to increase nc to ",nhr+ns/2+nhe-1 + write(iulog,*) "You choose (ns,nhr,nc,nhe)=",ns,nhr,nc,nhe + call endrun("stopping") + end if + if (MOD(ns,2)==1.and.nhr+(ns-1)/2+(nhe-1)>nc+nc) then + write(iulog,*) "to run this combination of ns and nhr you need to increase nc to ",nhr+(ns-1)/2+nhe-1 + write(iulog,*) "You choose (ns,nhr,nc,nhe)=",ns,nhr,nc,nhe + call endrun("stopping") + end if + + if (nc==3.and.ns.ne.3) then + if (par%masterproc) then + write(iulog,*) "Recommended setting for nc=3 is ns=3 (linear interpolation in halo)" + write(iulog,*) "You choose ns=",ns + write(iulog,*) "Goto dimensions_mod to change value of ns" + write(iulog,*) "or outcomment call haltmop below (i.e. you know what you are doing!)" + endif + call endrun("stopping") + end if + end if + + if (nc==4.and.ns.ne.4) then + if (par%masterproc) then + write(iulog,*) "Recommended setting for nc=4 is ns=4 (cubic interpolation in halo)" + write(iulog,*) "You choose ns=",ns + write(iulog,*) "Goto dimensions_mod to change value of ns" + write(iulog,*) "or outcomment call haltmop below (i.e. you know what you are doing!)" + endif + call endrun("stopping") + end if + + if (nhe .ne. 1) then + if (par%masterproc) then + write(iulog,*) "PARAMETER ERROR for fvm: Number of halo zone for the extended" + write(iulog,*) "element nhe has to be 1, only this is available now! STOP!" + endif + call endrun("stopping") + end if + end subroutine fvm_init1 + + + + + + ! initialization that can be done in threaded regions + subroutine fvm_init2(elem,fvm,hybrid,nets,nete) + use fvm_control_volume_mod, only: fvm_mesh,fvm_set_cubeboundary + use bndry_mod, only: compute_ghost_corner_orientation + use dimensions_mod, only: nlev, nc, nhc, nhe, ntrac, ntrac_d, np + use dimensions_mod, only: nhc_phys, fv_nphys + use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet + use dimensions_mod, only: kmin_jet,kmax_jet + use hycoef, only: hyai, hybi, ps0 + use derivative_mod, only: subcell_integration + use physconst, only: thermodynamic_active_species_num + + type (fvm_struct) :: fvm(:) + type (element_t) :: elem(:) + type (hybrid_t) :: hybrid + integer :: ie,nets,nete,k,klev + real(kind=r8) :: one(np,np) + + one = 1.0_r8 + do ie=nets,nete + do k = 1, nlev + fvm(ie)%dp_ref(k) = ( hyai(k+1) - hyai(k) )*ps0 + ( hybi(k+1) - hybi(k) )*ps0 + fvm(ie)%dp_ref_inverse(k) = 1.0_r8/fvm(ie)%dp_ref(k) + end do + end do + + call compute_ghost_corner_orientation(hybrid,elem,nets,nete) + ! run some tests: + ! call test_ghost(hybrid,elem,nets,nete) + + do ie=nets,nete + call fvm_set_cubeboundary(elem(ie),fvm(ie)) + call fvm_mesh(elem(ie),fvm(ie)) + fvm(ie)%inv_area_sphere = 1.0_r8/fvm(ie)%area_sphere + ! + ! compute CSLAM areas consistent with SE area (at 1 degree they can be up to + ! 1E-6 different than the correct spherical areas used in CSLAM) + ! + call subcell_integration(one, np, nc, elem(ie)%metdet,fvm(ie)%inv_se_area_sphere) + fvm(ie)%inv_se_area_sphere = 1.0_r8/fvm(ie)%inv_se_area_sphere + + fvm(ie)%fc(:,:,:,:) = 0.0_r8 + fvm(ie)%fm(:,:,:,:) = 0.0_r8 + fvm(ie)%ft(:,:,: ) = 0.0_r8 + enddo + ! Need to allocate ghostBufQnhc after compute_ghost_corner_orientation because it + ! 
changes the values for reverse + + call initghostbuffer(hybrid%par,ghostBufQnhc_s,elem,nlev*(ntrac+1),nhc,nc,nthreads=1) + call initghostbuffer(hybrid%par,ghostBufQnhc_h,elem,nlev*(ntrac+1),nhc,nc,nthreads=horz_num_threads) + call initghostbuffer(hybrid%par,ghostBufQnhc_vh,elem,nlev*(ntrac+1),nhc,nc,nthreads=vert_num_threads*horz_num_threads) + klev = kmax_jet-kmin_jet+1 + call initghostbuffer(hybrid%par,ghostBufQ1_h,elem,klev*(ntrac+1),1,nc,nthreads=horz_num_threads) + call initghostbuffer(hybrid%par,ghostBufQ1_vh,elem,klev*(ntrac+1),1,nc,nthreads=vert_num_threads*horz_num_threads) +! call initghostbuffer(hybrid%par,ghostBufFlux_h,elem,4*nlev,nhe,nc,nthreads=horz_num_threads) + call initghostbuffer(hybrid%par,ghostBufFlux_vh,elem,4*nlev,nhe,nc,nthreads=vert_num_threads*horz_num_threads) + ! + ! preallocate buffers for physics-dynamics coupling + ! + if (fv_nphys.ne.nc) then + call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*(4+ntrac),nhc_phys,fv_nphys,nthreads=1) + else + call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*(3+thermodynamic_active_species_num),nhc_phys,fv_nphys,nthreads=1) + end if + + if (fvm_supercycling.ne.fvm_supercycling_jet) then + ! + ! buffers for running different fvm time-steps in the jet region + ! + klev = kmax_jet-kmin_jet+1 + call initghostbuffer(hybrid%par,ghostBufQnhcJet_h,elem,klev*(ntrac+1),nhc,nc,nthreads=horz_num_threads) + call initghostbuffer(hybrid%par,ghostBufFluxJet_h,elem,4*klev,nhe,nc,nthreads=horz_num_threads) + end if + end subroutine fvm_init2 + + + subroutine fvm_init3(elem,fvm,hybrid,nets,nete,irecons) + use control_mod , only: neast, nwest, seast, swest + use fvm_analytic_mod, only: compute_reconstruct_matrix + use dimensions_mod , only: fv_nphys + use dimensions_mod, only: nlev, nc, nhe, nlev, ntrac, ntrac_d,nhc + use coordinate_systems_mod, only: cartesian2D_t,cartesian3D_t + use coordinate_systems_mod, only: cubedsphere2cart, cart2cubedsphere + implicit none + type (element_t) ,intent(inout) :: elem(:) + type (fvm_struct),intent(inout) :: fvm(:) + type (hybrid_t) ,intent(in) :: hybrid + integer ,intent(in) :: nets,nete,irecons + ! + type (edgeBuffer_t) :: cellghostbuf + integer :: ie, ixy, ivertex, i, j,istart,itot,ishft,imin,imax + integer, dimension(2,4) :: unit_vec + integer :: rot90_matrix(2,2), iside + + type (cartesian2D_t) :: tmpgnom + type (cartesian2D_t) :: gnom + type(cartesian3D_t) :: tmpcart3d + + if (ntrac>0.and.nc.ne.fv_nphys) then + ! + ! fill the fvm halo for mapping in d_p_coupling if + ! physics grid resolution is different than fvm resolution + ! + call fill_halo_fvm(elem,fvm,hybrid,nets,nete,nhc,1,nlev,nlev) + end if + + + imin=1-nhc + imax=nc+nhc + ! + ! fill halo start + ! 
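+ ! itot counts the 2-D fields exchanged per element: 2 normalized element
+ ! coordinates + 4x2 vertex coordinates + 1 flux orientation + (irecons-1)
+ ! spherical centroid moments, i.e. 2+8+1+(irecons-1) = 9+irecons-1+2.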
+ itot=9+irecons-1+2 + call initghostbuffer(hybrid%par,cellghostbuf,elem,itot,nhc,nc) + do ie=nets,nete + istart = 0 + call ghostpack(cellghostbuf, fvm(ie)%norm_elem_coord(1,:,:),1,istart,ie) + istart = istart+1 + call ghostpack(cellghostbuf, fvm(ie)%norm_elem_coord(2,:,:),1,istart,ie) + istart = istart+1 + do ixy=1,2 + do ivertex=1,4 + call ghostpack(cellghostbuf, fvm(ie)%vtx_cart(ivertex,ixy,:,:) ,1,istart,ie) + istart = istart+1 + end do + end do + call ghostpack(cellghostbuf, fvm(ie)%flux_orient(1,:,:) ,1,istart,ie) + do ixy=1,irecons-1 + istart=istart+1 + call ghostpack(cellghostbuf, fvm(ie)%spherecentroid(ixy,:,:) ,1,istart,ie) + end do + end do + call ghost_exchange(hybrid,cellghostbuf,location='fvm_init3') + do ie=nets,nete + istart = 0 + call ghostunpack(cellghostbuf, fvm(ie)%norm_elem_coord(1,:,:),1,istart,ie) + istart = istart+1 + call ghostunpack(cellghostbuf, fvm(ie)%norm_elem_coord(2,:,:),1,istart,ie) + istart = istart+1 + do ixy=1,2 + do ivertex=1,4 + call ghostunpack(cellghostbuf, fvm(ie)%vtx_cart(ivertex,ixy,:,:) ,1,istart,ie) + istart = istart+1 + end do + end do + call ghostunpack(cellghostbuf, fvm(ie)%flux_orient(1,:,:) ,1,istart,ie) + do ixy=1,irecons-1 + istart=istart+1 + call ghostunpack(cellghostbuf, fvm(ie)%spherecentroid(ixy,:,:) ,1,istart,ie) + end do + enddo + call freeghostbuffer(cellghostbuf) + ! + ! indicator for non-existing cells + ! set vtx_cart to corner value in non-existent cells + ! + do ie=nets,nete + if (fvm(ie)%cubeboundary==nwest) then + fvm(ie)%flux_orient (: ,1-nhc :0 ,nc +1 :nc +nhc ) = -1 + fvm(ie)%spherecentroid (:, 1-nhc :0 ,nc +1 :nc +nhc ) = -1e5_r8 + fvm(ie)%vtx_cart(:,1,1-nhc:0 ,nc+1 :nc+nhc) = fvm(ie)%vtx_cart(4,1,1,nc) + fvm(ie)%vtx_cart(:,2,1-nhc:0 ,nc+1 :nc+nhc) = fvm(ie)%vtx_cart(4,2,1,nc) + else if (fvm(ie)%cubeboundary==swest) then + fvm(ie)%flux_orient (:,1-nhc :0 ,1-nhc :0 ) = -1 + fvm(ie)%spherecentroid (:,1-nhc :0 ,1-nhc :0 ) = -1e5_r8 + fvm(ie)%vtx_cart(:,1,1-nhc:0 ,1-nhc:0 ) = fvm(ie)%vtx_cart(1,1,1,1) + fvm(ie)%vtx_cart(:,2,1-nhc:0 ,1-nhc:0 ) = fvm(ie)%vtx_cart(1,2,1,1) + else if (fvm(ie)%cubeboundary==neast) then + fvm(ie)%flux_orient (:,nc +1 :nc +nhc ,nc +1 :nc +nhc ) = -1 + fvm(ie)%spherecentroid (:,nc +1 :nc +nhc ,nc +1 :nc +nhc ) = -1e5_r8 + fvm(ie)%vtx_cart(:,1,nc+1 :nc+nhc,nc+1 :nc+nhc) = fvm(ie)%vtx_cart(3,1,nc,nc) + fvm(ie)%vtx_cart(:,2,nc+1 :nc+nhc,nc+1 :nc+nhc) = fvm(ie)%vtx_cart(3,2,nc,nc) + else if (fvm(ie)%cubeboundary==seast) then + fvm(ie)%flux_orient (:,nc +1 :nc +nhc ,1-nhc :0 ) = -1 + fvm(ie)%spherecentroid (:,nc +1 :nc +nhc ,1-nhc :0 ) = -1e5_r8 + fvm(ie)%vtx_cart(:,1,nc+1 :nc+nhc,1-nhc:0 ) = fvm(ie)%vtx_cart(2,1,nc,1) + fvm(ie)%vtx_cart(:,2,nc+1 :nc+nhc,1-nhc:0 ) = fvm(ie)%vtx_cart(2,2,nc,1) + end if + end do + + ! + ! set vectors for perpendicular flux vector + ! + rot90_matrix(1,1) = 0; rot90_matrix(2,1) = 1 !counter-clockwise rotation matrix + rot90_matrix(1,2) =-1; rot90_matrix(2,2) = 0 !counter-clockwise rotation matrix + + iside = 1 + unit_vec(1,iside) = 0 !x-component of displacement vector for side 1 + unit_vec(2,iside) = 1 !y-component of displacement vector for side 1 + + do iside=2,4 + unit_vec(:,iside) = MATMUL(rot90_matrix(:,:),unit_vec(:,iside-1)) + end do + + ! + ! fill halo done + ! + !------------------------------- + + do ie=nets,nete + fvm(ie)%displ_max = 0.0_r8 + do j=imin,imax + do i=imin,imax + ! + ! rotate gnomonic coordinate vector + ! + ! fvm(ie)%norm_elem_coord(:,i,j) = MATMUL(fvm(ie)%rot_matrix(:,:,i,j),fvm(ie)%norm_elem_coord(:,i,j)) + ! 
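+ ! flux_orient(2,i,j) holds the panel-rotation index of halo cell (i,j); below
+ ! it is used as a cyclic shift so that the vertex ordering and flux unit
+ ! vectors of cells copied from a neighboring panel match this element's
+ ! orientation.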
+ ishft = NINT(fvm(ie)%flux_orient(2,i,j)) + do ixy=1,2 + ! + ! rotate coordinates if needed through permutation + ! + fvm(ie)%vtx_cart(1:4,ixy,i,j) = cshift(fvm(ie)%vtx_cart(1:4,ixy,i,j),shift=ishft) + fvm(ie)%flux_vec (ixy,i,j,1:4) = cshift(unit_vec (ixy,1:4 ),shift=ishft) + ! + ! set flux vector to zero in non-existent cells (corner halo) + ! + fvm(ie)%flux_vec (ixy,i,j,1:4) = fvm(ie)%ifct(i,j)*fvm(ie)%flux_vec(ixy,i,j,1:4) + + iside=1 + fvm(ie)%displ_max(i,j,iside) = fvm(ie)%displ_max(i,j,iside)+& + ABS(fvm(ie)%vtx_cart(4,ixy,i,j)-fvm(ie)%vtx_cart(1,ixy,i,j)) + iside=2 + fvm(ie)%displ_max(i,j,iside) = fvm(ie)%displ_max(i,j,iside)+& + ABS(fvm(ie)%vtx_cart(1,ixy,i,j)-fvm(ie)%vtx_cart(2,ixy,i,j)) + iside=3 + fvm(ie)%displ_max(i,j,iside) = fvm(ie)%displ_max(i,j,iside)+& + ABS(fvm(ie)%vtx_cart(2,ixy,i,j)-fvm(ie)%vtx_cart(3,ixy,i,j)) + iside=4 + fvm(ie)%displ_max(i,j,iside) = fvm(ie)%displ_max(i,j,iside)+& + ABS(fvm(ie)%vtx_cart(2,ixy,i,j)-fvm(ie)%vtx_cart(1,ixy,i,j)) + end do + end do + end do + end do + ! + ! pre-compute derived metric terms used for integration, polynomial + ! evaluation at fvm cell vertices, etc. + ! + do ie=nets,nete + call compute_reconstruct_matrix(nc,nhe,nhc,irecons,fvm(ie)%dalpha,fvm(ie)%dbeta,& + fvm(ie)%spherecentroid,fvm(ie)%vtx_cart,fvm(ie)%centroid_stretch,& + fvm(ie)%vertex_recons_weights,fvm(ie)%recons_metrics,fvm(ie)%recons_metrics_integral) + end do + ! + ! create a normalized element coordinate system with a halo + ! + do ie=nets,nete + do j=1-nhc,nc+nhc + do i=1-nhc,nc+nhc + ! + ! only compute for physically existent cells + ! + if (fvm(ie)%ifct(i,j)>0) then + gnom%x = fvm(ie)%norm_elem_coord(1,i,j) + gnom%y = fvm(ie)%norm_elem_coord(2,i,j) + ! + ! coordinate transform only necessary for points on another panel + ! + if (NINT(fvm(ie)%flux_orient(1,1,1)).NE.NINT(fvm(ie)%flux_orient(1,i,j))) then + tmpcart3d=cubedsphere2cart(gnom,NINT(fvm(ie)%flux_orient(1,i,j))) + tmpgnom=cart2cubedsphere(tmpcart3d,NINT(fvm(ie)%flux_orient(1,1,1))) + else + tmpgnom%x = fvm(ie)%norm_elem_coord(1,i,j) + tmpgnom%y = fvm(ie)%norm_elem_coord(2,i,j) + end if + ! + ! convert to element normalized coordinates + ! + fvm(ie)%norm_elem_coord(1,i,j) =(tmpgnom%x-elem(ie)%corners(1)%x)/& + (0.5_r8*dble(nc)*fvm(ie)%dalpha)-1.0_r8 + fvm(ie)%norm_elem_coord(2,i,j) =(tmpgnom%y-elem(ie)%corners(1)%y)/& + (0.5_r8*dble(nc)*fvm(ie)%dalpha)-1.0_r8 + else + fvm(ie)%norm_elem_coord(1,i,j) = 1D9 + fvm(ie)%norm_elem_coord(2,i,j) = 1D9 + end if + end do + end do + end do + + end subroutine fvm_init3 + + + subroutine fvm_pg_init(elem, fvm, hybrid, nets, nete,irecons) + use coordinate_systems_mod, only : cartesian2D_t,cartesian3D_t + use control_mod, only : neast, nwest, seast, swest + use coordinate_systems_mod, only : cubedsphere2cart, cart2cubedsphere + use dimensions_mod, only: fv_nphys, nhe_phys,nhc_phys + use dimensions_mod, only: ntrac_d + use cube_mod ,only: dmap + use control_mod ,only: cubed_sphere_map + use fvm_analytic_mod, only: compute_reconstruct_matrix + + type (element_t) , intent(in) :: elem(:) + type (fvm_struct), intent(inout) :: fvm(:) + type (hybrid_t) , intent(in) :: hybrid + + type (cartesian2D_t) :: gnom + type(cartesian3D_t) :: tmpcart3d + type (cartesian2D_t) :: tmpgnom + + + integer, intent(in) :: nets ! starting thread element number (private) + integer, intent(in) :: nete,irecons ! ending thread element number (private) + + ! ================================== + ! Local variables + ! 
================================== + + integer :: ie, ixy, ivertex, i, j,istart,itot,ishft,imin,imax + integer, dimension(2,4) :: unit_vec + integer :: rot90_matrix(2,2), iside + + type (edgeBuffer_t) :: cellghostbuf + + ! D is derivative of gnomonic mapping + real (kind=r8) :: D(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2) + real (kind=r8) :: detD,x1,x2 + + if (fv_nphys>0) then + ! + ! do the same as fvm_init3 for the metric terms of physgrid + ! + imin=1-nhc_phys + imax=fv_nphys+nhc_phys + ! + ! fill halo start + ! + itot=9+irecons-1+2 + call initghostbuffer(hybrid%par,cellghostbuf,elem,itot,nhc_phys,fv_nphys) + do ie=nets,nete + istart = 0 + call ghostpack(cellghostbuf, fvm(ie)%norm_elem_coord_physgrid(1,:,:),1,istart,ie) + istart = istart+1 + call ghostpack(cellghostbuf, fvm(ie)%norm_elem_coord_physgrid(2,:,:),1,istart,ie) + istart = istart+1 + do ixy=1,2 + do ivertex=1,4 + call ghostpack(cellghostbuf, fvm(ie)%vtx_cart_physgrid(ivertex,ixy,:,:) ,1,istart,ie) + istart = istart+1 + end do + end do + call ghostpack(cellghostbuf, fvm(ie)%flux_orient_physgrid(1,:,:) ,1,istart,ie) + do ixy=1,irecons-1 + istart=istart+1 + call ghostpack(cellghostbuf, fvm(ie)%spherecentroid_physgrid(ixy,:,:) ,1,istart,ie) + end do + end do + call ghost_exchange(hybrid,cellghostbuf,location='fvm_pg_init') + do ie=nets,nete + istart = 0 + call ghostunpack(cellghostbuf, fvm(ie)%norm_elem_coord_physgrid(1,:,:),1,istart,ie) + istart = istart+1 + call ghostunpack(cellghostbuf, fvm(ie)%norm_elem_coord_physgrid(2,:,:),1,istart,ie) + istart = istart+1 + do ixy=1,2 + do ivertex=1,4 + call ghostunpack(cellghostbuf, fvm(ie)%vtx_cart_physgrid(ivertex,ixy,:,:) ,1,istart,ie) + istart = istart+1 + end do + end do + call ghostunpack(cellghostbuf, fvm(ie)%flux_orient_physgrid(1,:,:) ,1,istart,ie) + do ixy=1,irecons-1 + istart=istart+1 + call ghostunpack(cellghostbuf, fvm(ie)%spherecentroid_physgrid(ixy,:,:) ,1,istart,ie) + end do + enddo + call freeghostbuffer(cellghostbuf) + ! + ! indicator for non-existing cells + ! set vtx_cart to corner value in non-existent cells + ! 
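+ ! Cube-corner halo cells do not exist physically; they are flagged with
+ ! flux_orient_physgrid = -1 and a large negative centroid, and their vertex
+ ! coordinates are collapsed onto the nearest existing corner cell (same
+ ! treatment as for the fvm grid in fvm_init3).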
+ do ie=nets,nete + if (fvm(ie)%cubeboundary==nwest) then + fvm(ie)%flux_orient_physgrid (: ,1-nhc_phys :0 ,fv_nphys +1 :fv_nphys +nhc_phys ) = -1 + fvm(ie)%spherecentroid_physgrid(:, 1-nhc_phys :0 ,fv_nphys +1 :fv_nphys +nhc_phys ) = -1e5_r8 + fvm(ie)%vtx_cart_physgrid(:,1,1-nhc_phys:0 ,fv_nphys+1 :fv_nphys+nhc_phys) = & + fvm(ie)%vtx_cart_physgrid(4,1,1,fv_nphys) + fvm(ie)%vtx_cart_physgrid(:,2,1-nhc_phys:0 ,fv_nphys+1 :fv_nphys+nhc_phys) = & + fvm(ie)%vtx_cart_physgrid(4,2,1,fv_nphys) + else if (fvm(ie)%cubeboundary==swest) then + fvm(ie)%flux_orient_physgrid (:,1-nhc_phys :0 ,1-nhc_phys :0 ) = -1 + fvm(ie)%spherecentroid_physgrid(:,1-nhc_phys :0 ,1-nhc_phys :0 ) = -1e5_r8 + fvm(ie)%vtx_cart_physgrid(:,1,1-nhc_phys:0 ,1-nhc_phys:0 ) = fvm(ie)%vtx_cart_physgrid(1,1,1,1) + fvm(ie)%vtx_cart_physgrid(:,2,1-nhc_phys:0 ,1-nhc_phys:0 ) = fvm(ie)%vtx_cart_physgrid(1,2,1,1) + else if (fvm(ie)%cubeboundary==neast) then + fvm(ie)%flux_orient_physgrid (:,fv_nphys +1 :fv_nphys +nhc_phys , & + fv_nphys +1 :fv_nphys +nhc_phys ) = -1 + fvm(ie)%spherecentroid_physgrid(:,fv_nphys +1 :fv_nphys +nhc_phys , & + fv_nphys +1 :fv_nphys +nhc_phys ) = -1e5_r8 + fvm(ie)%vtx_cart_physgrid(:,1,fv_nphys+1 :fv_nphys+nhc_phys,fv_nphys+1 :fv_nphys+nhc_phys) = & + fvm(ie)%vtx_cart_physgrid(3,1,fv_nphys,fv_nphys) + fvm(ie)%vtx_cart_physgrid(:,2,fv_nphys+1 :fv_nphys+nhc_phys,fv_nphys+1 :fv_nphys+nhc_phys) = & + fvm(ie)%vtx_cart_physgrid(3,2,fv_nphys,fv_nphys) + else if (fvm(ie)%cubeboundary==seast) then + fvm(ie)%flux_orient_physgrid (:,fv_nphys +1 :fv_nphys +nhc_phys ,1-nhc_phys :0 ) = -1 + fvm(ie)%spherecentroid_physgrid(:,fv_nphys +1 :fv_nphys +nhc_phys ,1-nhc_phys :0 ) = -1e5_r8 + fvm(ie)%vtx_cart_physgrid(:,1,fv_nphys+1 :fv_nphys+nhc_phys,1-nhc_phys:0 ) = & + fvm(ie)%vtx_cart_physgrid(2,1,fv_nphys,1) + fvm(ie)%vtx_cart_physgrid(:,2,fv_nphys+1 :fv_nphys+nhc_phys,1-nhc_phys:0 ) = & + fvm(ie)%vtx_cart_physgrid(2,2,fv_nphys,1) + end if + end do + + ! + ! set vectors for perpendicular flux vector + ! + rot90_matrix(1,1) = 0; rot90_matrix(2,1) = 1 !counter-clockwise rotation matrix + rot90_matrix(1,2) =-1; rot90_matrix(2,2) = 0 !counter-clockwise rotation matrix + + iside = 1 + unit_vec(1,iside) = 0 !x-component of displacement vector for side 1 + unit_vec(2,iside) = 1 !y-component of displacement vector for side 1 + + do iside=2,4 + unit_vec(:,iside) = MATMUL(rot90_matrix(:,:),unit_vec(:,iside-1)) + end do + + ! + ! fill halo done + ! + !------------------------------- + + do ie=nets,nete + do j=imin,imax + do i=imin,imax + ! + ! rotate gnomonic coordinate vector + ! + ishft = NINT(fvm(ie)%flux_orient_physgrid(2,i,j)) + do ixy=1,2 + ! + ! rotate coordinates if needed through permutation + ! + fvm(ie)%vtx_cart_physgrid(1:4,ixy,i,j) = cshift(fvm(ie)%vtx_cart_physgrid(1:4,ixy,i,j),shift=ishft) + end do + end do + end do + end do + ! + ! pre-compute derived metric terms used for integration, polynomial + ! evaluation at fvm cell vertices, etc. + ! + do ie=nets,nete + call compute_reconstruct_matrix(fv_nphys,nhe_phys,nhc_phys,irecons,fvm(ie)%dalpha_physgrid,fvm(ie)%dbeta_physgrid,& + fvm(ie)%spherecentroid_physgrid,fvm(ie)%vtx_cart_physgrid,fvm(ie)%centroid_stretch_physgrid,& + fvm(ie)%vertex_recons_weights_physgrid,fvm(ie)%recons_metrics_physgrid,fvm(ie)%recons_metrics_integral_physgrid) + end do + ! + ! code specific for physgrid + ! + ! + ! create a normalized element coordinate system with a halo + ! + do ie=nets,nete + do j=1-nhc_phys,fv_nphys+nhc_phys + do i=1-nhc_phys,fv_nphys+nhc_phys + ! + ! 
only compute for physically existent cells + ! + if (fvm(ie)%ifct_physgrid(i,j)>0) then + gnom%x = fvm(ie)%norm_elem_coord_physgrid(1,i,j) + gnom%y = fvm(ie)%norm_elem_coord_physgrid(2,i,j) + ! + ! coordinate transform only necessary for points on another panel + ! + if (NINT(fvm(ie)%flux_orient_physgrid(1,1,1)).NE.NINT(fvm(ie)%flux_orient_physgrid(1,i,j))) then + tmpcart3d=cubedsphere2cart(gnom,NINT(fvm(ie)%flux_orient_physgrid(1,i,j))) + tmpgnom=cart2cubedsphere(tmpcart3d,NINT(fvm(ie)%flux_orient_physgrid(1,1,1))) + else + tmpgnom%x = fvm(ie)%norm_elem_coord_physgrid(1,i,j) + tmpgnom%y = fvm(ie)%norm_elem_coord_physgrid(2,i,j) + end if + ! + ! convert to element normalized coordinates + ! + fvm(ie)%norm_elem_coord_physgrid(1,i,j) =(tmpgnom%x-elem(ie)%corners(1)%x)/& + (0.5_r8*dble(fv_nphys)*fvm(ie)%dalpha_physgrid)-1.0_r8 + fvm(ie)%norm_elem_coord_physgrid(2,i,j) =(tmpgnom%y-elem(ie)%corners(1)%y)/& + (0.5_r8*dble(fv_nphys)*fvm(ie)%dalpha_physgrid)-1.0_r8 + else + fvm(ie)%norm_elem_coord_physgrid(1,i,j) = 1D9 + fvm(ie)%norm_elem_coord_physgrid(2,i,j) = 1D9 + end if + end do + end do + end do + ! + ! compute Dinv + ! + do ie=nets,nete + do j=1-nhc_phys,fv_nphys+nhc_phys + do i=1-nhc_phys,fv_nphys+nhc_phys + x1 = fvm(ie)%norm_elem_coord_physgrid(1,i,j) + x2 = fvm(ie)%norm_elem_coord_physgrid(2,i,j) + call Dmap(D(i,j,:,:),x1,x2,elem(ie)%corners3D,cubed_sphere_map,elem(ie)%corners,elem(ie)%u2qmap,elem(ie)%facenum) + detD = D(i,j,1,1)*D(i,j,2,2) - D(i,j,1,2)*D(i,j,2,1) + + fvm(ie)%Dinv_physgrid(i,j,1,1) = D(i,j,2,2)/detD + fvm(ie)%Dinv_physgrid(i,j,1,2) = -D(i,j,1,2)/detD + fvm(ie)%Dinv_physgrid(i,j,2,1) = -D(i,j,2,1)/detD + fvm(ie)%Dinv_physgrid(i,j,2,2) = D(i,j,1,1)/detD + end do + end do + end do + end if + + end subroutine fvm_pg_init + + +end module fvm_mod diff --git a/src/dynamics/se/dycore/fvm_overlap_mod.F90 b/src/dynamics/se/dycore/fvm_overlap_mod.F90 new file mode 100644 index 00000000..1da2cef5 --- /dev/null +++ b/src/dynamics/se/dycore/fvm_overlap_mod.F90 @@ -0,0 +1,877 @@ +module fvm_overlap_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + + real (kind=r8),parameter, private :: bignum = 1.0e20_r8 + real (kind=r8),parameter, private :: tiny = 1.0e-12_r8 + real (kind=r8),parameter, private :: fuzzy_width = 10.0_r8*tiny + + public:: compute_weights_cell + + private + integer, parameter :: max_cross = 10 +contains + subroutine compute_weights_cell(nvertex,lexact_horizontal_line_integrals,& + xcell_in,ycell_in,jx,jy,nreconstruction,xgno,ygno,igno_min,igno_max,& + jx_min, jx_max, jy_min, jy_max,& + ngauss,gauss_weights,abscissae,weights,weights_eul_index,jcollect,jmax_segments) + + implicit none + integer , intent(in) :: nvertex + logical, intent(in) :: lexact_horizontal_line_integrals + integer , intent(in):: nreconstruction, jx,jy,ngauss,jmax_segments + ! + ! dimension(nvertex) + ! + real (kind=r8) , dimension(4), intent(in):: xcell_in,ycell_in + ! + integer , intent(in) :: jx_min, jy_min, jx_max, jy_max,igno_min,igno_max + ! + ! dimension(-ihalo:nc+2+ihalo) + ! + real (kind=r8), dimension(igno_min:igno_max), intent(in) :: xgno, ygno + ! + ! for Gaussian quadrature + ! + real (kind=r8), dimension(:), intent(in) :: gauss_weights, abscissae !dimension(ngauss) + ! + ! Number of Eulerian sub-cell integrals for the cell in question + ! + integer , intent(out) :: jcollect + ! + ! local workspace + ! + ! + ! max number of line segments is: + ! + ! (number of longitudes)*(max average number of crossings per line segment = 3)*ncube*2 + ! 
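+ ! weights(j,1:nreconstruction) accumulates the line-integral contributions of
+ ! overlap segment j to the moments 1, x, y, x^2, y^2, xy (up to
+ ! nreconstruction terms), and weights_eul_index(j,1:2) records the Eulerian
+ ! (jx,jy) cell that segment belongs to.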
+ real (kind=r8) , & + dimension(jmax_segments,nreconstruction), intent(out) :: weights + integer , & + dimension(jmax_segments,2), intent(out) :: weights_eul_index + + integer :: jsegment + ! + ! variables for registering crossings with Eulerian latitudes and longitudes + ! + integer :: jcross_lat + ! + ! max. crossings per side is 2*ihalo + ! + real (kind=r8), & + dimension(max_cross,2) :: r_cross_lat + integer , & + dimension(max_cross,2) :: cross_lat_eul_index + real (kind=r8) , dimension(nvertex) :: xcell,ycell + + xcell = xcell_in(1:nvertex) + ycell = ycell_in(1:nvertex) + + jsegment = 0 + weights = 0.0_r8 + jcross_lat = 0 + + call side_integral(lexact_horizontal_line_integrals,xcell,ycell,nvertex,jsegment,jmax_segments,& + weights,weights_eul_index,nreconstruction,jx,jy,xgno,ygno,igno_min,igno_max,jx_min, jx_max, jy_min, jy_max,& + ngauss,gauss_weights,abscissae,& + jcross_lat,r_cross_lat,cross_lat_eul_index) + ! + !********************** + ! + ! Do inner integrals + ! + !********************** + ! + call compute_inner_line_integrals_lat(lexact_horizontal_line_integrals,& + r_cross_lat,cross_lat_eul_index,& + jcross_lat,jsegment,xgno,igno_min,igno_max,jx_min, jx_max, jy_min, jy_max,& + weights,weights_eul_index,& + nreconstruction,ngauss,gauss_weights,abscissae) + + IF (ABS((jcross_lat/2)-DBLE(jcross_lat)/2.0_r8)>tiny) then + WRITE(*,*) "number of latitude crossings are not even: ABORT",jcross_lat,jx,jy + STOP + END IF + + ! + ! collect line-segment that reside in the same Eulerian cell + ! + if (jsegment>0) then + call collect(weights,weights_eul_index,nreconstruction,jcollect,jsegment,jmax_segments) + else + jcollect = 0 + end if + end subroutine compute_weights_cell + ! + !**************************************************************************** + ! + ! organize data and store it + ! + !**************************************************************************** + ! + subroutine collect(weights,weights_eul_index,nreconstruction,jcollect,jsegment,jmax_segments) + implicit none + integer , INTENT(IN ) :: jsegment,jmax_segments + integer , intent(in) :: nreconstruction + ! + real (kind=r8) , dimension(:,:), intent(inout) :: weights !dimension(jmax_segments,nreconstruction) + integer , dimension(:,:), intent(inout) :: weights_eul_index !dimension(jmax_segments,2) + integer , INTENT(OUT ) :: jcollect + ! + ! local workspace + ! + integer :: imin, imax, jmin, jmax, i,j,k,h + logical :: ltmp + + real (kind=r8) , dimension(jmax_segments,nreconstruction) :: weights_out + integer , dimension(jmax_segments,2 ) :: weights_eul_index_out + + weights_out = 0.0_r8 + weights_eul_index_out = -100 + + imin = MINVAL(weights_eul_index(1:jsegment,1)) + imax = MAXVAL(weights_eul_index(1:jsegment,1)) + jmin = MINVAL(weights_eul_index(1:jsegment,2)) + jmax = MAXVAL(weights_eul_index(1:jsegment,2)) + + ltmp = .FALSE. + + jcollect = 1 + + do j=jmin,jmax + do i=imin,imax + do k=1,jsegment + if (weights_eul_index(k,1)==i.AND.weights_eul_index(k,2)==j) then + weights_out(jcollect,1:nreconstruction) = & + weights_out(jcollect,1:nreconstruction) + weights(k,1:nreconstruction) + ltmp = .TRUE. + h = k + endif + enddo + if (ltmp) then + weights_eul_index_out(jcollect,:) = weights_eul_index(h,:) + jcollect = jcollect+1 + endif + ltmp = .FALSE. + enddo + enddo + jcollect = jcollect-1 + weights = weights_out + weights_eul_index = weights_eul_index_out + end subroutine collect + ! + !***************************************************************************************** + ! + ! 
compute crossings with Eulerian latitudes and longitudes + ! + !***************************************************************************************** + ! + subroutine compute_inner_line_integrals_lat(lexact_horizontal_line_integrals,r_cross_lat,& + cross_lat_eul_index,& + jcross_lat,jsegment,xgno,igno_min,igno_max,jx_min,jx_max,jy_min, jy_max,weights,weights_eul_index,& + nreconstruction,ngauss,gauss_weights,abscissae) + implicit none + logical, intent(in) :: lexact_horizontal_line_integrals + ! + ! variables for registering crossings with Eulerian latitudes and longitudes + ! + integer , intent(in):: jcross_lat, nreconstruction,ngauss,igno_min,igno_max + integer , intent(inout):: jsegment + ! + ! for Gaussian quadrature + ! + real (kind=r8), dimension(ngauss), intent(in) :: gauss_weights, abscissae + ! + ! max. crossings per side is 2*ihalo + ! + + real (kind=r8) , dimension(:,:), intent(in):: r_cross_lat ! dimension(8*ihalo,2) + integer , dimension(:,:), intent(in):: cross_lat_eul_index ! ! dimension(8*ihalo,2) + integer , intent(in):: jx_min, jx_max, jy_min, jy_max + + real (kind=r8), dimension(igno_min:igno_max), intent(in) :: xgno !dimension(-ihalo:nc+2+ihalo) + ! + ! dimension(jmax_segments,nreconstruction) + ! + real (kind=r8), dimension(:,:), intent(inout) :: weights + ! + ! dimension(jmax_segments,2) + ! + integer , dimension(:,:), intent(inout) :: weights_eul_index + + real (kind=r8) , dimension(nreconstruction) :: weights_tmp + integer :: imin,imax,i,j,k,h + real (kind=r8), dimension(2) :: rstart,rend,rend_tmp + real (kind=r8), dimension(2) :: xseg, yseg + 5 FORMAT(10e14.6) + if (jcross_lat>0) then + do i=MINVAL(cross_lat_eul_index(1:jcross_lat,2)),MAXVAL(cross_lat_eul_index(1:jcross_lat,2)) + ! + ! find "first" crossing with Eulerian cell i + ! + do k=1,jcross_lat + if (cross_lat_eul_index(k,2)==i) exit + enddo + do j=k+1,jcross_lat + ! + ! find "second" crossing with Eulerian cell i + ! + if (cross_lat_eul_index(j,2)==i) then + if (r_cross_lat(k,1)10) THEN + WRITE(*,*) "search not converging",iter + STOP + END IF + lsame_cell_x = (x(2).GE.xgno(jx_eul).AND.x(2).LE.xgno(jx_eul+1)) + lsame_cell_y = (y(2).GE.ygno(jy_eul).AND.y(2).LE.ygno(jy_eul+1)) + IF (lsame_cell_x.AND.lsame_cell_y) THEN + ! + !**************************** + ! + ! same cell integral + ! + !**************************** + ! + xseg(1) = x(1); yseg(1) = y(1); xseg(2) = x(2); yseg(2) = y(2) + jx_eul_tmp = jx_eul; jy_eul_tmp = jy_eul; + lcontinue = .FALSE. + ! + ! prepare for next side if (x(2),y(2)) is on a grid line + ! + IF (x(2).EQ.xgno(jx_eul+1).AND.x(3)>xgno(jx_eul+1)) THEN + ! + ! cross longitude jx_eul+1 + ! + jx_eul=jx_eul+1 + ELSE IF (x(2).EQ.xgno(jx_eul ).AND.x(3)ygno(jy_eul+1)) THEN + ! + ! register crossing with latitude: line-segments point Northward + ! + jcross_lat = jcross_lat + 1 + jy_eul = jy_eul + 1 + cross_lat_eul_index(jcross_lat,1) = jx_eul + cross_lat_eul_index(jcross_lat,2) = jy_eul + r_cross_lat(jcross_lat,1) = x(2) + r_cross_lat(jcross_lat,2) = y(2) +! write(*,*) "A register crossing with latitude",x(2),y(2),jx_eul,jy_eul + ELSE IF (y(2).EQ.ygno(jy_eul ).AND.y(3)y(1) else "0" + ysgn2 = INT(SIGN(1.0_r8,y(2)-y(1))) !"1" if y(2)>y(1) else "-1" + ! + !******************************************************************************* + ! + ! there is at least one crossing with latitudes but no crossing with longitudes + ! + !******************************************************************************* + ! + yeul = ygno(jy_eul+ysgn1) + IF (x(1).EQ.x(2)) THEN + ! + ! 
line segment is parallel to longitude (infinite slope) + ! + xcross = x(1) + ELSE + slope = (y(2)-y(1))/(x(2)-x(1)) + xcross = x_cross_eul_lat(x(1),y(1),yeul,slope) + ! + ! constrain crossing to be "physically" possible + ! + xcross = MIN(MAX(xcross,xgno(jx_eul)),xgno(jx_eul+1)) + ! + ! debugging + ! + IF (xcross.GT.xgno(jx_eul+1).OR.xcross.LT.xgno(jx_eul)) THEN + WRITE(*,*) "xcross is out of range",jx,jy + WRITE(*,*) "xcross-xgno(jx_eul+1), xcross-xgno(jx_eul))",& + xcross-xgno(jx_eul+1), xcross-ygno(jx_eul) + STOP + END IF + END IF + xseg(1) = x(1); yseg(1) = y(1); xseg(2) = xcross; yseg(2) = yeul + jx_eul_tmp = jx_eul; jy_eul_tmp = jy_eul; + ! + ! prepare for next iteration + ! + x(0) = x(1); y(0) = y(1); x(1) = xcross; y(1) = yeul; jy_eul = jy_eul+ysgn2 + ! + ! register crossing with latitude + ! + jcross_lat = jcross_lat+1 + cross_lat_eul_index(jcross_lat,1) = jx_eul + if (ysgn2>0) then + cross_lat_eul_index(jcross_lat,2) = jy_eul + else + cross_lat_eul_index(jcross_lat,2) = jy_eul+1 + end if + r_cross_lat(jcross_lat,1) = xcross + r_cross_lat(jcross_lat,2) = yeul + ELSE IF (lsame_cell_y) THEN + ! + !******************************************************************************* + ! + ! there is at least one crossing with longitudes but no crossing with latitudes + ! + !******************************************************************************* + ! + xsgn1 = (1+INT(SIGN(1.0_r8,x(2)-x(1))))/2 !"1" if x(2)>x(1) else "0" + xsgn2 = INT(SIGN(1.0_r8,x(2)-x(1))) !"1" if x(2)>x(1) else "-1" + xeul = xgno(jx_eul+xsgn1) + IF (ABS(x(2)-x(1))x(1) else "0" + xsgn2 = (INT(SIGN(1.0_r8,x(2)-x(1)))) !"1" if x(2)>x(1) else "0" + xeul = xgno(jx_eul+xsgn1) + ysgn1 = (1+INT(SIGN(1.0_r8,y(2)-y(1))))/2 !"1" if y(2)>y(1) else "0" + ysgn2 = INT(SIGN(1.0_r8,y(2)-y(1))) !"1" if y(2)>y(1) else "-1" + yeul = ygno(jy_eul+ysgn1) + + slope = (y(2)-y(1))/(x(2)-x(1)) + IF (ABS(x(2)-x(1))0.AND.xcross.LE.xeul).OR.(xsgn2<0.AND.xcross.GE.xeul)) THEN + ! + ! cross latitude + ! + xseg(1) = x(1); yseg(1) = y(1); xseg(2) = xcross; yseg(2) = yeul + jx_eul_tmp = jx_eul; jy_eul_tmp = jy_eul; + ! + ! prepare for next iteration + ! + x(0) = x(1); y(0) = y(1); x(1) = xcross; y(1) = yeul; jy_eul = jy_eul+ysgn2 + ! + ! register crossing with latitude + ! + jcross_lat = jcross_lat+1 + cross_lat_eul_index(jcross_lat,1) = jx_eul + if (ysgn2>0) then + cross_lat_eul_index(jcross_lat,2) = jy_eul + else + cross_lat_eul_index(jcross_lat,2) = jy_eul+1 + end if + r_cross_lat(jcross_lat,1) = xcross + r_cross_lat(jcross_lat,2) = yeul +! write(*,*) "D register crossing with latitude",xcross,yeul,jx_eul,cross_lat_eul_index(jcross_lat,2) + ELSE + ! + ! cross longitude + ! + xseg(1) = x(1); yseg(1) = y(1); xseg(2) = xeul; yseg(2) = ycross + jx_eul_tmp = jx_eul; jy_eul_tmp = jy_eul; + ! + ! prepare for next iteration + ! + x(0) = x(1); y(0) = y(1); x(1) = xeul; y(1) = ycross; jx_eul = jx_eul+xsgn2 + END IF + + END IF + END IF + ! + ! register line-segment (don't register line-segment if outside of panel) + ! 
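+ ! Only segments whose Eulerian cell lies inside [jx_min,jx_max-1] x
+ ! [jy_min,jy_max-1] are registered; each accepted segment increments jsegment,
+ ! stores its cell indices and adds its weights (exact formulas for horizontal
+ ! segments, Gaussian quadrature otherwise).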
+ if (jx_eul_tmp>=jx_min.AND.jy_eul_tmp>=jy_min.AND.& + jx_eul_tmp<=jx_max-1.AND.jy_eul_tmp<=jy_max-1) then + jsegment=jsegment+1 + weights_eul_index(jsegment,1) = jx_eul_tmp + weights_eul_index(jsegment,2) = jy_eul_tmp + + call get_weights_exact(lexact_horizontal_line_integrals.AND.ABS(yseg(2)-yseg(1))0) THEN + x_cross_eul_lat = x+(yeul-y)/slope + ELSE + x_cross_eul_lat = bignum + END IF + end function x_cross_eul_lat + + subroutine get_weights_exact(lexact_horizontal_line_integrals,weights,xseg,yseg,nreconstruction,& + ngauss,gauss_weights,abscissae) + use fvm_analytic_mod, only: I_00, I_10, I_01, I_20, I_02, I_11 + implicit none + logical, intent(in) :: lexact_horizontal_line_integrals + integer , intent(in) :: nreconstruction, ngauss + real (kind=r8), intent(out) :: weights(:) + real (kind=r8), dimension(:), intent(in) :: gauss_weights, abscissae !dimension(ngauss) + + + real (kind=r8), dimension(:), intent(in) :: xseg,yseg !dimension(2) + ! + ! compute weights + ! + if(lexact_horizontal_line_integrals) then + weights(1) = ((I_00(xseg(2),yseg(2))-I_00(xseg(1),yseg(1)))) + if (ABS(weights(1))>1.0_r8) THEN + WRITE(*,*) "1 exact weights(jsegment)",weights(1),xseg,yseg + stop + end if + if (nreconstruction>1) then + weights(2) = ((I_10(xseg(2),yseg(2))-I_10(xseg(1),yseg(1)))) + weights(3) = ((I_01(xseg(2),yseg(2))-I_01(xseg(1),yseg(1)))) + endif + if (nreconstruction>3) then + weights(4) = ((I_20(xseg(2),yseg(2))-I_20(xseg(1),yseg(1)))) + weights(5) = ((I_02(xseg(2),yseg(2))-I_02(xseg(1),yseg(1)))) + weights(6) = ((I_11(xseg(2),yseg(2))-I_11(xseg(1),yseg(1)))) + endif + else + call get_weights_gauss(weights,xseg,yseg,nreconstruction,ngauss,gauss_weights,abscissae) + endif + end subroutine get_weights_exact + + + + subroutine get_weights_gauss(weights,xseg,yseg,nreconstruction,ngauss,gauss_weights,abscissae) + use fvm_analytic_mod, only: F_00, F_10, F_01, F_20, F_02, F_11 + implicit none + integer , intent(in) :: nreconstruction,ngauss + real (kind=r8), intent(out) :: weights(:) + real (kind=r8), dimension(2 ), intent(in) :: xseg,yseg + real (kind=r8) :: slope + ! + ! compute weights + ! + ! + ! for Gaussian quadrature + ! + real (kind=r8), dimension(ngauss), intent(in) :: gauss_weights, abscissae + + ! if line-segment parallel to x or y use exact formulaes else use qudrature + ! + real (kind=r8) :: b,integral,dx2,xc,x,y + integer :: i + +! 
if (fuzzy(abs(xseg(1) -xseg(2)),fuzzy_width)==0)then + if (xseg(1).EQ.xseg(2))then + weights = 0.0_r8 + else + slope = (yseg(2)-yseg(1))/(xseg(2)-xseg(1)) + b = yseg(1)-slope*xseg(1) + dx2 = 0.5_r8*(xseg(2)-xseg(1)) + xc = 0.5_r8*(xseg(1)+xseg(2)) + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_00(x,y) + enddo + weights(1) = integral*dx2 + if (nreconstruction>1) then + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_10(x,y) + enddo + weights(2) = integral*dx2 + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_01(x,y) + enddo + weights(3) = integral*dx2 + endif + if (nreconstruction>3) then + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_20(x,y) + enddo + weights(4) = integral*dx2 + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_02(x,y) + enddo + weights(5) = integral*dx2 + integral = 0.0_r8 + do i=1,ngauss + x = xc+abscissae(i)*dx2 + y = slope*x+b + integral = integral+gauss_weights(i)*F_11(x,y) + enddo + weights(6) = integral*dx2 + endif + end if + end subroutine get_weights_gauss + + subroutine truncate_vertex(x,j_eul,gno,igno_min,igno_max) + implicit none + integer , intent(inout) :: j_eul + integer , intent(in) :: igno_min,igno_max + + real (kind=r8) , intent(inout) :: x + real (kind=r8), dimension(igno_min:igno_max), intent(in) :: gno !dimension(-ihalo:nc+2+ihalo) +! real (kind=r8), intent(in) :: eps + + logical :: lcontinue + integer :: iter, xsgn + real (kind=r8) :: dist,dist_new,tmp + + lcontinue = .TRUE. + iter = 0 + dist = bignum + + xsgn = INT(SIGN(1.0_r8,x-gno(j_eul))) + + DO WHILE (lcontinue) + if ((j_euligno_max)) then + write(*,*) 'something is wrong', j_eul,igno_min,igno_max, iter + stop + endif + iter = iter+1 + tmp = x-gno(j_eul) + dist_new = ABS(tmp) + IF (dist_new>dist) THEN + lcontinue = .FALSE. + ELSE IF (ABS(tmp)<1.0E-9_r8) THEN + x = gno(j_eul) + lcontinue = .FALSE. + ELSE + j_eul = j_eul+xsgn + dist = dist_new + END IF + IF (iter>100) THEN + WRITE(*,*) "truncate vertex not converging" + STOP + END IF + END DO + END subroutine truncate_vertex + + subroutine which_eul_cell(x,j_eul,gno,igno_min,igno_max) + implicit none + integer , intent(inout) :: j_eul + integer , intent(in) :: igno_min,igno_max + real (kind=r8), dimension(:) , intent(in):: x !dimension(3) + real (kind=r8), dimension(igno_min:igno_max), intent(in) :: gno ! dimension(-ihalo:nc+2+ihalo) + + logical :: lcontinue + integer :: iter + + lcontinue = .TRUE. + iter = 0 + + DO WHILE (lcontinue) + iter = iter+1 + IF (x(1).GE.gno(j_eul).AND.x(1).LT.gno(j_eul+1)) THEN + lcontinue = .FALSE. + ! + ! special case when x(1) is on top of grid line + ! + IF (x(1).EQ.gno(j_eul)) THEN + ! + ! x(1) is on top of gno(J_eul) + ! + IF (x(2).GT.gno(j_eul)) THEN + j_eul = j_eul + ELSE IF (x(2).LT.gno(j_eul)) THEN + j_eul = j_eul-1 + ELSE + ! + ! x(2) is on gno(j_eul) grid line; need x(3) to determine Eulerian cell + ! + IF (x(3).GT.gno(j_eul)) THEN + ! + ! x(3) to the right + ! + j_eul = j_eul + ELSE IF (x(3).LT.gno(j_eul)) THEN + ! + ! x(3) to the left + ! + j_eul = j_eul-1 + ELSE + WRITE(*,*) "inconsistent cell: x(1)=x(2)=x(3)",x(1),x(2),x(3) + STOP + END IF + END IF + END IF + ELSE + ! + ! searching - prepare for next iteration + ! 
IF (x(1).GE.gno(j_eul+1)) THEN
+ j_eul = j_eul + 1
+ ELSE
+ !
+ ! x(1).LT.gno(j_eul)
+ !
+ j_eul = j_eul - 1
+ END IF
+ END IF
+ IF (iter>1000.OR.j_eul<igno_min.OR.j_eul>igno_max) THEN
+ WRITE(*,*) "search in which_eul_cell is not converging!", iter, j_eul,igno_min,igno_max
+ WRITE(*,*) "gno", gno(igno_min), gno(igno_max)
+ write(*,*) gno
+ STOP
+ END IF
+ END DO
+ END subroutine which_eul_cell
+
+
+ function fuzzy(x,epsilon)
+ implicit none
+
+ integer :: fuzzy
+ real (kind=r8), intent(in) :: epsilon
+ real (kind=r8) :: x
+
+ IF (ABS(x)<epsilon) THEN
+ fuzzy = 0
+ ELSE IF (x>epsilon) THEN
+ fuzzy = 1
+ ELSE !IF (x < fuzzy_width) THEN
+ fuzzy = -1
+ ENDIF
+ end function
+
+end module fvm_overlap_mod
diff --git a/src/dynamics/se/dycore/fvm_reconstruction_mod.F90 b/src/dynamics/se/dycore/fvm_reconstruction_mod.F90
new file mode 100644
index 00000000..b7310ad4
--- /dev/null
+++ b/src/dynamics/se/dycore/fvm_reconstruction_mod.F90
@@ -0,0 +1,1845 @@
+!==================================================================================
+! The subroutine reconstruction is called from both a horizontal
+! threaded region and a nested region for horizontal and
+! vertical threading. if horz_num_threads != horz_num_threads*vert_num_threads
+! then the timer calls will generate a segfault... So the simple solution is
+! to deactivate them by default.
+!
+#define FVM_TIMERS .FALSE.
+!==================================================================================
+ !MODULE FVM_RECONSTRUCTION_MOD--------------------------------------CE-for FVM!
+ ! AUTHOR: CHRISTOPH ERATH, 17.October 2011 !
+ ! This module contains everything to do (ONLY) a CUBIC (3rd order) reconstruction !
+ ! !
+ ! IMPORTANT: the implementation is done for a ncfl > 1, which is not working !
+ ! but it works for ncfl=1 !
+ !
+ ! This module has been recoded for multi-tracer efficiency (May, 2014)
+ !
+ !---------------------------------------------------------------------------!
+module fvm_reconstruction_mod
+
+ use shr_kind_mod, only: r8=>shr_kind_r8
+ use control_mod, only: north, south, east, west, neast, nwest, seast, swest
+ use cam_abortutils, only: endrun
+ use perf_mod, only: t_startf, t_stopf
+
+
+ implicit none
+ private
+! integer, parameter, private:: nh = nhr+(nhe-1) ! = 2 (nhr=2; nhe=1)
+ ! = 3 (nhr=2; nhe=2)
+ public :: reconstruction, recons_val_cart, extend_panel_interpolate
+!reconstruction_gradient,
+contains
+ ! ----------------------------------------------------------------------------------!
+ !SUBROUTINE RECONSTRUCTION------------------------------------------------CE-for FVM!
+ ! AUTHOR: CHRISTOPH ERATH, 17.October 2011 !
+ ! DESCRIPTION: controls the cubic (3rd order) reconstructions: !
+ ! !
+ ! CALLS: fillhalo_cubic, reconstruction_cubic !
+ ! INPUT: fcube ... tracer values incl. the halo zone !
+ ! fvm ... structure incl. tracer values etc. !
+ ! OUTPUT:recons ... has the reconstruction coefficients (5) for the 3rd order !
+ ! reconstruction: dx, dy, dx^2, dy^2, dxdy !
+ !-----------------------------------------------------------------------------------!
+ subroutine reconstruction(fcube,nlev_in,k_in,recons,irecons,llimiter,ntrac_in,&
+ nc,nhe,nhr,nhc,nht,ns,nh,&
+ jx_min,jx_max,jy_min,jy_max,&
+ cubeboundary,halo_interp_weight,ibase,&
+ spherecentroid,&
+ recons_metrics,recons_metrics_integral,&
+ rot_matrix,centroid_stretch,&
+ vertex_recons_weights,vtx_cart,&
+ irecons_actual_in)
+ implicit none
+ !
+ ! dimension(1-nhc:nc+nhc, 1-nhc:nc+nhc)
+ !
+ 
+ integer, intent(in) :: irecons + integer, intent(in) :: nlev_in, k_in + integer, intent(in) :: ntrac_in,nc,nhe,nhr,nhc,nht,ns,nh,cubeboundary + real (kind=r8), dimension(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev_in,ntrac_in), intent(inout) :: fcube + real (kind=r8), dimension(irecons,1-nhe:nc+nhe,1-nhe:nc+nhe,ntrac_in), intent(out) :: recons + integer, intent(in) :: jx_min(3), jx_max(3), jy_min(3), jy_max(3) + integer , intent(in):: ibase(1-nh:nc+nh,1:nhr,2) + real (kind=r8), intent(in):: halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) + real (kind=r8), intent(in):: spherecentroid(irecons-1,1-nhe:nc+nhe,1-nhe:nc+nhe) + real (kind=r8), intent(in):: recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + real (kind=r8), intent(in):: recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + integer , intent(in):: rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + real (kind=r8), intent(in):: centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe) + real (kind=r8), intent(in):: vertex_recons_weights(4,1:irecons-1,1-nhe:nc+nhe,1-nhe:nc+nhe) + real (kind=r8), intent(in):: vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + + logical, intent(in) :: llimiter(ntrac_in) + integer, optional, intent(in) :: irecons_actual_in + + integer :: irecons_actual + + real (kind=r8), dimension(1-nht:nc+nht,1-nht:nc+nht,3) :: f + + integer :: i,j,in,h,itr + integer, dimension(2,3) :: jx,jy + + if (present(irecons_actual_in)) then + irecons_actual = irecons_actual_in + else + irecons_actual = irecons + end if + + + jx(1,1)=jx_min(1); jx(2,1)=jx_max(1)-1 + jx(1,2)=jx_min(2); jx(2,2)=jx_max(2)-1 + jx(1,3)=jx_min(3); jx(2,3)=jx_max(3)-1 + + jy(1,1)=jy_min(1); jy(2,1)=jy_max(1)-1 + jy(1,2)=jy_min(2); jy(2,2)=jy_max(2)-1 + jy(1,3)=jy_min(3); jy(2,3)=jy_max(3)-1 + + ! + ! Initialize recons + ! + call zero_non_existent_ghost_cell(recons,irecons,cubeboundary,nc,nhe,ntrac_in) + if (irecons_actual>1) then + if(FVM_TIMERS) call t_startf('FVM:reconstruction:part#1') + if (nhe>0) then + do itr=1,ntrac_in + ! f=-9e9_r8 + call extend_panel_interpolate(nc,nhc,nhr,nht,ns,nh,& + fcube(:,:,k_in,itr),cubeboundary,halo_interp_weight,ibase,f(:,:,1),f(:,:,2:3)) + call get_gradients(f(:,:,:),jx,jy,irecons,recons(:,:,:,itr),& + rot_matrix,centroid_stretch,nc,nht,nhe,nhc,irecons_actual) + end do + else + do itr=1,ntrac_in + ! f=-9e9_r8!to avoid floating point exception for uninitialized variables + ! !in non-existent cells (corners of cube) + call extend_panel_interpolate(nc,nhc,nhr,nht,ns,nh,& + fcube(:,:,k_in,itr),cubeboundary,halo_interp_weight,ibase,f(:,:,1)) + call get_gradients(f(:,:,:),jx,jy,irecons,recons(:,:,:,itr),& + rot_matrix,centroid_stretch,nc,nht,nhe,nhc,irecons_actual) + end do + end if + if(FVM_TIMERS) call t_stopf('FVM:reconstruction:part#1') + if(FVM_TIMERS) call t_startf('FVM:reconstruction:part#2') + ! + ! fill in non-existent (in physical space) corner values to simplify + ! logic in limiter code (min/max operation) + ! 
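+ ! For elements touching a cube corner only three of the four neighboring
+ ! quadrants exist; the select case below mirrors values into the missing
+ ! corner halo so the limiter's min/max search can use a full stencil without
+ ! special-casing.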
+ do itr=1,ntrac_in + if (llimiter(itr)) then + if (cubeboundary>4) then + select case(cubeboundary) + case (nwest) + do h=1,nhe+1 + fcube(0,nc+h ,k_in,itr) = fcube(1-h,nc ,k_in,itr) + fcube(1-h,nc+1,k_in,itr) = fcube(1 ,nc+h,k_in,itr) + end do + case (swest) + do h=1,nhe+1 + fcube(1-h,0,k_in,itr) = fcube(1,1-h,k_in,itr) + fcube(0,1-h,k_in,itr) = fcube(1-h,1,k_in,itr) + end do + case (seast) + do h=1,nhe+1 + fcube(nc+h,0 ,k_in,itr) = fcube(nc,1-h,k_in,itr) + fcube(nc+1,1-h,k_in,itr) = fcube(nc+h,1,k_in,itr) + end do + case (neast) + do h=1,nhe+1 + fcube(nc+h,nc+1,k_in,itr) = fcube(nc,nc+h,k_in,itr) + fcube(nc+1,nc+h,k_in,itr) = fcube(nc+h,nc,k_in,itr) + end do + end select + end if + call slope_limiter(nhe,nc,nhc,fcube(:,:,k_in,itr),jx,jy,irecons,recons(:,:,:,itr),& + spherecentroid(:,1-nhe:nc+nhe,1-nhe:nc+nhe),& + recons_metrics,vertex_recons_weights,vtx_cart,irecons_actual) + end if + end do + if(FVM_TIMERS) call t_stopf('FVM:reconstruction:part#2') + end if + if(FVM_TIMERS) call t_startf('FVM:reconstruction:part#3') + select case (irecons_actual) + case(1) + do in=1,3 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + recons(1,i,j,1:ntrac_in) = fcube(i,j,k_in,1:ntrac_in) + recons(2:irecons,i,j,1:ntrac_in) = 0.0_r8 + end do + end do + end do + case(3) +! do j=1-nhe,nc+nhe +! do i=1-nhe,nc+nhe + do in=1,3 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + recons(1,i,j,1:ntrac_in) = fcube(i,j,k_in,1:ntrac_in) & + - recons(2,i,j,1:ntrac_in)*spherecentroid(1,i,j) & + - recons(3,i,j,1:ntrac_in)*spherecentroid(2,i,j) + recons(2,i,j,1:ntrac_in) = recons(2,i,j,1:ntrac_in) + recons(3,i,j,1:ntrac_in) = recons(3,i,j,1:ntrac_in) + recons(4:irecons,i,j,1:ntrac_in) = 0.0_r8 + end do + end do + end do + case(6) + do itr=1,ntrac_in +! do j=1-nhe,nc+nhe +! do i=1-nhe,nc+nhe + do in=1,3 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + recons(1,i,j,itr) = fcube(i,j,k_in,itr) & + - recons(2,i,j,itr)*spherecentroid(1,i,j) & + - recons(3,i,j,itr)*spherecentroid(2,i,j) & + + recons(4,i,j,itr)*recons_metrics_integral(1,i,j) & + + recons(5,i,j,itr)*recons_metrics_integral(2,i,j) & + + recons(6,i,j,itr)*recons_metrics_integral(3,i,j) + recons(2,i,j,itr) = recons(2,i,j,itr) & + - recons(4,i,j,itr)*2.0_r8*spherecentroid(1,i,j) & + - recons(6,i,j,itr) *spherecentroid(2,i,j) + recons(3,i,j,itr) = recons(3,i,j,itr) & + - recons(5,i,j,itr)*2.0_r8*spherecentroid(2,i,j) & + - recons(6,i,j,itr)*spherecentroid(1,i,j) + ! + ! recons(i,j,4:6) already set in get_gradients + ! + end do + end do + end do + end do + case default + write(*,*) "irecons out of range in get_ceof", irecons + end select + if(FVM_TIMERS) call t_stopf('FVM:reconstruction:part#3') + + ! recons(a,b,3) * (centroid(a,b,1)**2 - centroid(a,b,3)) + & + ! recons(a,b,4) * (centroid(a,b,2)**2 - centroid(a,b,4)) + & + ! recons(a,b,5) * (centroid(a,b,1) * centroid(a,b,2) - centroid(a,b,5)) + & + + + ! call debug_halo(fvm,fcubenew,fpanel) + ! call debug_halo_recons(fvm,recons,recons_trunk) + ! call print_which_case(fvm) + ! + ! call debug_halo_neighbor (fvm,fotherface,fotherpanel) + ! call debug_halo_neighbor_recons(fvm,recons,recons_trunk) + end subroutine reconstruction + !END SUBROUTINE RECONSTRUCTION--------------------------------------------CE-for FVM! 
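The get_gradients routine that follows evaluates first and second derivatives with the standard fourth-order centred stencils quoted in its comments; the 1/(12*dx) and 1/(12*dx**2) normalisations appear to be carried by the centroid_stretch factors. As a quick sanity check of the stencils themselves, here is a minimal standalone sketch (not part of this patch; the program name and the sin(x) test function are chosen only for illustration):

    program check_stencil
      ! Check the 4th-order finite-difference stencils quoted in get_gradients:
      !   f'  ~ (-f(i+2)+8f(i+1)-8f(i-1)+f(i-2))/(12*dx)
      !   f'' ~ (-f(i+2)+16f(i+1)-30f(i)+16f(i-1)-f(i-2))/(12*dx**2)
      implicit none
      integer, parameter :: r8 = selected_real_kind(12)
      real(kind=r8), parameter :: dx = 1.0e-2_r8, x0 = 0.3_r8
      real(kind=r8) :: fm2, fm1, f0, fp1, fp2, d1, d2
      fm2 = sin(x0-2.0_r8*dx); fm1 = sin(x0-dx); f0 = sin(x0)
      fp1 = sin(x0+dx);        fp2 = sin(x0+2.0_r8*dx)
      d1 = (-fp2 +  8.0_r8*fp1              -  8.0_r8*fm1 + fm2)/(12.0_r8*dx)
      d2 = (-fp2 + 16.0_r8*fp1 - 30.0_r8*f0 + 16.0_r8*fm1 - fm2)/(12.0_r8*dx*dx)
      ! Exact derivatives of sin are cos and -sin; both errors are O(dx**4):
      write(*,*) 'd/dx   error:', d1 - cos(x0)
      write(*,*) 'd2/dx2 error:', d2 + sin(x0)
    end program check_stencil

Refining dx by a factor of two reduces both reported errors by roughly a factor of sixteen, consistent with the fourth-order accuracy claimed in the comments.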
+ + subroutine get_gradients(f,jx,jy,irecons,gradient,rot_matrix,centroid_stretch,nc,nht,nhe,nhc,irecons_actual) + implicit none + integer, intent(in) :: irecons,nc,nht,nhe,nhc,irecons_actual + real (kind=r8), dimension(1-nht:nc+nht,1-nht:nc+nht,3), intent(in) :: f + real (kind=r8), dimension(irecons,1-nhe:nc+nhe,1-nhe:nc+nhe), intent(inout):: gradient + integer, dimension(2,3), intent(in) :: jx,jy + integer , dimension(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc), intent(in) :: rot_matrix + real (kind=r8), dimension(7,1-nhe:nc+nhe,1-nhe:nc+nhe), intent(in) :: centroid_stretch + + integer :: i,j,in + real (kind=r8), dimension(2):: g + real (kind=r8) :: sign + character(len=128) :: errormsg + + + select case (irecons_actual) + case(3) + in=1 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + ! + ! df/dx: 4-th-order finite difference: (-f(i+2)+8f(i+1)-8f(i-1)+f(i-2))/12dx + ! + gradient(2,i,j) = -f(i+2,j ,in)+8.0_r8*f(i+1,j ,in)-8.0_r8*f(i-1,j ,in)+f(i-2,j ,in) + gradient(3,i,j) = -f(i ,j+2,in)+8.0_r8*f(i ,j+1,in)-8.0_r8*f(i ,j-1,in)+f(i ,j-2,in) + end do + end do + do in=2,3 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + g(1) = -f(i+2,j ,in)+8.0_r8*f(i+1,j ,in)-8.0_r8*f(i-1,j ,in)+f(i-2,j ,in) + g(2) = -f(i ,j+2,in)+8.0_r8*f(i ,j+1,in)-8.0_r8*f(i ,j-1,in)+f(i ,j-2,in) + gradient(2:3,i,j) = MATMUL(rot_matrix(:,:,i,j),g(:)) + end do + end do + end do + gradient(2,:,:) = centroid_stretch(1,:,:)*gradient(2,:,:) + gradient(3,:,:) = centroid_stretch(2,:,:)*gradient(3,:,:) + case (6) + in=1 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + ! + ! df/dx: 4-th-order finite difference: (-f(i+2)+8f(i+1)-8f(i-1)+f(i-2))/12dx + ! + gradient(2,i,j) = -f(i+2,j ,in)+ 8.0_r8*f(i+1,j ,in) - 8.0_r8*f(i-1,j ,in)+f(i-2,j ,in) + gradient(3,i,j) = -f(i ,j+2,in)+ 8.0_r8*f(i ,j+1,in) - 8.0_r8*f(i ,j-1,in)+f(i ,j-2,in) + ! + ! d2f/dx2: + ! + gradient(4,i,j) = -f(i+2,j ,in)+16.0_r8*f(i+1,j ,in)-30.0_r8*f(i,j,in)+16.0_r8*f(i-1,j ,in)-f(i-2,j ,in) + gradient(5,i,j) = -f(i ,j+2,in)+16.0_r8*f(i ,j+1,in)-30.0_r8*f(i,j,in)+16.0_r8*f(i ,j-1,in)-f(i ,j-2,in) + + gradient(6,i,j) = f(i+1,j+1,in)- f(i+1,j-1,in) - f(i-1,j+1,in)+f(i-1,j-1,in) + ! + ! "stretching factors + ! + gradient(2,i,j) = centroid_stretch(1,i,j)*gradient(2,i,j) + gradient(3,i,j) = centroid_stretch(2,i,j)*gradient(3,i,j) + + gradient(4,i,j) = centroid_stretch(3,i,j)*gradient(4,i,j)+centroid_stretch(6,i,j)*gradient(2,i,j) + gradient(5,i,j) = centroid_stretch(4,i,j)*gradient(5,i,j)+centroid_stretch(7,i,j)*gradient(3,i,j) + + gradient(6,i,j) = centroid_stretch(5,i,j)*gradient(6,i,j) + end do + end do + do in=2,3 + if (SUM(rot_matrix(:,:,jx(1,in),jy(1,in)))==0) then + sign=-1 + else + sign=1 + end if + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + g(1) = -f(i+2,j ,in)+8.0_r8*f(i+1,j ,in)-8.0_r8*f(i-1,j ,in)+f(i-2,j ,in) + g(2) = -f(i ,j+2,in)+8.0_r8*f(i ,j+1,in)-8.0_r8*f(i ,j-1,in)+f(i ,j-2,in) + gradient(2:3,i,j) = MATMUL(rot_matrix(:,:,i,j),g(:)) + + g(1) = -f(i+2,j ,in)+16.0_r8*f(i+1,j ,in)-30.0_r8*f(i,j,in)+16.0_r8*f(i-1,j ,in)-f(i-2,j ,in) + g(2) = -f(i ,j+2,in)+16.0_r8*f(i ,j+1,in)-30.0_r8*f(i,j,in)+16.0_r8*f(i ,j-1,in)-f(i ,j-2,in) + gradient(4:5,i,j) = MATMUL(ABS(rot_matrix(:,:,i,j)),g(:)) + + gradient(6,i,j) = sign*(f(i+1,j+1,in)- f(i+1,j-1,in) - f(i-1,j+1,in)+f(i-1,j-1,in)) + ! + ! "stretching factors + ! 
+ gradient(2,i,j) = centroid_stretch(1,i,j)*gradient(2,i,j) + gradient(3,i,j) = centroid_stretch(2,i,j)*gradient(3,i,j) + + gradient(4,i,j) = centroid_stretch(3,i,j)*gradient(4,i,j)+centroid_stretch(6,i,j)*gradient(2,i,j) + gradient(5,i,j) = centroid_stretch(4,i,j)*gradient(5,i,j)+centroid_stretch(7,i,j)*gradient(3,i,j) + + gradient(6,i,j) = centroid_stretch(5,i,j)*gradient(6,i,j) + end do + end do + end do + case default + write(errormsg, *) irecons + call endrun('ERROR: irecons out of range in slope_limiter'//errormsg) + end select + end subroutine get_gradients + + + subroutine slope_limiter(nhe,nc,nhc,fcube,jx,jy,irecons,recons,spherecentroid,recons_metrics,& + vertex_recons_weights,vtx_cart,irecons_actual) + implicit none + integer , intent(in) :: irecons,nhe,nc,nhc,irecons_actual + real (kind=r8), dimension(1-nhc:, 1-nhc:) , intent(in) :: fcube + real (kind=r8), dimension(irecons,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(inout):: recons + integer, dimension(2,3) , intent(in) :: jx,jy + real (kind=r8), dimension(irecons-1,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(in) :: spherecentroid + real (kind=r8), dimension(3,1-nhe:nc+nhe,1-nhe:nc+nhe) , intent(in) :: recons_metrics + real (kind=r8), dimension(4,1:irecons-1,1-nhe:nc+nhe,1-nhe:nc+nhe), intent(in) :: vertex_recons_weights + real (kind=r8), dimension(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) , intent(in) :: vtx_cart + + real (kind=r8):: minval_patch,maxval_patch + real (kind=r8):: phi, min_val, max_val,disc + + real (kind=r8):: min_phi + real (kind=r8):: extrema(2), xminmax(2),yminmax(2),extrema_value(13) + + real(kind=r8) :: invtmp ! temporary to pre-compute inverses + integer :: itmp1,itmp2,i,j,in,vertex,n + +! real (kind=r8), dimension(-1:5) :: diff_value + real (kind=r8), dimension(-1:1) :: minval_array, maxval_array + real (kind=r8), parameter :: threshold = 1.0E-40_r8 + character(len=128) :: errormsg + select case (irecons_actual) + ! + ! PLM limiter + ! + case(3) + do in=1,3 + do j=jy(1,in),jy(2,in) + do i=jx(1,in),jx(2,in) + !rck combined min/max and unrolled inner loop + !minval_patch = MINVAL(fcube(i-1:i+1,j-1:j+1)) + !maxval_patch = MAXVAL(fcube(i-1:i+1,j-1:j+1)) + !DIR$ SIMD + do itmp2=-1,+1 + itmp1 = j+itmp2 + minval_array(itmp2) = min(fcube(i-1,itmp1),fcube(i,itmp1),fcube(i+1,itmp1)) + maxval_array(itmp2) = max(fcube(i-1,itmp1),fcube(i,itmp1),fcube(i+1,itmp1)) + enddo + minval_patch = min(minval_array(-1),minval_array(0),minval_array(1)) + maxval_patch = max(maxval_array(-1),maxval_array(0),maxval_array(1)) + + min_phi=1.0_r8 + + ! + ! coordinate bounds (could be pre-computed!) + ! 
+            xminmax(1) = min(vtx_cart(1,1,i,j),vtx_cart(2,1,i,j),vtx_cart(3,1,i,j),vtx_cart(4,1,i,j))
+            xminmax(2) = max(vtx_cart(1,1,i,j),vtx_cart(2,1,i,j),vtx_cart(3,1,i,j),vtx_cart(4,1,i,j))
+            yminmax(1) = min(vtx_cart(1,2,i,j),vtx_cart(2,2,i,j),vtx_cart(3,2,i,j),vtx_cart(4,2,i,j))
+            yminmax(2) = max(vtx_cart(1,2,i,j),vtx_cart(2,2,i,j),vtx_cart(3,2,i,j),vtx_cart(4,2,i,j))
+
+            !rck restructured loop
+            !DIR$ SIMD
+            do vertex=1,4
+              call recons_val_cart_plm(fcube(i,j), vtx_cart(vertex,1,i,j), vtx_cart(vertex,2,i,j), spherecentroid(:,i,j), &
+                   recons(1:3,i,j), extrema_value(vertex))
+            end do
+            max_val = MAXVAL(extrema_value(1:4))
+            min_val = MINVAL(extrema_value(1:4))
+
+            if (max_val>maxval_patch.and.abs(max_val-fcube(i,j))>threshold) then
+              phi = (maxval_patch-fcube(i,j))/(max_val-fcube(i,j))
+              if (phi<min_phi) min_phi = phi
+            end if
+            if (min_val<minval_patch.and.abs(min_val-fcube(i,j))>threshold) then
+              phi = (minval_patch-fcube(i,j))/(min_val-fcube(i,j))
+              if (phi<min_phi) min_phi = phi
+            end if
+            !
+            ! apply limiter to the linear part of the reconstruction
+            !
+            recons(2:3,i,j) = min_phi*recons(2:3,i,j)
+          end do
+        end do
+      end do
+    case(6)
+      !
+      ! limiter for the full quadratic reconstruction
+      !
+      do in=1,3
+        do j=jy(1,in),jy(2,in)
+          do i=jx(1,in),jx(2,in)
+            !rck combined min/max and unrolled inner loop
+            !DIR$ SIMD
+            do itmp2=-1,+1
+              itmp1 = j+itmp2
+              minval_array(itmp2) = min(fcube(i-1,itmp1),fcube(i,itmp1),fcube(i+1,itmp1))
+              maxval_array(itmp2) = max(fcube(i-1,itmp1),fcube(i,itmp1),fcube(i+1,itmp1))
+            enddo
+            minval_patch = min(minval_array(-1),minval_array(0),minval_array(1))
+            maxval_patch = max(maxval_array(-1),maxval_array(0),maxval_array(1))
+
+            min_phi=1.0_r8
+            !
+            ! coordinate bounds (could be pre-computed!)
+            !
+            xminmax(1) = min(vtx_cart(1,1,i,j),vtx_cart(2,1,i,j),vtx_cart(3,1,i,j),vtx_cart(4,1,i,j))
+            xminmax(2) = max(vtx_cart(1,1,i,j),vtx_cart(2,1,i,j),vtx_cart(3,1,i,j),vtx_cart(4,1,i,j))
+            yminmax(1) = min(vtx_cart(1,2,i,j),vtx_cart(2,2,i,j),vtx_cart(3,2,i,j),vtx_cart(4,2,i,j))
+            yminmax(2) = max(vtx_cart(1,2,i,j),vtx_cart(2,2,i,j),vtx_cart(3,2,i,j),vtx_cart(4,2,i,j))
+            !
+            ! initialize candidate extrema to the cell average so unused slots
+            ! do not affect the min/max search below
+            !
+            extrema_value(:) = fcube(i,j)
+            !
+            ! evaluate the reconstruction at the four cell vertices
+            !
+            !DIR$ SIMD
+            do vertex=1,4
+              call recons_val_cart(fcube(i,j), vtx_cart(vertex,1,i,j), vtx_cart(vertex,2,i,j), spherecentroid(:,i,j), &
+                   recons_metrics(:,i,j), recons(:,i,j), extrema_value(vertex))
+            end do
+            !
+            ! check whether the quadratic has a stationary point inside the cell
+            !
+            disc = 4.0_r8*recons(4,i,j)*recons(5,i,j) - recons(6,i,j)*recons(6,i,j)
+            if (abs(disc) > threshold) then
+              extrema(1) = recons(6,i,j) * recons(3,i,j) - 2.0_r8 * recons(5,i,j) * recons(2,i,j)
+              extrema(2) = recons(6,i,j) * recons(2,i,j) - 2.0_r8 * recons(4,i,j) * recons(3,i,j)
+
+              disc=1.0_r8/disc
+              extrema(1) = extrema(1) * disc + spherecentroid(1,i,j)
+              extrema(2) = extrema(2) * disc + spherecentroid(2,i,j)
+              if ( (extrema(1) - xminmax(1) > -threshold) .and. & !xmin
+                   (extrema(1) - xminmax(2) <  threshold) .and. & !xmax
+                   (extrema(2) - yminmax(1) > -threshold) .and. & !ymin
+                   (extrema(2) - yminmax(2) <  threshold)) then   !ymax
+                call recons_val_cart(fcube(i,j), extrema(1), extrema(2), spherecentroid(:,i,j), &
+                     recons_metrics(:,i,j), recons(:,i,j), extrema_value(5))
+              endif
+            endif
+            !
+            ! Check all potential minimizer points along element boundaries
+            !
+            if (abs(recons(6,i,j)) > threshold) then
+              invtmp = 1.0_r8 / (recons(6,i,j) + spherecentroid(2,i,j))
+              do n=1,2
+                ! Left edge, intercept with du/dx = 0
+                extrema(2) = invtmp * (-recons(2,i,j) - 2.0_r8 * recons(4,i,j) * (xminmax(n) - spherecentroid(1,i,j)))
+                if ((extrema(2) > yminmax(1)-threshold) .and. (extrema(2) < yminmax(2)+threshold)) then
+                  call recons_val_cart(fcube(i,j), xminmax(n), extrema(2), spherecentroid(:,i,j), &
+                       recons_metrics(:,i,j), recons(:,i,j), extrema_value(5+n))
+                endif
+              enddo
+              ! Top/bottom edge, intercept with du/dy = 0
+              invtmp = 1.0_r8 / recons(6,i,j) + spherecentroid(1,i,j)
+              do n = 1,2
+                extrema(1) = invtmp * (-recons(3,i,j) - 2.0_r8 * recons(5,i,j) * (yminmax(n) - spherecentroid(2,i,j)))
+                if ((extrema(1) > xminmax(1)-threshold) .and. (extrema(1) < xminmax(2)+threshold)) then
+                  call recons_val_cart(fcube(i,j), extrema(1), yminmax(n), spherecentroid(:,i,j), &
+                       recons_metrics(:,i,j), recons(:,i,j), extrema_value(7+n))
+                endif
+              enddo
+            endif
+
+            ! Top/bottom edge, y=const., du/dx=0
+            if (abs(recons(4,i,j)) > threshold) then
+              invtmp = 1.0_r8 / (2.0_r8 * recons(4,i,j))! + spherecentroid(1,i,j)
+              do n = 1,2
+                extrema(1) = spherecentroid(1,i,j)+&
+                     invtmp * (-recons(2,i,j) - recons(6,i,j) * (yminmax(n) - spherecentroid(2,i,j)))
+
+                if ((extrema(1) > xminmax(1)-threshold) .and. (extrema(1) < xminmax(2)+threshold)) then
+                  call recons_val_cart(fcube(i,j), extrema(1), yminmax(n), spherecentroid(:,i,j),&
+                       recons_metrics(:,i,j),recons(:,i,j), extrema_value(9+n))
+                endif
+              enddo
+            endif
+            ! Left/right edge, x=const., du/dy=0
+            if (abs(recons(5,i,j)) > threshold) then
+              invtmp = 1.0_r8 / (2.0_r8 * recons(5,i,j))
+              do n = 1,2
+                extrema(2) = spherecentroid(2,i,j)+&
+                     invtmp * (-recons(3,i,j) - recons(6,i,j) * (xminmax(n) - spherecentroid(1,i,j)))
+
+                if ((extrema(2) > yminmax(1)-threshold) .and. (extrema(2) < yminmax(2)+threshold)) then
+                  call recons_val_cart(fcube(i,j), xminmax(n), extrema(2), spherecentroid(:,i,j), &
+                       recons_metrics(:,i,j), recons(:,i,j), extrema_value(11+n))
+                endif
+              enddo
+            endif
+            !rck - combined min/max calculation and unrolled
+            ! max_val = MAXVAL(extrema_value)
+            ! min_val = MINVAL(extrema_value)
+            max_val = extrema_value(13)
+            min_val = extrema_value(13)
+            do itmp1 = 1,12,4
+              max_val = max(max_val, extrema_value(itmp1),extrema_value(itmp1+1),extrema_value(itmp1+2),extrema_value(itmp1+3))
+              min_val = min(min_val, extrema_value(itmp1),extrema_value(itmp1+1),extrema_value(itmp1+2),extrema_value(itmp1+3))
+            enddo
+            !rck
+
+            if (max_val>maxval_patch.and.abs(max_val-fcube(i,j))>threshold) then
+              phi = (maxval_patch-fcube(i,j))/(max_val-fcube(i,j))
+              if (phi<min_phi) min_phi = phi
+            end if
+            if (min_val<minval_patch.and.abs(min_val-fcube(i,j))>threshold) then
+              phi = (minval_patch-fcube(i,j))/(min_val-fcube(i,j))
+              if (phi<min_phi) min_phi = phi
+            end if
+            !
+            ! apply limiter to all higher-order reconstruction coefficients
+            !
+            recons(2:6,i,j) = min_phi*recons(2:6,i,j)
+          end do
+        end do
+      end do
+    case default
+      write(errormsg, *) irecons
+      call endrun('ERROR: irecons out of range in slope_limiter '//errormsg)
+    end select
+  end subroutine slope_limiter
+
+  ! ----------------------------------------------------------------------------------!
+  !SUBROUTINE RECONS_VAL_CART-----------------------------------------------CE-for FVM!
+  ! DESCRIPTION: returns the value from the reconstruction (3rd order Taylor polynom)  !
+  !              at the point (cartx,carty) -> in cube CARTESIAN coordinates           !
+  !                                                                                    !
+  ! INPUT: fcube    ... tracer values incl. the halo zone                              !
+  !        cartx    ... x cartesian coordinate of the evaluation point                 !
+  !        carty    ... y cartesian coordinate of the evaluation point                 !
+  !        centroid ... x,y,x^2,y^2,xy                                                 !
+  !        recons   ... array of reconstructed coefficients                            !
+  ! OUTPUT: value   ... evaluation at a given point                                    !
+  !-----------------------------------------------------------------------------------!
+  subroutine recons_val_cart(fcube, cartx, carty, centroid, pre_computed_metrics, recons, value)
+    implicit none
+    real(kind=r8), intent(in)                 :: fcube
+    real(kind=r8), intent(in)                 :: cartx, carty
+    real(kind=r8), dimension(1:5), intent(in) :: centroid
+    real(kind=r8), dimension(3),   intent(in) :: pre_computed_metrics
+    real(kind=r8), dimension(1:6), intent(in) :: recons
+    real(kind=r8), intent(out)                :: value
+    real(kind=r8) :: dx, dy
+    dx = cartx - centroid(1)
+    dy = carty - centroid(2)
+    ! Evaluate constant order terms
+    value = fcube + &
+         ! Evaluate linear order terms
+         recons(2) * dx + &
+         recons(3) * dy + &
+         ! Evaluate second order terms
+         recons(4) * (pre_computed_metrics(1) + dx*dx) + &
+         recons(5) * (pre_computed_metrics(2) + dy*dy) + &
+         recons(6) * (pre_computed_metrics(3) + dx*dy)
+  END subroutine recons_val_cart
+
+  subroutine recons_val_cart_plm(fcube, cartx, carty, centroid, recons, value)
+    implicit none
+    real(kind=r8), intent(in)                 :: fcube
+    real(kind=r8), intent(in)                 :: cartx, carty
+    real(kind=r8), dimension(1:5), intent(in) :: centroid
+    real(kind=r8), dimension(1:3), intent(in) :: recons
+    real(kind=r8), intent(out)                :: value
+    real(kind=r8) :: dx, dy
+    dx = cartx - centroid(1)
+    dy = carty - centroid(2)
+    ! Evaluate constant order terms
+    value = fcube + &
+         ! Evaluate linear order terms
+         recons(2) * dx + &
+         recons(3) * dy
+  END subroutine recons_val_cart_plm
+
+
+  ! ----------------------------------------------------------------------------------!
+  !SUBROUTINE SLOPELIMITER_VAL----------------------------------------------CE-for FVM!
+  ! AUTHOR: CHRISTOPH ERATH, 30.November 2011                                          !
+  ! DESCRIPTION: updates the slope-limiter scaling for a point value of the            !
+  !              reconstruction at (cartx,carty) -> in cube CARTESIAN coordinates      !
+  !                                                                                    !
+  ! INPUT: value      ... point value (calculated here by recons_val_cart)             !
+  !        cell_value ... tracer value (in the cell center) of the cell                !
+  !        local_min  ... minimal value in the patch                                   !
+  !        local_max  ... maximal value in the patch                                   !
+  ! INPUT/OUTPUT: min_phi ... slope limiter, inout because we go through any possible  !
+  !                           extrema on the cell                                      !
+  !-----------------------------------------------------------------------------------!
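+  ! Illustrative example of the limiting logic (values made up, not from the run):    !
+  !   cell_value = 1.0, value = 3.0 at some candidate point, local_max = 2.0          !
+  !   => phi = (local_max-cell_value)/(value-cell_value) = (2-1)/(3-1) = 0.5          !
+  !   min_phi is reduced to 0.5; a caller can then scale the reconstruction           !
+  !   coefficients by min_phi so the candidate value stays within the patch bounds.   !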
+ subroutine slopelimiter_val(value, cell_value, local_min, local_max, min_phi) + implicit none + real (kind=r8), intent(in) :: value, cell_value + real (kind=r8), intent(in) :: local_min, local_max + real (kind=r8), intent(inout) :: min_phi + real (kind=r8) :: phi + + ! Check against the minimum bound on the reconstruction + if (value - cell_value > 1.0e-12_r8 * value) then + phi = (local_max - cell_value) / (value - cell_value) + if (phi < min_phi) then + min_phi = phi + endif + ! Check against the maximum bound on the reconstruction + elseif (value - cell_value < -1.0e-12_r8 * value) then + phi = (local_min - cell_value) / (value - cell_value) + if(phi < min_phi) then + min_phi = phi + endif + endif + end subroutine slopelimiter_val + !END SUBROUTINE SLOPELIMITER_VAL------------------------------------------CE-for FVM! + + function matmul_w(w,f,ns) + implicit none + real (kind=r8) :: matmul_w + real (kind=r8),dimension(:), intent(in) :: w,f !dimension(ns) + integer, intent(in) :: ns + integer :: k + matmul_w = 0.0_r8 + do k=1,ns + matmul_w = matmul_w+w(k)*f(k) + end do + end function matmul_w + + ! special hard-coded version of the function where ns=3 + ! for performance optimization +! function matmul_w(w, f) +! IMPLICIT NONE +! REAL(KIND=r8), dimension(3), intent(in) :: w +! REAL(KIND=r8), dimension(3), intent(in) :: f +! REAL(KIND=r8) :: matmul_w +! matmul_w = w(1)*f(1) + w(2)*f(2) + w(3)*f(3) +! end function matmul_w + + subroutine extend_panel_interpolate(nc,nhc,nhr,nht,ns,nh,fcube,cubeboundary,halo_interp_weight,ibase,& + fpanel,fotherpanel) + implicit none + integer, intent(in) :: cubeboundary,nc,nhr,nht,nh,nhc,ns + real (kind=r8), & + dimension(1-nhc:nc+nhc, 1-nhc:nc+nhc), intent(in) :: fcube + + real (kind=r8), intent(in) :: halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) + integer , intent(in) :: ibase(1-nh:nc+nh,1:nhr,2) + + real (kind=r8) , dimension(1-nht:nc+nht, 1-nht:nc+nht ), intent(out) :: fpanel + real (kind=r8), dimension(1-nht:nc+nht,1-nht:nc+nht,2), intent(out), optional :: fotherpanel + + integer :: i, halo,ibaseref + real (kind=r8), dimension(1:ns,1-nh:nc+nh,1:nhr) :: w + ! + ! fpanel = 1.0E19 !dbg + ! + ! + ! Stencil for reconstruction is: + ! + ! --------------------- + ! | | | i | | | + ! --------------------- + ! | | i | i | i | | + ! --------------------- + ! | i | i | R | i | i | + ! --------------------- + ! | | i | i | i | | + ! --------------------- + ! | | | i | | | + ! --------------------- + ! + ! where + ! + ! "R" is cell for which we whish to do the reconstruction + ! "i" is the stencil + ! + ! + ! If one or more point in the stencil is on another panel(s) then we need to interpolate + ! to a stencil that is an extension of the panel on which R is located + ! (this is done using one dimensional cubic Lagrange interpolation along panel side) + ! + ! Example: say that southern most "s" on Figure above is on another panels projection then the stencil becomes + ! + ! + ! --------------------------------- + ! | | | | | | i | | | + ! ----------------|---------------- + ! | | | | | i | i | i | | + ! ----------------|---------------- + ! | | | | i | i | R | i | i | + ! ----------------|---------------- + ! | | | | | i | i | i | | + ! --------------------------------- + ! / / / / / S /S&i/ S / S / + ! /---/---/---/---/---/---/---/---/ + ! / / / / / / / / / + !/---/---/---/---/---/---/---/---/ + ! + ! + ! where "S" are the cell average values used for the cubic interpolation (located on the South panel) + ! + ! 
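+    ! Illustrative sketch (not part of the original code): for a one-dimensional
+    ! cubic Lagrange interpolation from ns source cells with coordinates x(1:ns)
+    ! to a target coordinate xt, the weights have the generic form
+    !     w(k) = product over m /= k of (xt - x(m)) / (x(k) - x(m)),
+    ! and matmul_w(w,f,ns) above evaluates sum_k w(k)*f(k).  The weights actually
+    ! used here are precomputed and passed in via halo_interp_weight, together
+    ! with the stencil offsets in ibase, so the formula is only meant to convey
+    ! the idea, not the exact recipe used to build those tables.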
+ if (cubeboundary==0) then + fpanel(1-nht:nc+nht,1-nht:nc+nht)=fcube(1-nht:nc+nht,1-nht:nc+nht) + else if (cubeboundary==west) then + ! ! + ! ! Case shown below: nhr=2, nhe=1, nht=nhr+nhe + ! ! (nhr = reconstruction width along x and y) + ! ! (nhe = max. Courant number) + ! ! + ! ! + ! Figure below shows the element in question ! In terms of data structure: + ! (center element) and the surrounding elements ! + ! on the element in question's projection ! * "H" is on same panel average value + ! ! * "w" is west panel average values that need + ! Notation: "0" marks the element boundaries ! to be interpolated to main element + ! ! projection + ! Elements to the west are on a different projection ! * "i" is extra halo required by the cubic + ! ! interpolation + ! 0 ! + ! |0000 ! + ! | |00000 ! + ! |\--| |000000000000000000000000000000000000 ! -x---x---x---x---x---x---x---x---x---x---x---x + ! | |\--| 0 | | | 0 | | | 0 ! | | | i | | | | | | | | | + ! |\--| |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! | |\--| 0 | | | 0 | | | 0 ! | | i | i | H | H | H | H | H | | | | + ! |\--| |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! 0 |\--| 0 | | | 0 | | | 0 ! | | i | w | H | H | H | H | H | H | | | + ! |0000 |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! | |0000 0 | | | 0 | | | 0 ! | | w | w | r | r | r | r | r | H | H | | + ! |\--| |000000000000000000000000000000000000 ! -x---x---x---00000000000000000---x---x---x---x + ! | |\--| 0 | | | 0 | | | 0 ! | | w | w 0 r | r | r | r 0 r | H | H | | + ! |\--| \---0---------------0---------------0 ! -------------0---------------0---------------x + ! | |\--| 0 | | | 0 | | | 0 ! | | w | w 0 r | r | r | r 0 r | H | H | | + ! |\--| \---0---------------0---------------0 ! -------------0---------------0---------------x + ! 0 |\--| 0 | | | 0 | | | 0 ! | | w | w 0 r | r | r | r 0 r | H | H | | + ! |0000 |\--0---------------0---------------0 ! -------------0---------------0---------------x + ! | |0000 0 | | | 0 | | | 0 ! | | w | w 0 r | r | r | r 0 r | H | H | | + ! |\--| |000000000000000000000000000000000000 ! -x---x---x---00000000000000000---x---x---x---x + ! | |\--| 0 | | | 0 | | | 0 ! | | w | w | r | r | r | r | r | H | H | | + ! |\--| |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! | |\--| 0 | | | 0 | | | 0 ! | | i | w | H | H | H | H | H | H | | | + ! |\--| |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! 0 |\--| 0 | | | 0 | | | 0 ! | | i | i | H | H | H | H | H | | | | + ! 0000 |\--0---------------0---------------0 ! -------------x---------------x---------------x + ! 0000 0 | | | 0 | | | 0 ! | | | i | | | | | | | | | + ! 000000000000000000000000000000000000 ! -x---x---x---x---x---x---x---x---x---x---x---x + ! + ! + ! -2 |-1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 + ! + ! + ! + ! fill in values (incl. halo) that are on the "main" panels projection + ! + fpanel(1:nc+nht,1-nht:nc+nht)=fcube(1:nc+nht,1-nht:nc+nht) + ! + ! fill in values that are on the west panels projection + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref=ibase(i,halo,1) + ! ibaseref = ibase(i,halo,1) + fpanel(1-halo ,i) = matmul_w(w(:,i,halo),fcube(1-halo ,ibaseref:ibaseref+ns-1),ns) + end do + end do + + if (present(fotherpanel)) then + ! + ! fill in values that are on the west panels projection + ! 
+ fotherpanel (1-nht:0,1-nht:nc+nht,1)=fcube(1-nht:0,1-nht:nc+nht) + ! + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref=ibase(i,halo,1) + ! + ! Exploit symmetry in interpolation weights + ! + fotherpanel(halo,i,1) = matmul_w(w(:,i,halo),fcube(halo ,ibaseref:ibaseref+ns-1),ns) + end do + end do + end if + else if (cubeboundary==east) then + ! + ! north part is on different panel + ! + ! stencil + ! + ! CN<1 case ! + ! ! + ! + ! + ! 0 ! + ! 0000| ! + ! 0000| | ! + ! 000000000000000000000000000000000000| |--/| ! x---x---x---x---x---x---x---x---x---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | | | | | | | | i | | | + ! 0---------------0---------------0--/ |--/| ! x---------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | | | H | H | H | H | H | i | i | | + ! 0---------------0---------------0--/| |--/| ! x---------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 |--/| 0 ! | | | H | H | H | H | H | H | e | i | | + ! 0---------------0---------------0--/| 0000| ! x---------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 0000| | ! | | H | H | r | r | r | r | r | e | e | | + ! 000000000000000000000000000000000000| |--/| ! x---x---x---x---00000000000000000---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | H | H | r 0 r | r | r | r 0 e | e | | + ! 0---------------0---------------0--/| |--/| ! x---------------0---------------0---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | H | H | r 0 r | r | r | r 0 e | e | | + ! 0---------------0---------------0--/| |--/| ! x---------------0---------------0---x---x---x- + ! 0 | | | 0 | | | 0 |--/| 0 ! | | H | H | r 0 r | r | r | r 0 e | e | | + ! 0---------------0---------------0--/| 0000| ! x---------------0---------------0---x---x---x- + ! 0 | | | 0 | | | 0 0000| | ! | | H | H | r 0 r | r | r | r 0 e | e | | + ! 000000000000000000000000000000000000| |--/| ! x---x---x---x---00000000000000000---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | H | H | r | r | r | r | r | e | e | | + ! 0---------------0---------------0--/| |--/| ! ----------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 |--/| | ! | | | H | H | H | H | H | H | e | i | | + ! 0---------------0---------------0--/| |--/| ! ----------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 |--/| 0 ! | | | | H | H | H | H | H | i | i | | + ! 0---------------0---------------0--/| 0000 ! ----------------x---------------x---x---x---x- + ! 0 | | | 0 | | | 0 0000 ! | | | | | | | | | i | | | + ! 000000000000000000000000000000000000 ! x---x---x---x---x---x---x---x---x---x---x---x- + ! + ! + ! -3 |-2 |-1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + ! + fpanel (1-nht:nc ,1-nht:nc+nht )=fcube(1-nht:nc ,1-nht:nc+nht) + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + fpanel (nc+halo ,i ) = matmul_w(w(:,i,halo),fcube(nc +halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + + if (present(fotherpanel)) then + fotherpanel (nc+1 :nc+nht ,1-nht:nc+nht,1)=fcube(nc+1 :nc+nht ,1-nht:nc+nht) ! + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ! ibaseref=ibase(i,halo,1 ) + ibaseref = ibase(i,halo,1) + fotherpanel (nc+1-halo ,i,1) = matmul_w(w(:,i,halo),fcube(nc+1-halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + end if + + else if (cubeboundary==north) then + ! + ! north part is on different panel + ! + ! stencil + ! + ! CN<1 case + ! ! x---------------x---------------x---------------x + ! ! | | | | | | | | | | | | | + !0---\---\---\---0---\---\---\---0---\---\---\---0 ! 
x---------------x---------------x---------------x + ! 0 \ \ \ 0 \ \ \ 0 \ \ \ 0 ! | | i | i | n | n | n | n | n | n | i | i | | + ! 0---\---\---\---0---\---\---\---0---\---\---\---0 ! x---------------x---------------x---------------x + ! 0 \ \ \ 0 \ \ \ 0 \ \ \ 0 ! | i | i | n | n | n | n | n | n | n | n | i | i | + ! 0000000000000000000000000000000000000000000000000 ! x---x---x---x---00000000000000000---x---x---x---x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0000000000000000000000000000000000000000000000000 ! x---x---x---x---00000000000000000---x---x---x---x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r | r | r | r | r | r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | H | H | H | H | H | H | H | H | | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | | H | H | H | H | H | H | | | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | | | | | | | | | | | + ! 0000000000000000000000000000000000000000000000000 ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! + ! -3 |-2 |-1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 ! -3 |-2 |-1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 + ! + ! fill in values that are on the same projection as "main" element + fpanel (1-nht:nc+nht ,1-nht:nc)=fcube(1-nht:nc+nht ,1-nht:nc) + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + fpanel (i,nc+halo ) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+halo ),ns) !north + end do + end do + if (present(fotherpanel)) then + ! fill in halo for north element + fotherpanel (1-nht:nc+nht ,nc+1:nc+nht,1)=fcube(1-nht:nc+nht ,nc+1:nc+nht) + ! + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + fotherpanel (i,nc+1-halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+1-halo),ns) + end do + end do + end if + + + else if (cubeboundary==south) then + ! + ! south part is on different panel + ! + ! stencil + ! + ! ! + ! 0000000000000000000000000000000000000000000000000 ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | | | | | | | | | | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | | H | H | H | H | H | H | | | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | | H | H | H | H | H | H | H | H | | | + ! 0---------------0---------------0---------------0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r | r | r | r | r | r | H | H | | + ! 0000000000000000000000000000000000000000000000000 ! 
x---x---x---x---00000000000000000---x---x---x---x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0---------------0---------------0---------------0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | | 0 ! | | H | H | r 0 r | r | r | r 0 r | H | H | | + ! 0000000000000000000000000000000000000000000000000 ! x---x---x---x---00000000000000000---x---x---x---x + ! 0 / / / 0 / / / 0 / / / 0 ! | i | i | s | s | s | s | s | s | s | s | i | i | + ! 0---/---/---/---0---/---/---/---0---/---/---/---0 ! x---------------x---------------x---------------x + ! 0 / / / 0 / / / 0 / / / 0 ! | | i | i | s | s | s | s | s | s | i | i | | + !0---/---/---/---0---/---/---/---0---/---/---/---0 ! x---------------x---------------x---------------x + ! ! | | | | | | | | | | | | | + ! + ! 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 ! 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 + ! + ! fill in values that are on the same projection as "main" element (marked with "i" in Figure above) + ! + fpanel (1-nht:nc+nht,1:nc+nht )=fcube(1-nht:nc+nht,1:nc+nht) + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref=ibase(i,halo,1)!ibase(i,halo,2) + fpanel (i,1-halo ) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,1-halo),ns) !south + end do + end do + if (present(fotherpanel)) then + fotherpanel (1-nht:nc+nht,1-nht:0 ,1)=fcube(1-nht:nc+nht,1-nht:0 ) + do halo=1,nhr + do i=halo-nh,nc+nh-(halo-1) + ibaseref=ibase(i,halo,1)!ibase(i,halo,2) + fotherpanel (i, halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1, halo),ns) + end do + end do + end if + else if (cubeboundary==swest) then + ! + ! south and west neighboring cells are on different panel + ! + ! stencil + ! + ! + ! CN<1 case + ! + ! + ! + ! |000000000000000000000000000000000000 ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! 0000 0 | | | 0 | | | 0 ! | | | | | | | | | | | | | + ! 0 |/--0---------------0---------------0 ! x---------------x---------------x---------------x + ! |/--| 0 | | | 0 | | | 0 ! | | | | w | H | H | H | H | H | | | | + ! | |/--0---------------0---------------0 ! x---------------x---------------x---------------x + ! |/--| 0 | | | 0 | | | 0 ! | | | w | w | H | H | H | H | H | H | | | + ! | |/--0---------------0---------------0 ! x---------------x---------------x---------------x + ! |/--| 0 | | | 0 | | | 0 ! | | | w | w | r | r | r | r | r | H | H | | + ! | |000000000000000000000000000000000000 ! x---x---x---x---00000000000000000---x---x---x---x + ! |0000 0 | | | 0 | | | 0 ! | | | w | w 0 r | r | r | r 0 r | H | H | | + ! 0 |/--0---------------0---------------0 ! x---------------0---------------0---------------x + ! |/--| 0 | | | 0 | | | 0 ! | | | w | w 0 r | r | r | r 0 r | H | H | | + ! | |/--0---------------0---------------0 ! x---------------0---------------0---------------x + ! |/--| 0 | | | 0 | | | 0 ! | | | w | w 0 r | r | r | r 0 r | H | H | | + ! | |/--0---------------0---------------0 ! x---------------0---------------0---------------x + ! | | 0 | | | 0 | | | 0 ! | | | w | w 0 r | r | r | r 0 r | H | H | | + ! | -/| 000000000000000000000000000000000 ! 
x---x---x---x---00000000000000000---x---x---x---x + ! |/ | 0 / / / 0 / / / 0 ! | | | w | | s | s | s | s | s | s | | | + ! | 0-----/---/---/---0---/---/---/---0 ! x---------------x---------------x---------------x + ! | 0 / / / 0 / / / 0 ! | | | | s | s | s | s | s | s | | | | + ! 0-------/---/---/---0---/---/---/---0 ! x---------------x---------------x---------------x + ! + ! + ! -1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ! |-3 |-2 |-1 | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + ! + ! fill in values that are on the same projection as "main" element (marked with "i" in Figure above) + ! + fpanel(1:nc+nht,1:nc+nht)=fcube(1:nc+nht,1:nc+nht) + ! + ! fill in west part (marked with "w" on Figure above) and south part (marked with "s") + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref=ibase(i,halo,1)!ibase(i,halo,1) + fpanel(1-halo ,i) = matmul_w(w(:,i,halo),fcube(1-halo ,ibaseref:ibaseref+ns-1),ns) !west + fpanel(i,1-halo ) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,1-halo) ,ns) !south + end do + end do + ! + ! corner value + ! + fpanel(0,0) =0.25_r8*(fpanel(0,1)+fpanel(1,0)+fpanel(-1,0)+fpanel(0,-1)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on south neighbor panel projection + ! + ! **************************************************************** + ! + ! On the south panel projection the neighbors are arragened as follow (nwest case): + ! + ! + ! \ + ! \ p + ! \ + ! \----- + ! | + ! w | s + ! | + ! + ! + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | p 0 p | p | p | p 0 p | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | w | wp0 p | p | p | p 0 p | p | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | w | w | r | r | r | r | r | i | i | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | w | i | i | i | i | i | i | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | i | i | i | i | i | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x--- + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + if (present(fotherpanel)) then + fotherpanel(1:nc+nht,1-nht:0,1) = fcube(1:nc+nht,1-nht:0) + ! + ! compute interpolated cell average values in "p" cells on Figure on above + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref=ibase(i,halo,1) + ! + ! use same weights as interpolation south from main panel (symmetric) + ! + fotherpanel(i,halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,halo),ns) + end do + end do + ! + ! compute interpolated cell average values in "w" cells on Figure on above + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=nc+halo-nhr,nc+1 + ibaseref=ibase(i,halo,2)-nc + ! + ! fotherpanel indexing follows main panel indexing + ! fcube indexing most be "rotated": + ! + ! =============================== + ! | | | + ! | W ^ | S | + ! | | | | + ! | x | | | + ! | | | | + ! ! | | + ! ! <----- | | + ! ! y | | + ! ! | | + ! =============================== + ! 
+ fotherpanel(1-halo,i-nc,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,halo),ns) + end do + end do + fotherpanel(0,1,1) = 0.25_r8*(fotherpanel(-1,1,1)+fotherpanel(1,1,1)+fotherpanel(0,2,1)+fotherpanel(0,0,1)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on west neighbor panel projection + ! + ! **************************************************************** + ! + ! On the west panel projection the neighbors are arragened as follow (seast case): + ! + ! -------- + ! | | + ! | w | p + ! | | + ! -------\ + ! \ + ! s \ + ! + ! + ! + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | i | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | i | i | e | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | i | i | r | e | e | | | | | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | i | i | r 0 e | e | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | i | i | r 0 e | e | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | i | i | r 0 e | e | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | i | i | r 0 e | e | | 0 | | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | s | s | se| e | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | s | s | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x--- + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + fotherpanel(1-nht:nc,1:nc+nht,2) = fcube(1-nht:nc,1:nc+nht) + ! + ! compute interpolated cell average values in "p" cells on Figure on above + ! + w = halo_interp_weight(:,:,:,1) ! symmetry + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref=ibase(i,halo,1) + ! + ! use same weights as interpolation south from main panel (symmetric) + ! + fotherpanel(halo,i,2) = matmul_w(w(:,i,halo),fcube(halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! compute interpolated cell average values in "s" cells on Figure on above + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=nc+halo-nhr,nc+1 + ibaseref=ibase(i,halo,2)-nc + ! + ! fotherpanel indexing follows main panel indexing + ! fcube indexing most be "rotated": + ! + ! =============================== + ! | | | + ! | W ^ | S | + ! | | | | + ! | x | | | + ! | | | | + ! ! | | + ! ! <----- | | + ! ! y | | + ! ! | | + ! =============================== + ! + fotherpanel(i-nc,1-halo,2) = matmul_w(w(:,i,halo),fcube(halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + fotherpanel(1,0,2) = 0.25_r8*(fotherpanel(0,0,2)+fotherpanel(2,0,2)+fotherpanel(1,-1,2)+fotherpanel(1,1,2)) + end if + else if (cubeboundary==seast) then + ! + ! south and east neighboring cells are on different panel + ! + ! + ! + ! 000000000000000000000000000000000000| + ! 0 | | | 0 | | | 0 0000 ! | | | | | | | | | | | | | + ! 0---------------0---------------0--\| 0 ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 |--\| ! | | | | | H | H | H | H | | | | | + ! 0---------------0---------------0--\| | ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 |--\| ! | | | | H | H | H | H | H | e | | | | + ! 
0---------------0---------------0--\| | ! x---------------x---------------x---------------x + ! 0 | | | 0 | | | 0 |--\| ! | | H | H | r | r | r | r | r | e | e | | | + ! 000000000000000000000000000000000000| | ! x---x---x---x---00000000000000000---x---x---x---x + ! 0 | | | 0 | | | 0 0000| ! | | H | H | r 0 r | r | r | r 0 e | e | | | + ! 0---------------0---------------0--\| 0 ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 |--\| ! | | H | H | r 0 r | r | r | r 0 e | e | | | + ! 0---------------0---------------0--\| | ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 |--\| ! | | H | H | r 0 r | r | r | r 0 e | e | | | + ! 0---------------0---------------0--\| | ! x---------------0---------------0---------------x + ! 0 | | | 0 | | | 0 | | ! | | H | H | r 0 r | r | r | r 0 e | e | | | + ! 000000000000000000000000000000000 |\- | ! x---x---x---x---00000000000000000---x---x---x---x + ! 0 \ \ \ 0 \ \ \ 0 | \| ! | | | s | s | s | s | s | s |s/e| e | | | + ! 0---\---\---\---0---\---\---\-----0 | ! x---------------x---------------x---------------x + ! 0 \ \ \ 0 \ \ \ 0 | ! | | | | s | s | s | s | s | s | | | | + ! 0---\---\---\---0---\---\---\-------0 ! x---------------x---------------x---------------x + ! + ! + fpanel (1-nht:nc,1:nc+nht)=fcube(1-nht:nc,1:nc+nht) + ! + ! east + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + fpanel(nc+halo,i) = matmul_w(w(:,i,halo),fcube(nc +halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! south + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref = ibase(i,halo,2) + fpanel(i,1-halo ) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,1-halo),ns) !south + end do + end do + fpanel(nc+1,0 )=0.25_r8*(& + fpanel(nc+1,1)+fpanel(nc,0)+fpanel(nc+2,0)+fpanel(nc+1,-1)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on south neighbor panel projection + ! + ! **************************************************************** + ! + ! On the south panel projection the neighbors are arragened as follow (neast case): + ! + ! + ! / + ! P / + ! / + ! ------/ + ! | | E + ! | S | + ! | | + ! + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | n 0 n | n | n | n 0 n | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | n | n 0 n | n | n | n 0 ne| e | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | i | i | r | r | r | r | r | e | e | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | i | i | i | i | i | i | e | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | i | i | i | i | i | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + if (present(fotherpanel)) then + fotherpanel(1-nht:nc,1-nht:0,1) = fcube(1-nht:nc,1-nht:0) + ! + w = halo_interp_weight(:,:,:,2) + ! + ! fill in "n" on Figure above + ! 
+ do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref = ibase(i,halo,2) + fotherpanel (i,halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1, halo),ns) + end do + end do + ! + ! fill in "e" on Figure above + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=0,nht-halo!nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + ! + ! fother panel follows indexing on main panel + ! + ! use symmetry for weights (same weights as East from main panel but for south panel + ! projection the indecies are rotated) + ! + fotherpanel (nc+halo ,1-i,1) = matmul_w(w(:,i,halo),fcube(nc+ibaseref:nc+ibaseref+ns-1,halo),ns) + end do + end do + fotherpanel(nc+1,1,1) = 0.25_r8*(fotherpanel(nc+2,1,1)+fotherpanel(nc,1,1)& + +fotherpanel(nc+1,2,1)+fotherpanel(nc+1,0,1)) + + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on east neighbor panel projection + ! + ! **************************************************************** + ! + ! On the south panel projection the neighbors are arragened as follow (neast case): + ! + ! + ! | | + ! P | E | + ! |-----| + ! / + ! / S + ! / + ! + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | i | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | w | i | i | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | w | w | r | i | i | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | | 0 | | w | w 0 r | i | i | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | w | w 0 r | i | i | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | w | w 0 r | i | i | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | w | w 0 r | i | i | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | | | | | w | ws| s | s | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | s | s | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + fotherpanel(nc+1:nc+nht,1:nc+nht,2) = fcube(nc+1:nc+nht,1:nc+nht) + ! + ! + ! fill in "w" on Figure above + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=0,nc+nh-(halo-1) + ibaseref = ibase(i,halo,1) + fotherpanel(nc+1-halo,i,2) = matmul_w(w(:,i,halo),fcube(nc+1-halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! fill in "s" on Figure above + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=nc+1-nht+halo,nc+1 + ! + ! + ! ! P | E + ! ! | + ! ! | + ! ================ + ! | | + ! | S | | <----- y + ! | | | ^ + ! | x | | | + ! | v | | + ! ! | | + ! ! -----> | x + ! ! y | + ! ! | + ! ================ + ! + ! + ! shift (since we are using south weights from main panel interpolation + ! + ibaseref = ibase(i,halo,2)-nc + ! + ! fotherpanel index: reverse + ! + ! fcube index: due to rotation (see Figure above) + ! + fotherpanel(nc+(nc+1-i),1-halo,2) = matmul_w(w(:,i,halo),fcube(nc+1-halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + fotherpanel(nc,0,2) = 0.25_r8*(fotherpanel(nc+1,0,2)+fotherpanel(nc-1,0,2)& + +fotherpanel(nc,1,2)+fotherpanel(nc,-1,2)) + end if + else if (cubeboundary==nwest) then + ! + ! + ! 0-------\---\---\---0---\---\---\---0 ! --------x---------------x---------------x + ! | 0 \ \ \ 0 \ \ \ 0 ! 
| | n | n | n | n | n | n | | | | + ! | 0-----\---\---\---0---\---\---\---0 ! --------x---------------x---------------x + ! | | 0 \ \ \ 0 \ \ \ 0 ! | w | a | n | n | n | n | n | n | | | + ! |\ | 000000000000000000000000000000000 ! --------00000000000000000---------------x + ! | -\| 0 | | | 0 | | | 0 ! | w | w 0 r | r | r | r 0 r | H | H | | + ! | |\--0---------------0---------------0 ! --------0---------------0---------------x + ! |\--| 0 | | | 0 | | | 0 ! | w | w 0 r | r | r | r 0 r | H | H | | + ! | |\--0---------------0---------------0 ! --------0---------------0---------------x + ! |\--| 0 | | | 0 | | | 0 ! | w | w 0 r | r | r | r 0 r | H | H | | + ! 0 |\--0---------------0---------------0 ! --------0---------------0---------------x + ! |0000 0 | | | 0 | | | 0 ! | w | w 0 r | r | r | r 0 r | H | H | | + ! | |000000000000000000000000000000000000 ! --------00000000000000000---------------x + ! |\--| 0 | | | 0 | | | 0 ! | w | w | r | r | r | r | r | H | H | | + ! | |\--0---------------0---------------0 ! --------x---------------x---------------x + ! |\--| 0 | | | 0 | | | 0 ! | | w | H | H | H | H | H | H | | | + ! | |\--0---------------0---------------0 ! --------x---------------x---------------x + ! |\--| 0 | | | 0 | | | 0 ! | | | H | H | H | H | H | | | | + ! 0 |\--0---------------0---------------0 ! --------x---------------x---------------x + ! 0000 0 | | | 0 | | | 0 ! | | | | | | | | | | | + ! 000000000000000000000000000000000000 ! --------x---------------x---------------x + ! + ! + ! + fpanel(1:nc+nht,1-nht:nc)=fcube(1:nc+nht,1-nht:nc) + ! + ! west + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1) + fpanel(1-halo ,i) = matmul_w(w(:,i,halo),fcube(1-halo ,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! north + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref = ibase(i,halo,2) + fpanel(i,nc+halo) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+halo ),ns) !north + end do + end do + fpanel(0 ,nc+1)=0.25_r8*(& + fpanel(0,nc)+fpanel(1,nc+1)+fpanel(-1,nc+1)+fpanel(0,nc+2)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on north neighbor panel projection + ! + ! **************************************************************** + ! + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | i | i | i | i | i | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | w | i | i | i | i | i | i | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | w | w | r | r | r | r | r | i | i | | + !x---x---x---x---00000000000000000---x---x---x---x + !| | | w | ws0 s | s | s | s 0 s | s | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | s 0 s | s | s | s 0 s | | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | 0 | | | 0 | | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | 0 | | | 0 | | | | + !x---x---x---x---00000000000000000---x---x---x---x + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + if (present(fotherpanel)) then + fotherpanel(1:nc+nht,nc+1:nc+nht,1) = fcube(1:nc+nht,nc+1:nc+nht) + ! + ! + ! fill in "s" on Figure above + ! + ! (use code from north above) + ! 
+ w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=max(halo-nh,0),nc+nh-(halo-1) + ibaseref = ibase(i,halo,2) + fotherpanel(i,nc+1-halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+1-halo ),ns) + end do + end do + ! + ! fill in "w" on Figure above + ! + ! (use code from west above) + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=nc+1-nht+halo,nc+1 + ibaseref=ibase(i,halo,1)-nc + fotherpanel(1-halo,nc-(i-(nc+1)),1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+1-halo),ns) + end do + end do + fotherpanel(0,nc,1)=0.25_r8*(& + fotherpanel(1,nc,1)+fotherpanel(-1,nc,1)+fotherpanel(0,nc+1,1)+fotherpanel(0,nc-1,1)) + + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on west neighbor panel projection + ! + ! **************************************************************** + ! + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | n | n | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | n | n | ne| e | | | | | | | + !x---x---x---x---00000000000000000---x---x---x---x + !| | i | i | r 0 e | e | | 0 | | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | i | i | r 0 e | e | | 0 | | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | i | i | r 0 e | e | | 0 | | | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | i | i | r 0 e | e | | 0 | | | | + !x---x---x---x---00000000000000000---x---x---x---x + !| | i | i | r | e | e | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | i | i | e | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | i | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x--- + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + fotherpanel(1-nht:nc,1-nht:nc,2) = fcube(1-nht:nc,1-nht:nc) + ! + ! + ! fill in "e" on Figure above + ! + ! (use code from west above) + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1) + fotherpanel(halo ,i,2) = matmul_w(w(:,i,halo),fcube(halo ,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! + ! fill in "n" on Figure above + ! + ! (use code from north above) + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=0,nht-halo + ibaseref = ibase(i,halo,2)+nc + fotherpanel(1-i,nc+halo,2) = matmul_w(w(:,i,halo),fcube(halo,ibaseref:ibaseref+ns-1),ns) !north + end do + end do + fotherpanel(1,nc+1,2)=0.25_r8*(& + fotherpanel(2,nc+1,2)+fotherpanel(0,nc+1,2)+fotherpanel(1,nc+2,2)+fotherpanel(1,nc,2)) + end if + + else if (cubeboundary==neast) then + ! + ! + ! 0---/---/---/---0---/---/---/-------0 ! x---------------x---------------x-------- + ! 0 / / / 0 / / / 0 | ! | | | | | n | n | n | n | n | | + ! 0---/---/---/---0---/---/---/-----0 | ! x---------------x---------------x-------- + ! 0 / / / 0 / / / 0 | | ! | | | | n | n | n | n | n | a | e | + ! 000000000000000000000000000000000 | | ! x---------------00000000000000000-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | H | H 0 r | r | r | r 0 e | e | + ! 0---------------0---------------0--/| | ! x---------------0---------------0-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | H | H 0 r | r | r | r 0 e | e | + ! 
0---------------0---------------0--/| | ! x---------------0---------------0-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | H | H 0 r | r | r | r 0 e | e | + ! 0---------------0---------------0--/| 0 ! x---------------0---------------0-------- + ! 0 | | | 0 | | | 0 0000| ! | | | H | H 0 r | r | r | r 0 e | e | + ! 000000000000000000000000000000000000| | ! x---------------00000000000000000-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | H | H | r | r | r | r | e | e | + ! 0---------------0---------------0--/| | ! x---------------x---------------x-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | | H | H | H | H | H | e | | + ! 0---------------0---------------0--/| | ! x---------------x---------------x-------- + ! 0 | | | 0 | | | 0 |--/| ! | | | | | H | H | H | H | | | + ! 0---------------0---------------0--/| 0 ! x---------------x---------------x-------- + ! 0 | | | 0 | | | 0 0000 ! | | | | | | | | | | | + ! 000000000000000000000000000000000000 ! x---------------x---------------x-------- + ! + ! + ! + fpanel(1-nht:nc,1-nht:nc)=fcube(1-nht:nc,1-nht:nc) + ! fotherpanel (nc+1 :nc+nht ,1-nht:nc+nht)=fcube(nc+1 :nc+nht ,1-nht:nc+nht) + ! + ! east + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1 ) + fpanel(nc+halo,i) = matmul_w(w(:,i,halo),fcube(nc +halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! north + ! + ! w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1) + fpanel(i,nc+halo) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+halo ),ns) !north + end do + end do + fpanel(nc+1,nc+1)=0.25_r8*(& + fpanel(nc,nc+1)+fpanel(nc+1,nc)+fpanel(nc+1,nc+2)+fpanel(nc+2,nc+1)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on north neighbor panel projection + ! + ! **************************************************************** + ! + ! On the north panel projection the neighbors are arragened as follow (seast case): + ! + ! + ! | | + ! | N | E + ! |-----| + ! \ + ! S \ + ! \ + ! + ! + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | | i | i | i | i | i | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | | i | i | i | i | i | i | e | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! | | i | i | r | r | r | r | r | e | e | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | s | s 0 s | s | s | s 0 se| e | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | s 0 s | s | s | s 0 s | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---0---x---x---x---0---x---x---x---x + ! | | | | 0 | | | 0 | | | | + ! x---x---x---x---00000000000000000---x---x---x---x + ! | | | | | | | | | | | | | + ! x---x---x---x---x---x---x---x---x---x---x---x---x + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + if (present(fotherpanel)) then + fotherpanel(1-nht:nc,nc+1:nc+nht,1) = fcube(1-nht:nc,nc+1:nc+nht) + ! + ! fill in "s" on Figure above + ! + ! (use north case from above and shift/reverse j-index + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1) + fotherpanel (i,nc+1-halo,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+1-halo),ns) + end do + end do + ! + ! fill in "e" on Figure above + ! 
+ w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=max(halo-nh,0),nht-halo + ibaseref=ibase(i,halo,2) +nc + ! + ! fotherpanel uses indexing of main panel's projection + ! fcube: rotated indexing + ! + fotherpanel (nc+halo,nc+i,1) = matmul_w(w(:,i,halo),fcube(ibaseref:ibaseref+ns-1,nc+1-halo),ns) + end do + end do + fotherpanel(nc+1,nc,1)=0.25_r8*(& + fotherpanel(nc+2,nc,1)+fotherpanel(nc,nc,1)+fotherpanel(nc+1,nc+1,1)+fotherpanel(nc+1,nc-1,1)) + ! + ! **************************************************************** + ! + ! fill halo for reconstruction on east neighbor panel projection + ! + ! **************************************************************** + ! + ! On the north panel projection the neighbors are arragened as follow (seast case): + ! + ! + ! \ N + ! \ + ! \------ + ! | | + ! P | E | + ! | | + ! ------- + ! + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | n | n | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | w | wn| n | n | | | + !x---x---x---x---00000000000000000---x---x---x---x + !| | | | 0 | | w | w 0 r | i | i | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | 0 | | w | w 0 r | i | i | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | 0 | | w | w 0 r | i | i | | + !x---x---x---x---0---x---x---x---0---x---x---x---x + !| | | | 0 | | w | w 0 r | i | i | | + !x---x---x---x---00000000000000000---x---x---x---x + !| | | | | | | w | w | r | i | i | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | w | i | i | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | i | | | | + !x---x---x---x---x---x---x---x---x---x---x---x---x + !| | | | | | | | | | | | | + !x---x---x---x---x---x---x---x---x---x---x---x--- + ! + ! + ! + ! fill values on same panel projection ("r" and "i" on Figure above) + ! + fotherpanel(nc+1:nc+nht,1-nht:nc,2) = fcube(nc+1:nc+nht,1-nht:nc) + ! + ! fill in "w" on Figure above + ! + ! (use east case from above and shift/reverse j-index + ! + w = halo_interp_weight(:,:,:,1) + do halo=1,nhr + do i=halo-nh,min(nc+nh-(halo-1),nc+1) + ibaseref=ibase(i,halo,1 ) + fotherpanel(nc+1-halo,i,2) = matmul_w(w(:,i,halo),fcube(nc+1-halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + ! + ! fill in "n" on Figure above + ! + w = halo_interp_weight(:,:,:,2) + do halo=1,nhr + do i=max(halo-nh,0),nht-halo + ibaseref=ibase(i,halo,2) +nc + ! + ! fotherpanel uses indexing of main panel's projection + ! fcube: rotated indexing + ! + fotherpanel (nc+i,nc+halo,2) = matmul_w(w(:,i,halo),fcube(nc+1-halo,ibaseref:ibaseref+ns-1),ns) + end do + end do + fotherpanel(nc,nc+1,2)=0.25_r8*(& + fotherpanel(nc+1,nc+1,2)+fotherpanel(nc-1,nc+1,2)+fotherpanel(nc,nc+2,2)+fotherpanel(nc,nc,2)) + end if + end if + end subroutine extend_panel_interpolate + ! + ! initialize non-existent ghost cells + ! 
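+  ! At the four cube-sphere panel corners (cubeboundary = nwest, swest, neast or
+  ! seast) the nhe x nhe corner block of ghost cells has no physical counterpart
+  ! on the sphere, so the reconstruction coefficients there are explicitly set to
+  ! zero rather than left undefined.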
+ subroutine zero_non_existent_ghost_cell(recons,irecons,cubeboundary,nc,nhe,ntrac_in) + use control_mod, only : north, south, east, west, neast, nwest, seast, swest + + integer, intent(in) :: nc,nhe,cubeboundary,irecons,ntrac_in + real (kind=r8), dimension(irecons,1-nhe:nc+nhe,1-nhe:nc+nhe,ntrac_in), intent(out):: recons + + integer :: i,j + + if (cubeboundary>0) then + if (cubeboundary==nwest) then + do j=nc+1,nc+nhe + do i=1-nhe,0 + recons(:,i,j,:) = 0.0_r8 + end do + end do + else if (cubeboundary==swest) then + do j=1-nhe,0 + do i=1-nhe,0 + recons(:,i,j,:) = 0.0_r8 + end do + end do + else if (cubeboundary==neast) then + do j=nc+1,nc+nhe + do i=nc+1,nc+nhe + recons(:,i,j,:) = 0.0_r8 + end do + end do + else if (cubeboundary==seast) then + do j=1-nhe,0 + do i=nc+1,nc+nhe + recons(:,i,j,:) = 0.0_r8 + end do + end do + end if + end if + end subroutine zero_non_existent_ghost_cell +end module fvm_reconstruction_mod diff --git a/src/dynamics/se/dycore/gbarrier.c b/src/dynamics/se/dycore/gbarrier.c new file mode 100644 index 00000000..69843778 --- /dev/null +++ b/src/dynamics/se/dycore/gbarrier.c @@ -0,0 +1,109 @@ +//: multi-level barrier code; predefined to a max of 64 threads, as below + +// We need to define the Log2 of the maximum number of threads: +#define LOG2MAX 6 +#define NTHREADS 64 + +#include +#include +#include +#include +#include +#include +#include +#include + +// utility functions: +int ipow2 (int val) { + int result = 1; + while (val > 0) { + result *= 2; + --val; + } + return result; +} + +// Define the data associated with a global barrier: +typedef struct gbt { + volatile bool LocalFlags [2][LOG2MAX]; + volatile bool *PartnerFlags [2][LOG2MAX]; + bool sense; + int parity; + int id; +} GBarrier_Type; + +// Define a singular type for the global barrier: +typedef struct gb { + GBarrier_Type threadData[NTHREADS]; + int numThreads; + int log2Threads; +} GBarrier; + +void initializeThread(GBarrier_Type *threadData, int thread, int numThreads) { + // Local loop variables: (p)arity, (r)ound and (x) [temporary] + int p, r; + unsigned int x; + + // local log2 threads: + int log2Threads = ceil(log2(numThreads)); + + threadData[thread].id = thread; + threadData[thread].sense = true; + threadData[thread].parity = 0; + + for (p = 0; p < 2; p++) { + for (r = 0; r < log2Threads; r++) { + x = (threadData[thread].id + ipow2(r)) % numThreads; + threadData[thread].LocalFlags[p][r] = 0; + threadData[thread].PartnerFlags[p][r] = &threadData[x].LocalFlags[p][r]; + } + } +} + +void gbarrier_synchronize(GBarrier* b, int thread) +{ + // Local: + int i; + + // Get the pointer to our thread's data: + GBarrier_Type *my = &b->threadData[thread]; + + // Loop through the log2 rounds: + for (i = 0; i < b->log2Threads; i++) { + *my->PartnerFlags[my->parity][i] = my->sense; + + while (my->LocalFlags[my->parity][i] != my->sense) { sched_yield(); } + } + + // Reverse the sense for reuse on parity=1 + if (my->parity == 1) { my->sense = !my->sense; } + + // Swap our parity between 0 & 1: + my->parity = 1 - my->parity; +} + +void gbarrier_initialize(GBarrier **ptb, int numThreads) { + // Local variables: + int t; + + GBarrier *b; + (*ptb) = malloc(sizeof(GBarrier)); + b = (*ptb); + + b->numThreads = numThreads; + b->log2Threads = ceil(log2(b->numThreads)); + + for (t = 0; t < b->numThreads; t++) { + initializeThread(b->threadData, t, b->numThreads); + } +} + +void gbarrier_print(GBarrier *b) { + printf("GBarrier Info: %d threads \n", b->numThreads); +} + +void gbarrier_free(GBarrier **ptb) { + GBarrier *b = 
(*ptb); + free(b); +} + diff --git a/src/dynamics/se/dycore/gbarrier_mod.F90 b/src/dynamics/se/dycore/gbarrier_mod.F90 new file mode 100644 index 00000000..94c34242 --- /dev/null +++ b/src/dynamics/se/dycore/gbarrier_mod.F90 @@ -0,0 +1,79 @@ +module gbarrier_mod + use gbarriertype_mod, only: gbarrier_t + implicit none + + integer, parameter :: LOG2MAX = 6 + integer, parameter :: MAXTHREADS = 64 + + public :: gbarrier_init + public :: gbarrier_info + public :: gbarrier + + contains + + subroutine gbarrier_init(barrier, nthreads) + type (gbarrier_t), intent(out) :: barrier + integer, intent(in) :: nthreads + + interface + subroutine gbarrier_initialize(c_barrier, nthreads) bind(C) + use, intrinsic :: ISO_C_Binding, only: C_ptr, C_int + implicit none + + type (C_ptr), intent(out) :: c_barrier + integer (C_int), intent(in), value :: nthreads + end subroutine gbarrier_initialize + end interface + + call gbarrier_initialize(barrier%c_barrier, nthreads) + end subroutine gbarrier_init + + subroutine gbarrier_delete(barrier) + type (gbarrier_t), intent(in) :: barrier + + interface + subroutine gbarrier_free(c_barrier) bind(C) + use, intrinsic :: ISO_C_Binding, only: C_ptr + implicit none + + type (C_ptr), intent(in) :: c_barrier + end subroutine gbarrier_free + end interface + + call gbarrier_free(barrier%c_barrier) + end subroutine gbarrier_delete + + subroutine gbarrier_info(barrier) + type (gbarrier_t), intent(in) :: barrier + + interface + subroutine gbarrier_print(c_barrier) bind(C) + use, intrinsic :: ISO_C_Binding, only: C_ptr + implicit none + type (C_ptr), value :: c_barrier + end subroutine gbarrier_print + end interface + + call gbarrier_print(barrier%c_barrier) + end subroutine gbarrier_info + + + subroutine gbarrier(barrier, threadID) + type (gbarrier_t), intent(in) :: barrier + integer, intent(in) :: threadID + + interface + subroutine gbarrier_synchronize(c_barrier, thread) bind(C) + use, intrinsic :: ISO_C_Binding, only: C_ptr, C_int + implicit none + + type (C_ptr), intent(in), value :: c_barrier + integer (C_int), intent(in), value :: thread + end subroutine gbarrier_synchronize + end interface + + call gbarrier_synchronize(barrier%c_barrier, threadID) + end subroutine gbarrier + +end module gbarrier_mod + diff --git a/src/dynamics/se/dycore/gbarriertype_mod.F90 b/src/dynamics/se/dycore/gbarriertype_mod.F90 new file mode 100644 index 00000000..6503c04c --- /dev/null +++ b/src/dynamics/se/dycore/gbarriertype_mod.F90 @@ -0,0 +1,8 @@ +module gbarriertype_mod + use ISO_C_Binding, only: C_ptr + + type, public :: gbarrier_t + type (C_ptr) :: c_barrier + end type gbarrier_t + +end module gbarriertype_mod diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 new file mode 100644 index 00000000..8f55f639 --- /dev/null +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -0,0 +1,1133 @@ +module global_norms_mod + + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_logfile, only: iulog + use edgetype_mod, only: EdgeBuffer_t + + implicit none + private + save + + public :: l1_snorm + public :: l2_snorm + public :: linf_snorm + + public :: l1_vnorm + public :: l2_vnorm + public :: linf_vnorm + + public :: print_cfl + public :: global_integral + public :: global_integrals_general + public :: wrap_repro_sum + + private :: global_maximum + type (EdgeBuffer_t), private :: edgebuf + +contains + + + subroutine global_integrals(elem, h,hybrid,npts,num_flds,nets,nete,I_sphere) + use hybrid_mod, only: hybrid_t + use element_mod, only: element_t + use 
dimensions_mod, only: np, nelemd + use physconst, only: pi + use parallel_mod, only: global_shared_buf, global_shared_sum + + type(element_t) , intent(in) :: elem(:) + integer , intent(in) :: npts,nets,nete,num_flds + real (kind=r8), intent(in) :: h(npts,npts,num_flds,nets:nete) + type (hybrid_t) , intent(in) :: hybrid + + real (kind=r8) :: I_sphere(num_flds) + + real (kind=r8) :: I_priv + real (kind=r8) :: I_shared + common /gblintcom/I_shared + ! + ! Local variables + ! + integer :: ie,j,i,q + + real (kind=r8) :: da + real (kind=r8) :: J_tmp(nets:nete,num_flds) + ! + ! This algorithm is independent of thread count and task count. + ! This is a requirement of consistancy checking in cam. + ! + J_tmp = 0.0_r8 + +!JMD print *,'global_integral: before loop' + do ie=nets,nete + do q=1,num_flds + do j=1,np + do i=1,np + da = elem(ie)%mp(i,j)*elem(ie)%metdet(i,j) + J_tmp(ie,q) = J_tmp(ie,q) + da*h(i,j,q,ie) + end do + end do + end do + end do + do ie=nets,nete + global_shared_buf(ie,1:num_flds) = J_tmp(ie,:) + enddo + !JMD print *,'global_integral: before wrap_repro_sum' + call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) + !JMD print *,'global_integral: after wrap_repro_sum' + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*PI) + end subroutine global_integrals + + subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere) + use hybrid_mod, only: hybrid_t + use dimensions_mod, only: nc, nelemd + use physconst, only: pi + use parallel_mod, only: global_shared_buf, global_shared_sum + + integer, intent(in) :: npts,nets,nete,num_flds + real (kind=r8), intent(in) :: h(npts,npts,num_flds,nets:nete) + type (hybrid_t), intent(in) :: hybrid + real (kind=r8), intent(in) :: da(npts,npts,nets:nete) + + real (kind=r8) :: I_sphere(num_flds) + + real (kind=r8) :: I_priv + real (kind=r8) :: I_shared + common /gblintcom/I_shared + ! + ! Local variables + ! + integer :: ie,j,i,q + + real (kind=r8) :: J_tmp(nets:nete,num_flds) + ! + ! This algorithm is independent of thread count and task count. + ! This is a requirement of consistancy checking in cam. + ! + J_tmp = 0.0_r8 + +!JMD print *,'global_integral: before loop' + do ie=nets,nete + do q=1,num_flds + do j=1,npts + do i=1,npts + J_tmp(ie,q) = J_tmp(ie,q) + da(i,j,ie)*h(i,j,q,ie) + end do + end do + end do + end do + do ie=nets,nete + global_shared_buf(ie,1:num_flds) = J_tmp(ie,:) + enddo + !JMD print *,'global_integral: before wrap_repro_sum' + call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) + !JMD print *,'global_integral: after wrap_repro_sum' + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*PI) + end subroutine global_integrals_general + + + ! ================================ + ! global_integral: + ! + ! eq 81 in Williamson, et. al. p 218 + ! for spectral elements + ! + ! ================================ + ! -------------------------- + function global_integral(elem, h,hybrid,npts,nets,nete) result(I_sphere) + use hybrid_mod, only: hybrid_t + use element_mod, only: element_t + use dimensions_mod, only: np, nelemd + use physconst, only: pi + use parallel_mod, only: global_shared_buf, global_shared_sum + + type(element_t) , intent(in) :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: h(npts,npts,nets:nete) + type (hybrid_t) , intent(in) :: hybrid + + real (kind=r8) :: I_sphere + + real (kind=r8) :: I_priv + real (kind=r8) :: I_shared + common /gblintcom/I_shared + + ! 
Local variables + + integer :: ie,j,i + real(kind=r8) :: I_tmp(1) + + real (kind=r8) :: da + real (kind=r8) :: J_tmp(nets:nete) +! +! This algorythm is independent of thread count and task count. +! This is a requirement of consistancy checking in cam. +! + J_tmp = 0.0_r8 + +!JMD print *,'global_integral: before loop' + do ie=nets,nete + do j=1,np + do i=1,np + da = elem(ie)%mp(i,j)*elem(ie)%metdet(i,j) + J_tmp(ie) = J_tmp(ie) + da*h(i,j,ie) + end do + end do + end do + do ie=nets,nete + global_shared_buf(ie,1) = J_tmp(ie) + enddo +!JMD print *,'global_integral: before wrap_repro_sum' + call wrap_repro_sum(nvars=1, comm=hybrid%par%comm) +!JMD print *,'global_integral: after wrap_repro_sum' + I_tmp = global_shared_sum(1) +!JMD print *,'global_integral: after global_shared_sum' + I_sphere = I_tmp(1)/(4.0_r8*PI) + + end function global_integral + +!------------------------------------------------------------------------------------ + + ! ================================ + ! print_cfl: + ! + ! Calculate / output CFL info + ! (both advective and based on + ! viscosity or hyperviscosity) + ! + ! ================================ + + subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& + dt_remap_actual,dt_tracer_fvm_actual,dt_tracer_se_actual,& + dt_dyn_actual,dt_dyn_visco_actual,dt_dyn_del2_actual,dt_tracer_visco_actual,dt_phys) + ! + ! estimate various CFL limits + ! also, for variable resolution viscosity coefficient, make sure + ! worse viscosity CFL (given by dtnu) is not violated by reducing + ! viscosity coefficient in regions where CFL is violated + ! + use hybrid_mod, only: hybrid_t, PrintHybrid + use element_mod, only: element_t + use dimensions_mod, only: np,ne,nelem,nelemd,nc,nhe,qsize,ntrac,nlev,large_Courant_incr + use dimensions_mod, only: nu_scale_top,nu_div_lev,nu_lev + + use quadrature_mod, only: gausslobatto, quadrature_t + + use reduction_mod, only: ParallelMin,ParallelMax + use physconst, only: ra, rearth, pi + use control_mod, only: nu, nu_div, nu_q, nu_p, nu_s, nu_top, fine_ne, rk_stage_user, max_hypervis_courant + use control_mod, only: tstep_type, hypervis_power, hypervis_scaling + use cam_abortutils, only: endrun + use parallel_mod, only: global_shared_buf, global_shared_sum + use edge_mod, only: initedgebuffer, FreeEdgeBuffer, edgeVpack, edgeVunpack + use bndry_mod, only: bndry_exchange + use time_mod, only: tstep + use mesh_mod, only: MeshUseMeshFile + use dimensions_mod, only: ksponge_end, kmvis_ref, kmcnd_ref,rho_ref + use physconst, only: cpair + + type(element_t) , intent(inout) :: elem(:) + integer , intent(in) :: nets,nete + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8), intent(in) :: dtnu, ptop, pmid(nlev) + ! + ! actual time-steps + ! + real (kind=r8), intent(in) :: dt_remap_actual,dt_tracer_fvm_actual,dt_tracer_se_actual,& + dt_dyn_actual,dt_dyn_visco_actual,dt_dyn_del2_actual, & + dt_tracer_visco_actual, dt_phys + + ! Element statisics + real (kind=r8) :: max_min_dx,min_min_dx,min_max_dx,max_unif_dx ! used for normalizing scalar HV + real (kind=r8) :: max_normDinv, min_normDinv ! 
used for CFL + real (kind=r8) :: min_area, max_area,max_ratio !min/max element area + real (kind=r8) :: avg_area, avg_min_dx + real (kind=r8) :: min_hypervis, max_hypervis, avg_hypervis, stable_hv + real (kind=r8) :: normDinv_hypervis + real (kind=r8) :: x, y, noreast, nw, se, sw + real (kind=r8), dimension(np,np,nets:nete) :: zeta + real (kind=r8) :: lambda_max, lambda_vis, min_gw, lambda,umax, ugw + real (kind=r8) :: press,scale1,scale2,scale3, max_laplace + integer :: ie,corner, i, j, rowind, colind, k + type (quadrature_t) :: gp + character(LEN=256) :: rk_str + + real (kind=r8) :: s_laplacian, s_hypervis, s_rk, s_rk_tracer !Stability region + real (kind=r8) :: dt_max_adv, dt_max_gw, dt_max_tracer_se, dt_max_tracer_fvm + real (kind=r8) :: dt_max_hypervis, dt_max_hypervis_tracer, dt_max_laplacian_top + + real(kind=r8) :: I_sphere + real(kind=r8) :: h(np,np,nets:nete) + + + + ! Eigenvalues calculated by folks at UMich (Paul U & Jared W) + select case (np) + case (2) + lambda_max = 0.5_r8 + lambda_vis = 0.0_r8 ! need to compute this + case (3) + lambda_max = 1.5_r8 + lambda_vis = 12.0_r8 + case (4) + lambda_max = 2.74_r8 + lambda_vis = 30.0_r8 + case (5) + lambda_max = 4.18_r8 + lambda_vis = 91.6742_r8 + case (6) + lambda_max = 5.86_r8 + lambda_vis = 190.1176_r8 + case (7) + lambda_max = 7.79_r8 + lambda_vis = 374.7788_r8 + case (8) + lambda_max = 10.0_r8 + lambda_vis = 652.3015_r8 + case DEFAULT + lambda_max = 0.0_r8 + lambda_vis = 0.0_r8 + end select + + if ((lambda_max.eq.0_r8).and.(hybrid%masterthread)) then + print*, "lambda_max not calculated for NP = ",np + print*, "Estimate of gravity wave timestep will be incorrect" + end if + if ((lambda_vis.eq.0_r8).and.(hybrid%masterthread)) then + print*, "lambda_vis not calculated for NP = ",np + print*, "Estimate of viscous CFLs will be incorrect" + end if + + do ie=nets,nete + elem(ie)%variable_hyperviscosity = 1.0_r8 + end do + + gp=gausslobatto(np) + min_gw = minval(gp%weights) + ! + !****************************************************************************************** + ! + ! compute some local and global grid metrics + ! + !****************************************************************************************** + ! + h(:,:,nets:nete)=1.0_r8 + ! 
Calculate surface area by integrating 1.0_r8 over sphere and dividing by 4*PI (Should be 1) + I_sphere = global_integral(elem, h(:,:,nets:nete),hybrid,np,nets,nete) + + min_normDinv = 1E99_r8 + max_normDinv = 0 + min_max_dx = 1E99_r8 + min_min_dx = 1E99_r8 + max_min_dx = 0 + min_area = 1E99_r8 + max_area = 0 + max_ratio = 0 + do ie=nets,nete + max_normDinv = max(max_normDinv,elem(ie)%normDinv) + min_normDinv = min(min_normDinv,elem(ie)%normDinv) + min_min_dx = min(min_min_dx,elem(ie)%dx_short) + max_min_dx = max(max_min_dx,elem(ie)%dx_short) + min_max_dx = min(min_max_dx,elem(ie)%dx_long) + + elem(ie)%area = sum(elem(ie)%spheremp(:,:)) + min_area = min(min_area,elem(ie)%area) + max_area = max(max_area,elem(ie)%area) + max_ratio = max(max_ratio,elem(ie)%dx_long/elem(ie)%dx_short) + + global_shared_buf(ie,1) = elem(ie)%area + global_shared_buf(ie,2) = elem(ie)%dx_short + enddo + call wrap_repro_sum(nvars=2, comm=hybrid%par%comm) + avg_area = global_shared_sum(1)/dble(nelem) + avg_min_dx = global_shared_sum(2)/dble(nelem) + + min_area = ParallelMin(min_area,hybrid) + max_area = ParallelMax(max_area,hybrid) + min_normDinv = ParallelMin(min_normDinv,hybrid) + max_normDinv = ParallelMax(max_normDinv,hybrid) + min_min_dx = ParallelMin(min_min_dx,hybrid) + max_min_dx = ParallelMax(max_min_dx,hybrid) + min_max_dx = ParallelMin(min_max_dx,hybrid) + max_ratio = ParallelMax(max_ratio,hybrid) + ! Physical units for area + min_area = min_area*rearth*rearth/1000000._r8 + max_area = max_area*rearth*rearth/1000000._r8 + avg_area = avg_area*rearth*rearth/1000000._r8 + if (hybrid%masterthread) then + write(iulog,* )"" + write(iulog,* )"Running Global Integral Diagnostic..." + write(iulog,*)"Area of unit sphere is",I_sphere + write(iulog,*)"Should be 1.0 to round off..." + write(iulog,'(a,f9.3)') 'Element area: max/min',(max_area/min_area) + if (.not.MeshUseMeshFile) then + write(iulog,'(a,f6.3,f8.2)') "Average equatorial node spacing (deg, km) = ", & + dble(90)/dble(ne*(np-1)), PI*rearth/(2000.0_r8*dble(ne*(np-1))) + end if + write(iulog,'(a,2f9.3)') 'norm of Dinv (min, max): ', min_normDinv, max_normDinv + write(iulog,'(a,1f8.2)') 'Max Dinv-based element distortion: ', max_ratio + write(iulog,'(a,3f8.2)') 'dx based on Dinv svd: ave,min,max = ', avg_min_dx, min_min_dx, max_min_dx + write(iulog,'(a,3f8.2)') "dx based on sqrt element area: ave,min,max = ", & + sqrt(avg_area)/(np-1),sqrt(min_area)/(np-1),sqrt(max_area)/(np-1) + end if + + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! SCALAR, RESOLUTION-AWARE HYPERVISCOSITY + ! this block of code initializes the variable_hyperviscsoity() array + ! based on largest length scale in each element and user specified scaling + ! it then limits the coefficient if the user specifed a max CFL + ! this limiting is based on the smallest length scale of each element + ! since that controls the CFL. + ! Mike Levy + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + if (hypervis_power /= 0) then + + min_hypervis = 1d99 + max_hypervis = 0 + avg_hypervis = 0 + + + max_unif_dx = min_max_dx ! use this for average resolution, unless: + ! viscosity in namelist specified for smallest element: + if (fine_ne>0) then + ! viscosity in namelist specified for regions with a resolution + ! equivilant to a uniform grid with ne=fine_ne + if (np /= 4 ) call endrun('ERROR: setting fine_ne only supported with NP=4') + max_unif_dx = (111.28_r8*30)/dble(fine_ne) ! in km + endif + + ! + ! 
note: if L = eigenvalue of metinv, then associated length scale (km) is + ! dx = 1.0_r8/( sqrt(L)*0.5_r8*dble(np-1)*ra*1000.0_r8) + ! + ! for viscosity *tensor*, we take at each point: + ! nu1 = nu*(dx1/max_unif_dx)**3.2 dx1 associated with eigenvalue 1 + ! nu2 = nu*(dx2/max_unif_dx)**3.2 dx2 associated with eigenvalue 2 + ! with this approach: + ! - with this formula, no need to adjust for CFL violations + ! - if nu comes from a 3.2 scaling that is stable for coarse and fine resolutions, + ! this formulat will be stable. + ! - gives the correct answer in long skinny rectangles: + ! large viscosity in the long direction, small viscosity in the short direction + ! + normDinv_hypervis = 0 + do ie=nets,nete + ! variable viscosity based on map from ulatlon -> ucontra + + ! dx_long + elem(ie)%variable_hyperviscosity = sqrt((elem(ie)%dx_long/max_unif_dx) ** hypervis_power) + elem(ie)%hv_courant = dtnu*(elem(ie)%variable_hyperviscosity(1,1)**2) * & + (lambda_vis**2) * ((ra*elem(ie)%normDinv)**4) + + ! Check to see if this is stable + if (elem(ie)%hv_courant.gt.max_hypervis_courant) then + stable_hv = sqrt( max_hypervis_courant / & + ( dtnu * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 ) ) + +#if 0 + ! Useful print statements for debugging the adjustments to hypervis + print*, "Adjusting hypervis on elem ", elem(ie)%GlobalId + print*, "From ", nu*elem(ie)%variable_hyperviscosity(1,1)**2, " to ", nu*stable_hv + print*, "Difference = ", nu*(/elem(ie)%variable_hyperviscosity(1,1)**2-stable_hv/) + print*, "Factor of ", elem(ie)%variable_hyperviscosity(1,1)**2/stable_hv + print*, " " +#endif + ! make sure that: elem(ie)%hv_courant <= max_hypervis_courant + elem(ie)%variable_hyperviscosity = stable_hv + elem(ie)%hv_courant = dtnu*(stable_hv**2) * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 + end if + normDinv_hypervis = max(normDinv_hypervis, elem(ie)%hv_courant/dtnu) + + min_hypervis = min(min_hypervis, elem(ie)%variable_hyperviscosity(1,1)) + max_hypervis = max(max_hypervis, elem(ie)%variable_hyperviscosity(1,1)) + global_shared_buf(ie,1) = elem(ie)%variable_hyperviscosity(1,1) + end do + + min_hypervis = ParallelMin(min_hypervis, hybrid) + max_hypervis = ParallelMax(max_hypervis, hybrid) + call wrap_repro_sum(nvars=1, comm=hybrid%par%comm) + avg_hypervis = global_shared_sum(1)/dble(nelem) + + normDinv_hypervis = ParallelMax(normDinv_hypervis, hybrid) + + ! apply DSS (aka assembly procedure) to variable_hyperviscosity (makes continuous) + call initEdgeBuffer(hybrid%par,edgebuf,elem,1) + do ie=nets,nete + zeta(:,:,ie) = elem(ie)%variable_hyperviscosity(:,:)*elem(ie)%spheremp(:,:) + call edgeVpack(edgebuf,zeta(1,1,ie),1,0,ie) + end do + call bndry_exchange(hybrid,edgebuf,location='print_cfl #1') + do ie=nets,nete + call edgeVunpack(edgebuf,zeta(1,1,ie),1,0,ie) + elem(ie)%variable_hyperviscosity(:,:) = zeta(:,:,ie)*elem(ie)%rspheremp(:,:) + end do + call FreeEdgeBuffer(edgebuf) + + ! replace hypervis w/ bilinear based on continuous corner values + do ie=nets,nete + noreast = elem(ie)%variable_hyperviscosity(np,np) + nw = elem(ie)%variable_hyperviscosity(1,np) + se = elem(ie)%variable_hyperviscosity(np,1) + sw = elem(ie)%variable_hyperviscosity(1,1) + do i=1,np + x = gp%points(i) + do j=1,np + y = gp%points(j) + elem(ie)%variable_hyperviscosity(i,j) = 0.25_r8*( & + (1.0_r8-x)*(1.0_r8-y)*sw + & + (1.0_r8-x)*(y+1.0_r8)*nw + & + (x+1.0_r8)*(1.0_r8-y)*se + & + (x+1.0_r8)*(y+1.0_r8)*noreast) + end do + end do + end do + else if (hypervis_scaling/=0) then + ! tensorHV. 
New eigenvalues are the eigenvalues of the tensor V + ! formulas here must match what is in cube_mod.F90 + ! for tensorHV, we scale out the rearth dependency + lambda = max_normDinv**2 + normDinv_hypervis = (lambda_vis**2) * (max_normDinv**4) * & + (lambda**(-hypervis_scaling/2) ) + else + ! constant coefficient formula: + normDinv_hypervis = (lambda_vis**2) * (ra*max_normDinv)**4 + endif + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! TENSOR, RESOLUTION-AWARE HYPERVISCOSITY + ! The tensorVisc() array is computed in cube_mod.F90 + ! this block of code will DSS it so the tensor if C0 + ! and also make it bilinear in each element. + ! Oksana Guba + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + if (hypervis_scaling /= 0) then + + call initEdgeBuffer(hybrid%par,edgebuf,elem,1) + do rowind=1,2 + do colind=1,2 + do ie=nets,nete + zeta(:,:,ie) = elem(ie)%tensorVisc(:,:,rowind,colind)*elem(ie)%spheremp(:,:) + call edgeVpack(edgebuf,zeta(1,1,ie),1,0,ie) + end do + + call bndry_exchange(hybrid,edgebuf) + do ie=nets,nete + call edgeVunpack(edgebuf,zeta(1,1,ie),1,0,ie) + elem(ie)%tensorVisc(:,:,rowind,colind) = zeta(:,:,ie)*elem(ie)%rspheremp(:,:) + end do + enddo !rowind + enddo !colind + call FreeEdgeBuffer(edgebuf) + + !IF BILINEAR MAP OF V NEEDED + + do rowind=1,2 + do colind=1,2 + ! replace hypervis w/ bilinear based on continuous corner values + do ie=nets,nete + noreast = elem(ie)%tensorVisc(np,np,rowind,colind) + nw = elem(ie)%tensorVisc(1,np,rowind,colind) + se = elem(ie)%tensorVisc(np,1,rowind,colind) + sw = elem(ie)%tensorVisc(1,1,rowind,colind) + do i=1,np + x = gp%points(i) + do j=1,np + y = gp%points(j) + elem(ie)%tensorVisc(i,j,rowind,colind) = 0.25_r8*( & + (1.0_r8-x)*(1.0_r8-y)*sw + & + (1.0_r8-x)*(y+1.0_r8)*nw + & + (x+1.0_r8)*(1.0_r8-y)*se + & + (x+1.0_r8)*(y+1.0_r8)*noreast) + end do + end do + end do + enddo !rowind + enddo !colind + endif + deallocate(gp%points) + deallocate(gp%weights) + + call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu_p ,1.0_r8 ,'_p ') + call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu ,0.5_r8,' ') + if (ptop>100.0_r8) then + ! + ! CAM setting + ! + call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu_div,2.5_r8 ,'_div') + nu_div_lev(:) = nu_div + nu_lev(:) = nu + else + ! + ! WACCM setting + ! + call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu_div,2.5_r8 ,'_div') + if (hybrid%masterthread) write(iulog,*) ": sponge layer viscosity scaling factor" + do k=1,nlev + press = pmid(k) + + scale1 = 0.5_r8*(1.0_r8+tanh(2.0_r8*log(100.0_r8/press))) + nu_div_lev(k) = (1.0_r8-scale1)*nu_div+scale1*2.0_r8*nu_div + nu_div_lev(k) = nu_div + nu_lev(k) = (1.0_r8-scale1)*nu +scale1*nu_p + nu_lev(k) = nu + if (hybrid%masterthread) write(iulog,*) "nu_lev=",k,nu_lev(k) + if (hybrid%masterthread) write(iulog,*) "nu_div_lev=",k,nu_div_lev(k) + end do + end if + + if (nu_q<0) nu_q = nu_p ! necessary for consistency + if (nu_s<0) nu_s = nu_p ! temperature damping is always equal to nu_p + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! time-step information + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! S=time-step stability region (i.e. advection w/leapfrog: S=1, viscosity w/forward Euler: S=2) + ! 
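+    ! S_rk below is that stability bound for the Runge-Kutta method selected by
+    ! tstep_type.  The advective and gravity-wave estimates further down take the
+    ! form dt_max = S/(c*max_normDinv*lambda_max*ra), with c the assumed maximum
+    ! signal speed (umax or ugw), while the sponge del2 and hyperviscosity limits
+    ! scale with lambda_vis*(ra*max_normDinv)**2 and lambda_vis**2*(ra*max_normDinv)**4,
+    ! respectively, as expected for forward-Euler Laplacian and del4 operators.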
+ if (tstep_type==1) then + S_rk = 2.0_r8 + rk_str = ' * RK2-SSP 3 stage (same as tracers)' + elseif (tstep_type==2) then + S_rk = 2.0_r8 + rk_str = ' * classic RK3' + elseif (tstep_type==3) then + S_rk = 2.0_r8 + rk_str = ' * Kinnmark&Gray RK4' + elseif (tstep_type==4) then + S_rk = 3.0_r8 + rk_str = ' * Kinnmark&Gray RK3 5 stage (3rd order)' + end if + if (hybrid%masterthread) then + write(iulog,'(a,f12.8,a)') 'Model top is ',ptop,'Pa' + write(iulog,'(a)') ' ' + write(iulog,'(a)') 'Timestepping methods used in dynamical core:' + write(iulog,'(a)') + write(iulog,*) rk_str + write(iulog,'(a)') ' * Spectral-element advection uses SSP preservation RK3' + write(iulog,'(a)') ' * Viscosity operators use forward Euler' + if (ntrac>0) then + write(iulog,'(a)') ' * CSLAM uses two time-levels backward trajectory method' + end if + end if + S_laplacian = 2.0_r8 !using forward Euler for sponge diffusion + S_hypervis = 2.0_r8 !using forward Euler for hyperviscosity + S_rk_tracer = 2.0_r8 + ! + ! estimate max winds + ! + if (ptop>100.0_r8) then + umax = 120.0_r8 + else + umax = 400.0_r8 + end if + ugw = 342.0_r8 !max gravity wave speed + + dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*ra) + dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*ra) + dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*ra) + if (ntrac>0) then + if (large_Courant_incr) then + dt_max_tracer_fvm = dble(nhe)*(4.0_r8*pi*Rearth/dble(4.0_r8*ne*nc))/umax + else + dt_max_tracer_fvm = dble(nhe)*(2.0_r8*pi*Rearth/dble(4.0_r8*ne*nc))/umax + end if + else + dt_max_tracer_fvm = -1.0_r8 + end if + dt_max_hypervis = s_hypervis/(MAX(MAXVAL(nu_div_lev(:)),MAXVAL(nu_lev(:)))*normDinv_hypervis) + dt_max_hypervis_tracer = s_hypervis/(nu_q*normDinv_hypervis) + + max_laplace = MAX(MAXVAL(nu_scale_top(:))*nu_top,MAXVAL(kmvis_ref(:)/rho_ref(:))) + max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(cpair*rho_ref(:)))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) + + if (hybrid%masterthread) then + write(iulog,'(a,f10.2,a)') ' ' + write(iulog,'(a,f10.2,a)') 'Estimates for maximum stable and actual time-steps for different aspects of algorithm:' + write(iulog,'(a,f12.8,a)') '(assume max wind is ',umax,'m/s)' + write(iulog,'(a)') '(assume max gravity wave speed is 342m/s)' + write(iulog,'(a,f10.2,a)') ' ' + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_dyn (time-stepping dycore ; u,v,T,dM) < ',& + MIN(dt_max_adv,dt_max_gw),'s ',dt_dyn_actual,'s' + if (dt_dyn_actual>MIN(dt_max_adv,dt_max_gw)) write(iulog,*) 'WARNING: dt_dyn theoretically unstable' + + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_dyn_vis (hyperviscosity) ; u,v,T,dM) < ',dt_max_hypervis,& + 's ',dt_dyn_visco_actual,'s' + if (dt_dyn_visco_actual>dt_max_hypervis) write(iulog,*) 'WARNING: dt_dyn_vis theoretically unstable' + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_se (time-stepping tracers ; q ) < ',dt_max_tracer_se,'s ',& + dt_tracer_se_actual,'s' + if (dt_tracer_se_actual>dt_max_tracer_se) write(iulog,*) 'WARNING: dt_tracer_se theoretically unstable' + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_vis (hyperviscosity tracers; q ) < ',dt_max_hypervis_tracer,'s',& + dt_tracer_visco_actual,'s' + if (dt_tracer_visco_actual>dt_max_hypervis_tracer) write(iulog,*) 'WARNING: dt_tracer_hypervis theoretically unstable' + + if (ntrac>0) then + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_fvm (time-stepping tracers ; q ) < ',dt_max_tracer_fvm,& + 's ',dt_tracer_fvm_actual + if (dt_tracer_fvm_actual>dt_max_tracer_fvm) write(iulog,*) 'WARNING: dt_tracer_fvm 
theortically unstable' + end if + write(iulog,'(a,f10.2)') '* dt_remap (vertical remap dt) ',dt_remap_actual + do k=1,ksponge_end + max_laplace = MAX(nu_scale_top(k)*nu_top,kmvis_ref(k)/rho_ref(k)) + max_laplace = MAX(max_laplace,kmcnd_ref(k)/(cpair*rho_ref(k))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) + + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt (del2 sponge ; u,v,T,dM) < ',& + dt_max_laplacian_top,'s',dt_dyn_del2_actual,'s' + if (dt_dyn_del2_actual>dt_max_laplacian_top) & + write(iulog,*) 'WARNING: theoretically unstable in sponge; increase se_hypervis_subcycle_sponge' + end do + write(iulog,*) ' ' + if (hypervis_power /= 0) then + write(iulog,'(a,3e11.4)')'Scalar hyperviscosity (dynamics): ave,min,max = ', & + nu*(/avg_hypervis**2,min_hypervis**2,max_hypervis**2/) + end if + write(iulog,*) 'tstep_type = ',tstep_type + end if + end subroutine print_cfl + + ! + ! ============================ + ! global_maximum: + ! + ! Find global maximum on sphere + ! + ! ================================ + + function global_maximum(h,hybrid,npts,nets,nete) result(Max_sphere) + + use hybrid_mod, only : hybrid_t + use reduction_mod, only : red_max, pmax_mt + + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: h(npts,npts,nets:nete) + type (hybrid_t) , intent(in) :: hybrid + + real (kind=r8) :: Max_sphere + + ! Local variables + + real (kind=r8) :: redp(1) + + Max_sphere = MAXVAL(h(:,:,nets:nete)) + + redp(1) = Max_sphere + call pmax_mt(red_max,redp,1,hybrid) + Max_sphere = red_max%buf(1) + + end function global_maximum + + ! ========================================================== + ! l1_snorm: + ! + ! computes the l1 norm per Williamson et al, p. 218 eq(8) + ! for a scalar quantity + ! =========================================================== + + function l1_snorm(elem, h,ht,hybrid,npts,nets,nete) result(l1) + + use element_mod, only : element_t + use hybrid_mod, only : hybrid_t + + type(element_t) , intent(in) :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: h(npts,npts,nets:nete) ! computed soln + real (kind=r8), intent(in) :: ht(npts,npts,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: l1 + + ! Local variables + + real (kind=r8) :: dhabs(npts,npts,nets:nete) + real (kind=r8) :: htabs(npts,npts,nets:nete) + real (kind=r8) :: dhabs_int + real (kind=r8) :: htabs_int + integer i,j,ie + + do ie=nets,nete + do j=1,npts + do i=1,npts + dhabs(i,j,ie) = ABS(h(i,j,ie)-ht(i,j,ie)) + htabs(i,j,ie) = ABS(ht(i,j,ie)) + end do + end do + end do + + dhabs_int = global_integral(elem, dhabs(:,:,nets:nete),hybrid,npts,nets,nete) + htabs_int = global_integral(elem, htabs(:,:,nets:nete),hybrid,npts,nets,nete) + + l1 = dhabs_int/htabs_int + + end function l1_snorm + + ! =========================================================== + ! l1_vnorm: + ! + ! computes the l1 norm per Williamson et al, p. 218 eq(97), + ! for a contravariant vector quantity on the velocity grid. + ! + ! =========================================================== + + function l1_vnorm(elem, v,vt,hybrid,npts,nets,nete) result(l1) + use element_mod, only : element_t + use hybrid_mod, only : hybrid_t + + type(element_t) , intent(in), target :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: v(npts,npts,2,nets:nete) ! computed soln + real (kind=r8), intent(in) :: vt(npts,npts,2,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: l1 + + ! 
Local variables + + real (kind=r8), dimension(:,:,:,:), pointer :: met + real (kind=r8) :: dvsq(npts,npts,nets:nete) + real (kind=r8) :: vtsq(npts,npts,nets:nete) + real (kind=r8) :: dvco(npts,npts,2) ! covariant velocity + real (kind=r8) :: vtco(npts,npts,2) ! covariant velocity + real (kind=r8) :: dv1,dv2 + real (kind=r8) :: vt1,vt2 + real (kind=r8) :: dvsq_int + real (kind=r8) :: vtsq_int + + integer i,j,ie + + do ie=nets,nete + met => elem(ie)%met + do j=1,npts + do i=1,npts + + dv1 = v(i,j,1,ie)-vt(i,j,1,ie) + dv2 = v(i,j,2,ie)-vt(i,j,2,ie) + + vt1 = vt(i,j,1,ie) + vt2 = vt(i,j,2,ie) + + dvco(i,j,1) = met(i,j,1,1)*dv1 + met(i,j,1,2)*dv2 + dvco(i,j,2) = met(i,j,2,1)*dv1 + met(i,j,2,2)*dv2 + + vtco(i,j,1) = met(i,j,1,1)*vt1 + met(i,j,1,2)*vt2 + vtco(i,j,2) = met(i,j,2,1)*vt1 + met(i,j,2,2)*vt2 + + dvsq(i,j,ie) = SQRT(dvco(i,j,1)*dv1 + dvco(i,j,2)*dv2) + vtsq(i,j,ie) = SQRT(vtco(i,j,1)*vt1 + vtco(i,j,2)*vt2) + + end do + end do + end do + + dvsq_int = global_integral(elem, dvsq(:,:,nets:nete),hybrid,npts,nets,nete) + vtsq_int = global_integral(elem, vtsq(:,:,nets:nete),hybrid,npts,nets,nete) + + l1 = dvsq_int/vtsq_int + + end function l1_vnorm + + ! ========================================================== + ! l2_snorm: + ! + ! computes the l2 norm per Williamson et al, p. 218 eq(83) + ! for a scalar quantity on the pressure grid. + ! + ! =========================================================== + + function l2_snorm(elem, h,ht,hybrid,npts,nets,nete) result(l2) + use element_mod, only : element_t + use hybrid_mod, only : hybrid_t + + type(element_t), intent(in) :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: h(npts,npts,nets:nete) ! computed soln + real (kind=r8), intent(in) :: ht(npts,npts,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: l2 + + ! Local variables + + real (kind=r8) :: dh2(npts,npts,nets:nete) + real (kind=r8) :: ht2(npts,npts,nets:nete) + real (kind=r8) :: dh2_int + real (kind=r8) :: ht2_int + integer i,j,ie + + do ie=nets,nete + do j=1,npts + do i=1,npts + dh2(i,j,ie)=(h(i,j,ie)-ht(i,j,ie))**2 + ht2(i,j,ie)=ht(i,j,ie)**2 + end do + end do + end do + + dh2_int = global_integral(elem,dh2(:,:,nets:nete),hybrid,npts,nets,nete) + ht2_int = global_integral(elem,ht2(:,:,nets:nete),hybrid,npts,nets,nete) + + l2 = SQRT(dh2_int)/SQRT(ht2_int) + + end function l2_snorm + + ! ========================================================== + ! l2_vnorm: + ! + ! computes the l2 norm per Williamson et al, p. 219 eq(98) + ! for a contravariant vector quantity on the velocity grid. + ! + ! =========================================================== + + function l2_vnorm(elem, v,vt,hybrid,npts,nets,nete) result(l2) + use element_mod, only : element_t + use hybrid_mod, only : hybrid_t + + type(element_t) , intent(in), target :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: v(npts,npts,2,nets:nete) ! computed soln + real (kind=r8), intent(in) :: vt(npts,npts,2,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: l2 + + ! Local variables + + real (kind=r8), dimension(:,:,:,:), pointer :: met + real (kind=r8) :: dvsq(npts,npts,nets:nete) + real (kind=r8) :: vtsq(npts,npts,nets:nete) + real (kind=r8) :: dvco(npts,npts,2) ! covariant velocity + real (kind=r8) :: vtco(npts,npts,2) ! 
covariant velocity + real (kind=r8) :: dv1,dv2 + real (kind=r8) :: vt1,vt2 + real (kind=r8) :: dvsq_int + real (kind=r8) :: vtsq_int + integer i,j,ie + + do ie=nets,nete + met => elem(ie)%met + do j=1,npts + do i=1,npts + + dv1 = v(i,j,1,ie)-vt(i,j,1,ie) + dv2 = v(i,j,2,ie)-vt(i,j,2,ie) + + vt1 = vt(i,j,1,ie) + vt2 = vt(i,j,2,ie) + + dvco(i,j,1) = met(i,j,1,1)*dv1 + met(i,j,1,2)*dv2 + dvco(i,j,2) = met(i,j,2,1)*dv1 + met(i,j,2,2)*dv2 + + vtco(i,j,1) = met(i,j,1,1)*vt1 + met(i,j,1,2)*vt2 + vtco(i,j,2) = met(i,j,2,1)*vt1 + met(i,j,2,2)*vt2 + + dvsq(i,j,ie) = dvco(i,j,1)*dv1 + dvco(i,j,2)*dv2 + vtsq(i,j,ie) = vtco(i,j,1)*vt1 + vtco(i,j,2)*vt2 + + end do + end do + end do + + dvsq_int = global_integral(elem, dvsq(:,:,nets:nete),hybrid,npts,nets,nete) + vtsq_int = global_integral(elem, vtsq(:,:,nets:nete),hybrid,npts,nets,nete) + + l2 = SQRT(dvsq_int)/SQRT(vtsq_int) + + end function l2_vnorm + + ! ========================================================== + ! linf_snorm: + ! + ! computes the l infinity norm per Williamson et al, p. 218 eq(84) + ! for a scalar quantity on the pressure grid... + ! + ! =========================================================== + + function linf_snorm(h,ht,hybrid,npts,nets,nete) result(linf) + use hybrid_mod, only : hybrid_t + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: h(npts,npts,nets:nete) ! computed soln + real (kind=r8), intent(in) :: ht(npts,npts,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: linf + + ! Local variables + + real (kind=r8) :: dhabs(npts,npts,nets:nete) + real (kind=r8) :: htabs(npts,npts,nets:nete) + real (kind=r8) :: dhabs_max + real (kind=r8) :: htabs_max + integer i,j,ie + + do ie=nets,nete + do j=1,npts + do i=1,npts + dhabs(i,j,ie)=ABS(h(i,j,ie)-ht(i,j,ie)) + htabs(i,j,ie)=ABS(ht(i,j,ie)) + end do + end do + end do + + dhabs_max = global_maximum(dhabs(:,:,nets:nete),hybrid,npts,nets,nete) + htabs_max = global_maximum(htabs(:,:,nets:nete),hybrid,npts,nets,nete) + + linf = dhabs_max/htabs_max + + end function linf_snorm + + + ! ========================================================== + ! linf_vnorm: + ! + ! computes the linf norm per Williamson et al, p. 218 eq(99), + ! for a contravariant vector quantity on the velocity grid. + ! + ! =========================================================== + + function linf_vnorm(elem,v,vt,hybrid,npts,nets,nete) result(linf) + use hybrid_mod, only : hybrid_t + use element_mod, only : element_t + + type(element_t) , intent(in), target :: elem(:) + integer , intent(in) :: npts,nets,nete + real (kind=r8), intent(in) :: v(npts,npts,2,nets:nete) ! computed soln + real (kind=r8), intent(in) :: vt(npts,npts,2,nets:nete) ! true soln + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8) :: linf + + ! Local variables + + real (kind=r8), dimension(:,:,:,:), pointer :: met + real (kind=r8) :: dvsq(npts,npts,nets:nete) + real (kind=r8) :: vtsq(npts,npts,nets:nete) + real (kind=r8) :: dvco(npts,npts,2) ! covariant velocity + real (kind=r8) :: vtco(npts,npts,2) ! 
covariant velocity + real (kind=r8) :: dv1,dv2 + real (kind=r8) :: vt1,vt2 + real (kind=r8) :: dvsq_max + real (kind=r8) :: vtsq_max + integer i,j,ie + + do ie=nets,nete + met => elem(ie)%met + + do j=1,npts + do i=1,npts + + dv1 = v(i,j,1,ie)-vt(i,j,1,ie) + dv2 = v(i,j,2,ie)-vt(i,j,2,ie) + + vt1 = vt(i,j,1,ie) + vt2 = vt(i,j,2,ie) + + dvco(i,j,1) = met(i,j,1,1)*dv1 + met(i,j,1,2)*dv2 + dvco(i,j,2) = met(i,j,2,1)*dv1 + met(i,j,2,2)*dv2 + + vtco(i,j,1) = met(i,j,1,1)*vt1 + met(i,j,1,2)*vt2 + vtco(i,j,2) = met(i,j,2,1)*vt1 + met(i,j,2,2)*vt2 + + dvsq(i,j,ie) = SQRT(dvco(i,j,1)*dv1 + dvco(i,j,2)*dv2) + vtsq(i,j,ie) = SQRT(vtco(i,j,1)*vt1 + vtco(i,j,2)*vt2) + + end do + end do + end do + + dvsq_max = global_maximum(dvsq(:,:,nets:nete),hybrid,npts,nets,nete) + vtsq_max = global_maximum(vtsq(:,:,nets:nete),hybrid,npts,nets,nete) + + linf = dvsq_max/vtsq_max + + end function linf_vnorm + + subroutine wrap_repro_sum (nvars, comm, nsize) + use dimensions_mod, only: nelemd + use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc + use cam_abortutils, only: endrun + use parallel_mod, only: global_shared_buf, global_shared_sum, nrepro_vars + + integer :: nvars ! number of variables to be summed (cannot exceed nrepro_vars) + integer :: comm ! mpi communicator + integer, optional :: nsize ! local buffer size (defaults to nelemd - number of elements in mpi task) + + integer nsize_use + + if (present(nsize)) then + nsize_use = nsize + else + nsize_use = nelemd + endif + if (nvars .gt. nrepro_vars) call endrun('ERROR: repro_sum_buffer_size exceeded') + +! Repro_sum contains its own OpenMP, so only one thread should call it (AAM) + +!$OMP BARRIER +!$OMP MASTER + + call repro_sum(global_shared_buf, global_shared_sum, nsize_use, nelemd, nvars, commid=comm) + + +!$OMP END MASTER +!$OMP BARRIER + + end subroutine wrap_repro_sum + + subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu,factor,str) + use physconst, only: rearth + use control_mod, only: hypervis_scaling,hypervis_power + use hybrid_mod, only: hybrid_t + use cam_abortutils, only: endrun + + type (hybrid_t), intent(in) :: hybrid + integer , intent(in) :: ne + real (kind=r8), intent(in) :: max_min_dx,min_min_dx,factor + real (kind=r8), intent(inout) :: nu + character(len=4), intent(in) :: str + + real(r8) :: uniform_res_hypervis_scaling,nu_fac + real(kind=r8) :: nu_min, nu_max + ! + !************************************************************************************************************ + ! + ! automatically set viscosity coefficients + ! + ! + ! Use scaling from + ! + ! - Boville, B. A., 1991: Sensitivity of simulated climate to + ! model resolution. J. Climate, 4, 469-485. + ! + ! - TAKAHASHI ET AL., 2006: GLOBAL SIMULATION OF MESOSCALE SPECTRUM + ! + uniform_res_hypervis_scaling = 1.0_r8/log10(2.0_r8) + ! + ! compute factor so that at ne30 resolution nu=1E15 + ! scale so that scaling works for other planets + ! + ! grid spacing in meters = max_min_dx*1000.0_r8 + ! 
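+    ! With uniform_res_hypervis_scaling = 1/log10(2) ~= 3.32 the coefficient scales
+    ! as nu ~ dx**3.32.  Quick check of the normalization below: for Earth's radius
+    ! and ne = 30 (dx ~= 110 km, i.e. 110000 m) the ne-based branch reduces to
+    ! nu = factor*1.0e15, the reference ne30 value referred to above.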
+ nu_fac = (rearth/6.37122E6_r8)*1.0E15_r8/(110000.0_r8**uniform_res_hypervis_scaling) + + if (nu < 0) then + if (ne <= 0) then + if (hypervis_scaling/=0) then + nu_min = factor*nu_fac*(max_min_dx*1000.0_r8)**uniform_res_hypervis_scaling + nu_max = factor*nu_fac*(min_min_dx*1000.0_r8)**uniform_res_hypervis_scaling + nu = factor*nu_min + if (hybrid%masterthread) then + write(iulog,'(a,a)') "Automatically setting nu",TRIM(str) + write(iulog,'(a,2e9.2,a,2f9.2)') "Value at min/max grid spacing: ",nu_min,nu_max,& + " Max/min grid spacing (km) = ",max_min_dx,min_min_dx + end if + nu = nu_min*(2.0_r8*rearth/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(rearth**4) + if (hybrid%masterthread) & + write(iulog,'(a,a,a,e9.3)') "Nu_tensor",TRIM(str)," = ",nu + else if (hypervis_power/=0) then + call endrun('ERROR: Automatic scaling of scalar viscosity not implemented') + end if + else + nu = factor*nu_fac*((30.0_r8/ne)*110000.0_r8)**uniform_res_hypervis_scaling + if (hybrid%masterthread) then + write(iulog,'(a,a,a,e9.2)') "Automatically setting nu",TRIM(str)," =",nu + end if + end if + end if + end subroutine automatically_set_viscosity_coefficients +end module global_norms_mod diff --git a/src/dynamics/se/dycore/gridgraph_mod.F90 b/src/dynamics/se/dycore/gridgraph_mod.F90 new file mode 100644 index 00000000..cbafebcb --- /dev/null +++ b/src/dynamics/se/dycore/gridgraph_mod.F90 @@ -0,0 +1,555 @@ +module GridGraph_mod + !------------------------- + use shr_kind_mod, only: r8=>shr_kind_r8 + !------------------------------- + use dimensions_mod, only: max_neigh_edges + !------------------------- + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + !----- + use cam_logfile, only: iulog + !----- + implicit none + + + private + + integer, public, parameter :: num_neighbors=8 ! for north, south, east, west, neast, nwest, seast, swest + + + type, public :: GridVertex_t + + integer, pointer :: nbrs(:) => null() ! The numbers of the neighbor elements + integer, pointer :: nbrs_face(:) => null() ! The cube face number of the neighbor element (nbrs array) + integer, pointer :: nbrs_wgt(:) => null() ! The weights for edges defined by nbrs array + integer, pointer :: nbrs_wgt_ghost(:) => null() ! The weights for edges defined by nbrs array + integer :: nbrs_ptr(num_neighbors + 1) !index into the nbrs array for each neighbor direction + + integer :: face_number ! which face of the cube this vertex is on + integer :: number ! element number + integer :: processor_number ! processor number + integer :: SpaceCurve ! index in Space-Filling curve + end type GridVertex_t + + type, public :: GridEdge_t + integer :: head_face ! needed if head vertex has shape (i.e. square) + integer :: tail_face ! needed if tail vertex has shape (i.e. square) + integer :: head_dir !which of 8 neighbor directions is the head + integer :: tail_dir !which of 8 neighbor directions is the tail + integer :: wgtP, wgtS + type (GridVertex_t),pointer :: head => null() ! edge head vertex + type (GridVertex_t),pointer :: tail => null() ! edge tail vertex + logical :: reverse + + end type GridEdge_t + +! ========================================== +! Public Interfaces +! 
========================================== + + public :: set_GridVertex_number + public :: PrintGridVertex + + public :: allocate_gridvertex_nbrs + public :: deallocate_gridvertex_nbrs + public :: initgridedge + public :: gridedge_search + public :: gridedge_type + public :: grid_edge_uses_vertex + public :: PrintGridEdge + public :: CheckGridNeighbors + public :: PrintChecksum + + public :: CreateSubGridGraph + + public :: assignment ( = ) + + interface assignment ( = ) + module procedure copy_gridedge + module procedure copy_gridvertex + end interface + +contains + +!====================================================================== + + subroutine allocate_gridvertex_nbrs(vertex, dim) + + type (GridVertex_t), intent(inout) :: vertex + integer, optional, intent(in) :: dim + integer :: num + + if (present(dim)) then + num = dim + else + num = max_neigh_edges + end if + + allocate(vertex%nbrs(num)) + allocate(vertex%nbrs_face(num)) + allocate(vertex%nbrs_wgt(num)) + allocate(vertex%nbrs_wgt_ghost(num)) + + + end subroutine allocate_gridvertex_nbrs +!====================================================================== + + subroutine deallocate_gridvertex_nbrs(vertex) + + type (GridVertex_t), intent(inout) :: vertex + + deallocate(vertex%nbrs) + deallocate(vertex%nbrs_face) + deallocate(vertex%nbrs_wgt) + deallocate(vertex%nbrs_wgt_ghost) + + end subroutine deallocate_gridvertex_nbrs + +!====================================================================== + +! ===================================== +! copy edge: +! copy device for overloading = sign. +! ===================================== + + + recursive subroutine copy_gridedge(edge2, edge1) + + type (GridEdge_t), intent(out) :: edge2 + type (GridEdge_t), intent(in) :: edge1 + + edge2%tail_face = edge1%tail_face + edge2%head_face = edge1%head_face + edge2%tail_dir = edge1%tail_dir + edge2%head_dir = edge1%head_dir + edge2%reverse = edge1%reverse + edge2%wgtP = edge1%wgtP + edge2%wgtS = edge1%wgtS + + + if (associated(edge1%tail)) then + edge2%tail=>edge1%tail + end if + if (associated(edge1%head)) then + edge2%head=>edge1%head + end if + + end subroutine copy_gridedge + +!====================================================================== + + recursive subroutine copy_gridvertex(vertex2, vertex1) + + implicit none + + type (GridVertex_t), intent(out) :: vertex2 + type (GridVertex_t), intent(in) :: vertex1 + + integer :: i,j,n + + n = SIZE(vertex1%nbrs) + + if (associated(vertex2%nbrs)) then + nullify(vertex2%nbrs) + end if + if (associated(vertex2%nbrs_face)) then + nullify(vertex2%nbrs_face) + end if + if (associated(vertex2%nbrs_wgt)) then + nullify(vertex2%nbrs_wgt) + end if + if (associated(vertex2%nbrs_wgt_ghost)) then + nullify(vertex2%nbrs_wgt_ghost) + end if + + call allocate_gridvertex_nbrs(vertex2) + + do i=1,n + vertex2%nbrs(i) = vertex1%nbrs(i) + vertex2%nbrs_face(i) = vertex1%nbrs_face(i) + vertex2%nbrs_wgt(i) = vertex1%nbrs_wgt(i) + vertex2%nbrs_wgt_ghost(i) = vertex1%nbrs_wgt_ghost(i) + enddo + + do i=1, num_neighbors+1 + vertex2%nbrs_ptr(i) = vertex1%nbrs_ptr(i) + enddo + + vertex2%face_number = vertex1%face_number + vertex2%number = vertex1%number + vertex2%processor_number = vertex1%processor_number + vertex2%SpaceCurve = vertex1%SpaceCurve + + end subroutine copy_gridvertex + +!=========================== +! 
search edge list for match +!=========================== + + function gridedge_search(nvert1, nvert2, edge) result(number) + + integer, intent(in) :: nvert1 + integer, intent(in) :: nvert2 + type(GridEdge_t), intent(in) :: edge(:) + integer :: number + + integer :: tmp + integer :: head + integer :: tail + + integer :: nedge + integer :: i + + nedge=SIZE(edge) + + tail=nvert1 + head=nvert2 + + if (tail > head) then + tmp = tail + tail = head + head = tmp + end if + + do i=1,nedge + if (edge(i)%tail%number==tail .and. edge(i)%head%number==head)then + number=i + end if + end do + + end function gridedge_search + +!====================================================================== + + function gridedge_type(edge) result(type) + + use params_mod, only : INTERNAL_EDGE, EXTERNAL_EDGE + type (GridEdge_t), intent(in) :: edge + integer :: type + + if (edge%head%processor_number==edge%tail%processor_number) then + type=INTERNAL_EDGE + else + type=EXTERNAL_EDGE + endif + + end function gridedge_type + +!====================================================================== + + + + function grid_edge_uses_vertex(Vertex,Edge) result(log) + + type(GridVertex_t), intent(in) :: Vertex + type(GridEdge_t), intent(in) :: Edge + logical :: log + integer :: number + + number = Vertex%number + if(number == Edge%head%number .or. number == Edge%tail%number) then + log = .TRUE. + else + log = .FALSE. + endif + + end function grid_edge_uses_vertex + +!====================================================================== + + subroutine PrintChecksum(TestPattern,Checksum) + + use dimensions_mod, only : nlev, nelemd, np + + implicit none + + real(kind=r8), target,intent(in) :: TestPattern(:,:,:,:) + real(kind=r8), target,intent(in) :: Checksum(:,:,:,:) + + integer :: i,k,ix,iy + + print * + write (iulog,*) 'checksums:' + do i=1,nelemd + ! Lets start out only looking at the first element + write(iulog,*) + do k=1,nlev + do iy=1,np + do ix=1,np + write(iulog,*)INT(TestPattern(ix,iy,k,i))," checksum = ",INT(Checksum(ix,iy,k,i)) + enddo + enddo + enddo + enddo + + + end subroutine PrintChecksum + +!====================================================================== + + subroutine CreateSubGridGraph(Vertex, SVertex, local2global) + + implicit none + + type (GridVertex_t),intent(in) :: Vertex(:) + type (GridVertex_t),intent(inout) :: SVertex(:) + integer,intent(in) :: local2global(:) + + integer :: nelem,nelem_s,n,ncount,cnt,pos, orig_start + integer :: inbr,i,ig,j,k, new_pos + + integer,allocatable :: global2local(:) + + nelem = SIZE(Vertex) + nelem_s = SiZE(SVertex) + + allocate(global2local(nelem)) + + global2local(:) = 0 + do i=1,nelem_s + ig = local2global(i) + global2local(ig) = i + enddo + + do i=1,nelem_s + ig = local2global(i) + + call copy_gridvertex(SVertex(i),Vertex(ig)) !svertex(i) = vertex(ig) + + n = SIZE(SVertex(i)%nbrs(:)) + ! ============================================== + ! Apply the correction to the neighbors list to + ! reflect new subgraph numbers + ! ============================================== + + orig_start = 1 + + do j=1,num_neighbors + + cnt = Svertex(i)%nbrs_ptr(j+1) - orig_start !number of neighbors for this direction + ncount = 0 + do k = 1, cnt + pos = orig_start + k-1 + inbr = global2local(Svertex(i)%nbrs(pos)) + + if(inbr .gt. 
0) then + new_pos = Svertex(i)%nbrs_ptr(j) + ncount + + Svertex(i)%nbrs(new_pos) = inbr + Svertex(i)%nbrs_face(new_pos) = Svertex(i)%nbrs_face(pos) + Svertex(i)%nbrs_wgt(new_pos) = Svertex(i)%nbrs_wgt(pos) + Svertex(i)%nbrs_wgt_ghost(new_pos) = Svertex(i)%nbrs_wgt_ghost(pos) + ncount = ncount+1 + endif + enddo + !set neighbors ptr + orig_start = Svertex(i)%nbrs_ptr(j+1); + Svertex(i)%nbrs_ptr(j+1) = Svertex(i)%nbrs_ptr(j) + ncount + + + enddo !num_neighbors loop + + + Svertex(i)%number = i + enddo !nelem_s loop + deallocate(global2local) + + end subroutine CreateSubGridGraph + +!====================================================================== + + subroutine PrintGridEdge(Edge) + + implicit none + type (GridEdge_t), intent(in) :: Edge(:) + + integer :: i,nedge,ii,wgtP + + nedge = SIZE(Edge) + + write(iulog,95) + do i=1,nedge + ii=Edge(i)%tail_face + + !map to correct location - for now all on same nbr side have same wgt, so take the first one + ii = Edge(i)%tail%nbrs_ptr(ii) + + wgtP=Edge(i)%tail%nbrs_wgt(ii) + write(iulog,100) i, & + Edge(i)%tail%number,Edge(i)%tail_face, wgtP, & + Edge(i)%head%number,Edge(i)%head_face, gridedge_type(Edge(i)) + enddo + 95 format(5x,'GRIDEDGE #',3x,'Tail (face)',5x,'Head (face)',3x,'Type') + 100 format(10x,I6,8x,I4,1x,'(',I1,') --',I2,'--> ',I6,1x,'(',I1,')',5x,'[',I1,']') + + end subroutine PrintGridEdge + +!====================================================================== +! ========================================== +! set_GridVertex_neighbors: +! +! Set global element number for element elem +! ========================================== + + subroutine set_GridVertex_number(elem,number) + + type(GridVertex_t) :: elem + integer :: number + + elem%number=number + + end subroutine set_GridVertex_number + +!====================================================================== + subroutine PrintGridVertex(Vertex) + + implicit none + type (GridVertex_t), intent(in),target :: Vertex(:) + + integer :: i,nvert + integer ::n_west, n_east, n_south, n_north, n_swest, n_seast, n_nwest, n_neast + integer ::w_west, w_east, w_south, w_north, w_swest, w_seast, w_nwest, w_neast + integer ::n, print_buf(90), nbr(8), j, k, start, cnt, nbrs_cnt(8) + + nbr = (/ west, east, south, north, swest, seast, nwest, neast/) + + nvert = SIZE(Vertex) + + write(iulog,98) + do i=1,nvert + + print_buf(:) = 0 + nbrs_cnt(:) = 0 + cnt = 1 + do j = 1,num_neighbors + n = Vertex(i)%nbrs_ptr(nbr(j)+1) - Vertex(i)%nbrs_ptr(nbr(j)) !num neigbors in that directions + start = Vertex(i)%nbrs_ptr(nbr(j)) !start in array + nbrs_cnt(j) = n + do k = 1, n + print_buf(cnt) = Vertex(i)%nbrs(start+k-1) + print_buf(cnt+1) = Vertex(i)%nbrs_wgt(start+k-1) + print_buf(cnt+2) = Vertex(i)%nbrs_face(start+k-1) + cnt = cnt + 3 + end do + enddo + + write(iulog,991) Vertex(i)%number, Vertex(i)%processor_number, & + Vertex(i)%face_number, & + print_buf(1:cnt-1) + + write(iulog,992) nbrs_cnt(1:8) + + + enddo + 98 format(5x,'GRIDVERTEX #',2x,'PART',2x,'DEG',4x,'W',9x,'E',9x, & + 'S',9x,'N',9x,'SW',9x,'SE',9x,'NW',9x,'NE') + + 991 format(10x,I3,8x,I4,8x,I4,2x,30(1x,I4,1x,'(',I2,I2,')')) + 992 format(30x,'nbrs_cnt:', 2x,8(1x,I4)) + + end subroutine PrintGridVertex + + +!====================================================================== + + subroutine CheckGridNeighbors(Vertex) + + implicit none + type (GridVertex_t), intent(in) :: Vertex(:) + + integer :: i,j,k,l,m,nnbrs,inbrs,nvert + nvert = SIZE(Vertex) + + do i=1,nvert + nnbrs = SIZE(Vertex(i)%nbrs) + do j=1,nnbrs + inbrs = Vertex(i)%nbrs(j) + if(inbrs > 
0) then + do k=1,nnbrs + if( inbrs .eq. Vertex(i)%nbrs(k) .and. (j/=k) ) & + write(iulog,*)'CheckGridNeighbors: ERROR identical neighbors detected for Vertex ',i + + enddo + endif + enddo + enddo + + end subroutine CheckGridNeighbors + +!====================================================================== + subroutine initgridedge(GridEdge,GridVertex) + use cam_abortutils, only : endrun + use dimensions_mod, only : max_corner_elem + + type (GridEdge_t), intent(inout) :: GridEdge(:) + type (GridVertex_t), intent(in),target :: GridVertex(:) + + integer :: i,j,k,iptr,m,n,wgtV,wgtP + integer :: nelem,nelem_edge,inbr + logical :: Verbose=.FALSE. + integer :: mynbr_cnt, cnt, mystart, start + + nelem = SIZE(GridVertex) + nelem_edge = SIZE(GridEdge) + + GridEdge(:)%reverse=.FALSE. + GridEdge(:)%wgtP=-1 + GridEdge(:)%wgtS=-1 + + iptr=1 + do j=1,nelem + do i=1,num_neighbors + mynbr_cnt = GridVertex(j)%nbrs_ptr(i+1) - GridVertex(j)%nbrs_ptr(i) !length of neighbor location + mystart = GridVertex(j)%nbrs_ptr(i) + do m=0,mynbr_cnt-1 + if((GridVertex(j)%nbrs_wgt(mystart + m) .gt. 0)) then ! Do this only if has a non-zero weight + if (nelem_edge GridVertex(j) + GridEdge(iptr)%tail_face = mystart + m ! needs to be mystart + m (location in array) + GridEdge(iptr)%tail_dir = i*max_corner_elem + m !conversion needed for setcycle + inbr = GridVertex(j)%nbrs(mystart+m) + GridEdge(iptr)%head => GridVertex(inbr) + + ! =========================================== + ! Need this awful piece of code to determine + ! which "face" of the neighbor element the + ! edge links (i.e. the "head_face") + ! =========================================== + do k=1,num_neighbors + cnt = GridVertex(inbr)%nbrs_ptr(k+1) -GridVertex(inbr)%nbrs_ptr(k) + start = GridVertex(inbr)%nbrs_ptr(k) + do n = 0, cnt-1 + if(GridVertex(inbr)%nbrs(start+n) == GridVertex(j)%number) then + GridEdge(iptr)%head_face=start+n !needs to be start + n (location in array) + GridEdge(iptr)%head_dir=k*max_corner_elem+n !conversion (un-done in setcycle) + endif + enddo + enddo + GridEdge(iptr)%wgtP = GridVertex(j)%nbrs_wgt(mystart+m) + GridEdge(iptr)%wgtS = 1 + iptr=iptr+1 + end if + end do ! m loop + end do !end i loop + end do !end j loop + if (nelem_edge+1 /= iptr) then + call endrun('Error in initgridedge: Number of edges less than expected.') + end if + if (Verbose) then + + print * + write(iulog,*)"element edge tail,head list: (TEST)" + do i=1,nelem_edge + write(iulog,*)GridEdge(i)%tail%number,GridEdge(i)%head%number + end do + + print * + write(iulog,*)"element edge tail_face, head_face list: (TEST)" + do i=1,nelem_edge + write(iulog,*)GridEdge(i)%tail_face,GridEdge(i)%head_face + end do + end if + + end subroutine initgridedge +!====================================================================== + +end module GridGraph_mod diff --git a/src/dynamics/se/dycore/hybrid_mod.F90 b/src/dynamics/se/dycore/hybrid_mod.F90 new file mode 100644 index 00000000..19f1043a --- /dev/null +++ b/src/dynamics/se/dycore/hybrid_mod.F90 @@ -0,0 +1,566 @@ +! =========================================== +! Module to support hybrid programming model +! hybrid_t is assumed to be a private struct +! 
=========================================== +module hybrid_mod + +use parallel_mod , only : parallel_t, copy_par +use thread_mod , only : omp_set_num_threads, omp_get_thread_num +use thread_mod , only : horz_num_threads, vert_num_threads, tracer_num_threads +use dimensions_mod, only : nlev, qsize, ntrac + +implicit none +private + + type, private :: hybrid_p + integer :: ibeg, iend + integer :: kbeg, kend + integer :: qbeg, qend + end type + + type, public :: hybrid_t + type (parallel_t) :: par + integer :: ithr + integer :: nthreads + integer :: ibeg, iend + integer :: kbeg, kend + integer :: qbeg, qend + logical :: masterthread + end type + + integer, allocatable :: work_pool_horz(:,:) + integer, allocatable :: work_pool_vert(:,:) + integer, allocatable :: work_pool_trac(:,:) + integer, allocatable :: work_pool_ctrac(:,:) + + integer :: nelemd_save + logical :: init_ranges = .true. + integer :: region_num_threads + character(len=64) :: region_name + + public :: PrintHybrid + public :: set_region_num_threads + private :: set_loop_ranges + public :: get_loop_ranges + public :: init_loop_ranges + public :: threadOwnsTracer, threadOwnsVertlevel + public :: config_thread_region + + interface config_thread_region + module procedure config_thread_region_par + module procedure config_thread_region_hybrid + end interface + interface PrintHybrid + module procedure PrintHybridnew + end interface + +contains + + subroutine PrintHybridnew(hybt,vname) + type (hybrid_t) :: hybt + character(len=*) :: vname + + write(*,21) vname, hybt%par%rank, hybt%ithr, hybt%nthreads, & + hybt%ibeg, hybt%iend,hybt%kbeg,hybt%kend, & + hybt%qbeg, hybt%qend +21 format('PrintHybrid: (',a, ', rank: ',i8, ', ithrd: ',i4,', nthreads: ',i4, & + ', i{beg,end}: ',2(i4),', k{beg,end}: ',2(i4),', q{beg,end}: ',2(i4),')') + + end subroutine PrintHybridnew + + + function config_thread_region_hybrid(old,region_name) result(new) + type (hybrid_t), intent(in) :: old + character(len=*), intent(in) :: region_name + type (hybrid_t) :: new + + integer :: ithr + integer :: kbeg_range, kend_range, qbeg_range, qend_range + + + ithr = omp_get_thread_num() + + if ( TRIM(region_name) == 'serial') then + region_num_threads = 1 + new%ibeg = old%ibeg; new%iend = old%iend + new%kbeg = old%kbeg; new%kend = old%kend + new%qbeg = old%qbeg; new%qend = old%qend + endif + if ( TRIM(region_name) == 'vertical') then + region_num_threads = vert_num_threads + call set_thread_ranges_1D ( work_pool_vert, kbeg_range, kend_range, ithr ) + new%ibeg = old%ibeg; new%iend = old%iend + new%kbeg = kbeg_range; new%kend = kend_range + new%qbeg = old%qbeg; new%qend = old%qend + endif + + if ( TRIM(region_name) == 'tracer' ) then + region_num_threads = tracer_num_threads + call set_thread_ranges_1D ( work_pool_trac, qbeg_range, qend_range, ithr) + new%ibeg = old%ibeg; new%iend = old%iend + new%kbeg = old%kbeg; new%kend = old%kend + new%qbeg = qbeg_range; new%qend = qend_range + endif + + if ( TRIM(region_name) == 'ctracer' ) then + region_num_threads = tracer_num_threads + call set_thread_ranges_1D ( work_pool_ctrac, qbeg_range, qend_range, ithr) + new%ibeg = old%ibeg; new%iend = old%iend + new%kbeg = old%kbeg; new%kend = old%kend + new%qbeg = qbeg_range; new%qend = qend_range + endif + + if ( TRIM(region_name) == 'vertical_and_tracer' ) then + region_num_threads = vert_num_threads*tracer_num_threads + call set_thread_ranges_2D ( work_pool_vert, work_pool_trac, kbeg_range, kend_range, & + qbeg_range, qend_range, ithr ) + new%ibeg = old%ibeg; new%iend = old%iend + new%kbeg 
= kbeg_range; new%kend = kend_range + new%qbeg = qbeg_range; new%qend = qend_range + endif + + + new%par = old%par ! relies on parallel_mod copy constructor + new%nthreads = old%nthreads * region_num_threads + if( region_num_threads .ne. 1 ) then + new%ithr = old%ithr * region_num_threads + ithr + else + new%ithr = old%ithr + endif + new%masterthread = old%masterthread +! Do we want to make this following call? +! call omp_set_num_threads(new%nthreads) + + end function config_thread_region_hybrid + + function config_thread_region_par(par,region_name) result(hybrid) + type (parallel_t) , intent(in) :: par + character(len=*), intent(in) :: region_name + type (hybrid_t) :: hybrid + ! local + integer :: ithr + integer :: ibeg_range, iend_range + integer :: kbeg_range, kend_range + integer :: qbeg_range, qend_range + integer :: nthreads + + ithr = omp_get_thread_num() + + if ( TRIM(region_name) == 'serial') then + region_num_threads = 1 + if ( .NOT. allocated(work_pool_horz) ) allocate(work_pool_horz(horz_num_threads,2)) + call set_thread_ranges_1D ( work_pool_horz, ibeg_range, iend_range, ithr ) + hybrid%ibeg = 1; hybrid%iend = nelemd_save + hybrid%kbeg = 1; hybrid%kend = nlev + hybrid%qbeg = 1; hybrid%qend = qsize + endif + + if ( TRIM(region_name) == 'horizontal') then + region_num_threads = horz_num_threads + call set_thread_ranges_1D ( work_pool_horz, ibeg_range, iend_range, ithr ) + hybrid%ibeg = ibeg_range; hybrid%iend = iend_range + hybrid%kbeg = 1; hybrid%kend = nlev + hybrid%qbeg = 1; hybrid%qend = qsize + endif + + if ( TRIM(region_name) == 'vertical') then + region_num_threads = vert_num_threads + call set_thread_ranges_1D ( work_pool_vert, kbeg_range, kend_range, ithr ) + hybrid%ibeg = 1; hybrid%iend = nelemd_save + hybrid%kbeg = kbeg_range; hybrid%kend = kend_range + hybrid%qbeg = 1; hybrid%qend = qsize + endif + + if ( TRIM(region_name) == 'tracer' ) then + region_num_threads = tracer_num_threads + call set_thread_ranges_1D ( work_pool_trac, qbeg_range, qend_range, ithr) + hybrid%ibeg = 1; hybrid%iend = nelemd_save + hybrid%kbeg = 1; hybrid%kend = nlev + hybrid%qbeg = qbeg_range; hybrid%qend = qend_range + endif + + if ( TRIM(region_name) == 'ctracer' ) then + region_num_threads = tracer_num_threads + call set_thread_ranges_1D ( work_pool_ctrac, qbeg_range, qend_range, ithr) + hybrid%ibeg = 1; hybrid%iend = nelemd_save + hybrid%kbeg = 1; hybrid%kend = nlev + hybrid%qbeg = qbeg_range; hybrid%qend = qend_range + endif + + if ( TRIM(region_name) == 'vertical_and_tracer' ) then + region_num_threads = vert_num_threads*tracer_num_threads + call set_thread_ranges_2D ( work_pool_vert, work_pool_trac, kbeg_range, kend_range, & + qbeg_range, qend_range, ithr ) + hybrid%ibeg = 1; hybrid%iend = nelemd_save + hybrid%kbeg = kbeg_range; hybrid%kend = kend_range + hybrid%qbeg = qbeg_range; hybrid%qend = qend_range + endif + call omp_set_num_threads(region_num_threads) + +! hybrid%par = par ! relies on parallel_mod copy constructor + call copy_par(hybrid%par,par) + hybrid%nthreads = region_num_threads + hybrid%ithr = ithr + hybrid%masterthread = (par%masterproc .and. ithr==0) + + end function config_thread_region_par + + subroutine init_loop_ranges(nelemd) + + integer, intent(in) :: nelemd + integer :: ith, beg_index, end_index + + + if ( init_ranges ) then + nelemd_save=nelemd + if ( .NOT. allocated(work_pool_horz) ) allocate(work_pool_horz(horz_num_threads,2)) + if(nelemd0 .and. ntrac= hybrid%kbeg) .and. (value <= hybrid%kend)) then + found = .true. 
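+     ! Illustrative note (not load-bearing): threadOwnsVertlevel returns .true.
+     ! when the requested level index lies inside this thread's [kbeg,kend]
+     ! sub-range, e.g. (sketch, with generic names hybrid and k):
+     !
+     !   if (threadOwnsVertlevel(hybrid, k)) then
+     !     ! this thread is responsible for updating level k
+     !   end if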
+ endif + + end function threadOwnsVertlevel + + function threadOwnsTracer(hybrid,value) result(found) + + type (hybrid_t), intent(in) :: hybrid + integer, intent(in) :: value + logical :: found + + found = .false. + if ((value >= hybrid%qbeg) .and. (value <= hybrid%qend)) then + found = .true. + endif + + end function threadOwnsTracer + + subroutine reset_loop_ranges (pybrid, region_name) + + type (hybrid_p) :: pybrid + character(len=*), intent(in) :: region_name + + if ( TRIM(region_name) == 'vertical' ) then + pybrid%kbeg = 1; pybrid%kend = nlev + endif + + if ( TRIM(region_name) == 'tracer' ) then + pybrid%qbeg = 1; pybrid%qend = qsize + endif + + if ( TRIM(region_name) == 'vertical_and_tracer' ) then + pybrid%kbeg = 1; pybrid%kend = nlev + pybrid%qbeg = 1; pybrid%qend = qsize + endif + + end subroutine reset_loop_ranges + + subroutine set_thread_ranges_3D ( work_pool_x, work_pool_y, work_pool_z, & + beg_range_1, end_range_1, beg_range_2, end_range_2, & + beg_range_3, end_range_3, idthread ) + + integer, intent (in ) :: work_pool_x(:,:) + integer, intent (in ) :: work_pool_y(:,:) + integer, intent (in ) :: work_pool_z(:,:) + integer, intent (inout) :: beg_range_1 + integer, intent (inout) :: end_range_1 + integer, intent (inout) :: beg_range_2 + integer, intent (inout) :: end_range_2 + integer, intent (inout) :: beg_range_3 + integer, intent (inout) :: end_range_3 + integer, intent (inout) :: idthread + + integer :: index(3) + integer :: i, j, k, ind, irange, jrange, krange + + ind = 0 + + krange = SIZE(work_pool_z,1) + jrange = SIZE(work_pool_y,1) + irange = SIZE(work_pool_x,1) + do k = 1, krange + do j = 1, jrange + do i = 1, irange + if( ind == idthread ) then + index(1) = i + index(2) = j + index(3) = k + endif + ind = ind + 1 + enddo + enddo + enddo + beg_range_1 = work_pool_x(index(1),1) + end_range_1 = work_pool_x(index(1),2) + beg_range_2 = work_pool_y(index(2),1) + end_range_2 = work_pool_y(index(2),2) + beg_range_3 = work_pool_z(index(3),1) + end_range_3 = work_pool_z(index(3),2) + +! write(6,1000) idthread, beg_range_1, end_range_1, & +! beg_range_2, end_range_2, & +! beg_range_3, end_range_3 +! call flush(6) +1000 format( 'set_thread_ranges_3D', 7(i4) ) + + end subroutine set_thread_ranges_3D + + subroutine set_thread_ranges_2D( work_pool_x, work_pool_y, beg_range_1, end_range_1, & + beg_range_2, end_range_2, idthread ) + + integer, intent (in ) :: work_pool_x(:,:) + integer, intent (in ) :: work_pool_y(:,:) + integer, intent (inout) :: beg_range_1 + integer, intent (inout) :: end_range_1 + integer, intent (inout) :: beg_range_2 + integer, intent (inout) :: end_range_2 + integer, intent (inout) :: idthread + + integer :: index(2) + integer :: i, j, ind, irange, jrange + + ind = 0 + + jrange = SIZE(work_pool_y,1) + irange = SIZE(work_pool_x,1) + do j = 1, jrange + do i = 1, irange + if( ind == idthread ) then + index(1) = i + index(2) = j + endif + ind = ind + 1 + enddo + enddo + beg_range_1 = work_pool_x(index(1),1) + end_range_1 = work_pool_x(index(1),2) + beg_range_2 = work_pool_y(index(2),1) + end_range_2 = work_pool_y(index(2),2) + +! write(6,1000) idthread, beg_range_1, end_range_1, & +! beg_range_2, end_range_2 +! 
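+! Illustrative example (assuming vert_num_threads=2 and tracer_num_threads=3
+! for the 'vertical_and_tracer' region): the inner loop above runs over
+! work_pool_x first, so thread ids 0..5 map to (x-block, y-block) pairs
+!   0->(1,1), 1->(2,1), 2->(1,2), 3->(2,2), 4->(1,3), 5->(2,3)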
call flush(6) + +1000 format( 'set_thread_ranges_2D', 7(i4) ) + + end subroutine set_thread_ranges_2D + + subroutine set_thread_ranges_1D( work_pool, beg_range, end_range, idthread ) + + integer, intent (in ) :: work_pool(:,:) + integer, intent (inout) :: beg_range + integer, intent (inout) :: end_range + integer, intent (inout) :: idthread + + integer :: index + integer :: i, j, ind, irange + + ind = 0 + + irange = SIZE(work_pool) + do i = 1, irange + if( ind == idthread ) then + index = i + endif + ind = ind + 1 + enddo + beg_range = work_pool(index,1) + end_range = work_pool(index,2) + +! write(6,1000) idthread, beg_range, end_range +! call flush(6) +1000 format( 'set_thread_ranges_1D', 7(i4) ) + + end subroutine set_thread_ranges_1D + + subroutine create_work_pool( start_domain, end_domain, ndomains, ipe, beg_index, end_index ) + + integer, intent(in) :: start_domain, end_domain + integer, intent(in) :: ndomains, ipe + integer, intent(out) ::beg_index, end_index + + integer :: beg(0:ndomains) + integer :: length + integer :: n + + length = end_domain - start_domain + 1 + beg(0) = start_domain + + do n=1,ndomains-1 + if (n.le.mod(length,ndomains)) then + beg(n)=beg(n-1)+(length-1)/ndomains+1 + else + beg(n)=beg(n-1)+length/ndomains + end if + end do + + beg(ndomains) = start_domain + length + + beg_index = beg(ipe) + end_index = beg(ipe+1) - 1 + + end subroutine create_work_pool + +end module hybrid_mod diff --git a/src/dynamics/se/dycore/hybvcoord_mod.F90 b/src/dynamics/se/dycore/hybvcoord_mod.F90 new file mode 100644 index 00000000..641a255e --- /dev/null +++ b/src/dynamics/se/dycore/hybvcoord_mod.F90 @@ -0,0 +1,28 @@ +module hybvcoord_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_logfile, only: iulog + use dimensions_mod, only: plev => nlev, plevp => nlevp + use physconst, only: pstd + + implicit none + private + + !----------------------------------------------------------------------- + ! hvcoord_t: Hybrid level definitions: p = a*p0 + b*ps + ! interfaces p(k) = hyai(k)*ps0 + hybi(k)*ps + ! midpoints p(k) = hyam(k)*ps0 + hybm(k)*ps + !----------------------------------------------------------------------- + type, public :: hvcoord_t + real(r8) ps0 ! base state surface-pressure for level definitions + real(r8) hyai(plevp) ! ps0 component of hybrid coordinate - interfaces + real(r8) hyam(plev) ! ps0 component of hybrid coordinate - midpoints + real(r8) hybi(plevp) ! ps component of hybrid coordinate - interfaces + real(r8) hybm(plev) ! ps component of hybrid coordinate - midpoints + real(r8) hybd(plev) ! difference in b (hybi) across layers + real(r8) prsfac ! log pressure extrapolation factor (time, space independent) + real(r8) etam(plev) ! eta-levels at midpoints + real(r8) etai(plevp) ! eta-levels at interfaces + integer nprlev ! 
number of pure pressure levels at top + integer pad + end type hvcoord_t +end module hybvcoord_mod diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 new file mode 100644 index 00000000..10716fb3 --- /dev/null +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -0,0 +1,1828 @@ +module interpolate_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use element_mod, only: element_t + use dimensions_mod, only: np, ne, nelemd, nc, nhe, nhc + use quadrature_mod, only: quadrature_t, legendre, quad_norm + use coordinate_systems_mod, only: spherical_polar_t, cartesian2d_t, & + cartesian3D_t, sphere2cubedsphere, spherical_to_cart, & + cubedsphere2cart, distance, change_coordinates, projectpoint + use physconst, only: PI + use quadrature_mod, only: quadrature_t, gauss, gausslobatto + use parallel_mod, only: syncmp, parallel_t + use cam_abortutils, only: endrun + use spmd_utils, only: MPI_MAX, MPI_SUM, MPI_MIN, mpi_real8, MPI_integer + use cube_mod, only: convert_gbl_index, dmap, ref2sphere + use mesh_mod, only: MeshUseMeshFile + use control_mod, only: cubed_sphere_map + use cam_logfile, only: iulog + + implicit none + private + save + + logical :: debug=.false. + + type, public :: interpolate_t + real (kind=r8), dimension(:,:), pointer :: Imat ! P_k(xj)*wj/gamma(k) + real (kind=r8), dimension(:) , pointer :: rk ! 1/k + real (kind=r8), dimension(:) , pointer :: vtemp ! temp results + real (kind=r8), dimension(:) , pointer :: glp ! GLL pts (nair) + end type interpolate_t + + type, public :: interpdata_t + ! Output Interpolation points. Used to output data on lat-lon (or other grid) + ! with native element interpolation. Each element keeps a list of points from the + ! interpolation grid that are in this element + type (cartesian2D_t),pointer,dimension(:):: interp_xy ! element coordinate + integer, pointer,dimension(:) :: ilat,ilon ! position of interpolation point in lat-lon grid + integer :: n_interp + integer :: nlat + integer :: nlon + logical :: first_entry = .TRUE. + end type interpdata_t + + real (kind=r8), private :: delta = 1.0e-9_r8 ! move tiny bit off center to + ! avoid landing on element edges + + + ! static data for interp_tracers + logical :: interp_tracers_init=.false. + real (kind=r8 ) :: interp_c(np,np) + real (kind=r8 ) :: interp_gll(np) + + public :: interp_init + public :: setup_latlon_interp + public :: interpolate_scalar + public :: interpolate_ce + + public :: interpol_phys_latlon + public :: interpolate_vector + public :: set_interp_parameter + public :: get_interp_parameter + public :: get_interp_gweight + public :: get_interp_lat + public :: get_interp_lon + public :: cube_facepoint_ne + public :: cube_facepoint_unstructured + public :: parametric_coordinates + + public :: interpolate_tracers + public :: interpolate_tracers_init + public :: minmax_tracers + public :: interpolate_2d + public :: interpolate_create + public :: point_inside_quad + public :: vec_latlon_to_contra + + + interface interpolate_scalar + module procedure interpolate_scalar2d + module procedure interpolate_scalar3d + end interface + interface interpolate_vector + module procedure interpolate_vector2d + module procedure interpolate_vector3d + end interface + + type (interpolate_t), target :: interp_p + + ! store the lat-lon grid + ! gridtype = 1 equally spaced, including poles (FV scalars output grid) + ! gridtype = 2 Gauss grid (CAM Eulerian) + ! gridtype = 3 equally spaced, no poles (FV staggered velocity) + ! 
Seven possible history files, last one is inithist and should be native grid + integer :: nlat,nlon + real (kind=r8), pointer, public :: lat(:) => NULL() + real (kind=r8), pointer, public :: lon(:) => NULL() + real (kind=r8), pointer, public :: gweight(:) => NULL() + integer :: gridtype = 1 ! + integer :: itype = 1 ! 0 = native high order + ! 1 = bilinear + + integer :: auto_grid = 0 ! 0 = interpolation grid set by namelist + ! 1 = grid set via mesh resolution + + + ! static data, used by bilin_phys2gll() + ! shared by all threads. only allocate if subroutine will be used +!JMD integer :: nphys_init=0 +!JMD integer :: index_l(np),index_r(np) +!JMD real(kind=r8),allocatable :: weights(:,:,:,:,:) ! np,np,2,2,nelemd + +!JMD public :: bilin_phys2gll +!JMD public :: bilin_phys2gll_init +contains + + + subroutine set_interp_parameter(parm_name, value) + character*(*), intent(in) :: parm_name + character(len=80) :: msg + integer :: value,power + real (kind=r8) :: value_target + + if(parm_name .eq. 'itype') then + itype=value + else if(parm_name .eq. 'nlon') then + nlon=value + else if(parm_name .eq. 'nlat') then + nlat=value + else if(parm_name.eq. 'gridtype') then + gridtype=value + else if(parm_name.eq. 'auto') then + auto_grid=1 + ! compute recommended nlat,nlon which has slightly higher + ! resolution than the specifed number of points around equator given in "value" + ! computed recommended lat-lon grid. + ! nlon > peq peq = points around equator cubed sphere grid + ! take nlon power of 2, and at most 1 power of 3 + if (value.eq.0) then + ! If reading in unstructured mesh, ne = 0 + ! This makes it hard to guess how many interpolation points to use + ! So We'll set the default as 720 x 360 + ! BUT if you're running with an unstructured mesh, set interp_nlon and interp_nlat + nlon = 1536 + nlat = 768 + else + value_target=value*1.25_r8 + power = nint(0.5_r8 + log( value_target)/log(2.0_r8) ) + power = max(power,7) ! min grid: 64x128 + if ( 3*2**(power-2) > value_target) then + nlon=3*2**(power-2) ! use 1 power of 3 + else + nlon=2**power + endif + endif + nlat=nlon/2 + if (gridtype==1) nlat=nlat+1 + else + write(msg,*) 'Did not recognize parameter named ',parm_name,' in interpolate_mod:set_interp_parameter' + call endrun(msg) + end if + end subroutine set_interp_parameter + function get_interp_parameter(parm_name) result(value) + character*(*), intent(in) :: parm_name + integer :: value + character(len=80) :: msg + if(parm_name .eq. 'itype') then + value=itype + else if(parm_name .eq. 'nlon') then + value=nlon + else if(parm_name .eq. 'nlat') then + value=nlat + else if(parm_name.eq. 'gridtype') then + value=gridtype + else if(parm_name.eq. 'auto_grid') then + value=auto_grid + else + write(msg,*) 'Did not recognize parameter named ',parm_name,' in interpolate_mod:get_interp_parameter' + value=-1 + call endrun(msg) + end if + return + end function get_interp_parameter + function get_interp_gweight() result(gw) + real(kind=r8) :: gw(nlat) + gw=gweight + return + end function get_interp_gweight + function get_interp_lat() result(thislat) + real(kind=r8) :: thislat(nlat) + thislat=lat*180.0_r8/PI + return + end function get_interp_lat + function get_interp_lon() result(thislon) + real(kind=r8) :: thislon(nlon) + thislon=lon*180.0_r8/PI + return + end function get_interp_lon + + subroutine interpolate_create(gquad,interp) + type (quadrature_t) , intent(in) :: gquad + type (interpolate_t), intent(out) :: interp + + + ! 
Local variables + + integer k,j + integer npts + real (kind=r8), dimension(:), allocatable :: gamma + real (kind=r8), dimension(:), allocatable :: leg + + npts = size(gquad%points) + + allocate(interp%Imat(npts,npts)) + allocate(interp%rk(npts)) + allocate(interp%vtemp(npts)) + allocate(interp%glp(npts)) + allocate(gamma(npts)) + allocate(leg(npts)) + + gamma = quad_norm(gquad,npts) + + do k=1,npts + interp%rk(k) = 1.0_r8/k + interp%glp(k) = gquad%points(k) !nair + end do + + do j=1,npts + leg=legendre(gquad%points(j),npts-1) + do k=1,npts + interp%Imat(j,k)=leg(k)*gquad%weights(j)/gamma(k) + end do + end do + + deallocate(gamma) + deallocate(leg) + + end subroutine interpolate_create + + + subroutine interpolate_tracers_init() + use dimensions_mod, only : np, qsize + use quadrature_mod, only : quadrature_t, gausslobatto + + + implicit none + + type (quadrature_t ) :: gll + real (kind=r8 ) :: dp (np) + integer :: i,j + + gll=gausslobatto(np) + dp = 1 + do i=1,np + do j=1,np + if (i /= j) then + dp(i) = dp(i) * (gll%points(i) - gll%points(j)) + end if + end do + end do + do i=1,np + do j=1,np + interp_c(i,j) = 1/(dp(i)*dp(j)) + end do + end do + interp_gll(:) = gll%points(:) + interp_tracers_init = .true. + + deallocate(gll%points) + deallocate(gll%weights) + + + end subroutine interpolate_tracers_init + + + + + subroutine interpolate_tracers(r, tracers, f) + use dimensions_mod, only : np, qsize + + + implicit none + type (cartesian2D_t), intent(in) :: r + real (kind=r8),intent(in) :: tracers(np*np,qsize) + real (kind=r8),intent(out) :: f(qsize) + + real (kind=r8 ) :: x (np) + real (kind=r8 ) :: y (np) + real (kind=r8 ) :: xy (np*np) + + integer :: i,j + + + if (.not. interp_tracers_init ) then + call endrun('ERROR: interpolate_tracers() was not initialized') + endif + + x = 1 + y = 1 + do i=1,np + do j=1,np + if (i /= j) then + x(i) = x(i) * (r%x - interp_gll(j)) + y(i) = y(i) * (r%y - interp_gll(j)) + end if + end do + end do + + do j=1,np + do i=1,np + xy(i + (j-1)*np) = x(i)*y(j)*interp_c(i,j) + end do + end do + f = MATMUL(xy,tracers) + end subroutine interpolate_tracers + + + + subroutine linear_interpolate_2d(x,y,s,v) + use dimensions_mod, only : np, qsize + + real(kind=r8) , intent(in) :: x(np) + real(kind=r8), intent(in) :: y(np,np,qsize) + type (cartesian2D_t), intent(in) :: s + real(kind=r8), intent(inout) :: v(qsize) + + integer :: i,j,q + real (kind=r8) dx, dy(qsize), dydx(qsize) + real (kind=r8) y0(qsize), y1(qsize) + type (cartesian2D_t) :: r + + r = s + if (r%x < -1) r%x = -1 + if (r%y < -1) r%y = -1 + if ( 1 < r%x) r%x = 1 + if ( 1 < r%y) r%y = 1 + do i=1,np + if (r%x < x(i)) exit + end do + do j=1,np + if (r%y < x(j)) exit + end do + if (1 < i) i = i-1 + if (1 < j) j = j-1 + if (np==i) i = i-1 + if (np==j) j = j-1 + + dx = x(i+1) - x(i) + dy = y(i+1,j,:) - y(i,j,:) + dydx = dy/dx + y0 = y(i,j,:) + (r%x-x(i))*dydx + + dy = y(i+1,j+1,:) - y(i,j+1,:) + dydx = dy/dx + y1 = y(i,j+1,:) + (r%x-x(i))*dydx + + dx = x(j+1) - x(j) + dy = y1 - y0 + dydx = dy/dx + v = y0 + (r%y-x(j))*dydx + + end subroutine linear_interpolate_2d + + subroutine minmax_tracers(r, tracers, mint, maxt) + use dimensions_mod, only : np, qsize + use quadrature_mod, only : quadrature_t, gausslobatto + + + implicit none + + type (cartesian2D_t), intent(in) :: r + real (kind=r8),intent(in) :: tracers(np,np,qsize) + real (kind=r8),intent(out) :: mint (qsize) + real (kind=r8),intent(out) :: maxt (qsize) + + type (quadrature_t), save :: gll + integer :: i,j + logical , save :: first_time=.true. 
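+    ! Note: as currently written, the loop just below takes the min/max of
+    ! each tracer over the whole element and returns immediately; the
+    ! linear-interpolation neighborhood estimate further down is retained
+    ! for reference but is not reached.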
+ real (kind=r8) :: y1 (qsize) + real (kind=r8) :: y2 (qsize) + real (kind=r8) :: q_interp (4,qsize) + type (cartesian2D_t) :: s + real (kind=r8) :: delta + integer :: q + + do q=1,qsize + mint(q) = minval(tracers(:,:,q)) + maxt(q) = maxval(tracers(:,:,q)) + enddo + return + + delta = 1._r8/8._r8 + + if (first_time) then + first_time = .false. + gll=gausslobatto(np) + end if + + do i=1,np + if (r%x < gll%points(i)) exit + end do + do j=1,np + if (r%y < gll%points(j)) exit + end do + if (1 < i) i = i-1 + if (1 < j) j = j-1 + if (np==i) i = i-1 + if (np==j) j = j-1 + +! mint(:) = minval(minval(tracers(i:i+1,j:j+1,:),1),1) +! maxt(:) = maxval(maxval(tracers(i:i+1,j:j+1,:),1),1) + +! Or check this out: + s = r + s%x = s%x - delta + s%y = s%y - delta + call linear_interpolate_2d(gll%points,tracers,s,q_interp(1,:)) + s = r + s%x = s%x + delta + s%y = s%y - delta + call linear_interpolate_2d(gll%points,tracers,s,q_interp(2,:)) + s = r + s%x = s%x - delta + s%y = s%y + delta + call linear_interpolate_2d(gll%points,tracers,s,q_interp(3,:)) + s = r + s%x = s%x + delta + s%y = s%y + delta + call linear_interpolate_2d(gll%points,tracers,s,q_interp(4,:)) + + mint(:) = minval(q_interp(:,:),1) + maxt(:) = maxval(q_interp(:,:),1) + end subroutine minmax_tracers + + function interpolate_2d(cart, f, interp, npts, fillvalue) result(fxy) + integer, intent(in) :: npts + type (cartesian2D_t), intent(in) :: cart + real (kind=r8), intent(in) :: f(npts,npts) + type (interpolate_t) :: interp + real (kind=r8) :: fxy ! value of f interpolated to (x,y) + real (kind=r8), intent(in), optional :: fillvalue + ! local variables + + real (kind=r8) :: tmp_1,tmp_2 + real (kind=r8) :: fk0,fk1 + real (kind=r8) :: pk + + integer :: l,j,k + + if(present(fillvalue)) then + if (any(f==fillvalue)) then + fxy = fillvalue + return + endif + endif + + + do l=1,npts,2 + + ! Compute Pk(cart%x) for Legendre order 0 + + pk = 1.0_r8 + + fk0=0.0_r8 + fk1=0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,1)*f(j,l ) + fk1 = fk1 + interp%Imat(j,1)*f(j,l+1) + end do + interp%vtemp(l ) = pk*fk0 + interp%vtemp(l+1) = pk*fk1 + + ! Compute Pk(cart%x) for Legendre order 1 + + tmp_2 = pk + pk = cart%x + + fk0=0.0_r8 + fk1=0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,2)*f(j,l ) + fk1 = fk1 + interp%Imat(j,2)*f(j,l+1) + end do + interp%vtemp(l ) = interp%vtemp(l ) + pk*fk0 + interp%vtemp(l+1) = interp%vtemp(l+1) + pk*fk1 + + ! Compute Pk(cart%x) for Legendre order 2 to npts-1 + + do k = 2,npts-1 + + tmp_1 = tmp_2 + tmp_2 = pk + pk = ( (2*k-1)*cart%x*tmp_2 - (k-1)*tmp_1 )*interp%rk(k) + + fk0=0.0_r8 + fk1=0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,k+1)*f(j,l ) + fk1 = fk1 + interp%Imat(j,k+1)*f(j,l+1) + end do + interp%vtemp(l ) = interp%vtemp(l ) + pk*fk0 + interp%vtemp(l+1) = interp%vtemp(l+1) + pk*fk1 + + end do + + end do + + ! Compute Pk(cart%y) for Legendre order 0 + + pk = 1.0_r8 + + fk0 = 0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,1)*interp%vtemp(j) + end do + fxy = pk*fk0 + + ! Compute Pk(cart%y) for Legendre order 1 + + tmp_2 = pk + pk = cart%y + + fk0=0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,2)*interp%vtemp(j) + end do + fxy = fxy + pk*fk0 + + ! 
Compute Pk(cart%y) for Legendre order 2, npts-1 + + do k = 2,npts-1 + tmp_1 = tmp_2 + tmp_2 = pk + pk = ( (2*k-1)*cart%y*tmp_2 - (k-1)*tmp_1 )*interp%rk(k) + + fk0 = 0.0_r8 + do j=1,npts + fk0 = fk0 + interp%Imat(j,k+1)*interp%vtemp(j) + end do + + fxy = fxy + pk*fk0 + + end do + + end function interpolate_2d + + !=============================== + !(Nair) Bilinear interpolation for every GLL grid cell + !=============================== + + function interpol_bilinear(cart, f, xoy, imin, imax, fillvalue) result(fxy) + integer, intent(in) :: imin,imax + type (cartesian2D_t), intent(in) :: cart + real (kind=r8), intent(in) :: f(imin:imax,imin:imax) + real (kind=r8) :: xoy(imin:imax) + real (kind=r8) :: fxy ! value of f interpolated to (x,y) + real (kind=r8), intent(in), optional :: fillvalue + ! local variables + + real (kind=r8) :: p,q,xp,yp ,y4(4) + integer :: l,j,k, ii, jj, na,nb,nm + + xp = cart%x + yp = cart%y + + ! Search index along "x" (bisection method) + + na = imin + nb = imax + do + if ((nb-na) <= 1) exit + nm = (nb + na)/2 + if (xp > xoy(nm)) then + na = nm + else + nb = nm + endif + enddo + ii = na + + ! Search index along "y" + + na = imin + nb = imax + do + if ((nb-na) <= 1) exit + nm = (nb + na)/2 + if (yp > xoy(nm)) then + na = nm + else + nb = nm + endif + enddo + jj = na + + ! GLL cell containing (xp,yp) + + y4(1) = f(ii,jj) + y4(2) = f(ii+1,jj) + y4(3) = f(ii+1,jj+1) + y4(4) = f(ii,jj+1) + + if(present(fillvalue)) then + if (any(y4==fillvalue)) then + fxy = fillvalue + return + endif + endif + + p = (xp - xoy(ii))/(xoy(ii+1) - xoy(ii)) + q = (yp - xoy(jj))/(xoy(jj+1) - xoy(jj)) + + fxy = (1.0_r8 - p)*(1.0_r8 - q)* y4(1) + p*(1.0_r8 - q) * y4(2) & + + p*q* y4(3) + (1.0_r8 - p)*q * y4(4) + end function interpol_bilinear + + ! ----------------------------------------------------------------------------------! + !FUNCTION interpol_phys_latlon----------------------------------------CE-for fvm! + ! AUTHOR: CHRISTOPH ERATH, 23. May 2012 ! + ! DESCRIPTION: evaluation of the reconstruction for every physics grid cell ! + ! ! + ! CALLS: + ! INPUT: + ! + ! OUTPUT: + !-----------------------------------------------------------------------------------! + subroutine interpol_phys_latlon(interpdata,f, fvm, corners, desc, flatlon,lmono) + use fvm_control_volume_mod, only : fvm_struct + ! use fvm_reconstruction_mod, only: reconstruction_gradient, recons_val_cart + use edgetype_mod, only : edgedescriptor_t + + type (interpdata_t), intent(in) :: interpdata + real (kind=r8), intent(inout) :: f(1-nhc:nc+nhc,1-nhc:nc+nhc) + type (fvm_struct), intent(in) :: fvm + type (cartesian2d_t), intent(in) :: corners(:) + type (edgedescriptor_t),intent(in) :: desc + logical, intent(in) :: lmono + + real (kind=r8) :: flatlon(:) + ! local variables + real (kind=r8) :: xp,yp, tmpval + real (kind=r8) :: tmpaxp,tmpaxm, tmpayp, tmpaym + integer :: i, ix, jy, starti,endi,tmpi + real (kind=r8), dimension(1-nhe:nc+nhe,1-nhe:nc+nhe,6) :: recons + + real (kind=r8), dimension(nc+1) :: x, y + + ! call reconstruction_gradient(f, fvm,recons,6,lmono) + ! recons=0.0 ! PCoM + + x(1:nc) = fvm%vtx_cart(1,1,1:nc,1 ) + y(1:nc) = fvm%vtx_cart(1,2,1 ,1:nc) + x(nc+1) = fvm%vtx_cart(2,1,nc,1 ) + y(nc+1) = fvm%vtx_cart(3,2,1 ,nc ) + + tmpaxp=(corners(1)%x+corners(2)%x)/2 + tmpaxm=(corners(2)%x-corners(1)%x)/2 + tmpayp=(corners(1)%y+corners(4)%y)/2 + tmpaym=(corners(4)%y-corners(1)%y)/2 + do i=1,interpdata%n_interp + ! 
caculation phys grid coordinate of xp point, note the interp_xy are on the reference [-1,1]x[-1,1] + xp=tan(tmpaxp+interpdata%interp_xy(i)%x*tmpaxm) + yp=tan(tmpayp+interpdata%interp_xy(i)%y*tmpaym) + + ! Search index along "x" (bisection method) + starti = 1 + endi = nc+1 + do + if ((endi-starti) <= 1) exit + tmpi = (endi + starti)/2 + if (xp > x(tmpi)) then + starti = tmpi + else + endi = tmpi + endif + enddo + ix = starti + + ! Search index along "y" + starti = 1 + endi = nc+1 + do + if ((endi-starti) <= 1) exit + tmpi = (endi + starti)/2 + if (yp > y(tmpi)) then + starti = tmpi + else + endi = tmpi + endif + enddo + jy = starti + + ! call recons_val_cart(f(ix,jy), xp,yp, fvm%spherecentroid(ix,jy,:), fvm%recons_metrics(ix,jy,:), & + ! recons(ix,jy,:), tmpval) + tmpval=f(ix,jy) + flatlon(i)=tmpval + !phl PCoM + ! flatlon(i)=f(ix,jy) + end do + end subroutine interpol_phys_latlon + + function parametric_coordinates(sphere, corners3D,ref_map_in, corners,u2qmap,facenum) result (ref) + implicit none + type (spherical_polar_t), intent(in) :: sphere + type (cartesian2D_t) :: ref + + type (cartesian3D_t) :: corners3D(4) !x,y,z coords of element corners + integer,optional :: ref_map_in ! default is global variable 'cubed_sphere_map' + ! optional arguments, only needed for ref_map=1 (equi-angle gnomonic projection): + type (cartesian2D_t),optional :: corners(4) ! gnomonic coords of element corners + real (kind=r8),optional :: u2qmap(4,2) + integer,optional :: facenum + + + ! local + integer :: i, MAX_NR_ITER=10 + real(kind=r8) :: D(2,2),Dinv(2,2),detD,a,b,resa,resb,dela,delb,costh + real(kind=r8) :: tol_sq = 1.0e-26_r8 + type (spherical_polar_t) :: sphere1, sphere_tmp + integer :: ref_map + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! newton iteration on: ref=ref - df^-1 (ref2sphere(ref) - sphere) + ! + ! Generic version written in terms of HOMME's 'ref2sphere' and 'Dmap' operaters, + ! with no assumption as to the type of map (gnomonic, equi-angular, parametric) + ! + ! Note that the coordinate increment from newton iterations is not a direction and thus + ! should not be converted into motion along a great circle arc - this routine + ! correclty applies the increment by just adding it to the coordintes + ! + ! f = ref2sphere(xvec) - sphere + ! df = d(ref2sphere) + ! + ! D = diag(cos(theta),1) * d(ref2sphere) d(ref2sphere) = diag(1/cos(theta),1)*D + ! + ! df = diag(1/cos(theta),1)*D + ! df^-1 = D^-1 * diag(cos(theta),1) + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + if (present(ref_map_in)) then + ref_map=ref_map_in + else + ref_map=cubed_sphere_map + endif + costh=cos(sphere%lat) + a=0 + b=0 + i=0 + do + sphere1 = ref2sphere(a,b,corners3D,ref_map,corners,facenum) + resa = sphere1%lon - sphere%lon + if (resa > pi) resa= resa - 2*pi + if (resa < -pi) resa= resa + 2*pi + + resb = sphere1%lat - sphere%lat + + call Dmap(D,a,b,corners3D,ref_map,corners,u2qmap,facenum) + detD = D(1,1)*D(2,2) - D(1,2)*D(2,1) + Dinv(1,1) = D(2,2)/detD + Dinv(1,2) = -D(1,2)/detD + Dinv(2,1) = -D(2,1)/detD + Dinv(2,2) = D(1,1)/detD + + dela = Dinv(1,1)*costh*resa + Dinv(1,2)*resb + delb = Dinv(2,1)*costh*resa + Dinv(2,2)*resb + a = a - dela + b = b - delb + i=i+1 + if ( (costh*resa)**2 + resb**2 < tol_sq .or. MAX_NR_ITER < i) exit + end do + ref%x=a + ref%y=b + + end function parametric_coordinates + + + + +! +! find element containing given point, useing HOMME's standard +! equi-angular gnomonic map. +! 
note that with this map, only coordinate lines are great circle arcs +! + function point_inside_equiangular(elem, sphere, sphere_xyz) result(inside) + implicit none + type (spherical_polar_t), intent(in) :: sphere + type (cartesian3D_t), intent(in) :: sphere_xyz + type (element_t) , intent(in) :: elem + logical :: inside, inside2 + integer :: i,j + type (cartesian2D_t) :: corners(4),sphere_xy,cart + type (cartesian3D_t) :: corners_xyz(4),center,a,b,cross(4) + real (kind=r8) :: yp(4), y, elem_diam,dotprod + real (kind=r8) :: xp(4), x, xc,yc + real (kind=r8) :: tol_inside + real (kind=r8) :: d1,d2 + + type (spherical_polar_t) :: sphere_tmp + + inside = .false. + + + ! first check if point is near the element: + corners_xyz(:) = elem%corners3D(:) + elem_diam = max( distance(corners_xyz(1),corners_xyz(3)), & + distance(corners_xyz(2),corners_xyz(4)) ) + + center%x = sum(corners_xyz(1:4)%x)/4 + center%y = sum(corners_xyz(1:4)%y)/4 + center%z = sum(corners_xyz(1:4)%z)/4 + if ( distance(center,sphere_xyz) > 1.0_r8*elem_diam ) return + + tol_inside = 1.0e-10_r8*elem_diam**2 + ! the point is close to the element, so project both to cubed sphere + ! and perform contour integral + sphere_xy=sphere2cubedsphere(sphere,elem%FaceNum) + x = sphere_xy%x + y = sphere_xy%y + do i=1,4 + xp(i) = elem%corners(i)%x + yp(i) = elem%corners(i)%y + end do + + + if (debug) then + print *,'point: ',x,y,elem%FaceNum + print *,'element:' + write(*,'(a,4e16.8,a)') 'x=[',xp(1:4),']' + write(*,'(a,4e16.8,a)') 'y=[',yp(1:4),']' + + ! first check if centroid is in this element (sanity check) + sphere_tmp=change_coordinates(center) + sphere_xy=sphere2cubedsphere(sphere_tmp,elem%FaceNum) + xc=sphere_xy%x + yc=sphere_xy%y + print *,'cross product with centroid: all numbers should be negative' + j = 4 + do i=1,4 + print *,i,(xc-xp(j))*(yp(i)-yp(j)) - (yc-yp(j))*(xp(i)-xp(j)) + j = i ! within this loopk j = i-1 + end do + + print *,'cross product with search point' + j = 4 + do i=1,4 + print *,i,(x-xp(j))*(yp(i)-yp(j)) - (y-yp(j))*(xp(i)-xp(j)) + j = i ! within this loopk j = i-1 + end do + endif + + + j = 4 + do i=1,4 + ! a = x-xp(j), y-yp(j) + ! b = xp(i)-xp(j), yp(i)-yp(j) + ! compute a cross b: + if ( -( (x-xp(j))*(yp(i)-yp(j)) - (y-yp(j))*(xp(i)-xp(j))) > tol_inside ) then + return + endif + j = i ! within this loopk j = i-1 + end do + ! all cross products were negative, must be inside: + inside=.true. + end function point_inside_equiangular + + +! +! find if quad contains given point, with quad edges assumed to be great circle arcs +! this will work with any map where straight lines are mapped to great circle arcs. +! (thus it will fail on unstructured grids using the equi-angular gnomonic map) +! + function point_inside_quad(corners_xyz, sphere_xyz) result(inside) + implicit none + type (cartesian3D_t), intent(in) :: sphere_xyz + type (cartesian3D_t) , intent(in) :: corners_xyz(4) + logical :: inside, inside2 + integer :: i,j,ii + type (cartesian2D_t) :: corners(4),sphere_xy,cart + type (cartesian3D_t) :: center,a,b,cross(4) + real (kind=r8) :: yp(4), y, elem_diam,dotprod + real (kind=r8) :: xp(4), x + real (kind=r8) :: d1,d2, tol_inside = 1.0e-12_r8 + + type (spherical_polar_t) :: sphere ! debug + + inside = .false. + + ! 
first check if point is near the corners: + elem_diam = max( distance(corners_xyz(1),corners_xyz(3)), & + distance(corners_xyz(2),corners_xyz(4)) ) + + center%x = sum(corners_xyz(1:4)%x)/4 + center%y = sum(corners_xyz(1:4)%y)/4 + center%z = sum(corners_xyz(1:4)%z)/4 + if ( distance(center,sphere_xyz) > 1.0_r8*elem_diam ) return + + j = 4 + do i=1,4 + ! outward normal to plane containing j->i edge: corner(i) x corner(j) + ! sphere dot (corner(i) x corner(j) ) = negative if inside + cross(i)%x = corners_xyz(i)%y*corners_xyz(j)%z - corners_xyz(i)%z*corners_xyz(j)%y + cross(i)%y =-(corners_xyz(i)%x*corners_xyz(j)%z - corners_xyz(i)%z*corners_xyz(j)%x) + cross(i)%z = corners_xyz(i)%x*corners_xyz(j)%y - corners_xyz(i)%y*corners_xyz(j)%x + dotprod = cross(i)%x*sphere_xyz%x + cross(i)%y*sphere_xyz%y +& + cross(i)%z*sphere_xyz%z + j = i ! within this loopk j = i-1 + + ! dot product is proportional to elem_diam. positive means outside, + ! but allow machine precision tolorence: + if (dotprod > tol_inside*elem_diam) return + !if (dotprod > 0) return + end do + inside=.true. + return + end function point_inside_quad + +! +! find element containing given point, with element edges assumed to be great circle arcs +! this will work with any map where straight lines are mapped to great circle arcs. +! (thus it will fail on unstructured grids using the equi-angular gnomonic map) +! + function point_inside_gc(elem, sphere_xyz) result(inside) + implicit none + type (cartesian3D_t), intent(in) :: sphere_xyz + type (element_t) , intent(in) :: elem + logical :: inside, inside2 + integer :: i,j,ii + type (cartesian2D_t) :: corners(4),sphere_xy,cart + type (cartesian3D_t) :: corners_xyz(4),center,a,b,cross(4) + real (kind=r8) :: yp(4), y, elem_diam,dotprod + real (kind=r8) :: xp(4), x + real (kind=r8) :: d1,d2, tol_inside = 1.0e-12_r8 + + type (spherical_polar_t) :: sphere ! debug + + inside = .false. + + ! first check if point is near the element: + corners_xyz(:) = elem%corners3D(:) + elem_diam = max( distance(corners_xyz(1),corners_xyz(3)), & + distance(corners_xyz(2),corners_xyz(4)) ) + + center%x = sum(corners_xyz(1:4)%x)/4 + center%y = sum(corners_xyz(1:4)%y)/4 + center%z = sum(corners_xyz(1:4)%z)/4 + if ( distance(center,sphere_xyz) > 1.0_r8*elem_diam ) return + + j = 4 + do i=1,4 + ! outward normal to plane containing j->i edge: corner(i) x corner(j) + ! sphere dot (corner(i) x corner(j) ) = negative if inside + cross(i)%x = corners_xyz(i)%y*corners_xyz(j)%z - corners_xyz(i)%z*corners_xyz(j)%y + cross(i)%y =-(corners_xyz(i)%x*corners_xyz(j)%z - corners_xyz(i)%z*corners_xyz(j)%x) + cross(i)%z = corners_xyz(i)%x*corners_xyz(j)%y - corners_xyz(i)%y*corners_xyz(j)%x + dotprod = cross(i)%x*sphere_xyz%x + cross(i)%y*sphere_xyz%y +& + cross(i)%z*sphere_xyz%z + j = i ! within this loopk j = i-1 + + !if (dotprod>0 .and. dotprod/elem_diam < 1e-5) print *,dotprod/elem_diam + + ! dot product is proportional to elem_diam. positive means outside, + ! but allow machine precision tolorence: + if (dotprod > tol_inside*elem_diam) return + !if (dotprod > 0) return + end do + inside=.true. + return + end function point_inside_gc + + + !================================================ + ! 
(Nair) Cube face index and local coordinates + !================================================ + + subroutine cube_facepoint_ne(sphere, ne, cart, number) + use coordinate_systems_mod, only : cube_face_number_from_sphere, sphere2cubedsphere + + type(spherical_polar_t), intent(in) :: sphere + integer, intent(in) :: ne + type(cartesian2D_t), intent(out) :: cart + integer, intent(out) :: number + + real(kind=r8) :: xp, yp + type(cartesian2D_t) :: cube + integer :: ie, je, face_no + real(kind=r8) :: x1, x2 + real(kind=r8) :: dx + + face_no = cube_face_number_from_sphere(sphere) + cube = sphere2cubedsphere(sphere, face_no) + xp = cube%x + yp = cube%y + + ! MNL: for uniform grids (on cube face), analytic solution is fine + x1 = xp + 0.25_r8*PI + x2 = yp + 0.25_r8*PI + + dx = (0.5_r8*PI)/ne + ie = INT(ABS(x1)/dx) + je = INT(ABS(x2)/dx) + ! if we are exactly on an element edge, we can put the point in + ! either the ie or ie+1 element, EXCEPT if ie==ne. + if ( ABS(x1) < ne*dx ) then + ie = ie + 1 + end if + if ( ABS(x2) < ne*dx ) then + je = je + 1 + end if + if ((ie > ne) .or. (je > ne)) then + write(iulog, *) 'ERROR: ',ie,je,ne + write(iulog, *) 'lat,lon=',sphere%lat,sphere%lon + write(iulog, *) 'face no=',face_no + write(iulog, *) x1,x2,x1/dx,x2/dx + call endrun('interpolate_mod: bad argument') + endif + + ! bug fix MT 1/2009. This was creating a plotting error at + ! the row of elements in iface=2 at 50 degrees (NE=16 128x256 lat/lon grid) + ! For point on element edge, we can have ie=2, but x1=dx + ! but if ie>1, we must execute this statement. + ! The only time we can skip this statement is if ie=1, but then + ! the statement has no effect, so lets never skip it: + ! if (x1 > dx ) then + x1 = x1 - dble(ie-1)*dx + ! endif + + x1 = 2.0_r8*(x1/dx)-1.0_r8 + + ! if (x2 > dx ) then ! removed MT 1/2009, see above + x2 = x2 - dble(je-1)*dx + ! endif + + x2 = 2.0_r8*(x2/dx)-1.0_r8 + + ! coordinates within an element [-1,1] + cart%x = x1 + cart%y = x2 + number = ie + (je-1)*ne + (face_no-1)*ne*ne + end subroutine cube_facepoint_ne + !================================================ + ! (Nair) Cube face index and local coordinates + !================================================ + + + subroutine cube_facepoint_unstructured(sphere,cart, number, elem) + use coordinate_systems_mod, only : cube_face_number_from_sphere, & + sphere2cubedsphere,change_coordinates,cube_face_number_from_cart + implicit none + + type (element_t) , intent(in), target :: elem(:) + type (spherical_polar_t), intent (in) :: sphere + type (cartesian2D_t), intent(out) :: cart + integer , intent(out) :: number + + integer :: ii + Logical :: found + type (cartesian3D_t) :: sphere_xyz + type (cartesian2D_t) :: cube + sphere_xyz=spherical_to_cart(sphere) + + number=-1 +! print *,'WARNING: using GC map' + do ii = 1,nelemd + ! for equiangular gnomonic map: + ! unstructed grid element edges are NOT great circles + if (cubed_sphere_map==0) then + found = point_inside_equiangular(elem(ii), sphere, sphere_xyz) + else + ! 
assume element edges are great circle arcs: + found = point_inside_gc(elem(ii), sphere_xyz) + endif + + if (found) then + number = ii + cart = parametric_coordinates(sphere, elem(ii)%corners3D,& + cubed_sphere_map,elem(ii)%corners,elem(ii)%u2qmap,elem(ii)%facenum) + exit + end if + end do + end subroutine cube_facepoint_unstructured + + + subroutine interp_init() + type (quadrature_t) :: gp + + gp = gausslobatto(np) + call interpolate_create(gp,interp_p) + end subroutine interp_init + + + subroutine setup_latlon_interp(elem,interpdata,par) + ! + ! initialize interpolation data structures to interpolate to a lat-lon grid + ! + ! + + implicit none + type (element_t) , intent(in), target :: elem(:) + type (parallel_t) , intent(in) :: par + type (interpdata_t) , intent(out) :: interpdata(:) + + ! local + integer i,j,ii,count_total,n_interp,count_max + integer ngrid, number, elem_num, plat + integer countx, missing_pts,ierr + integer :: npts_mult_claims,max_claims + + real (kind=r8) :: dp,latdeg(nlat+1),clat(nlat+1),w(nlat+1),w_staggered(nlat) + real (kind=r8) :: clat_staggered(nlat),latdeg_st(nlat),err,err2 + + type (spherical_polar_t) :: sphere + type (cartesian2D_t) :: cart + type (cartesian3D_t) :: sphere_xyz,sphere2_xyz + + type (quadrature_t) :: gp + + + ! Array to make sure each interp point is on exactly one process + type (cartesian2D_t),allocatable :: cart_vec(:,:) + integer :: k + integer, allocatable :: global_elem_gid(:,:),local_elem_gid(:,:), local_elem_num(:,:) + + ! these arrays often are too large for stack, so lets make sure + ! they go on the heap: + allocate(local_elem_num(nlat,nlon)) + allocate(local_elem_gid(nlat,nlon)) + allocate(global_elem_gid(nlat,nlon)) + allocate(cart_vec(nlat,nlon)) + + if (par%masterproc) then + write(iulog,'(a,i4,a,i4,a)') 'Initializing ',nlat,' x ',nlon,' lat-lon interpolation grid: ' + endif + + do ii=1,nelemd + interpdata(ii)%n_interp=0 ! reset counter + enddo + + if (associated(lat))then + deallocate(lat) + nullify(lat) + endif + if (associated(gweight))then + deallocate(gweight) + nullify(gweight) + endif + + if (associated(lon))then + deallocate(lon) + nullify(lon) + endif + + allocate(lat(nlat)) + allocate(gweight(nlat)) + allocate(lon(nlon)) + call interp_init() + gweight=0 + do i=1,nlon + lon(i)=2*pi*(i-1)/nlon + enddo + if (gridtype==1) then + do j=1,nlat + lat(j) = -pi/2 + pi*(j-1)/(nlat-1) + end do + plat=nlat + endif + if (gridtype==2) then + gp=gauss(nlat) + do j=1,nlat + lat(j) = asin(gp%points(j)) + gweight(j) = gp%weights(j) + end do + endif + if (gridtype==3) then + do j=1,nlat + lat(j) = -pi/2 + pi*(j-.5_r8)/nlat + end do + plat=nlat+1 + endif + + if (gridtype==1 .or. gridtype==3) then + ! gridtype=1 plat=nlat gweight(1:nlat)=w(1:plat) + ! gridtype=3 plat=nlat+1 gweight(1:nlat)=w_staggered(1:plat-1) + + ! L-R dynamics uses a regular latitude distribution (not gausian). + ! The algorithm below is a bastardized version of LSM: map.F. + dp = 180.0_r8/(plat-1) + do j = 1, plat + latdeg(j) = -90.0_r8 + (j-1)*dp + clat(j) = latdeg(j)*pi/180.0_r8 + end do + + ! Calculate latitudes for the staggered grid + + do j = 1, plat-1 + clat_staggered(j) = (clat(j) + clat(j+1)) / 2 + latdeg_st (j) = clat_staggered(j)*180.0_r8/pi + end do + + ! Weights are defined as cos(phi)*(delta-phi) + ! For a sanity check, the sum of w across all lats should be 2, or 1 across + ! half of the latitudes. + + do j = 2, plat-1 + w(j) = sin(clat_staggered(j)) - sin(clat_staggered(j-1)) + end do + w(1) = sin(clat_staggered(1)) + 1 + w(plat) = w(1) + + ! 
with nlat=2048, this error was 4e-16 + if (abs(sum(w(1:plat)) - 2) > 1.0e-8_r8) then + write(iulog,*) 'interpolate_mod: w weights do not sum to 2. sum=',sum(w(1:plat)) + call endrun('interpolate_mod: weights do not sum to 2.') + end if + + dp = pi / (plat-1) + do j = 1, plat-1 + w_staggered(j) = sin(clat(j+1)) - sin(clat(j)) + end do + + + if (abs(sum(w_staggered(1:plat-1)) - 2) > 1.0e-8_r8) then + write(iulog,*) 'interpolate_mod: staggered weights do not sum to 2. sum=',sum(w_staggered(1:plat-1)) + call endrun('interpolate_mod: weights do not sum to 2.') + end if + + if (gridtype==1) then + gweight(1:nlat)=w(1:plat) + endif + if (gridtype==3) then + gweight(1:nlat)=w_staggered(1:plat-1) + endif + endif + + + ! go through once, counting the number of points on each element + sphere%r=1 + local_elem_num = -1 + local_elem_gid = -1 + global_elem_gid = -1 + err=0 + do j=1,nlat + do i=1,nlon + sphere%lat=lat(j) + sphere%lon=lon(i) + + number = -1 + if ( (cubed_sphere_map /= 0) .or. MeshUseMeshFile) then + call cube_facepoint_unstructured(sphere, cart, number, elem) + if (number /= -1) then + ! If points are outside element but within tolerance, move to boundary + if (cart%x + 1.0_r8.le.0.0_r8) cart%x = -1.0_r8 + if (cart%x - 1.0_r8.ge.0.0_r8) cart%x = 1.0_r8 + if (cart%y + 1.0_r8.le.0.0_r8) cart%y = -1.0_r8 + if (cart%y - 1.0_r8.ge.0.0_r8) cart%y = 1.0_r8 + + local_elem_num(j,i) = number + local_elem_gid(j,i) = elem(number)%vertex%number + cart_vec(j,i) = cart ! local element coordiante of interpolation point + endif + else + call cube_facepoint_ne(sphere, ne, cart, number) + ! the sphere point belongs to the element number on face = face_no. + ! do I own this element? + if (number /= -1) then + do ii=1,nelemd + if (number == elem(ii)%vertex%number) then + local_elem_gid(j,i) = number + local_elem_num(j,i) = ii + cart_vec(j,i) = cart ! local element coordinate found above + exit + endif + enddo + endif + endif + ii=local_elem_num(j,i) + if (ii /= -1) then + ! compute error: map 'cart' back to sphere and compare with original + ! interpolation point: + sphere2_xyz = spherical_to_cart( ref2sphere(cart%x,cart%y, & + elem(ii)%corners3D,cubed_sphere_map,elem(ii)%corners,elem(ii)%facenum )) + sphere_xyz = spherical_to_cart(sphere) + err=max(err,distance(sphere2_xyz,sphere_xyz)) + endif + enddo + if (par%masterproc) then + if ((MOD(j,64).eq.1).or.(j.eq.nlat)) then + print *,'finished latitude ',j,' of ',nlat + endif + endif + enddo + err2=err + call MPI_Allreduce(err, err2, 1, MPI_real8, MPI_MAX, par%comm, ierr) + if (par%masterproc) then + write(iulog,'(a,e12.4)') 'Max interpolation point search error: ',err2 + endif + + ! 
if multile elements claim a interpolation point, take the one with largest gid: + global_elem_gid = local_elem_gid + call MPI_Allreduce(local_elem_gid, global_elem_gid, nlat*nlon, MPI_integer, MPI_MAX, par%comm,ierr) + + missing_pts=0 + do j=1,nlat + do i=1,nlon + if (global_elem_gid(j,i) == -1 ) then + missing_pts = missing_pts + 1 + if (par%masterproc) & + print *,'Error: point not claimed by any element j,i,lat(j),lon(i)=',j,i,lat(j),lon(i) + else if (local_elem_gid(j,i) == global_elem_gid(j,i) ) then + ii = local_elem_num(j,i) + interpdata(ii)%n_interp = interpdata(ii)%n_interp + 1 + endif + end do + end do + + countx=maxval(interpdata(1:nelemd)%n_interp) + count_max = countx + call MPI_Allreduce(countx,count_max,1,MPI_integer,MPI_MAX,par%comm,ierr) + + if (par%masterproc) then + write(iulog,'(a,i6)') 'Maximum number of interpolation points claimed by an element: ',count_max + endif + + ! allocate storage + do ii=1,nelemd + ngrid = interpdata(ii)%n_interp + if(interpdata(ii)%first_entry)then + NULLIFY(interpdata(ii)%interp_xy) + NULLIFY(interpdata(ii)%ilat) + NULLIFY(interpdata(ii)%ilon) + + interpdata(ii)%first_entry=.FALSE. + endif + if(associated(interpdata(ii)%interp_xy))then + if(size(interpdata(ii)%interp_xy)>0)deallocate(interpdata(ii)%interp_xy) + endif + if(associated(interpdata(ii)%ilat))then + if(size(interpdata(ii)%ilat)>0)deallocate(interpdata(ii)%ilat) + endif + + if (associated(interpdata(ii)%ilon))then + if(size(interpdata(ii)%ilon)>0)deallocate(interpdata(ii)%ilon) + endif + allocate(interpdata(ii)%interp_xy( ngrid ) ) + allocate(interpdata(ii)%ilat( ngrid ) ) + allocate(interpdata(ii)%ilon( ngrid ) ) + interpdata(ii)%n_interp=0 ! reset counter + enddo + do j=1,nlat + do i=1,nlon + if (local_elem_gid(j,i) == global_elem_gid(j,i) .and. & + local_elem_gid(j,i) /= -1 ) then + ii = local_elem_num(j,i) + ngrid = interpdata(ii)%n_interp + 1 + interpdata(ii)%n_interp = ngrid + interpdata(ii)%interp_xy( ngrid ) = cart_vec(j,i) + interpdata(ii)%ilon( ngrid ) = i + interpdata(ii)%ilat( ngrid ) = j + endif + enddo + enddo + + ! now lets compute the number of points that were claimed by + ! more than one element: + do j=1,nlat + do i=1,nlon + if (local_elem_gid(j,i) == -1) then + local_elem_gid(j,i)=0 + else + local_elem_gid(j,i)=1 + endif + enddo + enddo + global_elem_gid = local_elem_gid + call MPI_Allreduce(local_elem_gid, global_elem_gid, nlat*nlon, MPI_integer, MPI_SUM, par%comm,ierr) + if (par%masterproc) then + countx=0 + do j=1,nlat + do i=1,nlon + if (global_elem_gid(j,i)>1) countx=countx+1 + enddo + enddo + npts_mult_claims=countx + max_claims=maxval(global_elem_gid) + endif + + if (par%masterproc) then + print *,'Number of interpolation points claimed by more than one element: ',npts_mult_claims + print *,'max number of elements which claimed the same interpolation point:',max_claims + endif + + deallocate(global_elem_gid) + deallocate(local_elem_num) + deallocate(local_elem_gid) + deallocate(cart_vec) + + ! check if every point in interpolation grid was claimed by an element: + if (missing_pts>0) then + count_total = nlat*nlon + if(par%masterproc) then + write(iulog,"(3A,I4,A,I7,a,i5)")"Error:",__FILE__," ",__LINE__," count_total:",count_total," missing:",missing_pts + end if + call syncmp(par) + call endrun('Error: interpolation points not claimed by any element') + endif + + + end subroutine setup_latlon_interp + + + +! interpolate_scalar +! +! Interpolate a scalar field given in an element (fld_cube) to the points in +! interpdata%interp_xy(i), i=1 .. 
interpdata%n_interp. +! +! Note that it is possible the given element contains none of the interpolation points +! ======================================= +subroutine interpolate_ce(cart,fld_cube,npts,fld, fillvalue) + type (cartesian2D_t) :: cart + integer :: npts + real (kind=r8) :: fld_cube(npts,npts) ! cube field + real (kind=r8) :: fld ! field at new grid lat,lon coordinates + real (kind=r8), intent(in), optional :: fillvalue + ! Local variables + type (interpolate_t), pointer :: interp ! interpolation structure + + integer :: ne + integer :: i + + if (npts==np) then + interp => interp_p + else + call endrun('Error in interpolate_scalar(): must be called with p or v grid data') + endif + + fld=interpolate_2d(cart,fld_cube,interp,npts,fillvalue) + +end subroutine interpolate_ce + + + + ! ======================================= + ! interpolate_scalar + ! + ! Interpolate a scalar field given in an element (fld_cube) to the points in + ! interpdata%interp_xy(i), i=1 .. interpdata%n_interp. + ! + ! Note that it is possible the given element contains none of the interpolation points + ! ======================================= + subroutine interpolate_scalar2d(interpdata,fld_cube,nsize,nhalo,fld, fillvalue) + use dimensions_mod, only: npsq, fv_nphys,nc + integer, intent(in) :: nsize,nhalo + real (kind=r8), intent(in) :: fld_cube(1-nhalo:nsize+nhalo,1-nhalo:nsize+nhalo) ! cube field + real (kind=r8), intent(out):: fld(:) ! field at new grid lat,lon coordinates + type (interpdata_t), intent(in) :: interpdata + real (kind=r8), intent(in), optional :: fillvalue + ! Local variables + type (interpolate_t), pointer :: interp ! interpolation structure + + integer :: i,imin,imax,ne + real (kind=r8):: xoy(1-nhalo:nsize+nhalo),dx + type (cartesian2D_t) :: cart + + if (nsize==np.and.nhalo==0) then + ! + ! GLL grid + ! + interp => interp_p + xoy = interp%glp(:) + imin = 1 + imax = np + else if (nhalo>0.and.(nsize==fv_nphys.or.nsize==nc)) then + ! + ! finite-volume grid + ! + if (itype.ne.1) then + call endrun('itype must be 1 for latlon output from finite-volume (non-GLL) grids') + end if + imin = 1-nhalo + imax = nsize+nhalo + ! + ! create normalized coordinates + ! + dx = 2.0_r8/REAL(nsize,KIND=r8) + do i=imin,imax + xoy(i) = -1.0_r8+(i-0.5_r8)*dx + end do + else + call endrun('interpolate_scalar2d: resolution not supported') + endif + + ! Choice for Native (high-order) or Bilinear interpolations + if(present(fillvalue)) then + if (itype == 0) then + do i=1,interpdata%n_interp + fld(i)=interpolate_2d(interpdata%interp_xy(i),fld_cube,interp,nsize,fillvalue) + end do + elseif (itype == 1) then + do i=1,interpdata%n_interp + fld(i)=interpol_bilinear(interpdata%interp_xy(i),fld_cube,xoy,imin,imax,fillvalue) + end do + end if + else + if (itype == 0) then + do i=1,interpdata%n_interp + fld(i)=interpolate_2d(interpdata%interp_xy(i),fld_cube,interp,nsize) + end do + elseif (itype == 1) then + do i=1,interpdata%n_interp + fld(i)=interpol_bilinear(interpdata%interp_xy(i),fld_cube,xoy,imin,imax) + end do + end if + endif + + + end subroutine interpolate_scalar2d + subroutine interpolate_scalar3d(interpdata,fld_cube,nsize,nhalo,nlev,fld, fillvalue) + use dimensions_mod, only: npsq, fv_nphys,nc + integer , intent(in) :: nsize, nhalo, nlev + real (kind=r8), intent(in) :: fld_cube(1-nhalo:nsize+nhalo,1-nhalo:nsize+nhalo,nlev) ! cube field + real (kind=r8), intent(out) :: fld(:,:) ! 
field at new grid lat,lon coordinates + type (interpdata_t), intent(in) :: interpdata + real (kind=r8), intent(in), optional :: fillvalue + ! Local variables + type (interpolate_t), pointer :: interp ! interpolation structure + + integer :: ne + + integer :: i, k, imin, imax + real (kind=r8) :: xoy(1-nhalo:nsize+nhalo),dx + + type (cartesian2D_t) :: cart + + if (nsize==np.and.nhalo==0) then + ! + ! GLL grid + ! + interp => interp_p + xoy = interp%glp(:) + imin = 1 + imax = np + else if (nhalo>0.and.(nsize==fv_nphys.or.nsize==nc)) then + ! + ! finite-volume grid + ! + if (itype.ne.1) then + call endrun('itype must be 1 for latlon output from finite-volume (non-GLL) grids') + end if + imin = 1-nhalo + imax = nsize+nhalo + ! + ! create normalized coordinates + ! + dx = 2.0_r8/REAL(nsize,KIND=r8) + do i=imin,imax + xoy(i) = -1.0_r8+(i-0.5_r8)*dx + end do + else + call endrun('interpolate_scalar3d: resolution not supported') + endif + + ! Choice for Native (high-order) or Bilinear interpolations + if(present(fillvalue)) then + if (itype == 0) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k)=interpolate_2d(interpdata%interp_xy(i),fld_cube(:,:,k),interp,nsize,fillvalue) + end do + end do + elseif (itype == 1) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k)=interpol_bilinear(interpdata%interp_xy(i),fld_cube(:,:,k),xoy,imin,imax,fillvalue) + end do + end do + endif + else + if (itype == 0) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k)=interpolate_2d(interpdata%interp_xy(i),fld_cube(:,:,k),interp,nsize) + end do + end do + elseif (itype == 1) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k)=interpol_bilinear(interpdata%interp_xy(i),fld_cube(:,:,k),xoy,imin,imax) + end do + end do + else + write(iulog,*) itype + call endrun("wrong interpolation type") + endif + endif + end subroutine interpolate_scalar3d + + + ! ======================================= + ! interpolate_vector + ! + ! Interpolate a vector field given in an element (fld_cube) + ! to the points in interpdata%interp_xy(i), i=1 .. interpdata%n_interp. + ! + ! input_coords = 0 fld_cube given in lat-lon + ! input_coords = 1 fld_cube given in contravariant + ! + ! Note that it is possible the given element contains none of the interpolation points + ! ======================================= + subroutine interpolate_vector2d(interpdata,elem,fld_cube,npts,fld,input_coords, fillvalue) + implicit none + integer :: npts + real (kind=r8) :: fld_cube(npts,npts,2) ! vector field + real (kind=r8) :: fld(:,:) ! field at new grid lat,lon coordinates + type (interpdata_t) :: interpdata + type (element_t), intent(in) :: elem + real (kind=r8), intent(in), optional :: fillvalue + integer :: input_coords + + + ! Local variables + real (kind=r8) :: fld_contra(npts,npts,2) ! vector field + type (interpolate_t), pointer :: interp ! interpolation structure + + real (kind=r8) :: v1,v2 + real (kind=r8) :: D(2,2) ! derivative of gnomonic mapping + real (kind=r8) :: JJ(2,2), tmpD(2,2) ! derivative of gnomonic mapping + + integer :: i,j + + type (cartesian2D_t) :: cart + + if(present(fillvalue)) then + if (any(fld_cube==fillvalue)) then + fld = fillvalue + return + end if + end if + + if (input_coords==0 ) then + ! convert to contra + do j=1,npts + do i=1,npts + ! 
latlon->contra + fld_contra(i,j,1) = elem%Dinv(i,j,1,1)*fld_cube(i,j,1) + elem%Dinv(i,j,1,2)*fld_cube(i,j,2) + fld_contra(i,j,2) = elem%Dinv(i,j,2,1)*fld_cube(i,j,1) + elem%Dinv(i,j,2,2)*fld_cube(i,j,2) + enddo + enddo + else + fld_contra=fld_cube + endif + + + if (npts==np) then + interp => interp_p + else if (npts==np) then + call endrun('Error in interpolate_vector(): input must be on velocity grid') + endif + + + ! Choice for Native (high-order) or Bilinear interpolations + + if (itype == 0) then + do i=1,interpdata%n_interp + fld(i,1)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,1),interp,npts) + fld(i,2)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,2),interp,npts) + end do + elseif (itype == 1) then + do i=1,interpdata%n_interp + fld(i,1)=interpol_bilinear(interpdata%interp_xy(i),fld_contra(:,:,1),interp%glp(:),1,np) + fld(i,2)=interpol_bilinear(interpdata%interp_xy(i),fld_contra(:,:,2),interp%glp(:),1,np) + end do + else + write(iulog,*) itype + call endrun("wrong interpolation type") + endif + do i=1,interpdata%n_interp + ! convert fld from contra->latlon + call dmap(D,interpdata%interp_xy(i)%x,interpdata%interp_xy(i)%y,& + elem%corners3D,cubed_sphere_map,elem%corners,elem%u2qmap,elem%facenum) + ! convert fld from contra->latlon + v1 = fld(i,1) + v2 = fld(i,2) + + fld(i,1)=D(1,1)*v1 + D(1,2)*v2 + fld(i,2)=D(2,1)*v1 + D(2,2)*v2 + end do + + end subroutine interpolate_vector2d + + ! ======================================= + ! interpolate_vector + ! + ! Interpolate a vector field given in an element (fld_cube) + ! to the points in interpdata%interp_xy(i), i=1 .. interpdata%n_interp. + ! + ! input_coords = 0 fld_cube given in lat-lon + ! input_coords = 1 fld_cube given in contravariant + ! + ! Note that it is possible the given element contains none of the interpolation points + ! ======================================= + subroutine interpolate_vector3d(interpdata,elem,fld_cube,npts,nlev,fld,input_coords, fillvalue) + implicit none + type (interpdata_t),intent(in) :: interpdata + type (element_t), intent(in) :: elem + integer, intent(in) :: npts, nlev + real (kind=r8), intent(in) :: fld_cube(npts,npts,2,nlev) ! vector field + real (kind=r8), intent(out) :: fld(:,:,:) ! field at new grid lat,lon coordinates + real (kind=r8), intent(in),optional :: fillvalue + integer, intent(in) :: input_coords + + ! Local variables + real (kind=r8) :: fld_contra(npts,npts,2,nlev) ! vector field + type (interpolate_t), pointer :: interp ! interpolation structure + + real (kind=r8) :: v1,v2 + real (kind=r8) :: D(2,2) ! derivative of gnomonic mapping + real (kind=r8) :: JJ(2,2), tmpD(2,2) ! derivative of gnomonic mapping + + + integer :: i,j,k + + type (cartesian2D_t) :: cart + if(present(fillvalue)) then + if (any(fld_cube==fillvalue)) then + fld = fillvalue + return + end if + end if + if (input_coords==0 ) then + ! convert to contra + do k=1,nlev + do j=1,npts + do i=1,npts + ! latlon->contra + fld_contra(i,j,1,k) = elem%Dinv(i,j,1,1)*fld_cube(i,j,1,k) + elem%Dinv(i,j,1,2)*fld_cube(i,j,2,k) + fld_contra(i,j,2,k) = elem%Dinv(i,j,2,1)*fld_cube(i,j,1,k) + elem%Dinv(i,j,2,2)*fld_cube(i,j,2,k) + enddo + enddo + end do + else + fld_contra=fld_cube + endif + + if (npts==np) then + interp => interp_p + else if (npts==np) then + call endrun('Error in interpolate_vector(): input must be on velocity grid') + endif + + + ! 
Choice for Native (high-order) or Bilinear interpolations + + if (itype == 0) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k,1)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,1,k),interp,npts) + fld(i,k,2)=interpolate_2d(interpdata%interp_xy(i),fld_contra(:,:,2,k),interp,npts) + end do + end do + elseif (itype == 1) then + do k=1,nlev + do i=1,interpdata%n_interp + fld(i,k,1)=interpol_bilinear(interpdata%interp_xy(i),fld_contra(:,:,1,k),interp%glp(:),1,np) + fld(i,k,2)=interpol_bilinear(interpdata%interp_xy(i),fld_contra(:,:,2,k),interp%glp(:),1,np) + end do + end do + else + call endrun("wrong interpolation type") + endif + + + do i=1,interpdata%n_interp + ! compute D(:,:) at the point elem%interp_cube(i) + call dmap(D,interpdata%interp_xy(i)%x,interpdata%interp_xy(i)%y,& + elem%corners3D,cubed_sphere_map,elem%corners,elem%u2qmap,elem%facenum) + do k=1,nlev + ! convert fld from contra->latlon + v1 = fld(i,k,1) + v2 = fld(i,k,2) + + fld(i,k,1)=D(1,1)*v1 + D(1,2)*v2 + fld(i,k,2)=D(2,1)*v1 + D(2,2)*v2 + end do + end do + end subroutine interpolate_vector3d + + subroutine vec_latlon_to_contra(elem,nphys,nhalo,nlev,fld,fvm) + use fvm_control_volume_mod, only: fvm_struct + use dimensions_mod, only: fv_nphys + integer , intent(in) :: nphys,nhalo,nlev + real(kind=r8), intent(inout):: fld(1-nhalo:nphys+nhalo,1-nhalo:nphys+nhalo,2,nlev) + type (element_t), intent(in) :: elem + type(fvm_struct), intent(in), optional :: fvm + ! + ! local variables + ! + integer :: i,j,k + real(r8):: v1,v2 + + if (nhalo==0.and.nphys==np) then + do k=1,nlev + do j=1,nphys + do i=1,nphys + ! latlon->contra + v1 = fld(i,j,1,k) + v2 = fld(i,j,2,k) + fld(i,j,1,k) = elem%Dinv(i,j,1,1)*v1 + elem%Dinv(i,j,1,2)*v2 + fld(i,j,2,k) = elem%Dinv(i,j,2,1)*v1 + elem%Dinv(i,j,2,2)*v2 + enddo + enddo + end do + else if (nphys==fv_nphys.and.nhalo.le.fv_nphys) then + do k=1,nlev + do j=1-nhalo,nphys+nhalo + do i=1-nhalo,nphys+nhalo + ! 
latlon->contra + v1 = fld(i,j,1,k) + v2 = fld(i,j,2,k) + fld(i,j,1,k) = fvm%Dinv_physgrid(i,j,1,1)*v1 + fvm%Dinv_physgrid(i,j,1,2)*v2 + fld(i,j,2,k) = fvm%Dinv_physgrid(i,j,2,1)*v1 + fvm%Dinv_physgrid(i,j,2,2)*v2 + enddo + enddo + end do + else + call endrun('ERROR: vec_latlon_to_contra - grid not supported or halo too large') + end if + end subroutine vec_latlon_to_contra +end module interpolate_mod diff --git a/src/dynamics/se/dycore/ll_mod.F90 b/src/dynamics/se/dycore/ll_mod.F90 new file mode 100644 index 00000000..cf445c86 --- /dev/null +++ b/src/dynamics/se/dycore/ll_mod.F90 @@ -0,0 +1,149 @@ +module ll_mod + implicit none + private + type :: node_t + integer :: id + integer :: Src,Dest + type(node_t), pointer :: prev => NULL() + type(node_t), pointer :: next => NULL() + end type node_t + + type :: root_t + integer :: number + type(node_t), pointer :: first => NULL() + end type root_t + public :: node_t, root_t + integer, public :: NumEdges + + public :: PrintEdgeList + public :: LLAddEdge,LLFindEdge, LLInsertEdge + public :: LLSetEdgeCount,LLGetEdgeCount + public :: LLFree + +contains + + subroutine LLSetEdgeCount(value) + implicit none + integer,intent(in) :: value + NumEdges=value + end subroutine LLSetEdgeCount + + subroutine LLGetEdgeCount(value) + implicit none + integer,intent(out) :: value + value=NumEdges + end subroutine LLGetEdgeCount + + subroutine PrintEdgeList(EdgeList) + + type(root_t) :: EdgeList(:) + type(node_t), pointer :: temp_node + integer :: nlist, i + nlist = SIZE(EdgeList) + + do i=1,nlist + temp_node => EdgeList(i)%first + do while(associated(temp_node)) + print *,'Vertex: ',EdgeList(i)%number ,temp_node%Src,'->' ,temp_node%dest, '(',temp_node%id,')' + temp_node => temp_node%next + enddo + enddo + + end subroutine PrintEdgeList + + subroutine LLFree(List) + + implicit none + type(root_t) :: List + type(node_t), pointer :: temp_node + integer :: nlist,i + + + temp_node => List%first + if (associated(temp_node)) then + ! Find the end of the list + do while(associated(temp_node%next)) + temp_node => temp_node%next + end do + + temp_node => temp_node%prev + !Now step back and deallocate all entries + do while(associated(temp_node)) + deallocate(temp_node%next) + temp_node => temp_node%prev + end do + end if + + end subroutine LLFree + + subroutine LLInsertEdge(EdgeList,src,dest,eNum) + type (root_t), intent(inout) :: EdgeList + integer, intent(in) :: src,dest + integer, intent(out) :: eNum + logical :: found + + call LLFindEdge(EdgeList,src,dest,eNum,found) + if(.not. found) then + call LLAddEdge(EdgeList,src,dest,eNum) + endif + + end subroutine LLInsertEdge + + subroutine LLFindEdge(Edge,src,dest,id,found) + + type (root_t), intent(in) :: Edge + integer, intent(in) :: src,dest + integer, intent(out) :: id + logical, intent(out) :: found + + type (node_t), pointer :: temp_node + + found =.FALSE. + + temp_node => Edge%first + do while(associated(temp_node) .and. (.not. found)) + if((dest .eq. temp_node%dest) .and. (src .eq. temp_node%Src) ) then + found = .TRUE. 
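+          ! Editorial note: on a match, return the edge id that was stored when the
+          ! edge was first created by LLAddEdge, so that LLInsertEdge reuses existing
+          ! edge numbers instead of adding a duplicate entry.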
+ id=temp_node%id + else + temp_node => temp_node%next + endif + enddo + end subroutine LLFindEdge + + subroutine LLAddEdge(EdgeList,src,dest,id) + type (root_t), intent(inout) :: EdgeList + integer, intent(in) :: src + integer, intent(in) :: dest + integer, intent(out) :: id + + type(node_t), pointer :: temp_node + type(node_t), pointer :: new_node + type(node_t), pointer :: parent + + temp_node => EdgeList%first + parent => EdgeList%first + + do while(associated(temp_node)) + parent => temp_node + temp_node => parent%next + enddo + allocate(new_node) + NumEdges = NumEdges + 1 + + new_node%src=src + new_node%dest=dest + new_node%id=NumEdges + NULLIFY(new_node%next) + new_node%prev => parent + + if(associated(EdgeList%first)) then + parent%next => new_node + else + EdgeList%first => new_node + endif + id = NumEdges + + end subroutine LLAddEdge + +end module ll_mod diff --git a/src/dynamics/se/dycore/mass_matrix_mod.F90 b/src/dynamics/se/dycore/mass_matrix_mod.F90 new file mode 100644 index 00000000..a59f1cc1 --- /dev/null +++ b/src/dynamics/se/dycore/mass_matrix_mod.F90 @@ -0,0 +1,120 @@ +module mass_matrix_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: np, nelemd + use quadrature_mod, only: quadrature_t, gauss ,gausslobatto + use element_mod, only: element_t + use parallel_mod, only: parallel_t + use edge_mod, only: edgevpack, edgevunpack, & + freeedgebuffer,initedgebuffer + use edgetype_mod, only: edgebuffer_t + use bndry_mod, only: bndry_exchange + +implicit none +private + + public :: mass_matrix + +contains + +! =========================================== +! mass_matrix: +! +! Compute the mass matrix for each element... +! =========================================== + + subroutine mass_matrix(par,elem) + + type (parallel_t),intent(in) :: par + type (element_t) :: elem(:) + + type (EdgeBuffer_t) :: edge + + real(kind=r8) da ! area element + + type (quadrature_t) :: gp + + integer ii + integer i,j + integer kptr + integer iptr + + ! =================== + ! begin code + ! =================== + + call initEdgeBuffer(par,edge,elem,1,nthreads=1) + + ! ================================================= + ! mass matrix on the velocity grid + ! ================================================= + + gp=gausslobatto(np) + + do ii=1,nelemd + do j=1,np + do i=1,np + ! MNL: metric term for map to reference element is now in metdet! + elem(ii)%mp(i,j)=gp%weights(i)*gp%weights(j) + elem(ii)%rmp(i,j)=elem(ii)%mp(i,j) + end do + end do + + kptr=0 + call edgeVpack(edge,elem(ii)%rmp,1,kptr,ii) + + end do + + ! ============================== + ! Insert boundary exchange here + ! ============================== + + call bndry_exchange(par,edge,location='mass_matrix #1') + + do ii=1,nelemd + + kptr=0 + call edgeVunpack(edge,elem(ii)%rmp,1,kptr,ii) + + do j=1,np + do i=1,np + elem(ii)%rmp(i,j)=1.0_r8/elem(ii)%rmp(i,j) + end do + end do + + end do +!$OMP BARRIER + + deallocate(gp%points) + deallocate(gp%weights) + + ! ============================================= + ! compute spherical element mass matrix + ! 
============================================= + do ii=1,nelemd + do j=1,np + do i=1,np + elem(ii)%spheremp(i,j)=elem(ii)%mp(i,j)*elem(ii)%metdet(i,j) + elem(ii)%rspheremp(i,j)=elem(ii)%spheremp(i,j) + end do + end do + kptr=0 + call edgeVpack(edge,elem(ii)%rspheremp,1,kptr,ii) + end do + call bndry_exchange(par,edge,location='mass_matrix #2') + do ii=1,nelemd + kptr=0 + call edgeVunpack(edge,elem(ii)%rspheremp,1,kptr,ii) + do j=1,np + do i=1,np + elem(ii)%rspheremp(i,j)=1.0_r8/elem(ii)%rspheremp(i,j) + end do + end do + end do +!$OMP BARRIER + + call FreeEdgeBuffer(edge) + + end subroutine mass_matrix + +end module mass_matrix_mod + diff --git a/src/dynamics/se/dycore/mesh_mod.F90 b/src/dynamics/se/dycore/mesh_mod.F90 new file mode 100644 index 00000000..c5e22868 --- /dev/null +++ b/src/dynamics/se/dycore/mesh_mod.F90 @@ -0,0 +1,1289 @@ +module mesh_mod + + use shr_kind_mod, only: r8=>shr_kind_r8 + use physconst, only: PI + use control_mod, only: MAX_FILE_LEN + use cam_abortutils, only: endrun + + use netcdf, only: nf90_strerror, nf90_open, nf90_close + use netcdf, only: NF90_NOWRITE, nf90_NoErr + use netcdf, only: nf90_inq_dimid, nf90_inquire_dimension + use netcdf, only: nf90_inq_varid, nf90_get_var + + implicit none + logical, public :: MeshUseMeshFile = .false. + + public :: MeshOpen ! Must be called first + + integer, parameter :: MXSTLN = 32 + + ! =============================== + ! Public methods for mesh_mod + ! =============================== + + public :: MeshCubeEdgeCount ! called anytime afer MeshOpen + public :: MeshCubeElemCount ! called anytime afer MeshOpen + public :: MeshCubeTopology ! called afer MeshOpen + public :: MeshSetCoordinates ! called after MeshCubeTopology + public :: MeshPrint ! show the contents of the Mesh after it has been loaded into the module + public :: MeshClose + ! =============================== + ! Private members + ! =============================== + + integer,private,parameter :: nfaces = 6 ! number of faces on the cube + integer,private,parameter :: nInnerElemEdge = 8 ! number of edges for an interior element + + character (len=MAX_FILE_LEN), private :: p_mesh_file_name + integer , private :: p_ncid + integer , private :: p_number_elements + integer , private :: p_number_elements_per_face + integer , private :: p_number_blocks + integer , private :: p_number_nodes + integer , private :: p_number_dimensions + integer , private :: p_number_neighbor_edges + real(kind=r8) , private, allocatable :: p_node_coordinates(:,:) + integer , private, allocatable :: p_connectivity(:,:) + + ! =============================== + ! Private methods + ! =============================== + + private :: create_index_table + private :: find_side_neighbors + private :: find_corner_neighbors + private :: get_node_coordinates + private :: get_2D_sub_coordinate_indexes + private :: mesh_connectivity + private :: cube_face_element_centroids + private :: smallest_diameter_element + private :: cube_to_cube_coordinates + private :: sphere_to_cube_coordinates + private :: initialize_space_filling_curve + + private :: handle_error + private :: open_mesh_file + private :: close_mesh_file + private :: get_number_of_elements + private :: get_number_of_dimensions + private :: get_number_of_elements_per_face + private :: get_number_of_nodes + private :: get_number_of_element_blocks + private :: get_node_multiplicity + private :: get_face_connectivity + +CONTAINS + +!====================================================================== +! 
subroutine handle_error +!====================================================================== + subroutine handle_error (status, file, line) + + integer, intent(in) :: status + character (len=*), intent(in) :: file + integer, intent(in) :: line + print *, file,':', line, ': ', trim(nf90_strerror(status)) + call endrun("Terminating program due to netcdf error while obtaining mesh information, please see message in standard output.") + end subroutine handle_error + +!====================================================================== +! open_mesh_file() +! +!> Open the netcdf file containing the mesh. +!! Assign the holder to the file to p_ncid so everyone else knows +!! how to use it without passing the argument around. +!====================================================================== + subroutine open_mesh_file() + implicit none + integer :: status + + status = nf90_open(p_mesh_file_name, NF90_NOWRITE, p_ncid) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + MeshUseMeshFile = .true. + + end subroutine open_mesh_file + +!====================================================================== +! subroutine close_mesh_file() +!====================================================================== + + subroutine close_mesh_file() + implicit none + integer :: status + + status = nf90_close(p_ncid) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + end subroutine close_mesh_file + +!====================================================================== +! function get_number_of_dimensions() +!====================================================================== + + function get_number_of_dimensions() result(number_dimensions) + implicit none + integer :: number_dimensions + + ! local variables + integer :: status, number_of_dim_id + + ! Get the id of 'num_elem', if such dimension is not there panic and quit :P + status = nf90_inq_dimid(p_ncid, "num_dim", number_of_dim_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + ! How many values for 'num_elem' are there? + status = nf90_inquire_dimension(p_ncid, number_of_dim_id, len = number_dimensions) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + end function get_number_of_dimensions + +!====================================================================== +! function get_number_of_elements() +!====================================================================== + + function get_number_of_elements() result(number_elements) + implicit none + integer :: number_elements + ! local variables + integer :: status, number_of_elements_id + + ! Get the id of 'num_elem', if such dimension is not there panic and quit :P + status = nf90_inq_dimid(p_ncid, "num_elem", number_of_elements_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + ! How many values for 'num_elem' are there? + status = nf90_inquire_dimension(p_ncid, number_of_elements_id, len = number_elements) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + end function get_number_of_elements + +!====================================================================== +! function get_number_of_nodes() +!====================================================================== + function get_number_of_nodes() result(number_nodes) + implicit none + integer :: number_nodes + ! local variables + integer :: status, number_of_nodes_id + + ! 
Get the id of 'num_nodes', if such dimension is not there panic and quit :P + status = nf90_inq_dimid(p_ncid, "num_nodes", number_of_nodes_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + ! How many values for 'num_nodes' are there? + status = nf90_inquire_dimension(p_ncid, number_of_nodes_id, len = number_nodes) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + end function get_number_of_nodes + + +!====================================================================== +! function get_number_of_element_blocks() +!====================================================================== + function get_number_of_element_blocks() result(number_element_blocks) + + integer :: number_element_blocks + ! local variables + integer :: status, number_of_element_blocks_id + + ! Get the id of 'num_el_blk', if such dimension is not there panic and quit :P + status = nf90_inq_dimid(p_ncid, "num_el_blk", number_of_element_blocks_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + ! How many values for 'num_el_blk' are there? + status = nf90_inquire_dimension(p_ncid, number_of_element_blocks_id, len = number_element_blocks) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + + if (number_element_blocks /= 1) then + if (number_element_blocks /= 6 ) then + call endrun('Reading cube-sphere from input file is not supported') + else + call endrun('Number of elements blocks not exactly 1 (sphere) or 6 (cube)') + endif + endif + + end function get_number_of_element_blocks + +!====================================================================== +! function get_number_of_elements_per_face() +!====================================================================== + function get_number_of_elements_per_face() result(number_elements_per_face) + + integer :: number_elements_per_face + + integer :: face_num ! For each of the face, we get the information + character(len=MXSTLN) :: element_type ! Each face is composed of elements of certain type + integer :: number_elements_in_face ! How many elements in this face + integer :: num_nodes_per_elem ! How many nodes in each element + integer :: number_of_attributes ! How many attributes in the face + + integer :: status, dimension_id + + if (p_number_blocks == 0) then + call endrun('get_number_of_elements_per_face called before MeshOpen') + else if (p_number_blocks == 1) then ! we are in the presence of a sphere + ! First we get sure the number of nodes per element is four + status = nf90_inq_dimid(p_ncid, "num_nod_per_el1", dimension_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + status = nf90_inquire_dimension(p_ncid, dimension_id, len = num_nodes_per_elem) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + if (num_nodes_per_elem /= 4) call endrun('Number of nodes per element is not four') + ! now we check how many elements there are in the face + status = nf90_inq_dimid(p_ncid, "num_el_in_blk1", dimension_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + status = nf90_inquire_dimension(p_ncid, dimension_id, len = number_elements_in_face) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + number_elements_per_face = number_elements_in_face + else if (p_number_blocks == 6) then ! 
we are in the presence of a cube-sphere + call endrun('Reading a mesh for a cube-sphere is not supported') + else + call endrun('Number of elements blocks not exactly 1 (sphere) or 6 (cube)') + end if + + end function get_number_of_elements_per_face + +!====================================================================== +! subroutine get_face_connectivity +!====================================================================== + subroutine get_face_connectivity() + + integer :: var_id, status + + status = nf90_inq_varid(p_ncid, "connect1", var_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + status = nf90_get_var(p_ncid, var_id, p_connectivity) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + end subroutine get_face_connectivity + +!====================================================================== +! subroutine get_node_multiplicity +!====================================================================== + subroutine get_node_multiplicity(node_multiplicity) + use dimensions_mod, only : max_elements_attached_to_node + + integer, intent(out) :: node_multiplicity(:) + integer :: node_num(4) + + integer :: k, number_nodes + + node_multiplicity(:) = 0 + number_nodes = SIZE(node_multiplicity) + ! check this external buffer was allocated correctly + if (number_nodes /= p_number_nodes) call endrun('Number of nodes does not matches size of node multiplicity array') + ! for each node, we have for four other nodes + + if (minval(p_connectivity) < 1 .or. number_nodes < maxval(p_connectivity)) then + call endrun('get_node_multiplicity: Node number less than 1 or greater than max.') + end if + + do k=1,p_number_elements_per_face + node_num = p_connectivity(:,k) + node_multiplicity(node_num) = node_multiplicity(node_num) + 1 + enddo + + if (minval(node_multiplicity) < 3 .or. max_elements_attached_to_node < maxval(node_multiplicity)) then + print *, 'minval(node_multiplicity)', minval(node_multiplicity) + print *, 'maxval(node_multiplicity)', maxval(node_multiplicity),& + ' and max_elements_attached_to_node ',max_elements_attached_to_node + call endrun('get_node_multiplicity: Number of elements attached to node less than 3 or greater than maximum.') + endif + + end subroutine get_node_multiplicity + +!====================================================================== +! subroutine get_node_coordinates () +!====================================================================== + subroutine get_node_coordinates () + + integer :: var_id, status + + status = nf90_inq_varid(p_ncid, "coord", var_id) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + status = nf90_get_var(p_ncid, var_id, p_node_coordinates) + if(status /= nf90_NoErr) call handle_error(status, __FILE__, __LINE__) + end subroutine get_node_coordinates + + ! ================================================================================ + ! + ! -----------------Internal private routines that do not use netCDF IO ----------- + ! + ! ================================================================================ + +!====================================================================== +! subroutine get_2D_sub_coordinate_indexes +!====================================================================== + subroutine get_2D_sub_coordinate_indexes(x, y, sgnx, sgny, face_no) + implicit none + integer, intent(in) :: face_no + integer, intent(out) :: x,y + integer, intent(out) :: sgnx, sgny + if (face_no == 1 .or. 
face_no == 3) then + x = 2 + y = 3 + else if (face_no == 2 .or. face_no == 4) then + x = 1 + y = 3 + else + x = 2 + y = 1 + endif + if (face_no == 1 .or. face_no == 4 .or. face_no == 5) then + sgnx = 1 + sgny = 1 + else if (face_no == 2 .or. face_no == 3) then + sgnx = -1 + sgny = 1 + else + sgnx = 1 + sgny = -1 + endif + end subroutine get_2D_sub_coordinate_indexes + + + +!====================================================================== +! subroutine mesh_connectivity(connect) +! +! puts the transpose of p_connectivity into connect +!====================================================================== + + subroutine mesh_connectivity (connect) + + integer, intent(out) :: connect(p_number_elements,4) + + integer :: k, j + + if (0 == p_number_blocks) call endrun('mesh_connectivity called before MeshOpen') + j=0 + do k=1, p_number_elements_per_face + j=j+1 + connect(j,:) = p_connectivity(:,k) + enddo + + if (j /= p_number_elements) call endrun('mesh_connectivity: Number of elements in side sets not equal to total elements') + + if (minval(connect) < 1 .or. maxval(connect) > p_number_nodes) then + call endrun('mesh_connectivity: Node number out of bounds') + end if + + end subroutine mesh_connectivity +!====================================================================== +! subroutine create_index_table() +! +! this is needed to detremine side and corner neighbors +!====================================================================== + + subroutine create_index_table(index_table, element_nodes) + use dimensions_mod, only : max_elements_attached_to_node + + integer, allocatable, intent(inout) :: index_table(:,:) + integer , intent(in) :: element_nodes(p_number_elements, 4) + integer :: cnt, cnt_index, node + integer :: k, ll + + !Create an index table so that we can find neighbors on O(n) + ! so for each node, we want to know which elements it is part of + allocate(index_table(p_number_nodes, max_elements_attached_to_node + 1)) + + !the last column in the index table is a count of the number of elements + index_table = 0 + + cnt_index = max_elements_attached_to_node + 1 + + do k=1,p_number_elements + do ll=1,4 + node = element_nodes(k, ll) !the node + cnt = index_table(node, cnt_index) !how many elements for that node already in table + cnt = cnt + 1 !increment since we are adding an element + if (cnt > max_elements_attached_to_node) then + call endrun('Found a node in too many elements.') + endif + index_table(node, cnt_index) = cnt + index_table(node, cnt) = k !put the element in the indextable + enddo + enddo + + end subroutine create_index_table + +!====================================================================== +! subroutine find_side_neighbors() +! +! find the element neighbors to the n,s,e,w and put them in GridVertex_t +! 
(only 1 neighbor to the n,s,e,w) +!====================================================================== + subroutine find_side_neighbors (GridVertex, normal_to_homme_ordering, element_nodes, edge_wgt, index_table) + use coordinate_systems_mod, only : cartesian3D_t + use gridgraph_mod, only : GridVertex_t + use dimensions_mod, only : max_elements_attached_to_node + + integer , intent(in) :: normal_to_homme_ordering(8) + integer , intent(in) :: element_nodes(p_number_elements, 4) + integer , intent(in) :: edge_wgt + integer , intent(in) :: index_table(:,:) + type (GridVertex_t) , intent(inout) :: GridVertex(:) + + integer :: i_node(2), my_node(2) + integer :: neighbor, direction + integer :: j,k,ll,i, m + integer :: i_elem, jump, end_i + integer :: loc, cnt_index, a_count(2) + logical :: found + if (0 == p_number_blocks) call endrun('find_side_neighbors called before MeshOpen') + + + !the last column in the index table is a count of the number of elements + cnt_index = max_elements_attached_to_node + 1 + + !use index table to find neighbors + do k=1,p_number_elements ! for each element k + !set the side weights + GridVertex(k)%nbrs_wgt(1:4) = edge_wgt + do ll=1,4 !loop through the four sides + + jump = normal_to_homme_ordering(ll) + loc = GridVertex(k)%nbrs_ptr(jump) + + if (GridVertex(k)%nbrs(loc) == 0) then !if side is not set yet, then + !look for side element + found = .false. + neighbor = 0 + + my_node(1) = element_nodes(k, ll) + a_count(1) = index_table(my_node(1), cnt_index) + my_node(2) = element_nodes(k, mod(ll,4)+1) + a_count(2) = index_table(my_node(2), cnt_index) + + !loop through the elements that are in the index table for each node + !and find the element number and direction of the side neighbor + do m = 1,2 + if (found) exit + end_i = a_count(m) + do i = 1, end_i + if (found) exit + i_elem = index_table(my_node(m),i) + if (i_elem /= k) then !k is the element we are setting sides for + do j=1,4 !loop through each of i_elem's four sides + i_node(1) = element_nodes(i_elem, j) + i_node(2) = element_nodes(i_elem, mod(j,4)+1) + if ( (i_node(1) == my_node(2) .and. i_node(2) == my_node(1)) .or. & + (i_node(1) == my_node(1) .and. i_node(2) == my_node(2)) ) then + neighbor = i_elem + direction = j + found = .true. + !found a match + exit + end if + end do ! j loop + end if + enddo ! i loop + enddo !m loop + + if (neighbor == 0) call endrun('find_side_neighbor: Neighbor not found! Every side should have a neighbor.') + + GridVertex(k)%nbrs(loc) = neighbor + jump = normal_to_homme_ordering(direction) + loc = GridVertex(neighbor)%nbrs_ptr(jump) + GridVertex(neighbor)%nbrs(loc)= k + endif + enddo ! ll loop => 4 sides + enddo ! k loop: each element + + do k=1,p_number_elements + do ll=1,4 + if ( 0 == GridVertex(k)%nbrs(ll)) then + call endrun('Found one side of one element witout a neighbor. Bummer!') + end if + end do + end do + + end subroutine find_side_neighbors + +!====================================================================== +! function smallest_diameter_element +!====================================================================== + + function smallest_diameter_element(element_nodes) result(min_diameter) + + integer ,intent(in) :: element_nodes(:,:) + + integer :: i, j + integer :: node_numbers(4) + real(kind=r8) :: coordinates (4,3) + real(kind=r8) :: x(3), y(3), r(3), d, min_diameter + + if (SIZE(element_nodes,dim=1) /= p_number_elements) then + call endrun('smallest_diameter_element:Element count check failed in & + &exodus_mesh. 
Connectivity array length not equal to number of elements.') + end if + if ( p_number_elements_per_face /= p_number_elements) then + call endrun('smallest_diameter_element: Element count check failed in & + &exodus_mesh. Element array length not equal to sum of face.') + end if + + min_diameter = 9999999.0_r8 + do i=1, p_number_elements + node_numbers = element_nodes(i,:) + coordinates = p_node_coordinates(node_numbers,:) + ! smallest side length + do j=1,4 + x = coordinates(j ,:) + y = coordinates(1+MOD(j,4),:) + r = x-y + d = dot_product(r,r) + if (d < min_diameter ) then + min_diameter = d + end if + end do + ! smallest diameter length + do j=1,2 + x = coordinates(j ,:) + y = coordinates(2+MOD(j,4),:) + r = x-y + d = dot_product(r,r) + if (d < min_diameter ) then + min_diameter = d + end if + end do + enddo + min_diameter = SQRT(min_diameter) + end function smallest_diameter_element + +!====================================================================== +! subroutine cube_to_cube_coordinates +!====================================================================== + + subroutine cube_to_cube_coordinates (cube_coor, node_coor, face_number) + + real(kind=r8), intent(in) :: node_coor(4,3) + integer, intent(in) :: face_number + real(kind=r8), intent(out) :: cube_coor(4,2) + + integer :: x_index, y_index, sgnx, sgny + call get_2D_sub_coordinate_indexes(x_index, y_index, sgnx, sgny, face_number) + cube_coor(:,1) = sgnx*node_coor(:,x_index) + cube_coor(:,2) = sgny*node_coor(:,y_index) + end subroutine cube_to_cube_coordinates + + +!====================================================================== +! subroutine sphere_to_cube_coordinates +!====================================================================== + + subroutine sphere_to_cube_coordinates (cube_coor, node_coor, face_number) + use coordinate_systems_mod, only : cartesian2d_t, change_coordinates, sphere2cubedsphere + implicit none + real(kind=r8), intent(in) :: node_coor(4,3) + integer, intent(in) :: face_number + real(kind=r8), intent(out) :: cube_coor(4,2) + integer :: i + type(cartesian2d_t) :: cart(4) + + do i=1,4 + cart(i) = sphere2cubedsphere(change_coordinates(node_coor(i,:)), face_number) + end do + cube_coor(:,1) = cart(:)%x + cube_coor(:,2) = cart(:)%y + end subroutine sphere_to_cube_coordinates + + +!====================================================================== +! subroutine cube_face_element_centroids +!====================================================================== + + subroutine cube_face_element_centroids(centroids, face_numbers, element_nodes) + + integer , intent(in) :: element_nodes(:,:) + integer, intent(in) :: face_numbers (p_number_elements) + real(kind=r8),intent(out) :: centroids (p_number_elements,2) + real(kind=r8) :: coordinates(4,3) + real(kind=r8) :: cube_coor (4,2) + integer :: i, node_numbers(4) + + if (0 == p_number_blocks) call endrun('cube_face_element_centroids called before MeshOpen') + if (SIZE(element_nodes,dim=1) /= p_number_elements) then + call endrun('cube_face_element_centroids:Element count check failed in & + &exodus_mesh. Connectivity array length not equal to number of elements.') + end if + if ( p_number_elements_per_face /= p_number_elements ) then + call endrun('cube_face_element_centroids: Element count check failed in & + &exodus_mesh. 
Element array length not equal to sum of face.') + end if + + do i=1, p_number_elements + node_numbers = element_nodes(i,:) + coordinates = p_node_coordinates(node_numbers,:) + if (6 == p_number_blocks) then + call cube_to_cube_coordinates (cube_coor, coordinates, face_numbers(i)) + else + call sphere_to_cube_coordinates (cube_coor, coordinates, face_numbers(i)) + end if + centroids(i,:) = SUM(cube_coor,dim=1)/4.0_r8 + enddo + end subroutine cube_face_element_centroids + +!====================================================================== +! subroutine initialize_space_filling_curve +!====================================================================== + subroutine initialize_space_filling_curve(GridVertex, element_nodes) + use gridgraph_mod, only : GridVertex_t + use spacecurve_mod, only : GenspaceCurve + + type (GridVertex_t), intent(inout) :: GridVertex(:) + integer , intent(in) :: element_nodes(:,:) + + integer,allocatable :: Mesh2(:,:),Mesh2_map(:,:),sfcij(:,:) + + real(kind=r8) :: centroids(p_number_elements,2) + integer :: face_numbers(p_number_elements) + real(kind=r8) :: x, y, h + integer :: i, j, i2, j2, ne, ne2 + integer :: sfc_index, face + + if (SIZE(GridVertex) /= p_number_elements) then + call endrun('initialize_space_filling_curve:Element count check failed & + &in exodus_mesh. Vertex array length not equal to number of elements.') + end if + if (SIZE(element_nodes,dim=1) /= p_number_elements) then + call endrun('initialize_space_filling_curve:Element count check failed & + &in exodus_mesh. Connectivity array length not equal to number of elements.') + end if + + face_numbers(:) = GridVertex(:)%face_number + h = smallest_diameter_element ( element_nodes) + + call cube_face_element_centroids (centroids, face_numbers, element_nodes) + + if (h<.00001_r8) then + call endrun('initialize_space_filling_curve: Unreasonably small element found. less than .00001') + end if + + ne = CEILING(0.5_r8*PI/(h/2)); + + ! find the smallest ne2 which is a power of 2 and ne2>ne + ne2=2**ceiling( log(real(ne))/log(2._r8) ) + if (ne2 Mesh in Mesh2_map. + ! elements in Mesh2 which are not mapped get assigned a value of 0 + Mesh2_map=0 + do i=1,p_number_elements + if (face_numbers(i) == face ) then + x = centroids(i,1) + y = centroids(i,2) + ! map this element to an (i2,j2) element + ! [ -PI/4, PI/4 ] -> [ 0, ne2 ] + i2=nint( (0.5_r8 + 2.0_r8*x/PI)*ne2 + 0.5_r8 ) + j2=nint( (0.5_r8 + 2.0_r8*y/PI)*ne2 + 0.5_r8 ) + if (face == 4 .or. face == 6 ) i2 = ne2-i2+1 + if (face == 1 .or. face == 2 .or. face == 6) j2 = ne2-j2+1 + if (i2<1 ) i2=1 + if (i2>ne2) i2=ne2 + if (j2<1 ) j2=1 + if (j2>ne2) j2=ne2 + Mesh2_map(i2,j2)=i + end if + end do + + ! generate a SFC for Mesh with the same ordering as the + ! elements in Mesh2 which map to Mesh. + do j=0,ne2*ne2-1 + i2=sfcij(j,1) + j2=sfcij(j,2) + i=Mesh2_map(i2,j2) + if (i/=0) then + ! (i2,j2) element maps to element + GridVertex(i)%SpaceCurve=sfc_index + sfc_index=sfc_index+1 + endif + enddo + enddo + deallocate(Mesh2) + deallocate(Mesh2_map) + deallocate(sfcij) + + if (minval(GridVertex(:)%SpaceCurve) == -1) then + do i=1,p_number_elements + if (-1==GridVertex(i)%SpaceCurve) then + write (*,*) " Error in projecting element ",i," to space filling curve." 
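+             ! Editorial note: the diagnostics below report the element's cube face and
+             ! centroid to help locate elements that were never assigned a position on
+             ! the space-filling curve.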
+ write (*,*) " Face:",face_numbers(i) + write (*,*) " Centroid:",centroids(i,:) + end if + end do + call endrun('initialize_space_filling_curve: Vertex not on SpaceCurve') + end if + + end subroutine initialize_space_filling_curve + +!====================================================================== +! subroutine find_corner_neighbors +!====================================================================== + + subroutine find_corner_neighbors (GridVertex, normal_to_homme_ordering, element_nodes, corner_wgt, index_table) + use gridgraph_mod, only : GridVertex_t + use dimensions_mod, only : max_elements_attached_to_node, max_corner_elem + use control_mod, only: north, south, east, west, neast,seast, nwest,swest + + type (GridVertex_t), intent(inout) :: GridVertex(:) + integer , intent(in) :: normal_to_homme_ordering(8) + integer , intent(in) :: element_nodes(p_number_elements, 4) + integer , intent(in) :: corner_wgt + integer , intent(in) :: index_table(:,:) + + integer :: node_elements (2*max_elements_attached_to_node) + integer :: elem_neighbor (4*max_elements_attached_to_node) + integer :: nbr_cnt(4) + integer :: elem_nbr_start, start + integer :: i, j, k, ll, jj, kk + integer :: node, loc, cnt, cnt_index + integer :: corner_array(max_corner_elem), orig_pos(max_corner_elem) + integer :: face_array(max_corner_elem), a_corner_elems(max_corner_elem) + integer :: corner_sides(2) + integer :: side_elem, corner_elem, tmp_s + + !the last column in the index table is a count of the number of elements + cnt_index = max_elements_attached_to_node + 1 + + do i=1, p_number_elements !loop through all elements + node_elements(:) = 0 + elem_neighbor(:) = 0 + elem_nbr_start = 0 + nbr_cnt(:) = 0 + + do j=1,4 !check each of the 4 nodes at the element corners + node = element_nodes(i,j) + cnt = index_table(node, cnt_index) + if (cnt < 3 .or. max_elements_attached_to_node < cnt) then + call endrun('find_corner_neighbors: Number of elements attached to node less than 3 or greater than maximum.') + endif + + node_elements(1:cnt) = index_table(node, 1:cnt) + + !now node_elements contains the element neighbors to that node - so grab the + ! corner neighbors - these are the ones that are not already side neighbors (or myself) + k = 0 + do ll=1,cnt + if ( i /= node_elements(ll) .and. & !not me + GridVertex(i)%nbrs(1) /= node_elements(ll) .and. & !not side 1 + GridVertex(i)%nbrs(2) /= node_elements(ll) .and. & ! etc ... + GridVertex(i)%nbrs(3) /= node_elements(ll) .and. & + GridVertex(i)%nbrs(4) /= node_elements(ll)) then + k = k + 1 + elem_neighbor(elem_nbr_start + k) = node_elements(ll) + end if + end do ! end of ll loop for multiplicity + + !keep track of where we are starting in elem_neighbor for each corner j + elem_nbr_start = elem_nbr_start + k + nbr_cnt(j) = k !how many neighbors in this corner + end do ! end of j loop through 4 nodes + + + ! now that we have done the 4 corners we can populate nbrs and nbrs_ptr + ! with the corners in the proper order (clockwise) in neighbors + ! 
also we can add the corner weight + + do j=5,8 !loop through 4 corners + elem_nbr_start = 1 + !easiest to do the corner in ascending order - find loc + do jj = 5,8 + ll = normal_to_homme_ordering(jj) + if (j == ll) then + loc = jj + exit + end if + elem_nbr_start = elem_nbr_start + nbr_cnt(jj-4) + end do + + start = GridVertex(i)%nbrs_ptr(j) + cnt = nbr_cnt(loc - 4) + GridVertex(i)%nbrs_ptr(j+1) = start + cnt + + if (cnt > 0) then + GridVertex(i)%nbrs(start : start + cnt-1) = & + elem_neighbor(elem_nbr_start : elem_nbr_start + cnt -1) + GridVertex(i)%nbrs_face(start : start + cnt - 1) = & + GridVertex(elem_neighbor(elem_nbr_start : elem_nbr_start + cnt -1))%face_number + GridVertex(i)%nbrs_wgt(start : start + cnt-1) = corner_wgt + + end if + + ! within each corner neighbor, lets list the corners in clockwise order + if (cnt > 1) then !cnt is the number of neighbors in this corner j + !there can be at most max_corner element of these + + a_corner_elems = 0 + a_corner_elems(1:cnt) = elem_neighbor(elem_nbr_start : elem_nbr_start + cnt -1) + !corner-sides(2) is clockwise of corner_side(1) + corner_array= 0 + orig_pos = 0 + select case (j) + case(neast) + corner_sides(1) = north + corner_sides(2) = east + case(seast) + corner_sides(1) = east + corner_sides(2) = south + case(swest) + corner_sides(1) = south + corner_sides(2) = west + case(nwest) + corner_sides(1) = west + corner_sides(2) = north + end select + + !so the first element to list touches corner_sides(1) element + side_elem = GridVertex(i)%nbrs(corner_sides(1)) + + !loop though the corner elements and see if any have a side neighbor + !that = side_elem + do k = 1,cnt !number of corner elements + corner_elem = a_corner_elems(k) + do kk = 1,4 !number of sides to check + loc = GridVertex(corner_elem)%nbrs_ptr(kk) + tmp_s = GridVertex(corner_elem)%nbrs(loc) + if (tmp_s == side_elem) then + corner_array(1) = corner_elem + orig_pos(1) = k + exit + endif + enddo + if (corner_array(1)> 0) exit + enddo + if (corner_array(1)==0) then + print *, i, cnt + call endrun('find_corner_neighbors (1) : mistake finding corner neighbor order') + endif + + !if cnt == 2, we are done (we know the order of neighbors) + if (cnt ==2) then + if (corner_array(1) == a_corner_elems(1)) then + corner_array(2) = a_corner_elems(2) + orig_pos(2) = 2 + else + corner_array(2) = a_corner_elems(1) + orig_pos(2) = 1 + end if + else !cnt = 3 or 4 + !find which corner element borders corner_sides(2) + side_elem = GridVertex(i)%nbrs(corner_sides(2)) + do k = 1,cnt + corner_elem = a_corner_elems(k) + do kk = 1,4 + loc = GridVertex(corner_elem)%nbrs_ptr(kk) + tmp_s = GridVertex(corner_elem)%nbrs(loc) + if (tmp_s == side_elem) then + corner_array(4) = corner_elem + orig_pos(4) = k + exit + endif + enddo + if (corner_array(4)> 0) exit + enddo + if (corner_array(4)==0 .or. corner_array(4) == corner_array(1)) then + print *, i, cnt + call endrun('find_corner_neighbors (2) : mistake finding corner neighbor order') + endif + + !now if cnt = 3 then we are done + if (cnt ==3) then + corner_array(3) = corner_array(4) + orig_pos(3) = orig_pos(4) + + do k = 1,cnt !find the "middle" element + if (k /= orig_pos(1) .and. k /= orig_pos(3)) then + orig_pos(2) = k + corner_array(2) = a_corner_elems(k) + exit + endif + enddo + else !cnt = 4 + !which of the two unassigned elements borders the element in + !corner_array(1) => put in corner_array(2) + side_elem = corner_array(1) + + do k = 1,cnt + corner_elem = a_corner_elems(k) + if (corner_elem == corner_array(4) .or. 
corner_elem == corner_array(1)) then + cycle + else + do kk = 1,4 !check each side + loc = GridVertex(corner_elem)%nbrs_ptr(kk) + tmp_s = GridVertex(corner_elem)%nbrs(loc) + if (tmp_s == side_elem) then + corner_array(2) = corner_elem + orig_pos(2) = k + exit + endif + enddo + endif + if (corner_array(2)> 0) exit + enddo + !now put the remaining one in pos 3 + do k = 1,cnt + corner_elem = a_corner_elems(k) + if (corner_elem /= corner_array(4) .and. corner_elem /= & + corner_array(2) .and. corner_elem /= corner_array(1)) then + corner_array(3) = corner_elem + orig_pos(3) = k + exit + endif + enddo + endif ! end of cnt=4 + endif! end of not cnt=2 + + !now re-set the elements in this corner + GridVertex(i)%nbrs(start : start + cnt-1) = corner_array(1:cnt) + !nbrs_wgt are the same - nothing to do + !fix neighbors face + do k = 1,cnt + face_array(k) = GridVertex(i)%nbrs_face(start + orig_pos(k) - 1) + end do + GridVertex(i)%nbrs_face(start : start + cnt - 1) = face_array(1:cnt) + endif !end of cnt > 1 loop for corners + + end do !j loop through each corner + + end do ! end of i loop through elements + end subroutine find_corner_neighbors + + ! ================================================================================ + ! + ! -------------------------------Public Methods----------------------------------- + ! + ! ================================================================================ + +!====================================================================== +! subroutine MeshOpen +!====================================================================== + + subroutine MeshOpen(mesh_file_name, par) + use parallel_mod, only: parallel_t + use cam_logfile, only: iulog + + character (len=*), intent(in) :: mesh_file_name + type (parallel_t), intent(in) :: par + + integer, allocatable :: node_multiplicity(:) + integer :: k + + p_mesh_file_name = mesh_file_name + call open_mesh_file () + + p_number_elements = get_number_of_elements () + p_number_nodes = get_number_of_nodes () + p_number_blocks = get_number_of_element_blocks () + p_number_dimensions = get_number_of_dimensions () + + if (p_number_dimensions /= 3) then + call endrun('The number of dimensions must be 3, otherwise the mesh algorithms will not work') + endif + + ! Only spheres are allowed in input files. + if (par%masterproc) then + if (p_number_blocks == 1) then + write(iulog,*) "Since the mesh file has only one block, it is assumed to be a sphere." + endif + end if + + if (p_number_blocks /= 1) then + call endrun('Number of elements blocks not exactly 1 (sphere)') + end if + + p_number_elements_per_face = get_number_of_elements_per_face() + ! Because all elements are in one face, this value must match p_number_elements + if ( p_number_elements /= p_number_elements_per_face) then + call endrun('The value of the total number of elements does not match all the elements found in face 1') + end if + + allocate( p_connectivity(4,p_number_elements_per_face) ) + p_connectivity(:,:)=0 + ! extract the connectivity from the netcdf file + call get_face_connectivity() + + allocate(node_multiplicity(p_number_nodes)) + call get_node_multiplicity(node_multiplicity) + + ! tricky: For each node with multiplicity n, there are n(n-1) neighbor links + ! created. But this counts each edge twice, so: n(n-1) -n + ! Should be the same as SUM(SIZE(GridVertex(i)%nbrs(j)%n),i=1:p_number_elements,j=1:8) + ! 
p_number_neighbor_edges = dot_product(mult,mult) - 2*sum(mult) + p_number_neighbor_edges = 0 + do k=1,p_number_nodes + p_number_neighbor_edges = p_number_neighbor_edges + node_multiplicity(k)*(node_multiplicity(k)-2) + end do + + deallocate(node_multiplicity) + + ! allocate the space for the coordinates, this is used in many functions + allocate(p_node_coordinates(p_number_nodes, p_number_dimensions)) + call get_node_coordinates() + + if (p_number_elements_per_face /= p_number_elements) then + call endrun('MeshOpen: Total number of elements not equal to the number of elements on face 1!') + end if + + end subroutine MeshOpen + +!====================================================================== +! subroutine MeshClose +! +! This routine acts as a destructor cleaning the memory allocated in MeshOpen +! which acts as a constructor allocated dynamical memory for the nodes coordinates. +!====================================================================== + + subroutine MeshClose + + ! release memory + deallocate(p_node_coordinates) + deallocate(p_connectivity) + ! let the file go + call close_mesh_file () + + end subroutine MeshClose + + +!====================================================================== +! subroutine MeshPrint +!====================================================================== + + + subroutine MeshPrint(par) + use parallel_mod, only: parallel_t + use cam_logfile, only: iulog + + type (parallel_t), intent(in) :: par + if (par%masterproc) then + write(iulog,*) 'This are the values for file ', trim(p_mesh_file_name) + write(iulog,*) 'The value for the number of dimensions (num_dim) is ', p_number_dimensions + write(iulog,*) 'The number of elements in the mesh file is ', p_number_elements + write(iulog,*) 'The number of nodes in the mesh file is ', p_number_nodes + write(iulog,*) 'The number of blocks in the mesh file is ', p_number_blocks + write(iulog,*) 'The number of elements in the face 1 (sphere) is ', p_number_elements_per_face + if ( p_number_elements == p_number_elements) then + write(iulog,*) 'The value of the total number of elements does match all the elements found in face 1 (the only face)' + else + write(iulog,*) 'The value of the total number of elements does not match all the elements found in face 1' + write(iulog,*) 'This message should not be appearing, there is something wrong in the code' + endif + write(iulog,*) 'The number of neighbor edges ', p_number_neighbor_edges + end if + + end subroutine MeshPrint + +!====================================================================== +! 
subroutine MeshCubeTopology +!====================================================================== + subroutine MeshCubeTopology(GridEdge, GridVertex) + use dimensions_mod, only : np + use coordinate_systems_mod, only : cartesian3D_t, cube_face_number_from_cart + use gridgraph_mod, only : GridVertex_t + use gridgraph_mod, only : GridEdge_t + use cube_mod, only : CubeSetupEdgeIndex + use gridgraph_mod, only : initgridedge, num_neighbors + use control_mod, only : north, south, east, west, neast, seast, swest, nwest + + type (GridEdge_t), intent(inout), target :: GridEdge(:) + type (GridVertex_t), intent(inout), target :: GridVertex(:) + + real(kind=r8) :: coordinates(4,3) + real(kind=r8) :: centroid(3) + type (cartesian3D_t) :: face_center + + integer :: i, j, k, ll, loc + integer :: element_nodes(p_number_elements, 4) + integer :: EdgeWgtP,CornerWgt + integer :: normal_to_homme_ordering(8) + integer :: node_numbers(4) + integer, allocatable :: index_table(:,:) + + normal_to_homme_ordering(1) = south + normal_to_homme_ordering(2) = east + normal_to_homme_ordering(3) = north + normal_to_homme_ordering(4) = west + normal_to_homme_ordering(5) = swest + normal_to_homme_ordering(6) = seast + normal_to_homme_ordering(7) = neast + normal_to_homme_ordering(8) = nwest + + if (SIZE(GridVertex) /= p_number_elements) then + call endrun('MeshCubeTopology: Element count check failed in exodus_mesh. & + &Vertex array length not equal to number of elements.') + end if + if (p_number_elements_per_face /= p_number_elements) then + call endrun('MeshCubeTopology: Element count check failed in exodus_mesh. & + &Element array length not equal to sum of face.') + end if + + EdgeWgtP = np + CornerWgt = 1 + + + call mesh_connectivity (element_nodes) + + do i=1, p_number_elements + GridVertex(i)%number = i + GridVertex(i)%face_number = 0 + GridVertex(i)%processor_number = 0 + GridVertex(i)%SpaceCurve = 0 + + GridVertex(i)%nbrs(:) = 0 + GridVertex(i)%nbrs_face(:) = 0 + GridVertex(i)%nbrs_wgt(:) = 0 + GridVertex(i)%nbrs_wgt_ghost(:) = 1 + + !each elements has one side neighbor (first 4) + GridVertex(i)%nbrs_ptr(1) = 1 + GridVertex(i)%nbrs_ptr(2) = 2 + GridVertex(i)%nbrs_ptr(3) = 3 + GridVertex(i)%nbrs_ptr(4) = 4 + !don't know about corners yet + GridVertex(i)%nbrs_ptr(5:num_neighbors+1) = 5 + + end do + + !create index table to find neighbors + call create_index_table(index_table, element_nodes) + + ! side neighbors + call find_side_neighbors(GridVertex, normal_to_homme_ordering, element_nodes, EdgeWgtP, index_table) + + ! set vertex faces + do i=1, p_number_elements + node_numbers = element_nodes(i,:) + coordinates = p_node_coordinates(node_numbers,:) + centroid = SUM(coordinates, dim=1)/4.0_r8 + face_center%x = centroid(1) + face_center%y = centroid(2) + face_center%z = centroid(3) + GridVertex(i)%face_number = cube_face_number_from_cart(face_center) + end do + + ! set side neighbor faces + do i=1, p_number_elements + do j=1,4 !look at each side + k = normal_to_homme_ordering(j) + loc = GridVertex(i)%nbrs_ptr(k) + ll = GridVertex(i)%nbrs(loc) + GridVertex(i)%nbrs_face(loc) = GridVertex(ll)%face_number + end do + end do + + ! 
find corner neighbor and faces (weights added also) + call find_corner_neighbors (GridVertex, normal_to_homme_ordering, element_nodes, CornerWgt, index_table) + + !done with the index table + deallocate(index_table) + + + call initgridedge(GridEdge,GridVertex) + do i=1,SIZE(GridEdge) + call CubeSetupEdgeIndex(GridEdge(i)) + enddo + + call initialize_space_filling_curve(GridVertex, element_nodes) + end subroutine MeshCubeTopology + +!====================================================================== +! subroutine MeshSetCoordinates(elem) +!====================================================================== + + subroutine MeshSetCoordinates(elem) + use element_mod, only: element_t + + type (element_t), intent(inout) :: elem(:) + + integer :: connectivity(p_number_elements,4) + integer :: node_multiplicity(p_number_nodes) + integer :: face_no, i, k, l + integer :: number + integer :: node_num(4) + real(kind=r8) :: coordinates(4,3) + real(kind=r8) :: cube_coor (4,2) + + connectivity =0 + node_multiplicity=0 + + call mesh_connectivity (connectivity) + + do k=1,p_number_elements + node_num = connectivity(k,:) + node_multiplicity(node_num(:)) = node_multiplicity(node_num(:)) + 1 + end do + + do k=1,SIZE(elem) + number = elem(k)%vertex%number + face_no = elem(k)%vertex%face_number + node_num = connectivity(number,:) + coordinates = p_node_coordinates(node_num,:) + + if (6 == p_number_blocks) then + call cube_to_cube_coordinates (cube_coor, coordinates, face_no) + else + call sphere_to_cube_coordinates (cube_coor, coordinates, face_no) + end if +! elem(k)%node_numbers = node_num +! elem(k)%node_multiplicity(:) = node_multiplicity(node_num(:)) + elem(k)%corners(:)%x = cube_coor(:,1) + elem(k)%corners(:)%y = cube_coor(:,2) + end do + end subroutine MeshSetCoordinates + +!====================================================================== +!function MeshCubeEdgeCount() +!====================================================================== + function MeshCubeEdgeCount() result(nedge) + + integer :: nedge + if (0 == p_number_blocks) call endrun('MeshCubeEdgeCount called before MeshOpenMesh') + if (MeshUseMeshFile) then + ! should be the same as SUM(SIZE(GridVertex(i)%nbrs(j)%n),i=1:p_number_elements,j=1:nInnerElemEdge) + ! the total number of neighbors. 
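+      !
+      ! Note: p_number_neighbor_edges was precomputed in MeshOpen as the sum over all
+      ! nodes of node_multiplicity*(node_multiplicity-2); e.g. a node shared by four
+      ! elements contributes 4*(4-2) = 8 to the count. This function just returns
+      ! that cached value instead of recounting the neighbor graph.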
+ nedge = p_number_neighbor_edges + else + call endrun('Error in MeshCubeEdgeCount: Should not call for non-exodus mesh file.') + endif + + end function MeshCubeEdgeCount + + function MeshCubeElemCount() result(nelem) + + integer :: nelem + if (0 == p_number_blocks) call endrun('MeshCubeElemCount called before MeshOpenMesh') + if (MeshUseMeshFile) then + nelem = p_number_elements + else + call endrun('Error in MeshCubeElemCount: Should not call for non-exodus mesh file.') + end if + end function MeshCubeElemCount + + subroutine test_private_methods + implicit none + integer :: element_nodes(p_number_elements, 4) + call mesh_connectivity (element_nodes) + end subroutine test_private_methods + + +end module mesh_mod diff --git a/src/dynamics/se/dycore/metagraph_mod.F90 b/src/dynamics/se/dycore/metagraph_mod.F90 new file mode 100644 index 00000000..a2870ae8 --- /dev/null +++ b/src/dynamics/se/dycore/metagraph_mod.F90 @@ -0,0 +1,375 @@ +module metagraph_mod + use cam_logfile, only: iulog + use gridgraph_mod, only : gridvertex_t, gridedge_t, & + allocate_gridvertex_nbrs, assignment ( = ) + + implicit none + private + + type, public :: MetaEdge_t + type (GridEdge_t),pointer :: members(:) + integer ,pointer :: edgeptrP(:) + integer ,pointer :: edgeptrP_ghost(:) + integer ,pointer :: edgeptrS(:) + integer :: number + integer :: type + integer :: wgtP ! sum of lengths of all messages to pack for edges + integer :: wgtP_ghost ! sum of lengths of all messages to pack for ghost cells + integer :: wgtS + integer :: HeadVertex ! processor number to send to + integer :: TailVertex ! processor number to send from + integer :: nmembers ! number of messages to (un)pack (out)into this buffer + integer :: padding ! just to quite compiler + end type MetaEdge_t + + type, public :: MetaVertex_t ! one for each processor + integer :: number ! USELESS just the local processor number + integer :: nmembers ! number of elements on this processor + type (GridVertex_t),pointer :: members(:) ! array of elements on this processor + type (MetaEdge_t),pointer :: edges(:) ! description of messages to send/receive + integer :: nedges ! number of processors to communicate with (length of edges) + integer :: padding ! just to quite compiler + end type MetaVertex_t + + + public :: edge_uses_vertex + public :: PrintMetaEdge, PrintMetaVertex + public :: LocalElemCount + public :: initMetaGraph + + interface assignment ( = ) + module procedure copy_metaedge + end interface + +CONTAINS + + ! ===================================== + ! copy vertex: + ! copy device for overloading = sign. + ! 
===================================== + + recursive subroutine copy_metaedge(edge2,edge1) + + type (MetaEdge_t), intent(out) :: edge2 + type (MetaEdge_t), intent(in) :: edge1 + + integer i + + edge2%number = edge1%number + edge2%type = edge1%type + edge2%wgtP = edge1%wgtP + edge2%wgtP_ghost = edge1%wgtP_ghost + edge2%nmembers = edge1%nmembers + + if (associated(edge1%members)) then + allocate(edge2%members(edge2%nmembers)) + do i=1,edge2%nmembers + edge2%members(i)=edge1%members(i) + end do + end if + + if (associated(edge1%edgeptrP)) then + allocate(edge2%edgeptrP(edge2%nmembers)) + allocate(edge2%edgeptrS(edge2%nmembers)) + allocate(edge2%edgeptrP_ghost(edge2%nmembers)) + do i=1,edge2%nmembers + edge2%edgeptrP(i)=edge1%edgeptrP(i) + edge2%edgeptrS(i)=edge1%edgeptrS(i) + edge2%edgeptrP_ghost(i)=edge1%edgeptrP_ghost(i) + end do + end if + + edge2%HeadVertex = edge1%HeadVertex + edge2%TailVertex = edge1%TailVertex + + end subroutine copy_metaedge + + function LocalElemCount(Vertex) result(nelemd) + + type (MetaVertex_t),intent(in) :: Vertex + integer :: nelemd + + nelemd = Vertex%nmembers + + end function LocalElemCount + + function edge_uses_vertex(Vertex,Edge) result(log) + + type(MetaVertex_t), intent(in) :: Vertex + type(MetaEdge_t), intent(in) :: Edge + logical :: log + integer :: number + + number = Vertex%number + if(number == Edge%HeadVertex .or. number == Edge%TailVertex) then + log = .TRUE. + else + log = .FALSE. + endif + + end function edge_uses_vertex + + subroutine PrintMetaEdge(Edge) + use gridgraph_mod, only : PrintGridEdge + implicit none + type (MetaEdge_t), intent(in) :: Edge(:) + integer :: i,nedge + + nedge = SIZE(Edge) + do i=1,nedge + print * + write(iulog,90) Edge(i)%number,Edge(i)%type,Edge(i)%wgtP,Edge(i)%nmembers, & + Edge(i)%TailVertex, Edge(i)%HeadVertex + if(associated(Edge(i)%members)) then + call PrintGridEdge(Edge(i)%members) + endif + enddo +90 format('METAEDGE #',I4,2x,'TYPE ',I1,2x,'WGT ',I4,2x,'NUM ',I6,2x,'Processors ',I4,' ---> ',I4) + + end subroutine PrintMetaEdge + + subroutine PrintMetaVertex(Vertex) + use gridgraph_mod, only : PrintGridVertex + implicit none + type (MetaVertex_t), intent(in),target :: Vertex + + integer :: j + + + write(iulog,*) + write(iulog,95) Vertex%nmembers + call PrintGridVertex(Vertex%members) + write(iulog,96) Vertex%nedges + if(associated(Vertex%edges)) then + do j=1,Vertex%nedges + write(iulog,97) Vertex%edges(j)%number, Vertex%edges(j)%type, & + Vertex%edges(j)%wgtP, Vertex%edges(j)%HeadVertex, & + Vertex%edges(j)%TailVertex + enddo + endif + +95 format(5x,I2,' Member Grid Vertices') +96 format(5x,I2,' Incident Meta Edges ') +97 format(10x,'METAEDGE #',I2,2x,'TYPE ',I1,2x,'WGT ',I4,2x,'Processors ',I2,' ---> ',I2) + + end subroutine PrintMetaVertex + + subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) + use ll_mod, only : root_t, LLSetEdgeCount, LLFree, LLInsertEdge, LLGetEdgeCount, LLFindEdge + use gridgraph_mod, only : GridEdge_type, printGridVertex + !------------------ + !------------------ + implicit none + + integer, intent(in) :: ThisProcessorNumber + type (MetaVertex_t), intent(out) :: MetaVertex + type (GridVertex_t), intent(in),target :: GridVertex(:) + type (GridEdge_t), intent(in),target :: GridEdge(:) + + !type (MetaEdge_t), allocatable :: MetaEdge(:) + integer :: nelem,nelem_edge, nedges + integer,allocatable :: icount(:) + integer :: ic,i,j,ii + integer :: npart + integer :: head_processor_number + integer :: tail_processor_number + integer :: nedge_active,enum + logical :: 
found + integer iTail, iHead, wgtP,wgtS + + type (root_t) :: mEdgeList ! root_t = C++ std::set > + + logical :: Verbose = .FALSE. + logical :: Debug = .FALSE. + + + if(Debug) write(iulog,*)'initMetagraph: point #1' + ! Number of grid vertices + nelem = SIZE(GridVertex) + ! Number of grid edges + nelem_edge = SIZE(GridEdge) + + mEdgeList%number = ThisProcessorNumber + NULLIFY(mEdgeList%first) + call LLSetEdgeCount(0) + + do i=1,nelem_edge + tail_processor_number = GridEdge(i)%tail%processor_number + head_processor_number = GridEdge(i)%head%processor_number + if(tail_processor_number .eq. ThisProcessorNumber .or. & + head_processor_number .eq. ThisProcessorNumber ) then + call LLInsertEdge(mEdgeList,tail_processor_number,head_processor_number,eNum) + endif + enddo + + call LLGetEdgeCount(nedges) + + NULLIFY(MetaVertex%edges) + + allocate(MetaVertex%edges(nedges)) + + ! Initalize the Meta Vertices to zero... probably should be done + ! in a separate routine + MetaVertex%nmembers=0 + MetaVertex%number=0 + MetaVertex%nedges=0 + if(Debug) write(iulog,*)'initMetagraph: point #2' + + + ! Give some identity to the Meta_vertex + MetaVertex%number = ThisProcessorNumber + if(Debug) write(iulog,*)'initMetagraph: point #3' + + ! Look through all the small_vertices and determine the number of + ! member vertices + if(Debug) call PrintGridVertex(GridVertex) + if(Debug) write(iulog,*)'initMetagraph: After call to PrintGridVertex point #3.1' + if(Debug) write(iulog,*)'initMetaGraph: ThisProcessorNumber is ',ThisProcessorNumber + + do j=1,nelem ! count number of elements on this processor + if(GridVertex(j)%processor_number .eq. ThisProcessorNumber) then + MetaVertex%nmembers = MetaVertex%nmembers + 1 + endif + enddo + + if(Debug) write(iulog,*)'initMetagraph: point #4 ' + ! Allocate space for the members of the MetaVertices + if(Debug) write(iulog,*)'initMetagraph: point #4.1 i,MetaVertex%nmembers',i,MetaVertex%nmembers + allocate(MetaVertex%members(MetaVertex%nmembers)) + + do j=1, MetaVertex%nmembers + call allocate_gridvertex_nbrs(MetaVertex%members(j)) + end do + + if(Debug) write(iulog,*)'initMetagraph: point #5' + + ! Set the identity of the members of the MetaVertices + ic=1 + do j=1,nelem + if( GridVertex(j)%processor_number .eq. ThisProcessorNumber) then + MetaVertex%members(ic) = GridVertex(j) + ic=ic+1 + endif + enddo + + nedges = SIZE(MetaVertex%edges) + if(Debug) write(iulog,*)'initMetagraph: point #6 nedges',nedges + ! Zero out all the edge numbers ... this should probably be + ! move to some initalization routine + MetaVertex%edges%number = 0 + MetaVertex%edges%nmembers = 0 + MetaVertex%edges%wgtP = 0 + MetaVertex%edges%wgtS = 0 + MetaVertex%edges%wgtP_ghost = 0 + do i=1,nedges + NULLIFY(MetaVertex%edges(i)%members) + enddo + + if(Debug) write(iulog,*)'initMetagraph: point #7' + + ! Insert all the grid edges into the Meta Edges + do i=1, nelem_edge + ! Which Meta Edge does this grid edge belong + head_processor_number = GridEdge(i)%head%processor_number + tail_processor_number = GridEdge(i)%tail%processor_number + call LLFindEdge(mEdgeList,tail_processor_number,head_processor_number,j,found) + if(found) then + + ! Increment the number of grid edges contained in the grid edge + ! 
and setup the pointers + if(Debug) write(iulog,*)'initMetagraph: point #8' + ii=GridEdge(i)%tail_face + + wgtP=Gridedge(i)%tail%nbrs_wgt(ii) + wgtS=1 + + MetaVertex%edges(j)%nmembers = MetaVertex%edges(j)%nmembers+1 + MetaVertex%edges(j)%wgtP = MetaVertex%edges(j)%wgtP + wgtP + MetaVertex%edges(j)%wgtS = MetaVertex%edges(j)%wgtS + wgtS + + MetaVertex%edges(j)%wgtP_ghost = MetaVertex%edges(j)%wgtP_ghost + Gridedge(i)%tail%nbrs_wgt_ghost(ii) + + if(Debug) write(iulog,*)'initMetagraph: point #9' + + ! If this the first grid edge to be inserted into the Meta Edge + ! do some more stuff + + if(MetaVertex%edges(j)%nmembers .eq. 1) then + + if(Debug) write(iulog,*)'initMetagraph: point #10' + MetaVertex%edges(j)%number = j ! its identity + MetaVertex%edges(j)%type = gridedge_type(GridEdge(i)) ! Type of grid edge + + if(Debug) write(iulog,*)'initMetagraph: point #11' + + ! Setup the pointer to the head and tail of the Vertex + MetaVertex%edges(j)%HeadVertex = head_processor_number + MetaVertex%edges(j)%TailVertex = tail_processor_number + if(Debug) write(iulog,*)'initMetagraph: point #12' + + ! Determine the number of edges for the Meta_Vertex + ! This is the number of processors to communicate with + MetaVertex%nedges = MetaVertex%nedges + 1 + if(Debug) write(iulog,*)'initMetagraph: point #13' + endif + endif + enddo + + do i=1,nedges + ! Allocate space for the member edges and edge index + allocate(MetaVertex%edges(i)%members (MetaVertex%edges(i)%nmembers)) + allocate(MetaVertex%edges(i)%edgeptrP(MetaVertex%edges(i)%nmembers)) + allocate(MetaVertex%edges(i)%edgeptrS(MetaVertex%edges(i)%nmembers)) + allocate(MetaVertex%edges(i)%edgeptrP_ghost(MetaVertex%edges(i)%nmembers)) + MetaVertex%edges(i)%edgeptrP(:)=0 + MetaVertex%edges(i)%edgeptrS(:)=0 + MetaVertex%edges(i)%edgeptrP_ghost(:)=0 + enddo + if(Debug) write(iulog,*)'initMetagraph: point #14' + + ! Insert the edges into the proper meta edges + allocate(icount(nelem_edge)) + icount=1 + do i=1,nelem_edge + head_processor_number = GridEdge(i)%head%processor_number + tail_processor_number = GridEdge(i)%tail%processor_number + call LLFindEdge(mEdgeList,tail_processor_number,head_processor_number,j,found) + if(found) then + MetaVertex%edges(j)%members(icount(j)) = GridEdge(i) + if(icount(j)+1 .le. 
MetaVertex%edges(j)%nmembers) then + + ii=GridEdge(i)%tail_face + + wgtP=Gridedge(i)%tail%nbrs_wgt(ii) + MetaVertex%edges(j)%edgeptrP(icount(j)+1) = MetaVertex%edges(j)%edgeptrP(icount(j)) + wgtP + + wgtS = 1 + MetaVertex%edges(j)%edgeptrS(icount(j)+1) = MetaVertex%edges(j)%edgeptrS(icount(j)) + wgtS + + wgtP=Gridedge(i)%tail%nbrs_wgt_ghost(ii) + MetaVertex%edges(j)%edgeptrP_ghost(icount(j)+1) = MetaVertex%edges(j)%edgeptrP_ghost(icount(j)) + wgtP + endif + if(Debug) write(iulog,*)'initMetagraph: point #15' + icount(j)=icount(j)+1 + endif + enddo + deallocate(icount) + if(Debug) write(iulog,*)'initMetagraph: point #16' + + if(Verbose) then + print * + write(iulog,*)"edge bundle list:(INITMETAGRAPH)" + call PrintMetaEdge( MetaVertex%edges) + write(iulog,*)'initmetagrap: Before last call to PrintMetaVertex' + call PrintMetaVertex(MetaVertex) + endif + + call LLFree(mEdgeList) + +90 format('EDGE #',I2,2x,'TYPE ',I1,2x,'Processor Numbers ',I2,' ---> ',I2) +100 format(10x,I2,1x,'(',I1,') ---> ',I2,1x,'(',I1,')') + + end subroutine initMetaGraph + + +end module metagraph_mod diff --git a/src/dynamics/se/dycore/namelist_mod.F90 b/src/dynamics/se/dycore/namelist_mod.F90 new file mode 100644 index 00000000..8db0b627 --- /dev/null +++ b/src/dynamics/se/dycore/namelist_mod.F90 @@ -0,0 +1,166 @@ +module namelist_mod + !----------------- + use cam_logfile, only: iulog + !----------------- + use params_mod, only: recursive, sfcurve + !----------------- + use shr_string_mod, only: shr_string_toUpper + use shr_kind_mod, only: r8=>shr_kind_r8 + !----------------- + use control_mod, only: & + partmethod, & ! Mesh partitioning method (METIS) + multilevel, & + numnodes, & + tasknum, & ! used dg model in AIX machine + remapfreq, & ! number of steps per remapping call + statefreq, & ! number of steps per printstate call + runtype, & + cubed_sphere_map, & + limiter_option, & + nu_top, & + hypervis_scaling, & ! use tensor HV instead of scalar coefficient + hypervis_power, & + columnpackage + + !----------------- + use thread_mod, only : omp_get_max_threads, max_num_threads, horz_num_threads, vert_num_threads, tracer_num_threads + !----------------- + use dimensions_mod, only : ne, np, npdg, nnodes, nmpi_per_node, npart, qsize, qsize_d, set_mesh_dimensions + !----------------- + !----------------- + use cam_abortutils, only: endrun + use parallel_mod, only: parallel_t, partitionfornodes, useframes + !----------------- + + + use interpolate_mod, only : set_interp_parameter, get_interp_parameter + +!=============================================================================! + implicit none + private +! +! This module should contain no global data and should only be 'use'd to +! call one of the public interfaces below +! + public :: homme_set_defaults + public :: homme_postprocess_namelist + + contains + + ! ============================================ + ! homme_set_defaults: + ! + ! Set default values for namelist variables + ! + ! ============================================ + subroutine homme_set_defaults() + npart = 1 + useframes = 0 + multilevel = 1 + numnodes = -1 + runtype = 0 + statefreq = 1 + remapfreq = 240 + tasknum =-1 + columnpackage = "none" + nu_top = 0 + ne = 0 + + end subroutine homme_set_defaults + + subroutine homme_postprocess_namelist(mesh_file, par) + use mesh_mod, only: MeshOpen + use dimensions_mod, only: ntrac + ! Dummy arguments + character(len=*), intent(in) :: mesh_file + type (parallel_t), intent(in) :: par + + ! 
Local variables
+    real(kind=r8) :: dt_max
+    character(len=*), parameter :: subname = 'HOMME_POSTPROCESS_NAMELIST: '
+
+    if(par%masterproc) then
+      write(iulog, *) subname, 'omp_get_max_threads() = ', max_num_threads
+    end if
+
+    if((vert_num_threads > 1) .and. (limiter_option .ne. 8)) then
+      if(par%masterproc) then
+        write(iulog, *) subname, 'WARNING: vertical threading is only supported for limiter_option = 8; setting vert_num_threads = 1'
+      end if
+      vert_num_threads = 1
+    endif
+
+    if (ne /= 0) then
+      if (mesh_file /= "none" .and. mesh_file /= "/dev/null") then
+        if (par%masterproc) then
+          write(iulog, *) subname, "mesh_file:", trim(mesh_file), &
+               " and ne:",ne," are both specified in the input file."
+          write(iulog,*) "    Specify one or the other, but not both."
+        end if
+        call endrun(subname//"Do not specify ne if using a mesh file input.")
+      end if
+    end if
+    if (par%masterproc) then
+      write(iulog,*) subname, "Mesh File:", trim(mesh_file)
+    end if
+    if (ne == 0) then
+      if (par%masterproc) then
+        write (iulog,*) subname, "Opening Mesh File:", trim(mesh_file)
+      end if
+      call set_mesh_dimensions()
+      call MeshOpen(mesh_file, par)
+    end if
+
+    ! set map
+    if (cubed_sphere_map < 0) then
+      if (ne == 0) then
+        cubed_sphere_map = 2  ! element_local for var-res grids
+      else
+        cubed_sphere_map = 0  ! default is equi-angle gnomonic
+      end if
+    end if
+
+    if ((cubed_sphere_map /= 0) .AND. ntrac>0) then
+      if (par%masterproc) then
+        write(iulog, *) subname, 'fvm transport requires equi-angle gnomonic cube sphere mapping.'
+        write(iulog, *) '  Set cubed_sphere_map = 0 or comment it out altogether.  '
+      end if
+      call endrun(subname//"ERROR: fvm transport and cubed_sphere_map>0")
+    end if
+    if (par%masterproc) then
+      write (iulog,*) subname, "Reference element projection: cubed_sphere_map=",cubed_sphere_map
+    end if
+
+    !logic around different hyperviscosity options
+    if (hypervis_power /= 0) then
+      if (hypervis_scaling /= 0) then
+        if (par%masterproc) then
+          write(iulog, *) subname, 'Both hypervis_power and hypervis_scaling are nonzero.'
+          write(iulog, *) '  (1) Set hypervis_power=1, hypervis_scaling=0 for HV based on element area.'
+          write(iulog, *) '  (2) Set hypervis_power=0 and hypervis_scaling=1 for HV based on a tensor.'
+          write(iulog, *) '  (3) Set hypervis_power=0 and hypervis_scaling=0 for constant HV.'
+        end if
+        call endrun(subname//"ERROR: hypervis_power>0 and hypervis_scaling>0")
+      end if
+    end if
+
+    if (multilevel <= 0) then
+      nmpi_per_node = 1
+    end if
+
+    nnodes = npart / nmpi_per_node
+
+    if((numnodes > 0) .and. (multilevel == 1)) then
+      nnodes = numnodes
+      nmpi_per_node = npart/nnodes
+    end if
+
+    ! ====================================================================
+    ! Do not perform node level partitioning if you are only on one node
+    ! ====================================================================
+    if((nnodes .eq. 1) .and. PartitionForNodes) then
+      PartitionForNodes = .FALSE.
+    end if
+
+  end subroutine homme_postprocess_namelist
+end module namelist_mod
diff --git a/src/dynamics/se/dycore/parallel_mod.F90 b/src/dynamics/se/dycore/parallel_mod.F90
new file mode 100644
index 00000000..f7dc0fa7
--- /dev/null
+++ b/src/dynamics/se/dycore/parallel_mod.F90
@@ -0,0 +1,246 @@
+module parallel_mod
+  ! ---------------------------
+  use shr_kind_mod, only: r8=>shr_kind_r8
+  ! ---------------------------
+  use dimensions_mod, only : nmpi_per_node, nlev, qsize_d, ntrac_d
+  ! 
--------------------------- + use spmd_utils, only: MPI_STATUS_SIZE, MPI_MAX_ERROR_STRING, MPI_TAG_UB + + implicit none + private + + integer, public, parameter :: ORDERED = 1 + integer, public, parameter :: FAST = 2 + integer, public, parameter :: BNDRY_TAG_BASE = 0 + integer, public, parameter :: THREAD_TAG_BITS = 9 + integer, public, parameter :: MAX_ACTIVE_MSG = (MPI_TAG_UB/2**THREAD_TAG_BITS) - 1 + integer, public, parameter :: HME_status_size = MPI_STATUS_SIZE + + integer, public, parameter :: HME_BNDRY_P2P = 1 + integer, public, parameter :: HME_BNDRY_MASHM = 2 + integer, public, parameter :: HME_BNDRY_A2A = 3 + integer, public, parameter :: HME_BNDRY_A2AO = 4 + + integer, public, parameter :: nrepro_vars = MAX(10, nlev*qsize_d, nlev*ntrac_d) + + integer, public :: MaxNumberFrames + integer, public :: numframes + integer, public :: useframes + logical, public :: PartitionForNodes + logical, public :: PartitionForFrames + + ! Namelist-selectable type of boundary comms (AUTO,P2P,A2A,MASHM) + integer, public :: boundaryCommMethod + + integer, public, allocatable :: status(:,:) + integer, public, allocatable :: Rrequest(:) + integer, public, allocatable :: Srequest(:) + + real(r8), public, allocatable :: FrameWeight(:) + integer, public, allocatable :: FrameIndex(:) + integer, public, allocatable :: FrameCount(:) + integer, public :: nComPoints + integer, public :: nPackPoints + + real(r8), public, allocatable :: global_shared_buf(:,:) + real(r8), public :: global_shared_sum(nrepro_vars) + + ! ================================================== + ! Define type parallel_t for distributed memory info + ! ================================================== + type, public :: parallel_t + integer :: rank ! local rank + integer :: root ! local root + integer :: nprocs ! number of processes in group + integer :: comm ! communicator + integer :: intracomm ! Intra-node communicator + integer :: commGraphFull ! distributed graph topo communicator for all neighbors + integer :: commGraphInter ! distributed graph topo communicator for off-node neighbors + integer :: commGraphIntra ! distributed graph topo communicator for on-node neighbors + integer :: groupGraphFull + logical :: masterproc + end type + + type (parallel_t), public :: par ! info for distributed memory programming + + ! =================================================== + ! Module Interfaces + ! =================================================== + + public :: initmpi + public :: syncmp + public :: copy_par + + interface assignment ( = ) + module procedure copy_par + end interface + +CONTAINS + +! ================================================ +! copy_par: copy constructor for parallel_t type +! +! +! Overload assignment operator for parallel_t +! ================================================ + + subroutine copy_par(par2,par1) + type(parallel_t), intent(out) :: par2 + type(parallel_t), intent(in) :: par1 + + par2%rank = par1%rank + par2%root = par1%root + par2%nprocs = par1%nprocs + par2%comm = par1%comm + par2%intracomm = par1%intracomm + par2%commGraphFull = par1%commGraphFull + par2%commGraphInter = par1%commGraphInter + par2%commGraphIntra = par1%commGraphIntra + par2%groupGraphFull = par1%groupGraphFull + par2%masterproc = par1%masterproc + + end subroutine copy_par + +! ================================================ +! initmpi: +! Initializes the parallel (message passing) +! environment, returns a parallel_t structure.. +! 
================================================ + + function initmpi(npes_homme) result(par) + use cam_logfile, only: iulog + use cam_abortutils, only: endrun + use spmd_utils, only: mpicom, MPI_COMM_NULL, MPI_MAX_PROCESSOR_NAME + use spmd_utils, only: MPI_CHARACTER, MPI_INTEGER, MPI_BAND, iam, npes + + integer, intent(in) :: npes_homme + + type(parallel_t) :: par + + integer :: ierr,tmp + integer :: FrameNumber + logical :: running ! state of MPI at beginning of initmpi call + character(len=MPI_MAX_PROCESSOR_NAME) :: my_name + character(len=MPI_MAX_PROCESSOR_NAME), allocatable :: the_names(:) + + integer, allocatable :: tarray(:) + integer :: namelen, i + integer :: color + + !================================================ + ! Basic MPI initialization + ! ================================================ + + call MPI_initialized(running, ierr) + + if (.not.running) then + call endrun('initmpi: MPI not initialized for SE dycore') + end if + + par%root = 0 + par%masterproc = .FALSE. + nmpi_per_node = 2 + PartitionForNodes = .TRUE. + + ! The SE dycore needs to split from CAM communicator for npes > par%nprocs + color = iam / npes_homme + call mpi_comm_split(mpicom, color, iam, par%comm, ierr) + if (iam < npes_homme) then + call MPI_comm_size(par%comm, par%nprocs, ierr) + call MPI_comm_rank(par%comm, par%rank, ierr) + if ( par%nprocs /= npes_homme) then + call endrun('INITMPI: SE communicator count mismatch') + end if + + if(par%rank == par%root) then + par%masterproc = .TRUE. + end if + else + par%rank = 0 + par%nprocs = 0 + par%comm = MPI_COMM_NULL + end if + + if (par%masterproc) then + write(iulog, '(a,i0)')'initmpi: Number of MPI processes: ', par%nprocs + end if + + if (iam < npes_homme) then + ! ================================================ + ! Determine where this MPI process is running + ! then use this information to determined the + ! number of MPI processes per node + ! ================================================ + my_name(:) = '' + call MPI_Get_Processor_Name(my_name, namelen, ierr) + + allocate(the_names(par%nprocs)) + do i = 1, par%nprocs + the_names(i)(:) = '' + end do + + ! ================================================ + ! Collect all the machine names + ! ================================================ + call MPI_Allgather(my_name, MPI_MAX_PROCESSOR_NAME, MPI_CHARACTER, & + the_names,MPI_MAX_PROCESSOR_NAME,MPI_CHARACTER,par%comm,ierr) + + ! ====================================================================== + ! Calculate how many other MPI processes are on my node + ! ====================================================================== + nmpi_per_node = 0 + do i = 1, par%nprocs + if(TRIM(ADJUSTL(my_name)) .eq. TRIM(ADJUSTL(the_names(i)))) then + nmpi_per_node = nmpi_per_node + 1 + end if + end do + + ! ======================================================================= + ! Verify that everybody agrees on this number otherwise do not do + ! the multi-level partitioning + ! ======================================================================= + call MPI_Allreduce(nmpi_per_node,tmp,1,MPI_INTEGER,MPI_BAND,par%comm,ierr) + if(tmp /= nmpi_per_node) then + if (par%masterproc) then + write(iulog,*)'initmpi: disagrement accross nodes for nmpi_per_node' + end if + nmpi_per_node = 1 + PartitionForNodes = .FALSE. + else + PartitionForNodes = .TRUE. + end if + + if(PartitionForFrames .and. par%masterproc) then + write(iulog,*)'initmpi: FrameWeight: ', FrameWeight + end if + + deallocate(the_names) + end if + + end function initmpi + + ! 
===================================== + ! syncmp: + ! + ! sychronize message passing domains + ! + ! ===================================== + subroutine syncmp(par) + use cam_abortutils, only: endrun + use spmd_utils, only: MPI_MAX_ERROR_STRING, MPI_ERROR + + type (parallel_t), intent(in) :: par + + integer :: errorcode, errorlen, ierr + character(len=MPI_MAX_ERROR_STRING) :: errorstring + + call MPI_barrier(par%comm, ierr) + + if(ierr == MPI_ERROR) then + errorcode = ierr + call MPI_Error_String(errorcode, errorstring, errorlen, ierr) + call endrun(errorstring) + end if + end subroutine syncmp + +end module parallel_mod diff --git a/src/dynamics/se/dycore/params_mod.F90 b/src/dynamics/se/dycore/params_mod.F90 new file mode 100644 index 00000000..cc7b5ba5 --- /dev/null +++ b/src/dynamics/se/dycore/params_mod.F90 @@ -0,0 +1,11 @@ +module params_mod + integer, public, parameter :: INTERNAL_EDGE = 0 + integer, public, parameter :: EXTERNAL_EDGE = 1 + + integer, public, parameter :: RECURSIVE = 0, & ! Type of partitioning methods + KWAY = 1, & + VOLUME = 2, & + WRECURSIVE = 3, & + SFCURVE = 4 + +end module params_mod diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 new file mode 100644 index 00000000..ca9c1253 --- /dev/null +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -0,0 +1,2259 @@ +module prim_advance_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use edgetype_mod, only: EdgeBuffer_t + use perf_mod, only: t_startf, t_stopf, t_adj_detailf !, t_barrierf _EXTERNAL + use cam_abortutils, only: endrun + use parallel_mod, only: parallel_t, HME_BNDRY_P2P!,HME_BNDRY_A2A + use thread_mod , only: horz_num_threads, vert_num_threads, omp_set_nested + + implicit none + private + save + + public :: prim_advance_exp, prim_advance_init, applyCAMforcing, calc_tot_energy_dynamics, compute_omega + + type (EdgeBuffer_t) :: edge3,edgeOmega,edgeSponge + real (kind=r8), allocatable :: ur_weights(:) + +contains + + subroutine prim_advance_init(par, elem) + use edge_mod, only: initEdgeBuffer + use element_mod, only: element_t + use dimensions_mod, only: nlev,ksponge_end + use control_mod, only: qsplit + + type (parallel_t) :: par + type (element_t), target, intent(inout) :: elem(:) + integer :: i + + call initEdgeBuffer(par,edge3 ,elem,4*nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) + call initEdgeBuffer(par,edgeSponge,elem,4*ksponge_end,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) + call initEdgeBuffer(par,edgeOmega ,elem,nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) + + if(.not. 
allocated(ur_weights)) allocate(ur_weights(qsplit)) + ur_weights(:)=0.0_r8 + + if(mod(qsplit,2).NE.0)then + ur_weights(1)=1.0_r8/qsplit + do i=3,qsplit,2 + ur_weights(i)=2.0_r8/qsplit + enddo + else + do i=2,qsplit,2 + ur_weights(i)=2.0_r8/qsplit + enddo + endif + end subroutine prim_advance_init + + subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, nete) + use control_mod, only: tstep_type, qsplit + use derivative_mod, only: derivative_t + use dimensions_mod, only: np, nlev + use element_mod, only: element_t + use hybvcoord_mod, only: hvcoord_t + use hybrid_mod, only: hybrid_t + use time_mod, only: TimeLevel_t, timelevel_qdp, tevolve + use dimensions_mod, only: lcp_moist + use fvm_control_volume_mod, only: fvm_struct + use control_mod, only: raytau0 + use physconst, only: get_cp, thermodynamic_active_species_num + use physconst, only: get_kappa_dry, dry_air_species_num + use physconst, only: thermodynamic_active_species_idx_dycore + use physconst, only: cpair, rair + implicit none + + type (element_t), intent(inout), target :: elem(:) + type(fvm_struct) , intent(in) :: fvm(:) + type (derivative_t) , intent(in) :: deriv + type (hvcoord_t) :: hvcoord + type (hybrid_t) , intent(in) :: hybrid + real (kind=r8), intent(in) :: dt + type (TimeLevel_t) , intent(in) :: tl + integer , intent(in) :: nets + integer , intent(in) :: nete + + ! Local + real (kind=r8) :: dt_vis, eta_ave_w + real (kind=r8) :: dp(np,np) + integer :: ie,nm1,n0,np1,k,qn0,m_cnst, nq + real (kind=r8) :: inv_cp_full(np,np,nlev,nets:nete) + real (kind=r8) :: qwater(np,np,nlev,thermodynamic_active_species_num,nets:nete) + integer :: qidx(thermodynamic_active_species_num) + real (kind=r8) :: kappa(np,np,nlev,nets:nete) + call t_startf('prim_advance_exp') + nm1 = tl%nm1 + n0 = tl%n0 + np1 = tl%np1 + + call TimeLevel_Qdp(tl, qsplit, qn0) ! compute current Qdp() timelevel + ! + ! tstep_type=1 RK2-SSP 3 stage (as used by tracers) CFL=.58 + ! optimal in terms of SSP CFL, but not CFLSSP=2 + ! optimal in terms of CFL + ! typically requires qsplit=3 + ! but if windspeed > 340m/s, could use this + ! with qsplit=1 + ! tstep_type=2 classic RK3 CFL=1.73 (sqrt(3)) + ! + ! tstep_type=3 Kinnmark&Gray RK4 4 stage CFL=sqrt(8)=2.8 + ! should we replace by standard RK4 (CFL=sqrt(8))? + ! (K&G 1st order method has CFL=3) + ! tstep_type=4 Kinnmark&Gray RK3 5 stage 3rd order CFL=3.87 (sqrt(15)) + ! From Paul Ullrich. 3rd order for nonlinear terms also + ! K&G method is only 3rd order for linear + ! optimal: for windspeeds ~120m/s,gravity: 340m/2 + ! run with qsplit=1 + ! (K&G 2nd order method has CFL=4. tiny CFL improvement not worth 2nd order) + ! + + if (dry_air_species_num > 0) & + call endrun('ERROR: SE dycore not ready for species dependent thermodynamics - ABORT') + + call omp_set_nested(.true.) + + ! default weights for computing mean dynamics fluxes + eta_ave_w = 1_r8/qsplit + + ! ================================== + ! Take timestep + ! ================================== + do nq=1,thermodynamic_active_species_num + qidx(nq) = nq + end do + do ie=nets,nete + do nq=1,thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx_dycore(nq) + ! + ! make sure Q is updated + ! + qwater(:,:,:,nq,ie) = elem(ie)%state%Qdp(:,:,:,m_cnst,qn0)/elem(ie)%state%dp3d(:,:,:,n0) + end do + end do + ! + ! compute Cp and kappa=Rdry/cpdry here and not in RK-stages since Q stays constant => Cp and kappa also stays constant + ! 
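+    ! Note: inv_cp_full holds the inverse heat capacity of the full (moist) air mixture
+    ! returned by get_cp for the active water species (presumably a mass-weighted mean of
+    ! the species heat capacities); with lcp_moist=.false. it reduces to 1/cpair.
+    ! kappa holds Rdry/cpdry from get_kappa_dry. Both are evaluated once here because
+    ! qwater is held fixed through the RK stages below.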
+ if (lcp_moist) then + do ie=nets,nete + call get_cp(1,np,1,np,1,nlev,thermodynamic_active_species_num,qwater(:,:,:,:,ie),& + .true.,inv_cp_full(:,:,:,ie),active_species_idx_dycore=qidx) + end do + else + do ie=nets,nete + inv_cp_full(:,:,:,ie) = 1.0_r8/cpair + end do + end if + do ie=nets,nete + call get_kappa_dry(1,np,1,np,1,nlev,nlev,thermodynamic_active_species_num,qwater(:,:,:,:,ie),qidx,kappa(:,:,:,ie)) + end do + + + dt_vis = dt + + if (raytau0>0) call rayleigh_friction(elem,n0,nets,nete,dt) + if (tstep_type==1) then + ! RK2-SSP 3 stage. matches tracer scheme. optimal SSP CFL, but + ! not optimal for regular CFL + ! u1 = u0 + dt/2 RHS(u0) + call compute_and_apply_rhs(np1,n0,n0,dt/2,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + ! u2 = u1 + dt/2 RHS(u1) + call compute_and_apply_rhs(np1,np1,np1,dt/2,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + ! u3 = u2 + dt/2 RHS(u2) + call compute_and_apply_rhs(np1,np1,np1,dt/2,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + + ! unew = u/3 +2*u3/3 = u + 1/3 (RHS(u) + RHS(u1) + RHS(u2)) + do ie=nets,nete + elem(ie)%state%v(:,:,:,:,np1)= elem(ie)%state%v(:,:,:,:,n0)/3 & + + 2*elem(ie)%state%v(:,:,:,:,np1)/3 + elem(ie)%state%T(:,:,:,np1)= elem(ie)%state%T(:,:,:,n0)/3 & + + 2*elem(ie)%state%T(:,:,:,np1)/3 + elem(ie)%state%dp3d(:,:,:,np1)= elem(ie)%state%dp3d(:,:,:,n0)/3 & + + 2*elem(ie)%state%dp3d(:,:,:,np1)/3 + enddo + else if (tstep_type==2) then + ! classic RK3 CFL=sqrt(3) + ! u1 = u0 + dt/3 RHS(u0) + call compute_and_apply_rhs(np1,n0,n0,dt/3,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! u2 = u0 + dt/2 RHS(u1) + call compute_and_apply_rhs(np1,n0,np1,dt/2,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! u3 = u0 + dt RHS(u2) + call compute_and_apply_rhs(np1,n0,np1,dt,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w,inv_cp_full,qwater,qidx,kappa) + else if (tstep_type==3) then + ! KG 4th order 4 stage: CFL=sqrt(8) + ! low storage version of classic RK4 + ! u1 = u0 + dt/4 RHS(u0) + call compute_and_apply_rhs(np1,n0,n0,dt/4,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! u2 = u0 + dt/3 RHS(u1) + call compute_and_apply_rhs(np1,n0,np1,dt/3,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! u3 = u0 + dt/2 RHS(u2) + call compute_and_apply_rhs(np1,n0,np1,dt/2,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! u4 = u0 + dt RHS(u3) + call compute_and_apply_rhs(np1,n0,np1,dt,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w,inv_cp_full,qwater,qidx,kappa) + else if (tstep_type==4) then + ! + ! Ullrich 3nd order 5 stage: CFL=sqrt( 4^2 -1) = 3.87 + ! u1 = u0 + dt/5 RHS(u0) (save u1 in timelevel nm1) + ! rhs: t=t + call compute_and_apply_rhs(nm1,n0,n0,dt/5,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w/4,inv_cp_full,qwater,qidx,kappa) + ! + ! u2 = u0 + dt/5 RHS(u1); rhs: t=t+dt/5 + ! + call compute_and_apply_rhs(np1,n0,nm1,dt/5,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! + ! u3 = u0 + dt/3 RHS(u2); rhs: t=t+2*dt/5 + ! + call compute_and_apply_rhs(np1,n0,np1,dt/3,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! + ! u4 = u0 + 2dt/3 RHS(u3); rhs: t=t+2*dt/5+dt/3 + ! + call compute_and_apply_rhs(np1,n0,np1,2*dt/3,elem,hvcoord,hybrid,& + deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + ! 
compute (5*u1/4 - u0/4) in timelevel nm1: + do ie=nets,nete + elem(ie)%state%v(:,:,:,:,nm1)= (5*elem(ie)%state%v(:,:,:,:,nm1) & + - elem(ie)%state%v(:,:,:,:,n0) ) /4 + elem(ie)%state%T(:,:,:,nm1)= (5*elem(ie)%state%T(:,:,:,nm1) & + - elem(ie)%state%T(:,:,:,n0) )/4 + elem(ie)%state%dp3d(:,:,:,nm1)= (5*elem(ie)%state%dp3d(:,:,:,nm1) & + - elem(ie)%state%dp3d(:,:,:,n0) )/4 + enddo + ! u5 = (5*u1/4 - u0/4) + 3dt/4 RHS(u4) + ! + ! phl: rhs: t=t+2*dt/5+dt/3+3*dt/4 -wrong RK times ... + ! + call compute_and_apply_rhs(np1,nm1,np1,3*dt/4,elem,hvcoord,hybrid,& + deriv,nets,nete,3*eta_ave_w/4,inv_cp_full,qwater,qidx,kappa) + ! final method is the same as: + ! u5 = u0 + dt/4 RHS(u0)) + 3dt/4 RHS(u4) + else + call endrun('ERROR: bad choice of tstep_type') + endif + + ! ============================================== + ! Time-split Horizontal diffusion: nu.del^2 or nu.del^4 + ! U(*) = U(t+1) + dt2 * HYPER_DIFF_TERM(t+1) + ! ============================================== + + call t_startf('advance_hypervis') + + ! note:time step computes u(t+1)= u(t*) + RHS. + ! for consistency, dt_vis = t-1 - t*, so this is timestep method dependent + + ! forward-in-time, hypervis applied to dp3d + call advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,np1,qn0,nets,nete,dt_vis,eta_ave_w,& + inv_cp_full,hvcoord) + + call t_stopf('advance_hypervis') + ! + ! update psdry + ! + do ie=nets,nete + elem(ie)%state%psdry(:,:) = hvcoord%hyai(1)*hvcoord%ps0 + do k=1,nlev + elem(ie)%state%psdry(:,:) = elem(ie)%state%psdry(:,:)+elem(ie)%state%dp3d(:,:,k,np1) + end do + end do + tevolve=tevolve+dt + + call omp_set_nested(.false.) + + call t_stopf('prim_advance_exp') + end subroutine prim_advance_exp + + + subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsubstep) + use dimensions_mod, only: np, nc, nlev, qsize, ntrac + use element_mod, only: element_t + use control_mod, only: ftype, ftype_conserve + use fvm_control_volume_mod, only: fvm_struct + use physconst, only: get_dp, thermodynamic_active_species_idx_dycore + type (element_t) , intent(inout) :: elem(:) + type(fvm_struct) , intent(inout) :: fvm(:) + real (kind=r8), intent(in) :: dt_dribble, dt_phys + integer, intent(in) :: np1,nets,nete,np1_qdp,nsubstep + + ! local + integer :: i,j,k,ie,q + real (kind=r8) :: v1,dt_local, dt_local_tracer,tmp + real (kind=r8) :: dt_local_tracer_fvm + real (kind=r8) :: ftmp(np,np,nlev,qsize,nets:nete) !diagnostics + real (kind=r8) :: pdel(np,np,nlev) + real (kind=r8), allocatable :: ftmp_fvm(:,:,:,:,:) !diagnostics + + + if (ntrac>0) allocate(ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)) + + if (ftype==0) then + ! + ! "Dribble" tendencies: divide total adjustment with nsplit and + ! add adjustments to state after each + ! vertical remap + ! + dt_local = dt_dribble + dt_local_tracer = dt_dribble + dt_local_tracer_fvm = dt_dribble + else if (ftype==1) then + ! + ! CAM-FV-stype forcing, i.e. equivalent to updating state once in the + ! beginning of dynamics + ! + dt_local = dt_phys + dt_local_tracer = dt_phys + dt_local_tracer_fvm = dt_phys + if (nsubstep.ne.1) then + ! + ! do nothing + ! + dt_local = 0.0_r8 + dt_local_tracer = 0.0_r8 + dt_local_tracer_fvm = 0.0_r8 + end if + else if (ftype==2) then + ! + ! do state-update for tracers and "dribbling" forcing for u,v,T + ! 
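+      ! Summary of the branches above and below: ftype=0 applies dt_dribble to the state
+      ! and the tracers on every substep; ftype=1 applies the full dt_phys once, on the
+      ! first substep only; ftype=2 dribbles u,v,T while the tracers get the full dt_phys
+      ! on the first substep (with CSLAM active, the GLL water tracers are still dribbled
+      ! and only the fvm tracers get the one-time full update).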
+ dt_local = dt_dribble + if (ntrac>0) then + dt_local_tracer = dt_dribble + dt_local_tracer_fvm = dt_phys + if (nsubstep.ne.1) then + dt_local_tracer_fvm = 0.0_r8 + end if + else + dt_local_tracer = dt_phys + dt_local_tracer_fvm = dt_phys + if (nsubstep.ne.1) then + dt_local_tracer = 0.0_r8 + dt_local_tracer_fvm = 0.0_r8 + end if + end if + end if + + do ie=nets,nete + ! + ! tracers + ! + if (qsize>0.and.dt_local_tracer>0) then +#if (defined COLUMN_OPENMP) + !$omp parallel do num_threads(tracer_num_threads) private(q,k,i,j,v1) +#endif + do q=1,qsize + do k=1,nlev + do j=1,np + do i=1,np + ! + ! FQ holds q-tendency: (qnew-qold)/dt_physics + ! + v1 = dt_local_tracer*elem(ie)%derived%FQ(i,j,k,q) + if (elem(ie)%state%Qdp(i,j,k,q,np1_qdp) + v1 < 0 .and. v1<0) then + if (elem(ie)%state%Qdp(i,j,k,q,np1_qdp) < 0 ) then + v1=0 ! Q already negative, dont make it more so + else + v1 = -elem(ie)%state%Qdp(i,j,k,q,np1_qdp) + endif + endif + elem(ie)%state%Qdp(i,j,k,q,np1_qdp) = elem(ie)%state%Qdp(i,j,k,q,np1_qdp)+v1 + ftmp(i,j,k,q,ie) = dt_local_tracer*& + elem(ie)%derived%FQ(i,j,k,q)-v1 !Only used for diagnostics! + enddo + enddo + enddo + enddo + else + ftmp(:,:,:,:,ie) = 0.0_r8 + end if + if (ntrac>0.and.dt_local_tracer_fvm>0) then + ! + ! Repeat for the fvm tracers: fc holds tendency (fc_new-fc_old)/dt_physics + ! + do q = 1, ntrac + do k = 1, nlev + do j = 1, nc + do i = 1, nc + tmp = dt_local_tracer_fvm*fvm(ie)%fc(i,j,k,q)/fvm(ie)%dp_fvm(i,j,k) + v1 = tmp + if (fvm(ie)%c(i,j,k,q) + v1 < 0 .and. v1<0) then + if (fvm(ie)%c(i,j,k,q) < 0 ) then + v1 = 0 ! C already negative, dont make it more so + else + v1 = -fvm(ie)%c(i,j,k,q) + end if + end if + fvm(ie)%c(i,j,k,q) = fvm(ie)%c(i,j,k,q)+ v1 + ftmp_fvm(i,j,k,q,ie) = tmp-v1 !Only used for diagnostics! + end do + end do + end do + end do + else + if (ntrac>0) ftmp_fvm(:,:,:,:,ie) = 0.0_r8 + end if + + + if (ftype_conserve==1) then + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp),2, & + thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,np1),pdel) + do k=1,nlev + do j=1,np + do i = 1,np + pdel(i,j,k)=elem(ie)%derived%FDP(i,j,k)/pdel(i,j,k) + + elem(ie)%state%T(i,j,k,np1) = elem(ie)%state%T(i,j,k,np1) + & + dt_local*elem(ie)%derived%FT(i,j,k)*pdel(i,j,k) + ! + ! momentum conserving: dp*u + ! + elem(ie)%state%v(i,j,1,k,np1) = elem(ie)%state%v(i,j,1,k,np1) + & + dt_local*elem(ie)%derived%FM(i,j,1,k)*pdel(i,j,k)!elem(ie)%state%dp3d(i,j,k,np1) + elem(ie)%state%v(i,j,2,k,np1) = elem(ie)%state%v(i,j,2,k,np1) + & + dt_local*elem(ie)%derived%FM(i,j,2,k)*pdel(i,j,k)!/elem(ie)%state%dp3d(i,j,k,np1) + end do + end do + end do + else + elem(ie)%state%T(:,:,:,np1) = elem(ie)%state%T(:,:,:,np1) + & + dt_local*elem(ie)%derived%FT(:,:,:) + elem(ie)%state%v(:,:,:,:,np1) = elem(ie)%state%v(:,:,:,:,np1) + & + dt_local*elem(ie)%derived%FM(:,:,:,:) + end if + end do + if (ntrac>0) then + call output_qdp_var_dynamics(ftmp_fvm(:,:,:,:,:),nc,ntrac,nets,nete,'PDC') + else + call output_qdp_var_dynamics(ftmp(:,:,:,:,:),np,qsize,nets,nete,'PDC') + end if + if (ftype==1.and.nsubstep==1) call calc_tot_energy_dynamics(elem,fvm,nets,nete,np1,np1_qdp,'p2d') + if (ntrac>0) deallocate(ftmp_fvm) + end subroutine applyCAMforcing + + + subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2,eta_ave_w,inv_cp_full,hvcoord) + ! + ! take one timestep of: + ! u(:,:,:,np) = u(:,:,:,np) + dt2*nu*laplacian**order ( u ) + ! T(:,:,:,np) = T(:,:,:,np) + dt2*nu_s*laplacian**order ( T ) + ! + ! + ! 
For correct scaling, dt2 should be the same 'dt2' used in the leapfrog advace + ! + ! + use physconst, only: gravit, cappa, cpair, tref, lapse_rate, get_dp_ref + use dimensions_mod, only: np, nlev, nc, ntrac, npsq, qsize + use dimensions_mod, only: hypervis_dynamic_ref_state,ksponge_end + use dimensions_mod, only: nu_scale_top,nu_lev,kmvis_ref,kmcnd_ref,rho_ref,km_sponge_factor + use dimensions_mod, only: kmvisi_ref,kmcndi_ref,rhoi_ref + use control_mod, only: nu, nu_s, hypervis_subcycle,hypervis_subcycle_sponge, nu_p, nu_top + use control_mod, only: molecular_diff + use hybrid_mod, only: hybrid_t!, get_loop_ranges + use element_mod, only: element_t + use derivative_mod, only: derivative_t, laplace_sphere_wk, vlaplace_sphere_wk, vlaplace_sphere_wk_mol + use derivative_mod, only: subcell_Laplace_fluxes, subcell_dss_fluxes + use edge_mod, only: edgevpack, edgevunpack, edgeDGVunpack + use edgetype_mod, only: EdgeBuffer_t, EdgeDescriptor_t + use bndry_mod, only: bndry_exchange + use viscosity_mod, only: biharmonic_wk_dp3d + use hybvcoord_mod, only: hvcoord_t + use fvm_control_volume_mod, only: fvm_struct + use physconst, only: thermodynamic_active_species_idx_dycore + use physconst, only: get_molecular_diff_coef,get_rho_dry + use cam_history, only: outfld, hist_fld_active + + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + type(fvm_struct) , intent(in) :: fvm(:) + type (EdgeBuffer_t), intent(inout):: edge3 + type (derivative_t), intent(in ) :: deriv + integer , intent(in) :: nets,nete, nt, qn0 + real (kind=r8) , intent(in) :: inv_cp_full(np,np,nlev,nets:nete) + type (hvcoord_t) , intent(in) :: hvcoord + real (kind=r8) :: eta_ave_w ! weighting for mean flux terms + real (kind=r8) :: dt2 + ! local + integer :: k,kptr,i,j,ie,ic + integer :: kbeg, kend, kblk + real (kind=r8), dimension(np,np,2,nlev,nets:nete) :: vtens + real (kind=r8), dimension(np,np,nlev,nets:nete) :: ttens, dptens + real (kind=r8), dimension(np,np,nlev,nets:nete) :: dp3d_ref, T_ref + real (kind=r8), dimension(np,np,nets:nete) :: ps_ref + real (kind=r8), dimension(0:np+1,0:np+1,nlev) :: corners + real (kind=r8), dimension(2,2,2) :: cflux + real (kind=r8) :: temp (np,np,nlev) + real (kind=r8) :: tempflux(nc,nc,4) + real (kind=r8), dimension(nc,nc,4,nlev,nets:nete) :: dpflux + type (EdgeDescriptor_t) :: desc + + real (kind=r8), dimension(np,np) :: lap_t,lap_dp + real (kind=r8), dimension(np,np) :: tmp, tmp2 + real (kind=r8), dimension(np,np,ksponge_end,nets:nete):: kmvis,kmcnd,rho_dry + real (kind=r8), dimension(np,np,ksponge_end+1):: kmvisi,kmcndi + real (kind=r8), dimension(np,np,ksponge_end+1):: pint,rhoi_dry + real (kind=r8), dimension(np,np,ksponge_end ):: pmid + real (kind=r8), dimension(np,np,nlev) :: tmp_kmvis,tmp_kmcnd + real (kind=r8), dimension(np,np,2) :: lap_v + real (kind=r8) :: v1,v2,v1new,v2new,dt,heating,T0,T1 + real (kind=r8) :: laplace_fluxes(nc,nc,4) + real (kind=r8) :: rhypervis_subcycle + real (kind=r8) :: nu_ratio1, ptop, inv_rho + real (kind=r8), dimension(ksponge_end) :: dtemp,du,dv + real (kind=r8) :: nu_temp, nu_dp, nu_velo + + if (nu_s == 0 .and. nu == 0 .and. nu_p==0 ) return; + + ptop = hvcoord%hyai(1)*hvcoord%ps0 + + if (hypervis_dynamic_ref_state) then + ! + ! use dynamic reference pressure (P. Callaghan) + ! + call calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) + do ie=nets,nete + ps_ref(:,:,ie) = ptop + sum(elem(ie)%state%dp3d(:,:,:,nt),3) + end do + else + ! + ! use static reference pressure (hydrostatic balance incl. 
effect of topography) + ! + do ie=nets,nete + call get_dp_ref(hvcoord%hyai, hvcoord%hybi, hvcoord%ps0,1,np,1,np,1,nlev,& + elem(ie)%state%phis(:,:),dp3d_ref(:,:,:,ie),ps_ref(:,:,ie)) + end do + endif + ! + ! reference temperature profile (Simmons and Jiabin, 1991, QJRMS, Section 2a) + ! + ! Tref = T0+T1*Exner + ! T1 = .0065*Tref*Cp/g ! = ~191 + ! T0 = Tref-T1 ! = ~97 + ! + T1 = lapse_rate*Tref*cpair/gravit + T0 = Tref-T1 + do ie=nets,nete + do k=1,nlev + dp3d_ref(:,:,k,ie) = ((hvcoord%hyai(k+1)-hvcoord%hyai(k))*hvcoord%ps0 + & + (hvcoord%hybi(k+1)-hvcoord%hybi(k))*ps_ref(:,:,ie)) + tmp = hvcoord%hyam(k)*hvcoord%ps0+hvcoord%hybm(k)*ps_ref(:,:,ie) + tmp2 = (tmp/hvcoord%ps0)**cappa + T_ref(:,:,k,ie) = (T0+T1*tmp2) + end do + end do + + kbeg=1; kend=nlev + + kblk = kend - kbeg + 1 + + dt=dt2/hypervis_subcycle + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! hyper viscosity + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + do ic=1,hypervis_subcycle + call calc_tot_energy_dynamics(elem,fvm,nets,nete,nt,qn0,'dBH') + + rhypervis_subcycle=1.0_r8/real(hypervis_subcycle,kind=r8) + call biharmonic_wk_dp3d(elem,dptens,dpflux,ttens,vtens,deriv,edge3,hybrid,nt,nets,nete,kbeg,kend,& + dp3d_ref,T_ref) + + do ie=nets,nete + ! compute mean flux + if (nu_p>0) then + do k=kbeg,kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%derived%dpdiss_ave(i,j,k)=elem(ie)%derived%dpdiss_ave(i,j,k)+& + rhypervis_subcycle*eta_ave_w*elem(ie)%state%dp3d(i,j,k,nt) + elem(ie)%derived%dpdiss_biharmonic(i,j,k)=elem(ie)%derived%dpdiss_biharmonic(i,j,k)+& + rhypervis_subcycle*eta_ave_w*dptens(i,j,k,ie) + enddo + enddo + enddo + endif + !$omp parallel do num_threads(vert_num_threads) private(lap_t,lap_dp,lap_v,laplace_fluxes,nu_ratio1,i,j,k) + do k=kbeg,kend + ! advace in time. + ! note: DSS commutes with time stepping, so we can time advance and then DSS. + ! note: weak operators alreayd have mass matrix "included" + + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ttens(i,j,k,ie) = -nu_s*ttens(i,j,k,ie) + dptens(i,j,k,ie) = -nu_p*dptens(i,j,k,ie) + vtens(i,j,1,k,ie) = -nu_lev(k)*vtens(i,j,1,k,ie) + vtens(i,j,2,k,ie) = -nu_lev(k)*vtens(i,j,2,k,ie) + enddo + enddo + + if (ntrac>0) then + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,nc + do i=1,nc + ! + ! del4 mass flux for CSLAM + ! + elem(ie)%sub_elem_mass_flux(i,j,:,k) = elem(ie)%sub_elem_mass_flux(i,j,:,k) - & + rhypervis_subcycle*eta_ave_w*nu_p*dpflux(i,j,:,k,ie) + enddo + enddo + endif + + ! 
NOTE: we will DSS all tendicies, EXCEPT for dp3d, where we DSS the new state + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%dp3d(i,j,k,nt) = elem(ie)%state%dp3d(i,j,k,nt)*elem(ie)%spheremp(i,j)& + + dt*dptens(i,j,k,ie) + enddo + enddo + + enddo + + kptr = kbeg - 1 + call edgeVpack(edge3,ttens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + nlev + call edgeVpack(edge3,vtens(:,:,1,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 2*nlev + call edgeVpack(edge3,vtens(:,:,2,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 3*nlev + call edgeVpack(edge3,elem(ie)%state%dp3d(:,:,kbeg:kend,nt),kblk,kptr,ie) + enddo + + call bndry_exchange(hybrid,edge3,location='advance_hypervis_dp2') + + do ie=nets,nete + + kptr = kbeg - 1 + call edgeVunpack(edge3,ttens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + nlev + call edgeVunpack(edge3,vtens(:,:,1,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 2*nlev + call edgeVunpack(edge3,vtens(:,:,2,kbeg:kend,ie),kblk,kptr,ie) + + if (ntrac>0) then + do k=kbeg,kend + temp(:,:,k) = elem(ie)%state%dp3d(:,:,k,nt) / elem(ie)%spheremp ! STATE before DSS + corners(0:np+1,0:np+1,k) = 0.0_r8 + corners(1:np ,1:np ,k) = elem(ie)%state%dp3d(1:np,1:np,k,nt) ! fill in interior data of STATE*mass + enddo + endif + kptr = kbeg - 1 + 3*nlev + call edgeVunpack(edge3,elem(ie)%state%dp3d(:,:,kbeg:kend,nt),kblk,kptr,ie) + + if (ntrac>0) then + desc = elem(ie)%desc + + kptr = kbeg - 1 + 3*nlev + call edgeDGVunpack(edge3,corners(:,:,kbeg:kend),kblk,kptr,ie) + do k=kbeg,kend + corners(:,:,k) = corners(:,:,k)/dt !note: array size is 0:np+1 + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + temp(i,j,k) = elem(ie)%rspheremp(i,j)*elem(ie)%state%dp3d(i,j,k,nt) - temp(i,j,k) + temp(i,j,k) = temp(i,j,k)/dt + enddo + enddo + + call distribute_flux_at_corners(cflux, corners(:,:,k), desc%getmapP) + + cflux(1,1,:) = elem(ie)%rspheremp(1, 1) * cflux(1,1,:) + cflux(2,1,:) = elem(ie)%rspheremp(np, 1) * cflux(2,1,:) + cflux(1,2,:) = elem(ie)%rspheremp(1, np) * cflux(1,2,:) + cflux(2,2,:) = elem(ie)%rspheremp(np,np) * cflux(2,2,:) + + call subcell_dss_fluxes(temp(:,:,k), np, nc, elem(ie)%metdet,cflux,tempflux) + elem(ie)%sub_elem_mass_flux(:,:,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) + & + rhypervis_subcycle*eta_ave_w*tempflux + end do + endif + + ! apply inverse mass matrix, accumulate tendencies + !$omp parallel do num_threads(vert_num_threads) private(k,i,j) + do k=kbeg,kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + vtens(i,j,1,k,ie)=dt*vtens(i,j,1,k,ie)*elem(ie)%rspheremp(i,j) + vtens(i,j,2,k,ie)=dt*vtens(i,j,2,k,ie)*elem(ie)%rspheremp(i,j) + ttens(i,j,k,ie)=dt*ttens(i,j,k,ie)*elem(ie)%rspheremp(i,j) + elem(ie)%state%dp3d(i,j,k,nt)=elem(ie)%state%dp3d(i,j,k,nt)*elem(ie)%rspheremp(i,j) + enddo + enddo + enddo + + !$omp parallel do num_threads(vert_num_threads) private(k,i,j) + do k=kbeg,kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ! 
update v first (gives better results than updating v after heating) + elem(ie)%state%v(i,j,:,k,nt)=elem(ie)%state%v(i,j,:,k,nt) + & + vtens(i,j,:,k,ie) + elem(ie)%state%T(i,j,k,nt)=elem(ie)%state%T(i,j,k,nt) & + +ttens(i,j,k,ie) + enddo + enddo + enddo + end do + + call calc_tot_energy_dynamics(elem,fvm,nets,nete,nt,qn0,'dCH') + do ie=nets,nete + !$omp parallel do num_threads(vert_num_threads), private(k,i,j,v1,v2,heating) + do k=kbeg,kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + v1new=elem(ie)%state%v(i,j,1,k,nt) + v2new=elem(ie)%state%v(i,j,2,k,nt) + v1 =elem(ie)%state%v(i,j,1,k,nt)- vtens(i,j,1,k,ie) + v2 =elem(ie)%state%v(i,j,2,k,nt)- vtens(i,j,2,k,ie) + heating = 0.5_r8*(v1new*v1new+v2new*v2new-(v1*v1+v2*v2)) + + elem(ie)%state%T(i,j,k,nt)=elem(ie)%state%T(i,j,k,nt) & + -heating*inv_cp_full(i,j,k,ie) + enddo + enddo + enddo + enddo + call calc_tot_energy_dynamics(elem,fvm,nets,nete,nt,qn0,'dAH') + end do + + ! + !*************************************************************** + ! + ! sponge layer damping + ! + !*************************************************************** + ! + ! + ! vertical diffusion + ! + call t_startf('vertical_molec_diff') + if (molecular_diff>1) then + do ie=nets,nete + call get_rho_dry(1,np,1,np,ksponge_end,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,qn0), & + elem(ie)%state%T(:,:,:,nt),ptop,elem(ie)%state%dp3d(:,:,:,nt),& + .true.,rhoi_dry=rhoi_dry(:,:,:), & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore,& + pint_out=pint,pmid_out=pmid) + ! + ! constant coefficients + ! + do k=1,ksponge_end+1 + kmvisi(:,:,k) = kmvisi_ref(k)*rhoi_dry(:,:,k) + kmcndi(:,:,k) = kmcndi_ref(k)*rhoi_dry(:,:,k) + end do + ! + ! do vertical diffusion + ! + do j=1,np + do i=1,np + call solve_diffusion(dt2,np,nlev,i,j,ksponge_end,pmid,pint,kmcndi(:,:,:)/cpair,elem(ie)%state%T(:,:,:,nt),& + 0,dtemp) + call solve_diffusion(dt2,np,nlev,i,j,ksponge_end,pmid,pint,kmvisi(:,:,:),elem(ie)%state%v(:,:,1,:,nt),1,du) + call solve_diffusion(dt2,np,nlev,i,j,ksponge_end,pmid,pint,kmvisi(:,:,:),elem(ie)%state%v(:,:,2,:,nt),1,dv) + do k=1,ksponge_end + v1 = elem(ie)%state%v(i,j,1,k,nt) + v2 = elem(ie)%state%v(i,j,2,k,nt) + v1new = v1 + du(k) + v2new = v2 + dv(k) + ! + ! frictional heating + ! + heating = 0.5_r8*((v1new*v1new+v2new*v2new) - (v1*v1+v2*v2)) + elem(ie)%state%T(i,j,k,nt)=elem(ie)%state%T(i,j,k,nt) & + -heating*inv_cp_full(i,j,k,ie)+dtemp(k) + elem(ie)%state%v(i,j,1,k,nt)=v1new + elem(ie)%state%v(i,j,2,k,nt)=v2new + end do + end do + end do + end do + end if + call t_stopf('vertical_molec_diff') + call t_startf('sponge_diff') + ! + ! compute coefficients for horizontal diffusion + ! + if (molecular_diff>0) then + do ie=nets,nete + call get_rho_dry(1,np,1,np,ksponge_end,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,qn0), & + elem(ie)%state%T(:,:,:,nt),ptop,elem(ie)%state%dp3d(:,:,:,nt),& + .true.,rho_dry=rho_dry(:,:,:,ie), & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + end do + + if (molecular_diff==1) then + do ie=nets,nete + ! + ! compute molecular diffusion and thermal conductivity coefficients at mid-levels + ! + call get_molecular_diff_coef(1,np,1,np,ksponge_end,nlev,& + elem(ie)%state%T(:,:,:,nt),0,km_sponge_factor(1:ksponge_end),kmvis(:,:,:,ie),kmcnd(:,:,:,ie),qsize,& + elem(ie)%state%Qdp(:,:,:,1:qsize,qn0),fact=1.0_r8/elem(ie)%state%dp3d(:,:,1:ksponge_end,nt),& + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + end do + else + ! + ! constant coefficients + ! 
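+        ! Constant-coefficient branch (molecular_diff /= 1): take the level-dependent
+        ! reference profiles kmvis_ref/kmcnd_ref directly as the sponge coefficients.
+        ! After the "scale by reference value" normalization further below these scaled
+        ! fields become identically 1, and the dimensional magnitude re-enters through
+        ! kmvis_ref(k)/kmcnd_ref(k) when the sponge tendencies are assembled.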
+ do ie=nets,nete + do k=1,ksponge_end + kmvis (:,:,k,ie) = kmvis_ref(k) + kmcnd (:,:,k,ie) = kmcnd_ref(k) + end do + end do + end if + ! + ! diagnostics + ! + if (hist_fld_active('nu_kmvis')) then + do ie=nets,nete + tmp_kmvis = 0.0_r8 + do k=1,ksponge_end + tmp_kmvis(:,:,k) = kmvis(:,:,k,ie)/rho_dry(:,:,k,ie) + end do + call outfld('nu_kmvis',RESHAPE(tmp_kmvis(:,:,:), (/npsq,nlev/)), npsq, ie) + end do + end if + if (hist_fld_active('nu_kmcnd')) then + do ie=nets,nete + tmp_kmcnd = 0.0_r8 + do k=1,ksponge_end + tmp_kmcnd(:,:,k) = kmcnd(:,:,k,ie)*inv_cp_full(:,:,k,ie)/rho_dry(:,:,k,ie) + end do + call outfld('nu_kmcnd',RESHAPE(tmp_kmcnd(:,:,:), (/npsq,nlev/)), npsq, ie) + end do + end if + if (hist_fld_active('nu_kmcnd_dp')) then + do ie=nets,nete + tmp_kmcnd = 0.0_r8 + do k=1,ksponge_end + tmp_kmcnd(:,:,k) = kmcnd(:,:,k,ie)/(cpair*rho_ref(k)) + end do + call outfld('nu_kmcnd_dp',RESHAPE(tmp_kmcnd(:,:,:), (/npsq,nlev/)), npsq, ie) + end do + end if + + ! + ! scale by reference value + ! + do ie=nets,nete + do k=1,ksponge_end + kmcnd(:,:,k,ie) = kmcnd(:,:,k,ie)/kmcnd_ref(k) + kmvis(:,:,k,ie) = kmvis(:,:,k,ie)/kmvis_ref(k) + end do + end do + end if + ! + ! Horizontal Laplacian diffusion + ! + dt=dt2/hypervis_subcycle_sponge + call calc_tot_energy_dynamics(elem,fvm,nets,nete,nt,qn0,'dBS') + kblk = ksponge_end + do ic=1,hypervis_subcycle_sponge + rhypervis_subcycle=1.0_r8/real(hypervis_subcycle_sponge,kind=r8) + do ie=nets,nete + do k=1,ksponge_end + if (nu_top>0.or.molecular_diff>1) then + !************************************************************** + ! + ! traditional sponge formulation (constant coefficients) + ! + !************************************************************** + call laplace_sphere_wk(elem(ie)%state%T(:,:,k,nt),deriv,elem(ie),lap_t,var_coef=.false.) + call laplace_sphere_wk(elem(ie)%state%dp3d(:,:,k,nt),deriv,elem(ie),lap_dp,var_coef=.false.) + nu_ratio1=1.0_r8 + call vlaplace_sphere_wk(elem(ie)%state%v(:,:,:,k,nt),deriv,elem(ie),.true.,lap_v, var_coef=.false.,& + nu_ratio=nu_ratio1) + + nu_dp = nu_scale_top(k)*nu_top + nu_temp = nu_scale_top(k)*nu_top + nu_velo = nu_scale_top(k)*nu_top + if (molecular_diff>1) then + nu_dp = nu_dp + kmcnd_ref(k)/(cpair*rho_ref(k)) + end if + + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ttens(i,j,k,ie) = nu_temp*lap_t(i,j) + dptens(i,j,k,ie) = nu_dp *lap_dp(i,j) + vtens(i,j,1,k,ie) = nu_velo*lap_v(i,j,1) + vtens(i,j,2,k,ie) = nu_velo*lap_v(i,j,2) + enddo + enddo + end if + if (molecular_diff>0) then + !************************************************************************ + ! + ! sponge formulation using molecular diffusion and thermal conductivity + ! + !************************************************************************ + call vlaplace_sphere_wk_mol(elem(ie)%state%v(:,:,:,k,nt),deriv,elem(ie),.false.,kmvis(:,:,k,ie),lap_v) + call laplace_sphere_wk(elem(ie)%state%T(:,:,k,nt),deriv,elem(ie),lap_t ,var_coef=.false.,mol_nu=kmcnd(:,:,k,ie)) + + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + inv_rho = 1.0_r8/rho_dry(i,j,k,ie) + ttens(i,j,k,ie) = ttens(i,j,k,ie) + kmcnd_ref(k)*inv_cp_full(i,j,k,ie)*inv_rho*lap_t(i,j) + vtens(i,j,1,k,ie) = vtens(i,j,1,k,ie)+ kmvis_ref(k)*inv_rho*lap_v(i,j,1) + vtens(i,j,2,k,ie) = vtens(i,j,2,k,ie)+ kmvis_ref(k)*inv_rho*lap_v(i,j,2) + end do + end do + end if + + if (ntrac>0.and.nu_dp>0) then + ! + ! mass flux for CSLAM due to sponge layer diffusion on dp + ! 
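+            ! The del2 flux of dp3d from the sponge diffusion is accumulated into
+            ! sub_elem_mass_flux with the rhypervis_subcycle*eta_ave_w weighting, so the
+            ! air-mass flux handed to CSLAM stays consistent with the sponge-diffused dp3d.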
+ call subcell_Laplace_fluxes(elem(ie)%state%dp3d(:,:,k,nt),deriv,elem(ie),np,nc,laplace_fluxes) + elem(ie)%sub_elem_mass_flux(:,:,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) + & + rhypervis_subcycle*eta_ave_w*nu_dp*laplace_fluxes + endif + + ! NOTE: we will DSS all tendencies, EXCEPT for dp3d, where we DSS the new state + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%dp3d(i,j,k,nt) = elem(ie)%state%dp3d(i,j,k,nt)*elem(ie)%spheremp(i,j)& + + dt*dptens(i,j,k,ie) + enddo + enddo + + enddo + + + kptr = 0 + call edgeVpack(edgeSponge,ttens(:,:,1:ksponge_end,ie),kblk,kptr,ie) + + kptr = ksponge_end + call edgeVpack(edgeSponge,vtens(:,:,1,1:ksponge_end,ie),kblk,kptr,ie) + + kptr = 2*ksponge_end + call edgeVpack(edgeSponge,vtens(:,:,2,1:ksponge_end,ie),kblk,kptr,ie) + + kptr = 3*ksponge_end + call edgeVpack(edgeSponge,elem(ie)%state%dp3d(:,:,1:ksponge_end,nt),kblk,kptr,ie) + enddo + + call bndry_exchange(hybrid,edgeSponge,location='advance_hypervis_sponge') + + do ie=nets,nete + + kptr = 0 + call edgeVunpack(edgeSponge,ttens(:,:,1:ksponge_end,ie),kblk,kptr,ie) + + kptr = ksponge_end + call edgeVunpack(edgeSponge,vtens(:,:,1,1:ksponge_end,ie),kblk,kptr,ie) + + kptr = 2*ksponge_end + call edgeVunpack(edgeSponge,vtens(:,:,2,1:ksponge_end,ie),kblk,kptr,ie) + + if (ntrac>0.and.nu_dp>0.0_r8) then + do k=1,ksponge_end + temp(:,:,k) = elem(ie)%state%dp3d(:,:,k,nt) / elem(ie)%spheremp ! STATE before DSS + corners(0:np+1,0:np+1,k) = 0.0_r8 + corners(1:np ,1:np ,k) = elem(ie)%state%dp3d(1:np,1:np,k,nt) ! fill in interior data of STATE*mass + enddo + endif + kptr = 3*ksponge_end + call edgeVunpack(edgeSponge,elem(ie)%state%dp3d(:,:,1:ksponge_end,nt),kblk,kptr,ie) + + if (ntrac>0.and.nu_dp>0.0_r8) then + desc = elem(ie)%desc + + kptr = 3*ksponge_end + call edgeDGVunpack(edgeSponge,corners(:,:,1:ksponge_end),kblk,kptr,ie) + do k=1,ksponge_end + corners(:,:,k) = corners(:,:,k)/dt !note: array size is 0:np+1 + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + temp(i,j,k) = elem(ie)%rspheremp(i,j)*elem(ie)%state%dp3d(i,j,k,nt) - temp(i,j,k) + temp(i,j,k) = temp(i,j,k)/dt + enddo + enddo + + call distribute_flux_at_corners(cflux, corners(:,:,k), desc%getmapP) + + cflux(1,1,:) = elem(ie)%rspheremp(1, 1) * cflux(1,1,:) + cflux(2,1,:) = elem(ie)%rspheremp(np, 1) * cflux(2,1,:) + cflux(1,2,:) = elem(ie)%rspheremp(1, np) * cflux(1,2,:) + cflux(2,2,:) = elem(ie)%rspheremp(np,np) * cflux(2,2,:) + + call subcell_dss_fluxes(temp(:,:,k), np, nc, elem(ie)%metdet,cflux,tempflux) + elem(ie)%sub_elem_mass_flux(:,:,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) + & + rhypervis_subcycle*eta_ave_w*tempflux + end do + endif + + ! apply inverse mass matrix, accumulate tendencies + !$omp parallel do num_threads(vert_num_threads) private(k,i,j) + do k=1,ksponge_end + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + vtens(i,j,1,k,ie)=dt*vtens(i,j,1,k,ie)*elem(ie)%rspheremp(i,j) + vtens(i,j,2,k,ie)=dt*vtens(i,j,2,k,ie)*elem(ie)%rspheremp(i,j) + ttens(i,j,k,ie)=dt*ttens(i,j,k,ie)*elem(ie)%rspheremp(i,j) + elem(ie)%state%dp3d(i,j,k,nt)=elem(ie)%state%dp3d(i,j,k,nt)*elem(ie)%rspheremp(i,j) + enddo + enddo + enddo + !$omp parallel do num_threads(vert_num_threads) private(k,i,j,v1,v2,v1new,v2new) + do k=1,ksponge_end + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ! 
update v first (gives better results than updating v after heating) + elem(ie)%state%v(i,j,:,k,nt)=elem(ie)%state%v(i,j,:,k,nt) + & + vtens(i,j,:,k,ie) + elem(ie)%state%T(i,j,k,nt)=elem(ie)%state%T(i,j,k,nt) & + +ttens(i,j,k,ie) + + v1new=elem(ie)%state%v(i,j,1,k,nt) + v2new=elem(ie)%state%v(i,j,2,k,nt) + v1 =elem(ie)%state%v(i,j,1,k,nt)- vtens(i,j,1,k,ie) + v2 =elem(ie)%state%v(i,j,2,k,nt)- vtens(i,j,2,k,ie) + ! + ! frictional heating + ! + heating = 0.5_r8*(v1new*v1new+v2new*v2new-(v1*v1+v2*v2)) + elem(ie)%state%T(i,j,k,nt)=elem(ie)%state%T(i,j,k,nt) & + -heating*inv_cp_full(i,j,k,ie) + enddo + enddo + enddo + end do + end do + call t_stopf('sponge_diff') + call calc_tot_energy_dynamics(elem,fvm,nets,nete,nt,qn0,'dAS') + end subroutine advance_hypervis_dp + + + + subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& + deriv,nets,nete,eta_ave_w,inv_cp_full,qwater,qidx,kappa) + ! =================================== + ! compute the RHS, accumulate into u(np1) and apply DSS + ! + ! u(np1) = u(nm1) + dt2*DSS[ RHS(u(n0)) ] + ! + ! This subroutine is normally called to compute a leapfrog timestep + ! but by adjusting np1,nm1,n0 and dt2, many other timesteps can be + ! accomodated. For example, setting nm1=np1=n0 this routine will + ! take a forward euler step, overwriting the input with the output. + ! + ! if dt2<0, then the DSS'd RHS is returned in timelevel np1 + ! + ! Combining the RHS and DSS pack operation in one routine + ! allows us to fuse these two loops for more cache reuse + ! + ! Combining the dt advance and DSS unpack operation in one routine + ! allows us to fuse these two loops for more cache reuse + ! + ! =================================== + use dimensions_mod, only: np, nc, nlev, ntrac, ksponge_end + use hybrid_mod, only: hybrid_t + use element_mod, only: element_t + use derivative_mod, only: derivative_t, divergence_sphere, gradient_sphere, vorticity_sphere + use derivative_mod, only: subcell_div_fluxes, subcell_dss_fluxes + use edge_mod, only: edgevpack, edgevunpack, edgeDGVunpack + use edgetype_mod, only: edgedescriptor_t + use bndry_mod, only: bndry_exchange + use hybvcoord_mod, only: hvcoord_t + use physconst, only: epsilo, get_gz_given_dp_Tv_Rdry + use physconst, only: thermodynamic_active_species_num, get_virtual_temp, get_cp_dry + use physconst, only: thermodynamic_active_species_idx_dycore,get_R_dry + use physconst, only: dry_air_species_num,get_exner + use time_mod, only : tevolve + + implicit none + integer, intent(in) :: np1,nm1,n0,nets,nete + real (kind=r8), intent(in) :: dt2 + + type (hvcoord_t) , intent(in) :: hvcoord + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + type (derivative_t) , intent(in) :: deriv + real (kind=r8) , intent(in) :: inv_cp_full(np,np,nlev,nets:nete) + real (kind=r8) , intent(in) :: qwater(np,np,nlev,thermodynamic_active_species_num,nets:nete) + integer , intent(in) :: qidx(thermodynamic_active_species_num) + real (kind=r8) , intent(in) :: kappa(np,np,nlev,nets:nete) + + real (kind=r8) :: eta_ave_w ! weighting for eta_dot_dpdn mean flux + + ! 
local + real (kind=r8), dimension(np,np,nlev) :: phi + real (kind=r8), dimension(np,np,nlev) :: omega_full + real (kind=r8), dimension(np,np,nlev) :: divdp_dry + real (kind=r8), dimension(np,np,nlev) :: divdp_full + real (kind=r8), dimension(np,np,2) :: vtemp + real (kind=r8), dimension(np,np,2) :: grad_kappa_term + real (kind=r8), dimension(np,np,2,nlev) :: vdp_dry + real (kind=r8), dimension(np,np,2,nlev) :: vdp_full + real (kind=r8), dimension(np,np,nlev) :: vgrad_p_full + real (kind=r8), dimension(np,np,2 ) :: v ! + real (kind=r8), dimension(np,np) :: vgrad_T ! v.grad(T) + real (kind=r8), dimension(np,np) :: Ephi ! kinetic energy + PHI term + real (kind=r8), dimension(np,np,2,nlev) :: grad_p_full + real (kind=r8), dimension(np,np,2,nlev) :: grad_p_m_pmet! gradient(p - p_met) + real (kind=r8), dimension(np,np,nlev) :: vort ! vorticity + real (kind=r8), dimension(np,np,nlev) :: p_dry ! pressure dry + real (kind=r8), dimension(np,np,nlev) :: dp_dry ! delta pressure dry + real (kind=r8), dimension(np,np,nlev) :: R_dry, cp_dry! + real (kind=r8), dimension(np,np,nlev) :: p_full ! pressure + real (kind=r8), dimension(np,np,nlev) :: dp_full + real (kind=r8), dimension(np,np) :: exner + real (kind=r8), dimension(0:np+1,0:np+1,nlev) :: corners + real (kind=r8), dimension(2,2,2) :: cflux + real (kind=r8), dimension(np,np) :: suml + real (kind=r8) :: vtens1(np,np,nlev),vtens2(np,np,nlev),ttens(np,np,nlev) + real (kind=r8) :: stashdp3d (np,np,nlev),tempdp3d(np,np), tempflux(nc,nc,4) + real (kind=r8) :: ckk, term, T_v(np,np,nlev) + real (kind=r8), dimension(np,np,2) :: grad_exner + real (kind=r8), dimension(np,np) :: theta_v + + type (EdgeDescriptor_t):: desc + + real (kind=r8) :: sum_water(np,np,nlev), density_inv(np,np) + real (kind=r8) :: E,v1,v2,glnps1,glnps2 + integer :: i,j,k,kptr,ie + real (kind=r8) :: u_m_umet, v_m_vmet, t_m_tmet, ptop + +!JMD call t_barrierf('sync_compute_and_apply_rhs', hybrid%par%comm) + call t_adj_detailf(+1) + call t_startf('compute_and_apply_rhs') + ptop = hvcoord%hyai(1)*hvcoord%ps0 + do ie=nets,nete + ! + ! compute virtual temperature and sum_water + ! + call get_virtual_temp(1,np,1,np,1,nlev,thermodynamic_active_species_num,qwater(:,:,:,:,ie),& + t_v(:,:,:),temp=elem(ie)%state%T(:,:,:,n0),sum_q =sum_water(:,:,:),& + active_species_idx_dycore=qidx) + call get_R_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& + qwater(:,:,:,:,ie),qidx,R_dry) + call get_cp_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& + qwater(:,:,:,:,ie),qidx,cp_dry) + + do k=1,nlev + dp_dry(:,:,k) = elem(ie)%state%dp3d(:,:,k,n0) + dp_full(:,:,k) = sum_water(:,:,k)*dp_dry(:,:,k) + end do + call get_gz_given_dp_Tv_Rdry(1,np,1,np,nlev,dp_full,T_v,R_dry,elem(ie)%state%phis,ptop,phi,pmid=p_full) + do k=1,nlev + ! vertically lagrangian code: we advect dp3d instead of ps + ! we also need grad(p) at all levels (not just grad(ps)) + !p(k)= hyam(k)*ps0 + hybm(k)*ps + ! = .5_r8*(hyai(k+1)+hyai(k))*ps0 + .5_r8*(hybi(k+1)+hybi(k))*ps + ! = .5_r8*(ph(k+1) + ph(k) ) = ph(k) + dp(k)/2 + ! + ! p(k+1)-p(k) = ph(k+1)-ph(k) + (dp(k+1)-dp(k))/2 + ! = dp(k) + (dp(k+1)-dp(k))/2 = (dp(k+1)+dp(k))/2 + + call gradient_sphere(p_full(:,:,k),deriv,elem(ie)%Dinv,grad_p_full(:,:,:,k)) + ! ============================== + ! compute vgrad_lnps - for omega_full + ! 
============================== + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + vgrad_p_full(i,j,k) = (v1*grad_p_full(i,j,1,k) + v2*grad_p_full(i,j,2,k)) + vdp_dry(i,j,1,k) = v1*dp_dry(i,j,k) + vdp_dry(i,j,2,k) = v2*dp_dry(i,j,k) + vdp_full(i,j,1,k) = v1*dp_full(i,j,k) + vdp_full(i,j,2,k) = v2*dp_full(i,j,k) + end do + end do + ! ================================ + ! Accumulate mean Vel_rho flux in vn0 + ! ================================ + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%derived%vn0(i,j,1,k)=elem(ie)%derived%vn0(i,j,1,k)+eta_ave_w*vdp_dry(i,j,1,k) + elem(ie)%derived%vn0(i,j,2,k)=elem(ie)%derived%vn0(i,j,2,k)+eta_ave_w*vdp_dry(i,j,2,k) + enddo + enddo + !divdp_dry(:,:,k) + ! ========================================= + ! + ! Compute relative vorticity and divergence + ! + ! ========================================= + call divergence_sphere(vdp_dry(:,:,:,k),deriv,elem(ie),divdp_dry(:,:,k)) + call divergence_sphere(vdp_full(:,:,:,k),deriv,elem(ie),divdp_full(:,:,k)) + call vorticity_sphere(elem(ie)%state%v(:,:,:,k,n0),deriv,elem(ie),vort(:,:,k)) + enddo + + ! ==================================================== + ! Compute omega_full + ! ==================================================== + ckk = 0.5_r8 + suml(:,: ) = 0 +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k,j,i,ckk,term) +#endif + do k=1,nlev + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np ! Loop inversion (AAM) + do i=1,np + term = -divdp_full(i,j,k) + + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + + omega_full(i,j,k) = suml(i,j) + ckk*term+vgrad_p_full(i,j,k) + suml(i,j) = suml(i,j) + term + end do + end do + end do +#if (defined COLUMN_OPENMP) + !$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev ! Loop index added (AAM) + elem(ie)%derived%omega(:,:,k) = & + elem(ie)%derived%omega(:,:,k) + eta_ave_w*omega_full(:,:,k) + enddo + ! ============================================== + ! Compute phi + kinetic energy term: 10*nv*nv Flops + ! ============================================== +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k,i,j,v1,v2,E,Ephi,vtemp,vgrad_T,gpterm,glnps1,glnps2) +#endif + vertloop: do k=1,nlev + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + E = 0.5_r8*( v1*v1 + v2*v2 ) + Ephi(i,j)=E+phi(i,j,k) + end do + end do + ! ================================================ + ! compute gradp term (ps/p)*(dp/dps)*T + ! ================================================ + call gradient_sphere(elem(ie)%state%T(:,:,k,n0),deriv,elem(ie)%Dinv,vtemp) + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + vgrad_T(i,j) = v1*vtemp(i,j,1) + v2*vtemp(i,j,2) + end do + end do + + + ! vtemp = grad ( E + PHI ) + ! 
vtemp = gradient_sphere(Ephi(:,:),deriv,elem(ie)%Dinv) + call gradient_sphere(Ephi(:,:),deriv,elem(ie)%Dinv,vtemp) + density_inv(:,:) = R_dry(:,:,k)*T_v(:,:,k)/p_full(:,:,k) + + if (dry_air_species_num==0) then + exner(:,:)=(p_full(:,:,k)/hvcoord%ps0)**kappa(:,:,k,ie) + theta_v(:,:)=T_v(:,:,k)/exner(:,:) + call gradient_sphere(exner(:,:),deriv,elem(ie)%Dinv,grad_exner) + + grad_exner(:,:,1) = cp_dry(:,:,k)*theta_v(:,:)*grad_exner(:,:,1) + grad_exner(:,:,2) = cp_dry(:,:,k)*theta_v(:,:)*grad_exner(:,:,2) + else + exner(:,:)=(p_full(:,:,k)/hvcoord%ps0)**kappa(:,:,k,ie) + theta_v(:,:)=T_v(:,:,k)/exner(:,:) + call gradient_sphere(exner(:,:),deriv,elem(ie)%Dinv,grad_exner) + + call gradient_sphere(kappa(:,:,k,ie),deriv,elem(ie)%Dinv,grad_kappa_term) + suml = exner(:,:)*LOG(p_full(:,:,k)/hvcoord%ps0) + grad_kappa_term(:,:,1)=-suml*grad_kappa_term(:,:,1) + grad_kappa_term(:,:,2)=-suml*grad_kappa_term(:,:,2) + + grad_exner(:,:,1) = cp_dry(:,:,k)*theta_v(:,:)*(grad_exner(:,:,1)+grad_kappa_term(:,:,1)) + grad_exner(:,:,2) = cp_dry(:,:,k)*theta_v(:,:)*(grad_exner(:,:,2)+grad_kappa_term(:,:,2)) + end if + + do j=1,np + do i=1,np + glnps1 = grad_exner(i,j,1) + glnps2 = grad_exner(i,j,2) + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + + vtens1(i,j,k) = & + + v2*(elem(ie)%fcor(i,j) + vort(i,j,k)) & + - vtemp(i,j,1) - glnps1 + + vtens2(i,j,k) = & + - v1*(elem(ie)%fcor(i,j) + vort(i,j,k)) & + - vtemp(i,j,2) - glnps2 + ttens(i,j,k) = - vgrad_T(i,j) + & + density_inv(i,j)*omega_full(i,j,k)*inv_cp_full(i,j,k,ie) + end do + end do + + end do vertloop + + ! ========================================================= + ! local element timestep, store in np1. + ! note that we allow np1=n0 or nm1 + ! apply mass matrix + ! ========================================================= +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%v(i,j,1,k,np1) = elem(ie)%spheremp(i,j)*( elem(ie)%state%v(i,j,1,k,nm1) + dt2*vtens1(i,j,k) ) + elem(ie)%state%v(i,j,2,k,np1) = elem(ie)%spheremp(i,j)*( elem(ie)%state%v(i,j,2,k,nm1) + dt2*vtens2(i,j,k) ) + elem(ie)%state%T(i,j,k,np1) = elem(ie)%spheremp(i,j)*(elem(ie)%state%T(i,j,k,nm1) + dt2*ttens(i,j,k)) + elem(ie)%state%dp3d(i,j,k,np1) = & + elem(ie)%spheremp(i,j) * (elem(ie)%state%dp3d(i,j,k,nm1) - & + dt2 * (divdp_dry(i,j,k))) + enddo + enddo + + + if (ntrac>0.and.eta_ave_w.ne.0._r8) then + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + v(i,j,1) = elem(ie)%Dinv(i,j,1,1)*vdp_dry(i,j,1,k) + elem(ie)%Dinv(i,j,1,2)*vdp_dry(i,j,2,k) + v(i,j,2) = elem(ie)%Dinv(i,j,2,1)*vdp_dry(i,j,1,k) + elem(ie)%Dinv(i,j,2,2)*vdp_dry(i,j,2,k) + enddo + enddo + call subcell_div_fluxes(v, np, nc, elem(ie)%metdet,tempflux) + elem(ie)%sub_elem_mass_flux(:,:,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) - eta_ave_w*tempflux + end if + enddo + ! ========================================================= + ! + ! Pack + ! + ! ========================================================= + kptr=0 + call edgeVpack(edge3, elem(ie)%state%T(:,:,:,np1),nlev,kptr,ie) + + kptr=nlev + call edgeVpack(edge3, elem(ie)%state%v(:,:,:,:,np1),2*nlev,kptr,ie) + + kptr=kptr+2*nlev + call edgeVpack(edge3, elem(ie)%state%dp3d(:,:,:,np1),nlev,kptr, ie) + end do + + ! ============================================================= + ! Insert communications here: for shared memory, just a single + ! sync is required + ! 
============================================================= + call bndry_exchange(hybrid,edge3,location='edge3') + do ie=nets,nete + ! =========================================================== + ! Unpack the edges for vgrad_T and v tendencies... + ! =========================================================== + kptr=0 + call edgeVunpack(edge3, elem(ie)%state%T(:,:,:,np1), nlev, kptr, ie) + + kptr=nlev + call edgeVunpack(edge3, elem(ie)%state%v(:,:,:,:,np1), 2*nlev, kptr, ie) + + if (ntrac>0.and.eta_ave_w.ne.0._r8) then + do k=1,nlev + stashdp3d(:,:,k) = elem(ie)%state%dp3d(:,:,k,np1)/elem(ie)%spheremp(:,:) + end do + endif + + corners = 0.0_r8 + corners(1:np,1:np,:) = elem(ie)%state%dp3d(:,:,:,np1) + kptr=kptr+2*nlev + call edgeVunpack(edge3, elem(ie)%state%dp3d(:,:,:,np1),nlev,kptr,ie) + + if (ntrac>0.and.eta_ave_w.ne.0._r8) then + desc = elem(ie)%desc + + call edgeDGVunpack(edge3, corners, nlev, kptr, ie) + + corners = corners/dt2 + + do k=1,nlev + tempdp3d = elem(ie)%rspheremp(:,:)*elem(ie)%state%dp3d(:,:,k,np1) + tempdp3d = tempdp3d - stashdp3d(:,:,k) + tempdp3d = tempdp3d/dt2 + + call distribute_flux_at_corners(cflux, corners(:,:,k), desc%getmapP) + + cflux(1,1,:) = elem(ie)%rspheremp(1, 1) * cflux(1,1,:) + cflux(2,1,:) = elem(ie)%rspheremp(np, 1) * cflux(2,1,:) + cflux(1,2,:) = elem(ie)%rspheremp(1, np) * cflux(1,2,:) + cflux(2,2,:) = elem(ie)%rspheremp(np,np) * cflux(2,2,:) + + call subcell_dss_fluxes(tempdp3d, np, nc, elem(ie)%metdet, cflux,tempflux) + elem(ie)%sub_elem_mass_flux(:,:,:,k) = elem(ie)%sub_elem_mass_flux(:,:,:,k) + eta_ave_w*tempflux + end do + end if + + ! ==================================================== + ! Scale tendencies by inverse mass matrix + ! ==================================================== + +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%T(i,j,k,np1) = elem(ie)%rspheremp(i,j)*elem(ie)%state%T(i,j,k,np1) + elem(ie)%state%v(i,j,1,k,np1) = elem(ie)%rspheremp(i,j)*elem(ie)%state%v(i,j,1,k,np1) + elem(ie)%state%v(i,j,2,k,np1) = elem(ie)%rspheremp(i,j)*elem(ie)%state%v(i,j,2,k,np1) + enddo + enddo + end do + + ! vertically lagrangian: complete dp3d timestep: + do k=1,nlev + elem(ie)%state%dp3d(:,:,k,np1)= elem(ie)%rspheremp(:,:)*elem(ie)%state%dp3d(:,:,k,np1) + enddo + end do + + call t_stopf('compute_and_apply_rhs') + call t_adj_detailf(-1) + end subroutine compute_and_apply_rhs + + + ! + ! corner fluxes for CSLAM + ! 
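For each corner, the routine below combines the difference toward the edge neighbour with half of the two differences involving the diagonal neighbour (when that neighbour exists), and falls back to the plain edge difference otherwise. A minimal, self-contained sketch of the south-west corner branch; the program name and the corner values are made up for illustration:

program sw_corner_flux_sketch
  ! Mirrors the south-west corner branch of distribute_flux_at_corners:
  ! each corner flux is the difference toward the edge neighbour plus half
  ! of the two differences that involve the diagonal neighbour.
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: c00, c01, c10, c11   ! stand-ins for corners(0,0), corners(0,1), corners(1,0), corners(1,1)
  real(r8) :: flux_1, flux_2       ! analogues of cflux(1,1,1) and cflux(1,1,2)

  ! made-up corner values for illustration
  c00 = 1.0_r8
  c01 = 2.0_r8
  c10 = 3.0_r8
  c11 = 4.0_r8

  flux_1 = (c01 - c11) + 0.5_r8*(c00 - c11) + 0.5_r8*(c01 - c10)   ! = -4.0 for these values
  flux_2 = (c10 - c11) + 0.5_r8*(c00 - c11) + 0.5_r8*(c10 - c01)   ! = -2.0 for these values

  print '(a,2f8.3)', 'corner fluxes cflux(1,1,1:2): ', flux_1, flux_2
end program sw_corner_flux_sketch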
+ subroutine distribute_flux_at_corners(cflux, corners, getmapP) + use dimensions_mod, only : np, max_corner_elem + use control_mod, only : swest + + real(r8), intent(out) :: cflux(2,2,2) + real(r8), intent(in) :: corners(0:np+1,0:np+1) + integer, intent(in) :: getmapP(:) + + cflux = 0.0_r8 + if (getmapP(swest+0*max_corner_elem) /= -1) then + cflux(1,1,1) = (corners(0,1) - corners(1,1)) + cflux(1,1,1) = cflux(1,1,1) + (corners(0,0) - corners(1,1)) / 2.0_r8 + cflux(1,1,1) = cflux(1,1,1) + (corners(0,1) - corners(1,0)) / 2.0_r8 + + cflux(1,1,2) = (corners(1,0) - corners(1,1)) + cflux(1,1,2) = cflux(1,1,2) + (corners(0,0) - corners(1,1)) / 2.0_r8 + cflux(1,1,2) = cflux(1,1,2) + (corners(1,0) - corners(0,1)) / 2.0_r8 + else + cflux(1,1,1) = (corners(0,1) - corners(1,1)) + cflux(1,1,2) = (corners(1,0) - corners(1,1)) + endif + + if (getmapP(swest+1*max_corner_elem) /= -1) then + cflux(2,1,1) = (corners(np+1,1) - corners(np,1)) + cflux(2,1,1) = cflux(2,1,1) + (corners(np+1,0) - corners(np,1)) / 2.0_r8 + cflux(2,1,1) = cflux(2,1,1) + (corners(np+1,1) - corners(np,0)) / 2.0_r8 + + cflux(2,1,2) = (corners(np ,0) - corners(np, 1)) + cflux(2,1,2) = cflux(2,1,2) + (corners(np+1,0) - corners(np, 1)) / 2.0_r8 + cflux(2,1,2) = cflux(2,1,2) + (corners(np ,0) - corners(np+1,1)) / 2.0_r8 + else + cflux(2,1,1) = (corners(np+1,1) - corners(np,1)) + cflux(2,1,2) = (corners(np ,0) - corners(np,1)) + endif + + if (getmapP(swest+2*max_corner_elem) /= -1) then + cflux(1,2,1) = (corners(0,np ) - corners(1,np )) + cflux(1,2,1) = cflux(1,2,1) + (corners(0,np+1) - corners(1,np )) / 2.0_r8 + cflux(1,2,1) = cflux(1,2,1) + (corners(0,np ) - corners(1,np+1)) / 2.0_r8 + + cflux(1,2,2) = (corners(1,np+1) - corners(1,np )) + cflux(1,2,2) = cflux(1,2,2) + (corners(0,np+1) - corners(1,np )) / 2.0_r8 + cflux(1,2,2) = cflux(1,2,2) + (corners(1,np+1) - corners(0,np )) / 2.0_r8 + else + cflux(1,2,1) = (corners(0,np ) - corners(1,np )) + cflux(1,2,2) = (corners(1,np+1) - corners(1,np )) + endif + + if (getmapP(swest+3*max_corner_elem) /= -1) then + cflux(2,2,1) = (corners(np+1,np ) - corners(np,np )) + cflux(2,2,1) = cflux(2,2,1) + (corners(np+1,np+1) - corners(np,np )) / 2.0_r8 + cflux(2,2,1) = cflux(2,2,1) + (corners(np+1,np ) - corners(np,np+1)) / 2.0_r8 + + cflux(2,2,2) = (corners(np ,np+1) - corners(np,np )) + cflux(2,2,2) = cflux(2,2,2) + (corners(np+1,np+1) - corners(np,np )) / 2.0_r8 + cflux(2,2,2) = cflux(2,2,2) + (corners(np ,np+1) - corners(np+1,np)) / 2.0_r8 + else + cflux(2,2,1) = (corners(np+1,np ) - corners(np,np )) + cflux(2,2,2) = (corners(np ,np+1) - corners(np,np )) + endif + end subroutine distribute_flux_at_corners + + subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suffix) + use dimensions_mod, only: npsq,nlev,np,lcp_moist,nc,ntrac,qsize + use physconst, only: gravit, cpair, rearth,omega + use element_mod, only: element_t + use cam_history, only: outfld, hist_fld_active + use constituents, only: cnst_get_ind + use string_utils, only: strlist_get_ind + use hycoef, only: hyai, ps0 + use fvm_control_volume_mod, only: fvm_struct + use physconst, only: get_dp, get_cp + use physconst, only: thermodynamic_active_species_idx_dycore + use dimensions_mod, only: cnst_name_gll + !------------------------------Arguments-------------------------------- + + type (element_t) , intent(in) :: elem(:) + type(fvm_struct) , intent(in) :: fvm(:) + integer , intent(in) :: tl, tl_qdp,nets,nete + character*(*) , intent(in) :: outfld_name_suffix ! 
suffix for "outfld" names + + !---------------------------Local storage------------------------------- + + real(kind=r8) :: se(npsq) ! Dry Static energy (J/m2) + real(kind=r8) :: ke(npsq) ! kinetic energy (J/m2) + + real(kind=r8) :: cdp_fvm(nc,nc,nlev) + real(kind=r8) :: se_tmp + real(kind=r8) :: ke_tmp + real(kind=r8) :: ps(np,np) + real(kind=r8) :: pdel(np,np,nlev) + ! + ! global axial angular momentum (AAM) can be separated into one part (mr) associatedwith the relative motion + ! of the atmosphere with respect to the planets surface (also known as wind AAM) and another part (mo) + ! associated with the angular velocity OMEGA (2*pi/d, where d is the length of the day) of the planet + ! (also known as mass AAM) + ! + real(kind=r8) :: mr(npsq) ! wind AAM + real(kind=r8) :: mo(npsq) ! mass AAM + real(kind=r8) :: mr_cnst, mo_cnst, cos_lat, mr_tmp, mo_tmp + real(kind=r8) :: cp(np,np,nlev) + + integer :: ie,i,j,k + integer :: ixwv,ixcldice, ixcldliq, ixtt ! CLDICE, CLDLIQ and test tracer indices + character(len=16) :: name_out1,name_out2,name_out3,name_out4,name_out5,name_out6 + + !----------------------------------------------------------------------- + + name_out1 = 'SE_' //trim(outfld_name_suffix) + name_out2 = 'KE_' //trim(outfld_name_suffix) + name_out3 = 'WV_' //trim(outfld_name_suffix) + name_out4 = 'WL_' //trim(outfld_name_suffix) + name_out5 = 'WI_' //trim(outfld_name_suffix) + name_out6 = 'TT_' //trim(outfld_name_suffix) + + if ( hist_fld_active(name_out1).or.hist_fld_active(name_out2).or.hist_fld_active(name_out3).or.& + hist_fld_active(name_out4).or.hist_fld_active(name_out5).or.hist_fld_active(name_out6)) then + + if (ntrac>0) then + ixwv = 1 + call cnst_get_ind('CLDLIQ' , ixcldliq, abort=.false.) + call cnst_get_ind('CLDICE' , ixcldice, abort=.false.) + else + ! + ! when using CSLAM the condensates on the GLL grid may be located in a different index than in physics + ! + ixwv = -1 + call strlist_get_ind(cnst_name_gll, 'CLDLIQ' , ixcldliq, abort=.false.) + call strlist_get_ind(cnst_name_gll, 'CLDICE' , ixcldice, abort=.false.) + end if + call cnst_get_ind('TT_LW' , ixtt , abort=.false.) + ! + ! Compute frozen static energy in 3 parts: KE, SE, and energy associated with vapor and liquid + ! + do ie=nets,nete + se = 0.0_r8 + ke = 0.0_r8 + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),2,thermodynamic_active_species_idx_dycore,& + elem(ie)%state%dp3d(:,:,:,tl),pdel,ps=ps,ptop=hyai(1)*ps0) + call get_cp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),& + .false.,cp,dp_dry=elem(ie)%state%dp3d(:,:,:,tl),& + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + do k = 1, nlev + do j=1,np + do i = 1, np + ! + ! kinetic energy + ! + ke_tmp = 0.5_r8*(elem(ie)%state%v(i,j,1,k,tl)**2+ elem(ie)%state%v(i,j,2,k,tl)**2)*pdel(i,j,k)/gravit + if (lcp_moist) then + se_tmp = cp(i,j,k)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit + else + ! + ! using CAM physics definition of internal energy + ! + se_tmp = cpair*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit + end if + se (i+(j-1)*np) = se (i+(j-1)*np) + se_tmp + ke (i+(j-1)*np) = ke (i+(j-1)*np) + ke_tmp + end do + end do + end do + + do j=1,np + do i = 1, np + se(i+(j-1)*np) = se(i+(j-1)*np) + elem(ie)%state%phis(i,j)*ps(i,j)/gravit + end do + end do + ! + ! Output energy diagnostics on GLL grid + ! + call outfld(name_out1 ,se ,npsq,ie) + call outfld(name_out2 ,ke ,npsq,ie) + ! + ! mass variables are output on CSLAM grid if using CSLAM else GLL grid + ! 
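+        ! with CSLAM active (ntrac>0) the water species are integrated from the
+        ! fvm arrays (c*dp_fvm); otherwise the GLL Qdp fields are used directly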
+ if (ntrac>0) then + if (ixwv>0) then + cdp_fvm = fvm(ie)%c(1:nc,1:nc,:,ixwv)*fvm(ie)%dp_fvm(1:nc,1:nc,:) + call util_function(cdp_fvm,nc,nlev,name_out3,ie) + end if + if (ixcldliq>0) then + cdp_fvm = fvm(ie)%c(1:nc,1:nc,:,ixcldliq)*fvm(ie)%dp_fvm(1:nc,1:nc,:) + call util_function(cdp_fvm,nc,nlev,name_out4,ie) + end if + if (ixcldice>0) then + cdp_fvm = fvm(ie)%c(1:nc,1:nc,:,ixcldice)*fvm(ie)%dp_fvm(1:nc,1:nc,:) + call util_function(cdp_fvm,nc,nlev,name_out5,ie) + end if + if (ixtt>0) then + cdp_fvm = fvm(ie)%c(1:nc,1:nc,:,ixtt)*fvm(ie)%dp_fvm(1:nc,1:nc,:) + call util_function(cdp_fvm,nc,nlev,name_out6,ie) + end if + else + call util_function(elem(ie)%state%qdp(:,:,:,1 ,tl_qdp),np,nlev,name_out3,ie) + if (ixcldliq>0) call util_function(elem(ie)%state%qdp(:,:,:,ixcldliq,tl_qdp),np,nlev,name_out4,ie) + if (ixcldice>0) call util_function(elem(ie)%state%qdp(:,:,:,ixcldice,tl_qdp),np,nlev,name_out5,ie) + if (ixtt>0 ) call util_function(elem(ie)%state%qdp(:,:,:,ixtt ,tl_qdp),np,nlev,name_out6,ie) + end if + end do + end if + ! + ! Axial angular momentum diagnostics + ! + ! Code follows + ! + ! Lauritzen et al., (2014): Held-Suarez simulations with the Community Atmosphere Model + ! Spectral Element (CAM-SE) dynamical core: A global axial angularmomentum analysis using Eulerian + ! and floating Lagrangian vertical coordinates. J. Adv. Model. Earth Syst. 6,129-140, + ! doi:10.1002/2013MS000268 + ! + ! MR is equation (6) without \Delta A and sum over areas (areas are in units of radians**2) + ! MO is equation (7) without \Delta A and sum over areas (areas are in units of radians**2) + ! + name_out1 = 'MR_' //trim(outfld_name_suffix) + name_out2 = 'MO_' //trim(outfld_name_suffix) + + if ( hist_fld_active(name_out1).or.hist_fld_active(name_out2)) then + call strlist_get_ind(cnst_name_gll, 'CLDLIQ', ixcldliq, abort=.false.) + call strlist_get_ind(cnst_name_gll, 'CLDICE', ixcldice, abort=.false.) 
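+    ! per-point, per-level contributions accumulated below (a = rearth):
+    !   wind AAM:  mr_tmp = (a**3/gravit) * u * dp * cos(lat)
+    !   mass AAM:  mo_tmp = (omega*a**4/gravit) * dp * cos(lat)**2
+    ! the Delta-A area factors noted above are not included here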
+ mr_cnst = rearth**3/gravit + mo_cnst = omega*rearth**4/gravit + do ie=nets,nete + mr = 0.0_r8 + mo = 0.0_r8 + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),2,thermodynamic_active_species_idx_dycore,& + elem(ie)%state%dp3d(:,:,:,tl),pdel,ps=ps,ptop=hyai(1)*ps0) + do k = 1, nlev + do j=1,np + do i = 1, np + cos_lat = cos(elem(ie)%spherep(i,j)%lat) + mr_tmp = mr_cnst*elem(ie)%state%v(i,j,1,k,tl)*pdel(i,j,k)*cos_lat + mo_tmp = mo_cnst*pdel(i,j,k)*cos_lat**2 + + mr (i+(j-1)*np) = mr (i+(j-1)*np) + mr_tmp + mo (i+(j-1)*np) = mo (i+(j-1)*np) + mo_tmp + end do + end do + end do + call outfld(name_out1 ,mr ,npsq,ie) + call outfld(name_out2 ,mo ,npsq,ie) + end do + end if + + + end subroutine calc_tot_energy_dynamics + + subroutine output_qdp_var_dynamics(qdp,nx,num_trac,nets,nete,outfld_name) + use dimensions_mod, only: nlev,ntrac + use cam_history , only: outfld, hist_fld_active + use constituents , only: cnst_get_ind + !------------------------------Arguments-------------------------------- + + integer ,intent(in) :: nx,num_trac,nets,nete + real(kind=r8) :: qdp(nx,nx,nlev,num_trac,nets:nete) + character*(*),intent(in) :: outfld_name + + !---------------------------Local storage------------------------------- + + integer :: ie + integer :: ixcldice, ixcldliq, ixtt + character(len=16) :: name_out1,name_out2,name_out3,name_out4 + !----------------------------------------------------------------------- + + name_out1 = 'WV_' //trim(outfld_name) + name_out2 = 'WI_' //trim(outfld_name) + name_out3 = 'WL_' //trim(outfld_name) + name_out4 = 'TT_' //trim(outfld_name) + + if ( hist_fld_active(name_out1).or.hist_fld_active(name_out2).or.hist_fld_active(name_out3).or.& + hist_fld_active(name_out4)) then + + call cnst_get_ind('CLDLIQ', ixcldliq, abort=.false.) + call cnst_get_ind('CLDICE', ixcldice, abort=.false.) + call cnst_get_ind('TT_LW' , ixtt , abort=.false.) + + do ie=nets,nete + call util_function(qdp(:,:,:,1,ie),nx,nlev,name_out1,ie) + if (ixcldice>0) call util_function(qdp(:,:,:,ixcldice,ie),nx,nlev,name_out2,ie) + if (ixcldliq>0) call util_function(qdp(:,:,:,ixcldliq,ie),nx,nlev,name_out3,ie) + if (ixtt>0 ) call util_function(qdp(:,:,:,ixtt ,ie),nx,nlev,name_out4,ie) + end do + end if + end subroutine output_qdp_var_dynamics + + ! + ! column integrate mass-variable and outfld + ! 
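The helper below sums a mass-weighted field over the levels of a column and divides by gravity; since the callers pass q*dp (Pa), the result is a column burden in kg m-2. A minimal sketch with a hypothetical three-level column; the layer values are made up:

program column_burden_sketch
  ! Mimics util_function: vertically integrate a mass-weighted field
  ! (e.g. q*dp, Pa) and divide by gravity to get a column burden (kg/m2).
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  integer, parameter :: nz = 3
  real(r8), parameter :: gravit = 9.80616_r8   ! m/s2
  real(r8) :: qdp(nz)                          ! hypothetical q*dp per layer (Pa)
  real(r8) :: burden
  integer  :: k

  qdp = (/ 0.10_r8, 0.25_r8, 0.40_r8 /)        ! made-up values

  burden = 0.0_r8
  do k = 1, nz
     burden = burden + qdp(k)
  end do
  burden = burden/gravit                       ! same 1/g scaling as util_function

  print '(a,f12.6,a)', 'column burden = ', burden, ' kg m-2'
end program column_burden_sketch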
+ subroutine util_function(f_in,nx,nz,name_out,ie) + use physconst, only: gravit + use cam_history, only: outfld, hist_fld_active + integer, intent(in) :: nx,nz,ie + real(kind=r8), intent(in) :: f_in(nx,nx,nz) + character(len=16), intent(in) :: name_out + real(kind=r8) :: f_out(nx*nx) + integer :: i,j,k + real(kind=r8) :: inv_g + if (hist_fld_active(name_out)) then + f_out = 0.0_r8 + inv_g = 1.0_r8/gravit + do k = 1, nz + do j = 1, nx + do i = 1, nx + f_out(i+(j-1)*nx) = f_out(i+(j-1)*nx) + f_in(i,j,k) + end do + end do + end do + f_out = f_out*inv_g + call outfld(name_out,f_out,nx*nx,ie) + end if + end subroutine util_function + + subroutine compute_omega(hybrid,n0,qn0,elem,deriv,nets,nete,dt,hvcoord) + use control_mod, only : nu_p, hypervis_subcycle + use dimensions_mod, only : np, nlev, qsize + use hybrid_mod, only : hybrid_t + use element_mod, only : element_t + use derivative_mod, only : divergence_sphere, derivative_t,gradient_sphere + use hybvcoord_mod, only : hvcoord_t + use edge_mod, only : edgevpack, edgevunpack + use bndry_mod, only : bndry_exchange + use viscosity_mod, only: biharmonic_wk_omega + use physconst, only: thermodynamic_active_species_num, get_dp + use physconst, only: thermodynamic_active_species_idx_dycore + implicit none + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + type (derivative_t) , intent(in) :: deriv + integer , intent(in) :: nets,nete,n0,qn0 + real (kind=r8) , intent(in) :: dt + type (hvcoord_t) , intent(in) :: hvcoord + + integer :: i,j,k,ie,kptr,ic + real (kind=r8) :: ckk, suml(np,np), v1, v2, term + real (kind=r8) :: dp_full(np,np,nlev) + real (kind=r8) :: p_full(np,np,nlev),grad_p_full(np,np,2),vgrad_p_full(np,np,nlev) + real (kind=r8) :: divdp_full(np,np,nlev),vdp_full(np,np,2) + real(kind=r8) :: Otens(np,np ,nlev,nets:nete), dt_hyper, sum_water(np,np,nlev) + + logical, parameter :: del4omega = .true. + + do ie=nets,nete + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,qn0),2,& + thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,n0),dp_full) + do k=1,nlev + if (k==1) then + p_full(:,:,k) = hvcoord%hyai(k)*hvcoord%ps0 + dp_full(:,:,k)/2 + else + p_full(:,:,k)=p_full(:,:,k-1) + dp_full(:,:,k-1)/2 + dp_full(:,:,k)/2 + endif + call gradient_sphere(p_full(:,:,k),deriv,elem(ie)%Dinv,grad_p_full (:,:,:)) + do j=1,np + do i=1,np + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + vdp_full(i,j,1) = dp_full(i,j,k)*v1 + vdp_full(i,j,2) = dp_full(i,j,k)*v2 + vgrad_p_full(i,j,k) = (v1*grad_p_full(i,j,1) + v2*grad_p_full(i,j,2)) + end do + end do + call divergence_sphere(vdp_full(:,:,:),deriv,elem(ie),divdp_full(:,:,k)) + end do + ckk = 0.5_r8 + suml(:,: ) = 0 + do k=1,nlev + do j=1,np ! 
Loop inversion (AAM) + do i=1,np + term = -divdp_full(i,j,k) + + v1 = elem(ie)%state%v(i,j,1,k,n0) + v2 = elem(ie)%state%v(i,j,2,k,n0) + + elem(ie)%derived%omega(i,j,k) = suml(i,j) + ckk*term+vgrad_p_full(i,j,k) + + suml(i,j) = suml(i,j) + term + end do + end do + end do + end do + do ie=nets,nete + do k=1,nlev + elem(ie)%derived%omega(:,:,k) = elem(ie)%spheremp(:,:)*elem(ie)%derived%omega(:,:,k) + end do + kptr=0 + call edgeVpack(edgeOmega, elem(ie)%derived%omega(:,:,:),nlev,kptr, ie) + end do + call bndry_exchange(hybrid,edgeOmega,location='compute_omega #1') + do ie=nets,nete + kptr=0 + call edgeVunpack(edgeOmega, elem(ie)%derived%omega(:,:,:),nlev,kptr, ie) + do k=1,nlev + elem(ie)%derived%omega(:,:,k) = elem(ie)%rspheremp(:,:)*elem(ie)%derived%omega(:,:,k) + end do + end do + + if (del4omega) then + dt_hyper=dt/hypervis_subcycle + do ic=1,hypervis_subcycle + do ie=nets,nete + Otens(:,:,:,ie) = elem(ie)%derived%omega(:,:,:) + end do + call biharmonic_wk_omega(elem,Otens,deriv,edgeOmega,hybrid,nets,nete,1,nlev) + do ie=nets,nete + do k=1,nlev + Otens(:,:,k,ie) = -dt_hyper*nu_p*Otens(:,:,k,ie) + end do + kptr=0 + call edgeVpack(edgeOmega,Otens(:,:,:,ie) ,nlev,kptr, ie) + end do + call bndry_exchange(hybrid,edgeOmega,location='compute_omega #2') + do ie=nets,nete + kptr=0 + call edgeVunpack(edgeOmega, Otens(:,:,:,ie),nlev,kptr, ie) + end do + do ie=nets,nete + do k=1,nlev + elem(ie)%derived%omega(:,:,k) =elem(ie)%derived%omega(:,:,k)+& + elem(ie)%rspheremp(:,:)*Otens(:,:,k,ie) + end do + end do + end do + end if + !call FreeEdgeBuffer(edgeOmega) + end subroutine compute_omega + + + subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) + ! + ! calc_dp3d_reference: When the del^4 horizontal damping is applied to dp3d + ! the values are implicitly affected by natural variations + ! due to surface topography. + ! + ! To account for these physicaly correct variations, use + ! the current state values to compute appropriate + ! reference values for the current (lagrangian) ETA-surfaces. + ! Damping should then be applied to values relative to + ! this reference. + !======================================================================= + use hybvcoord_mod ,only: hvcoord_t + use physconst ,only: rair,cappa + use element_mod, only: element_t + use dimensions_mod, only: np,nlev + use hybrid_mod, only: hybrid_t + use edge_mod, only: edgevpack, edgevunpack + use bndry_mod, only: bndry_exchange + ! + ! Passed variables + !------------------- + type(element_t ),target,intent(inout):: elem(:) + type(EdgeBuffer_t) ,intent(inout):: edge3 + type(hybrid_t ) ,intent(in ):: hybrid + integer ,intent(in ):: nets,nete + integer ,intent(in ):: nt + type(hvcoord_t ) ,intent(in ):: hvcoord + real(kind=r8) ,intent(out ):: dp3d_ref(np,np,nlev,nets:nete) + ! + ! 
Local Values + !-------------- + real(kind=r8):: Phis_avg(np,np, nets:nete) + real(kind=r8):: Phi_avg (np,np,nlev,nets:nete) + real(kind=r8):: RT_avg (np,np,nlev,nets:nete) + real(kind=r8):: P_val (np,np,nlev) + real(kind=r8):: Ps_val (np,np) + real(kind=r8):: Phi_val (np,np,nlev) + real(kind=r8):: Phi_ival(np,np) + real(kind=r8):: I_Phi (np,np,nlev+1) + real(kind=r8):: Alpha (np,np,nlev ) + real(kind=r8):: I_P (np,np,nlev+1) + real(kind=r8):: DP_avg (np,np,nlev) + real(kind=r8):: P_avg (np,np,nlev) + real(kind=r8):: Ps_avg (np,np) + real(kind=r8):: Ps_ref (np,np) + real(kind=r8):: RT_lapse(np,np) + real(kind=r8):: dlt_Ps (np,np) + real(kind=r8):: dPhi (np,np,nlev) + real(kind=r8):: dPhis (np,np) + real(kind=r8):: E_Awgt,E_phis,E_phi(nlev),E_T(nlev),Lapse0,Expon0 + integer :: ie,ii,jj,kk,kptr + + ! Loop over elements + !-------------------- + do ie=nets,nete + + ! Calculate Pressure values from dp3dp + !-------------------------------------- + P_val(:,:,1) = hvcoord%hyai(1)*hvcoord%ps0 + elem(ie)%state%dp3d(:,:,1,nt)*0.5_r8 + do kk=2,nlev + P_val(:,:,kk) = P_val(:,:,kk-1) & + + elem(ie)%state%dp3d(:,:,kk-1,nt)*0.5_r8 & + + elem(ie)%state%dp3d(:,:,kk ,nt)*0.5_r8 + end do + Ps_val(:,:) = P_val(:,:,nlev) + elem(ie)%state%dp3d(:,:,nlev,nt)*0.5_r8 + + ! Calculate (dry) geopotential values + !-------------------------------------- + dPhi (:,:,:) = 0.5_r8*(rair*elem(ie)%state%T (:,:,:,nt) & + *elem(ie)%state%dp3d(:,:,:,nt) & + /P_val(:,:,:) ) + Phi_val (:,:,nlev) = elem(ie)%state%phis(:,:) + dPhi(:,:,nlev) + Phi_ival(:,:) = elem(ie)%state%phis(:,:) + dPhi(:,:,nlev)*2._r8 + do kk=(nlev-1),1,-1 + Phi_val (:,:,kk) = Phi_ival(:,:) + dPhi(:,:,kk) + Phi_ival(:,:) = Phi_val (:,:,kk) + dPhi(:,:,kk) + end do + + ! Calculate Element averages + !---------------------------- + E_Awgt = 0.0_r8 + E_phis = 0.0_r8 + E_phi(:) = 0._r8 + E_T (:) = 0._r8 + do jj=1,np + do ii=1,np + E_Awgt = E_Awgt + elem(ie)%spheremp(ii,jj) + E_phis = E_phis + elem(ie)%spheremp(ii,jj)*elem(ie)%state%phis(ii,jj) + E_phi (:) = E_phi (:) + elem(ie)%spheremp(ii,jj)*Phi_val(ii,jj,:) + E_T (:) = E_T (:) + elem(ie)%spheremp(ii,jj)*elem(ie)%state%T(ii,jj,:,nt) + end do + end do + + Phis_avg(:,:,ie) = E_phis/E_Awgt + do kk=1,nlev + Phi_avg(:,:,kk,ie) = E_phi(kk) /E_Awgt + RT_avg (:,:,kk,ie) = E_T (kk)*rair/E_Awgt + end do + end do ! ie=nets,nete + + ! Boundary Exchange of average values + !------------------------------------- + do ie=nets,nete + Phis_avg(:,:,ie) = elem(ie)%spheremp(:,:)*Phis_avg(:,:,ie) + do kk=1,nlev + Phi_avg(:,:,kk,ie) = elem(ie)%spheremp(:,:)*Phi_avg(:,:,kk,ie) + RT_avg (:,:,kk,ie) = elem(ie)%spheremp(:,:)*RT_avg (:,:,kk,ie) + end do + kptr = 0 + call edgeVpack(edge3,Phi_avg(:,:,:,ie),nlev,kptr,ie) + kptr = nlev + call edgeVpack(edge3,RT_avg (:,:,:,ie),nlev,kptr,ie) + kptr = 2*nlev + call edgeVpack(edge3,Phis_avg (:,:,ie),1 ,kptr,ie) + end do ! ie=nets,nete + + call bndry_exchange(hybrid,edge3,location='calc_dp3d_reference') + + do ie=nets,nete + kptr = 0 + call edgeVunpack(edge3,Phi_avg(:,:,:,ie),nlev,kptr,ie) + kptr = nlev + call edgeVunpack(edge3,RT_avg (:,:,:,ie),nlev,kptr,ie) + kptr = 2*nlev + call edgeVunpack(edge3,Phis_avg (:,:,ie),1 ,kptr,ie) + Phis_avg(:,:,ie) = elem(ie)%rspheremp(:,:)*Phis_avg(:,:,ie) + do kk=1,nlev + Phi_avg(:,:,kk,ie) = elem(ie)%rspheremp(:,:)*Phi_avg(:,:,kk,ie) + RT_avg (:,:,kk,ie) = elem(ie)%rspheremp(:,:)*RT_avg (:,:,kk,ie) + end do + end do ! ie=nets,nete + + ! Loop over elements + !-------------------- + do ie=nets,nete + + ! 
Fill elements with uniformly varying average values + !----------------------------------------------------- + call fill_element(Phis_avg(1,1,ie)) + do kk=1,nlev + call fill_element(Phi_avg(1,1,kk,ie)) + call fill_element(RT_avg (1,1,kk,ie)) + end do + + ! Integrate upward to compute Alpha == (dp3d/P) + !---------------------------------------------- + I_Phi(:,:,nlev+1) = Phis_avg(:,:,ie) + do kk=nlev,1,-1 + I_Phi(:,:,kk) = 2._r8* Phi_avg(:,:,kk,ie) - I_Phi(:,:,kk+1) + Alpha(:,:,kk) = 2._r8*(Phi_avg(:,:,kk,ie) - I_Phi(:,:,kk+1))/RT_avg(:,:,kk,ie) + end do + + ! Integrate downward to compute corresponding average pressure values + !--------------------------------------------------------------------- + I_P(:,:,1) = hvcoord%hyai(1)*hvcoord%ps0 + do kk=1,nlev + DP_avg(:,:,kk ) = I_P(:,:,kk)*(2._r8 * Alpha(:,:,kk))/(2._r8 - Alpha(:,:,kk)) + P_avg (:,:,kk ) = I_P(:,:,kk)*(2._r8 )/(2._r8 - Alpha(:,:,kk)) + I_P (:,:,kk+1) = I_P(:,:,kk)*(2._r8 + Alpha(:,:,kk))/(2._r8 - Alpha(:,:,kk)) + end do + Ps_avg(:,:) = I_P(:,:,nlev+1) + + ! Determine an appropriate d/d lapse rate near the surface + ! OPTIONALLY: Use dry adiabatic lapse rate or environmental lapse rate. + !----------------------------------------------------------------------- + if(.FALSE.) then + ! DRY ADIABATIC laspe rate + !------------------------------ + RT_lapse(:,:) = -cappa + else + ! ENVIRONMENTAL (empirical) laspe rate + !-------------------------------------- + RT_lapse(:,:) = (RT_avg (:,:,nlev-1,ie)-RT_avg (:,:,nlev,ie)) & + /(Phi_avg(:,:,nlev-1,ie)-Phi_avg(:,:,nlev,ie)) + endif + + ! Calcualte reference surface pressure + !-------------------------------------- + dPhis(:,:) = elem(ie)%state%phis(:,:)-Phis_avg(:,:,ie) + do jj=1,np + do ii=1,np + if (abs(RT_lapse(ii,jj)) .gt. 1.e-3_r8) then + Lapse0 = RT_lapse(ii,jj)/RT_avg(ii,jj,nlev,ie) + Expon0 = (-1._r8/RT_lapse(ii,jj)) + Ps_ref(ii,jj) = Ps_avg(ii,jj)*((1._r8 + Lapse0*dPhis(ii,jj))**Expon0) + else + Ps_ref(ii,jj) = Ps_avg(ii,jj)*exp(-dPhis(ii,jj)/RT_avg(ii,jj,nlev,ie)) + endif + end do + end do + + ! Calculate reference dp3d values + !--------------------------------- + dlt_Ps(:,:) = Ps_ref(:,:) - Ps_avg(:,:) + do kk=1,nlev + dp3d_ref(:,:,kk,ie) = DP_avg(:,:,kk) + (hvcoord%hybi(kk+1) & + -hvcoord%hybi(kk ))*dlt_Ps(:,:) + end do + + end do ! ie=nets,nete + + ! End Routine + !------------ + return + end subroutine calc_dp3d_reference + !============================================================================= + + + !============================================================================= + subroutine fill_element(Eval) + ! + ! fill_element_bilin: Fill in element gridpoints using local bi-linear + ! interpolation of nearby average values. + ! + ! NOTE: This routine is hard coded for NP=4, if a + ! different value of NP is used... bad things + ! will happen. + !======================================================================= + use dimensions_mod,only: np + ! + ! Passed variables + !------------------- + real(kind=r8),intent(inout):: Eval(np,np) + ! + ! Local Values + !-------------- + real(kind=r8):: X0 + real(kind=r8):: S1,S2,S3,S4 + real(kind=r8):: C1,C2,C3,C4 + real(kind=r8):: E1,E2,E3,E4,E0 + + X0 = sqrt(1._r8/5._r8) + + ! Set the "known" values Eval + !---------------------------- + S1 = (Eval(1 ,2 )+Eval(1 ,3 ))/2._r8 + S2 = (Eval(2 ,np)+Eval(3 ,np))/2._r8 + S3 = (Eval(np,2 )+Eval(np,3 ))/2._r8 + S4 = (Eval(2 ,1 )+Eval(3 ,1 ))/2._r8 + C1 = Eval(1 ,1 ) + C2 = Eval(1 ,np) + C3 = Eval(np,np) + C4 = Eval(np,1 ) + + ! 
E0 OPTION: Element Center value: + !--------------------------------- + IF(.FALSE.) THEN + ! Use ELEMENT AVERAGE value contained in (2,2) + !---------------------------------------------- + E0 = Eval(2,2) + ELSE + ! Use AVG OF SIDE VALUES after boundary exchange of E0 (smooting option) + !----------------------------------------------------------------------- + E0 = (S1 + S2 + S3 + S4)/4._r8 + ENDIF + + ! Calc interior values along center axes + !---------------------------------------- + E1 = E0 + X0*(S1-E0) + E2 = E0 + X0*(S2-E0) + E3 = E0 + X0*(S3-E0) + E4 = E0 + X0*(S4-E0) + + ! Calculate Side Gridpoint Values for Eval + !------------------------------------------ + Eval(1 ,2 ) = S1 + X0*(C1-S1) + Eval(1 ,3 ) = S1 + X0*(C2-S1) + Eval(2 ,np) = S2 + X0*(C2-S2) + Eval(3 ,np) = S2 + X0*(C3-S2) + Eval(np,2 ) = S3 + X0*(C4-S3) + Eval(np,3 ) = S3 + X0*(C3-S3) + Eval(2 ,1 ) = S4 + X0*(C1-S4) + Eval(3 ,1 ) = S4 + X0*(C4-S4) + + ! Calculate interior values + !--------------------------- + Eval(2 ,2 ) = E1 + X0*(Eval(2 ,1 )-E1) + Eval(2 ,3 ) = E1 + X0*(Eval(2 ,np)-E1) + Eval(3 ,2 ) = E3 + X0*(Eval(3 ,1 )-E3) + Eval(3 ,3 ) = E3 + X0*(Eval(3 ,np)-E3) + + ! End Routine + !------------ + return + end subroutine fill_element + + subroutine rayleigh_friction(elem,nt,nets,nete,dt) + use dimensions_mod, only: nlev, otau + use hybrid_mod, only: hybrid_t!, get_loop_ranges + use element_mod, only: element_t + + type (element_t) , intent(inout), target :: elem(:) + integer , intent(in) :: nets,nete, nt + real(r8) :: dt + + real(r8) :: c1, c2 + integer :: k,ie + + do ie=nets,nete + do k=1,nlev + c2 = 1._r8 / (1._r8 + otau(k)*dt) + c1 = -otau(k) * c2 * dt + elem(ie)%state%v(:,:,:,k,nt) = elem(ie)%state%v(:,:,:,k,nt)+c1 * elem(ie)%state%v(:,:,:,k,nt) +! ptend%s(:ncol,k) = c3 * (state%u(:ncol,k)**2 + state%v(:ncol,k)**2) + enddo + end do + end subroutine rayleigh_friction + + + + subroutine solve_diffusion(dt,nx,nlev,i,j,nlay,pmid,pint,km,fld,boundary_condition,dfld) + use physconst, only: gravit + real(kind=r8), intent(in) :: dt + integer , intent(in) :: nlay, nlev,nx, i, j + real(kind=r8), intent(in) :: pmid(nx,nx,nlay),pint(nx,nx,nlay+1),km(nx,nx,nlay+1) + real(kind=r8), intent(in) :: fld(nx,nx,nlev) + real(kind=r8), intent(out) :: dfld(nlay) + integer :: boundary_condition + ! + real(kind=r8), dimension(nlay) :: current_guess,next_iterate + real(kind=r8) :: alp, alm, value_level0 + integer :: k,iter, niterations=4 + + ! Make the guess for the next time step equal to the initial value + current_guess(:)= fld(i,j,1:nlay) + do iter = 1, niterations + ! two formulations of the upper boundary condition + !next_iterate(1) = (initial_value(1) + alp * current_guess(i+1) + alm * current_guess(1)) /(1. + alp + alm) ! top BC, u'=0 + if (boundary_condition==0) then + next_iterate(1) = fld(i,j,1) ! u doesn't get prognosed by diffusion at top + else if (boundary_condition==1) then + value_level0 = 0.75_r8*fld(i,j,1) ! value above sponge + k=1 + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * value_level0)/(1._r8 + alp + alm) + else + ! + ! set fld'=0 at model top + ! 
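+             ! alp/alm are the nondimensional implicit couplings to the levels
+             ! below (k+1) and above (k-1); reusing current_guess(1) as the
+             ! "level 0" value gives the zero-gradient (fld'=0) top condition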
+ k=1 + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + next_iterate(k) = (fld(i,j,1) + alp * current_guess(2) + alm * current_guess(1))/(1._r8 + alp + alm) + end if + do k = 2, nlay-1 + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k )-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(pmid(i,j,k-1)-pmid(i,j,k )))/(pint(i,j,k)-pint(i,j,k+1)) + next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * current_guess(k-1))/(1._r8 + alp + alm) + end do + next_iterate(nlay) = (fld(i,j,nlay) + alp * fld(i,j,nlay) + alm * current_guess(nlay-1))/(1._r8 + alp + alm) ! bottom BC + + ! before the next iterate, make the current guess equal to the values of the last iteration + current_guess(:) = next_iterate(:) + end do + dfld(:) = next_iterate(:) - fld(i,j,1:nlay) + + end subroutine solve_diffusion + + +end module prim_advance_mod diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 new file mode 100644 index 00000000..41e15744 --- /dev/null +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -0,0 +1,1122 @@ +#define OVERLAP 1 +module prim_advection_mod +! +! two formulations. both are conservative +! u grad Q formulation: +! +! d/dt[ Q] + U grad Q = 0 +! +! d/dt[ dp/dn ] = div( dp/dn U ) +! +! total divergence formulation: +! d/dt[dp/dn Q] + div( U dp/dn Q ) = 0 +! +! for convience, rewrite this as dp Q: (since dn does not depend on time or the horizonal): +! equation is now: +! d/dt[dp Q] + div( U dp Q ) = 0 +! +! + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: nlev, np, qsize, nc + use physconst, only: cpair + use derivative_mod, only: derivative_t + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use hybvcoord_mod, only: hvcoord_t + use time_mod, only: TimeLevel_t, TimeLevel_Qdp + use control_mod, only: nu_q, nu_p, limiter_option, hypervis_subcycle_q, rsplit + use edge_mod, only: edgevpack, edgevunpack, initedgebuffer, initedgesbuffer + + use edgetype_mod, only: EdgeBuffer_t + use hybrid_mod, only: hybrid_t + use viscosity_mod, only: biharmonic_wk_scalar, neighbor_minmax, & + neighbor_minmax_start, neighbor_minmax_finish + use perf_mod, only: t_startf, t_stopf, t_barrierf + use cam_abortutils, only: endrun + use thread_mod, only: horz_num_threads, vert_num_threads, tracer_num_threads + + implicit none + + private + save + + public :: Prim_Advec_Init1, Prim_Advec_Init2 + public :: Prim_Advec_Tracers_remap + public :: prim_advec_tracers_fvm + public :: vertical_remap + + type (EdgeBuffer_t) :: edgeAdv, edgeAdvp1, edgeAdvQminmax, edgeAdv1, edgeveloc + + integer,parameter :: DSSeta = 1 + integer,parameter :: DSSomega = 2 + integer,parameter :: DSSdiv_vdp_ave = 3 + integer,parameter :: DSSno_var = -1 + + real(kind=r8), allocatable :: qmin(:,:,:), qmax(:,:,:) + +!JMD I don't see why this needs to be thread private. +!JMD type (derivative_t), public, allocatable :: deriv(:) ! derivative struct (nthreads) + type (derivative_t), public :: deriv + + +contains + + + subroutine Prim_Advec_Init1(par, elem) + use dimensions_mod, only : nlev, qsize, nelemd,ntrac + use parallel_mod, only : parallel_t, boundaryCommMethod + type(parallel_t) :: par + type (element_t) :: elem(:) + ! + ! Shared buffer pointers. + ! Using "=> null()" in a subroutine is usually bad, because it makes + ! 
the variable have an implicit "save", and therefore shared between + ! threads. But in this case we want shared pointers. + real(kind=r8), pointer :: buf_ptr(:) => null() + real(kind=r8), pointer :: receive_ptr(:) => null() + integer :: advec_remap_num_threads + + + ! + ! Set the number of threads used in the subroutine Prim_Advec_tracers_remap() + ! + if (ntrac>0) then + advec_remap_num_threads = 1 + else + advec_remap_num_threads = tracer_num_threads + endif + ! this might be called with qsize=0 + ! allocate largest one first + ! Currently this is never freed. If it was, only this first one should + ! be freed, as only it knows the true size of the buffer. + call initEdgeBuffer(par,edgeAdvp1,elem,qsize*nlev + nlev,bndry_type=boundaryCommMethod,& + nthreads=horz_num_threads*advec_remap_num_threads) + call initEdgeBuffer(par,edgeAdv,elem,qsize*nlev,bndry_type=boundaryCommMethod, & + nthreads=horz_num_threads*advec_remap_num_threads) + ! This is a different type of buffer pointer allocation + ! used for determine the minimum and maximum value from + ! neighboring elements + call initEdgeSBuffer(par,edgeAdvQminmax,elem,qsize*nlev*2,bndry_type=boundaryCommMethod, & + nthreads=horz_num_threads*advec_remap_num_threads) + + call initEdgeBuffer(par,edgeAdv1,elem,nlev,bndry_type=boundaryCommMethod) + call initEdgeBuffer(par,edgeveloc,elem,2*nlev,bndry_type=boundaryCommMethod) + + + ! Don't actually want these saved, if this is ever called twice. + nullify(buf_ptr) + nullify(receive_ptr) + + + ! this static array is shared by all threads, so dimension for all threads (nelemd), not nets:nete: + allocate (qmin(nlev,qsize,nelemd)) + allocate (qmax(nlev,qsize,nelemd)) + + end subroutine Prim_Advec_Init1 + + subroutine Prim_Advec_Init2(fvm_corners, fvm_points) + use dimensions_mod, only : nc + use derivative_mod, only : derivinit + + real(kind=r8), intent(in) :: fvm_corners(nc+1) + real(kind=r8), intent(in) :: fvm_points(nc) + + ! ================================== + ! Initialize derivative structure + ! ================================== + call derivinit(deriv,fvm_corners, fvm_points) + end subroutine Prim_Advec_Init2 + + ! + ! fvm driver + ! + subroutine Prim_Advec_Tracers_fvm(elem,fvm,hvcoord,hybrid,& + dt,tl,nets,nete,ghostbufQnhc,ghostBufQ1, ghostBufFlux,kmin,kmax) + use fvm_consistent_se_cslam, only: run_consistent_se_cslam + use edgetype_mod, only: edgebuffer_t + implicit none + type (element_t), intent(inout) :: elem(:) + type (fvm_struct), intent(inout) :: fvm(:) + type (hvcoord_t) :: hvcoord + type (hybrid_t), intent(in):: hybrid + type (TimeLevel_t) :: tl + + real(kind=r8) , intent(in) :: dt + integer,intent(in) :: nets,nete,kmin,kmax + type (EdgeBuffer_t), intent(inout):: ghostbufQnhc,ghostBufQ1, ghostBufFlux + + call t_barrierf('sync_prim_advec_tracers_fvm', hybrid%par%comm) + call t_startf('prim_advec_tracers_fvm') + + if (rsplit==0) call endrun('cslam only works for rsplit>0') + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! 2D advection step + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + call run_consistent_se_cslam(elem,fvm,hybrid,dt,tl,nets,nete,hvcoord,& + ghostbufQnhc,ghostBufQ1, ghostBufFlux,kmin,kmax) + call t_stopf('prim_advec_tracers_fvm') + end subroutine Prim_Advec_Tracers_fvm + + + +!=================================================================================================! 
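The remap driver below takes three forward-Euler stages of length dt/2 and then forms Qdp(np1) = (Qdp(n0) + 2*Qdp(np1))/3 in qdp_time_avg. A minimal sketch on the scalar problem dq/dt = lambda*q (lambda, dt and the refinement loop are made up, and the stages are idealized as plain forward Euler), showing that the combined update is second-order accurate, i.e. the one-step error shrinks roughly as dt**3:

program ssp_rk2_combination_sketch
  ! Checks the accuracy of the combination used in qdp_time_avg:
  ! three forward-Euler steps of dt/2 followed by
  ! q(np1) = ( q(n0) + 2*q(np1) ) / 3.
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8), parameter :: lambda = -1.0_r8      ! made-up decay rate
  real(r8) :: dt, q0, q, exact, err
  integer  :: i, n

  q0 = 1.0_r8
  dt = 0.5_r8
  do n = 1, 4
     q = q0
     do i = 1, 3
        q = q + 0.5_r8*dt*lambda*q             ! forward Euler with dt/2
     end do
     q = (q0 + 2.0_r8*q)/3.0_r8                ! the rkstage=3 average
     exact = q0*exp(lambda*dt)
     err   = abs(q - exact)
     print '(a,f8.4,a,es12.4)', 'dt = ', dt, '   one-step error = ', err
     dt = dt/2.0_r8                            ! local error ~ dt**3, so halving dt cuts it by ~8
  end do
end program ssp_rk2_combination_sketch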
+ + subroutine Prim_Advec_Tracers_remap( elem , deriv , hvcoord , hybrid , dt , tl , nets , nete ) + implicit none + type (element_t) , intent(inout) :: elem(:) + type (derivative_t) , intent(in ) :: deriv + type (hvcoord_t) , intent(in ) :: hvcoord + type (hybrid_t) , intent(in ) :: hybrid + real(kind=r8) , intent(in ) :: dt + type (TimeLevel_t) , intent(inout) :: tl + integer , intent(in ) :: nets + integer , intent(in ) :: nete + + + !print *,'prim_Advec_Tracers_remap: qsize: ',qsize + call Prim_Advec_Tracers_remap_rk2( elem , deriv , hvcoord , hybrid , dt , tl , nets , nete ) + end subroutine Prim_Advec_Tracers_remap + + + subroutine euler_step_driver(np1_qdp , n0_qdp , dt , elem , hvcoord , hybrid , deriv , nets , nete , DSSopt , rhs_multiplier ) + + + integer , intent(in ) :: np1_qdp, n0_qdp + real (kind=r8), intent(in ) :: dt + type (element_t) , intent(inout) :: elem(:) + type (hvcoord_t) , intent(in ) :: hvcoord + type (hybrid_t) , intent(in ) :: hybrid + type (derivative_t) , intent(in ) :: deriv + integer , intent(in ) :: nets + integer , intent(in ) :: nete + integer , intent(in ) :: DSSopt + integer , intent(in ) :: rhs_multiplier + + call euler_step( np1_qdp , n0_qdp , dt , elem , hvcoord , hybrid , deriv , nets , nete , DSSopt , rhs_multiplier) + + end subroutine euler_step_driver + +!----------------------------------------------------------------------------- +!----------------------------------------------------------------------------- +! forward-in-time 2 level vertically lagrangian step +! this code takes a lagrangian step in the horizontal +! (complete with DSS), and then applies a vertical remap +! +! This routine may use dynamics fields at timelevel np1 +! In addition, other fields are required, which have to be +! explicitly saved by the dynamics: (in elem(ie)%derived struct) +! +! Fields required from dynamics: (in +! omega it will be DSS'd here, for later use by CAM physics +! we DSS omega here because it can be done for "free" +! Consistent mass/tracer-mass advection (used if subcycling turned on) +! dp() dp at timelevel n0 +! vn0() mean flux < U dp > going from n0 to np1 +! +! 3 stage +! Euler step from t -> t+.5 +! Euler step from t+.5 -> t+1.0 +! Euler step from t+1.0 -> t+1.5 +! u(t) = u(t)/3 + u(t+2)*2/3 +! +!----------------------------------------------------------------------------- +!----------------------------------------------------------------------------- + subroutine Prim_Advec_Tracers_remap_rk2( elem , deriv , hvcoord , hybrid , dt , tl , nets , nete ) + use derivative_mod, only : divergence_sphere + use control_mod , only : qsplit + use hybrid_mod , only : get_loop_ranges!, PrintHybrid +! use thread_mod , only : omp_set_num_threads, omp_get_thread_num + + type (element_t) , intent(inout) :: elem(:) + type (derivative_t) , intent(in ) :: deriv + type (hvcoord_t) , intent(in ) :: hvcoord + type (hybrid_t) , intent(in ) :: hybrid + real(kind=r8) , intent(in ) :: dt + type (TimeLevel_t) , intent(inout) :: tl + integer , intent(in ) :: nets + integer , intent(in ) :: nete + + real (kind=r8), dimension(np,np,2 ) :: gradQ + integer :: k,ie + integer :: rkstage,rhs_multiplier + integer :: n0_qdp, np1_qdp + integer :: kbeg,kend,qbeg,qend + +! call t_barrierf('sync_prim_advec_tracers_remap_k2', hybrid%par%comm) +! call t_startf('prim_advec_tracers_remap_rk2') +! call extrae_user_function(1) + call TimeLevel_Qdp( tl, qsplit, n0_qdp, np1_qdp) !time levels for qdp are not the same + rkstage = 3 ! 
3 stage RKSSP scheme, with optimal SSP CFL + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! RK2 2D advection step + ! note: stage 3 we take the oppertunity to DSS omega + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! use these for consistent advection (preserve Q=1) + ! derived%vdp_ave = mean horiz. flux: U*dp + ! derived%omega = advection code will DSS this for the physics, but otherwise + ! it is not needed + ! Also: save a copy of div(U dp) in derived%div(:,:,:,1), which will be DSS'd + ! and a DSS'ed version stored in derived%div(:,:,:,2) + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + do ie=nets,nete + do k=kbeg,kend + ! div( U dp Q), + gradQ(:,:,1)=elem(ie)%derived%vn0(:,:,1,k) + gradQ(:,:,2)=elem(ie)%derived%vn0(:,:,2,k) + ! elem(ie)%derived%divdp(:,:,k) = divergence_sphere(gradQ,deriv,elem(ie)) + call divergence_sphere(gradQ,deriv,elem(ie),elem(ie)%derived%divdp(:,:,k)) + elem(ie)%derived%divdp_proj(:,:,k) = elem(ie)%derived%divdp(:,:,k) + enddo + enddo + + + !rhs_multiplier is for obtaining dp_tracers at each stage: + !dp_tracers(stage) = dp - rhs_multiplier*dt*divdp_proj +! call t_startf('euler_step') + + rhs_multiplier = 0 + call euler_step_driver( np1_qdp, n0_qdp , dt/2, elem, hvcoord, hybrid, deriv, nets, nete, DSSdiv_vdp_ave, rhs_multiplier ) + + rhs_multiplier = 1 + call euler_step_driver( np1_qdp, np1_qdp, dt/2, elem, hvcoord, hybrid, deriv, nets, nete, DSSno_var , rhs_multiplier ) + + rhs_multiplier = 2 + call euler_step_driver( np1_qdp, np1_qdp, dt/2, elem, hvcoord, hybrid, deriv, nets, nete, DSSomega , rhs_multiplier ) + +! call t_stopf ('euler_step') + + !to finish the 2D advection step, we need to average the t and t+2 results to get a second order estimate for t+1. + call qdp_time_avg( elem , rkstage , n0_qdp , np1_qdp , hybrid, nets , nete ) + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! Dissipation + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + if ( limiter_option == 8 ) then + ! dissipation was applied in RHS. + else + call advance_hypervis_scalar(edgeadv,elem,hvcoord,hybrid,deriv,tl%np1,np1_qdp,nets,nete,dt) + endif +! call extrae_user_function(0) + +! 
call t_stopf('prim_advec_tracers_remap_rk2') + + end subroutine prim_advec_tracers_remap_rk2 + +!----------------------------------------------------------------------------- +!----------------------------------------------------------------------------- + + subroutine qdp_time_avg( elem , rkstage , n0_qdp , np1_qdp , hybrid , nets , nete ) + use hybrid_mod, only : hybrid_t, get_loop_ranges + implicit none + type(element_t) , intent(inout) :: elem(:) + integer , intent(in ) :: rkstage , n0_qdp , np1_qdp , nets , nete + type(hybrid_t) :: hybrid + integer :: i,j,ie,q,k + integer :: kbeg,kend,qbeg,qend + real(kind=r8) :: rrkstage + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + rrkstage=1.0_r8/real(rkstage,kind=r8) + do ie=nets,nete + do q=qbeg,qend + do k=kbeg,kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%Qdp(i,j,k,q,np1_qdp) = & + rrkstage *( elem(ie)%state%Qdp(i,j,k,q,n0_qdp) + & + (rkstage-1)*elem(ie)%state%Qdp(i,j,k,q,np1_qdp) ) + enddo + enddo + enddo + enddo + enddo + end subroutine qdp_time_avg + +!----------------------------------------------------------------------------- +!----------------------------------------------------------------------------- + + subroutine euler_step( np1_qdp , n0_qdp , dt , elem , hvcoord , hybrid , deriv , nets , nete , DSSopt , rhs_multiplier ) + ! =================================== + ! This routine is the basic foward + ! euler component used to construct RK SSP methods + ! + ! u(np1) = u(n0) + dt2*DSS[ RHS(u(n0)) ] + ! + ! n0 can be the same as np1. + ! + ! DSSopt = DSSeta or DSSomega: also DSS omega + ! + ! =================================== + use dimensions_mod , only : np, nlev + use hybrid_mod , only : hybrid_t!, PrintHybrid + use hybrid_mod , only : get_loop_ranges, threadOwnsTracer + use element_mod , only : element_t + use derivative_mod , only : derivative_t, divergence_sphere, limiter_optim_iter_full + use edge_mod , only : edgevpack, edgevunpack + use bndry_mod , only : bndry_exchange + use hybvcoord_mod , only : hvcoord_t + + integer , intent(in ) :: np1_qdp, n0_qdp + real (kind=r8), intent(in ) :: dt + type (element_t) , intent(inout), target :: elem(:) + type (hvcoord_t) , intent(in ) :: hvcoord + type (hybrid_t) , intent(in ) :: hybrid + type (derivative_t) , intent(in ) :: deriv + integer , intent(in ) :: nets + integer , intent(in ) :: nete + integer , intent(in ) :: DSSopt + integer , intent(in ) :: rhs_multiplier + + ! local + real(kind=r8), dimension(np,np ) :: dpdiss + real(kind=r8), dimension(np,np,nlev) :: dpdissk + real(kind=r8), dimension(np,np,2 ) :: gradQ + real(kind=r8), dimension(np,np,2,nlev ) :: Vstar + real(kind=r8), dimension(np,np ,nlev ) :: Qtens + real(kind=r8), dimension(np,np ,nlev ) :: dp + real(kind=r8), dimension(np,np ,nlev,qsize,nets:nete) :: Qtens_biharmonic + real(kind=r8), dimension(np,np) :: div + real(kind=r8), pointer, dimension(:,:,:) :: DSSvar + real(kind=r8) :: dp0(nlev) + integer :: ie,q,i,j,k, kptr + integer :: rhs_viss = 0 + integer :: kblk,qblk ! The per thead size of the vertical and tracers + integer :: kbeg, kend, qbeg, qend + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + kblk = kend - kbeg + 1 ! calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! calculate size of the block of tracers + + do k = kbeg, kend + dp0(k) = ( hvcoord%hyai(k+1) - hvcoord%hyai(k) )*hvcoord%ps0 + & + ( hvcoord%hybi(k+1) - hvcoord%hybi(k) )*hvcoord%ps0 + enddo + +! 
call t_startf('euler_step') + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! compute Q min/max values for lim8 + ! compute biharmonic mixing term f + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + rhs_viss = 0 + if ( limiter_option == 8 ) then + ! when running lim8, we also need to limit the biharmonic, so that term needs + ! to be included in each euler step. three possible algorithms here: + ! 1) most expensive: + ! compute biharmonic (which also computes qmin/qmax) during all 3 stages + ! be sure to set rhs_viss=1 + ! cost: 3 biharmonic steps with 3 DSS + ! + ! 2) cheapest: + ! compute biharmonic (which also computes qmin/qmax) only on first stage + ! be sure to set rhs_viss=3 + ! reuse qmin/qmax for all following stages (but update based on local qmin/qmax) + ! cost: 1 biharmonic steps with 1 DSS + ! main concern: viscosity + ! + ! 3) compromise: + ! compute biharmonic (which also computes qmin/qmax) only on last stage + ! be sure to set rhs_viss=3 + ! compute qmin/qmax directly on first stage + ! reuse qmin/qmax for 2nd stage stage (but update based on local qmin/qmax) + ! cost: 1 biharmonic steps, 2 DSS + ! + ! NOTE when nu_p=0 (no dissipation applied in dynamics to dp equation), we should + ! apply dissipation to Q (not Qdp) to preserve Q=1 + ! i.e. laplace(Qdp) ~ dp0 laplace(Q) + ! for nu_p=nu_q>0, we need to apply dissipation to Q * diffusion_dp + ! + ! initialize dp, and compute Q from Qdp (and store Q in Qtens_biharmonic) + do ie = nets, nete + ! add hyperviscosity to RHS. apply to Q at timelevel n0, Qdp(n0)/dp + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + dp(i,j,k) = elem(ie)%derived%dp(i,j,k) - rhs_multiplier*dt*elem(ie)%derived%divdp_proj(i,j,k) + enddo + enddo + enddo + !JMD need to update loop based on changes in dungeon21 tag + do q = qbeg, qend + do k= kbeg, kend + Qtens_biharmonic(:,:,k,q,ie) = elem(ie)%state%Qdp(:,:,k,q,n0_qdp)/dp(:,:,k) + if ( rhs_multiplier == 1 ) then + qmin(k,q,ie)=min(qmin(k,q,ie),minval(Qtens_biharmonic(:,:,k,q,ie))) + qmax(k,q,ie)=max(qmax(k,q,ie),maxval(Qtens_biharmonic(:,:,k,q,ie))) + else + qmin(k,q,ie)=minval(Qtens_biharmonic(:,:,k,q,ie)) + qmax(k,q,ie)=maxval(Qtens_biharmonic(:,:,k,q,ie)) + endif + enddo + enddo + enddo + + ! compute element qmin/qmax + if ( rhs_multiplier == 0 ) then + ! update qmin/qmax based on neighbor data for lim8 +! call t_startf('euler_neighbor_minmax1') + call neighbor_minmax(hybrid,edgeAdvQminmax,nets,nete,qmin(:,:,nets:nete),qmax(:,:,nets:nete)) +! call t_stopf('euler_neighbor_minmax1') + endif + + ! get niew min/max values, and also compute biharmonic mixing term + if ( rhs_multiplier == 2 ) then + rhs_viss = 3 + ! two scalings depending on nu_p: + ! nu_p=0: qtens_biharmonic *= dp0 (apply viscsoity only to q) + ! nu_p>0): qtens_biharmonc *= elem()%psdiss_ave (for consistency, if nu_p=nu_q) + if ( nu_p > 0 ) then + do ie = nets, nete + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + dpdissk(i,j,k) = elem(ie)%derived%dpdiss_ave(i,j,k)/dp0(k) + enddo + enddo + enddo + do q = qbeg,qend + do k = kbeg, kend + ! NOTE: divide by dp0 since we multiply by dp0 below + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + Qtens_biharmonic(i,j,k,q,ie)=Qtens_biharmonic(i,j,k,q,ie)*dpdissk(i,j,k) + enddo + enddo + enddo + enddo + enddo + endif + +! Previous version of biharmonic_wk_scalar_minmax included a min/max +! calculation into the boundary exchange. 
This was causing cache issues. +! Split the single operation into two separate calls +! call neighbor_minmax() +! call biharmonic_wk_scalar() +! +#ifdef OVERLAP + call neighbor_minmax_start(hybrid,edgeAdvQminmax,nets,nete,qmin(:,:,nets:nete),qmax(:,:,nets:nete)) + call biharmonic_wk_scalar(elem,qtens_biharmonic,deriv,edgeAdv,hybrid,nets,nete) + do ie = nets, nete + do q = qbeg, qend + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ! note: biharmonic_wk() output has mass matrix already applied. Un-apply since we apply again below: + qtens_biharmonic(i,j,k,q,ie) = & + -rhs_viss*dt*nu_q*dp0(k)*Qtens_biharmonic(i,j,k,q,ie) / elem(ie)%spheremp(i,j) + enddo + enddo + enddo + enddo + enddo + call neighbor_minmax_finish(hybrid,edgeAdvQminmax,nets,nete,qmin(:,:,nets:nete),qmax(:,:,nets:nete)) +#else + call t_startf('euler_neighbor_minmax2') + call neighbor_minmax(hybrid,edgeAdvQminmax,nets,nete,qmin(:,:,nets:nete),qmax(:,:,nets:nete)) + call t_stopf('euler_neighbor_minmax2') + call biharmonic_wk_scalar(elem,qtens_biharmonic,deriv,edgeAdv,hybrid,nets,nete) + + do ie = nets, nete + do q = qbeg, qend + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + ! note: biharmonic_wk() output has mass matrix already applied. Un-apply since we apply again below: + qtens_biharmonic(i,j,k,q,ie) = & + -rhs_viss*dt*nu_q*dp0(k)*Qtens_biharmonic(i,j,k,q,ie) / elem(ie)%spheremp(i,j) + enddo + enddo + enddo + enddo + enddo +#endif + + + endif + endif ! compute biharmonic mixing term and qmin/qmax + + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! 2D Advection step + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + do ie = nets, nete + + + ! Compute velocity used to advance Qdp + do k = kbeg, kend + ! derived variable divdp_proj() (DSS'd version of divdp) will only be correct on 2nd and 3rd stage + ! but that's ok because rhs_multiplier=0 on the first stage: + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + dp(i,j,k) = elem(ie)%derived%dp(i,j,k) - rhs_multiplier * dt * elem(ie)%derived%divdp_proj(i,j,k) + Vstar(i,j,1,k) = elem(ie)%derived%vn0(i,j,1,k) / dp(i,j,k) + Vstar(i,j,2,k) = elem(ie)%derived%vn0(i,j,2,k) / dp(i,j,k) + enddo + enddo + enddo + if ( limiter_option == 8) then + ! Note that the term dpdissk is independent of Q + do k = kbeg, kend + ! UN-DSS'ed dp at timelevel n0+1: + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + dpdissk(i,j,k) = dp(i,j,k) - dt * elem(ie)%derived%divdp(i,j,k) + enddo + enddo + if ( nu_p > 0 .and. rhs_viss /= 0 ) then + ! add contribution from UN-DSS'ed PS dissipation +! dpdiss(:,:) = ( hvcoord%hybi(k+1) - hvcoord%hybi(k) ) * +! elem(ie)%derived%psdiss_biharmonic(:,:) + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + dpdiss(i,j) = elem(ie)%derived%dpdiss_biharmonic(i,j,k) + dpdissk(i,j,k) = dpdissk(i,j,k) - rhs_viss * dt * nu_q * dpdiss(i,j) / elem(ie)%spheremp(i,j) + enddo + enddo + endif + ! IMPOSE ZERO THRESHOLD. do this here so it can be turned off for + ! testing + do q=qbeg, qend + qmin(k,q,ie)=max(qmin(k,q,ie),0.0_r8) + enddo + enddo + endif ! limiter == 8 + + + ! advance Qdp + do q = qbeg, qend + do k = kbeg, kend + ! div( U dp Q), + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + gradQ(i,j,1) = Vstar(i,j,1,k) * elem(ie)%state%Qdp(i,j,k,q,n0_qdp) + gradQ(i,j,2) = Vstar(i,j,2,k) * elem(ie)%state%Qdp(i,j,k,q,n0_qdp) + enddo + enddo + ! 
Qtens(:,:,k) = elem(ie)%state%Qdp(:,:,k,q,n0_qdp) - & + ! dt * divergence_sphere( gradQ , deriv , elem(ie) ) + call divergence_sphere( gradQ , deriv , elem(ie),div ) + + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + Qtens(i,j,k) = elem(ie)%state%Qdp(i,j,k,q,n0_qdp) - dt * div(i,j) + enddo + enddo + + ! optionally add in hyperviscosity computed above: + if ( rhs_viss /= 0 ) then + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + Qtens(i,j,k) = Qtens(i,j,k) + Qtens_biharmonic(i,j,k,q,ie) + enddo + enddo + endif + enddo + + if ( limiter_option == 8) then + ! apply limiter to Q = Qtens / dp_star + call limiter_optim_iter_full( Qtens(:,:,:) , elem(ie)%spheremp(:,:) , qmin(:,q,ie) , & + qmax(:,q,ie) , dpdissk, kbeg, kend ) + endif + + + ! apply mass matrix, overwrite np1 with solution: + ! dont do this earlier, since we allow np1_qdp == n0_qdp + ! and we dont want to overwrite n0_qdp until we are done using it + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%Qdp(i,j,k,q,np1_qdp) = elem(ie)%spheremp(i,j) * Qtens(i,j,k) + enddo + enddo + enddo + + if ( limiter_option == 4 ) then + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1 + ! sign-preserving limiter, applied after mass matrix + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1 +!JMD !$OMP BARRIER +!JMD !$OMP MASTER + call limiter2d_zero(elem(ie)%state%Qdp(:,:,:,q,np1_qdp)) +!JMD !$OMP END MASTER +!JMD !$OMP BARRIER + endif + + kptr = nlev*(q-1) + kbeg - 1 + call edgeVpack(edgeAdvp1 , elem(ie)%state%Qdp(:,:,kbeg:kend,q,np1_qdp) , kblk , kptr , ie ) + enddo + ! only perform this operation on thread which owns the first tracer + if (DSSopt>0) then + if (threadOwnsTracer(hybrid,1)) then + ! all zero so we only have to DSS 1:nlev + if ( DSSopt == DSSomega ) DSSvar => elem(ie)%derived%omega(:,:,:) + if ( DSSopt == DSSdiv_vdp_ave ) DSSvar => elem(ie)%derived%divdp_proj(:,:,:) + ! also DSS extra field + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + DSSvar(i,j,k) = elem(ie)%spheremp(i,j) * DSSvar(i,j,k) + enddo + enddo + enddo + kptr = nlev*qsize + kbeg - 1 + call edgeVpack( edgeAdvp1 , DSSvar(:,:,kbeg:kend), kblk, kptr, ie) + endif + end if + enddo + + call bndry_exchange( hybrid , edgeAdvp1,location='edgeAdvp1') + + do ie = nets, nete + ! only perform this operation on thread which owns the first tracer + if (DSSopt>0) then + if(threadOwnsTracer(hybrid,1)) then + if ( DSSopt == DSSomega ) DSSvar => elem(ie)%derived%omega(:,:,:) + if ( DSSopt == DSSdiv_vdp_ave ) DSSvar => elem(ie)%derived%divdp_proj(:,:,:) + kptr = qsize*nlev + kbeg -1 + call edgeVunpack( edgeAdvp1 , DSSvar(:,:,kbeg:kend) , kblk , kptr , ie ) + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + DSSvar(i,j,k) = DSSvar(i,j,k) * elem(ie)%rspheremp(i,j) + enddo + enddo + enddo + endif + end if + do q = qbeg, qend + kptr = nlev*(q-1) + kbeg - 1 + call edgeVunpack( edgeAdvp1 , elem(ie)%state%Qdp(:,:,kbeg:kend,q,np1_qdp) , kblk , kptr , ie ) + do k = kbeg, kend + !OMP_COLLAPSE_SIMD + !DIR_VECTOR_ALIGNED + do j=1,np + do i=1,np + elem(ie)%state%Qdp(i,j,k,q,np1_qdp) = elem(ie)%rspheremp(i,j) * elem(ie)%state%Qdp(i,j,k,q,np1_qdp) + enddo + enddo + enddo + enddo + enddo +! call t_stopf('euler_step') + + end subroutine euler_step + + + + subroutine limiter2d_zero(Q) + ! mass conserving zero limiter (2D only). to be called just before DSS + ! + ! 
this routine is called inside a DSS loop, and so Q had already + ! been multiplied by the mass matrix. Thus dont include the mass + ! matrix when computing the mass = integral of Q over the element + ! + ! ps is only used when advecting Q instead of Qdp + ! so ps should be at one timelevel behind Q + implicit none + real (kind=r8), intent(inout) :: Q(np,np,nlev) + + ! local +! real (kind=r8) :: dp(np,np) + real (kind=r8) :: mass,mass_new,ml + integer i,j,k + + do k = nlev , 1 , -1 + mass = 0 + do j = 1 , np + do i = 1 , np + !ml = Q(i,j,k)*dp(i,j)*spheremp(i,j) ! see above + ml = Q(i,j,k) + mass = mass + ml + enddo + enddo + + ! negative mass. so reduce all postive values to zero + ! then increase negative values as much as possible + if ( mass < 0 ) Q(:,:,k) = -Q(:,:,k) + mass_new = 0 + do j = 1 , np + do i = 1 , np + if ( Q(i,j,k) < 0 ) then + Q(i,j,k) = 0 + else + ml = Q(i,j,k) + mass_new = mass_new + ml + endif + enddo + enddo + + ! now scale the all positive values to restore mass + if ( mass_new > 0 ) Q(:,:,k) = Q(:,:,k) * abs(mass) / mass_new + if ( mass < 0 ) Q(:,:,k) = -Q(:,:,k) + enddo + end subroutine limiter2d_zero + +!----------------------------------------------------------------------------- +!----------------------------------------------------------------------------- + + subroutine advance_hypervis_scalar( edgeAdv , elem , hvcoord , hybrid , deriv , nt , nt_qdp , nets , nete , dt2 ) + ! hyperviscsoity operator for foward-in-time scheme + ! take one timestep of: + ! Q(:,:,:,np) = Q(:,:,:,np) + dt2*nu*laplacian**order ( Q ) + ! + ! For correct scaling, dt2 should be the same 'dt2' used in the leapfrog advace + use dimensions_mod , only : np, nlev + use hybrid_mod , only : hybrid_t!, PrintHybrid + use hybrid_mod , only : get_loop_ranges + use element_mod , only : element_t + use derivative_mod , only : derivative_t + use edge_mod , only : edgevpack, edgevunpack + use edgetype_mod , only : EdgeBuffer_t + use bndry_mod , only : bndry_exchange + + implicit none + type (EdgeBuffer_t) , intent(inout) :: edgeAdv + type (element_t) , intent(inout), target :: elem(:) + type (hvcoord_t) , intent(in ) :: hvcoord + type (hybrid_t) , intent(in ) :: hybrid + type (derivative_t) , intent(in ) :: deriv + integer , intent(in ) :: nt + integer , intent(in ) :: nt_qdp + integer , intent(in ) :: nets + integer , intent(in ) :: nete + real (kind=r8), intent(in ) :: dt2 + + ! local + real (kind=r8), dimension(np,np,nlev,qsize,nets:nete) :: Qtens + real (kind=r8), dimension(np,np,nlev ) :: dp +! real (kind=r8), dimension( nlev,qsize,nets:nete) :: min_neigh +! real (kind=r8), dimension( nlev,qsize,nets:nete) :: max_neigh + integer :: k,kptr,ie,ic,q,i,j + integer :: kbeg,kend,qbeg,qend + +! NOTE: PGI compiler bug: when using spheremp, rspheremp and ps as pointers to elem(ie)% members, +! data is incorrect (offset by a few numbers actually) +! removed for now. +! real (kind=r8), dimension(:,:), pointer :: spheremp,rspheremp + real (kind=r8) :: dt,dp0 + integer :: kblk,qblk ! The per thead size of the vertical and tracers + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + if ( nu_q == 0 ) return + !if ( hypervis_order /= 2 ) return + + kblk = kend - kbeg + 1 ! calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! calculate size of the block of tracers + + call t_startf('advance_hypervis_scalar') + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! 
hyper viscosity + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + dt = dt2 / hypervis_subcycle_q + + do ic = 1 , hypervis_subcycle_q + do ie = nets, nete + ! Qtens = Q/dp (apply hyperviscsoity to dp0 * Q, not Qdp) + do k = kbeg, kend + ! various options: + ! 1) biharmonic( Qdp ) + ! 2) dp0 * biharmonic( Qdp/dp ) + ! 3) dpave * biharmonic(Q/dp) + ! For trace mass / mass consistenciy, we use #2 when nu_p=0 + ! and #e when nu_p>0, where dpave is the mean mass flux from the nu_p + ! contribution from dynamics. + dp0 = ( hvcoord%hyai(k+1) - hvcoord%hyai(k) ) * hvcoord%ps0 + & + ( hvcoord%hybi(k+1) - hvcoord%hybi(k) ) * hvcoord%ps0 + dp(:,:,k) = elem(ie)%derived%dp(:,:,k) - dt2*elem(ie)%derived%divdp_proj(:,:,k) + if (nu_p>0) then + do q = qbeg, qend + Qtens(:,:,k,q,ie) = elem(ie)%derived%dpdiss_ave(:,:,k)*& + elem(ie)%state%Qdp(:,:,k,q,nt_qdp) / dp(:,:,k) + enddo + else + do q = qbeg, qend + Qtens(:,:,k,q,ie) = dp0*elem(ie)%state%Qdp(:,:,k,q,nt_qdp) / dp(:,:,k) + enddo + endif + enddo + enddo + + ! compute biharmonic operator. Qtens = input and output + call biharmonic_wk_scalar( elem , Qtens , deriv , edgeAdv , hybrid , nets , nete ) + + do ie = nets, nete + !spheremp => elem(ie)%spheremp + do q = qbeg, qend + do k = kbeg, kend + dp0 = ( hvcoord%hyai(k+1) - hvcoord%hyai(k) ) * hvcoord%ps0 + & + ( hvcoord%hybi(k+1) - hvcoord%hybi(k) ) * hvcoord%ps0 + do j = 1 , np + do i = 1 , np + + ! advection Qdp. For mass advection consistency: + ! DIFF( Qdp) ~ dp0 DIFF (Q) = dp0 DIFF ( Qdp/dp ) + elem(ie)%state%Qdp(i,j,k,q,nt_qdp) = elem(ie)%state%Qdp(i,j,k,q,nt_qdp) * elem(ie)%spheremp(i,j) & + - dt * nu_q * Qtens(i,j,k,q,ie) + enddo + enddo + enddo + + if (limiter_option .ne. 0 ) then +!JMD Only need if threading over the vertical +!JMD!$OMP BARRIER +!JMD!$OMP MASTER + ! smooth some of the negativities introduced by diffusion: + call limiter2d_zero( elem(ie)%state%Qdp(:,:,:,q,nt_qdp) ) +!JMD!$OMP END MASTER +!JMD!$OMP BARRIER + endif + + enddo + do q = qbeg, qend + kptr = nlev*(q-1) + kbeg - 1 + call edgeVpack( edgeAdv , elem(ie)%state%Qdp(:,:,kbeg:kend,q,nt_qdp) , kblk, kptr, ie ) + enddo + enddo + + call bndry_exchange( hybrid , edgeAdv,location='advance_hypervis_scalar') + + do ie = nets, nete + do q = qbeg, qend + kptr = nlev*(q-1) + kbeg - 1 + call edgeVunpack( edgeAdv , elem(ie)%state%Qdp(:,:,kbeg:kend,q,nt_qdp) , kblk, kptr, ie ) + enddo + !rspheremp => elem(ie)%rspheremp + do q = qbeg, qend + ! apply inverse mass matrix + do k = kbeg, kend + elem(ie)%state%Qdp(:,:,k,q,nt_qdp) = elem(ie)%rspheremp(:,:) * elem(ie)%state%Qdp(:,:,k,q,nt_qdp) + enddo + enddo + enddo + + enddo + call t_stopf('advance_hypervis_scalar') + end subroutine advance_hypervis_scalar + + subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) + ! + ! This routine is called at the end of the vertically Lagrangian + ! dynamics step to compute the vertical flux needed to get back + ! to reference eta levels + ! + ! map tracers + ! map velocity components + ! map temperature (either by mapping thermal energy or virtual temperature over log(p) + ! (controlled by vert_remap_uvTq_alg > -20 or <= -20) + ! 
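+    ! remap1 maps layer-integrated fields from the Lagrangian layer thicknesses
+    ! (dp_star_dry / dp_star_moist) back to the reference layer thicknesses
+    ! (dp_dry / dp_moist); because the tracers are remapped as Qdp, the column
+    ! tracer mass is what the remap below is constructed to preserve.
+    !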
+ use hybvcoord_mod, only : hvcoord_t + use vertremap_mod, only : remap1 + use hybrid_mod , only : hybrid_t, config_thread_region,get_loop_ranges, PrintHybrid + use fvm_control_volume_mod, only : fvm_struct + use dimensions_mod , only : ntrac + use dimensions_mod, only : lcp_moist, kord_tr,kord_tr_cslam + use cam_logfile, only : iulog + use physconst, only : pi,get_thermal_energy,get_dp,get_virtual_temp + use physconst , only : thermodynamic_active_species_idx_dycore + use thread_mod , only : omp_set_nested + use control_mod, only: vert_remap_uvTq_alg + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + type(fvm_struct), intent(inout) :: fvm(:) + type (element_t), intent(inout) :: elem(:) + ! + real (kind=r8) :: dpc_star(nc,nc,nlev) !Lagrangian levels on CSLAM grid + + type (hvcoord_t) :: hvcoord + integer :: ie,i,j,k,np1,nets,nete,np1_qdp,q, m_cnst + real (kind=r8), dimension(np,np,nlev) :: dp_moist,dp_star_moist, dp_dry,dp_star_dry + real (kind=r8), dimension(np,np,nlev) :: internal_energy_star + real (kind=r8), dimension(np,np,nlev,2):: ttmp + real(r8), parameter :: rad2deg = 180.0_r8/pi + integer :: region_num_threads,qbeg,qend,kord_uvT(1) + type (hybrid_t) :: hybridnew,hybridnew2 + real (kind=r8) :: ptop + + kord_uvT = vert_remap_uvTq_alg + + ptop = hvcoord%hyai(1)*hvcoord%ps0 + do ie=nets,nete + ! + ! prepare for mapping of temperature + ! + if (vert_remap_uvTq_alg>-20) then + if (lcp_moist) then + ! + ! compute internal energy on Lagrangian levels + ! (do it here since qdp is overwritten by remap1) + ! + call get_thermal_energy(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), & + elem(ie)%state%t(:,:,:,np1),elem(ie)%state%dp3d(:,:,:,np1),internal_energy_star, & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + end if + else + ! + ! map Tv over log(p) following FV and FV3 + ! + call get_virtual_temp(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), & + internal_energy_star,dp_dry=elem(ie)%state%dp3d(:,:,:,np1), & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + internal_energy_star = internal_energy_star*elem(ie)%state%t(:,:,:,np1) + end if + ! + ! update final psdry + ! + elem(ie)%state%psdry(:,:) = ptop + & + sum(elem(ie)%state%dp3d(:,:,:,np1),3) + ! + ! compute dry vertical coordinate (Lagrangian and reference levels) + ! + do k=1,nlev + dp_star_dry(:,:,k) = elem(ie)%state%dp3d(:,:,k,np1) + dp_dry(:,:,k) = ( hvcoord%hyai(k+1) - hvcoord%hyai(k) )*hvcoord%ps0 + & + ( hvcoord%hybi(k+1) - hvcoord%hybi(k) )*elem(ie)%state%psdry(:,:) + elem(ie)%state%dp3d(:,:,k,np1) = dp_dry(:,:,k) + enddo + ! + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp),2,& + thermodynamic_active_species_idx_dycore,dp_star_dry,dp_star_moist(:,:,:)) + ! + ! Check if Lagrangian leves have crossed + ! + if (minval(dp_star_moist)<1.0E-12_r8) then + write(iulog,*) "NEGATIVE LAYER THICKNESS DIAGNOSTICS:" + write(iulog,*) " " + do j=1,np + do i=1,np + if (minval(dp_star_moist(i,j,:))<1.0e-12_r8) then + write(iulog,'(A13,2f6.2)') "(lon,lat) = ",& + elem(ie)%spherep(i,j)%lon*rad2deg,elem(ie)%spherep(i,j)%lat*rad2deg + write(iulog,*) " " + do k=1,nlev + write(iulog,'(A21,I5,A1,f12.8,3f8.2)') "k,dp_star_moist,u,v,T: ",k," ",dp_star_moist(i,j,k)/100.0_r8,& + elem(ie)%state%v(i,j,1,k,np1),elem(ie)%state%v(i,j,2,k,np1),elem(ie)%state%T(i,j,k,np1) + end do + end if + end do + end do + call endrun('negative moist layer thickness. 
timestep or remap time too large') + endif + + call remap1(elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp),np,1,qsize,qsize,dp_star_dry,dp_dry,ptop,0,.true.,kord_tr) + ! + ! compute moist reference pressure level thickness + ! + call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp),2,& + thermodynamic_active_species_idx_dycore,dp_dry,dp_moist(:,:,:)) + + ! + ! Remapping of temperature + ! + if (vert_remap_uvTq_alg>-20) then + ! + ! remap internal energy and back out temperature + ! + if (lcp_moist) then + call remap1(internal_energy_star,np,1,1,1,dp_star_dry,dp_dry,ptop,1,.true.,kord_uvT) + ! + ! compute sum c^(l)_p*m^(l)*dp on arrival (Eulerian) grid + ! + ttmp(:,:,:,1) = 1.0_r8 + call get_thermal_energy(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), & + ttmp(:,:,:,1),dp_dry,ttmp(:,:,:,2), & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + elem(ie)%state%t(:,:,:,np1)=internal_energy_star/ttmp(:,:,:,2) + else + internal_energy_star(:,:,:)=elem(ie)%state%t(:,:,:,np1)*dp_star_moist + call remap1(internal_energy_star,np,1,1,1,dp_star_moist,dp_moist,ptop,1,.true.,kord_uvT) + elem(ie)%state%t(:,:,:,np1)=internal_energy_star/dp_moist + end if + else + ! + ! map Tv over log(p); following FV and FV3 + ! + call remap1(internal_energy_star,np,1,1,1,dp_star_moist,dp_moist,ptop,1,.false.,kord_uvT) + call get_virtual_temp(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), & + ttmp(:,:,:,1),dp_dry=dp_dry, & + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + ! + ! convert new Tv to T + ! + elem(ie)%state%t(:,:,:,np1)=internal_energy_star/ttmp(:,:,:,1) + end if + ! + ! remap velocity components + ! + call remap1(elem(ie)%state%v(:,:,1,:,np1),np,1,1,1,dp_star_moist,dp_moist,ptop,-1,.false.,kord_uvT) + call remap1(elem(ie)%state%v(:,:,2,:,np1),np,1,1,1,dp_star_moist,dp_moist,ptop,-1,.false.,kord_uvT) + enddo + + if (ntrac>0) then + ! + ! vertical remapping of CSLAM tracers + ! + do ie=nets,nete + dpc_star=fvm(ie)%dp_fvm(1:nc,1:nc,:) + do k=1,nlev + do j=1,nc + do i=1,nc + ! + ! new pressure levels on CSLAM grid + ! + fvm(ie)%dp_fvm(i,j,k) = (hvcoord%hyai(k+1) - hvcoord%hyai(k))*hvcoord%ps0 + & + (hvcoord%hybi(k+1) - hvcoord%hybi(k))*fvm(ie)%psc(i,j) + end do + end do + end do + if(ntrac>tracer_num_threads) then + call omp_set_nested(.true.) + !$OMP PARALLEL NUM_THREADS(tracer_num_threads), DEFAULT(SHARED), PRIVATE(hybridnew2,qbeg,qend) + hybridnew2 = config_thread_region(hybrid,'ctracer') + call get_loop_ranges(hybridnew2, qbeg=qbeg, qend=qend) + call remap1(fvm(ie)%c(1:nc,1:nc,:,1:ntrac),nc,qbeg,qend,ntrac,dpc_star, & + fvm(ie)%dp_fvm(1:nc,1:nc,:),ptop,0,.false.,kord_tr_cslam) + !$OMP END PARALLEL + call omp_set_nested(.false.) 
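+          ! The branch above remaps the CSLAM tracers with nested OpenMP threads
+          ! over the tracer dimension (tracer_num_threads sub-ranges); the branch
+          ! below handles the case with too few tracers to split and remaps all
+          ! ntrac tracers in a single remap1 call.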
+ else + call remap1(fvm(ie)%c(1:nc,1:nc,:,1:ntrac),nc,1,ntrac,ntrac,dpc_star, & + fvm(ie)%dp_fvm(1:nc,1:nc,:),ptop,0,.false.,kord_tr_cslam) + endif + enddo + end if + end subroutine vertical_remap + +end module prim_advection_mod diff --git a/src/dynamics/se/dycore/prim_driver_mod.F90 b/src/dynamics/se/dycore/prim_driver_mod.F90 new file mode 100644 index 00000000..ffc010d1 --- /dev/null +++ b/src/dynamics/se/dycore/prim_driver_mod.F90 @@ -0,0 +1,684 @@ +!#define _DBG_ print *,"file: ",__FILE__," line: ",__LINE__," ithr: ",hybrid%ithr +#define _DBG_ +module prim_driver_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_logfile, only: iulog + use cam_abortutils, only: endrun + use dimensions_mod, only: np, nlev, nelem, nelemd, GlobalUniqueCols, qsize, nc,nhc + use hybrid_mod, only: hybrid_t, config_thread_region, PrintHybrid + use derivative_mod, only: derivative_t + use fvm_control_volume_mod, only: fvm_struct + + use element_mod, only: element_t, timelevels, allocate_element_desc + use thread_mod , only: horz_num_threads, vert_num_threads, tracer_num_threads + use thread_mod , only: omp_set_nested + use perf_mod, only: t_startf, t_stopf + use prim_init, only: gp, fvm_corners, fvm_points + + implicit none + private + public :: prim_init2, prim_run_subcycle, prim_finalize + public :: prim_set_dry_mass + +contains + +!=============================================================================! + + subroutine prim_init2(elem, fvm, hybrid, nets, nete, tl, hvcoord) + use dimensions_mod, only: irecons_tracer, fvm_supercycling + use dimensions_mod, only: fv_nphys, ntrac, nc + use parallel_mod, only: syncmp + use time_mod, only: timelevel_t, tstep, phys_tscale, nsplit, TimeLevel_Qdp + use time_mod, only: nsplit_baseline,rsplit_baseline + use prim_state_mod, only: prim_printstate + use control_mod, only: runtype, topology, rsplit, qsplit, rk_stage_user, & + nu, nu_q, nu_div, hypervis_subcycle, hypervis_subcycle_q, & + hypervis_subcycle_sponge, variable_nsplit + use fvm_mod, only: fill_halo_fvm,ghostBufQnhc_h + use thread_mod, only: omp_get_thread_num + use global_norms_mod, only: print_cfl + use hybvcoord_mod, only: hvcoord_t + use prim_advection_mod, only: prim_advec_init2,deriv + use prim_advance_mod, only: compute_omega + + type (element_t), intent(inout) :: elem(:) + type (fvm_struct), intent(inout) :: fvm(:) + type (hybrid_t), intent(in) :: hybrid + + type (TimeLevel_t), intent(inout) :: tl ! time level struct + type (hvcoord_t), intent(inout) :: hvcoord ! hybrid vertical coordinate struct + + integer, intent(in) :: nets ! starting thread element number (private) + integer, intent(in) :: nete ! ending thread element number (private) + + + ! ================================== + ! Local variables + ! ================================== + +! variables used to calculate CFL + real (kind=r8) :: dtnu ! timestep*viscosity parameter + real (kind=r8) :: dt_dyn_vis ! viscosity timestep used in dynamics + real (kind=r8) :: dt_dyn_del2_sponge, dt_remap + real (kind=r8) :: dt_tracer_vis ! viscosity timestep used in tracers + + real (kind=r8) :: dp + + integer :: i,j,k,ie,t,q + integer :: n0,n0_qdp + + + do ie=nets,nete + elem(ie)%derived%FM=0.0_r8 + elem(ie)%derived%FT=0.0_r8 + elem(ie)%derived%FQ=0.0_r8 + end do + + ! ========================== + ! begin executable code + ! ========================== + !call prim_advance_init(hybrid%par,elem) + + ! compute most restrictive dt*nu for use by variable res viscosity: + ! 
compute timestep seen by viscosity operator: + dt_dyn_vis = tstep + dt_dyn_del2_sponge = tstep + dt_tracer_vis=tstep*qsplit + dt_remap=dt_tracer_vis*rsplit + ! compute most restrictive condition: + ! note: dtnu ignores subcycling + dtnu=max(dt_dyn_vis*max(nu,nu_div), dt_tracer_vis*nu_q) + ! compute actual viscosity timesteps with subcycling + dt_tracer_vis = dt_tracer_vis/hypervis_subcycle_q + dt_dyn_vis = dt_dyn_vis/hypervis_subcycle + dt_dyn_del2_sponge = dt_dyn_del2_sponge/hypervis_subcycle_sponge + if (variable_nsplit) then + nsplit_baseline=nsplit + rsplit_baseline=rsplit + end if + ! ================================== + ! Initialize derivative structure + ! ================================== + call Prim_Advec_Init2(fvm_corners, fvm_points) + if (fv_nphys>0.and.nc.ne.fv_nphys) then + ! + ! need to fill halo for dp_coupling for fvm2phys mapping + ! + call fill_halo_fvm(ghostBufQnhc_h,elem,fvm,hybrid,nets,nete,nhc,1,nlev,nlev) + end if +! !$OMP BARRIER +! if (hybrid%ithr==0) then +! call syncmp(hybrid%par) +! end if +! !$OMP BARRIER + + if (topology /= "cube") then + call endrun('Error: only cube topology supported for primaitve equations') + endif + + ! CAM has set tstep based on dtime before calling prim_init2(), + ! so only now does HOMME learn the timstep. print them out: + call print_cfl(elem,hybrid,nets,nete,dtnu,& + !p top and p mid levels + hvcoord%hyai(1)*hvcoord%ps0,(hvcoord%hyam(:)+hvcoord%hybm(:))*hvcoord%ps0,& + !dt_remap,dt_tracer_fvm,dt_tracer_se + tstep*qsplit*rsplit,tstep*qsplit*fvm_supercycling,tstep*qsplit,& + !dt_dyn,dt_dyn_visco,dt_tracer_visco, dt_phys + tstep,dt_dyn_vis,dt_dyn_del2_sponge,dt_tracer_vis,tstep*nsplit*qsplit*rsplit) + + if (hybrid%masterthread) then + if (phys_tscale/=0) then + write(iulog,'(a,2f9.2)') "CAM physics timescale: ",phys_tscale + endif + write(iulog,'(a,2f9.2)') "CAM dtime (dt_phys): ",tstep*nsplit*qsplit*rsplit + + write(iulog,*) "CAM-SE uses dry-mass vertical coordinates" + end if + + n0=tl%n0 + call TimeLevel_Qdp( tl, qsplit, n0_qdp) + call compute_omega(hybrid,n0,n0_qdp,elem,deriv,nets,nete,dt_remap,hvcoord) + + if (hybrid%masterthread) write(iulog,*) "initial state:" + call prim_printstate(elem, tl, hybrid,nets,nete, fvm) + + end subroutine prim_init2 + +!=======================================================================================================! + + + subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubstep, omega_cn) +! +! advance all variables (u,v,T,ps,Q,C) from time t to t + dt_q +! +! for the RK schemes: +! input: +! tl%nm1 not used +! tl%n0 data at time t +! tl%np1 new values at t+dt_q +! +! then we update timelevel pointers: +! tl%nm1 = tl%n0 +! tl%n0 = tl%np1 +! so that: +! tl%nm1 tracers: t dynamics: t+(qsplit-1)*dt +! tl%n0 time t + dt_q +! +! for the implicit schemes: +! +! input: +! tl%nm1 variables at t-1 level are stored fro BDF2 scheme +! tl%n0 data at time t +! tl%np1 new values at t+dt_q +! generally dt_q = t for BDF2, so its t+1 +! +! then we update timelevel pointers: +! tl%nm1 = tl%n0 +! tl%n0 = tl%np1 +! so that: +! tl%nm1 tracers: t dynamics: t+(qsplit-1)*dt +! tl%n0 time t + dt_q +! +! 
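+! Timestep hierarchy used below:
+!   dt       - dynamics step (input argument)
+!   dt_q     = qsplit*dt       - tracer advection step
+!   dt_remap = rsplit*dt_q     - vertical remap step
+!   dt_phys  = nsplit*dt_remap - CAM physics coupling step
+!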
+ use hybvcoord_mod, only : hvcoord_t + use time_mod, only: TimeLevel_t, timelevel_update, timelevel_qdp, nsplit + use control_mod, only: statefreq,qsplit, rsplit, variable_nsplit + use prim_advance_mod, only: applycamforcing + use prim_advance_mod, only: calc_tot_energy_dynamics,compute_omega + use prim_state_mod, only: prim_printstate, adjust_nsplit + use prim_advection_mod, only: vertical_remap, deriv + use thread_mod, only: omp_get_thread_num + use perf_mod , only: t_startf, t_stopf + use fvm_mod , only: fill_halo_fvm, ghostBufQnhc_h + use dimensions_mod, only: ntrac,fv_nphys, ksponge_end + + type (element_t) , intent(inout) :: elem(:) + type(fvm_struct), intent(inout) :: fvm(:) + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + type (hvcoord_t), intent(in) :: hvcoord ! hybrid vertical coordinate struct + integer, intent(in) :: nets ! starting thread element number (private) + integer, intent(in) :: nete ! ending thread element number (private) + real(kind=r8), intent(in) :: dt ! "timestep dependent" timestep + type (TimeLevel_t), intent(inout):: tl + integer, intent(in) :: nsubstep ! nsubstep = 1 .. nsplit + real (kind=r8) , intent(inout):: omega_cn(2,nets:nete) !min and max of vertical Courant number + + real(kind=r8) :: dt_q, dt_remap, dt_phys + integer :: ie, q,k,n0_qdp,np1_qdp,r, nstep_end,region_num_threads,i,j + real (kind=r8) :: dp_np1(np,np) + real (kind=r8) :: dp_start(np,np,nlev+1,nets:nete),dp_end(np,np,nlev,nets:nete) + logical :: compute_diagnostics + + ! =================================== + ! Main timestepping loop + ! =================================== + dt_q = dt*qsplit + nstep_end = tl%nstep + qsplit + dt_remap=dt_q*rsplit + nstep_end = tl%nstep + qsplit*rsplit ! nstep at end of this routine + dt_phys = nsplit*dt_remap + + ! compute diagnostics for STDOUT + compute_diagnostics=.false. + + if (statefreq>0) then + if (MODULO(nstep_end,statefreq)==0 .or. nstep_end==tl%nstep0) then + compute_diagnostics=.true. + endif + end if + ! + ! initialize variables for computing vertical Courant number + ! + if (variable_nsplit.or.compute_diagnostics) then + if (nsubstep==1) then + do ie=nets,nete + omega_cn(1,ie) = 0.0_r8 + omega_cn(2,ie) = 0.0_r8 + end do + end if + do ie=nets,nete + dp_start(:,:,1:nlev,ie) = elem(ie)%state%dp3d(:,:,:,tl%n0) + dp_start(:,:,nlev+1,ie) = elem(ie)%state%dp3d(:,:,nlev,tl%n0) + end do + endif + + + call TimeLevel_Qdp( tl, qsplit, n0_qdp) + + call calc_tot_energy_dynamics(elem,fvm,nets,nete,tl%n0,n0_qdp,'dAF') + call ApplyCAMForcing(elem,fvm,tl%n0,n0_qdp,dt_remap,dt_phys,nets,nete,nsubstep) + call calc_tot_energy_dynamics(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBD') + do r=1,rsplit + if (r.ne.1) call TimeLevel_update(tl,"leapfrog") + call prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,r) + enddo + + + ! defer final timelevel update until after remap and diagnostics + call TimeLevel_Qdp( tl, qsplit, n0_qdp, np1_qdp) + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! + ! apply vertical remap + ! always for tracers + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + call calc_tot_energy_dynamics(elem,fvm,nets,nete,tl%np1,np1_qdp,'dAD') + + if (variable_nsplit.or.compute_diagnostics) then + ! + ! initialize variables for computing vertical Courant number + ! 
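+       ! dp_end saves the Lagrangian (floating) layer thickness at np1 just
+       ! before the vertical remap; together with dp_start (saved at n0 above)
+       ! it feeds the vertical Courant number diagnostic omega_cn.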
+ do ie=nets,nete + dp_end(:,:,:,ie) = elem(ie)%state%dp3d(:,:,:,tl%np1) + end do + end if + call t_startf('vertical_remap') + call vertical_remap(hybrid,elem,fvm,hvcoord,tl%np1,np1_qdp,nets,nete) + call t_stopf('vertical_remap') + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! time step is complete. + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + call calc_tot_energy_dynamics(elem,fvm,nets,nete,tl%np1,np1_qdp,'dAR') + + if (nsubstep==nsplit) then + call compute_omega(hybrid,tl%np1,np1_qdp,elem,deriv,nets,nete,dt_remap,hvcoord) + end if + + ! now we have: + ! u(nm1) dynamics at t+dt_remap - 2*dt + ! u(n0) dynamics at t+dt_remap - dt + ! u(np1) dynamics at t+dt_remap + ! + ! Q(1) Q at t+dt_remap + + + + ! ================================= + ! update dynamics time level pointers + ! ================================= + call TimeLevel_update(tl,"leapfrog") + ! note: time level update for fvm tracers takes place in fvm_mod + + ! now we have: + ! u(nm1) dynamics at t+dt_remap - dt (Robert-filtered) + ! u(n0) dynamics at t+dt_remap + ! u(np1) undefined + + + ! + ! Compute vertical Courant numbers + ! + if (variable_nsplit.or.compute_diagnostics) then + do ie=nets,nete + do k=1,nlev + do j=1,np + do i=1,np + if (dp_end(i,j,k,ie)0.and.nsubstep==nsplit.and.nc.ne.fv_nphys) then + ! + ! fill the fvm halo for mapping in d_p_coupling if + ! physics grid resolution is different than fvm resolution + ! + call fill_halo_fvm(ghostBufQnhc_h, elem,fvm,hybrid,nets,nete,nhc,1,nlev,nlev) + end if + + end subroutine prim_run_subcycle + + + subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) + ! + ! Take qsplit dynamics steps and one tracer step + ! for vertically lagrangian option, this subroutine does only the horizontal step + ! + ! input: + ! tl%nm1 not used + ! tl%n0 data at time t + ! tl%np1 new values at t+dt_q + ! + ! then we update timelevel pointers: + ! tl%nm1 = tl%n0 + ! tl%n0 = tl%np1 + ! so that: + ! tl%nm1 tracers: t dynamics: t+(qsplit-1)*dt + ! tl%n0 time t + dt_q + ! + use hybvcoord_mod, only: hvcoord_t + use time_mod, only: TimeLevel_t, timelevel_update + use control_mod, only: statefreq, qsplit, nu_p + use thread_mod, only: omp_get_thread_num + use prim_advance_mod, only: prim_advance_exp + use prim_advection_mod, only: prim_advec_tracers_remap, prim_advec_tracers_fvm, deriv + use derivative_mod, only: subcell_integration + use hybrid_mod, only: set_region_num_threads, config_thread_region, get_loop_ranges + use dimensions_mod, only: ntrac,fvm_supercycling,fvm_supercycling_jet + use dimensions_mod, only: kmin_jet, kmax_jet + use fvm_mod, only: ghostBufQnhc_vh,ghostBufQ1_vh, ghostBufFlux_vh + use fvm_mod, only: ghostBufQ1_h,ghostBufQnhcJet_h, ghostBufFluxJet_h + +#ifdef waccm_debug + use cam_history, only: outfld +#endif + + + type (element_t) , intent(inout) :: elem(:) + type(fvm_struct), intent(inout) :: fvm(:) + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + type (hvcoord_t), intent(in) :: hvcoord ! hybrid vertical coordinate struct + integer, intent(in) :: nets ! starting thread element number (private) + integer, intent(in) :: nete ! ending thread element number (private) + real(kind=r8), intent(in) :: dt ! "timestep dependent" timestep + type (TimeLevel_t), intent(inout) :: tl + integer, intent(in) :: rstep ! 
vertical remap subcycling step + + type (hybrid_t):: hybridnew,hybridnew2 + real(kind=r8) :: st, st1, dp, dt_q + integer :: ie,t,q,k,i,j,n, n_Q + integer :: ithr + integer :: region_num_threads + integer :: kbeg,kend + + real (kind=r8) :: tempdp3d(np,np), x + real (kind=r8) :: tempmass(nc,nc) + real (kind=r8) :: tempflux(nc,nc,4) + + real (kind=r8) :: dp_np1(np,np) + + + dt_q = dt*qsplit + ! =============== + ! initialize mean flux accumulation variables and save some variables at n0 + ! for use by advection + ! =============== + do ie=nets,nete + elem(ie)%derived%vn0=0 ! mean horizontal mass flux + elem(ie)%derived%omega=0 + if (nu_p>0) then + elem(ie)%derived%dpdiss_ave=0 + elem(ie)%derived%dpdiss_biharmonic=0 + endif + + ! dp at time t: use floating lagrangian levels: + elem(ie)%derived%dp(:,:,:)=elem(ie)%state%dp3d(:,:,:,tl%n0) + enddo + + ! =============== + ! Dynamical Step + ! =============== + n_Q = tl%n0 ! n_Q = timelevel of FV tracers at time t. need to save this + ! SE tracers only carry 2 timelevels + + call t_startf('prim_advance_exp') +! ithr = 0 ! omp_get_thread_num() +! vybrid = hybrid_create(hybrid%par,ithr) + + call prim_advance_exp(elem, fvm, deriv, hvcoord, & + hybrid, dt, tl, nets, nete) + + call t_stopf('prim_advance_exp') + + do n=2,qsplit + call TimeLevel_update(tl,"leapfrog") + + call t_startf('prim_advance_exp') + + call prim_advance_exp(elem, fvm, deriv, hvcoord, & + hybrid, dt, tl, nets, nete) + + call t_stopf('prim_advance_exp') + + ! defer final timelevel update until after Q update. + enddo +#ifdef HOMME_TEST_SUB_ELEMENT_MASS_FLUX + if (ntrac>0.and.rstep==1) then + do ie=nets,nete + do k=1,nlev + tempdp3d = elem(ie)%state%dp3d(:,:,k,tl%np1) - & + elem(ie)%derived%dp(:,:,k) + call subcell_integration(tempdp3d, np, nc, elem(ie)%metdet,tempmass) + tempflux = dt_q*elem(ie)%sub_elem_mass_flux(:,:,:,k) + do i=1,nc + do j=1,nc + x = SUM(tempflux(i,j,:)) + if (ABS(tempmass(i,j)).lt.1e-11_r8 .and. 1e-11_r8.lt.ABS(x)) then + write(iulog,*) __FILE__,__LINE__,"**CSLAM mass-flux ERROR***",ie,k,i,j,tempmass(i,j),x + call endrun('**CSLAM mass-flux ERROR***') + elseif (1e-5_r8.lt.ABS((tempmass(i,j)-x)/tempmass(i,j))) then + write(iulog,*) __FILE__,__LINE__,"**CSLAM mass-flux ERROR**",ie,k,i,j,tempmass(i,j),x,& + ABS((tempmass(i,j)-x)/tempmass(i,j)) + call endrun('**CSLAM mass-flux ERROR**') + endif + end do + end do + end do + end do + end if +#endif + + ! current dynamics state variables: + ! derived%dp = dp at start of timestep + ! derived%vn0 = mean horiz. flux: U*dp + ! rsplit>0 + ! state%v(:,:,:,np1) = velocity on lagrangian levels + ! state%dp3d(:,:,:,np1) = dp3d + ! + + + ! =============== + ! Tracer Advection. + ! in addition, this routine will apply the DSS to: + ! derived%omega = + ! Tracers are always vertically lagrangian. + ! =============== + ! Advect tracers if their count is > 0. + ! special case in CAM: if CSLAM tracers are turned on , qsize=1 but this tracer should + ! not be advected. This will be cleaned up when the physgrid is merged into CAM trunk + ! Currently advecting all species + if (qsize > 0) then + + call t_startf('prim_advec_tracers_remap') + if(ntrac>0) then + ! Deactivate threading in the tracer dimension if this is a CSLAM run + region_num_threads = 1 + else + region_num_threads=tracer_num_threads + endif + call omp_set_nested(.true.) + !$OMP PARALLEL NUM_THREADS(region_num_threads), DEFAULT(SHARED), PRIVATE(hybridnew) + if(ntrac>0) then + ! 
Deactivate threading in the tracer dimension if this is a CSLAM run + hybridnew = config_thread_region(hybrid,'serial') + else + hybridnew = config_thread_region(hybrid,'tracer') + endif + call Prim_Advec_Tracers_remap(elem, deriv,hvcoord,hybridnew,dt_q,tl,nets,nete) + !$OMP END PARALLEL + call omp_set_nested(.false.) + call t_stopf('prim_advec_tracers_remap') + end if + ! + ! only run fvm transport every fvm_supercycling rstep + ! + if (ntrac>0) then + ! + ! FVM transport + ! + if ((mod(rstep,fvm_supercycling) == 0).and.(mod(rstep,fvm_supercycling_jet) == 0)) then + +! call omp_set_nested(.true.) +! !$OMP PARALLEL NUM_THREADS(vert_num_threads), DEFAULT(SHARED), PRIVATE(hybridnew2,kbeg,kend) +! hybridnew2 = config_thread_region(hybrid,'vertical') +! call get_loop_ranges(hybridnew2,kbeg=kbeg,kend=kend) + call Prim_Advec_Tracers_fvm(elem,fvm,hvcoord,hybrid,& + dt_q,tl,nets,nete,ghostBufQnhc_vh,ghostBufQ1_vh, ghostBufFlux_vh,1,nlev) +! !$OMP END PARALLEL +! call omp_set_nested(.false.) + ! + ! to avoid accumulation of truncation error overwrite CSLAM surface pressure with SE + ! surface pressure + ! + do ie=nets,nete + ! + ! overwrite PSDRY on CSLAM grid with SE PSDRY integrated over CSLAM control volume + ! + ! call subcell_integration(elem(ie)%state%psdry(:,:), np, nc, elem(ie)%metdet,fvm(ie)%psc) + ! fvm(ie)%psc = fvm(ie)%psc*fvm(ie)%inv_se_area_sphere + ! + ! Update CSLAM surface pressure + ! + do j=1,nc + do i=1,nc + fvm(ie)%psc(i,j) = sum(fvm(ie)%dp_fvm(i,j,:)) + hvcoord%hyai(1)*hvcoord%ps0 + end do + end do + end do + else if ((mod(rstep,fvm_supercycling_jet) == 0)) then + ! + ! shorter fvm time-step in jet region + ! + call Prim_Advec_Tracers_fvm(elem,fvm,hvcoord,hybrid,& + dt_q,tl,nets,nete,ghostBufQnhcJet_h,ghostBufQ1_h, ghostBufFluxJet_h,kmin_jet,kmax_jet) + end if + +#ifdef waccm_debug + do ie=nets,nete + call outfld('CSLAM_gamma', RESHAPE(fvm(ie)%CSLAM_gamma(:,:,:,1), & + (/nc*nc,nlev/)), nc*nc, ie) + end do +#endif + endif + + end subroutine prim_step + + +!=======================================================================================================! + + + subroutine prim_finalize(hybrid) + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) + + ! ========================== + ! end of the hybrid program + ! ========================== + end subroutine prim_finalize + +!========================================================================================= + + subroutine prim_set_dry_mass(elem, hvcoord,initial_global_ave_dry_ps,q) + use element_mod, only: element_t + use hybvcoord_mod , only: hvcoord_t + use dimensions_mod, only: nelemd, nlev, np + use constituents, only: cnst_type, qmin, pcnst + use cam_logfile, only: iulog + use spmd_utils, only: masterproc + + type (element_t) , intent(inout):: elem(:) + type (hvcoord_t) , intent(in) :: hvcoord + real (kind=r8), intent(in) :: initial_global_ave_dry_ps + real (kind=r8), intent(inout):: q(np,np,nlev,nelemd,pcnst) + + ! 
local + real (kind=r8) :: global_ave_ps_inic,dp_tmp, factor(np,np,nlev) + integer :: ie, i, j ,k, m_cnst + + if (initial_global_ave_dry_ps == 0) return; + + call get_global_ave_surface_pressure(elem, global_ave_ps_inic) + + do ie=1,nelemd + elem(ie)%state%psdry(:,:)=elem(ie)%state%psdry(:,:)*(initial_global_ave_dry_ps/global_ave_ps_inic) + do k=1,nlev + do j = 1,np + do i = 1,np + dp_tmp = ((hvcoord%hyai(k+1) - hvcoord%hyai(k))*hvcoord%ps0)+& + ((hvcoord%hybi(k+1) - hvcoord%hybi(k))*elem(ie)%state%psdry(i,j)) + factor(i,j,k) = elem(ie)%state%dp3d(i,j,k,1)/dp_tmp + elem(ie)%state%dp3d(i,j,k,:) = dp_tmp + end do + end do + end do + ! + ! conserve initial condition mass of 'wet' tracers (following dryairm.F90 for FV dycore) + ! and conserve mixing ratio (not mass) of 'dry' tracers + ! + do m_cnst=1,pcnst + if (cnst_type(m_cnst).ne.'dry') then + do k=1,nlev + do j = 1,np + do i = 1,np + q(i,j,k,ie,m_cnst) = q(i,j,k,ie,m_cnst)*factor(i,j,k) + q(i,j,k,ie,m_cnst) = max(qmin(m_cnst),q(i,j,k,ie,m_cnst)) + end do + end do + end do + end if + end do + end do + if (masterproc) then + write (iulog,*) "------ info from prim_set_dry_mass -----------------------------------------------------------" + write (iulog,*) "Scaling dry surface pressure to global average of = ",& + initial_global_ave_dry_ps/100.0_r8,"hPa" + write (iulog,*) "Average dry surface pressure in initial condition = ",global_ave_ps_inic/100.0_r8,"hPa" + write (iulog,*) "Average dry surface pressure change = ",& + initial_global_ave_dry_ps-global_ave_ps_inic,"Pa" + write (iulog,*) "Mixing ratios that are wet have been scaled so that total mass of tracer is conserved" + write (iulog,*) "Mixing ratios that are dry have not been changed (mass not conserved in scaling process)" + write (iulog,*) "------ end info from prim_set_dry_mass -------------------------------------------------------" + endif + end subroutine prim_set_dry_mass + + subroutine get_global_ave_surface_pressure(elem, global_ave_ps_inic) + use element_mod , only : element_t + use dimensions_mod , only : np + use global_norms_mod , only : global_integral + use hybrid_mod , only : config_thread_region, get_loop_ranges, hybrid_t + use parallel_mod , only : par + + type (element_t) , intent(in) :: elem(:) + real (kind=r8), intent(out) :: global_ave_ps_inic + + ! local + real (kind=r8), allocatable :: tmp(:,:,:) + type (hybrid_t) :: hybrid + integer :: ie, nets, nete + + !JMD $OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n) + !JMD hybrid = config_thread_region(par,'horizontal') + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid,ibeg=nets,iend=nete) + allocate(tmp(np,np,nets:nete)) + + do ie=nets,nete + tmp(:,:,ie)=elem(ie)%state%psdry(:,:) + enddo + global_ave_ps_inic = global_integral(elem, tmp(:,:,nets:nete),hybrid,np,nets,nete) + deallocate(tmp) + end subroutine get_global_ave_surface_pressure + +end module prim_driver_mod diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 new file mode 100644 index 00000000..afbd9486 --- /dev/null +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -0,0 +1,395 @@ +module prim_init + + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: nc + use reduction_mod, only: reductionbuffer_ordered_1d_t + use quadrature_mod, only: quadrature_t, gausslobatto + + implicit none + private + save + + public :: prim_init1 + + real(r8), public :: fvm_corners(nc+1) ! fvm cell corners on reference element + real(r8), public :: fvm_points(nc) ! 
fvm cell centers on reference element + + type (quadrature_t), public :: gp ! element GLL points + type (ReductionBuffer_ordered_1d_t) :: red ! reduction buffer (shared) + +contains + subroutine prim_init1(elem, fvm, par, Tl) + use cam_logfile, only: iulog + use shr_sys_mod, only: shr_sys_flush + use thread_mod, only: max_num_threads + use dimensions_mod, only: np, nlev, nelem, nelemd, nelemdmax + use dimensions_mod, only: GlobalUniqueCols, fv_nphys,irecons_tracer + use control_mod, only: topology, partmethod + use element_mod, only: element_t, allocate_element_desc + use fvm_mod, only: fvm_init1 + use mesh_mod, only: MeshUseMeshFile + use time_mod, only: timelevel_init, timelevel_t + use mass_matrix_mod, only: mass_matrix + use derivative_mod, only: allocate_subcell_integration_matrix_cslam + use derivative_mod, only: allocate_subcell_integration_matrix_physgrid + use cube_mod, only: cubeedgecount , cubeelemcount, cubetopology + use cube_mod, only: cube_init_atomic, rotation_init_atomic, set_corner_coordinates + use cube_mod, only: assign_node_numbers_to_elem + use mesh_mod, only: MeshSetCoordinates, MeshUseMeshFile, MeshCubeTopology + use mesh_mod, only: MeshCubeElemCount, MeshCubeEdgeCount + use metagraph_mod, only: metavertex_t, localelemcount, initmetagraph, printmetavertex + use gridgraph_mod, only: gridvertex_t, gridedge_t + use gridgraph_mod, only: allocate_gridvertex_nbrs, deallocate_gridvertex_nbrs + use schedtype_mod, only: schedule + use schedule_mod, only: genEdgeSched + use prim_advection_mod, only: prim_advec_init1 + use cam_abortutils, only: endrun + use spmd_utils, only: mpi_integer, mpi_max + use parallel_mod, only: parallel_t, syncmp, global_shared_buf, nrepro_vars + use spacecurve_mod, only: genspacepart + use dof_mod, only: global_dof, CreateUniqueIndex, SetElemOffset + use params_mod, only: SFCURVE + use physconst, only: pi + use reduction_mod, only: red_min, red_max, red_max_int, red_flops + use reduction_mod, only: red_sum, red_sum_int, initreductionbuffer + use infnan, only: nan, assignment(=) + use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc + use fvm_analytic_mod, only: compute_basic_coordinate_vars + use fvm_control_volume_mod, only: fvm_struct, allocate_physgrid_vars + + type(element_t), pointer :: elem(:) + type(fvm_struct), pointer :: fvm(:) + type(parallel_t), intent(inout) :: par + type(timelevel_t), intent(out) :: Tl + + ! Local Variables + type (GridVertex_t), target,allocatable :: GridVertex(:) + type (GridEdge_t), target,allocatable :: Gridedge(:) + type (MetaVertex_t), target,allocatable :: MetaVertex(:) + + integer :: ie + integer :: nets, nete + integer :: nelem_edge + integer :: ierr, j + logical, parameter :: Debug = .FALSE. + + real(r8), allocatable :: aratio(:,:) + real(r8) :: area(1), xtmp + character(len=80) :: rot_type ! cube edge rotation type + + integer :: i + + character(len=128) :: errmsg + character(len=*), parameter :: subname = 'PRIM_INIT1: ' + + ! ==================================== + ! Set cube edge rotation type for model + ! unnecessary complication here: all should + ! be on the same footing. RDL + ! ===================================== + rot_type = "contravariant" + + ! =============================================================== + ! Allocate and initialize the graph (array of GridVertex_t types) + ! =============================================================== + + if (topology=="cube") then + + if (par%masterproc) then + write(iulog,*) subname, "creating cube topology..." 
+ call shr_sys_flush(iulog) + end if + + if (MeshUseMeshFile) then + nelem = MeshCubeElemCount() + nelem_edge = MeshCubeEdgeCount() + else + nelem = CubeElemCount() + nelem_edge = CubeEdgeCount() + end if + + allocate(GridVertex(nelem)) + allocate(GridEdge(nelem_edge)) + + do j = 1, nelem + call allocate_gridvertex_nbrs(GridVertex(j)) + end do + + if (MeshUseMeshFile) then + if (par%masterproc) then + write(iulog,*) subname, "Set up grid vertex from mesh..." + end if + call MeshCubeTopology(GridEdge, GridVertex) + else + call CubeTopology(GridEdge,GridVertex) + end if + + if (par%masterproc) then + write(iulog,*)"...done." + end if + end if + if(par%masterproc) then + write(iulog,*) subname, "total number of elements nelem = ",nelem + end if + + if(partmethod == SFCURVE) then + if(par%masterproc) then + write(iulog,*) subname, "partitioning graph using SF Curve..." + end if + call genspacepart(GridVertex) + else + write(errmsg, *) 'Unsupported partition method, ',partmethod + call endrun(subname//trim(errmsg)) + end if + + ! =========================================================== + ! given partition, count number of local element descriptors + ! =========================================================== + allocate(MetaVertex(1)) + allocate(Schedule(1)) + + nelem_edge = SIZE(GridEdge) + + ! ==================================================== + ! Generate the communication graph + ! ==================================================== + call initMetaGraph(par%rank+1,MetaVertex(1),GridVertex,GridEdge) + + nelemd = LocalElemCount(MetaVertex(1)) + if (par%masterproc .and. Debug) then + call PrintMetaVertex(MetaVertex(1)) + endif + + if(nelemd <= 0) then + call endrun(subname//'Not yet ready to handle nelemd = 0 yet' ) + end if + call mpi_allreduce(nelemd, nelemdmax, 1, MPI_INTEGER, MPI_MAX, par%comm, ierr) + + if (nelemd > 0) then + allocate(elem(nelemd)) + call allocate_element_desc(elem) + end if + + if (fv_nphys > 0) then + allocate(fvm(nelemd)) + call allocate_physgrid_vars(fvm,par) + else + ! Even if fvm not needed, still desirable to allocate it as empty + ! so it can be passed as a (size zero) array rather than pointer. + allocate(fvm(0)) + end if + + ! ==================================================== + ! Generate the communication schedule + ! ==================================================== + + call genEdgeSched(par, elem, par%rank+1, Schedule(1), MetaVertex(1)) + + allocate(global_shared_buf(nelemd, nrepro_vars)) + global_shared_buf = 0.0_r8 + + call syncmp(par) + + ! ================================================================= + ! Set number of domains (for 'decompose') equal to number of threads + ! for OpenMP across elements, equal to 1 for OpenMP within element + ! ================================================================= + + ! ================================================================= + ! Initialize shared boundary_exchange and reduction buffers + ! ================================================================= + if(par%masterproc) then + write(iulog,*) subname, 'init shared boundary_exchange buffers' + call shr_sys_flush(iulog) + end if + call InitReductionBuffer(red,3*nlev,max_num_threads) + call InitReductionBuffer(red_sum,5) + call InitReductionBuffer(red_sum_int,1) + call InitReductionBuffer(red_max,1) + call InitReductionBuffer(red_max_int,1) + call InitReductionBuffer(red_min,1) + call initReductionBuffer(red_flops,1) + + gp = gausslobatto(np) ! GLL points + + ! fvm nodes are equally spaced in alpha/beta + ! 
HOMME with equ-angular gnomonic projection maps alpha/beta space + ! to the reference element via simple scale + translation + ! thus, fvm nodes in reference element [-1,1] are a tensor product of + ! array 'fvm_corners(:)' computed below: + xtmp = nc + do i = 1, nc+1 + fvm_corners(i)= 2*(i-1)/xtmp - 1 ! [-1,1] including end points + end do + do i = 1, nc + fvm_points(i)= ( fvm_corners(i)+fvm_corners(i+1) ) /2 + end do + + if (topology == "cube") then + if(par%masterproc) then + write(iulog,*) subname, "initializing cube elements..." + call shr_sys_flush(iulog) + end if + if (MeshUseMeshFile) then + call MeshSetCoordinates(elem) + else + do ie = 1, nelemd + call set_corner_coordinates(elem(ie)) + end do + call assign_node_numbers_to_elem(elem, GridVertex) + end if + do ie = 1, nelemd + call cube_init_atomic(elem(ie),gp%points) + end do + end if + + ! ================================================================= + ! Initialize mass_matrix + ! ================================================================= + if(par%masterproc) then + write(iulog,*) subname, 'running mass_matrix' + call shr_sys_flush(iulog) + end if + call mass_matrix(par, elem) + allocate(aratio(nelemd,1)) + + if (topology == "cube") then + area = 0 + do ie = 1, nelemd + aratio(ie,1) = sum(elem(ie)%mp(:,:)*elem(ie)%metdet(:,:)) + end do + call repro_sum(aratio, area, nelemd, nelemd, 1, commid=par%comm) + area(1) = 4.0_r8*pi/area(1) ! ratio correction + deallocate(aratio) + if (par%masterproc) then + write(iulog,'(2a,f20.17)') subname, "re-initializing cube elements: area correction=", area(1) + call shr_sys_flush(iulog) + end if + + do ie = 1, nelemd + call cube_init_atomic(elem(ie),gp%points,area(1)) + call rotation_init_atomic(elem(ie),rot_type) + end do + end if + + if(par%masterproc) then + write(iulog,*) subname, 're-running mass_matrix' + call shr_sys_flush(iulog) + end if + call mass_matrix(par, elem) + + ! ================================================================= + ! Determine the global degree of freedome for each gridpoint + ! ================================================================= + if(par%masterproc) then + write(iulog,*) subname, 'running global_dof' + call shr_sys_flush(iulog) + end if + call global_dof(par, elem) + + ! ================================================================= + ! Create Unique Indices + ! ================================================================= + + do ie = 1, nelemd + call CreateUniqueIndex(elem(ie)%GlobalId,elem(ie)%gdofP,elem(ie)%idxP) + end do + + call SetElemOffset(par,elem, GlobalUniqueCols) + + do ie = 1, nelemd + elem(ie)%idxV=>elem(ie)%idxP + end do + + ! initialize flux terms to 0 + do ie = 1, nelemd + elem(ie)%derived%FM=0.0_r8 + elem(ie)%derived%FQ=0.0_r8 + elem(ie)%derived%FT=0.0_r8 + elem(ie)%derived%FDP=0.0_r8 + elem(ie)%derived%pecnd=0.0_r8 + + elem(ie)%derived%Omega=0 + elem(ie)%state%dp3d=0 + + elem(ie)%derived%etadot_prescribed = nan + elem(ie)%derived%u_met = nan + elem(ie)%derived%v_met = nan + elem(ie)%derived%dudt_met = nan + elem(ie)%derived%dvdt_met = nan + elem(ie)%derived%T_met = nan + elem(ie)%derived%dTdt_met = nan + elem(ie)%derived%ps_met = nan + elem(ie)%derived%dpsdt_met = nan + elem(ie)%derived%nudge_factor = nan + + elem(ie)%derived%Utnd=0._r8 + elem(ie)%derived%Vtnd=0._r8 + elem(ie)%derived%Ttnd=0._r8 + end do + + ! ========================================================== + ! This routines initalizes a Restart file. This involves: + ! I) Setting up the MPI datastructures + ! 
========================================================== + deallocate(GridEdge) + do j = 1, nelem + call deallocate_gridvertex_nbrs(GridVertex(j)) + end do + deallocate(GridVertex) + + do j = 1, MetaVertex(1)%nmembers + call deallocate_gridvertex_nbrs(MetaVertex(1)%members(j)) + end do + deallocate(MetaVertex) + + ! ===================================== + ! Set number of threads... + ! ===================================== + if(par%masterproc) then + write(iulog,*) subname, "max_num_threads=",max_num_threads + call shr_sys_flush(iulog) + end if + + nets = 1 + nete = nelemd + call Prim_Advec_Init1(par, elem) + if (fv_nphys > 0) then + call fvm_init1(par,elem) + end if + + ! ======================================================= + ! Allocate memory for subcell flux calculations. + ! ======================================================= + call allocate_subcell_integration_matrix_cslam(np, nc) + if (fv_nphys > 0) then + call allocate_subcell_integration_matrix_physgrid(np, fv_nphys) + end if + + call TimeLevel_init(tl) + + if (fv_nphys > 0) then + if(par%masterproc) then + write(iulog,*) subname, 'initialize basic fvm coordinate variables' + call shr_sys_flush(iulog) + end if + do ie = 1, nelemd + call compute_basic_coordinate_vars(elem(ie), nc, irecons_tracer, & + fvm(ie)%dalpha, fvm(ie)%dbeta, fvm(ie)%vtx_cart(:,:,1:nc,1:nc), & + fvm(ie)%center_cart(1:nc,1:nc), fvm(ie)%area_sphere(1:nc,1:nc), & + fvm(ie)%spherecentroid(:,1:nc,1:nc)) + call compute_basic_coordinate_vars(elem(ie), fv_nphys, irecons_tracer,& + fvm(ie)%dalpha_physgrid, fvm(ie)%dbeta_physgrid, & + fvm(ie)%vtx_cart_physgrid (:,:,1:fv_nphys,1:fv_nphys), & + fvm(ie)%center_cart_physgrid(1:fv_nphys,1:fv_nphys), & + fvm(ie)%area_sphere_physgrid(1:fv_nphys,1:fv_nphys), & + fvm(ie)%spherecentroid_physgrid(:,1:fv_nphys,1:fv_nphys)) + end do + end if + + if(par%masterproc) then + write(iulog,*) subname, 'end of prim_init' + call shr_sys_flush(iulog) + end if + end subroutine prim_init1 +end module prim_init diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 new file mode 100644 index 00000000..4c845ba0 --- /dev/null +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -0,0 +1,476 @@ +module prim_state_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_logfile, only: iulog + use dimensions_mod, only: nlev, np, nc, qsize_d, ntrac_d + use parallel_mod, only: ordered + use hybrid_mod, only: hybrid_t + use time_mod, only: timelevel_t, TimeLevel_Qdp, time_at + use control_mod, only: qsplit, statediag_numtrac + use global_norms_mod, only: global_integrals_general + use element_mod, only: element_t + use reduction_mod, only: parallelmax,parallelmin + use fvm_control_volume_mod, only: fvm_struct + + implicit none + private + + public :: prim_printstate, adjust_nsplit + +CONTAINS + + subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) + use dimensions_mod, only: ntrac + use constituents, only: cnst_name + use physconst, only: thermodynamic_active_species_idx_dycore, dry_air_species_num + use physconst, only: thermodynamic_active_species_num,thermodynamic_active_species_idx + use cam_control_mod, only: initial_run + use time_mod, only: tstep + use control_mod, only: rsplit, qsplit + use perf_mod, only: t_startf, t_stopf + type (element_t), intent(inout) :: elem(:) + type (TimeLevel_t), target, intent(in) :: tl + type (hybrid_t), intent(in) :: hybrid + integer, intent(in) :: nets,nete + type(fvm_struct), intent(inout) :: fvm(:) + real (kind=r8), optional, intent(in) :: 
omega_cn(2,nets:nete) + ! Local variables... + integer :: k,ie,m_cnst + integer, parameter :: type=ORDERED + + integer, parameter :: vmax=11+2*MAX(qsize_d,ntrac_d) + + character(len=10) :: varname(vmax) + + real (kind=r8), dimension(nets:nete,vmax) :: min_local,max_local + real (kind=r8), dimension(vmax) :: min_p,max_p,mass,mass_chg + real (kind=r8), dimension(np,np,nets:nete):: moist_ps + real (kind=r8), dimension(nc,nc,nets:nete):: moist_ps_fvm + + real (kind=r8) :: tmp_gll(np,np,vmax,nets:nete),tmp_mass(vmax)! + real (kind=r8) :: tmp_fvm(nc,nc,vmax,nets:nete) + real (kind=r8) :: tmp_q(np,np,nlev) + integer :: n0, n0_qdp, q, nm, nm2 + real(kind=r8) :: da_gll(np,np,nets:nete),da_fvm(nc,nc,nets:nete) + + !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep + if (hybrid%masterthread) then + write(iulog,*) "nstep=",tl%nstep," time=",Time_at(tl%nstep)/(24*3600)," [day]" + end if + ! dynamics timelevels + n0=tl%n0 + call TimeLevel_Qdp( tl, qsplit, n0_qdp) + ! moist surface pressure + if (ntrac>0) then + do ie=nets,nete + moist_ps_fvm(:,:,ie)=SUM(fvm(ie)%dp_fvm(1:nc,1:nc,:),DIM=3) + do q=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx(q) + do k=1,nlev + moist_ps_fvm(:,:,ie) = moist_ps_fvm(:,:,ie)+& + fvm(ie)%dp_fvm(1:nc,1:nc,k)*fvm(ie)%c(1:nc,1:nc,k,m_cnst) + end do + end do + enddo + end if + do ie=nets,nete + moist_ps(:,:,ie)=elem(ie)%state%psdry(:,:) + do q=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx_dycore(q) + do k=1,nlev + moist_ps(:,:,ie) = moist_ps(:,:,ie)+& + elem(ie)%state%Qdp(:,:,k,m_cnst,n0_qdp) + end do + end do + enddo + ! weights/areas for global integrals + do ie=nets,nete + da_gll(:,:,ie) = elem(ie)%mp(:,:)*elem(ie)%metdet(:,:) + enddo + if (ntrac>0) then + do ie=nets,nete + da_fvm(:,:,ie) = fvm(ie)%area_sphere(:,:) + enddo + end if + ! + !********************************************* + ! + ! min/max of u,v,T,PS,OMEGA + ! + !********************************************* + ! 
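For readers tracing the nm/nm2 index bookkeeping below, here is an editorial worked layout of the diagnostic columns, assuming statediag_numtrac = 3 (the value is configurable; this table is an illustration, not part of the patch):

! Illustrative column layout for varname/min_local/max_local, statediag_numtrac = 3
!   with CSLAM tracers (ntrac > 0):            GLL-only tracers (ntrac == 0):
!     1-5    U, V, T, OMEGA, OMEGA CN            1-5    U, V, T, OMEGA, OMEGA CN
!     6-9    PSDRY(fvm), PS(fvm),                6-7    PSDRY, PS
!            PSDRY(gll), PS(gll)
!     nm = 9,   10-12  tracers                   nm = 7,   8-10   tracers
!     nm2 = 12, 13-14  FT, FM                    nm2 = 10, 11-12  FT, FM
!     15-17  tracer forcings                     13-15  tracer forcings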
+ varname(1) = 'U ' + varname(2) = 'V ' + varname(3) = 'T ' + varname(4) = 'OMEGA ' + varname(5) = 'OMEGA CN ' + if (ntrac>0) then + varname(6) = 'PSDRY(fvm)' + varname(7) = 'PS(fvm) ' + varname(8) = 'PSDRY(gll)' + varname(9) = 'PS(gll) ' + nm = 9 !number of vars before tracers + nm2 = nm+statediag_numtrac!number of vars after tracers + else + varname(6) = 'PSDRY ' + varname(7) = 'PS ' + nm = 7 !number of vars before tracers + nm2 = nm+statediag_numtrac!number of vars after tracers + end if + + do ie=nets,nete + min_local(ie,1) = MINVAL(elem(ie)%state%v(:,:,1,:,n0)) + max_local(ie,1) = MAXVAL(elem(ie)%state%v(:,:,1,:,n0)) + min_local(ie,2) = MINVAL(elem(ie)%state%v(:,:,2,:,n0)) + max_local(ie,2) = MAXVAL(elem(ie)%state%v(:,:,2,:,n0)) + min_local(ie,3) = MINVAL(elem(ie)%state%T(:,:,:,n0)) + max_local(ie,3) = MAXVAL(elem(ie)%state%T(:,:,:,n0)) + min_local(ie,4) = MINVAL(elem(ie)%derived%Omega(:,:,:)) + max_local(ie,4) = MAXVAL(elem(ie)%derived%Omega(:,:,:)) + if (present(omega_cn)) then + min_local(ie,5) = omega_cn(1,ie) + max_local(ie,5) = omega_cn(2,ie) + else + min_local(ie,5) = 0.0_r8 + max_local(ie,5) = 0.0_r8 + end if + if (ntrac>0) then + min_local(ie,6) = MINVAL(SUM(fvm(ie)%dp_fvm(1:nc,1:nc,:),DIM=3)) + max_local(ie,6) = MAXVAL(SUM(fvm(ie)%dp_fvm(1:nc,1:nc,:),DIM=3)) + min_local(ie,7) = MINVAL(moist_ps_fvm(:,:,ie)) + max_local(ie,7) = MINVAL(moist_ps_fvm(:,:,ie)) + min_local(ie,8) = MINVAL(elem(ie)%state%psdry(:,:)) + max_local(ie,8) = MAXVAL(elem(ie)%state%psdry(:,:)) + min_local(ie,9) = MINVAL(moist_ps(:,:,ie)) + max_local(ie,9) = MAXVAL(moist_ps(:,:,ie)) + do q=1,statediag_numtrac + varname(nm+q) = TRIM(cnst_name(q)) + min_local(ie,nm+q) = MINVAL(fvm(ie)%c(1:nc,1:nc,:,q)) + max_local(ie,nm+q) = MAXVAL(fvm(ie)%c(1:nc,1:nc,:,q)) + end do + else + min_local(ie,6) = MINVAL(elem(ie)%state%psdry(:,:)) + max_local(ie,6) = MAXVAL(elem(ie)%state%psdry(:,:)) + min_local(ie,7) = MINVAL(moist_ps(:,:,ie)) + max_local(ie,7) = MAXVAL(moist_ps(:,:,ie)) + do q=1,statediag_numtrac + varname(nm+q) = TRIM(cnst_name(q)) + tmp_q = elem(ie)%state%Qdp(:,:,:,q,n0_qdp)/elem(ie)%state%dp3d(:,:,:,n0) + min_local(ie,nm+q) = MINVAL(tmp_q) + max_local(ie,nm+q) = MAXVAL(tmp_q) + end do + end if + ! + ! forcing diagnostics + ! + varname(nm2+1) = 'FT ' + varname(nm2+2) = 'FM ' + min_local(ie,nm2+1) = MINVAL(elem(ie)%derived%FT(:,:,:)) + max_local(ie,nm2+1) = MAXVAL(elem(ie)%derived%FT(:,:,:)) + min_local(ie,nm2+2) = MINVAL(elem(ie)%derived%FM(:,:,:,:)) + max_local(ie,nm2+2) = MAXVAL(elem(ie)%derived%FM(:,:,:,:)) + if (ntrac>0) then + do q=1,statediag_numtrac + varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) + min_local(ie,nm2+2+q) = MINVAL(fvm(ie)%fc(1:nc,1:nc,:,q)) + max_local(ie,nm2+2+q) = MAXVAL(fvm(ie)%fc(1:nc,1:nc,:,q)) + end do + else + do q=1,statediag_numtrac + varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) + tmp_q = elem(ie)%derived%FQ(:,:,:,q) + min_local(ie,nm2+2+q) = MINVAL(tmp_q) + max_local(ie,nm2+2+q) = MAXVAL(tmp_q) + end do + end if + + end do + !JMD This is a Thread Safe Reduction + do k = 1, nm2+2+statediag_numtrac + if (k==1) call t_startf('parallelMin') + min_p(k) = ParallelMin(min_local(:,k),hybrid) + if (k==1) call t_stopf('parallelMin') + max_p(k) = ParallelMax(max_local(:,k),hybrid) + end do + ! + !********************************************* + ! + ! Mass diagnostics + ! + !********************************************* + ! + ! tracers + ! 
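A brief editorial note on the mass bookkeeping that follows: mass = -1 is a sentinel meaning "no mass diagnostic for this column", the 0.01 factor appears to convert the Pa-based global integrals to the hPa reported under 'AVE (hPa)', and the change is measured against the reference stored in elem(nets)%derived%mass on the first step. A minimal sketch of that change rule, where rel_mass_change is a hypothetical helper used only for illustration (assumes r8 from shr_kind_mod, as in this module):

! Hypothetical helper; equivalent to the in-line mass_chg logic further below.
pure function rel_mass_change(mass_now, mass_ref) result(chg)
  real(kind=r8), intent(in) :: mass_now, mass_ref
  real(kind=r8)             :: chg
  if (abs(mass_ref) < 1.0e-12_r8) then
    chg = mass_now - mass_ref              ! reference near zero: report absolute change
  else
    chg = (mass_now - mass_ref)/mass_ref   ! otherwise: relative change
  end if
end function rel_mass_change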
+ mass = -1.0_r8 + if (ntrac>0) then + do ie=nets,nete + do q=1,statediag_numtrac + tmp_fvm(:,:,q,ie) = SUM(fvm(ie)%c(1:nc,1:nc,:,q)*fvm(ie)%dp_fvm(1:nc,1:nc,:),DIM=3) + end do + q=statediag_numtrac+1 + tmp_fvm(:,:,q,ie) = SUM(fvm(ie)%dp_fvm(1:nc,1:nc,:),DIM=3) + q=statediag_numtrac+2 + tmp_fvm(:,:,q,ie) = moist_ps_fvm(:,:,ie) + end do + call global_integrals_general(tmp_fvm(:,:,1:statediag_numtrac+2,nets:nete),hybrid,nc,da_fvm,statediag_numtrac+2,& + nets,nete,tmp_mass(1:statediag_numtrac+2)) + mass(nm+1:nm+statediag_numtrac)=tmp_mass(1:statediag_numtrac)*0.01_r8 + mass(6:7)=tmp_mass(statediag_numtrac+1:statediag_numtrac+2)*0.01_r8 + do ie=nets,nete + tmp_gll(:,:,1,ie)=elem(ie)%state%psdry(:,:) + tmp_gll(:,:,2,ie)=moist_ps(:,:,ie) + end do + call global_integrals_general(tmp_gll(:,:,1:2,nets:nete),hybrid,np,da_gll,2,& + nets,nete,tmp_mass(1:2)) + mass(8:9)=tmp_mass(1:2)*0.01_r8 + else + do ie=nets,nete + do q=1,statediag_numtrac + tmp_gll(:,:,q,ie)=sum(elem(ie)%state%Qdp(:,:,:,q,n0_qdp),DIM=3) + end do + q=statediag_numtrac+1 + tmp_gll(:,:,q,ie)=elem(ie)%state%psdry(:,:) + q=statediag_numtrac+2 + tmp_gll(:,:,q,ie)=moist_ps(:,:,ie) + end do + call global_integrals_general(tmp_gll(:,:,1:statediag_numtrac+2,nets:nete),hybrid,np,da_gll,statediag_numtrac+2,& + nets,nete,tmp_mass(1:statediag_numtrac+2)) + mass(nm+1:nm+statediag_numtrac)=tmp_mass(1:statediag_numtrac)*0.01_r8 + mass(6:7)=tmp_mass(statediag_numtrac+1:statediag_numtrac+2)*0.01_r8 + end if + ! + ! compute relative mass change + ! + if (tl%nstep==0.or..not. initial_run) then + mass_chg(:) = 0.0_R8 + elem(nets)%derived%mass(nm+1:nm+statediag_numtrac) = mass(nm+1:nm+statediag_numtrac) + if (ntrac>0) then + elem(nets)%derived%mass(6:9) = mass(6:9) + else + elem(nets)%derived%mass(6:7) = mass(6:7) + end if + else + mass_chg(:) = 0.0_r8 + do q=1,nm2!statediag_numtrac + if (mass(q).ne.-1.0_r8) then + if (ABS(elem(nets)%derived%mass(q))<1.0e-12_r8) then + mass_chg(q) =mass(q) - elem(nets)%derived%mass(q) + else + mass_chg(q) =(mass(q) - elem(nets)%derived%mass(q))/elem(nets)%derived%mass(q) + end if + end if + end do + end if + ! + ! write diagnostics to log file + ! + if(hybrid%masterthread) then + write(iulog,*) ' ' + write(iulog,*) 'STATE DIAGNOSTICS' + write(iulog,*) ' ' + write(iulog,101) ' ','MIN','MAX','AVE (hPa)','REL. MASS. CHANGE' + do k=1,nm+statediag_numtrac + if (mass(k)==-1.0_r8) then + write(iulog,100) varname(k),min_p(k),max_p(k) + else + write(iulog,100) varname(k),min_p(k),max_p(k),mass(k),mass_chg(k) + end if + end do + ! + ! forcing diagnostics + ! + write(iulog,*) ' ' + write(iulog,*) 'FORCING DIAGNOSTICS' + write(iulog,*) ' ' + write(iulog,101) ' ','MIN','MAX' + do k=nm2+1,nm2+2+statediag_numtrac + write(iulog,100) varname(k),min_p(k),max_p(k) + end do + end if + +100 format (A12,4(E23.15)) +101 format (A12,A23,A23,A23,A23) + +#ifdef waccm_debug + call prim_printstate_cslam_gamma(elem, tl,hybrid,nets,nete, fvm) +#endif + call prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) + end subroutine prim_printstate + + +#ifdef waccm_debug + subroutine prim_printstate_cslam_gamma(elem, tl,hybrid,nets,nete, fvm) + type (element_t), intent(inout) :: elem(:) + type(fvm_struct), intent(inout) :: fvm(:) + type (TimeLevel_t), target, intent(in) :: tl + type (hybrid_t), intent(in) :: hybrid + integer, intent(in) :: nets,nete + + ! Local variables... 
+ integer :: k,ie + + real (kind=r8), dimension(nets:nete,nlev) :: max_local + real (kind=r8), dimension(nlev) :: max_p + integer :: n0, n0_qdp, q, nm, nm2 + + !dt=tstep*qsplit + !dt = tstep*qsplit*rsplit ! vertical REMAP timestep + !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep + if (hybrid%masterthread) then + write(iulog,*) "nstep=",tl%nstep," time=",Time_at(tl%nstep)/(24*3600)," [day]" + end if + ! dynamics timelevels + n0=tl%n0 + call TimeLevel_Qdp( tl, qsplit, n0_qdp) + + do ie=nets,nete + do k=1,nlev + max_local(ie,k) = MAXVAL(fvm(ie)%CSLAM_gamma(:,:,k,1)) + end do + end do + !JMD This is a Thread Safe Reduction + do k = 1, nlev + max_p(k) = Parallelmax(max_local(:,k),hybrid) + end do + if (hybrid%masterthread) then + write(iulog,*) ' ' + write(iulog,*) 'Gamma max' + write(iulog,*) ' ' + do k=1,nlev + write(iulog,*) 'k,gamma= ',k,max_p(k) + end do + end if + end subroutine prim_printstate_cslam_gamma +#endif + + subroutine adjust_nsplit(elem, tl,hybrid,nets,nete, fvm, omega_cn) + use dimensions_mod, only: ksponge_end + use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet + use time_mod, only: tstep + use control_mod, only: rsplit, qsplit + use perf_mod, only: t_startf, t_stopf + use time_mod, only: nsplit, nsplit_baseline,rsplit_baseline + use control_mod, only: qsplit, rsplit + use time_manager, only: get_step_size + use cam_abortutils, only: endrun + use control_mod, only: nu_top + ! + type (element_t), intent(inout) :: elem(:) + type (TimeLevel_t), target, intent(in) :: tl + type (hybrid_t), intent(in) :: hybrid + integer, intent(in) :: nets,nete + type(fvm_struct), intent(inout) :: fvm(:) + real (kind=r8), intent(in) :: omega_cn(2,nets:nete) + ! Local variables... + integer :: k,ie + real (kind=r8), dimension(1) :: min_o + real (kind=r8), dimension(1) :: max_o + real (kind=r8) :: dtime + character(len=128) :: errmsg + real (kind=r8) :: threshold=0.90_r8 + real (kind=r8) :: max_abs_omega_cn(nets:nete) + real (kind=r8) :: min_abs_omega_cn(nets:nete) + ! + ! The threshold values for when to double nsplit are empirical. + ! In FW2000climo runs the Courant numbers are large in the sponge + ! + ! The model was found to be stable if regular del4 is increased + ! in the sponge and nu_top is increased (when nsplit doubles) + ! + ! + do ie=nets,nete + max_abs_omega_cn(ie) = MAXVAL(ABS(omega_cn(:,ie))) + end do + + !JMD This is a Thread Safe Reduction + do k = 1,1 + max_o(k) = ParallelMax(max_abs_omega_cn(:),hybrid) +! min_o(k) = ParallelMin(min_abs_omega_cn(:),hybrid) + end do + if (max_o(1)>threshold.and.nsplit==nsplit_baseline) then + ! + ! change vertical remap time-step + ! + nsplit=2*nsplit_baseline + fvm_supercycling = rsplit + fvm_supercycling_jet = rsplit + nu_top=2.0_r8*nu_top + ! + ! write diagnostics to log file + ! + if(hybrid%masterthread) then + !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep + !dt=tstep*qsplit + ! dt_remap = tstep*qsplit*rsplit ! vertical REMAP timestep + ! + write(iulog,*) 'adj. nsplit: doubling nsplit; t=',Time_at(tl%nstep)/(24*3600)," [day]; max OMEGA",max_o(1) + end if + dtime = get_step_size() + tstep = dtime / real(nsplit*qsplit*rsplit, r8) + + else if (nsplit.ne.nsplit_baseline.and.max_o(1)<0.4_r8*threshold) then + ! + ! should nsplit be reduced again? + ! + nsplit=nsplit_baseline + rsplit=rsplit_baseline + fvm_supercycling = rsplit + fvm_supercycling_jet = rsplit + nu_top=nu_top/2.0_r8 + +! 
nu_div_scale_top(:) = 1.0_r8 + + dtime = get_step_size() + tstep = dtime / real(nsplit*qsplit*rsplit, r8) + if(hybrid%masterthread) then + write(iulog,*) 'adj. nsplit: reset nsplit ; t=',Time_at(tl%nstep)/(24*3600)," [day]; max OMEGA",max_o(1) + end if + end if + end subroutine adjust_nsplit + + subroutine prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) + type (element_t), intent(inout) :: elem(:) + type(fvm_struct), intent(inout) :: fvm(:) + type (TimeLevel_t), target, intent(in) :: tl + type (hybrid_t), intent(in) :: hybrid + integer, intent(in) :: nets,nete + + ! Local variables... + integer :: k,ie + + real (kind=r8), dimension(nets:nete,nlev) :: max_local + real (kind=r8), dimension(nets:nete,nlev) :: min_local + real (kind=r8), dimension(nlev) :: max_p + real (kind=r8), dimension(nlev) :: min_p + integer :: n0, n0_qdp, q, nm, nm2 + + !dt=tstep*qsplit + !dt = tstep*qsplit*rsplit ! vertical REMAP timestep + !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep + if (hybrid%masterthread) then + write(iulog,*) "nstep=",tl%nstep," time=",Time_at(tl%nstep)/(24*3600)," [day]" + end if + ! dynamics timelevels + n0=tl%n0 + call TimeLevel_Qdp( tl, qsplit, n0_qdp) + + do ie=nets,nete + do k=1,nlev + max_local(ie,k) = MAXVAL(elem(ie)%state%v(:,:,:,k,n0)) + min_local(ie,k) = MINVAL(elem(ie)%state%v(:,:,:,k,n0)) + end do + end do + !JMD This is a Thread Safe Reduction + do k = 1, nlev + max_p(k) = Parallelmax(max_local(:,k),hybrid) + min_p(k) = Parallelmin(min_local(:,k),hybrid) + end do + if (hybrid%masterthread) then + write(iulog,*) ' ' + write(iulog,*) 'min/max of wind components in each layer' + write(iulog,*) ' ' + do k=1,nlev + write(iulog,*) 'k,V (min max)= ',k,min_p(k),max_p(k) + end do + end if + end subroutine prim_printstate_U +end module prim_state_mod diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90 new file mode 100644 index 00000000..ca6ba835 --- /dev/null +++ b/src/dynamics/se/dycore/quadrature_mod.F90 @@ -0,0 +1,955 @@ +#undef _GAUSS_TABLE +module quadrature_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + + implicit none + private + + type, public :: quadrature_t + real (kind=r8), dimension(:), pointer :: points + real (kind=r8), dimension(:), pointer :: weights + end type quadrature_t + + public :: gausslobatto + public :: test_gausslobatto + public :: gauss + public :: test_gauss + public :: legendre + public :: jacobi + public :: quad_norm + + public :: trapezoid + private :: trapN + public :: simpsons + public :: gaussian_int + + private :: gausslobatto_pts + private :: gausslobatto_wts + private :: gauss_pts + private :: gauss_wts + private :: jacobi_polynomials + private :: jacobi_derivatives + + +contains + + ! ============================================================== + ! gauss: + ! + ! Find the Gauss collocation points and the corresponding weights. + ! + ! 
==============================================================
+
+  function gauss(npts) result(gs)
+    integer, intent(in) :: npts
+    type (quadrature_t) :: gs
+
+    allocate(gs%points(npts))
+    allocate(gs%weights(npts))
+
+    gs%points=gauss_pts(npts)
+    gs%weights=gauss_wts(npts,gs%points)
+
+  end function gauss
+
+#if defined(_GAUSS_TABLE)
+  function gauss_pts(npts) result(pts)
+
+    integer, intent(in) :: npts
+    real (kind=r8) :: pts(npts)
+
+    pts(1) = -0.93246951420315202781_r8
+    pts(2) = -0.66120938646626451366_r8
+    pts(3) = -0.23861918608319690863_r8
+    pts(4) = -pts(3)
+    pts(5) = -pts(2)
+    pts(6) = -pts(1)
+
+  end function gauss_pts
+
+
+  function gauss_wts(npts,pts) result(wts)
+
+    integer, intent(in) :: npts
+    real (kind=r8) :: pts(npts)
+    real (kind=r8) :: wts(npts)
+
+    wts(1) = 0.17132449237917034504_r8
+    wts(2) = 0.36076157304813860756_r8
+    wts(3) = 0.46791393457269104738_r8
+    wts(4) = wts(3)
+    wts(5) = wts(2)
+    wts(6) = wts(1)
+
+  end function gauss_wts
+#else
+
+  ! ==============================================================
+  ! gauss_pts:
+  !
+  ! Compute the Gauss Collocation points
+  ! for Jacobi Polynomials
+  !
+  ! ==============================================================
+
+  function gauss_pts(np1) result(pts)
+    use physconst, only: pi
+
+    integer, intent(in) :: np1 ! Number of velocity grid points
+    real (kind=r8) :: pts(np1)
+
+    ! Local variables
+
+    real (kind=r8) :: alpha,beta
+    real (kind=r8) :: xjac(0:np1-1)
+    real (kind=r8) :: jac(0:np1)
+    real (kind=r8) :: djac(0:np1)
+
+    integer prec        ! number of mantissa bits
+    real (kind=r8) eps  ! machine epsilon
+    real (kind=r8), parameter :: convthresh = 10  ! convergence threshold relative
+                                                  ! to machine epsilon
+    integer, parameter :: kstop = 30  ! max iterations for polynomial deflation
+
+    real (kind=r8) :: poly
+    real (kind=r8) :: pder
+    real (kind=r8) :: recsum,thresh
+    real (kind=r8) :: dth
+
+    real (kind=r8) :: x
+    real (kind=r8) :: delx
+    real (kind=r8) :: c0,c1,c2,c10
+
+    integer i,j,k
+    integer n, nh
+
+    n = np1 - 1
+    c0 = 0.0_r8
+    c1 = 1.0_r8
+    c2 = 2.0_r8
+    c10 = 10.0_r8
+    alpha = c0
+    beta = c0
+
+    ! =========================================================
+    ! compute machine precision and set the convergence
+    ! threshold thresh to 10 times that level
+    ! =========================================================
+
+    prec = precision(c10)
+    eps = c10**(-prec)
+    thresh = convthresh*eps
+
+    ! ============================================================
+    ! Compute first half of the roots by "polynomial deflation".
+    ! ============================================================
+
+    dth = PI/(2*n+2)
+
+    nh = (n+1)/2
+
+    do j=0,nh-1
+      x=COS((c2*j+1)*dth) ! first guess at root
+      k=0
+      delx=c1
+      do while((k < kstop) .and. (ABS(delx) > thresh))
+        call jacobi(n+1,x,alpha,beta,jac(0:n+1),djac(0:n+1))
+        poly = jac(n+1)
+        pder = djac(n+1)
+        recsum=c0
+        do i=0,j-1
+          recsum = recsum + c1/(x-xjac(i))
+        end do
+        delx = -poly/(pder-recsum*poly)
+        x = x + delx
+        k = k + 1
+      end do
+
+      xjac(j)=x
+
+    end do
+
+    ! ================================================
+    ! compute the second half of the roots by symmetry
+    ! ================================================
+
+    do j=0,nh
+      xjac(n-j) = -xjac(j)
+    end do
+
+    if (MODULO(n,2)==0) xjac(nh)=c0
+
+    ! ====================================================
+    ! Reverse the sign of everything so that indexing
+    ! increases with position
+    ! ====================================================
+
+    do j=0,n
+      pts(j+1) = -xjac(j)
+    end do
+
+  end function gauss_pts
+
+  !
================================================ + ! gauss_wts: + ! + ! Gauss Legendre Weights + ! ================================================ + + function gauss_wts(np1, gpts) result(wts) + + integer, intent(in) :: np1 + real (kind=r8), intent(in) :: gpts(np1) ! Gauss-Legendre points + real (kind=r8) :: wts(np1) ! Gauss-Legendre weights + + ! Local variables + + real (kind=r8) :: c0,c1,c2 + real (kind=r8) :: alpha + real (kind=r8) :: beta + real (kind=r8) :: djac(np1) + integer i,n + + c0 = 0.0_r8 + c1 = 1.0_r8 + c2 = 2.0_r8 + + alpha = c0 + beta = c0 + n = np1-1 + + djac=jacobi_derivatives(np1,alpha,beta,np1,gpts) + + do i=1,np1 + wts(i)=c2/((c1-gpts(i)**2)*djac(i)*djac(i)) + end do + + end function gauss_wts + +#endif + + ! ============================================================== + ! test_gauss: + ! + ! Unit Tester for Gaussian Points, Weights + ! ============================================================== + + subroutine test_gauss(npts) + + integer, intent(in) :: npts + type (quadrature_t) :: gs + + integer i + real (kind=r8) :: gssum + gs=gauss(npts) + + print * + print *,"============================================" + print *," Testing Gaussian Quadrature..." + print * + print *," points weights" + print *,"============================================" + do i=1,npts + print *,i,gs%points(i),gs%weights(i) + end do + print *,"============================================" + gssum=SUM(gs%weights(:)) + print *,"sum of Gaussian weights=",gssum + print *,"============================================" + + deallocate(gs%points) + deallocate(gs%weights) + + end subroutine test_gauss + + ! ============================================================== + ! gausslobatto: + ! + ! Find the Gauss-Lobatto Legendre collocation points xgl(i) and the + ! corresponding weights. + ! + ! ============================================================== + + function gausslobatto(npts) result(gll) + + integer, intent(in) :: npts + type (quadrature_t) :: gll + + allocate(gll%points(npts)) + allocate(gll%weights(npts)) + + gll%points=gausslobatto_pts(npts) + gll%weights=gausslobatto_wts(npts,gll%points) + + end function gausslobatto + + ! ============================================================== + ! gausslobatto_pts: + ! + ! Compute the Gauss-Lobatto Collocation points + ! for Jacobi Polynomials + ! + ! ============================================================== + + function gausslobatto_pts(np1) result(pts) + use physconst, only: pi + + integer, intent(in) :: np1 ! Number of velocity grid points + real (kind=r8) :: pts(np1) + + ! Local variables + + real (kind=r8) :: alpha,beta + real (kind=r8) :: xjac(0:np1-1) + real (kind=r8) :: jac(0:np1) + real (kind=r8) :: jacm1(0:np1) + real (kind=r8) :: djac(0:np1) + + integer prec ! number of mantissa bits + real (kind=r8) eps ! machine epsilon + real (kind=r8), parameter :: convthresh = 10 ! convergence threshold relative + ! to machine epsilon + integer, parameter :: kstop = 30 ! max iterations for polynomial deflation + + real (kind=r8) :: a,b,det + real (kind=r8) :: poly + real (kind=r8) :: pder + real (kind=r8) :: recsum,thresh + real (kind=r8) :: dth,cd,sd,cs,ss,cstmp + + real (kind=r8) :: x + real (kind=r8) :: delx + real (kind=r8) :: c0,c1,c2,c10 + + integer i,j,k + integer n, nh + + n = np1 - 1 + c0 = 0.0_r8 + c1 = 1.0_r8 + c2 = 2.0_r8 + c10 = 10.0_r8 + + alpha = c0 + beta = c0 + + ! ========================================================= + ! compute machine precision and set the convergence + ! 
threshold thresh to 10 times that level
+    ! =========================================================
+
+    prec = PRECISION(c10)
+    eps = c10**(-prec)
+    thresh = convthresh*eps
+
+    ! =====================================================
+    ! initialize the end points
+    ! =====================================================
+
+    xjac(0) = c1
+    xjac(n) = -c1
+
+    ! ============================================================
+    ! Compute first half of the roots by "polynomial deflation".
+    ! ============================================================
+
+    ! ============================================================
+    ! compute the parameters in the polynomial whose
+    ! roots are desired...
+    ! ============================================================
+
+    call jacobi(n+1, c1,alpha,beta,jac(0:n+1),djac(0:n+1))
+    call jacobi(n+1,-c1,alpha,beta,jacm1(0:n+1),djac(0:n+1))
+
+    det = jac(n )*jacm1(n-1)-jacm1(n )*jac(n-1)
+    a = -(jac(n+1)*jacm1(n-1)-jacm1(n+1)*jac(n-1))/det
+    b = -(jac(n )*jacm1(n+1)-jacm1(n )*jac(n+1))/det
+
+    dth = PI/(2*n+1)
+    cd = COS(c2*dth)
+    sd = SIN(c2*dth)
+    cs = COS(dth)
+    ss = SIN(dth)
+
+    nh = (n+1)/2
+
+    do j=1,nh-1
+      x=cs ! first guess at root
+      k=0
+      delx=c1
+      do while((k < kstop) .and. (ABS(delx) > thresh))
+        call jacobi(n+1,x,alpha,beta,jac(0:n+1),djac(0:n+1))
+        poly = jac(n+1)+a* jac(n)+b* jac(n-1)
+        pder = djac(n+1)+a*djac(n)+b*djac(n-1)
+        recsum=c0
+        do i=0,j-1
+          recsum = recsum + c1/(x-xjac(i))
+        end do
+        delx = -poly/(pder-recsum*poly)
+        x = x + delx
+        k = k + 1
+      end do
+
+      xjac(j)=x
+
+      ! =====================================================
+      ! compute the guesses for the roots
+      ! for the next points, i.e :
+      !
+      ! ss = sn(theta) => sin(theta+2*dth)
+      ! cs = cs(theta) => cs(theta+2*dth)
+      ! =====================================================
+
+      cstmp=cs*cd-ss*sd
+      ss=cs*sd+ss*cd
+      cs=cstmp
+    end do
+
+    ! ================================================
+    ! compute the second half of the roots by symmetry
+    ! ================================================
+
+    do j=1,nh
+      xjac(n-j) = -xjac(j)
+    end do
+
+    if (MODULO(n,2)==0) xjac(nh)=c0
+
+    ! ====================================================
+    ! Reverse the sign of everything so that indexing
+    ! increases with position
+    ! ====================================================
+
+    do j=0,n
+      pts(j+1) = -xjac(j)
+    end do
+
+  end function gausslobatto_pts
+
+  ! ================================================
+  ! Gauss Lobatto Legendre Weights
+  ! ================================================
+
+  function gausslobatto_wts(np1, glpts) result(wts)
+
+    integer, intent(in) :: np1
+    real (kind=r8), intent(in) :: glpts(np1)
+    real (kind=r8) :: wts(np1)
+
+    ! Local variables
+
+    real (kind=r8) :: c0,c2
+    real (kind=r8) :: alpha
+    real (kind=r8) :: beta
+    real (kind=r8) :: jac(np1)
+    integer i,n
+
+    c0 = 0.0_r8
+    c2 = 2.0_r8
+    alpha = c0
+    beta = c0
+    n = np1-1
+
+    jac=jacobi_polynomials(n,alpha,beta,np1,glpts)
+
+    do i=1,np1
+      wts(i)=c2/(n*(n+1)*jac(i)*jac(i))
+    end do
+
+  end function gausslobatto_wts
+
+  ! ==============================================================
+  ! test_gausslobatto:
+  !
+  ! Unit Tester for Gaussian Lobatto Quadrature...
+  ! ==============================================================
+
+  subroutine test_gausslobatto(npts)
+    integer, intent(in) :: npts
+    type (quadrature_t) :: gll
+
+    integer i
+    real (kind=r8) :: gllsum
+    gll=gausslobatto(npts)
+
+    print *
+    print *,"============================================"
+    print *," Testing Gauss-Lobatto Quadrature..."
+ print * + print *," points weights" + print *,"============================================" + do i=1,npts + print *,i,gll%points(i),gll%weights(i) + end do + print *,"============================================" + gllsum=SUM(gll%weights(:)) + print *,"sum of Gauss-Lobatto weights=",gllsum + print *,"============================================" + + deallocate(gll%points) + deallocate(gll%weights) + + end subroutine test_gausslobatto + + ! ================================================ + ! + ! subroutine jacobi: + ! + ! Computes the Jacobi Polynomials (jac) and their + ! first derivatives up to and including degree n + ! at point x on the interval (-1,1). + ! + ! See for example the recurrence relations + ! in equation 2.5.4 (page 70) in + ! + ! "Spectral Methods in Fluid Dynamics", + ! by C. Canuto, M.Y. Hussaini, A. Quarteroni, T.A.Zang + ! Springer-Verlag, 1988. + ! ================================================ + + subroutine jacobi(n, x, alpha, beta, jac, djac) + + integer, intent(in) :: n + real (kind=r8), intent(in) :: x + real (kind=r8), intent(in) :: alpha + real (kind=r8), intent(in) :: beta + real (kind=r8) :: jac(0:n) + real (kind=r8) :: djac(0:n) + + ! Local variables + + real (kind=r8) :: a1k + real (kind=r8) :: a2k + real (kind=r8) :: a3k + real (kind=r8) :: da2kdx + + real (kind=r8) :: c2,c1,c0 + + integer :: k + + c0 = 0.0_r8 + c1 = 1.0_r8 + c2 = 2.0_r8 + + jac(0)=c1 + jac(1)=(c1 + alpha)*x + + djac(0)=c0 + djac(1)=(c1 + alpha) + + do k=1,n-1 + a1k = c2*( k + c1 )*( k + alpha + beta + c1 )*( c2*k + alpha + beta ) + da2kdx = ( c2*( k + c1 ) + alpha + beta )*( c2*k + alpha + beta + c1 )*( c2*k + alpha + beta ) + a2k = ( c2*k + alpha + beta + c1 )*( alpha*alpha - beta*beta ) + x*da2kdx + a3k = c2*(k + alpha)*( k + beta )*( c2*k + alpha + beta + c2 ) + jac(k+1) = ( a2k*jac(k)-a3k*jac(k-1) )/a1k + djac(k+1)= ( a2k*djac(k) + da2kdx*jac(k) - a3k*djac(k-1) )/a1k + end do + + end subroutine jacobi + + + ! ========================================================== + ! This routine computes the Nth order Jacobi Polynomials + ! (jac) for a vector of positions x on the interval (-1,1), + ! of length npoints. + ! + ! See for example the recurrence relations + ! in equation 2.5.4 (page 70) in + ! + ! "Spectral Methods in Fluid Dynamics", + ! by C. Canuto, M.Y. Hussaini, A. Quarteroni, T.A.Zang + ! Springer-Verlag, 1988. + ! + ! =========================================================== + + function jacobi_polynomials(n, alpha, beta, npoints, x) result(jac) + + integer, intent(in) :: n ! order of the Jacobi Polynomial + real (kind=r8) :: alpha + real (kind=r8) :: beta + integer, intent(in) :: npoints + real (kind=r8) :: x(npoints) + real (kind=r8) :: jac(npoints) + + ! Local variables + + real (kind=r8) :: a1k + real (kind=r8) :: a2k + real (kind=r8) :: a3k + real (kind=r8) :: da2kdx + + real (kind=r8) :: jacp1 + real (kind=r8) :: jacm1 + real (kind=r8) :: jac0 + real (kind=r8) :: xtmp + + real (kind=r8) :: c2,c1,c0 + integer j,k + + c0 = 0.0_r8 + c1 = 1.0_r8 + c2 = 2.0_r8 + + do j = 1,npoints + + xtmp=x(j) + + jacm1=c1 + jac0 =(c1+alpha)*xtmp + + do k=1,n-1 + a1k=c2*(k+c1)*(k+alpha+beta+c1)*(c2*k+alpha+beta) + da2kdx=(c2*k+alpha+beta+c2)*(c2*k+alpha+beta+c1)*(c2*k+alpha+beta) + a2k=(c2*k+alpha+beta+c1)*(alpha*alpha-beta*beta) + xtmp*da2kdx + a3k=c2*(k+alpha)*(k+beta)*(c2*k+alpha+beta+c2) + jacp1=(a2k*jac0-a3k*jacm1)/a1k + jacm1=jac0 + jac0 =jacp1 + end do + + if (n==0)jac0=jacm1 + jac(j)=jac0 + end do + + end function jacobi_polynomials + + ! 
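An editorial aside on the recurrence above: every call to jacobi and jacobi_polynomials in this module passes alpha = beta = 0, for which the general three-term relation reduces to the Legendre recurrence

    (k+1) P_{k+1}(x) = (2k+1) x P_k(x) - k P_{k-1}(x),

which is the same relation the legendre function later in this module applies, with the index shifted by one.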
================================================ + ! This routine computes the first derivatives of Nth + ! order Jacobi Polynomials (djac) for a vector of + ! positions x on the interval (-1,1), of length npoints. + ! + ! See for example the recurrence relations + ! in equation 2.5.4 (page 70) in + ! + ! "Spectral Methods in Fluid Dynamics", + ! by C. Canuto, M.Y. Hussaini, A. Quarteroni, T.A.Zang + ! Springer-Verlag, 1988. + ! + ! ================================================ + + function jacobi_derivatives(n, alpha, beta, npoints, x) result(djac) + + integer , intent(in) :: n ! order of the Jacobi Polynomial + real (kind=r8), intent(in) :: alpha + real (kind=r8), intent(in) :: beta + integer , intent(in) :: npoints + real (kind=r8), intent(in) :: x(npoints) + + real (kind=r8) :: djac(npoints) + + ! Local variables + + ! Local variables + + real (kind=r8) :: a1k + real (kind=r8) :: a2k + real (kind=r8) :: a3k + real (kind=r8) :: da2kdx + + real (kind=r8) :: jacp1 + real (kind=r8) :: jacm1 + real (kind=r8) :: jac0 + real (kind=r8) :: djacp1 + real (kind=r8) :: djacm1 + real (kind=r8) :: djac0 + + real (kind=r8) :: xtmp + + real (kind=r8) :: c2,c1,c0 + integer j,k + + c0 = 0.0_r8 + c1 = 1.0_r8 + c2 = 2.0_r8 + + do j = 1,npoints + + xtmp=x(j) + + jacm1=c1 + jac0 =(c1+alpha)*xtmp + + djacm1 = c0 + djac0 = (c1+alpha) + + do k=1,n-1 + a1k=c2*(k+c1)*(k+alpha+beta+c1)*(c2*k+alpha+beta) + da2kdx=(c2*k+alpha+beta+c2)*(c2*k+alpha+beta+c1)*(c2*k+alpha+beta) + a2k=(c2*k+alpha+beta+c1)*(alpha*alpha-beta*beta) + xtmp*da2kdx + a3k=c2*(k+alpha)*(k+beta)*(c2*k+alpha+beta+c2) + + jacp1=(a2k*jac0-a3k*jacm1)/a1k + djacp1=(a2k*djac0+da2kdx*jac0-a3k*djacm1)/a1k + + jacm1=jac0 + jac0=jacp1 + + djacm1=djac0 + djac0=djacp1 + + end do + + if (n==0)djac0=djacm1 + djac(j)=djac0 + + end do + + end function jacobi_derivatives + + ! =================================================== + ! + ! legendre: + ! + ! Compute the legendre polynomials using + ! the recurrence relationship. + ! return leg(m+1) = P_N(x) for m=0..N + ! p_3 = Legendre polynomial of degree N + ! p_2 = Legendre polynomial of degree N-1 at x + ! p_1 = Legendre polynomial of degree N-2 at x + ! + ! =================================================== + + function legendre(x,N) result(leg) + + integer :: N + real (kind=r8) :: x + real (kind=r8) :: leg(N+1) + + real (kind=r8) :: p_1, p_2, p_3 + integer :: k + + p_3 = 1.0_r8 + leg(1)=p_3 + if (n.ne.0) then + p_2 = p_3 + p_3 = x + leg(2)=p_3 + do k = 2,N + p_1 = p_2 + p_2 = p_3 + p_3 = ( (2*k-1)*x*p_2 - (k-1)*p_1 ) / k + leg(k+1)=p_3 + end do + end if + + end function legendre + + + ! =========================================== + ! quad_norm: + ! + ! compute normalization constants + ! for k=1,N order Legendre polynomials + ! + ! e.g. gamma(k) in Canuto, page 58. + ! + ! =========================================== + + function quad_norm(gquad,N) result(gamma) + type (quadrature_t), intent(in) :: gquad + integer , intent(in) :: N + + real (kind=r8) :: gamma(N) + + ! Local variables + real (kind=r8) :: leg(N) + integer :: i,k + + gamma(:)=0.0_r8 + + do i=1,N + leg=legendre(gquad%points(i),N-1) + do k=1,N + gamma(k)= gamma(k)+leg(k)*leg(k)*gquad%weights(i) + end do + end do + + end function quad_norm + + ! ======================= + ! TrapN: + ! Numerical recipes + ! ======================= + + subroutine trapN(f,a,b,N,it,s) + INTERFACE + FUNCTION f(x) RESULT(f_x) ! 
Function to be integrated + use shr_kind_mod, only: r8=>shr_kind_r8 + real(kind=r8), INTENT(IN) :: x + real(kind=r8) :: f_x + END FUNCTION f + END INTERFACE + + real(kind=r8),intent(in) :: a,b + integer, intent(in) :: N + integer, intent(inout) :: it + real(kind=r8), intent(inout) :: s + + real(kind=r8) :: ssum + real(kind=r8) :: del + real(kind=r8) :: rtnm + real(kind=r8) :: x + + integer :: j + + if (N==1) then + s = 0.5_r8*(b-a)*(f(a) + f(b)) + it =1 + else + ssum = 0.0_r8 + rtnm =1.0_r8/it + del = (b-a)*rtnm + x=a+0.5_r8*del + do j=1,it + ssum = ssum + f(x) + x=x+del + end do + s=0.5_r8*(s + del*ssum) + it=2*it + end if + + end subroutine trapN + + ! ========================================== + ! Trapezoid Rule for integrating functions + ! from a to b with residual error eps + ! ========================================== + + function trapezoid(f,a,b,eps) result(Integral) + + integer, parameter :: Nmax = 25 ! At most 2^Nmax + 1 points in integral + + INTERFACE + FUNCTION f(x) RESULT(f_x) ! Function to be integrated + use shr_kind_mod, only: r8=>shr_kind_r8 + real(kind=r8), INTENT(IN) :: x + real(kind=r8) :: f_x + END FUNCTION f + END INTERFACE + + real(kind=r8), intent(in) :: a,b ! The integral bounds + real(kind=r8), intent(in) :: eps ! relative error bound for integral + real(kind=r8) :: Integral ! the integral result (within eps) + real(kind=r8) :: s ! Integral approximation + real(kind=r8) :: sold ! previous integral approx + + integer :: N + integer :: it + + ! ============================================================== + ! Calculate I here using trapezoid rule using f and a DO loop... + ! ============================================================== + + s = 1.0e30_r8 + sold = 0.0_r8 + N=1 + it=0 + do while(N<=Nmax .and. ABS(s-sold)>eps*ABS(sold)) + sold=s + call trapN(f,a,b,N,it,s) + N=N+1 + end do + + Integral = s + + end function trapezoid + + ! ========================================== + ! Simpsons Rule for integrating functions + ! from a to b with residual error eps + ! ========================================== + + function simpsons(f,a,b,eps) result(Integral) + + integer, parameter :: Nmax = 25 ! At most 2^Nmax + 1 points in integral + + INTERFACE + FUNCTION f(x) RESULT(f_x) ! Function to be integrated + use shr_kind_mod, only: r8=>shr_kind_r8 + real(kind=r8), INTENT(IN) :: x + real(kind=r8) :: f_x + END FUNCTION f + END INTERFACE + + real(kind=r8), intent(in) :: a,b ! The integral bounds + real(kind=r8), intent(in) :: eps ! relative error bound for integral + real(kind=r8) :: Integral ! the integral result (within eps) + real(kind=r8) :: s ! Integral approximation + real(kind=r8) :: os ! previous integral approx + real(kind=r8) :: st ! Integral approximation + real(kind=r8) :: ost ! previous integral approx + + integer :: N + integer :: it + + ! ============================================================== + ! Calculate I here using trapezoid rule using f and a DO loop... + ! ============================================================== + + ost= 0.0_r8 + s = 1.0e30_r8 + os = 0.0_r8 + + N=1 + it=0 + do while ((N<=Nmax .and. ABS(s-os)>eps*ABS(os) ) .or. N<=2) + os = s + call trapN(f,a,b,N,it,st) + s=(4.0_r8*st-ost)/3.0_r8 + ost=st + N=N+1 + end do + + Integral = s + + end function simpsons + + + ! ========================================== + ! gaussian_int: + ! + ! Gaussian Quadrature Rule for integrating + ! function f from a to b with gs weights and + ! points with precomputed gaussian quadrature + ! and weights. + ! 
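An editorial usage sketch showing how a caller satisfies the procedure INTERFACE expected by the trapezoid and simpsons drivers above; quad_demo is a hypothetical module, not part of this patch, and it assumes quadrature_mod and shr_kind_mod are available:

module quad_demo
  use shr_kind_mod, only: r8 => shr_kind_r8
  implicit none
contains
  ! Integrand with the signature required by the INTERFACE blocks above.
  function fsq(x) result(f_x)
    real(kind=r8), intent(in) :: x
    real(kind=r8)             :: f_x
    f_x = x*x
  end function fsq

  subroutine quad_demo_run()
    use quadrature_mod, only: trapezoid, simpsons
    real(kind=r8) :: s1, s2
    s1 = trapezoid(fsq, 0.0_r8, 1.0_r8, 1.0e-10_r8)   ! should approach 1/3
    s2 = simpsons (fsq, 0.0_r8, 1.0_r8, 1.0e-10_r8)   ! should approach 1/3
    write(*,*) 'trapezoid, simpsons of x**2 on [0,1]: ', s1, s2
  end subroutine quad_demo_run
end module quad_demo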
========================================== + + function gaussian_int(f,a,b,gs) result(Integral) + + integer, parameter :: Nmax = 10 ! At most 2^Nmax + 1 points in integral + + INTERFACE + FUNCTION f(x) RESULT(f_x) ! Function to be integrated + use shr_kind_mod, only: r8=>shr_kind_r8 + real(kind=r8), INTENT(IN) :: x + real(kind=r8) :: f_x + END FUNCTION f + END INTERFACE + + real(kind=r8), intent(in) :: a,b ! The integral bounds + type(quadrature_t), intent(in) :: gs ! gaussian points/wts + real(kind=r8) :: Integral ! the integral result (within eps) + + integer :: i + real (kind=r8) :: s,x + ! ============================================================== + ! Calculate I = S f(x)dx here using gaussian quadrature + ! ============================================================== + + s = 0.0_r8 + do i=1,SIZE(gs%points) + x = 0.50_r8*((b-a)*gs%points(i) + (b+a)) + s = s + gs%weights(i)*f(x) + end do + Integral = s*(0.5_r8*(b-a)) + + end function gaussian_int + +end module quadrature_mod + + + + + diff --git a/src/dynamics/se/dycore/reduction_mod.F90 b/src/dynamics/se/dycore/reduction_mod.F90 new file mode 100644 index 00000000..3f8afbc3 --- /dev/null +++ b/src/dynamics/se/dycore/reduction_mod.F90 @@ -0,0 +1,447 @@ +module reduction_mod + use shr_kind_mod, only: r8=>shr_kind_r8 + use spmd_utils, only: mpi_sum, mpi_min, mpi_max, mpi_real8, mpi_integer + use spmd_utils, only: mpi_success + use cam_abortutils, only: endrun + + implicit none + private + + type, public :: ReductionBuffer_int_1d_t + integer, dimension(:), pointer :: buf + integer :: len=0 + integer :: ctr + end type ReductionBuffer_int_1d_t + + type, public :: ReductionBuffer_r_1d_t + real (kind=r8), dimension(:), pointer :: buf + integer :: len=0 + integer :: ctr + end type ReductionBuffer_r_1d_t + + type, public :: ReductionBuffer_ordered_1d_t + real (kind=r8), dimension(:,:),pointer :: buf + integer :: len=0 + integer :: ctr + end type ReductionBuffer_ordered_1d_t + + public :: ParallelMin + public :: ParallelMax + + type (ReductionBuffer_int_1d_t), public :: red_max_int + type (ReductionBuffer_int_1d_t), public :: red_sum_int + type (ReductionBuffer_r_1d_t), public :: red_flops + type (ReductionBuffer_r_1d_t), public :: red_max + type (ReductionBuffer_r_1d_t), public :: red_min + type (ReductionBuffer_r_1d_t), public :: red_sum +#ifndef Darwin + SAVE red_max_int, red_sum_int, red_flops, red_max, red_min, red_sum +#endif + + interface ParallelMin + module procedure ParallelMin1d + module procedure ParallelMin0d + end interface + interface ParallelMax + module procedure ParallelMax1d_int + module procedure ParallelMax2d_int + module procedure ParallelMax1d + module procedure ParallelMax0d + module procedure ParallelMax0d_int + end interface + + interface pmax_mt + module procedure pmax_mt_int_1d + module procedure pmax_mt_r_1d + end interface + + interface pmin_mt + module procedure pmin_mt_r_1d + end interface + + interface InitReductionBuffer + module procedure InitReductionBuffer_int_1d + module procedure InitReductionBuffer_r_1d + module procedure InitReductionBuffer_ordered_1d + end interface + + public :: InitReductionBuffer + public :: pmax_mt, pmin_mt + public :: ElementSum_1d + +contains + + function ParallelMin1d(data,hybrid) result(pmin) + use hybrid_mod, only : hybrid_t + + real(kind=r8), intent(in) :: data(:) + type (hybrid_t), intent(in) :: hybrid + real(kind=r8) :: pmin + + real(kind=r8) :: tmp(1) + + + tmp(1) = MINVAL(data) + call pmin_mt(red_min,tmp,1,hybrid) + pmin = red_min%buf(1) + + end function ParallelMin1d 
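A minimal usage sketch for the reduction helpers in this module, mirroring the calls made from prim_printstate; editorial illustration only, assuming elem, nets, nete, n0 and a valid hybrid_t handle are in scope as they are in the dycore callers:

real(kind=r8) :: tmin_local(nets:nete), tmax_local(nets:nete)
real(kind=r8) :: tmin, tmax
integer       :: ie

do ie = nets, nete                       ! thread-private element range
  tmin_local(ie) = MINVAL(elem(ie)%state%T(:,:,:,n0))
  tmax_local(ie) = MAXVAL(elem(ie)%state%T(:,:,:,n0))
end do
! Reduces over OpenMP threads and MPI ranks; all threads see the result.
tmin = ParallelMin(tmin_local, hybrid)
tmax = ParallelMax(tmax_local, hybrid)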
+ + function ParallelMin0d(data,hybrid) result(pmin) + use hybrid_mod, only : hybrid_t + implicit none + real(kind=r8), intent(in) :: data + type (hybrid_t), intent(in) :: hybrid + real(kind=r8) :: pmin + real(kind=r8) :: tmp(1) + tmp(1) = data + call pmin_mt(red_min,tmp,1,hybrid) + pmin = red_min%buf(1) + + end function ParallelMin0d + !================================================== + function ParallelMax2d_int(data, n, m, hybrid) result(pmax) + use hybrid_mod, only : hybrid_t + implicit none + integer, intent(in) :: n,m + integer, intent(in), dimension(n,m) :: data + type (hybrid_t), intent(in) :: hybrid + integer, dimension(n,m) :: pmax + integer, dimension(n*m) :: tmp + integer :: i,j + do i=1,n + do j=1,m + tmp(i+(j-1)*n) = data(i,j) + enddo + enddo + call pmax_mt(red_max_int,tmp,n*m,hybrid) + do i=1,n + do j=1,m + pmax(i,j) = red_max_int%buf(i+(j-1)*n) + enddo + enddo + end function ParallelMax2d_int + + function ParallelMax1d_int(data, len, hybrid) result(pmax) + use hybrid_mod, only : hybrid_t + implicit none + integer, intent(in) :: len + integer, intent(in), dimension(len) :: data + type (hybrid_t), intent(in) :: hybrid + integer, dimension(len) :: pmax, tmp + + tmp = data(:) + call pmax_mt(red_max_int,tmp,len,hybrid) + pmax(:) = red_max_int%buf(1:len) + + end function ParallelMax1d_int + function ParallelMax1d(data,hybrid) result(pmax) + use hybrid_mod, only : hybrid_t + implicit none + real(kind=r8), intent(in) :: data(:) + type (hybrid_t), intent(in) :: hybrid + real(kind=r8) :: pmax + + real(kind=r8) :: tmp(1) + + + tmp(1) = MAXVAL(data) + call pmax_mt(red_max,tmp,1,hybrid) + pmax = red_max%buf(1) + + end function ParallelMax1d + function ParallelMax0d(data,hybrid) result(pmax) + use hybrid_mod, only : hybrid_t + implicit none + real(kind=r8), intent(in) :: data + type (hybrid_t), intent(in) :: hybrid + real(kind=r8) :: pmax + real(kind=r8) :: tmp(1) + + tmp(1)=data + + call pmax_mt(red_max,tmp,1,hybrid) + pmax = red_max%buf(1) + + end function ParallelMax0d + function ParallelMax0d_int(data,hybrid) result(pmax) + use hybrid_mod, only : hybrid_t + implicit none + integer , intent(in) :: data + type (hybrid_t), intent(in) :: hybrid + integer :: pmax + integer :: tmp(1) + + tmp(1)=data + + call pmax_mt(red_max_int,tmp,1,hybrid) + pmax = red_max_int%buf(1) + + end function ParallelMax0d_int + !================================================== + subroutine InitReductionBuffer_int_1d(red,len) + use thread_mod, only: omp_get_num_threads + integer, intent(in) :: len + type (ReductionBuffer_int_1d_t),intent(out) :: red + + if (omp_get_num_threads()>1) then + call endrun("Error: attempt to allocate reduction buffer in threaded region") + endif + + ! 
if buffer is already allocated and large enough, do nothing + if (len > red%len) then + !buffer is too small, or has not yet been allocated + if (red%len>0) deallocate(red%buf) + red%len = len + allocate(red%buf(len)) + red%buf = 0 + red%ctr = 0 + endif + + end subroutine InitReductionBuffer_int_1d + !**************************************************************** + subroutine InitReductionBuffer_r_1d(red,len) + use thread_mod, only: omp_get_num_threads + integer, intent(in) :: len + type (ReductionBuffer_r_1d_t),intent(out) :: red + + if (omp_get_num_threads()>1) then + call endrun("Error: attempt to allocate reduction buffer in threaded region") + endif + + if (len > red%len) then + if (red%len>0) deallocate(red%buf) + red%len = len + allocate(red%buf(len)) + red%buf = 0.0_R8 + red%ctr = 0 + endif + end subroutine InitReductionBuffer_r_1d + !**************************************************************** + subroutine InitReductionBuffer_ordered_1d(red,len,nthread) + use thread_mod, only: omp_get_num_threads + integer, intent(in) :: len + integer, intent(in) :: nthread + type (ReductionBuffer_ordered_1d_t),intent(out) :: red + + if (omp_get_num_threads()>1) then + call endrun("Error: attempt to allocate reduction buffer in threaded region") + endif + + if (len > red%len) then + if (red%len>0) deallocate(red%buf) + red%len = len + allocate(red%buf(len,nthread+1)) + red%buf = 0.0_R8 + red%ctr = 0 + endif + end subroutine InitReductionBuffer_ordered_1d + + ! ======================================= + ! pmax_mt: + ! + ! thread safe, parallel reduce maximum + ! of a one dimensional reduction vector + ! ======================================= + + subroutine pmax_mt_int_1d(red,redp,len,hybrid) + use hybrid_mod, only : hybrid_t + + type (ReductionBuffer_int_1d_t) :: red ! shared memory reduction buffer struct + integer, intent(in) :: len ! buffer length + integer, intent(inout) :: redp(len) ! thread private vector of partial sum + type (hybrid_t), intent(in) :: hybrid ! parallel handle + + ! Local variables +#ifdef _MPI + integer ierr +#endif + + integer :: k + if (len>red%len) then + call endrun('ERROR: threadsafe reduction buffer too small') + end if + + !$OMP BARRIER + !$OMP CRITICAL (CRITMAX) + if (red%ctr == 0) red%buf(1:len)= -9999 + if (red%ctr < hybrid%NThreads) then + do k=1,len + red%buf(k)=MAX(red%buf(k),redp(k)) + enddo + red%ctr=red%ctr+1 + end if + if (red%ctr == hybrid%NThreads) red%ctr=0 + !$OMP END CRITICAL (CRITMAX) +#ifdef _MPI + !$OMP BARRIER + if (hybrid%ithr==0) then + + call MPI_Allreduce(red%buf(1),redp,len,Mpi_integer, & + MPI_MAX,hybrid%par%comm,ierr) + + red%buf(1:len)=redp(1:len) + end if +#endif + !$OMP BARRIER + + end subroutine pmax_mt_int_1d + + subroutine pmax_mt_r_1d(red,redp,len,hybrid) + use hybrid_mod, only : hybrid_t + + type (ReductionBuffer_r_1d_t) :: red ! shared memory reduction buffer struct + real (kind=r8), intent(inout) :: redp(:) ! thread private vector of partial sum + integer, intent(in) :: len ! buffer length + type (hybrid_t), intent(in) :: hybrid ! parallel handle + + ! 
Local variables +#ifdef _MPI + integer ierr +#endif + + integer :: k + if (len>red%len) then + call endrun('ERROR: threadsafe reduction buffer too small') + end if + + !$OMP BARRIER + !$OMP CRITICAL (CRITMAX) + if (red%ctr == 0) red%buf(1:len)= -9.11e30_r8 + if (red%ctr < hybrid%NThreads) then + do k=1,len + red%buf(k)=MAX(red%buf(k),redp(k)) + enddo + red%ctr=red%ctr+1 + end if + if (red%ctr == hybrid%NThreads) red%ctr=0 + !$OMP END CRITICAL (CRITMAX) +#ifdef _MPI + !$OMP BARRIER + if (hybrid%ithr==0) then + + call MPI_Allreduce(red%buf(1),redp,len,Mpi_real8, & + MPI_MAX,hybrid%par%comm,ierr) + + red%buf(1:len)=redp(1:len) + end if +#endif + !$OMP BARRIER + + end subroutine pmax_mt_r_1d + + ! ======================================= + ! pmin_mt: + ! + ! thread safe, parallel reduce maximum + ! of a one dimensional reduction vector + ! ======================================= + + subroutine pmin_mt_r_1d(red,redp,len,hybrid) + use hybrid_mod, only : hybrid_t + + type (ReductionBuffer_r_1d_t) :: red ! shared memory reduction buffer struct + real (kind=r8), intent(inout) :: redp(:) ! thread private vector of partial sum + integer, intent(in) :: len ! buffer length + type (hybrid_t), intent(in) :: hybrid ! parallel handle + + ! Local variables + +#ifdef _MPI + integer :: ierr +#endif + integer :: k + + if (len>red%len) then + call endrun('ERROR: threadsafe reduction buffer too small') + end if + + !$OMP BARRIER + !$OMP CRITICAL (CRITMAX) + if (red%ctr == 0) red%buf(1:len)= 9.11e30_r8 + if (red%ctr < hybrid%NThreads) then + do k=1,len + red%buf(k)=MIN(red%buf(k),redp(k)) + enddo + red%ctr=red%ctr+1 + end if + if (red%ctr == hybrid%NThreads) red%ctr=0 + !$OMP END CRITICAL (CRITMAX) +#ifdef _MPI + !$OMP BARRIER + if (hybrid%ithr==0) then + + call MPI_Allreduce(red%buf(1),redp,len,Mpi_real8, & + MPI_MIN,hybrid%par%comm,ierr) + + red%buf(1:len)=redp(1:len) + end if +#endif + !$OMP BARRIER + + end subroutine pmin_mt_r_1d + + subroutine ElementSum_1d(res,variable,type,hybrid) + use hybrid_mod, only: hybrid_t + use dimensions_mod, only: nelem + use parallel_mod, only: ORDERED + + ! ========================== + ! Arguments + ! ========================== + real(kind=r8), intent(out) :: res + real(kind=r8), intent(in) :: variable(:) + integer, intent(in) :: type + type (hybrid_t), intent(in) :: hybrid + + ! ========================== + ! Local Variables + ! ========================== + + ! + ! Note this is a real kludge here since it may be used for + ! arrays of size other then nelem + ! + +#ifdef _MPI + integer :: errorcode,errorlen + character(len=80) :: errorstring + + real(kind=r8) :: local_sum + integer :: ierr +#else + integer :: i +#endif + +#ifdef _MPI + if(hybrid%ithr == 0) then + local_sum=SUM(variable) + call MPI_Barrier(hybrid%par%comm,ierr) + + call MPI_Allreduce(local_sum,res,1,Mpi_real8, & + MPI_SUM,hybrid%par%comm,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,'ElementSum_1d: Error after call to MPI_Allreduce: ',errorstring + endif + endif +#else + if(hybrid%ithr == 0) then + if(type == ORDERED) then + ! =========================== + ! Perform the ordererd sum + ! 
=========================== + res = 0.0_r8 + do i=1,nelem + res = res + variable(i) + enddo + else + res=SUM(variable) + endif + endif +#endif + + end subroutine ElementSum_1d + +end module reduction_mod diff --git a/src/dynamics/se/dycore/schedtype_mod.F90 b/src/dynamics/se/dycore/schedtype_mod.F90 new file mode 100644 index 00000000..a4efb146 --- /dev/null +++ b/src/dynamics/se/dycore/schedtype_mod.F90 @@ -0,0 +1,59 @@ +module schedtype_mod + + use metagraph_mod, only : MetaEdge_t + + implicit none + private + type, public :: Cycle_t + integer :: tag + integer :: dest + integer :: source + integer :: lengthP + integer :: lengthP_ghost + integer :: lengthS + integer :: type + integer :: ptrP + integer :: ptrP_ghost + integer :: ptrS + logical :: onNode + type (MetaEdge_t),pointer :: edge + end type Cycle_t + + type, public :: pgindex_t + integer :: elemid + integer :: edgeid + integer :: mesgid + integer :: lenP,lenS + integer :: edgeType + end type pgindex_t + + type, public :: Schedule_t + integer :: ncycles + integer :: nelemd + integer :: placeholder ! total integer count should be even + integer :: nSendCycles + integer :: nRecvCycles + integer :: nInter ! number of off-node or inter node communication cycles + integer :: nIntra ! number of on-node or intra node communication cycles + integer :: padding + integer,pointer :: Local2Global(:) + integer,pointer :: destFull(:) + integer,pointer :: srcFull(:) + type (Cycle_t), pointer :: Cycle(:) + type (Cycle_t), pointer :: SendCycle(:) + type (Cycle_t), pointer :: RecvCycle(:) + type (Cycle_t), pointer :: MoveCycle(:) + type (pgindex_t), pointer :: pIndx(:) + type (pgindex_t), pointer :: gIndx(:) + integer :: pPtr,gPtr + end type Schedule_t + + type (Schedule_t), public, allocatable, target :: Schedule(:) + type (Schedule_t), public, allocatable, target :: gSchedule(:) + type (Schedule_t), public, allocatable, target :: sSchedule(:) + + integer,public,parameter :: HME_Cardinal = 101 + integer,public,parameter :: HME_Ordinal = 102 + + +end module schedtype_mod diff --git a/src/dynamics/se/dycore/schedule_mod.F90 b/src/dynamics/se/dycore/schedule_mod.F90 new file mode 100644 index 00000000..cabdcbb7 --- /dev/null +++ b/src/dynamics/se/dycore/schedule_mod.F90 @@ -0,0 +1,714 @@ +module schedule_mod + use metagraph_mod, only: MetaEdge_t + use schedtype_mod, only: Cycle_t, Schedule_t, schedule, pgindex_t, HME_Ordinal,HME_Cardinal + use parallel_mod, only: parallel_t + use cam_logfile, only: iulog + + implicit none + private + + type, public :: GraphStats_t + integer :: offnode + integer :: onnode + integer :: LB + integer :: padding + end type GraphStats_t + + integer,public,parameter :: HME_CYCLE_SEND=1 + integer,public,parameter :: HME_CYCLE_RECV=2 + integer,public,parameter :: HME_CYCLE_MOVE=3 + integer,public,parameter :: HME_CYCLE_ANY =4 + + + integer,public,parameter :: BNDRY_EXCHANGE_MESSAGE=10 + integer,private,allocatable,target :: Global2Local(:) + + integer :: MinNelemd,MaxNelemd + + public :: genEdgeSched ! 
Setup the communication schedule for the edge based boundary exchange + public :: PrintSchedule, PrintCycle + public :: PrintIndex + public :: CheckSchedule + public :: FindBufferSlot + +contains + + subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) + use element_mod, only: element_t + use metagraph_mod, only: metavertex_t + use dimensions_mod, only: nelem, max_neigh_edges + use gridgraph_mod, only: gridvertex_t, gridedge_t, assignment ( = ) + use cam_abortutils, only: endrun + use spmd_utils, only: mpi_status_size, mpi_info_null, mpi_success + use parallel_mod, only: nComPoints, rrequest, srequest, status, npackpoints + + type(parallel_t), intent(inout) :: par + type(element_t), intent(inout) :: elem(:) + integer, intent(in) :: PartNumber + type (schedule_t), intent(inout) :: LSchedule + type (MetaVertex_t), intent(inout) :: MetaVertex + + integer :: lengthP,lengthS,total_length,lengthp_ghost + integer :: i,j,is,ir,ncycle + integer :: il,ie,ig + integer :: nelemd0 + integer :: jmd + integer :: inbr + integer :: nSched + integer,allocatable :: tmpP(:,:) + integer,allocatable :: tmpS(:,:) + integer,allocatable :: tmpP_ghost(:,:) + integer :: nSend,nRecv,nedges + integer :: icycle + integer :: iSched + logical, parameter :: VerbosePrint=.FALSE. + logical, parameter :: Debug=.FALSE. + character(len=*), parameter :: subname = 'genEdgeSched' + integer :: errorcode,errorlen + character*(80) :: errorstring + integer, allocatable :: intracommranks(:) + integer :: numIntra, numInter, rank + logical :: OnNode + + + integer :: ierr + integer :: l1,l2,l1id,l2id + integer :: src,dest,wgt + integer :: icIntra, icInter + + integer, allocatable :: srcFull(:), destFull(:), srcweightFull(:), destweightFull(:) + integer, allocatable :: srcInter(:),destInter(:), srcweightInter(:),destweightInter(:) + integer, allocatable :: srcIntra(:),destIntra(:), srcweightIntra(:),destweightIntra(:) + + logical :: reorder + integer :: sizeGroup, groupFull + + nSched=SIZE(schedule) + ! ================================================ + ! allocate some arrays for the call to MPI_gatherv + ! ================================================ + + MinNelemd = nelem + MaxNelemd = 0 + ! ===================================================== + ! It looks like this is only used in this routine... + ! so no need to put it in the schedule data-structure + ! ===================================================== + allocate(Global2Local(nelem)) + if(Debug) write(iulog,*)'genEdgeSched: point #1' + iSched = PartNumber + + nelemd0 = MetaVertex%nmembers + MaxNelemd = AMAX0(MaxNelemd,nelemd0) + MinNelemd = AMIN0(MinNelemd,nelemd0) + if(Debug) write(iulog,*)'genEdgeSched: point #2' + + if(Debug) write(iulog,*)'genEdgeSched: point #3' + LSchedule%ncycles = MetaVertex%nedges + LSchedule%nelemd = nelemd0 + if(Debug) write(iulog,*)'genEdgeSched: point #4' + + ! Note the minus one is for the internal node + nedges = MetaVertex%nedges + if(2*(nedges/2) .eq. nedges) then + nedges = nedges/2 + else + nedges = (nedges-1)/2 + endif + LSchedule%nSendCycles = nedges + LSchedule%nRecvCycles = nedges + if(Debug) write(iulog,*)'genEdgeSched: point #5' + + ! Temporary array to calculate the Buffer Slot + allocate(tmpP(2,nedges+1)) + allocate(tmpS(2,nedges+1)) + allocate(tmpP_ghost(2,nedges+1)) + + + ! Allocate all the cycle structures + allocate(LSchedule%SendCycle(nedges)) + allocate(LSchedule%RecvCycle(nedges)) + allocate(LSchedule%MoveCycle(1)) + + ! Initialize the schedules... 
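+    ! ----------------------------------------------------------------------
+    ! Added explanatory note (not part of the original HOMME sources):
+    ! in the loops further below each metagraph edge is classified by
+    ! comparing its tail/head vertex against PartNumber:
+    !   tail == PartNumber .and. head /= PartNumber -> a SendCycle (data leaving this task)
+    !   head == PartNumber .and. tail /= PartNumber -> a RecvCycle (data arriving at this task)
+    !   head == PartNumber .and. tail == PartNumber -> the single MoveCycle (on-task copy)
+    ! ----------------------------------------------------------------------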
+ LSchedule%MoveCycle(1)%ptrP = 0 + LSchedule%MoveCycle(1)%ptrS = 0 + LSchedule%MoveCycle(1)%lengthP = 0 + if(Debug) write(iulog,*)'genEdgeSched: point #6' + + !================================================================== + ! Allocate and initalized the index translation arrays + Global2Local = -1 + allocate(LSchedule%Local2Global(nelemd0)) + allocate(LSchedule%pIndx(max_neigh_edges*nelemd0)) + allocate(LSchedule%gIndx(max_neigh_edges*nelemd0)) + + LSchedule%pIndx(:)%elemId = -1 + LSchedule%pIndx(:)%edgeId = -1 + LSchedule%pIndx(:)%lenP = -1 + LSchedule%pIndx(:)%lenS = -1 + LSchedule%pIndx(:)%mesgid = -1 + LSchedule%pIndx(:)%edgeType = -1 + + LSchedule%gIndx(:)%elemId = -1 + LSchedule%gIndx(:)%edgeId = -1 + LSchedule%gIndx(:)%lenP = -1 + LSchedule%gIndx(:)%lenS = -1 + LSchedule%gIndx(:)%mesgid = -1 + LSchedule%gIndx(:)%edgeType = -1 + + LSchedule%pPtr=1 + LSchedule%gPtr=1 + + if(Debug) write(iulog,*)'genEdgeSched: point #7' + + do il=1,nelemd0 + ig = MetaVertex%members(il)%number + Global2Local(ig)=il + LSchedule%Local2Global(il)=ig + elem(il)%desc%putmapP=-1 + elem(il)%desc%getmapP=-1 + elem(il)%desc%putmapS=-1 + elem(il)%desc%getmapS=-1 + elem(il)%desc%putmapP_ghost=-1 + elem(il)%desc%getmapP_ghost=-1 + elem(il)%desc%reverse = .FALSE. + enddo + !================================================================== + if(Debug) write(iulog,*)'genEdgeSched: point #8' + + + + total_length = 0 + ncycle = LSchedule%ncycles + ! + ! Send Cycle + ! + is=1 + tmpP(1,:) = -1 + tmpP(2,:) = 0 + tmpS(1,:) = -1 + tmpS(2,:) = 0 + tmpP_ghost(1,:) = -1 + tmpP_ghost(2,:) = 0 + + do j=1,ncycle + lengthP = MetaVertex%edges(j)%wgtP + lengthS = MetaVertex%edges(j)%wgtS + lengthP_ghost = MetaVertex%edges(j)%wgtP_ghost + + if ((MetaVertex%edges(j)%TailVertex == PartNumber) .AND. & + (MetaVertex%edges(j)%HeadVertex .ne. PartNumber) ) then + inbr = MetaVertex%edges(j)%HeadVertex + if(Debug) write(iulog,*)'genEdgeSched: point #11', par%rank + LSchedule%SendCycle(is)%ptrP = FindBufferSlot(inbr,lengthP,tmpP) + LSchedule%SendCycle(is)%ptrS = FindBufferSlot(inbr,lengthS,tmpS) + LSchedule%SendCycle(is)%ptrP_ghost= FindBufferSlot(inbr,lengthP_ghost,tmpP_ghost) + call SetCycle(par, elem, LSchedule,LSchedule%SendCycle(is),MetaVertex%edges(j), HME_CYCLE_SEND) + if(Debug) write(iulog,*)'genEdgeSched: point #12',par%rank + is = is+1 + endif + enddo + + ! + ! Recv Cycle: Note that by reinitializing the tmpP array we change the structure of the receive buffer + ! + ir=1 + tmpP(1,:) = -1 + tmpP(2,:) = 0 + tmpS(1,:) = -1 + tmpS(2,:) = 0 + tmpP_ghost(1,:) = -1 + tmpP_ghost(2,:) = 0 + + do j=1,ncycle + lengthP = MetaVertex%edges(j)%wgtP + lengthS = MetaVertex%edges(j)%wgtS + lengthP_ghost = MetaVertex%edges(j)%wgtP_ghost + + if ( (MetaVertex%edges(j)%HeadVertex == PartNumber) .AND. & + (MetaVertex%edges(j)%TailVertex .ne. PartNumber) ) then + inbr = MetaVertex%edges(j)%TailVertex + if(Debug) write(iulog,*)'genEdgeSched: point #13',par%rank + LSchedule%RecvCycle(ir)%ptrP = FindBufferSlot(inbr,lengthP,tmpP) + LSchedule%RecvCycle(ir)%ptrS = FindBufferSlot(inbr,lengthS,tmpS) + LSchedule%RecvCycle(ir)%ptrP_ghost= FindBufferSlot(inbr,lengthP_ghost,tmpP_ghost) + call SetCycle(par, elem, LSchedule,LSchedule%RecvCycle(ir),MetaVertex%edges(j),HME_CYCLE_RECV) + if(Debug) write(iulog,*)'genEdgeSched: point #14',par%rank + ir = ir+1 + endif + enddo + + ! Put the move cycle at the end of the buffer. 
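+    ! ----------------------------------------------------------------------
+    ! Added explanatory note (not part of the original HOMME sources); the
+    ! neighbor ids and lengths used here are made-up for illustration only:
+    ! FindBufferSlot returns one contiguous starting offset per neighbor,
+    ! e.g. neighbors (5,12,7) with lengths (4,6,3) get offsets 1, 5 and 11,
+    ! and a repeated query for neighbor 12 returns 5 again.  Because tmpP,
+    ! tmpS and tmpP_ghost were reinitialized before the receive loop above,
+    ! the MoveCycle slot located in the loop below is appended after all of
+    ! the receive segments, i.e. at the end of the buffer as noted above.
+    ! ----------------------------------------------------------------------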
+ do j=1,ncycle + lengthP = MetaVertex%edges(j)%wgtP + lengthS = MetaVertex%edges(j)%wgtS + lengthP_ghost = MetaVertex%edges(j)%wgtP_ghost + + if((MetaVertex%edges(j)%HeadVertex == PartNumber) .AND. & + (MetaVertex%edges(j)%TailVertex == PartNumber)) then + inbr = PartNumber + if(Debug) write(iulog,*)'genEdgeSched: point #9', par%rank + LSchedule%MoveCycle%ptrP = FindBufferSlot(inbr,lengthP,tmpP) + LSchedule%MoveCycle%ptrS = FindBufferSlot(inbr,lengthS,tmpS) + LSchedule%MoveCycle%ptrP_ghost = FindBufferSlot(inbr,lengthP_ghost,tmpP_ghost) + call SetCycle(par, elem, LSchedule,LSchedule%MoveCycle(1),MetaVertex%edges(j),HME_CYCLE_MOVE) + if(Debug) write(iulog,*)'genEdgeSched: point #10',par%rank + endif + enddo + + deallocate(tmpP) + deallocate(tmpS) + deallocate(tmpP_ghost) + + do ie=1,nelemd0 + ! compute number of neighbers for each element + elem(ie)%desc%actual_neigh_edges=0 + do i=1,max_neigh_edges + if (elem(ie)%desc%globalID(i)>0) then + elem(ie)%desc%actual_neigh_edges=elem(ie)%desc%actual_neigh_edges+1 + endif + enddo + + ! normally, we loop over max_neigh_edges, checking if there is an edge + ! let's create a mapping so that we can loop over actual_neigh_edges + ! sort in REVERSE global id order (so the ones with globalID=0 are last) + do l1 = 1,max_neigh_edges-1 + do l2=l1+1,max_neigh_edges + l1id=elem(ie)%desc%loc2buf(l1) + l2id=elem(ie)%desc%loc2buf(l2) + if (elem(ie)%desc%globalID(l2id) > elem(ie)%desc%globalID(l1id)) then + ! swap index: + l1id=elem(ie)%desc%loc2buf(l2) + elem(ie)%desc%loc2buf(l2)=elem(ie)%desc%loc2buf(l1) + elem(ie)%desc%loc2buf(l1)=l1id + endif + enddo + enddo + + + + + elem(ie)%vertex = MetaVertex%members(ie) + ig = MetaVertex%members(ie)%number + elem(ie)%GlobalId = ig + elem(ie)%LocalId = ie + enddo + + deallocate(Global2Local) + +#ifdef SPMD + !================================================================ + ! Allocate a couple of structures for bndry_exchange + ! done here to remove it from the critical path + !================================================================ + nComPoints = 0 + + nSend = nedges + nRecv = nedges + allocate(Rrequest(nRecv)) + allocate(Srequest(nSend)) + allocate(status(MPI_STATUS_SIZE,nRecv)) + + !=============================================================== + ! Number of communication points ... to be used later to + ! setup the size of the communication buffer for MPI_Ibsend + !=============================================================== + do icycle = 1, nSend + nComPoints = nComPoints + LSchedule%SendCycle(icycle)%lengthP + end do + nPackPoints = nComPoints + LSchedule%MoveCycle(1)%lengthP +#if MPI_VERSION >= 3 + ! 
Create a communicator that only contains the on-node MPI ranks + call MPI_Comm_split_type(par%comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, par%intracomm, ierr) + + call MPI_Comm_size(par%intracomm, par%intracommsize, ierr) + call MPI_Comm_rank(par%intracomm, par%intracommrank, ierr) + + allocate(intracommranks(par%intracommsize)) + call MPI_Allgather(par%rank,1,MPIinteger_t,intracommranks,1,MPIinteger_t,par%intracomm,ierr) + + numIntra=0 + do icycle=1,nSend + rank = LSchedule%SendCycle(icycle)%dest - 1 + onNode = isIntraComm(intracommranks,rank) + LSchedule%SendCycle(icycle)%onNode = onNode + if(onNode) then + numIntra=numIntra+1 + endif + enddo + do icycle=1,nRecv + rank = LSchedule%RecvCycle(icycle)%source - 1 + onNode = isIntraComm(intracommranks,rank) + LSchedule%RecvCycle(icycle)%onNode = onNode + enddo + numInter = nsend-numIntra + + + deallocate(intracommranks) +#else + numIntra = 0 + numInter = nSend + ! Mark all communications as off-node by default + do icycle=1,nSend + LSchedule%SendCycle(icycle)%onNode = .False. + enddo + do icycle=1,nRecv + LSchedule%RecvCycle(icycle)%onNode = .False. + enddo +#endif + LSchedule%nInter = numInter + LSchedule%nIntra = numIntra + + allocate(srcFull(nRecv), srcWeightFull(nRecv),destFull(nSend),destWeightFull(nSend)) + if(numInter>0) then + allocate(srcInter(numInter),srcWeightInter(numInter),destInter(numInter), destWeightInter(numInter)) + endif + if(numIntra>0) then + allocate(srcIntra(numIntra),srcWeightIntra(numIntra),destIntra(numIntra), destWeightIntra(numIntra)) + endif + + icIntra=0 + icInter=0 + do icycle=1,nSend + dest = LSchedule%SendCycle(icycle)%dest - 1 + wgt = LSchedule%SendCycle(icycle)%lengthP + destFull(icycle) = dest + destWeightFull(icycle) = wgt + if(LSchedule%SendCycle(icycle)%onNode) then + icIntra=icIntra+1 + destIntra(icIntra) = dest + destWeightIntra(icIntra) = wgt + else + icInter=icInter+1 + destInter(icInter) = dest + destWeightInter(icInter) = wgt + endif + enddo + + icIntra=0 + icInter=0 + do icycle=1,nRecv + src = LSchedule%RecvCycle(icycle)%source - 1 + wgt = LSchedule%RecvCycle(icycle)%lengthP + srcFull(icycle) = src + srcWeightFUll(icycle) = wgt + if(LSchedule%RecvCycle(icycle)%onNode) then + icIntra=icIntra+1 + srcIntra(icIntra) = src + srcWeightIntra(icIntra) = wgt + else + icInter=icInter+1 + srcInter(icInter) = src + srcWeightInter(icInter) = wgt + endif + enddo + + ! construct the FULL communication graph + reorder=.FALSE. + call MPI_Dist_graph_create_adjacent(par%comm, nRecv,srcFull,srcWeightFull, & + nSend,destFull,destWeightFull,MPI_INFO_NULL,reorder,par%commGraphFull,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,subname,': Error after call to MPI_dist_graph_create_adjacent(FULL) ',errorstring + endif + allocate(LSchedule%destFull(nSend),LSchedule%srcFull(nRecv)) + LSchedule%destFull(:) = destFull(:) + LSchedule%srcFull(:) = srcFull(:) + ! construct the FULL communication -group- (for one-sided operations): + call MPI_Comm_group(par%comm, groupFull, ierr) + call MPI_group_incl(groupFull,nRecv,srcFull,par%groupGraphFull,ierr) + if (ierr .ne. 
MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode, errorstring, errorlen, ierr) + print *,subname, ': Error after call to MPI_Comm_group (groupGraphFull) ', errorstring + endif + call MPi_Group_size(par%groupGraphFull,sizeGroup,ierr) + if(Debug) write (*,199) par%rank,sizeGroup,nSend,nRecv + +199 format ('RANK: ',i4,' genEdgeSched: size of groupGraphFUll is: ',i8,' nSend, nRecv: ',2(i4)) + deallocate(srcFull,srcWeightFull,destFull,destWeightFull) + + ! construct the INTER communication graph + reorder=.FALSE. + if(numInter>0) then + call MPI_Dist_graph_create_adjacent(par%comm, numInter,srcInter,srcWeightInter, & + numInter,destInter,destWeightInter,MPI_INFO_NULL,reorder,par%commGraphInter,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,subname,': Error after call to MPI_dist_graph_create_adjacent(INTER) ',errorstring + endif + deallocate(srcInter,srcWeightInter,destInter,destWeightInter) + endif + + ! construct the INTRA communication graph + reorder=.FALSE. + if(numIntra>0) then + call MPI_Dist_graph_create_adjacent(par%comm, numIntra,srcIntra,srcWeightIntra, & + numIntra,destIntra,destWeightIntra,MPI_INFO_NULL,reorder,par%commGraphIntra,ierr) + if(ierr .ne. MPI_SUCCESS) then + errorcode=ierr + call MPI_Error_String(errorcode,errorstring,errorlen,ierr) + print *,subname,': Error after call to MPI_dist_graph_create_adjacent(INTRA) ',errorstring + endif + deallocate(srcIntra,srcWeightIntra,destIntra,destWeightIntra) + endif + + 200 format ('IAM: ',i4,': ', i2,' of',i2,' comms are interNode') + 201 format ('IAM: ',i4,': ', i2,' of',i2,' comms are intraNode') +#endif + + + end subroutine genEdgeSched + + logical function isIntraComm(commranks,rank) + + + integer, intent(in) :: commranks(:) + integer, intent(in) :: rank + + integer :: i,nranks + + nranks = SIZE(commranks) + isIntraComm = .FALSE. + do i=1,nranks + if(commranks(i) .eq. rank) then + isIntraComm=.TRUE. + endif + enddo + + end function isIntraComm + + subroutine CheckSchedule() + + integer :: i, nSched, nbufferwords_1, nbufferwords_2 + type (Schedule_t), pointer :: pSchedule + + nSched = SIZE(Schedule) + + do i = 1, nSched + pSchedule => Schedule(i) + nbufferwords_1 = SUM(pSchedule%SendCycle(:)%lengthP) + nbufferwords_2 = SUM(pSchedule%RecvCycle(:)%lengthP) + if(nbufferwords_1 .ne. nbufferwords_2) then + write (iulog,100) i,nbufferwords_1, nbufferwords_2 + end if + end do +100 format('CheckSchedule: ERR IAM:',I3,' SIZEOF(SendBuffer):',I10,' != SIZEOF(RecvBuffer) :',I10) + + end subroutine CheckSchedule + + subroutine PrintSchedule(Schedule) + ! 
Debug subroutine for the schedule_t data-structure + use gridgraph_mod, only : printgridedge + + type (Schedule_t),intent(in),target :: Schedule(:) + type (Schedule_t), pointer :: pSchedule + type (Cycle_t),pointer :: pCycle + + integer :: i,j,nSched + + nSched = SIZE(Schedule) + + write(6,*) '------NEW SCHEDULE FORMAT---------------------' + do i=1,nSched + pSchedule => Schedule(i) + write(6,*) + write(6,*) '----------------------------------------------' + write(6,90) i,pSchedule%ncycles + write(6,*) '----------------------------------------------' + write(6,*) '-----------SEND-------------------------------' + do j=1,pSchedule%nSendCycles + pCycle => pSchedule%SendCycle(j) + call PrintCycle(pCycle) + call PrintGridEdge(pCycle%edge%members) + enddo + write(6,*) '-----------RECV-------------------------------' + do j=1,pSchedule%nRecvCycles + pCycle => pSchedule%RecvCycle(j) + call PrintCycle(pCycle) + call PrintGridEdge(pCycle%edge%members) + enddo + write(6,*) '-----------MOVE-------------------------------' + pCycle => pSchedule%MoveCycle(1) + call PrintCycle(pCycle) + call PrintGridEdge(pCycle%edge%members) + enddo + write(6,*) '-----------Put Index--------------------' + call PrintIndex(Schedule(1)%pIndx) + write(6,*) '-----------Get Index--------------------' + call PrintIndex(Schedule(1)%gIndx) + +90 format('NODE # ',I2,2x,'NCYCLES ',I2) +97 format(10x,'EDGE #',I2,2x,'TYPE ',I1,2x,'G.EDGES',I4,2x,'WORDS ',I5,2x, & + 'SRC ',I3,2x,'DEST ',I3,2x,'PTR ',I4) +100 format(15x,I4,5x,I3,1x,'(',I1,') --',I1,'--> ',I3,1x,'(',I1,')') + + end subroutine PrintSchedule + + subroutine PrintIndex(Indx) + ! Debugging subroutine for the pgindex_t data-structure + + ! type, public :: pgindex_t + ! integer :: elemid + ! integer :: edgeid + ! integer :: mesgid + ! integer :: lenP,lenS + ! end type pgindex_t + + type (pgindex_t) :: Indx(:) + + integer :: i, len + + len = SIZE(Indx) + + write(6,*) ' elemID, edgeID, mesgID, lenP, lenS ' + do i=1,len + write(6,1099) Indx(i)%elemid,Indx(i)%edgeid,Indx(i)%mesgid,Indx(i)%lenP,Indx(i)%lenS + enddo + +1099 format(I4,5X,I4,5X,I4,5X,I2,4X,I2) + + end subroutine PrintIndex + + subroutine PrintCycle(Cycle) + ! debug subroutine for the cycle_t data-structure + type (Cycle_t),intent(in),target :: Cycle + + write(6,97) Cycle%edge%number,Cycle%type,Cycle%edge%nmembers, & + Cycle%lengthP,Cycle%source, Cycle%dest,Cycle%ptrP + +97 format(5x,'METAEDGE #',I2,2x,'TYPE ',I1,2x,'G.EDGES',I4,2x,'WORDS ',I5,2x, & + 'SRC ',I3,2x,'DEST ',I3,2x,'PTR ',I5) + + end subroutine PrintCycle + + subroutine SetCycle(par, elem, schedule,Cycle,Edge,ctype) + use element_mod, only: element_t + use dimensions_mod, only: max_corner_elem, max_neigh_edges + use cam_abortutils, only: endrun + + type(parallel_t), intent(in) :: par + type(element_t), intent(inout) :: elem(:) + type (Schedule_t), intent(inout) :: Schedule + type (Cycle_t), intent(inout) :: Cycle + type (MetaEdge_t), intent(in), target :: Edge + integer, intent(in) :: ctype + integer :: i,il,face, loc, dir + + do i = 1, Edge%nmembers + if((ctype == HME_CYCLE_SEND) .or. & + (ctype == HME_CYCLE_MOVE) .or. & + (ctype == HME_CYCLE_ANY)) then + ! Setup send index + il = Global2Local(Edge%members(i)%tail%number) + face = Edge%members(i)%tail_face + !need to convert the location of corner elements for getmap and putmap + if (face.ge.5) then ! 
if a corner element + dir = Edge%members(i)%tail_dir + loc = MOD(dir,max_corner_elem) !this is the location within that direction + dir = (dir - loc)/max_corner_elem !this is the direction (1-8) + loc = dir + (dir-5)*(max_corner_elem-1)+loc + else + loc = face + end if + + if(il .gt. 0) then + elem(il)%desc%putmapP(loc) = Edge%edgeptrP(i) + Cycle%ptrP - 1 ! offset, so start at 0 + elem(il)%desc%putmapS(loc) = Edge%edgeptrS(i) + Cycle%ptrS - 1 + elem(il)%desc%putmapP_ghost(loc) = Edge%edgeptrP_ghost(i) + Cycle%ptrP_ghost ! index, start at 1 + elem(il)%desc%reverse(loc) = Edge%members(i)%reverse + schedule%pIndx(schedule%pPtr)%elemid=il + schedule%pIndx(schedule%pPtr)%edgeid=loc + schedule%pIndx(schedule%pPtr)%mesgid=Edge%HeadVertex-1 ! convert this to 0-based + schedule%pIndx(schedule%pPtr)%lenP =Edge%members(i)%wgtP + schedule%pIndx(schedule%pPtr)%lenS =Edge%members(i)%wgtS + if (face.ge.5) then + schedule%pIndx(schedule%pPtr)%edgeType = HME_Ordinal + else + schedule%pIndx(schedule%pPtr)%edgeType = HME_Cardinal + endif + schedule%pPtr=schedule%pPtr+1 + end if + end if + + if((ctype == HME_CYCLE_RECV) .or. & + (ctype == HME_CYCLE_MOVE) .or. & + (ctype == HME_CYCLE_ANY)) then + ! Setup receive index + il = Global2Local(Edge%members(i)%head%number) + face = Edge%members(i)%head_face + !need to convert the location of corner elements for getmap and putmap + if (face.ge.5) then !its a corner + dir = Edge%members(i)%head_dir + loc = MOD(dir,max_corner_elem) !this is the location within that direction + dir = (dir - loc)/max_corner_elem !this is the direction (1-8) + loc = dir + (dir-5)*(max_corner_elem-1)+loc + if(loc > max_neigh_edges) then + write(iulog, *) __FILE__,__LINE__,par%rank,face,i,max_corner_elem,max_neigh_edges,edge%members(i)%head_face + call endrun('max_neigh_edges set too low.') + end if + else + loc = face + end if + + if(il .gt. 0) then + elem(il)%desc%getmapP(loc) = Edge%edgeptrP(i) + Cycle%ptrP - 1 + elem(il)%desc%getmapS(loc) = Edge%edgeptrS(i) + Cycle%ptrS - 1 + elem(il)%desc%getmapP_ghost(loc) = Edge%edgeptrP_ghost(i) + Cycle%ptrP_ghost + elem(il)%desc%globalID(loc) = Edge%members(i)%tail%number + schedule%gIndx(schedule%gPtr)%elemid=il + schedule%gIndx(schedule%gPtr)%edgeid=loc + schedule%gIndx(schedule%gPtr)%mesgid=Edge%TailVertex-1 ! convert this to 0-based + schedule%gIndx(schedule%gPtr)%lenP =Edge%members(i)%wgtP + schedule%gIndx(schedule%gPtr)%lenS =Edge%members(i)%wgtS + if (face.ge.5) then + schedule%gIndx(schedule%gPtr)%edgeType = HME_Ordinal + else + schedule%gIndx(schedule%gPtr)%edgeType = HME_Cardinal + endif + schedule%gPtr=schedule%gPtr+1 + end if + end if + end do + Cycle%edge => Edge + Cycle%type = Edge%type + Cycle%dest = Edge%HeadVertex + Cycle%source = Edge%TailVertex + Cycle%tag = BNDRY_EXCHANGE_MESSAGE + Cycle%lengthP = Edge%wgtP + Cycle%lengthS = Edge%wgtS + Cycle%lengthP_ghost = Edge%wgtP_ghost + + end subroutine SetCycle + + function FindBufferSlot(inbr,length,tmp) result(ptr) + + integer :: ptr + integer, intent(in) :: inbr,length + integer, intent(inout) :: tmp(:,:) + + integer :: i,n + + n = SIZE(tmp,2) + + ptr = 0 + do i=1,n + if( tmp(1,i) == inbr) then + ptr = tmp(2,i) + return + endif + if( tmp(1,i) == -1 ) then + tmp(1,i) = inbr + if(i .eq. 1) tmp(2,i) = 1 + ptr = tmp(2,i) + if(i .ne. 
n) tmp(2,i+1) = ptr +length + return + endif + enddo + + end function FindBufferSlot + +end module schedule_mod diff --git a/src/dynamics/se/dycore/spacecurve_mod.F90 b/src/dynamics/se/dycore/spacecurve_mod.F90 new file mode 100644 index 00000000..c7631121 --- /dev/null +++ b/src/dynamics/se/dycore/spacecurve_mod.F90 @@ -0,0 +1,1274 @@ +module spacecurve_mod + use cam_logfile, only: iulog + + implicit none + private + + type, public :: factor_t + integer :: numfact + integer, dimension(:),pointer :: factors => NULL() + end type factor_t + + + integer,public, dimension(:,:), allocatable :: ordered + integer,public, dimension(:,:), allocatable :: dir ! direction to move along each level + integer,public, dimension(:) , allocatable :: pos ! position along each of the axes + + integer,public :: maxdim ! dimensionality of entire space + integer,public :: vcnt ! visitation count + logical,private :: verbose=.FALSE. + + type (factor_t), public :: fact + + SAVE:: fact + public :: map + public :: hilbert_old + public :: PeanoM,hilbert, Cinco + public :: GenCurve + public :: GenSpaceCurve + public :: log2,Factor + public :: PrintCurve + public :: IsFactorable,IsLoadBalanced + public :: genspacepart +contains + !--------------------------------------------------------- + recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) + + implicit none + integer,intent(in) :: l,type,ma,md,ja,jd + + integer :: lma,lmd,lja,ljd,ltype + integer :: ll + integer :: ierr + logical :: debug = .FALSE. + + ll = l + if(ll .gt. 1) ltype = fact%factors(ll-1) ! Set the next type of space curve + + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'Cinco: After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 
1) then + if(debug) write(iulog,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = ma + ljd = -md + + if(ll .gt. 1) then + if(debug) write(iulog,25) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [1,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,26) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,27) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = MOD(ma+1,maxdim) + ljd = md + + if(ll .gt. 1) then + if(debug) write(iulog,28) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,29) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,30) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 
1) then + if(debug) write(iulog,31) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(iulog,32) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(iulog,33) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,34) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,35) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,36) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(iulog,37) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,3] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 
1) then + if(debug) write(iulog,38) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,39) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(iulog,40) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(iulog,41) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,42) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,43) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(iulog,44) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = ja + ljd = jd + + if(ll .gt. 
1) then + if(debug) write(iulog,45) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + +21 format('Call Cinco Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +22 format('Call Cinco Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +23 format('Call Cinco Pos [2,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +24 format('Call Cinco Pos [2,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +25 format('Call Cinco Pos [2,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +26 format('Call Cinco Pos [1,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +27 format('Call Cinco Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +28 format('Call Cinco Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +29 format('Call Cinco Pos [0,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +30 format('Call Cinco Pos [0,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) +31 format('Call Cinco Pos [0,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) +32 format('Call Cinco Pos [1,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) +33 format('Call Cinco Pos [1,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) +34 format('Call Cinco Pos [2,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) +35 format('Call Cinco Pos [2,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) +36 format('Call Cinco Pos [3,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) +37 format('Call Cinco Pos [4,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) +38 format('Call Cinco Pos [4,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) +39 format('Call Cinco Pos [3,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) +40 format('Call Cinco Pos [3,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +41 format('Call Cinco Pos [4,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +42 format('Call Cinco Pos [4,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +43 format('Call Cinco Pos [3,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +44 format('Call Cinco Pos [3,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +45 format('Call Cinco Pos [4,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + + end function Cinco + + !--------------------------------------------------------- + recursive function PeanoM(l,type,ma,md,ja,jd) result(ierr) + + implicit none + integer,intent(in) :: l,type,ma,md,ja,jd + + integer :: lma,lmd,lja,ljd,ltype + integer :: ll + integer :: ierr + logical :: debug = .FALSE. + + ll = l + if(ll .gt. 1) ltype = fact%factors(ll-1) ! Set the next type of space curve + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(iulog,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,1] ',pos + endif + + !-------------------------------------------------------------- + ! 
Position [0,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(iulog,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(iulog,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,2] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [2,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(lma+1,maxdim) + ljd = -lmd + + if(ll .gt. 1) then + if(debug) write(iulog,25) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [2,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,26) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [2,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,27) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,1] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [1,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = MOD(lma+1,maxdim) + ljd = -lmd + + if(ll .gt. 1) then + if(debug) write(iulog,28) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = ja + ljd = jd + + if(ll .gt. 
1) then + if(debug) write(iulog,29) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [2,0] ',pos + endif + +21 format('Call PeanoM Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +22 format('Call PeanoM Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +23 format('Call PeanoM Pos [0,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +24 format('Call PeanoM Pos [1,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +25 format('Call PeanoM Pos [2,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) +26 format('Call PeanoM Pos [2,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +27 format('Call PeanoM Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +28 format('Call PeanoM Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +29 format('Call PeanoM Pos [2,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + + end function PeanoM + !--------------------------------------------------------- + recursive function hilbert(l,type,ma,md,ja,jd) result(ierr) + + implicit none + integer,intent(in) :: l,type,ma,md,ja,jd + + integer :: lma,lmd,lja,ljd,ltype + integer :: ll + integer :: ierr + logical :: debug = .FALSE. + + ll = l + if(ll .gt. 1) ltype = fact%factors(ll-1) ! Set the next type of space curve + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(iulog,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,0] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(iulog,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [0,1] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [1,1] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(iulog,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ja + ljd = jd + + if(ll .gt. 
1) then + if(debug) write(iulog,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) write(iulog,*)'After Position [1,0] ',pos + endif + +21 format('Call Hilbert Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) +22 format('Call Hilbert Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +23 format('Call Hilbert Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) +24 format('Call Hilbert Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + + end function hilbert + !--------------------------------------------------------- + function IncrementCurve(ja,jd) result(ierr) + + implicit none + + integer :: ja,jd + integer :: ierr + + ordered(pos(0)+1,pos(1)+1) = vcnt + vcnt = vcnt + 1 + pos(ja) = pos(ja) + jd + + ierr = 0 + end function IncrementCurve + !--------------------------------------------------------- + recursive function hilbert_old(l,d,ma,md,ja,jd) result(ierr) + + integer :: l,d ! log base 2 of levels and dimensions left + integer :: ma,md ! main axis and direction + integer :: ja,jd ! joiner axis and direction + + integer :: ierr + integer :: axis + integer :: ll + + if(verbose) write(iulog,10) l,d,ma,md,ja,jd,pos(0),pos(1) + ll = l ! Copy this to a temporary variable + if(d == 0) then + ll=ll-1 + if(ll == 0) then + return + endif + axis = ja + if(dir(ll,axis) /= jd) then ! do not move away from joiner plane + axis = MOD(axis+1,maxdim) ! next axis + endif + if(verbose) write(iulog,*)'hilbert_old: call hilbert_old(l,d) #1:' + ierr = hilbert_old(ll,maxdim,axis,dir(ll,axis),ja,jd) + dir(ll,ja) = -dir(ll,ja) + return + endif + axis = MOD(ma+1,maxdim) + if(verbose) write(iulog,*)'hilbert_old: before call hilbert_old(l,d) #2:' + ierr = hilbert_old(ll,d-1,axis,dir(ll,axis),ma,md) + if(verbose) write(iulog,*)'hilbert_old: after call hilbert_old(l,d) #2:' + if(verbose) write(iulog,30) l,d,ma,md,ja,jd,pos(0),pos(1) + + + pos(ma) = pos(ma) + md + dir(ll,ma) = - dir(ll,ma) + + !---------------------------------- + ! Mark this node as visited + !---------------------------------- + if(verbose) write(iulog,20) l,d,ma,md,ja,jd,pos(0),pos(1) + vcnt=vcnt+1 + if(verbose) write(iulog,15) pos(0)+1,pos(1)+1,vcnt + if(verbose) write(iulog,*)' ' + if(verbose) write(iulog,*)' ' + ordered(pos(0)+1,pos(1)+1)=vcnt + + if(verbose) write(iulog,*)'hilbert_old: before call hilbert_old(l,d) #3:' + ierr = hilbert_old(ll,d-1,axis,dir(ll,axis),ja,jd) + if(verbose) write(iulog,*)'hilbert_old: after call hilbert_old(l,d) #3:' + +10 format('hilbert_old: Entering hilbert_old (l,d,ma,md,ja,jd) are: ', & + 2(i4),' [',2(i3),'][',2(i3),']',2(i3)) +15 format('hilbert_old: mark element {x,y,ordered}:',3(i4)) +20 format('hilbert_old: Before visit code (l,d,ma,md,ja,jd) are:', & + 2(i4),' [',2(i3),'][',2(i3),']',2(i3)) + +30 format('hilbert_old: after call hilbert_old(l,d) #2: (l,d,ma,md,ja,jd are:', & + 2(i4),' [',2(i3),'][',2(i3),']',2(i3)) + + end function hilbert_old + !--------------------------------------------------------- + function log2( n) + + implicit none + + integer :: n + + integer :: log2,tmp + ! + ! Find the log2 of input value + ! + log2 = 1 + tmp =n + do while (tmp/2 .ne. 
1) + tmp=tmp/2 + log2=log2+1 + enddo + + end function log2 + !--------------------------------------------------------- + function IsLoadBalanced(nelem,npart) + + implicit none + + integer :: nelem,npart + + logical :: IsLoadBalanced + + integer :: tmp1 + + tmp1 = nelem/npart + + if(npart*tmp1 == nelem ) then + IsLoadBalanced=.TRUE. + else + IsLoadBalanced=.FALSE. + endif + + end function IsLoadBalanced + !--------------------------------------------------------- + recursive function GenCurve(l,type,ma,md,ja,jd) result(ierr) + + implicit none + integer,intent(in) :: l,type,ma,md,ja,jd + integer :: ierr + + if(type == 2) then + ierr = hilbert(l,type,ma,md,ja,jd) + elseif ( type == 3) then + ierr = PeanoM(l,type,ma,md,ja,jd) + elseif ( type == 5) then + ierr = Cinco(l,type,ma,md,ja,jd) + endif + + end function GenCurve + !--------------------------------------------------------- + function Factor(num) result(res) + + implicit none + integer,intent(in) :: num + + type (factor_t) :: res + integer :: tmp,tmp2,tmp3,tmp5 + integer :: i,n + logical :: found + + ! -------------------------------------- + ! Allocate for max # of factors + ! -------------------------------------- + tmp = num + tmp2 = log2(num) + allocate(res%factors(tmp2)) + + n=0 + !----------------------- + ! Look for factors of 2 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp2 = tmp/2 + if( tmp2*2 == tmp ) then + n = n + 1 + res%factors(n) = 2 + found = .TRUE. + tmp = tmp2 + endif + enddo + + !----------------------- + ! Look for factors of 3 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp3 = tmp/3 + if( tmp3*3 == tmp ) then + n = n + 1 + res%factors(n) = 3 + found = .TRUE. + tmp = tmp3 + endif + enddo + + !----------------------- + ! Look for factors of 5 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp5 = tmp/5 + if( tmp5*5 == tmp ) then + n = n + 1 + res%factors(n) = 5 + found = .TRUE. + tmp = tmp5 + endif + enddo + + tmp=1 + do i=1,n + tmp = tmp * res%factors(i) + enddo + if(tmp == num) then + res%numfact = n + else + res%numfact = -1 + endif + + end function Factor + !--------------------------------------------------------- + + function IsFactorable(n) + use cam_abortutils, only: endrun + + integer,intent(in) :: n + type (factor_t) :: fact + + logical :: IsFactorable + + if (associated(fact%factors)) then + call endrun("fact already allocated!!!") + end if + fact = Factor(n) + if(fact%numfact .ne. -1) then + IsFactorable = .TRUE. + else + IsFactorable = .FALSE. + endif + + end function IsFactorable + !------------------------------------------------ + + subroutine map(l) + + implicit none + integer :: l,d + integer :: type, ierr + + d = SIZE(pos) + + pos=0 + maxdim=d + vcnt=0 + + type = fact%factors(l) + ierr = GenCurve(l,type,0,1,0,1) + + end subroutine map + !--------------------------------------------------------- + subroutine GenSpaceCurve(Mesh) + + implicit none + + integer,target,intent(inout) :: Mesh(:,:) + integer :: level,dim + + integer :: gridsize + + ! Setup the size of the grid to traverse + + dim = 2 + gridsize = SIZE(Mesh,dim=1) + fact = factor(gridsize) + level = fact%numfact + + if(verbose) write(iulog,*)'GenSpacecurve: level is ',level + allocate(ordered(gridsize,gridsize)) + + ! Setup the working arrays for the traversal + allocate(pos(0:dim-1)) + + ! 
The array ordered will contain the visitation order + ordered(:,:) = 0 + + call map(level) + + Mesh(:,:) = ordered(:,:) + + end subroutine GenSpaceCurve + !------------------------------------------------------------------------------------------------------- + subroutine PrintCurve(Mesh) + implicit none + integer,target :: Mesh(:,:) + integer :: gridsize,i + + gridsize = SIZE(Mesh,dim=1) + + if(gridsize == 2) then + write (iulog,*) "A Level 1 Hilbert Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,2) Mesh(1,i),Mesh(2,i) + enddo + else if(gridsize == 3) then + write (iulog,*) "A Level 1 Peano Meandering Curve:" + write (iulog,*) "---------------------------------" + do i=1,gridsize + write(iulog,3) Mesh(1,i),Mesh(2,i),Mesh(3,i) + enddo + else if(gridsize == 4) then + write (iulog,*) "A Level 2 Hilbert Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,4) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i) + enddo + else if(gridsize == 5) then + write (iulog,*) "A Level 1 Cinco Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,5) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i),Mesh(5,i) + enddo + else if(gridsize == 6) then + write (iulog,*) "A Level 1 Hilbert and Level 1 Peano Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,6) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i),Mesh(5,i),Mesh(6,i) + enddo + else if(gridsize == 8) then + write (iulog,*) "A Level 3 Hilbert Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,8) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i) + enddo + else if(gridsize == 9) then + write (iulog,*) "A Level 2 Peano Meandering Curve:" + write (iulog,*) "---------------------------------" + do i=1,gridsize + write(iulog,9) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i) + enddo + else if(gridsize == 10) then + write (iulog,*) "A Level 1 Hilbert and Level 1 Cinco Curve:" + write (iulog,*) "---------------------------------" + do i=1,gridsize + write(iulog,10) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i) + enddo + else if(gridsize == 12) then + write (iulog,*) "A Level 2 Hilbert and Level 1 Peano Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,12) Mesh(1,i),Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i),Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i) + enddo + else if(gridsize == 15) then + write (iulog,*) "A Level 1 Peano and Level 1 Cinco Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,15) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i) + enddo + else if(gridsize == 16) then + write (iulog,*) "A Level 4 Hilbert Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,16) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i) + enddo + else if(gridsize == 18) then + write (iulog,*) "A Level 1 Hilbert and Level 2 Peano Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,18) Mesh(1,i), Mesh(2,i), Mesh(3,i), 
Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i) + enddo + else if(gridsize == 20) then + write (iulog,*) "A Level 2 Hilbert and Level 1 Cinco Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,20) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i) + enddo + else if(gridsize == 24) then + write (iulog,*) "A Level 3 Hilbert and Level 1 Peano Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,24) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i) + enddo + else if(gridsize == 25) then + write (iulog,*) "A Level 2 Cinco Curve:" + write (iulog,*) "------------------------------------------" + do i=1,gridsize + write(iulog,25) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i) + enddo + else if(gridsize == 27) then + write (iulog,*) "A Level 3 Peano Meandering Curve:" + write (iulog,*) "---------------------------------" + do i=1,gridsize + write(iulog,27) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i),Mesh(26,i),Mesh(27,i) + enddo + else if(gridsize == 32) then + write (iulog,*) "A Level 5 Hilbert Curve:" + write (iulog,*) "------------------------" + do i=1,gridsize + write(iulog,32) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i),Mesh(26,i),Mesh(27,i),Mesh(28,i), & + Mesh(29,i),Mesh(30,i),Mesh(31,i),Mesh(32,i) + enddo + endif +2 format('|',2(i2,'|')) +3 format('|',3(i2,'|')) +4 format('|',4(i2,'|')) +5 format('|',5(i2,'|')) +6 format('|',6(i2,'|')) +8 format('|',8(i2,'|')) +9 format('|',9(i2,'|')) +10 format('|',10(i2,'|')) +12 format('|',12(i3,'|')) +15 format('|',15(i3,'|')) +16 format('|',16(i3,'|')) +18 format('|',18(i3,'|')) +20 format('|',20(i3,'|')) +24 format('|',24(i3,'|')) +25 format('|',25(i3,'|')) +27 format('|',27(i3,'|')) +32 format('|',32(i4,'|')) + + end subroutine PrintCurve + + !------------------------------------------------------------------------------------------------------- + subroutine genspacepart(GridVertex) + use dimensions_mod, only: npart + use gridgraph_mod, only: gridedge_t, gridvertex_t + + type (GridVertex_t), intent(inout) :: GridVertex(:) + + integer :: nelem, nelemd + integer :: k, tmp1, id, s1, extra + + nelem = SIZE(GridVertex(:)) + + nelemd = nelem / 
npart
+    ! every cpu gets nelemd elements, but the first 'extra' get nelemd+1
+    extra = mod(nelem,npart)
+    s1 = extra*(nelemd+1)
+
+    ! split curve into two curves:
+    ! 1 ... s1  s2 ... nelem
+    !
+    ! s1 = extra*(nelemd+1)   (can be 0)
+    ! s2 = s1+1
+    !
+    ! First region gets nelemd+1 elements per Processor
+    ! Second region gets nelemd elements per Processor
+
+    ! ===========================================
+    ! Add the partitioning information into the
+    ! Grid Vertex and Grid Edge structures
+    ! ===========================================
+
+    do k = 1, nelem
+       id = GridVertex(k)%SpaceCurve
+       if (id <= s1) then
+          tmp1 = id/(nelemd+1)
+          GridVertex(k)%processor_number = tmp1 + 1
+       else
+          id = id - s1
+          tmp1 = id / nelemd
+          GridVertex(k)%processor_number = extra + tmp1+1
+       end if
+    end do
+#if 0
+    if (masterproc) then
+       write(iulog, *)'Space-Filling Curve Partitioning: '
+       write(iulog, '(2(a,i0))') 'npart = ',npart,', nelem = ',nelem
+       write(iulog, '(2(a,i0))') 'nelemd = ',nelemd,', extra = ',extra
+       write(iulog, '(a)') ' elem task#'
+       do k = 1, nelem
+          write(iulog,'(i6," ",i6)') k, GridVertex(k)%processor_number
+       end do
+    end if
+    call mpi_barrier(mpicom, tmp1)
+#endif
+
+  end subroutine genspacepart
+
+  end module spacecurve_mod
diff --git a/src/dynamics/se/dycore/thread_mod.F90 b/src/dynamics/se/dycore/thread_mod.F90
new file mode 100644
index 00000000..f8e0f235
--- /dev/null
+++ b/src/dynamics/se/dycore/thread_mod.F90
@@ -0,0 +1,82 @@
+module thread_mod
+
+#ifdef _OPENMP
+  use omp_lib, only: omp_get_thread_num, &
+                     omp_in_parallel, &
+                     omp_set_num_threads, &
+                     omp_get_max_threads, &
+                     omp_get_num_threads, &
+                     omp_get_nested, &
+                     omp_set_nested
+#endif
+  use cam_logfile, only: iulog
+  use spmd_utils, only: masterproc
+
+  implicit none
+  private
+
+  integer, public :: max_num_threads=1 ! maximum number of OpenMP threads
+  integer, public :: horz_num_threads, vert_num_threads, tracer_num_threads
+
+  public :: omp_get_thread_num
+  public :: omp_in_parallel
+  public :: omp_set_num_threads
+  public :: omp_get_max_threads
+  public :: omp_get_num_threads
+  public :: omp_get_nested
+  public :: omp_set_nested
+  public :: initomp
+contains
+
+#ifndef _OPENMP
+  function omp_get_thread_num() result(ithr)
+    integer ithr
+    ithr=0
+  end function omp_get_thread_num
+
+  function omp_get_num_threads() result(ithr)
+    integer ithr
+    ithr=1
+  end function omp_get_num_threads
+
+  function omp_in_parallel() result(ans)
+    logical ans
+    ans=.FALSE.
+ end function omp_in_parallel + + subroutine omp_set_num_threads(NThreads) + integer Nthreads + NThreads=1 + end subroutine omp_set_num_threads + + integer function omp_get_max_threads() + omp_get_max_threads=1 + end function omp_get_max_threads + + integer function omp_get_nested() + omp_get_nested=0 + end function omp_get_nested + + subroutine omp_set_nested(flag) + logical :: flag + end subroutine omp_set_nested + + subroutine initomp + max_num_threads = 1 + if (masterproc) then + write(iulog,*) "INITOMP: INFO: openmp not activated" + end if + end subroutine initomp + +#else + subroutine initomp + !$OMP PARALLEL + max_num_threads = omp_get_num_threads() + !$OMP END PARALLEL + if (masterproc) then + write(iulog,*) "INITOMP: INFO: number of OpenMP threads = ", max_num_threads + end if + end subroutine initomp +#endif + +end module thread_mod diff --git a/src/dynamics/se/dycore/time_mod.F90 b/src/dynamics/se/dycore/time_mod.F90 new file mode 100644 index 00000000..fdd68af0 --- /dev/null +++ b/src/dynamics/se/dycore/time_mod.F90 @@ -0,0 +1,135 @@ +module time_mod + !------------------ + use shr_kind_mod, only: r8=>shr_kind_r8 + !------------------ + implicit none + integer,public :: nsplit=1 + integer,public :: nsplit_baseline=-1 + integer,public :: rsplit_baseline=-1 + integer,public :: nmax ! Max number of timesteps + integer,public :: nEndStep ! Number of End Step + integer,public :: ndays ! Max number of days + + real (kind=r8) , public :: tstep ! Dynamics timestep + real (kind=r8) , public :: tevolve ! time evolved since start of dynamics (end of physics) + real (kind=r8) , public :: phys_tscale=0 ! Physics time scale + real (kind=r8) , public :: dt_phys = -900! physics time-step (only used in standalone HOMME) + ! if negative no forcing (see prim_main) + + ! smooth now in namelist + integer, parameter :: ptimelevels = 3 ! number of time levels in the dycore + + type, public :: TimeLevel_t + integer nm1 ! relative time level n-1 + integer n0 ! relative time level n + integer np1 ! relative time level n+1 + integer nstep ! time level since simulation start + integer nstep0 ! timelevel of first complete leapfrog timestep + end type TimeLevel_t + + ! 
Methods + public :: Time_at + public :: TimeLevel_update + public :: TimeLevel_init + public :: TimeLevel_Qdp + + interface TimeLevel_init + module procedure TimeLevel_init_default + module procedure TimeLevel_init_specific + module procedure TimeLevel_init_copy + end interface + +contains + + function Time_at(nstep) result(tat) + integer, intent(in) :: nstep + real (kind=r8) :: tat + tat = nstep*tstep + end function Time_at + + subroutine TimeLevel_init_default(tl) + type (TimeLevel_t), intent(out) :: tl + tl%nm1 = 1 + tl%n0 = 2 + tl%np1 = 3 + tl%nstep = 0 + tl%nstep0 = 2 + end subroutine TimeLevel_init_default + + subroutine TimeLevel_init_copy(tl, tin) + type (TimeLevel_t), intent(in) :: tin + type (TimeLevel_t), intent(out) :: tl + tl%nm1 = tin%nm1 + tl%n0 = tin%n0 + tl%np1 = tin%np1 + tl%nstep = tin%nstep + tl%nstep0= tin%nstep0 + end subroutine TimeLevel_init_copy + + subroutine TimeLevel_init_specific(tl,n0,n1,n2,nstep) + type (TimeLevel_t) :: tl + integer, intent(in) :: n0,n1,n2,nstep + tl%nm1= n0 + tl%n0 = n1 + tl%np1= n2 + tl%nstep= nstep + end subroutine TimeLevel_init_specific + + + !this subroutine returns the proper + !locations for nm1 and n0 for Qdp - because + !it only has 2 levels for storage + subroutine TimeLevel_Qdp(tl, qsplit, n0, np1) + type (TimeLevel_t) :: tl + integer, intent(in) :: qsplit + integer, intent(inout) :: n0 + integer, intent(inout), optional :: np1 + + integer :: i_temp + + i_temp = tl%nstep/qsplit + + if (mod(i_temp,2) ==0) then + n0 = 1 + if (present(np1)) then + np1 = 2 + endif + else + n0 = 2 + if (present(np1)) then + np1 = 1 + end if + endif + + !print * ,'nstep = ', tl%nstep, 'qsplit= ', qsplit, 'i_temp = ', i_temp, 'n0 = ', n0 + + end subroutine TimeLevel_Qdp + + subroutine TimeLevel_update(tl,uptype) + type (TimeLevel_t) :: tl + character(len=*) :: uptype + + ! Local Variable + + integer :: ntmp +!$OMP BARRIER +!$OMP MASTER + if (uptype == "leapfrog") then + ntmp = tl%np1 + tl%np1 = tl%nm1 + tl%nm1 = tl%n0 + tl%n0 = ntmp + else if (uptype == "forward") then + ntmp = tl%np1 + tl%np1 = tl%n0 + tl%n0 = ntmp + else + print *,'WARNING: TimeLevel_update called wint invalid uptype=',uptype + end if + + tl%nstep = tl%nstep+1 +!$OMP END MASTER +!$OMP BARRIER + end subroutine TimeLevel_update + +end module time_mod diff --git a/src/dynamics/se/dycore/vertremap_mod.F90 b/src/dynamics/se/dycore/vertremap_mod.F90 new file mode 100644 index 00000000..3b57fd89 --- /dev/null +++ b/src/dynamics/se/dycore/vertremap_mod.F90 @@ -0,0 +1,606 @@ +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!! Begin GPU remap module !! +!! by Rick Archibald, 2010 !! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +module vertremap_mod + + !************************************************************************************** + ! + ! Purpose: + ! Construct sub-grid-scale polynomials using piecewise spline method with + ! monotone filters. + ! + ! References: PCM - Zerroukat et al., Q.J.R. Meteorol. Soc., 2005. (ZWS2005QJR) + ! PSM - Zerroukat et al., Int. J. Numer. Meth. Fluids, 2005. (ZWS2005IJMF) + ! + !************************************************************************************** + + use shr_kind_mod, only: r8=>shr_kind_r8 + use dimensions_mod, only: np,nlev,qsize,nlevp,npsq,nc + use hybvcoord_mod, only: hvcoord_t + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use perf_mod, only: t_startf, t_stopf ! _EXTERNAL + use parallel_mod, only: parallel_t + use cam_abortutils, only: endrun + + implicit none + + public remap1 ! 
remap any field, splines, monotone + public remap1_nofilter ! remap any field, splines, no filter +! todo: tweak interface to match remap1 above, rename remap1_ppm: + public remap_q_ppm ! remap state%Q, PPM, monotone + + contains + +!=======================================================================================================! + + subroutine remap1(Qdp,nx,qstart,qstop,qsize,dp1,dp2,ptop,identifier,Qdp_mass,kord) + use fv_mapz, only: map1_ppm + ! remap 1 field + ! input: Qdp field to be remapped (NOTE: MASS, not MIXING RATIO) + ! dp1 layer thickness (source) + ! dp2 layer thickness (target) + ! + ! output: remaped Qdp, conserving mass, monotone on Q=Qdp/dp + ! + integer, intent(in) :: nx,qstart,qstop,qsize + real (kind=r8), intent(inout) :: Qdp(nx,nx,nlev,qsize) + real (kind=r8), intent(in) :: dp1(nx,nx,nlev),dp2(nx,nx,nlev) + integer, intent(in) :: identifier !0: tracers, 1: T, -1: u,v + real (kind=r8), intent(in) :: ptop + logical, intent(in) :: Qdp_mass + integer, intent(in) :: kord(qsize) + ! ======================== + ! Local Variables + ! ======================== + real (kind=r8) :: pe1(nx,nlev+1),pe2(nx,nlev+1),inv_dp(nx,nx,nlev),dp2_local(nx,nlev) + real (kind=r8) :: tmp(nx,nlev), gz(nx) + integer :: i,j,k,itrac + logical :: logp + integer :: kord_local(qsize) + + kord_local = kord + + if (any(kord(:) >= 0)) then + if (.not.qdp_mass) then + do itrac=1,qsize + if (kord(itrac) >= 0) then + Qdp(:,:,:,itrac) = Qdp(:,:,:,itrac)*dp1(:,:,:) + end if + end do + end if + call remap_Q_ppm(qdp,nx,qstart,qstop,qsize,dp1,dp2,kord) + if (.not.qdp_mass) then + do itrac=1,qsize + if (kord(itrac) >= 0) then + Qdp(:,:,:,itrac) = Qdp(:,:,:,itrac)/dp2(:,:,:) + end if + end do + end if + endif + if (any(kord(:)<0)) then + ! + ! check if remapping over p or log(p) + ! + ! can not mix and match here (all kord's must >-20 or <=-20) + ! + if (any(kord(:)>-20)) then + kord_local = abs(kord) + logp = .false. + else + kord_local = abs(kord/10) + if (identifier==1) then + logp = .true. + else + logp = .false. + end if + end if + ! + ! modified FV3 vertical remapping + ! + if (qdp_mass) then + inv_dp = 1.0_r8/dp1 + do itrac=1,qsize + if (kord(itrac)<0) then + Qdp(:,:,:,itrac) = Qdp(:,:,:,itrac)*inv_dp(:,:,:) + end if + end do + end if + if (logp) then + do j=1,nx + pe1(:,1) = ptop + pe2(:,1) = ptop + do k=1,nlev + do i=1,nx + pe1(i,k+1) = pe1(i,k)+dp1(i,j,k) + pe2(i,k+1) = pe2(i,k)+dp2(i,j,k) + end do + end do + pe1(:,nlev+1) = pe2(:,nlev+1) + do k=1,nlev+1 + do i=1,nx + pe1(i,k) = log(pe1(i,k)) + pe2(i,k) = log(pe2(i,k)) + end do + end do + + do itrac=1,qsize + if (kord(itrac)<0) then + call map1_ppm( nlev, pe1(:,:), Qdp(:,:,:,itrac), gz, & + nlev, pe2(:,:), Qdp(:,:,:,itrac), & + 1, nx, j, 1, nx, 1, nx, identifier, kord_local(itrac)) + end if + end do + ! call mapn_tracer(qsize, nlev, pe1, pe2, Qdp, dp2_local, kord, j, & + ! 1, nx, 1, nx, 1, nx, 0.0_r8, fill) + end do + else + do j=1,nx + pe1(:,1) = ptop + pe2(:,1) = ptop + do k=1,nlev + do i=1,nx + pe1(i,k+1) = pe1(i,k)+dp1(i,j,k) + pe2(i,k+1) = pe2(i,k)+dp2(i,j,k) + end do + end do + pe1(:,nlev+1) = pe2(:,nlev+1) + do itrac=1,qsize + if (kord(itrac)<0) then + call map1_ppm( nlev, pe1(:,:), Qdp(:,:,:,itrac), gz, &!phl + nlev, pe2(:,:), Qdp(:,:,:,itrac), & + 1, nx, j, 1, nx, 1, nx, identifier, kord_local(itrac)) + end if + end do + ! call mapn_tracer(qsize, nlev, pe1, pe2, Qdp, dp2_local, kord, j, & + ! 
1, nx, 1, nx, 1, nx, 0.0_r8, fill) + end do + end if + if (qdp_mass) then + do itrac=1,qsize + if (kord(itrac)<0) then + Qdp(:,:,:,itrac) = Qdp(:,:,:,itrac)*dp2(:,:,:) + end if + end do + end if + end if + end subroutine remap1 + +subroutine remap1_nofilter(Qdp,nx,qsize,dp1,dp2) + ! remap 1 field + ! input: Qdp field to be remapped (NOTE: MASS, not MIXING RATIO) + ! dp1 layer thickness (source) + ! dp2 layer thickness (target) + ! + ! output: remaped Qdp, conserving mass + ! + implicit none + integer, intent(in) :: nx,qsize + real (kind=r8), intent(inout) :: Qdp(nx,nx,nlev,qsize) + real (kind=r8), intent(in) :: dp1(nx,nx,nlev),dp2(nx,nx,nlev) + ! ======================== + ! Local Variables + ! ======================== + + real (kind=r8), dimension(nlev+1) :: rhs,lower_diag,diag,upper_diag,q_diag,zgam,z1c,z2c,zv + real (kind=r8), dimension(nlev) :: h,Qcol,za0,za1,za2,zarg,zhdp + real (kind=r8) :: tmp_cal,zv1,zv2 + integer :: zkr(nlev+1),i,ilev,j,jk,k,q + logical :: abort=.false. + ! call t_startf('remap1_nofilter') + +#if (defined COLUMN_OPENMP) + !$omp parallel do num_threads(tracer_num_threads) & + !$omp private(q,i,j,z1c,z2c,zv,k,Qcol,zkr,ilev) & + !$omp private(jk,zgam,zhdp,h,zarg,rhs,lower_diag,diag,upper_diag,q_diag,tmp_cal) & + !$omp private(za0,za1,za2) & + !$omp private(ip2,zv1,zv2) +#endif + do q=1,qsize + do i=1,nx + do j=1,nx + + z1c(1)=0 ! source grid + z2c(1)=0 ! target grid + do k=1,nlev + z1c(k+1)=z1c(k)+dp1(i,j,k) + z2c(k+1)=z2c(k)+dp2(i,j,k) + enddo + + zv(1)=0 + do k=1,nlev + Qcol(k)=Qdp(i,j,k,q)! *(z1c(k+1)-z1c(k)) input is mass + zv(k+1) = zv(k)+Qcol(k) + enddo + + if (ABS(z2c(nlev+1)-z1c(nlev+1)) >= 0.000001_r8) then + write(6,*) 'SURFACE PRESSURE IMPLIED BY ADVECTION SCHEME' + write(6,*) 'NOT CORRESPONDING TO SURFACE PRESSURE IN ' + write(6,*) 'DATA FOR MODEL LEVELS' + write(6,*) 'PLEVMODEL=',z2c(nlev+1) + write(6,*) 'PLEV =',z1c(nlev+1) + write(6,*) 'DIFF =',z2c(nlev+1)-z1c(nlev+1) + abort=.true. + endif + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! quadratic splies with UK met office monotonicity constraints !! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + zkr = 99 + ilev = 2 + zkr(1) = 1 + zkr(nlev+1) = nlev + kloop: do k = 2,nlev + do jk = ilev,nlev+1 + if (z1c(jk) >= z2c(k)) then + ilev = jk + zkr(k) = jk-1 + cycle kloop + endif + enddo + enddo kloop + + zgam = (z2c(1:nlev+1)-z1c(zkr)) / (z1c(zkr+1)-z1c(zkr)) + zgam(1) = 0.0_r8 + zgam(nlev+1) = 1.0_r8 + zhdp = z1c(2:nlev+1)-z1c(1:nlev) + + + h = 1/zhdp + zarg = Qcol * h + rhs = 0 + lower_diag = 0 + diag = 0 + upper_diag = 0 + + rhs(1)=3*zarg(1) + rhs(2:nlev) = 3*(zarg(2:nlev)*h(2:nlev) + zarg(1:nlev-1)*h(1:nlev-1)) + rhs(nlev+1)=3*zarg(nlev) + + lower_diag(1)=1 + lower_diag(2:nlev) = h(1:nlev-1) + lower_diag(nlev+1)=1 + + diag(1)=2 + diag(2:nlev) = 2*(h(2:nlev) + h(1:nlev-1)) + diag(nlev+1)=2 + + upper_diag(1)=1 + upper_diag(2:nlev) = h(2:nlev) + upper_diag(nlev+1)=0 + + q_diag(1)=-upper_diag(1)/diag(1) + rhs(1)= rhs(1)/diag(1) + + do k=2,nlev+1 + tmp_cal = 1/(diag(k)+lower_diag(k)*q_diag(k-1)) + q_diag(k) = -upper_diag(k)*tmp_cal + rhs(k) = (rhs(k)-lower_diag(k)*rhs(k-1))*tmp_cal + enddo + do k=nlev,1,-1 + rhs(k)=rhs(k)+q_diag(k)*rhs(k+1) + enddo + + za0 = rhs(1:nlev) + za1 = -4*rhs(1:nlev) - 2*rhs(2:nlev+1) + 6*zarg + za2 = 3*rhs(1:nlev) + 3*rhs(2:nlev+1) - 6*zarg + + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! start iteration from top to bottom of atmosphere !! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
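+          ! For each target level k, the accumulated mass zv is evaluated at the new
+          ! interface by integrating the fitted quadratic q(r) = za0 + za1*r + za2*r**2
+          ! over the fractional coordinate r = zgam of the containing source cell,
+          ! i.e. adding (za0*r + (za1/2)*r**2 + (za2/3)*r**3)*zhdp to zv at the cell
+          ! bottom; the layer mass is then the difference of successive interface values.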
+ + zv1 = 0 + do k=1,nlev + if (zgam(k+1)>1_r8) then + WRITE(*,*) 'r not in [0:1]', zgam(k+1) + abort=.true. + endif + zv2 = zv(zkr(k+1))+(za0(zkr(k+1))*zgam(k+1)+(za1(zkr(k+1))/2)*(zgam(k+1)**2)+ & + (za2(zkr(k+1))/3)*(zgam(k+1)**3))*zhdp(zkr(k+1)) + Qdp(i,j,k,q) = (zv2 - zv1) ! / (z2c(k+1)-z2c(k) ) dont convert back to mixing ratio + zv1 = zv2 + enddo + enddo + enddo + enddo ! q loop + if (abort) then + call endrun('Bad levels in remap1_nofilter. usually CFL violatioin') + end if +end subroutine remap1_nofilter + +!=============================================================================! + +!This uses the exact same model and reference grids and data as remap_Q, but it interpolates +!using PPM instead of splines. +subroutine remap_Q_ppm(Qdp,nx,qstart,qstop,qsize,dp1,dp2,kord) + ! remap 1 field + ! input: Qdp field to be remapped (NOTE: MASS, not MIXING RATIO) + ! dp1 layer thickness (source) + ! dp2 layer thickness (target) + ! + ! output: remaped Qdp, conserving mass + ! + implicit none + integer,intent(in) :: nx,qstart,qstop,qsize + real (kind=r8), intent(inout) :: Qdp(nx,nx,nlev,qsize) + real (kind=r8), intent(in) :: dp1(nx,nx,nlev),dp2(nx,nx,nlev) + integer , intent(in) :: kord(qsize) + ! Local Variables + integer, parameter :: gs = 2 !Number of cells to place in the ghost region + real(kind=r8), dimension( nlev+2 ) :: pio !Pressure at interfaces for old grid + real(kind=r8), dimension( nlev+1 ) :: pin !Pressure at interfaces for new grid + real(kind=r8), dimension( nlev+1 ) :: masso !Accumulate mass up to each interface + real(kind=r8), dimension( 1-gs:nlev+gs) :: ao !Tracer value on old grid + real(kind=r8), dimension( 1-gs:nlev+gs) :: dpo !change in pressure over a cell for old grid + real(kind=r8), dimension( 1-gs:nlev+gs) :: dpn !change in pressure over a cell for old grid + real(kind=r8), dimension(3, nlev ) :: coefs !PPM coefficients within each cell + real(kind=r8), dimension( nlev ) :: z1, z2 + real(kind=r8) :: ppmdx(10,0:nlev+1) !grid spacings + real(kind=r8) :: massn1, massn2, ext(2) + integer :: i, j, k, q, kk, kid(nlev) + + do j = 1 , nx + do i = 1 , nx + + pin(1)=0 + pio(1)=0 + do k=1,nlev + dpn(k)=dp2(i,j,k) + dpo(k)=dp1(i,j,k) + pin(k+1)=pin(k)+dpn(k) + pio(k+1)=pio(k)+dpo(k) + enddo + + + + pio(nlev+2) = pio(nlev+1) + 1._r8 !This is here to allow an entire block of k threads to run in the remapping phase. + !It makes sure there's an old interface value below the domain that is larger. + pin(nlev+1) = pio(nlev+1) !The total mass in a column does not change. + !Therefore, the pressure of that mass cannot either. + !Fill in the ghost regions with mirrored values. if vert_remap_q_alg is defined, this is of no consequence. + do k = 1 , gs + dpo(1 -k) = dpo( k) + dpo(nlev+k) = dpo(nlev+1-k) + enddo + + !Compute remapping intervals once for all tracers. Find the old grid cell index in which the + !k-th new cell interface resides. Then integrate from the bottom of that old cell to the new + !interface location. In practice, the grid never deforms past one cell, so the search can be + !simplified by this. Also, the interval of integration is usually of magnitude close to zero + !or close to dpo because of minimial deformation. + !Numerous tests confirmed that the bottom and top of the grids match to machine precision, so + !I set them equal to each other. + do k = 1 , nlev + kk = k !Keep from an order n^2 search operation by assuming the old cell index is close. + !Find the index of the old grid cell in which this new cell's bottom interface resides. +! 
do while ( pio(kk) <= pin(k+1) ) +! kk = kk + 1 +! if(kk==nlev+2) exit +! enddo + ! kk = kk - 1 !kk is now the cell index we're integrating over. + + if (pio(kk) <= pin(k+1)) then + do while ( pio(kk) <= pin(k+1) ) + kk = kk + 1 + enddo + kk = kk - 1 !kk is now the cell index we're integrating over. + else + call binary_search(pio, pin(k+1), kk) + end if + if (kk == nlev+1) kk = nlev !This is to keep the indices in bounds. + !Top bounds match anyway, so doesn't matter what coefficients are used + kid(k) = kk !Save for reuse + z1(k) = -0.5_R8 !This remapping assumes we're starting from the left interface of an old grid cell + !In fact, we're usually integrating very little or almost all of the cell in question + z2(k) = ( pin(k+1) - ( pio(kk) + pio(kk+1) ) * 0.5_r8 ) / dpo(kk) !PPM interpolants are normalized to an independent + !coordinate domain [-0.5,0.5]. + enddo + + !This turned out a big optimization, remembering that only parts of the PPM algorithm depends on the data, namely the + !limiting. So anything that depends only on the grid is pre-computed outside the tracer loop. + ppmdx(:,:) = compute_ppm_grids( dpo) + + !From here, we loop over tracers for only those portions which depend on tracer data, which includes PPM limiting and + !mass accumulation + do q = qstart, qstop + if (kord(q) >= 0) then + !Accumulate the old mass up to old grid cell interface locations to simplify integration + !during remapping. Also, divide out the grid spacing so we're working with actual tracer + !values and can conserve mass. The option for ifndef ZEROHORZ I believe is there to ensure + !tracer consistency for an initially uniform field. I copied it from the old remap routine. + masso(1) = 0._r8 + + do k = 1 , nlev + ao(k) = Qdp(i,j,k,q) + masso(k+1) = masso(k) + ao(k) !Accumulate the old mass. This will simplify the remapping + ao(k) = ao(k) / dpo(k) !Divide out the old grid spacing because we want the tracer mixing ratio, not mass. + enddo + !Fill in ghost values. Ignored if kord == 2 + if (kord(q) == 10) then + ext(1) = minval(ao(1:nlev)) + ext(2) = maxval(ao(1:nlev)) + call linextrap(dpo(2), dpo(1), dpo(0), dpo(-1), ao(2), ao(1), ao(0), ao(-1), ext(1), ext(2)) + call linextrap(dpo(nlev-1), dpo(nlev), dpo(nlev+1), dpo(nlev+2), & + ao(nlev-1), ao(nlev), ao(nlev+1), ao(nlev+2), ext(1), ext(2)) + else + do k = 1 , gs + ao(1 -k) = ao( k) + ao(nlev+k) = ao(nlev+1-k) + enddo + end if + !Compute monotonic and conservative PPM reconstruction over every cell + coefs(:,:) = compute_ppm( ao , ppmdx, kord(q) ) + !Compute tracer values on the new grid by integrating from the old cell bottom to the new + !cell interface to form a new grid mass accumulation. Taking the difference between + !accumulation at successive interfaces gives the mass inside each cell. Since Qdp is + !supposed to hold the full mass this needs no normalization. + massn1 = 0._r8 + do k = 1 , nlev + kk = kid(k) + massn2 = masso(kk) + integrate_parabola( coefs(:,kk) , z1(k) , z2(k) ) * dpo(kk) + Qdp(i,j,k,q) = massn2 - massn1 + massn1 = massn2 + enddo + end if + enddo + enddo + enddo +! call t_stopf('remap_Q_ppm') +end subroutine remap_Q_ppm + +! Find k such that pio(k) <= pivot < pio(k+1). Provide a reasonable input +! value for k. 
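+! The bracket [lo,hi] is seeded from the hint: if pio(k) already exceeds the
+! pivot the search is restricted to [1,k], otherwise to [k,nlev+2], and the
+! interval is then halved until hi = lo+1. A good hint therefore keeps the
+! cost close to that of the O(1) forward scan used in the common case.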
+subroutine binary_search(pio, pivot, k) + real(kind=r8), intent(in) :: pio(nlev+2), pivot + integer, intent(inout) :: k + integer :: lo, hi, mid + + if (pio(k) > pivot) then + lo = 1 + hi = k + else + lo = k + hi = nlev+2 + end if + do while (hi > lo + 1) + k = (lo + hi)/2 + if (pio(k) > pivot) then + hi = k + else + lo = k + end if + end do + k = lo +end subroutine binary_search +!=======================================================================================================! + +!This compute grid-based coefficients from Collela & Woodward 1984. +function compute_ppm_grids( dx ) result(rslt) + implicit none + real(kind=r8), intent(in) :: dx(-1:nlev+2) !grid spacings + real(kind=r8) :: rslt(10,0:nlev+1) !grid spacings + integer :: j + + !Calculate grid-based coefficients for stage 1 of compute_ppm + do j = 0 , nlev+1 + rslt( 1,j) = dx(j) / ( dx(j-1) + dx(j) + dx(j+1) ) + rslt( 2,j) = ( 2._r8*dx(j-1) + dx(j) ) / ( dx(j+1) + dx(j) ) + rslt( 3,j) = ( dx(j) + 2._r8*dx(j+1) ) / ( dx(j-1) + dx(j) ) + enddo + + !Caculate grid-based coefficients for stage 2 of compute_ppm + do j = 0 , nlev + rslt( 4,j) = dx(j) / ( dx(j) + dx(j+1) ) + rslt( 5,j) = 1._r8 / sum( dx(j-1:j+2) ) + rslt( 6,j) = ( 2._r8 * dx(j+1) * dx(j) ) / ( dx(j) + dx(j+1 ) ) + rslt( 7,j) = ( dx(j-1) + dx(j ) ) / ( 2._r8 * dx(j ) + dx(j+1) ) + rslt( 8,j) = ( dx(j+2) + dx(j+1) ) / ( 2._r8 * dx(j+1) + dx(j ) ) + rslt( 9,j) = dx(j ) * ( dx(j-1) + dx(j ) ) / ( 2._r8*dx(j ) + dx(j+1) ) + rslt(10,j) = dx(j+1) * ( dx(j+1) + dx(j+2) ) / ( dx(j ) + 2._r8*dx(j+1) ) + enddo +end function compute_ppm_grids + + +!=======================================================================================================! + + + +!This computes a limited parabolic interpolant using a net 5-cell stencil, but the stages of computation are broken up into 3 stages +function compute_ppm( a , dx , kord) result(coefs) + implicit none + real(kind=r8), intent(in) :: a ( -1:nlev+2) !Cell-mean values + real(kind=r8), intent(in) :: dx (10, 0:nlev+1) !grid spacings + integer, intent(in) :: kord + real(kind=r8) :: coefs(0:2, nlev ) !PPM coefficients (for parabola) + real(kind=r8) :: ai (0:nlev ) !fourth-order accurate, then limited interface values + real(kind=r8) :: dma(0:nlev+1) !An expression from Collela's '84 publication + real(kind=r8) :: da !Ditto + ! Hold expressions based on the grid (which are cumbersome). + real(kind=r8) :: al, ar !Left and right interface values for cell-local limiting + integer :: j + integer :: indB, indE + + ! Stage 1: Compute dma for each cell, allowing a 1-cell ghost stencil below and above the domain + do j = 0 , nlev+1 + da = dx(1,j) * ( dx(2,j) * ( a(j+1) - a(j) ) + dx(3,j) * ( a(j) - a(j-1) ) ) + dma(j) = minval( (/ abs(da) , 2._r8 * abs( a(j) - a(j-1) ) , 2._r8 * abs( a(j+1) - a(j) ) /) ) * sign(1._r8,da) + if ( ( a(j+1) - a(j) ) * ( a(j) - a(j-1) ) <= 0._r8 ) dma(j) = 0._r8 + enddo + + ! Stage 2: Compute ai for each cell interface in the physical domain (dimension nlev+1) + do j = 0 , nlev + ai(j) = a(j) + dx(4,j) * ( a(j+1) - a(j) ) + dx(5,j) * ( dx(6,j) * ( dx(7,j) - dx(8,j) ) & + * ( a(j+1) - a(j) ) - dx(9,j) * dma(j+1) + dx(10,j) * dma(j) ) + enddo + + ! Stage 3: Compute limited PPM interpolant over each cell in the physical domain + ! (dimension nlev) using ai on either side and ao within the cell. 
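+  ! The Colella & Woodward limiter is applied cell by cell: if the cell mean is a
+  ! local extremum relative to the two interface values, the reconstruction falls
+  ! back to a constant (al = ar = a(j)); otherwise the interface value on the side
+  ! that would make the parabola overshoot the cell mean is pulled back so the
+  ! parabola stays bounded by the cell mean and the remaining edge value.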
+ do j = 1 , nlev + al = ai(j-1) + ar = ai(j ) + if ( (ar - a(j)) * (a(j) - al) <= 0._r8 ) then + al = a(j) + ar = a(j) + endif + if ( (ar - al) * (a(j) - (al + ar)/2._r8) > (ar - al)**2/6._r8 ) al = 3._r8*a(j) - 2._r8 * ar + if ( (ar - al) * (a(j) - (al + ar)/2._r8) < -(ar - al)**2/6._r8 ) ar = 3._r8*a(j) - 2._r8 * al + !Computed these coefficients from the edge values and cell mean in Maple. Assumes normalized coordinates: xi=(x-x0)/dx + coefs(0,j) = 1.5_r8 * a(j) - ( al + ar ) / 4._r8 + coefs(1,j) = ar - al + coefs(2,j) = 3._r8 * (-2._r8 * a(j) + ( al + ar )) + enddo + + !If kord == 2, use piecewise constant in the boundaries, and don't use ghost cells. + if (kord == 2) then + coefs(0,1:2) = a(1:2) + coefs(1:2,1:2) = 0._r8 + coefs(0,nlev-1:nlev) = a(nlev-1:nlev) + coefs(1:2,nlev-1:nlev) = 0._r8 + endif +end function compute_ppm + + +!=======================================================================================================! + + +!Simple function computes the definite integral of a parabola in normalized coordinates, xi=(x-x0)/dx, +!given two bounds. Make sure this gets inlined during compilation. +function integrate_parabola( a , x1 , x2 ) result(mass) + implicit none + real(kind=r8), intent(in) :: a(0:2) !Coefficients of the parabola + real(kind=r8), intent(in) :: x1 !lower domain bound for integration + real(kind=r8), intent(in) :: x2 !upper domain bound for integration + real(kind=r8) :: mass + mass = a(0) * (x2 - x1) + a(1) * (x2 ** 2 - x1 ** 2) / 0.2D1 + a(2) * (x2 ** 3 - x1 ** 3) / 0.3D1 +end function integrate_parabola + + +!=============================================================================================! + subroutine linextrap(dx1,dx2,dx3,dx4,y1,y2,y3,y4,lo,hi) + real(kind=r8), intent(in) :: dx1,dx2,dx3,dx4,y1,y2,lo,hi + real(kind=r8), intent(out) :: y3,y4 + + real(kind=r8), parameter :: half = 0.5_r8 + + real(kind=r8) :: x1,x2,x3,x4,a + + x1 = half*dx1 + x2 = x1 + half*(dx1 + dx2) + x3 = x2 + half*(dx2 + dx3) + x4 = x3 + half*(dx3 + dx4) + a = (x3-x1)/(x2-x1) + y3 = (1.0_r8-a)*y1 + a*y2 + a = (x4-x1)/(x2-x1) + y4 = (1.0_r8-a)*y1 + a*y2 + y3 = max(lo, min(hi, y3)) + y4 = max(lo, min(hi, y4)) + end subroutine linextrap +end module vertremap_mod + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!! End GPU remap module !! +!! by Rick Archibald, 2010 !! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/src/dynamics/se/dycore/viscosity_mod.F90 b/src/dynamics/se/dycore/viscosity_mod.F90 new file mode 100644 index 00000000..b29e48a1 --- /dev/null +++ b/src/dynamics/se/dycore/viscosity_mod.F90 @@ -0,0 +1,740 @@ +module viscosity_mod +! +! This module should be renamed "global_deriv_mod.F90" +! +! It is a collection of derivative operators that must be applied to the field +! over the sphere (as opposed to derivative operators that can be applied element +! by element) +! +! 
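+! Most routines here follow the same pattern: apply the weak (element-local)
+! operator, pack the element edges and do a boundary exchange, then unpack and
+! multiply by the inverse mass matrix (the DSS step) to recover a globally
+! continuous result; the biharmonic routines apply the weak Laplacian a second
+! time after the DSS.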
+ use shr_kind_mod, only: r8=>shr_kind_r8 + use thread_mod, only: max_num_threads, omp_get_num_threads + use dimensions_mod, only: np, nc, nlev,qsize,nelemd + use hybrid_mod, only: hybrid_t, get_loop_ranges, config_thread_region + use parallel_mod, only: parallel_t + use element_mod, only: element_t + use derivative_mod, only: derivative_t, laplace_sphere_wk, vlaplace_sphere_wk, vorticity_sphere, derivinit, divergence_sphere + use edgetype_mod, only: EdgeBuffer_t, EdgeDescriptor_t + use edge_mod, only: edgevpack, edgevunpack, edgeVunpackmin, edgeSunpackmin, & + edgeVunpackmax, initEdgeBuffer, FreeEdgeBuffer, edgeSunpackmax, edgeSpack + use bndry_mod, only: bndry_exchange, bndry_exchange_start,bndry_exchange_finish + use control_mod, only: hypervis_scaling, nu, nu_div + use thread_mod, only: vert_num_threads + + implicit none + save + + public :: biharmonic_wk_scalar + public :: biharmonic_wk_omega + public :: neighbor_minmax, neighbor_minmax_start,neighbor_minmax_finish + + ! + ! compute vorticity/divergence and then project to make continious + ! high-level routines uses only for I/O + public :: compute_zeta_C0 + public :: compute_div_C0 + + interface compute_zeta_C0 + module procedure compute_zeta_C0_hybrid ! hybrid version + module procedure compute_zeta_C0_par ! single threaded + end interface compute_zeta_C0 + interface compute_div_C0 + module procedure compute_div_C0_hybrid + module procedure compute_div_C0_par + end interface compute_div_C0 + + public :: compute_zeta_C0_contra ! for older versions of sweq which carry + public :: compute_div_C0_contra ! velocity around in contra-coordinates + + type (EdgeBuffer_t) :: edge1 + +CONTAINS + +subroutine biharmonic_wk_dp3d(elem,dptens,dpflux,ttens,vtens,deriv,edge3,hybrid,nt,nets,nete,kbeg,kend,& + dp3d_ref,T_ref) + use derivative_mod, only : subcell_Laplace_fluxes + use dimensions_mod, only : ntrac, nu_div_lev,nu_lev + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! compute weak biharmonic operator + ! input: h,v (stored in elem()%, in lat-lon coordinates + ! output: ttens,vtens overwritten with weak biharmonic of h,v (output in lat-lon coordinates) + ! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + integer , intent(in) :: nt,nets,nete + integer , intent(in) :: kbeg, kend + real (kind=r8), intent(out), dimension(nc,nc,4,nlev,nets:nete) :: dpflux + real (kind=r8), dimension(np,np,2,nlev,nets:nete) :: vtens + real (kind=r8), dimension(np,np,nlev,nets:nete) :: ttens,dptens + real (kind=r8), dimension(np,np,nlev,nets:nete), optional :: dp3d_ref,T_ref + type (EdgeBuffer_t) , intent(inout) :: edge3 + type (derivative_t) , intent(in) :: deriv + + ! local + integer :: i,j,k,kptr,ie,kblk +! real (kind=r8), dimension(:,:), pointer :: rspheremv + real (kind=r8), dimension(np,np) :: tmp + real (kind=r8), dimension(np,np) :: tmp2 + real (kind=r8), dimension(np,np,2) :: v + real (kind=r8) :: nu_ratio1, nu_ratio2 + logical var_coef1 + + kblk = kend - kbeg + 1 + + if (ntrac>0) dpflux = 0 + !if tensor hyperviscosity with tensor V is used, then biharmonic operator is (\grad\cdot V\grad) (\grad \cdot \grad) + !so tensor is only used on second call to laplace_sphere_wk + var_coef1 = .true. + if(hypervis_scaling > 0) var_coef1 = .false. 
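+  ! nu_div and nu may differ by level: with tensor hyperviscosity the full ratio
+  ! nu_div_lev/nu_lev is applied on the first Laplacian pass only (the tensor does
+  ! not separate divergence and vorticity damping), otherwise sqrt of the ratio is
+  ! applied on each of the two passes so that the product matches nu_div/nu.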
+ + + do ie=nets,nete +!$omp parallel do num_threads(vert_num_threads) private(k,tmp) + do k=kbeg,kend + nu_ratio1=1 + nu_ratio2=1 + if (nu_div_lev(k)/=nu_lev(k)) then + if(hypervis_scaling /= 0) then + ! we have a problem with the tensor in that we cant seperate + ! div and curl components. So we do, with tensor V: + ! nu * (del V del ) * ( nu_ratio * grad(div) - curl(curl)) + nu_ratio1=nu_div_lev(k)/nu_lev(k) + nu_ratio2=1 + else + nu_ratio1=sqrt(nu_div_lev(k)/nu_lev(k)) + nu_ratio2=sqrt(nu_div_lev(k)/nu_lev(k)) + endif + endif + + if (present(T_ref)) then + tmp=elem(ie)%state%T(:,:,k,nt)-T_ref(:,:,k,ie) + else + tmp=elem(ie)%state%T(:,:,k,nt) + end if + call laplace_sphere_wk(tmp,deriv,elem(ie),ttens(:,:,k,ie),var_coef=var_coef1) + if (present(dp3d_ref)) then + tmp=elem(ie)%state%dp3d(:,:,k,nt)-dp3d_ref(:,:,k,ie) + else + tmp=elem(ie)%state%dp3d(:,:,k,nt) + end if + call laplace_sphere_wk(tmp,deriv,elem(ie),dptens(:,:,k,ie),var_coef=var_coef1) + + call vlaplace_sphere_wk(elem(ie)%state%v(:,:,:,k,nt),deriv,elem(ie),.true.,vtens(:,:,:,k,ie), & + var_coef=var_coef1,nu_ratio=nu_ratio1) + enddo + + kptr = kbeg - 1 + call edgeVpack(edge3,ttens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + nlev + call edgeVpack(edge3,vtens(:,:,1,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 2*nlev + call edgeVpack(edge3,vtens(:,:,2,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 3*nlev + call edgeVpack(edge3,dptens(:,:,kbeg:kend,ie),kblk,kptr,ie) + enddo + + call bndry_exchange(hybrid,edge3,location='biharmonic_wk_dp3d') + + do ie=nets,nete +!CLEAN rspheremv => elem(ie)%rspheremp(:,:) + + kptr = kbeg - 1 + call edgeVunpack(edge3,ttens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + nlev + call edgeVunpack(edge3,vtens(:,:,1,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 2*nlev + call edgeVunpack(edge3,vtens(:,:,2,kbeg:kend,ie),kblk,kptr,ie) + + kptr = kbeg - 1 + 3*nlev + call edgeVunpack(edge3,dptens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + if (ntrac>0) then + do k=1,nlev +!CLEAN tmp(:,:)= rspheremv(:,:)*dptens(:,:,k,ie) + tmp(:,:)= elem(ie)%rspheremp(:,:)*dptens(:,:,k,ie) + call subcell_Laplace_fluxes(tmp, deriv, elem(ie), np, nc,dpflux(:,:,:,k,ie)) + enddo + endif + + ! apply inverse mass matrix, then apply laplace again + !$omp parallel do num_threads(vert_num_threads) private(k,v,tmp,tmp2) + do k=kbeg,kend +!CLEAN tmp(:,:)=rspheremv(:,:)*ttens(:,:,k,ie) + tmp(:,:)=elem(ie)%rspheremp(:,:)*ttens(:,:,k,ie) + call laplace_sphere_wk(tmp,deriv,elem(ie),ttens(:,:,k,ie),var_coef=.true.) +!CLEAN tmp2(:,:)=rspheremv(:,:)*dptens(:,:,k,ie) + tmp2(:,:)=elem(ie)%rspheremp(:,:)*dptens(:,:,k,ie) + call laplace_sphere_wk(tmp2,deriv,elem(ie),dptens(:,:,k,ie),var_coef=.true.) +!CLEAN v(:,:,1)=rspheremv(:,:)*vtens(:,:,1,k,ie) +!CLEAN v(:,:,2)=rspheremv(:,:)*vtens(:,:,2,k,ie) + + v(:,:,1)=elem(ie)%rspheremp(:,:)*vtens(:,:,1,k,ie) + v(:,:,2)=elem(ie)%rspheremp(:,:)*vtens(:,:,2,k,ie) + call vlaplace_sphere_wk(v(:,:,:),deriv,elem(ie),.true.,vtens(:,:,:,k,ie), & + var_coef=.true.,nu_ratio=nu_ratio2) + + enddo + enddo +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+end subroutine biharmonic_wk_dp3d + + +subroutine biharmonic_wk_omega(elem,ptens,deriv,edge3,hybrid,nets,nete,kbeg,kend) + type (hybrid_t) , intent(in) :: hybrid + type (element_t) , intent(inout), target :: elem(:) + integer , intent(in) :: nets,nete + integer , intent(in) :: kbeg, kend + real (kind=r8), dimension(np,np,nlev,nets:nete) :: ptens + type (EdgeBuffer_t) , intent(inout) :: edge3 + type (derivative_t) , intent(in) :: deriv + + ! local + integer :: i,j,k,kptr,ie,kblk + real (kind=r8), dimension(:,:), pointer :: rspheremv + real (kind=r8), dimension(np,np) :: tmp + real (kind=r8), dimension(np,np) :: tmp2 + real (kind=r8), dimension(np,np,2) :: v + real (kind=r8) :: nu_ratio1, nu_ratio2 + logical var_coef1 + + kblk = kend - kbeg + 1 + + !if tensor hyperviscosity with tensor V is used, then biharmonic operator is (\grad\cdot V\grad) (\grad \cdot \grad) + !so tensor is only used on second call to laplace_sphere_wk + var_coef1 = .true. + if(hypervis_scaling > 0) var_coef1 = .false. + + nu_ratio1=1 + nu_ratio2=1 + + do ie=nets,nete + + !$omp parallel do num_threads(vert_num_threads) private(k,tmp) + do k=kbeg,kend + tmp=elem(ie)%derived%omega(:,:,k) + call laplace_sphere_wk(tmp,deriv,elem(ie),ptens(:,:,k,ie),var_coef=var_coef1) + enddo + + kptr = kbeg - 1 + call edgeVpack(edge3,ptens(:,:,kbeg:kend,ie),kblk,kptr,ie) + enddo + + call bndry_exchange(hybrid,edge3,location='biharmonic_wk_omega') + + do ie=nets,nete + rspheremv => elem(ie)%rspheremp(:,:) + + kptr = kbeg - 1 + call edgeVunpack(edge3,ptens(:,:,kbeg:kend,ie),kblk,kptr,ie) + + ! apply inverse mass matrix, then apply laplace again + !$omp parallel do num_threads(vert_num_threads) private(k,tmp) + do k=kbeg,kend + tmp(:,:)=rspheremv(:,:)*ptens(:,:,k,ie) + call laplace_sphere_wk(tmp,deriv,elem(ie),ptens(:,:,k,ie),var_coef=.true.) + enddo + enddo +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +end subroutine biharmonic_wk_omega + + +subroutine biharmonic_wk_scalar(elem,qtens,deriv,edgeq,hybrid,nets,nete) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute weak biharmonic operator +! input: qtens = Q +! output: qtens = weak biharmonic of Q +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(inout), target :: elem(:) +integer :: nets,nete +real (kind=r8), dimension(np,np,nlev,qsize,nets:nete) :: qtens +type (EdgeBuffer_t) , intent(inout) :: edgeq +type (derivative_t) , intent(in) :: deriv + +! local +integer :: k,kptr,i,j,ie,ic,q +integer :: kbeg,kend,qbeg,qend +real (kind=r8), dimension(np,np) :: lap_p +logical var_coef1 +integer :: kblk,qblk ! The per thead size of the vertical and tracers + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + !if tensor hyperviscosity with tensor V is used, then biharmonic operator is (\grad\cdot V\grad) (\grad \cdot \grad) + !so tensor is only used on second call to laplace_sphere_wk + var_coef1 = .true. + if(hypervis_scaling > 0) var_coef1 = .false. + + + kblk = kend - kbeg + 1 ! calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! 
calculate size of the block of tracers + + do ie=nets,nete + do q=qbeg,qend + do k=kbeg,kend + lap_p(:,:)=qtens(:,:,k,q,ie) + call laplace_sphere_wk(lap_p,deriv,elem(ie),qtens(:,:,k,q,ie),var_coef=var_coef1) + enddo + kptr = nlev*(q-1) + kbeg - 1 + call edgeVpack(edgeq, qtens(:,:,kbeg:kend,q,ie),kblk,kptr,ie) + enddo + enddo + + + call bndry_exchange(hybrid,edgeq,location='biharmonic_wk_scalar') + + do ie=nets,nete + + ! apply inverse mass matrix, then apply laplace again + do q=qbeg,qend + kptr = nlev*(q-1) + kbeg - 1 + call edgeVunpack(edgeq, qtens(:,:,kbeg:kend,q,ie),kblk,kptr,ie) + do k=kbeg,kend + lap_p(:,:)=elem(ie)%rspheremp(:,:)*qtens(:,:,k,q,ie) + call laplace_sphere_wk(lap_p,deriv,elem(ie),qtens(:,:,k,q,ie),var_coef=.true.) + enddo + enddo + enddo + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +end subroutine biharmonic_wk_scalar + + +subroutine make_C0(zeta,elem,hybrid,nets,nete) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! apply DSS (aka assembly procedure) to zeta. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nets,nete +real (kind=r8), dimension(np,np,nlev,nets:nete) :: zeta + +! local +integer :: k,i,j,ie,ic,kptr,nthread_save + + + call initEdgeBuffer(hybrid%par,edge1,elem,nlev) + +do ie=nets,nete +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + zeta(:,:,k,ie)=zeta(:,:,k,ie)*elem(ie)%spheremp(:,:) + enddo + kptr=0 + call edgeVpack(edge1, zeta(1,1,1,ie),nlev,kptr,ie) +enddo +call bndry_exchange(hybrid,edge1,location='make_C0') +do ie=nets,nete + kptr=0 + call edgeVunpack(edge1, zeta(1,1,1,ie),nlev,kptr, ie) +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + zeta(:,:,k,ie)=zeta(:,:,k,ie)*elem(ie)%rspheremp(:,:) + enddo +enddo + +call FreeEdgeBuffer(edge1) + +end subroutine + + +subroutine make_C0_vector(v,elem,hybrid,nets,nete) +#if 1 +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! apply DSS to a velocity vector +! this is a low-performance routine used for I/O and analysis. +! no need to optimize +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nets,nete +real (kind=r8), dimension(np,np,2,nlev,nets:nete) :: v + +! local +integer :: k,i,j,ie,ic,kptr +type (EdgeBuffer_t) :: edge2 +real (kind=r8), dimension(np,np,nlev,nets:nete) :: v1 + +v1(:,:,:,:) = v(:,:,1,:,:) +call make_C0(v1,elem,hybrid,nets,nete) +v(:,:,1,:,:) = v1(:,:,:,:) + +v1(:,:,:,:) = v(:,:,2,:,:) +call make_C0(v1,elem,hybrid,nets,nete) +v(:,:,2,:,:) = v1(:,:,:,:) +#else +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nets,nete +real (kind=r8), dimension(np,np,2,nlev,nets:nete) :: v + +! 
local +integer :: k,i,j,ie,ic,kptr +type (EdgeBuffer_t) :: edge2 +real (kind=r8), dimension(np,np,nlev,nets:nete) :: v1 + + + + call initEdgeBuffer(hybrid%par,edge2,elem,2*nlev) + +do ie=nets,nete +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + v(:,:,1,k,ie)=v(:,:,1,k,ie)*elem(ie)%spheremp(:,:) + v(:,:,2,k,ie)=v(:,:,2,k,ie)*elem(ie)%spheremp(:,:) + enddo + kptr=0 + call edgeVpack(edge2, v(1,1,1,1,ie),2*nlev,kptr,ie) +enddo +call bndry_exchange(hybrid,edge2,location='make_C0_vector') +do ie=nets,nete + kptr=0 + call edgeVunpack(edge2, v(1,1,1,1,ie),2*nlev,kptr,ie) +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif + do k=1,nlev + v(:,:,1,k,ie)=v(:,:,1,k,ie)*elem(ie)%rspheremp(:,:) + v(:,:,2,k,ie)=v(:,:,2,k,ie)*elem(ie)%rspheremp(:,:) + enddo +enddo + +call FreeEdgeBuffer(edge2) +#endif +end subroutine + + + + + + +subroutine compute_zeta_C0_contra(zeta,elem,hybrid,nets,nete,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 vorticity. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in contra-variant coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nt,nets,nete +real (kind=r8), dimension(np,np,nlev,nets:nete) :: zeta +real (kind=r8), dimension(np,np,2) :: ulatlon +real (kind=r8), dimension(np,np) :: v1,v2 + +! local +integer :: k,ie +type (derivative_t) :: deriv + +call derivinit(deriv) + +do k=1,nlev +do ie=nets,nete + v1 = elem(ie)%state%v(:,:,1,k,nt) + v2 = elem(ie)%state%v(:,:,2,k,nt) + ulatlon(:,:,1) = elem(ie)%D(:,:,1,1)*v1 + elem(ie)%D(:,:,1,2)*v2 + ulatlon(:,:,2) = elem(ie)%D(:,:,2,1)*v1 + elem(ie)%D(:,:,2,2)*v2 + call vorticity_sphere(ulatlon,deriv,elem(ie),zeta(:,:,k,ie)) +enddo +enddo + +call make_C0(zeta,elem,hybrid,nets,nete) + +end subroutine + + + +subroutine compute_div_C0_contra(zeta,elem,hybrid,nets,nete,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 divergence. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in contra-variant coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nt,nets,nete +real (kind=r8), dimension(np,np,nlev,nets:nete) :: zeta +real (kind=r8), dimension(np,np,2) :: ulatlon +real (kind=r8), dimension(np,np) :: v1,v2 + +! local +integer :: k,ie +type (derivative_t) :: deriv + +call derivinit(deriv) + +do k=1,nlev +do ie=nets,nete + v1 = elem(ie)%state%v(:,:,1,k,nt) + v2 = elem(ie)%state%v(:,:,2,k,nt) + ulatlon(:,:,1) = elem(ie)%D(:,:,1,1)*v1 + elem(ie)%D(:,:,1,2)*v2 + ulatlon(:,:,2) = elem(ie)%D(:,:,2,1)*v1 + elem(ie)%D(:,:,2,2)*v2 + call divergence_sphere(ulatlon,deriv,elem(ie),zeta(:,:,k,ie)) +enddo +enddo + +call make_C0(zeta,elem,hybrid,nets,nete) + +end subroutine + +subroutine compute_zeta_C0_par(zeta,elem,par,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 vorticity. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in lat-lon coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
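+! This entry point builds a serial (single-threaded) hybrid region and calls the
+! hybrid version over all elements (1:nelemd); like the other compute_*_C0
+! routines it is intended for I/O and diagnostics, not performance-critical code.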
+type (parallel_t) :: par +type (element_t) , intent(in), target :: elem(:) +real (kind=r8), dimension(np,np,nlev,nelemd) :: zeta +integer :: nt + +! local +type (hybrid_t) :: hybrid +integer :: k,i,j,ie,ic +type (derivative_t) :: deriv + +! single thread +hybrid = config_thread_region(par,'serial') + +call compute_zeta_C0_hybrid(zeta,elem,hybrid,1,nelemd,nt) + +end subroutine + + +subroutine compute_div_C0_par(zeta,elem,par,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 divergence. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in lat-lon coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (parallel_t) :: par +type (element_t) , intent(in), target :: elem(:) +real (kind=r8), dimension(np,np,nlev,nelemd) :: zeta +integer :: nt + +! local +type (hybrid_t) :: hybrid +integer :: k,i,j,ie,ic +type (derivative_t) :: deriv + +! single thread +hybrid = config_thread_region(par,'serial') + +call compute_div_C0_hybrid(zeta,elem,hybrid,1,nelemd,nt) + +end subroutine + + + +subroutine compute_zeta_C0_hybrid(zeta,elem,hybrid,nets,nete,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 vorticity. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in lat-lon coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nt,nets,nete +real (kind=r8), dimension(np,np,nlev,nets:nete) :: zeta + +! local +integer :: k,i,j,ie,ic +type (derivative_t) :: deriv + +call derivinit(deriv) + +do ie=nets,nete +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif +do k=1,nlev + call vorticity_sphere(elem(ie)%state%v(:,:,:,k,nt),deriv,elem(ie),zeta(:,:,k,ie)) +enddo +enddo + +call make_C0(zeta,elem,hybrid,nets,nete) + +end subroutine + + +subroutine compute_div_C0_hybrid(zeta,elem,hybrid,nets,nete,nt) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! compute C0 divergence. That is, solve: +! < PHI, zeta > = +! +! input: v (stored in elem()%, in lat-lon coordinates) +! output: zeta(:,:,:,:) +! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +type (hybrid_t) , intent(in) :: hybrid +type (element_t) , intent(in), target :: elem(:) +integer :: nt,nets,nete +real (kind=r8), dimension(np,np,nlev,nets:nete) :: zeta + +! local +integer :: k,i,j,ie,ic +type (derivative_t) :: deriv + +call derivinit(deriv) + +do ie=nets,nete +#if (defined COLUMN_OPENMP) +!$omp parallel do num_threads(vert_num_threads) private(k) +#endif +do k=1,nlev + call divergence_sphere(elem(ie)%state%v(:,:,:,k,nt),deriv,elem(ie),zeta(:,:,k,ie)) +enddo +enddo + +call make_C0(zeta,elem,hybrid,nets,nete) + +end subroutine + + + + + + + + +subroutine neighbor_minmax(hybrid,edgeMinMax,nets,nete,min_neigh,max_neigh) + + type (hybrid_t) , intent(in) :: hybrid + type (EdgeBuffer_t) , intent(inout) :: edgeMinMax + integer :: nets,nete + real (kind=r8) :: min_neigh(nlev,qsize,nets:nete) + real (kind=r8) :: max_neigh(nlev,qsize,nets:nete) + integer :: kblk, qblk + ! local + integer:: ie, q, k, kptr + integer:: kbeg, kend, qbeg, qend + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + kblk = kend - kbeg + 1 ! 
calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! calculate size of the block of tracers + + do ie=nets,nete + do q = qbeg, qend + kptr = nlev*(q - 1) + kbeg - 1 + call edgeSpack(edgeMinMax,min_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + kptr = qsize*nlev + nlev*(q - 1) + kbeg - 1 + call edgeSpack(edgeMinMax,max_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + enddo + enddo + + call bndry_exchange(hybrid,edgeMinMax,location='neighbor_minmax') + + do ie=nets,nete + do q=qbeg,qend + kptr = nlev*(q - 1) + kbeg - 1 + call edgeSunpackMIN(edgeMinMax,min_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + kptr = qsize*nlev + nlev*(q - 1) + kbeg - 1 + call edgeSunpackMAX(edgeMinMax,max_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + do k=kbeg,kend + min_neigh(k,q,ie) = max(min_neigh(k,q,ie),0.0_r8) + enddo + enddo + enddo + +end subroutine neighbor_minmax + + +subroutine neighbor_minmax_start(hybrid,edgeMinMax,nets,nete,min_neigh,max_neigh) + + type (hybrid_t) , intent(in) :: hybrid + type (EdgeBuffer_t) , intent(inout) :: edgeMinMax + integer :: nets,nete + real (kind=r8) :: min_neigh(nlev,qsize,nets:nete) + real (kind=r8) :: max_neigh(nlev,qsize,nets:nete) + integer :: kblk, qblk + integer :: kbeg, kend, qbeg, qend + + ! local + integer :: ie,q, k,kptr + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + kblk = kend - kbeg + 1 ! calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! calculate size of the block of tracers + + do ie=nets,nete + do q=qbeg, qend + kptr = nlev*(q - 1) + kbeg - 1 + call edgeSpack(edgeMinMax,min_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + kptr = qsize*nlev + nlev*(q - 1) + kbeg - 1 + call edgeSpack(edgeMinMax,max_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + enddo + enddo + + call bndry_exchange_start(hybrid,edgeMinMax,location='neighbor_minmax_start') + +end subroutine neighbor_minmax_start + +subroutine neighbor_minmax_finish(hybrid,edgeMinMax,nets,nete,min_neigh,max_neigh) + + type (hybrid_t) , intent(in) :: hybrid + type (EdgeBuffer_t) , intent(inout) :: edgeMinMax + integer :: nets,nete + real (kind=r8) :: min_neigh(nlev,qsize,nets:nete) + real (kind=r8) :: max_neigh(nlev,qsize,nets:nete) + integer :: kblk, qblk + integer :: ie,q, k,kptr + integer :: kbeg, kend, qbeg, qend + + call get_loop_ranges(hybrid,kbeg=kbeg,kend=kend,qbeg=qbeg,qend=qend) + + kblk = kend - kbeg + 1 ! calculate size of the block of vertical levels + qblk = qend - qbeg + 1 ! calculate size of the block of tracers + + call bndry_exchange_finish(hybrid,edgeMinMax,location='neighbor_minmax_finish') + + do ie=nets,nete + do q=qbeg, qend + kptr = nlev*(q - 1) + kbeg - 1 + call edgeSunpackMIN(edgeMinMax,min_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + kptr = qsize*nlev + nlev*(q - 1) + kbeg - 1 + call edgeSunpackMAX(edgeMinMax,max_neigh(kbeg:kend,q,ie),kblk,kptr,ie) + do k=kbeg,kend + min_neigh(k,q,ie) = max(min_neigh(k,q,ie),0.0_r8) + enddo + enddo + enddo + +end subroutine neighbor_minmax_finish + +end module viscosity_mod diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 new file mode 100644 index 00000000..b3e2fa06 --- /dev/null +++ b/src/dynamics/se/dyn_comp.F90 @@ -0,0 +1,2398 @@ +module dyn_comp + +! CAM interfaces to the SE Dynamical Core + +use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl +use physconst, only: pi +use spmd_utils, only: iam, masterproc +!use constituents, only: pcnst, cnst_get_ind, cnst_name, cnst_longname, & +! cnst_read_iv, qmin, cnst_type, tottnam, & +! 
cnst_is_a_water_species +use constituents, only: pcnst +use cam_control_mod, only: initial_run, simple_phys +use cam_initfiles, only: initial_file_get_id, topo_file_get_id, pertlim +!use phys_control, only: use_gw_front, use_gw_front_igw, waccmx_is +use dyn_grid, only: timelevel, hvcoord, edgebuf + +use cam_grid_support, only: cam_grid_id, cam_grid_get_gcid, & + cam_grid_dimensions, cam_grid_get_dim_names, & + cam_grid_get_latvals, cam_grid_get_lonvals, & + max_hcoordname_len + +use inic_analytic, only: analytic_ic_active, analytic_ic_set_ic +use dyn_tests_utils, only: vcoord=>vc_dry_pressure + +!use cam_history, only: outfld, hist_fld_active, fieldname_len +!use cam_history_support, only: max_fieldname_len +use time_manager, only: get_step_size + +!use ncdio_atm, only: infld +use cam_field_read, only: cam_read_field + +use pio, only: file_desc_t, pio_seterrorhandling, PIO_BCAST_ERROR, & + pio_inq_dimid, pio_inq_dimlen, PIO_NOERR + +use shr_infnan_mod, only: shr_infnan_isnan +use cam_logfile, only: iulog +use cam_abortutils, only: endrun +use cam_map_utils, only: iMap +use shr_sys_mod, only: shr_sys_flush + +use parallel_mod, only: par +use hybrid_mod, only: hybrid_t +use dimensions_mod, only: nelemd, nlev, np, npsq, ntrac, nc, fv_nphys, & + qsize +use element_mod, only: element_t, elem_state_t +use fvm_control_volume_mod, only: fvm_struct +use time_mod, only: nsplit +use edge_mod, only: initEdgeBuffer, edgeVpack, edgeVunpack, FreeEdgeBuffer +use edgetype_mod, only: EdgeBuffer_t +use bndry_mod, only: bndry_exchange + +implicit none +private +save + +public :: & + dyn_import_t, & + dyn_export_t, & + dyn_readnl, & + dyn_register, & + dyn_init, & + dyn_run, & + dyn_final + +type dyn_import_t + type (element_t), pointer :: elem(:) => null() + type (fvm_struct), pointer :: fvm(:) => null() +end type dyn_import_t + +type dyn_export_t + type (element_t), pointer :: elem(:) => null() + type (fvm_struct), pointer :: fvm(:) => null() +end type dyn_export_t + +! Namelist +logical, public, protected :: write_restart_unstruct + +! Frontogenesis indices +integer, public :: frontgf_idx = -1 +integer, public :: frontga_idx = -1 + +! 
constituent indices for waccm-x dry air properties +integer, public, protected :: & + ixo = -1, & + ixo2 = -1, & + ixh = -1, & + ixh2 = -1 + +interface read_dyn_var + module procedure read_dyn_field_2d + module procedure read_dyn_field_3d +end interface read_dyn_var + +real(r8), parameter :: rad2deg = 180.0_r8 / pi +real(r8), parameter :: deg2rad = pi / 180.0_r8 + +!=============================================================================== +contains +!=============================================================================== + +subroutine dyn_readnl(NLFileName) + use physconst, only: thermodynamic_active_species_num + use shr_nl_mod, only: find_group_name => shr_nl_find_group_name + use shr_file_mod, only: shr_file_getunit, shr_file_freeunit + use spmd_utils, only: masterproc, masterprocid, mpicom, npes + use spmd_utils, only: mpi_real8, mpi_integer, mpi_character, mpi_logical + use dyn_grid, only: se_write_grid_file, se_grid_filename, se_write_gll_corners + use dp_mapping, only: nphys_pts + use native_mapping, only: native_mapping_readnl + use physconst, only: rearth + + !SE dycore: + use namelist_mod, only: homme_set_defaults, homme_postprocess_namelist + use control_mod, only: hypervis_subcycle, hypervis_subcycle_sponge + use control_mod, only: hypervis_subcycle_q, statefreq, runtype + use control_mod, only: nu, nu_div, nu_p, nu_q, nu_top, qsplit, rsplit + use control_mod, only: vert_remap_uvTq_alg, vert_remap_tracer_alg + use control_mod, only: tstep_type, rk_stage_user + use control_mod, only: ftype, limiter_option, partmethod + use control_mod, only: topology, phys_dyn_cp, variable_nsplit + use control_mod, only: fine_ne, hypervis_power, hypervis_scaling + use control_mod, only: max_hypervis_courant, statediag_numtrac,refined_mesh + use control_mod, only: raytau0, raykrange, rayk0, molecular_diff + use dimensions_mod, only: ne, npart + use dimensions_mod, only: lcp_moist + use dimensions_mod, only: hypervis_dynamic_ref_state,large_Courant_incr + use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet + use dimensions_mod, only: kmin_jet, kmax_jet + use params_mod, only: SFCURVE + use parallel_mod, only: initmpi + use thread_mod, only: initomp, max_num_threads + use thread_mod, only: horz_num_threads, vert_num_threads, tracer_num_threads + + ! Dummy argument + character(len=*), intent(in) :: NLFileName + + ! Local variables + integer :: unitn, ierr,k + real(r8) :: uniform_res_hypervis_scaling,nu_fac + + ! 
SE Namelist variables + integer :: se_fine_ne + integer :: se_ftype + integer :: se_statediag_numtrac + integer :: se_fv_nphys + real(r8) :: se_hypervis_power + real(r8) :: se_hypervis_scaling + integer :: se_hypervis_subcycle + integer :: se_hypervis_subcycle_sponge + integer :: se_hypervis_subcycle_q + integer :: se_limiter_option + real(r8) :: se_max_hypervis_courant + character(len=SHR_KIND_CL) :: se_mesh_file + integer :: se_ne + integer :: se_npes + integer :: se_nsplit + real(r8) :: se_nu + real(r8) :: se_nu_div + real(r8) :: se_nu_p + real(r8) :: se_nu_top + integer :: se_qsplit + logical :: se_refined_mesh + integer :: se_rsplit + integer :: se_statefreq + integer :: se_tstep_type + character(len=32) :: se_vert_remap_T + character(len=32) :: se_vert_remap_uvTq_alg + character(len=32) :: se_vert_remap_tracer_alg + integer :: se_horz_num_threads + integer :: se_vert_num_threads + integer :: se_tracer_num_threads + logical :: se_hypervis_dynamic_ref_state + logical :: se_lcp_moist + logical :: se_write_restart_unstruct + logical :: se_large_Courant_incr + integer :: se_fvm_supercycling + integer :: se_fvm_supercycling_jet + integer :: se_kmin_jet + integer :: se_kmax_jet + integer :: se_phys_dyn_cp + real(r8) :: se_raytau0 + real(r8) :: se_raykrange + integer :: se_rayk0 + real(r8) :: se_molecular_diff + + namelist /dyn_se_inparm/ & + se_fine_ne, & ! For refined meshes + se_ftype, & ! forcing type + se_statediag_numtrac, & + se_fv_nphys, & + se_hypervis_power, & + se_hypervis_scaling, & + se_hypervis_subcycle, & + se_hypervis_subcycle_sponge, & + se_hypervis_subcycle_q, & + se_limiter_option, & + se_max_hypervis_courant, & + se_mesh_file, & ! Refined mesh definition file + se_ne, & + se_npes, & + se_nsplit, & ! # of dyn steps per physics timestep + se_nu, & + se_nu_div, & + se_nu_p, & + se_nu_top, & + se_qsplit, & + se_refined_mesh, & + se_rsplit, & + se_statefreq, & ! number of steps per printstate call + se_tstep_type, & + se_vert_remap_T, & + se_vert_remap_uvTq_alg, & + se_vert_remap_tracer_alg, & + se_write_grid_file, & + se_grid_filename, & + se_write_gll_corners, & + se_horz_num_threads, & + se_vert_num_threads, & + se_tracer_num_threads, & + se_hypervis_dynamic_ref_state,& + se_lcp_moist, & + se_write_restart_unstruct, & + se_large_Courant_incr, & + se_fvm_supercycling, & + se_fvm_supercycling_jet, & + se_kmin_jet, & + se_kmax_jet, & + se_phys_dyn_cp, & + se_raytau0, & + se_raykrange, & + se_rayk0, & + se_molecular_diff + + !-------------------------------------------------------------------------- + + ! Read the namelist (dyn_se_inparm) + call MPI_barrier(mpicom, ierr) + if (masterproc) then + write(iulog, *) "dyn_readnl: reading dyn_se_inparm namelist..." + unitn = shr_file_getunit() + open( unitn, file=trim(NLFileName), status='old' ) + call find_group_name(unitn, 'dyn_se_inparm', status=ierr) + if (ierr == 0) then + read(unitn, dyn_se_inparm, iostat=ierr) + if (ierr /= 0) then + call endrun('dyn_readnl: ERROR reading dyn_se_inparm namelist') + end if + end if + close(unitn) + call shr_file_freeunit(unitn) + end if + + ! 
Broadcast namelist values to all PEs + call MPI_bcast(se_fine_ne, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_ftype, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_statediag_numtrac, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_hypervis_power, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_hypervis_scaling, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_hypervis_subcycle, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_hypervis_subcycle_sponge, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_hypervis_subcycle_q, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_limiter_option, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_max_hypervis_courant, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_mesh_file, SHR_KIND_CL, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_ne, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_npes, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_nsplit, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_nu, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_nu_div, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_nu_p, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_nu_top, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_qsplit, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_refined_mesh, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_rsplit, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_statefreq, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_tstep_type, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_vert_remap_T, 32, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_vert_remap_uvTq_alg, 32, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_vert_remap_tracer_alg, 32, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_fv_nphys, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_write_grid_file, 16, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_grid_filename, shr_kind_cl, mpi_character, masterprocid, mpicom, ierr) + call MPI_bcast(se_write_gll_corners, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_horz_num_threads, 1, MPI_integer, masterprocid, mpicom,ierr) + call MPI_bcast(se_vert_num_threads, 1, MPI_integer, masterprocid, mpicom,ierr) + call MPI_bcast(se_tracer_num_threads, 1, MPI_integer, masterprocid, mpicom,ierr) + call MPI_bcast(se_hypervis_dynamic_ref_state, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_lcp_moist, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_write_restart_unstruct, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_large_Courant_incr, 1, mpi_logical, masterprocid, mpicom, ierr) + call MPI_bcast(se_fvm_supercycling, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_fvm_supercycling_jet, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_kmin_jet, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_kmax_jet, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_phys_dyn_cp, 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_rayk0 , 1, mpi_integer, masterprocid, mpicom, ierr) + call MPI_bcast(se_raykrange, 1, mpi_real8, masterprocid, mpicom, ierr) + call MPI_bcast(se_raytau0, 1, mpi_real8, 
masterprocid, mpicom, ierr) + call MPI_bcast(se_molecular_diff, 1, mpi_real8, masterprocid, mpicom, ierr) + + ! Set se_npes to the model npes value if a namelist value of zero is given: + if (se_npes == 0) then + se_npes = npes + end if + + ! Check that se_npes is a positive integer: + if (se_npes < 0) then + call endrun('dyn_readnl: ERROR: se_npes must be > 0') + end if + + ! Initialize the SE structure that holds the MPI decomposition information + par = initmpi(se_npes) + call initomp() + + if (se_fvm_supercycling < 0) se_fvm_supercycling = se_rsplit + if (se_fvm_supercycling_jet < 0) se_fvm_supercycling_jet = se_rsplit + + ! Go ahead and enforce ne = 0 for refined mesh runs + if (se_refined_mesh) then + se_ne = 0 + end if + + ! Set HOMME defaults + call homme_set_defaults() + ! Set HOMME variables not in CAM's namelist but with different CAM defaults + partmethod = SFCURVE + npart = se_npes + ! CAM requires forward-in-time, subcycled dynamics + ! RK2 3 stage tracers, sign-preserving conservative + rk_stage_user = 3 + topology = "cube" + ! Finally, set the HOMME variables which have different names + fine_ne = se_fine_ne + ftype = se_ftype + statediag_numtrac = MIN(se_statediag_numtrac,pcnst) + hypervis_power = se_hypervis_power + hypervis_scaling = se_hypervis_scaling + hypervis_subcycle = se_hypervis_subcycle + if (hypervis_subcycle_sponge<0) then + hypervis_subcycle_sponge = hypervis_subcycle + else + hypervis_subcycle_sponge = se_hypervis_subcycle_sponge + end if + hypervis_subcycle_q = se_hypervis_subcycle_q + limiter_option = se_limiter_option + max_hypervis_courant = se_max_hypervis_courant + refined_mesh = se_refined_mesh + ne = se_ne + nsplit = se_nsplit + nu = se_nu + nu_div = se_nu_div + nu_p = se_nu_p + nu_q = se_nu_p !for tracer-wind consistency nu_q must me equal to nu_p + nu_top = se_nu_top + qsplit = se_qsplit + rsplit = se_rsplit + statefreq = se_statefreq + tstep_type = se_tstep_type + vert_remap_uvTq_alg = set_vert_remap(se_vert_remap_T, se_vert_remap_uvTq_alg) + vert_remap_tracer_alg = set_vert_remap(se_vert_remap_T, se_vert_remap_tracer_alg) + fv_nphys = se_fv_nphys + hypervis_dynamic_ref_state = se_hypervis_dynamic_ref_state + lcp_moist = se_lcp_moist + large_Courant_incr = se_large_Courant_incr + fvm_supercycling = se_fvm_supercycling + fvm_supercycling_jet = se_fvm_supercycling_jet + kmin_jet = se_kmin_jet + kmax_jet = se_kmax_jet + variable_nsplit = .false. + phys_dyn_cp = se_phys_dyn_cp + raytau0 = se_raytau0 + raykrange = se_raykrange + rayk0 = se_rayk0 + molecular_diff = se_molecular_diff + + if (fv_nphys > 0) then + ! Use finite volume physics grid and CSLAM for tracer advection + nphys_pts = fv_nphys*fv_nphys + qsize = thermodynamic_active_species_num ! number tracers advected by GLL + ntrac = pcnst ! number tracers advected by CSLAM + else + ! Use GLL grid for physics and tracer advection + nphys_pts = npsq + qsize = pcnst + ntrac = 0 + end if + + if (rsplit < 1) then + call endrun('dyn_readnl: rsplit must be > 0') + end if + + ! if restart or branch run + if (.not. initial_run) then + runtype = 1 + end if + + ! HOMME wants 'none' to indicate no mesh file + if (len_trim(se_mesh_file) == 0) then + se_mesh_file = 'none' + if (se_refined_mesh) then + call endrun('dyn_readnl ERROR: se_refined_mesh=.true. but no se_mesh_file') + end if + end if + call homme_postprocess_namelist(se_mesh_file, par) + + ! Set threading numbers to reasonable values + if ((se_horz_num_threads == 0) .and. (se_vert_num_threads == 0) .and. (se_tracer_num_threads == 0)) then + ! 
The user has not set any threading values, choose defaults + se_horz_num_threads = 1 + se_vert_num_threads = max_num_threads + se_tracer_num_threads = se_vert_num_threads + end if + if (se_horz_num_threads < 1) then + se_horz_num_threads = 1 + end if + if (se_vert_num_threads < 1) then + se_vert_num_threads = 1 + end if + if (se_tracer_num_threads < 1) then + se_tracer_num_threads = 1 + end if + horz_num_threads = se_horz_num_threads + vert_num_threads = se_vert_num_threads + tracer_num_threads = se_tracer_num_threads + + write_restart_unstruct = se_write_restart_unstruct + + if (se_kmin_jet<0 ) kmin_jet = 1 + if (se_kmax_jet<0 ) kmax_jet = nlev + + if (masterproc) then + write(iulog, '(a,i0)') 'dyn_readnl: se_ftype = ',ftype + write(iulog, '(a,i0)') 'dyn_readnl: se_statediag_numtrac = ',statediag_numtrac + write(iulog, '(a,i0)') 'dyn_readnl: se_hypervis_subcycle = ',se_hypervis_subcycle + write(iulog, '(a,i0)') 'dyn_readnl: se_hypervis_subcycle_sponge = ',se_hypervis_subcycle_sponge + write(iulog, '(a,i0)') 'dyn_readnl: se_hypervis_subcycle_q = ',se_hypervis_subcycle_q + write(iulog, '(a,l4)') 'dyn_readnl: se_large_Courant_incr = ',se_large_Courant_incr + write(iulog, '(a,i0)') 'dyn_readnl: se_limiter_option = ',se_limiter_option + if (.not. se_refined_mesh) then + write(iulog, '(a,i0)')'dyn_readnl: se_ne = ',se_ne + end if + write(iulog, '(a,i0)') 'dyn_readnl: se_npes = ',se_npes + write(iulog, '(a,i0)') 'dyn_readnl: se_nsplit = ',se_nsplit + write(iulog, '(a,i0)') 'dyn_readnl: se_phys_dyn_cp = ',se_phys_dyn_cp + ! + ! se_nu<0 then coefficients are set automatically in module global_norms_mod + ! + if (se_nu_div>0) & + write(iulog, '(a,e9.2)') 'dyn_readnl: se_nu = ',se_nu + if (se_nu_div>0) & + write(iulog, '(a,e9.2)') 'dyn_readnl: se_nu_div = ',se_nu_div + if (se_nu_p>0) then + write(iulog, '(a,e9.2)') 'dyn_readnl: se_nu_p = ',se_nu_p + write(iulog, '(a)') 'Note that nu_q must be the same as nu_p for mass / tracer inconsistency' + end if + write(iulog, '(a,e9.2)') 'dyn_readnl: se_nu_top = ',se_nu_top + write(iulog, '(a,i0)') 'dyn_readnl: se_qsplit = ',se_qsplit + write(iulog, '(a,i0)') 'dyn_readnl: se_rsplit = ',se_rsplit + write(iulog, '(a,i0)') 'dyn_readnl: se_statefreq = ',se_statefreq + write(iulog, '(a,i0)') 'dyn_readnl: se_tstep_type = ',se_tstep_type + write(iulog, '(a,a)') 'dyn_readnl: se_vert_remap_T = ',trim(se_vert_remap_T) + write(iulog, '(a,a)') 'dyn_readnl: se_vert_remap_uvTq_alg = ',trim(se_vert_remap_uvTq_alg) + write(iulog, '(a,a)') 'dyn_readnl: se_vert_remap_tracer_alg = ',trim(se_vert_remap_tracer_alg) + write(iulog, '(a,l4)') 'dyn_readnl: se_hypervis_dynamic_ref_state = ',hypervis_dynamic_ref_state + write(iulog, '(a,l4)') 'dyn_readnl: lcp_moist = ',lcp_moist + write(iulog, '(a,i0)') 'dyn_readnl: se_fvm_supercycling = ',fvm_supercycling + write(iulog, '(a,i0)') 'dyn_readnl: se_fvm_supercycling_jet = ',fvm_supercycling_jet + write(iulog, '(a,i0)') 'dyn_readnl: se_kmin_jet = ',kmin_jet + write(iulog, '(a,i0)') 'dyn_readnl: se_kmax_jet = ',kmax_jet + if (se_refined_mesh) then + write(iulog, '(a)') 'dyn_readnl: Refined mesh simulation' + write(iulog, '(a)') 'dyn_readnl: se_mesh_file = ',trim(se_mesh_file) + if (hypervis_power /= 0) then + write(iulog, '(a)') 'Using scalar viscosity (Zarzycki et al 2014 JClim)' + write(iulog, '(a,e11.4)') 'dyn_readnl: se_hypervis_power = ',se_hypervis_power, ', (tensor hyperviscosity)' + write(iulog, '(a,e11.4)') 'dyn_readnl: se_max_hypervis_courant = ',se_max_hypervis_courant + end if + if (hypervis_scaling /= 0) then + write(iulog, 
'(a)') 'Using tensor viscosity (Guba et al., 2014)' + write(iulog, '(a,e11.4)') 'dyn_readnl: se_hypervis_scaling = ',se_hypervis_scaling + end if + end if + + if (fv_nphys > 0) then + write(iulog, '(a)') 'dyn_readnl: physics will run on FVM points; advection by CSLAM' + write(iulog,'(a,i0)') 'dyn_readnl: se_fv_nphys = ', fv_nphys + else + write(iulog, '(a)') 'dyn_readnl: physics will run on SE GLL points' + end if + write(iulog, '(a,i0)') 'dyn_readnl: se_horz_num_threads = ',horz_num_threads + write(iulog, '(a,i0)') 'dyn_readnl: se_vert_num_threads = ',vert_num_threads + write(iulog, '(a,i0)') 'dyn_readnl: se_tracer_num_threads = ',tracer_num_threads + if (trim(se_write_grid_file) == 'SCRIP') then + write(iulog,'(2a)') "dyn_readnl: write SCRIP grid file = ", trim(se_grid_filename) + else + write(iulog,'(a)') "dyn_readnl: do not write grid file" + end if + write(iulog,'(a,l1)') 'dyn_readnl: write gll corners to SEMapping.nc = ', & + se_write_gll_corners + write(iulog,'(a,l1)') 'dyn_readnl: write restart data on unstructured grid = ', & + se_write_restart_unstruct + + write(iulog, '(a,e9.2)') 'dyn_readnl: se_raytau0 = ', raytau0 + write(iulog, '(a,e9.2)') 'dyn_readnl: se_raykrange = ', raykrange + write(iulog, '(a,i0)' ) 'dyn_readnl: se_rayk0 = ', rayk0 + write(iulog, '(a,e9.2)') 'dyn_readnl: se_molecular_diff = ', molecular_diff + end if + + call native_mapping_readnl(NLFileName) + + !--------------------------------------------------------------------------- + contains + !--------------------------------------------------------------------------- + + integer function set_vert_remap( remap_T, remap_alg ) + + ! Convert namelist input strings to the internally used integers. + + character(len=*), intent(in) :: remap_T ! scheme for remapping temperature + character(len=*), intent(in) :: remap_alg ! remapping algorithm + + ! check valid remap_T values: + if (remap_T /= 'thermal_energy_over_P' .and. 
remap_T /= 'Tv_over_logP') then + write(iulog,*)'set_vert_remap: invalid remap_T= ',trim(remap_T) + call endrun('set_vert_remap: invalid remap_T') + end if + + select case (remap_alg) + case ('PPM_bc_mirror') + set_vert_remap = 1 + case ('PPM_bc_PCoM') + set_vert_remap = 2 + case ('PPM_bc_linear_extrapolation') + set_vert_remap = 10 + case ('FV3_PPM') + if (remap_T == 'thermal_energy_over_P') then + set_vert_remap = -4 + else + set_vert_remap = -40 + end if + case ('FV3_CS') + if (remap_T == 'thermal_energy_over_P') then + set_vert_remap = -9 + else + set_vert_remap = -90 + end if + case ('FV3_CS_2dz_filter') + if (remap_T == 'thermal_energy_over_P') then + set_vert_remap = -10 + else + set_vert_remap = -100 + end if + case ('FV3_non_monotone_CS_2dz_filter') + if (remap_T == 'thermal_energy_over_P') then + set_vert_remap = -11 + else + set_vert_remap = -110 + end if + case default + write(iulog,*)'set_vert_remap: invalid remap_alg= ',trim(remap_alg) + call endrun('set_vert_remap: invalid remap_alg') + end select + + end function set_vert_remap + +end subroutine dyn_readnl + +!========================================================================================= + +subroutine dyn_init(dyn_in, dyn_out) + use dyn_grid, only: elem, fvm + use cam_pio_utils, only: clean_iodesc_list + use physconst, only: thermodynamic_active_species_num, thermodynamic_active_species_idx + use physconst, only: thermodynamic_active_species_idx_dycore, rair, cpair + use physconst, only: get_molecular_diff_coef_reference + !use cam_history, only: addfld, add_default, horiz_only, register_vector_field + !use gravity_waves_sources, only: gws_init + + !SE dycore: + use prim_advance_mod, only: prim_advance_init + use thread_mod, only: horz_num_threads + use hybrid_mod, only: get_loop_ranges, config_thread_region + use dimensions_mod, only: nu_scale_top, nu_lev, nu_div_lev + use dimensions_mod, only: ksponge_end, kmvis_ref, kmcnd_ref,rho_ref,km_sponge_factor + use dimensions_mod, only: kmvisi_ref, kmcndi_ref,rhoi_ref + use dimensions_mod, only: cnst_name_gll, cnst_longname_gll + use dimensions_mod, only: irecons_tracer_lev, irecons_tracer, otau, kord_tr, kord_tr_cslam + use prim_driver_mod, only: prim_init2 + use time_mod, only: time_at + use control_mod, only: runtype, raytau0, raykrange, rayk0, molecular_diff, nu_top + use test_fvm_mapping, only: test_mapping_addfld + !use phys_control, only: phys_getopts + use control_mod, only: vert_remap_uvTq_alg, vert_remap_tracer_alg + + ! Dummy arguments: + type(dyn_import_t), intent(out) :: dyn_in + type(dyn_export_t), intent(out) :: dyn_out + + ! Local variables + integer :: ithr, nets, nete, ie, k, kmol_end + real(r8), parameter :: Tinit = 300.0_r8 + real(r8) :: press, ptop, tref + + type(hybrid_t) :: hybrid + + integer :: ixcldice, ixcldliq, ixrain, ixsnow, ixgraupel + integer :: m_cnst, m + + ! 
variables for initializing energy and axial angular momentum diagnostics + character (len = 3), dimension(12) :: stage = (/"dED","dAF","dBD","dAD","dAR","dBF","dBH","dCH","dAH",'dBS','dAS','p2d'/) + character (len = 70),dimension(12) :: stage_txt = (/& + " end of previous dynamics ",& !dED + " from previous remapping or state passed to dynamics",& !dAF - state in beginning of nsplit loop + " state after applying CAM forcing ",& !dBD - state after applyCAMforcing + " before vertical remapping ",& !dAD - state before vertical remapping + " after vertical remapping ",& !dAR - state at end of nsplit loop + " state passed to parameterizations ",& !dBF + " state before hypervis ",& !dBH + " state after hypervis but before adding heating term",& !dCH + " state after hypervis ",& !dAH + " state before sponge layer diffusion ",& !dBS - state before sponge del2 + " state after sponge layer diffusion ",& !dAS - state after sponge del2 + " phys2dyn mapping errors (requires ftype-1) " & !p2d - for assessing phys2dyn mapping errors + /) + character (len = 2) , dimension(8) :: vars = (/"WV" ,"WL" ,"WI" ,"SE" ,"KE" ,"MR" ,"MO" ,"TT" /) + !if ntrac>0 then tracers should be output on fvm grid but not energy (SE+KE) and AAM diags + logical , dimension(8) :: massv = (/.true.,.true.,.true.,.false.,.false.,.false.,.false.,.false./) + character (len = 70) , dimension(8) :: vars_descriptor = (/& + "Total column water vapor ",& + "Total column cloud water ",& + "Total column cloud ice ",& + "Total column dry static energy ",& + "Total column kinetic energy ",& + "Total column wind axial angular momentum",& + "Total column mass axial angular momentum",& + "Total column test tracer "/) + character (len = 14), dimension(8) :: & + vars_unit = (/& + "kg/m2 ","kg/m2 ","kg/m2 ","J/m2 ",& + "J/m2 ","kg*m2/s*rad2 ","kg*m2/s*rad2 ","kg/m2 "/) + + integer :: istage, ivars + character (len=108) :: str1, str2, str3 + + logical :: history_budget ! output tendencies and state variables for budgets + integer :: budget_hfile_num + + character(len=*), parameter :: subname = 'dyn_init' + + real(r8) :: tau0, krange, otau0, scale + real(r8) :: km_sponge_factor_local(nlev+1) + !---------------------------------------------------------------------------- + + ! Now allocate and set condenstate vars + allocate(cnst_name_gll(qsize)) ! constituent names for gll tracers + allocate(cnst_longname_gll(qsize)) ! long name of constituents for gll tracers + + allocate(kord_tr(qsize)) + kord_tr(:) = vert_remap_tracer_alg + if (ntrac>0) then + allocate(kord_tr_cslam(ntrac)) + kord_tr_cslam(:) = vert_remap_tracer_alg + end if + +!Remove/replace after constituents are enabled in CCPP -JN: +#if 0 + + do m=1,qsize + ! + ! The "_gll" index variables below are used to keep track of condensate-loading tracers + ! since they are not necessarily indexed contiguously and not necessarily in the same + ! order (physics is in charge of the order) + ! + ! if running with CSLAM then the SE (gll) condensate-loading water tracers are always + ! indexed contiguously (q,cldliq,cldice,rain,snow,graupel) - see above + ! + ! CSLAM tracers are always indexed as in physics + ! of no CSLAM then SE tracers are always indexed as in physics + ! + if (ntrac>0) then + ! + ! note that in this case qsize = thermodynamic_active_species_num + ! 
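+            ! The assignments below therefore map the GLL (dycore) tracer index m to
+            ! itself, set the CSLAM vertical remap order at the corresponding physics
+            ! index via thermodynamic_active_species_idx(m), and copy the constituent
+            ! name and longname from the physics ordering.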
+ thermodynamic_active_species_idx_dycore(m) = m + kord_tr_cslam(thermodynamic_active_species_idx(m)) = vert_remap_uvTq_alg + kord_tr(m) = vert_remap_uvTq_alg + cnst_name_gll (m) = cnst_name (thermodynamic_active_species_idx(m)) + cnst_longname_gll(m) = cnst_longname(thermodynamic_active_species_idx(m)) + else + ! + ! if not running with CSLAM then the condensate-loading water tracers are not necessarily + ! indexed contiguously (are indexed as in physics) + ! + if (m.le.thermodynamic_active_species_num) then + thermodynamic_active_species_idx_dycore(m) = thermodynamic_active_species_idx(m) + kord_tr(thermodynamic_active_species_idx_dycore(m)) = vert_remap_uvTq_alg + end if + cnst_name_gll (m) = cnst_name (m) + cnst_longname_gll(m) = cnst_longname(m) + + end if + end do +#else + !Remove/replace after constituents are enabled in CCPP -JN: + do m=1, qsize + if (ntrac>0) then + thermodynamic_active_species_idx_dycore(m) = m + kord_tr_cslam(thermodynamic_active_species_idx(m)) = vert_remap_uvTq_alg + kord_tr(m) = vert_remap_uvTq_alg + else + if (m.le.thermodynamic_active_species_num) then + thermodynamic_active_species_idx_dycore(m) = thermodynamic_active_species_idx(m) + kord_tr(thermodynamic_active_species_idx_dycore(m)) = vert_remap_uvTq_alg + end if + endif + + if (m == ix_qv) then + cnst_name_gll(m) = 'Q' + cnst_longname_gll(m) = 'water_vapor_specific_humidity' + else if (m == ix_cld_liq) then + cnst_name_gll(m) = 'CLDLIQ' + cnst_longname_gll(m) = 'cloud_liquid_water_mixing_ratio' + else + cnst_name_gll(m) = 'RAINQM' + cnst_longname_gll(m) = 'rain_water_mixing_ratio' + end if + end do +#endif + + ! + ! Initialize the import/export objects + ! + if(iam < par%nprocs) then + dyn_in%elem => elem + dyn_in%fvm => fvm + + dyn_out%elem => elem + dyn_out%fvm => fvm + else + nullify(dyn_in%elem) + nullify(dyn_in%fvm) + nullify(dyn_out%elem) + nullify(dyn_out%fvm) + end if + + call set_phis(dyn_in) + + if (initial_run) then + call read_inidat(dyn_in) + call clean_iodesc_list() + end if + ! + ! initialize Rayleigh friction + ! + krange = raykrange + if (raykrange .eq. 0._r8) krange = (rayk0 - 1) / 2._r8 + tau0 = (86400._r8) * raytau0 ! convert to seconds + otau0 = 0._r8 + if (tau0 .ne. 0._r8) otau0 = 1._r8/tau0 + do k = 1, nlev + otau(k) = otau0 * (1.0_r8 + tanh((rayk0 - k) / krange)) / (2._r8) + enddo + if (masterproc) then + if (tau0 > 0._r8) then + write (iulog,*) 'SE dycore Rayleigh friction - krange = ', krange + write (iulog,*) 'SE dycore Rayleigh friction - otau0 = ', 1.0_r8/tau0 + write (iulog,*) 'SE dycore Rayleigh friction decay rate profile (only applied to (u,v))' + do k = 1, nlev + write (iulog,*) ' k = ', k, ' otau = ', otau(k) + enddo + end if + end if + ! + ! initialize diffusion in dycore + ! + kmol_end = 0 + if (molecular_diff>0) then + ! + ! molecular diffusion and thermal conductivity reference values + ! + if (masterproc) write(iulog,*) subname//": initialize molecular diffusion reference profiles" + tref = 1000._r8 !mean value at model top for solar max + km_sponge_factor = molecular_diff + km_sponge_factor_local = molecular_diff + call get_molecular_diff_coef_reference(1,nlev+1,tref,& + (hvcoord%hyai(:)+hvcoord%hybi(:))*hvcoord%ps0, km_sponge_factor_local,& + kmvisi_ref,kmcndi_ref,rhoi_ref) + ! + ! get rho, kmvis and kmcnd at mid-levels + ! + call get_molecular_diff_coef_reference(1,nlev,tref,& + (hvcoord%hyam(:)+hvcoord%hybm(:))*hvcoord%ps0,km_sponge_factor,& + kmvis_ref,kmcnd_ref,rho_ref) + + do k=1,nlev + ! 
only apply molecular viscosity where viscosity is > 1000 m/s^2 + if (MIN(kmvis_ref(k)/rho_ref(k),kmcnd_ref(k)/(cpair*rho_ref(k)))>1000.0_r8) then + if (masterproc) then + write(iulog,'(a,i3,2e11.4)') "k, p, km_sponge_factor :",k, & + (hvcoord%hyam(k)+hvcoord%hybm(k))*hvcoord%ps0,km_sponge_factor(k) + write(iulog,'(a,2e11.4)') "kmvis_ref/rho_ref, kmcnd_ref/(cp*rho_ref): ", & + kmvis_ref(k)/rho_ref(k),kmcnd_ref(k)/(cpair*rho_ref(k)) + end if + kmol_end = k + else + kmvis_ref(k) = 1.0_r8 + kmcnd_ref(k) = 1.0_r8 + end if + end do + else + ! -1.0E6 is an arbitrary unrealistic value. But it is used in the calculation + ! of a diagnostic quantity in global_norms_mod so can't be set to huge or nan. + kmvis_ref(:) = -1.0E6_r8 + kmcnd_ref(:) = -1.0E6_r8 + rho_ref(:) = -1.0E6_r8 + end if + ! + irecons_tracer_lev(:) = irecons_tracer !use high-order CSLAM in all layers + ! + ! compute scaling of traditional sponge layer damping (following cd_core.F90 in CAM-FV) + ! + nu_scale_top(:) = 0.0_r8 + if (nu_top>0) then + if (masterproc) write(iulog,*) subname//": sponge layer viscosity scaling factor" + do k=1,nlev + press = (hvcoord%hyam(k)+hvcoord%hybm(k))*hvcoord%ps0 + ptop = hvcoord%hyai(1)*hvcoord%ps0 + nu_scale_top(k) = 8.0_r8*(1.0_r8+tanh(1.0_r8*log(ptop/press))) ! tau will be maximum 8 at model top + if (nu_scale_top(k).ge.0.15_r8) then + ksponge_end = k + else + nu_scale_top(k) = 0.0_r8 + end if + end do + else + ksponge_end = 0 + end if + ksponge_end = MAX(MAX(ksponge_end,1),kmol_end) + if (masterproc) then + write(iulog,*) subname//": ksponge_end = ",ksponge_end + if (nu_top>0) then + do k=1,ksponge_end + write(iulog,'(a,i3,1e11.4)') subname//": nu_scale_top ",k,nu_scale_top(k) + end do + end if + end if + + if (iam < par%nprocs) then + call prim_advance_init(par,elem) + !$OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete) + hybrid = config_thread_region(par,'horizontal') + call get_loop_ranges(hybrid, ibeg=nets, iend=nete) + call prim_init2(elem, fvm, hybrid, nets, nete, TimeLevel, hvcoord) + !$OMP END PARALLEL + +! if (use_gw_front .or. use_gw_front_igw) call gws_init(elem) + end if ! iam < par%nprocs + +!Remove/replace after CAMDEN history output is enabled -JN: +#if 0 + + call addfld ('nu_kmvis', (/ 'lev' /), 'A', '', 'Molecular viscosity Laplacian coefficient' , gridname='GLL') + call addfld ('nu_kmcnd', (/ 'lev' /), 'A', '', 'Thermal conductivity Laplacian coefficient' , gridname='GLL') + call addfld ('nu_kmcnd_dp',(/ 'lev' /), 'A', '', 'Thermal conductivity like Laplacian coefficient on dp', gridname='GLL') + + + ! Forcing from physics on the GLL grid + call addfld ('FU', (/ 'lev' /), 'A', 'm/s2', 'Zonal wind forcing term on GLL grid', gridname='GLL') + call addfld ('FV', (/ 'lev' /), 'A', 'm/s2', 'Meridional wind forcing term on GLL grid',gridname='GLL') + call register_vector_field('FU', 'FV') + call addfld ('FT', (/ 'lev' /), 'A', 'K/s', 'Temperature forcing term on GLL grid',gridname='GLL') + + ! 
Tracer forcing on fvm (CSLAM) grid and internal CSLAM pressure fields + if (ntrac>0) then + do m = 1, ntrac + call addfld (trim(cnst_name(m))//'_fvm', (/ 'lev' /), 'I', 'kg/kg', & + trim(cnst_longname(m)), gridname='FVM') + + call addfld ('F'//trim(cnst_name(m))//'_fvm', (/ 'lev' /), 'I', 'kg/kg/s', & + trim(cnst_longname(m))//' mixing ratio forcing term (q_new-q_old) on fvm grid', & + gridname='FVM') + end do + + call addfld ('dp_fvm' ,(/ 'lev' /), 'I', 'Pa','CSLAM Pressure level thickness', gridname='FVM') + call addfld ('PSDRY_fvm',horiz_only, 'I','Pa','CSLAM dry surface pressure' , gridname='FVM') + end if + + do m_cnst = 1, qsize + call addfld ('F'//trim(cnst_name_gll(m_cnst))//'_gll', (/ 'lev' /), 'I', 'kg/kg/s', & + trim(cnst_longname(m_cnst))//' mixing ratio forcing term (q_new-q_old) on GLL grid', gridname='GLL') + end do + + ! Energy diagnostics and axial angular momentum diagnostics + call addfld ('ABS_dPSdt', horiz_only, 'A', 'Pa/s', 'Absolute surface pressure tendency',gridname='GLL') + + if (ntrac>0) then +#ifdef waccm_debug + call addfld ('CSLAM_gamma', (/ 'lev' /), 'A', '', 'Courant number from CSLAM', gridname='FVM') +#endif + call addfld ('WV_PDC', horiz_only, 'A', 'kg/m2','Total column water vapor lost in physics-dynamics coupling',gridname='FVM') + call addfld ('WL_PDC', horiz_only, 'A', 'kg/m2','Total column cloud water lost in physics-dynamics coupling',gridname='FVM') + call addfld ('WI_PDC', horiz_only, 'A', 'kg/m2','Total column cloud ice lost in physics-dynamics coupling' ,gridname='FVM') + call addfld ('TT_PDC', horiz_only, 'A', 'kg/m2','Total column test tracer lost in physics-dynamics coupling',gridname='FVM') + else + call addfld ('WV_PDC', horiz_only, 'A', 'kg/m2','Total column water vapor lost in physics-dynamics coupling',gridname='GLL') + call addfld ('WL_PDC', horiz_only, 'A', 'kg/m2','Total column cloud water lost in physics-dynamics coupling',gridname='GLL') + call addfld ('WI_PDC', horiz_only, 'A', 'kg/m2','Total column cloud ice lost in physics-dynamics coupling' ,gridname='GLL') + call addfld ('TT_PDC', horiz_only, 'A', 'kg/m2','Total column test tracer lost in physics-dynamics coupling',gridname='GLL') + end if + + do istage = 1,SIZE(stage) + do ivars=1,SIZE(vars) + write(str1,*) TRIM(ADJUSTL(vars(ivars))),TRIM(ADJUSTL("_")),TRIM(ADJUSTL(stage(istage))) + write(str2,*) TRIM(ADJUSTL(vars_descriptor(ivars))),& + TRIM(ADJUSTL(" ")),TRIM(ADJUSTL(stage_txt(istage))) + write(str3,*) TRIM(ADJUSTL(vars_unit(ivars))) + if (ntrac>0.and.massv(ivars)) then + call addfld (TRIM(ADJUSTL(str1)), horiz_only, 'A', TRIM(ADJUSTL(str3)),TRIM(ADJUSTL(str2)),gridname='FVM') + else + call addfld (TRIM(ADJUSTL(str1)), horiz_only, 'A', TRIM(ADJUSTL(str3)),TRIM(ADJUSTL(str2)),gridname='GLL') + end if + end do + end do + + ! + ! add dynamical core tracer tendency output + ! + if (ntrac>0) then + do m = 1, pcnst + call addfld(tottnam(m),(/ 'lev' /),'A','kg/kg/s',trim(cnst_name(m))//' horz + vert', & + gridname='FVM') + end do + else + do m = 1, pcnst + call addfld(tottnam(m),(/ 'lev' /),'A','kg/kg/s',trim(cnst_name(m))//' horz + vert', & + gridname='GLL') + end do + end if + call phys_getopts(history_budget_out=history_budget, history_budget_histfile_num_out=budget_hfile_num) + if ( history_budget ) then + call cnst_get_ind('CLDLIQ', ixcldliq) + call cnst_get_ind('CLDICE', ixcldice) + call add_default(tottnam( 1), budget_hfile_num, ' ') + call add_default(tottnam(ixcldliq), budget_hfile_num, ' ') + call add_default(tottnam(ixcldice), budget_hfile_num, ' ') + end if + + ! 
constituent indices for waccm-x +! if ( waccmx_is('ionosphere') .or. waccmx_is('neutral') ) then +! call cnst_get_ind('O', ixo) +! call cnst_get_ind('O2', ixo2) +! call cnst_get_ind('H', ixh) +! call cnst_get_ind('H2', ixh2) +! end if + + call test_mapping_addfld + +!Remove/replace after CAMDEN history output is enabled -JN: +#endif + +end subroutine dyn_init + +!========================================================================================= + +subroutine dyn_run(dyn_state) + use physconst, only: thermodynamic_active_species_num, dry_air_species_num + use physconst, only: thermodynamic_active_species_idx_dycore + + !Se dycore: + use prim_advance_mod, only: calc_tot_energy_dynamics + use prim_driver_mod, only: prim_run_subcycle + use dimensions_mod, only: cnst_name_gll + use time_mod, only: tstep, nsplit, timelevel_qdp + use hybrid_mod, only: config_thread_region, get_loop_ranges + use control_mod, only: qsplit, rsplit, ftype_conserve + use thread_mod, only: horz_num_threads + use time_mod, only: tevolve + + type(dyn_export_t), intent(inout) :: dyn_state + + type(hybrid_t) :: hybrid + integer :: tl_f + integer :: n + integer :: nets, nete, ithr + integer :: i, ie, j, k, m, nq, m_cnst + integer :: n0_qdp, nsplit_local + logical :: ldiag + + real(r8) :: ftmp(npsq,nlev,3) + real(r8) :: dtime + real(r8) :: rec2dt, pdel + + real(r8), allocatable, dimension(:,:,:) :: ps_before + real(r8), allocatable, dimension(:,:,:) :: abs_ps_tend + real (kind=r8) :: omega_cn(2,nelemd) !min and max of vertical Courant number + !---------------------------------------------------------------------------- + +#ifdef debug_coupling + return +#endif + + nsplit_local = nsplit + tevolve = 0._r8 + + if (iam >= par%nprocs) return + + ldiag = hist_fld_active('ABS_dPSdt') + if (ldiag) then + allocate(ps_before(np,np,nelemd)) + allocate(abs_ps_tend(np,np,nelemd)) + + end if + + !$OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n,ie,m,i,j,k,ftmp) + hybrid = config_thread_region(par,'horizontal') + call get_loop_ranges(hybrid, ibeg=nets, iend=nete) + + dtime = get_step_size() + rec2dt = 1._r8/dtime + + tl_f = TimeLevel%n0 ! timelevel which was adjusted by physics + call TimeLevel_Qdp(TimeLevel, qsplit, n0_qdp)!get n0_qdp for diagnostics call + +!Uncomment once "outfld" is enabled in CAMDEN-JN: +#if 0 + + ! output physics forcing + if (hist_fld_active('FU') .or. hist_fld_active('FV') .or.hist_fld_active('FT')) then + do ie = nets, nete + do k = 1, nlev + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,k,1) = dyn_state%elem(ie)%derived%FM(i,j,1,k) + ftmp(i+(j-1)*np,k,2) = dyn_state%elem(ie)%derived%FM(i,j,2,k) + ftmp(i+(j-1)*np,k,3) = dyn_state%elem(ie)%derived%FT(i,j,k) + end do + end do + end do + + call outfld('FU', ftmp(:,:,1), npsq, ie) + call outfld('FV', ftmp(:,:,2), npsq, ie) + call outfld('FT', ftmp(:,:,3), npsq, ie) + end do + end if + + do m = 1, qsize + if (hist_fld_active('F'//trim(cnst_name_gll(m))//'_gll')) then + do ie = nets, nete + call outfld('F'//trim(cnst_name_gll(m))//'_gll',& + RESHAPE(dyn_state%elem(ie)%derived%FQ(:,:,:,m), (/np*np,nlev/)), npsq, ie) + end do + end if + end do +#endif + + ! 
convert elem(ie)%derived%fq to mass tendency + do ie = nets, nete + do m = 1, qsize + do k = 1, nlev + do j = 1, np + do i = 1, np + dyn_state%elem(ie)%derived%FQ(i,j,k,m) = dyn_state%elem(ie)%derived%FQ(i,j,k,m)* & + rec2dt*dyn_state%elem(ie)%state%dp3d(i,j,k,tl_f) + end do + end do + end do + end do + end do + + if (ftype_conserve>0) then + do ie = nets, nete + do k=1,nlev + do j=1,np + do i = 1, np + pdel = dyn_state%elem(ie)%state%dp3d(i,j,k,tl_f) + do nq=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx_dycore(nq) + pdel = pdel + (dyn_state%elem(ie)%state%qdp(i,j,k,m_cnst,n0_qdp)+dyn_state%elem(ie)%derived%FQ(i,j,k,m_cnst)*dtime) + end do + dyn_state%elem(ie)%derived%FDP(i,j,k) = pdel + end do + end do + end do + end do + end if + + if (ntrac > 0) then + do ie = nets, nete + do m = 1, ntrac + do k = 1, nlev + do j = 1, nc + do i = 1, nc + dyn_state%fvm(ie)%fc(i,j,k,m) = dyn_state%fvm(ie)%fc(i,j,k,m)* & + rec2dt!*dyn_state%fvm(ie)%dp_fvm(i,j,k) + end do + end do + end do + end do + end do + end if + + if (ldiag) then + abs_ps_tend(:,:,nets:nete) = 0.0_r8 + endif + + do n = 1, nsplit_local + + if (ldiag) then + do ie = nets, nete + ps_before(:,:,ie) = dyn_state%elem(ie)%state%psdry(:,:) + end do + end if + + ! forward-in-time RK, with subcycling + call prim_run_subcycle(dyn_state%elem, dyn_state%fvm, hybrid, nets, nete, & + tstep, TimeLevel, hvcoord, n, omega_cn) + + if (ldiag) then + do ie = nets, nete + abs_ps_tend(:,:,ie) = abs_ps_tend(:,:,ie) + & + ABS(ps_before(:,:,ie)-dyn_state%elem(ie)%state%psdry(:,:)) & + /(tstep*qsplit*rsplit) + end do + end if + + end do + +!Uncomment once "outfld" is enabled in CAMDEN-JN: +#if 0 + if (ldiag) then + do ie=nets,nete + abs_ps_tend(:,:,ie)=abs_ps_tend(:,:,ie)/DBLE(nsplit) + call outfld('ABS_dPSdt',RESHAPE(abs_ps_tend(:,:,ie),(/npsq/)),npsq,ie) + end do + end if +#endif + + call calc_tot_energy_dynamics(dyn_state%elem,dyn_state%fvm, nets, nete, TimeLevel%n0, n0_qdp,'dBF') + !$OMP END PARALLEL + +!Uncomment once "outfld" is enabled in CAMDEN-JN: +#if 0 + if (ldiag) then + deallocate(ps_before,abs_ps_tend) + endif + ! output vars on CSLAM fvm grid + call write_dyn_vars(dyn_state) +#endif + +end subroutine dyn_run + +!=============================================================================== + +subroutine dyn_final(DYN_STATE, RESTART_FILE) + + type (elem_state_t), target :: DYN_STATE + character(LEN=*) , intent(IN) :: RESTART_FILE + +end subroutine dyn_final + +!=============================================================================== + +subroutine read_inidat(dyn_in) + use physconst, only: thermodynamic_active_species_num, dry_air_species_num + use physconst, only: thermodynamic_active_species_idx + use shr_sys_mod, only: shr_sys_flush + use hycoef, only: hyai, hybi, ps0 + use phys_vars_init_check, only: mark_as_initialized + !use const_init, only: cnst_init_default + + !SE-dycore: + use element_mod, only: timelevels + use fvm_mapping, only: dyn2fvm_mass_vars + use control_mod, only: runtype,initial_global_ave_dry_ps + use prim_driver_mod, only: prim_set_dry_mass + + ! Arguments + type (dyn_import_t), target, intent(inout) :: dyn_in ! dynamics import + + ! Local variables + + integer(iMap), pointer :: ldof(:) ! Basic (2D) grid dof + + type(file_desc_t), pointer :: fh_ini, fh_topo + + type(element_t), pointer :: elem(:) + + real(r8), allocatable :: qtmp(:,:,:,:,:) ! (np,np,nlev,nelemd,n) + real(r8), allocatable :: dbuf2(:,:) ! (npsq,nelemd) + real(r8), allocatable :: dbuf3(:,:,:) ! 
(npsq,nlev,nelemd) + real(r8), allocatable :: phis_tmp(:,:) ! (npsp,nelemd) + real(r8), allocatable :: factor_array(:,:,:,:) ! (np,np,nlev,nelemd) + logical, allocatable :: pmask(:) ! (npsq*nelemd) unique grid vals + + character(len=max_hcoordname_len):: grid_name + real(r8), allocatable :: latvals(:),latvals_phys(:) + real(r8), allocatable :: lonvals(:),lonvals_phys(:) + real(r8), pointer :: latvals_deg(:) + real(r8), pointer :: lonvals_deg(:) + + integer :: ie, k, t + character(len=max_fieldname_len) :: fieldname, fieldname2 + logical :: found + logical :: inic_wet ! true if initial condition is based on + ! wet pressure and water species + integer :: kptr, m_cnst + type(EdgeBuffer_t) :: edge + + character(len=max_fieldname_len) :: dimname, varname + integer :: ierr + + integer :: rndm_seed_sz + integer, allocatable :: rndm_seed(:) + integer :: dims(2) + integer :: pio_errtype + real(r8) :: pertval + integer :: i, j, indx, nq + integer :: dyn_cols + character(len=128) :: errmsg + character(len=*), parameter :: subname='READ_INIDAT' + + ! fvm vars + real(r8), allocatable :: inv_dp_darea_fvm(:,:,:) + real(r8) :: min_val, max_val + + real(r8) :: dp_tmp, pstmp(np,np) + + ! Variables for analytic initial conditions + integer, allocatable :: glob_ind(:) + integer, allocatable :: m_ind(:) + real(r8), allocatable :: dbuf4(:,:,:,:) + !---------------------------------------------------------------------------- + + fh_ini => initial_file_get_id() + fh_topo => topo_file_get_id() + + if (iam < par%nprocs) then + elem => dyn_in%elem + else + nullify(elem) + end if + + allocate(qtmp(np,np,nlev,nelemd,pcnst)) + qtmp = 0._r8 + + ! Set mask to indicate which columns are active + nullify(ldof) + call cam_grid_get_gcid(cam_grid_id('GLL'), ldof) + allocate(pmask(npsq*nelemd)) + pmask(:) = (ldof /= 0) + + ! lat/lon needed in radians + latvals_deg => cam_grid_get_latvals(cam_grid_id('GLL')) + lonvals_deg => cam_grid_get_lonvals(cam_grid_id('GLL')) + allocate(latvals(np*np*nelemd)) + allocate(lonvals(np*np*nelemd)) + latvals(:) = latvals_deg(:)*deg2rad + lonvals(:) = lonvals_deg(:)*deg2rad + + ! Set PIO to return error codes when reading data from IC file. + call pio_seterrorhandling(fh_ini, PIO_BCAST_ERROR, pio_errtype) + + ! The grid name is defined in dyn_grid::define_cam_grids. + ! Get the number of columns in the global GLL grid. + call cam_grid_dimensions('GLL', dims) + dyn_cols = dims(1) + + ! Set ICs. Either from analytic expressions or read from file. + + if (analytic_ic_active() .and. (iam < par%nprocs)) then + + ! PHIS has already been set by set_phis. Get local copy for + ! possible use in setting T and PS in the analytic IC code. + allocate(phis_tmp(npsq,nelemd)) + do ie = 1, nelemd + k = 1 + do j = 1, np + do i = 1, np + phis_tmp(k,ie) = elem(ie)%state%phis(i,j) + k = k + 1 + end do + end do + end do + + inic_wet = .false. + allocate(glob_ind(npsq * nelemd)) + j = 1 + do ie = 1, nelemd + do i = 1, npsq + ! Create a global(ish) column index + glob_ind(j) = elem(ie)%GlobalId + j = j + 1 + end do + end do + + ! First, initialize all the variables, then assign + allocate(dbuf4(npsq, nlev, nelemd, (qsize + 4))) + dbuf4 = 0.0_r8 + allocate(m_ind(qsize)) + do m_cnst = 1, qsize + m_ind(m_cnst) = m_cnst + end do + + ! Init tracers on the GLL grid. Note that analytic_ic_set_ic makes + ! use of cnst_init_default for the tracers except water vapor. 
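+      ! The dbuf4 buffer packs all analytic fields into a single array: the fourth
+      ! dimension uses slots 1:qsize for the tracer mixing ratios and slots qsize+1
+      ! through qsize+4 for PS (level index 1 only), U, V, and T, matching the
+      ! argument list of the call below.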
+ + call analytic_ic_set_ic(vcoord, latvals, lonvals, glob_ind, & + PS=dbuf4(:,1,:,(qsize+1)), U=dbuf4(:,:,:,(qsize+2)), & + V=dbuf4(:,:,:,(qsize+3)), T=dbuf4(:,:,:,(qsize+4)), & + Q=dbuf4(:,:,:,1:qsize), m_cnst=m_ind, mask=pmask(:), & + PHIS_IN=PHIS_tmp) + deallocate(m_ind) + deallocate(glob_ind) + deallocate(phis_tmp) + do ie = 1, nelemd + indx = 1 + do j = 1, np + do i = 1, np + ! PS + elem(ie)%state%psdry(i,j) = dbuf4(indx, 1, ie, (qsize+1)) + ! U + elem(ie)%state%v(i,j,1,:,1) = dbuf4(indx, :, ie, (qsize+2)) + ! V + elem(ie)%state%v(i,j,2,:,1) = dbuf4(indx, :, ie, (qsize+3)) + ! T + elem(ie)%state%T(i,j,:,1) = dbuf4(indx, :, ie, (qsize+4)) + indx = indx + 1 + end do + end do + end do + + ! Tracers to be advected on GLL grid. + ! Note that fvm tracers are initialized below. + do m_cnst = 1, qsize + do ie = 1, nelemd + qtmp(:,:,:,ie,m_cnst) = 0.0_r8 + indx = 1 + do j = 1, np + do i = 1, np + ! Set qtmp at the unique columns only + if (pmask(((ie - 1) * npsq) + indx)) then + qtmp(i,j,:,ie,m_cnst) = dbuf4(indx, :, ie, m_cnst) + end if + indx = indx + 1 + end do + end do + end do + end do + deallocate(dbuf4) + + + else + + ! Read ICs from file. Assume all fields in the initial file are on the GLL grid. + + allocate(dbuf2(npsq,nelemd)) + allocate(dbuf3(npsq,nlev,nelemd)) + + ! Check that number of columns in IC file matches grid definition. + call check_file_layout(fh_ini, elem, dyn_cols, 'ncdata', .true., dimname) + + ! Read 2-D field + + fieldname = 'PS' + fieldname2 = 'PSDRY' + if (dyn_field_exists(fh_ini, trim(fieldname), required=.false.)) then + inic_wet = .true. + call read_dyn_var(trim(fieldname), fh_ini, dimname, dbuf2) + elseif (dyn_field_exists(fh_ini, trim(fieldname2), required=.false.)) then + inic_wet = .false. + call read_dyn_var(trim(fieldname2), fh_ini, dimname, dbuf2) + else + call endrun(trim(subname)//': PS or PSDRY must be on GLL grid') + end if +#ifndef planet_mars + if (iam < par%nprocs) then + if (minval(dbuf2, mask=reshape(pmask, (/npsq,nelemd/))) < 10000._r8) then + call endrun(trim(subname)//': Problem reading ps or psdry field -- bad values') + end if + end if +#endif + do ie = 1, nelemd + indx = 1 + do j = 1, np + do i = 1, np + elem(ie)%state%psdry(i,j) = dbuf2(indx,ie) ! can be either wet or dry ps + indx = indx + 1 + end do + end do + end do + + ! Read in 3-D fields + + if (dyn_field_exists(fh_ini, 'U')) then + call read_dyn_var('U', fh_ini, dimname, dbuf3) + else + call endrun(trim(subname)//': U not found') + end if + do ie = 1, nelemd + elem(ie)%state%v = 0.0_r8 + indx = 1 + do j = 1, np + do i = 1, np + elem(ie)%state%v(i,j,1,:,1) = dbuf3(indx,:,ie) + indx = indx + 1 + end do + end do + end do + + if (dyn_field_exists(fh_ini, 'V')) then + call read_dyn_var('V', fh_ini, dimname, dbuf3) + else + call endrun(trim(subname)//': V not found') + end if + do ie = 1, nelemd + indx = 1 + do j = 1, np + do i = 1, np + elem(ie)%state%v(i,j,2,:,1) = dbuf3(indx,:,ie) + indx = indx + 1 + end do + end do + end do + + if (dyn_field_exists(fh_ini, 'T')) then + call read_dyn_var('T', fh_ini, dimname, dbuf3) + else + call endrun(trim(subname)//': T not found') + end if + do ie=1,nelemd + elem(ie)%state%T = 0.0_r8 + indx = 1 + do j = 1, np + do i = 1, np + elem(ie)%state%T(i,j,:,1) = dbuf3(indx,:,ie) + indx = indx + 1 + end do + end do + end do + + if (pertlim .ne. 
0.0_r8) then + if (masterproc) then + write(iulog,*) trim(subname), ': Adding random perturbation bounded', & + 'by +/- ', pertlim, ' to initial temperature field' + end if + + call random_seed(size=rndm_seed_sz) + allocate(rndm_seed(rndm_seed_sz)) + + do ie = 1, nelemd + ! seed random number generator based on element ID + ! (possibly include a flag to allow clock-based random seeding) + rndm_seed = elem(ie)%GlobalId + call random_seed(put=rndm_seed) + do i = 1, np + do j = 1, np + do k = 1, nlev + call random_number(pertval) + pertval = 2.0_r8*pertlim*(0.5_r8 - pertval) + elem(ie)%state%T(i,j,k,1) = elem(ie)%state%T(i,j,k,1)*(1.0_r8 + pertval) + end do + end do + end do + end do + + deallocate(rndm_seed) + end if + + ! Cleanup + deallocate(dbuf2) + deallocate(dbuf3) + + end if ! analytic_ic_active + + ! Read in or cold-initialize all the tracer fields. + ! Data is read in on the GLL grid. + ! Both GLL and FVM tracer fields are initialized based on the + ! dimension qsize or ntrac for GLL or FVM tracers respectively. + ! Data is only read in on GLL so if FVM tracers are active, + ! interpolation is performed. + ! + ! If analytic ICs are being used, we allow constituents in an initial + ! file to overwrite mixing ratios set by the default constituent initialization + ! except for the water species. + + if (ntrac > qsize) then + if (ntrac < pcnst) then + write(errmsg, '(a,3(i0,a))') ': ntrac (',ntrac,') > qsize (',qsize, & + ') but < pcnst (',pcnst,')' + call endrun(trim(subname)//errmsg) + end if + else if (qsize < pcnst) then + write(errmsg, '(a,2(i0,a))') ': qsize (',qsize,') < pcnst (',pcnst,')' + call endrun(trim(subname)//errmsg) + end if + +!Un-comment once non-water constituents are enabled in CAMDEN -JN: +#if 0 + + ! If using analytic ICs the initial file only needs the horizonal grid + ! dimension checked in the case that the file contains constituent mixing + ! ratios. + do m_cnst = 1, pcnst + if (cnst_read_iv(m_cnst) .and. .not. cnst_is_a_water_species(cnst_name(m_cnst))) then + if (dyn_field_exists(fh_ini, trim(cnst_name(m_cnst)), required=.false.)) then + call check_file_layout(fh_ini, elem, dyn_cols, 'ncdata', .true., dimname) + exit + end if + end if + end do + + allocate(dbuf3(npsq,nlev,nelemd)) + + do m_cnst = 1, pcnst + + if (analytic_ic_active() .and. cnst_is_a_water_species(cnst_name(m_cnst))) cycle + + found = .false. + if (cnst_read_iv(m_cnst)) then + found = dyn_field_exists(fh_ini, trim(cnst_name(m_cnst)), required=.false.) + end if + + if (found) then + call read_dyn_var(trim(cnst_name(m_cnst)), fh_ini, dimname, dbuf3) + else + call cnst_init_default(m_cnst, latvals, lonvals, dbuf3, pmask) + end if + + do ie = 1, nelemd + ! Copy tracers defined on GLL grid into Eulerian array + ! Make sure tracers have at least minimum value + do k=1, nlev + indx = 1 + do j = 1, np + do i = 1, np + ! Set qtmp at the unique columns only: zero non-unique columns + if (pmask(((ie - 1) * npsq) + indx)) then + qtmp(i,j, k, ie, m_cnst) = max(qmin(m_cnst),dbuf3(indx,k,ie)) + else + qtmp(i,j, k, ie, m_cnst) = 0.0_r8 + end if + indx = indx + 1 + end do + end do + end do + end do + + end do ! pcnst +!Un-comment once constituents are enabled in CAMDEN -JN: +#endif + + ! Cleanup + deallocate(dbuf3) + + ! Put the error handling back the way it was + call pio_seterrorhandling(fh_ini, pio_errtype) + + ! Cleanup + deallocate(pmask) + deallocate(latvals) + deallocate(lonvals) + + if (associated(ldof)) then + deallocate(ldof) + nullify(ldof) + end if + + ! 
once we've read or initialized all the fields we do a boundary exchange to + ! update the redundent columns in the dynamics + if(iam < par%nprocs) then + call initEdgeBuffer(par, edge, elem, (3+pcnst)*nlev + 2 ) + end if + do ie = 1, nelemd + kptr = 0 + call edgeVpack(edge, elem(ie)%state%psdry,1,kptr,ie) + kptr = kptr + 1 + call edgeVpack(edge, elem(ie)%state%v(:,:,:,:,1),2*nlev,kptr,ie) + kptr = kptr + (2 * nlev) + call edgeVpack(edge, elem(ie)%state%T(:,:,:,1),nlev,kptr,ie) + kptr = kptr + nlev + call edgeVpack(edge, qtmp(:,:,:,ie,:),nlev*pcnst,kptr,ie) + end do + if(iam < par%nprocs) then + call bndry_exchange(par,edge,location='read_inidat') + end if + do ie = 1, nelemd + kptr = 0 + call edgeVunpack(edge, elem(ie)%state%psdry,1,kptr,ie) + kptr = kptr + 1 + call edgeVunpack(edge, elem(ie)%state%v(:,:,:,:,1),2*nlev,kptr,ie) + kptr = kptr + (2 * nlev) + call edgeVunpack(edge, elem(ie)%state%T(:,:,:,1),nlev,kptr,ie) + kptr = kptr + nlev + call edgeVunpack(edge, qtmp(:,:,:,ie,:),nlev*pcnst,kptr,ie) + end do + + if (inic_wet) then + ! + ! convert to dry + ! + ! (this has to be done after edge-exchange since shared points between elements are only + ! initialized in one element and not the other!) + ! + if (par%masterproc) then + write(iulog,*) 'Convert specific/wet mixing ratios to dry' + end if + + allocate(factor_array(np,np,nlev,nelemd)) + ! + ! compute: factor_array = 1/(1-sum(q)) + ! + factor_array(:,:,:,:) = 1.0_r8 + do ie = 1, nelemd + do k = dry_air_species_num+1, thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx(k) + factor_array(:,:,:,ie) = factor_array(:,:,:,ie) - qtmp(:,:,:,ie,m_cnst) + end do + end do + factor_array(:,:,:,:) = 1.0_r8/factor_array(:,:,:,:) + + do m_cnst = 1, pcnst + if (cnst_type(m_cnst) == 'wet') then + do ie = 1, nelemd + do k = 1, nlev + do j = 1, np + do i = 1, np + + ! convert wet mixing ratio to dry + qtmp(i,j,k,ie,m_cnst) = qtmp(i,j,k,ie,m_cnst) * factor_array(i,j,k,ie) + + ! truncate negative values if they were not analytically specified + if (.not. analytic_ic_active()) then +! qtmp(i,j,k,ie,m_cnst) = max(qmin(m_cnst), qtmp(i,j,k,ie,m_cnst)) + qtmp(i,j,k,ie,m_cnst) = max(0._r8, qtmp(i,j,k,ie,m_cnst)) !Remove once constituents are enabled -JN + end if + end do + end do + end do + end do + end if + end do + + ! initialize dp3d and qdp + ! + ! compute: factor_array = 1/(1+sum(q)) + + factor_array(:,:,:,:) = 1.0_r8 + do ie = 1, nelemd + do k = dry_air_species_num+1, thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx(k) + factor_array(:,:,:,ie) = factor_array(:,:,:,ie) + qtmp(:,:,:,ie,m_cnst) + end do + end do + factor_array(:,:,:,:) = 1.0_r8/factor_array(:,:,:,:) + do ie = 1, nelemd + ! pstmp is the wet ps + pstmp = elem(ie)%state%psdry(:,:) + ! start accumulating the dry air pressure differences across each layer + elem(ie)%state%psdry(:,:) = hyai(1)*ps0 + do k=1,nlev + do j = 1,np + do i = 1,np + dp_tmp = ((hyai(k+1) - hyai(k))*ps0) + & + ((hybi(k+1) - hybi(k))*pstmp(i,j)) + if (.not. analytic_ic_active()) then + + ! if analytic_ic then the surface pressure is already dry + ! (note that it is not correct to convert to moist pressure + ! in analytic_ic and not have the #ifndef statement here + ! since the dry levels are in a different location than + ! what is obtained from algorithm below) + + ! convert dp_tmp to dry + dp_tmp = dp_tmp*factor_array(i,j,k,ie) + end if + + elem(ie)%state%dp3d(i,j,k,:) = dp_tmp + + ! compute dry surface pressure; note that at this point + ! + ! dp3d .NE. 
(hyai(k+1) - hyai(k))*ps0 + (hybi(k+1) - hybi(k))*ps(i,j) + + elem(ie)%state%psdry(i,j) = elem(ie)%state%psdry(i,j)+elem(ie)%state%dp3d(i,j,k,1) + end do + end do + end do + end do + + deallocate(factor_array) + + else + + ! initial condition is based on dry surface pressure and constituents + ! + ! we only need to initialize state%dp3d + + do ie = 1, nelemd + do k = 1, nlev + do j = 1, np + do i = 1, np + elem(ie)%state%dp3d(i,j,k,:) = (hyai(k+1) - hyai(k))*ps0 + & + (hybi(k+1) - hybi(k))*elem(ie)%state%psdry(i,j) + end do + end do + end do + end do + end if + + ! scale PS to achieve prescribed dry mass following FV dycore (dryairm.F90) +#ifndef planet_mars + if (runtype == 0) then + initial_global_ave_dry_ps = 98288.0_r8 + if (.not. associated(fh_topo)) then + initial_global_ave_dry_ps = 101325._r8 - 245._r8 + end if + if (simple_phys) then + initial_global_ave_dry_ps = 0 !do not scale psdry + end if + if (iam < par%nprocs) then + call prim_set_dry_mass(elem, hvcoord, initial_global_ave_dry_ps, qtmp) + end if + endif +#endif + ! store Q values: + ! + ! if CSLAM is NOT active then state%Qdp for all constituents + ! if CSLAM active then we only advect water vapor and condensate + ! loading tracers in state%qdp + + if (ntrac > 0) then + do ie = 1, nelemd + do nq = 1, thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx(nq) + do k = 1, nlev + do j = 1, np + do i = 1, np + elem(ie)%state%Qdp(i,j,k,nq,:) = & + elem(ie)%state%dp3d(i,j,k,1)*qtmp(i,j,k,ie,m_cnst) + end do + end do + end do + end do + end do + else + do ie = 1, nelemd + do m_cnst = 1, qsize + do k = 1, nlev + do j = 1, np + do i = 1, np + elem(ie)%state%Qdp(i,j,k,m_cnst,:)=& + elem(ie)%state%dp3d(i,j,k,1)*qtmp(i,j,k,ie,m_cnst) + end do + end do + end do + end do + end do + end if + + ! interpolate fvm tracers and fvm pressure variables + + if (ntrac > 0) then + if (par%masterproc) then + write(iulog,*) 'Initializing dp_fvm from spectral element dp' + end if + + do ie = 1, nelemd + + ! note that the area over fvm cells as computed from subcell_integration is up to 1.0E-6 + ! different than the areas (exact) computed by CSLAM + ! + ! Map the constituents which are also to be transported by dycore + call dyn2fvm_mass_vars(elem(ie)%state%dp3d(:,:,:,1),elem(ie)%state%psdry(:,:),& + qtmp(:,:,:,ie,1:ntrac),& + dyn_in%fvm(ie)%dp_fvm(1:nc,1:nc,:),dyn_in%fvm(ie)%psC(1:nc,1:nc),& + dyn_in%fvm(ie)%c(1:nc,1:nc,:,1:ntrac),& + ntrac,elem(ie)%metdet,dyn_in%fvm(ie)%inv_se_area_sphere(1:nc,1:nc)) + end do + + if(par%masterproc) then + write(iulog,*) 'FVM tracers, FVM pressure variables and se_area_sphere initialized.' + end if + + end if ! (ntrac > 0) + + ! 
Cleanup + deallocate(qtmp) + + do ie = 1, nelemd + do t = 2, timelevels + elem(ie)%state%v(:,:,:,:,t) = elem(ie)%state%v(:,:,:,:,1) + elem(ie)%state%T(:,:,:,t) = elem(ie)%state%T(:,:,:,1) + end do + end do + + if(iam < par%nprocs) then + call FreeEdgeBuffer(edge) + end if + + !Finally, mark variables as initialized so that physics doesn't try to set + !the initial values itself: + call mark_as_initialized("surface_air_pressure") + call mark_as_initialized("pressure_thickness") + call mark_as_initialized("eastward_wind") + call mark_as_initialized("northward_wind") + call mark_as_initialized("temperature") + + !These calls will need to be modified once constituents are enabled: + call mark_as_initialized("water_vapor_specific_humidity") + call mark_as_initialized("cloud_liquid_water_mixing_ratio") + call mark_as_initialized("rain_water_mixing_ratio") + + !These calls may be removed if geopotential_t is only allowed to run + !in a CCPP physics suite: + call mark_as_initialized("geopotential_height") + call mark_as_initialized("geopotential_height_at_interface") + call mark_as_initialized("dry_static_energy_content_of_atmosphere_layer") + + !These quantities are calculated in d_p_coupling using the variables initialized here: + call mark_as_initialized("air_pressure") + call mark_as_initialized("natural_log_of_air_pressure") + call mark_as_initialized("air_pressure_at_interface") + call mark_as_initialized("natural_log_of_air_pressure_at_interface") + call mark_as_initialized("pressure_thickness_of_dry_air") + call mark_as_initialized("surface_pressure_of_dry_air") + call mark_as_initialized("air_pressure_of_dry_air") + call mark_as_initialized("air_pressure_of_dry_air_at_interface") + call mark_as_initialized("natural_log_of_air_pressure_of_dry_air_at_interface") + call mark_as_initialized("natural_log_of_air_pressure_of_dry_air") + call mark_as_initialized("reciprocal_of_pressure_thickness_of_dry_air") + call mark_as_initialized("reciprocal_of_pressure_thickness") + call mark_as_initialized("inverse_exner_function_wrt_surface_pressure") + call mark_as_initialized("lagrangian_tendency_of_air_pressure") + +end subroutine read_inidat + + +!======================================================================================== + +subroutine set_phis(dyn_in) + + ! Set PHIS according to the following rules. + ! + ! 1) If a topo file is specified use it. This option has highest precedence. + ! 2) If not using topo file, but analytic_ic option is on, use analytic phis. + ! 3) Set phis = 0.0. + ! + ! If using the physics grid then the topo file will be on that grid since its + ! contents are primarily for the physics parameterizations, and the values of + ! PHIS should be consistent with the values of sub-grid variability (e.g., SGH) + ! which are computed on the physics grid. In this case phis on the physics grid + ! will be interpolated to the GLL grid. + + use phys_vars_init_check, only: mark_as_initialized + + ! Arguments + type (dyn_import_t), target, intent(inout) :: dyn_in ! dynamics import + + ! local variables + type(file_desc_t), pointer :: fh_topo + + type(element_t), pointer :: elem(:) + + real(r8), allocatable :: phis_tmp(:,:) ! (npsp,nelemd) + real(r8), allocatable :: phis_phys_tmp(:,:) ! 
(fv_nphys**2,nelemd) + + integer :: i, ie, indx, j, kptr + integer :: ierr, pio_errtype + + character(len=max_fieldname_len) :: fieldname + character(len=max_hcoordname_len):: grid_name + integer :: dims(2) + integer :: dyn_cols + integer :: ncol_did + integer :: ncol_size + + integer(iMap), pointer :: ldof(:) ! Basic (2D) grid dof + logical, allocatable :: pmask(:) ! (npsq*nelemd) unique columns + + ! Variables for analytic initial conditions + integer, allocatable :: glob_ind(:) + logical, allocatable :: pmask_phys(:) + real(r8), pointer :: latvals_deg(:) + real(r8), pointer :: lonvals_deg(:) + real(r8), allocatable :: latvals(:) + real(r8), allocatable :: lonvals(:) + real(r8), allocatable :: latvals_phys(:) + real(r8), allocatable :: lonvals_phys(:) + + character(len=*), parameter :: subname='set_phis' + !---------------------------------------------------------------------------- + + fh_topo => topo_file_get_id() + + if (iam < par%nprocs) then + elem => dyn_in%elem + else + nullify(elem) + end if + + allocate(phis_tmp(npsq,nelemd)) + phis_tmp = 0.0_r8 + + if (fv_nphys > 0) then + allocate(phis_phys_tmp(fv_nphys**2,nelemd)) + phis_phys_tmp = 0.0_r8 + do ie=1,nelemd + elem(ie)%sub_elem_mass_flux=0.0_r8 +#ifdef waccm_debug + dyn_in%fvm(ie)%CSLAM_gamma = 0.0_r8 +#endif + end do + end if + + ! Set mask to indicate which columns are active in GLL grid. + nullify(ldof) + call cam_grid_get_gcid(cam_grid_id('GLL'), ldof) + allocate(pmask(npsq*nelemd)) + pmask(:) = (ldof /= 0) + deallocate(ldof) + + if (associated(fh_topo)) then + + ! Set PIO to return error flags. + call pio_seterrorhandling(fh_topo, PIO_BCAST_ERROR, pio_errtype) + + ! Set name of grid object which will be used to read data from file + ! into internal data structure via PIO. + if (fv_nphys == 0) then + grid_name = 'GLL' + else + grid_name = 'physgrid_d' + end if + + ! Get number of global columns from the grid object and check that + ! it matches the file data. + call cam_grid_dimensions(grid_name, dims) + dyn_cols = dims(1) + + ! The dimension of the unstructured grid in the TOPO file is 'ncol'. + ierr = pio_inq_dimid(fh_topo, 'ncol', ncol_did) + if (ierr /= PIO_NOERR) then + call endrun(subname//': dimension ncol not found in bnd_topo file') + end if + ierr = pio_inq_dimlen(fh_topo, ncol_did, ncol_size) + if (ncol_size /= dyn_cols) then + if (masterproc) then + write(iulog,*) subname//': ncol_size=', ncol_size, ' : dyn_cols=', dyn_cols + end if + call endrun(subname//': ncol size in bnd_topo file does not match grid definition') + end if + + fieldname = 'PHIS' + if (dyn_field_exists(fh_topo, trim(fieldname))) then + if (fv_nphys == 0) then + call read_dyn_var(fieldname, fh_topo, 'ncol', phis_tmp) + else + call read_phys_field_2d(fieldname, fh_topo, 'ncol', phis_phys_tmp) + call map_phis_from_physgrid_to_gll(dyn_in%fvm, elem, phis_phys_tmp, & + phis_tmp, pmask) + end if + else + call endrun(subname//': Could not find PHIS field on input datafile') + end if + + ! Put the error handling back the way it was + call pio_seterrorhandling(fh_topo, pio_errtype) + + else if (analytic_ic_active() .and. (iam < par%nprocs)) then + + ! lat/lon needed in radians + latvals_deg => cam_grid_get_latvals(cam_grid_id('GLL')) + lonvals_deg => cam_grid_get_lonvals(cam_grid_id('GLL')) + allocate(latvals(np*np*nelemd)) + allocate(lonvals(np*np*nelemd)) + latvals(:) = latvals_deg(:)*deg2rad + lonvals(:) = lonvals_deg(:)*deg2rad + + allocate(glob_ind(npsq*nelemd)) + j = 1 + do ie = 1, nelemd + do i = 1, npsq + ! 
Create a global(ish) column index + glob_ind(j) = elem(ie)%GlobalId + j = j + 1 + end do + end do + call analytic_ic_set_ic(vcoord, latvals, lonvals, glob_ind, & + PHIS_OUT=phis_tmp, mask=pmask(:)) + deallocate(glob_ind) + + if (fv_nphys > 0) then + + ! initialize PHIS on physgrid + allocate(latvals_phys(fv_nphys*fv_nphys*nelemd)) + allocate(lonvals_phys(fv_nphys*fv_nphys*nelemd)) + indx = 1 + do ie = 1, nelemd + do j = 1, fv_nphys + do i = 1, fv_nphys + latvals_phys(indx) = dyn_in%fvm(ie)%center_cart_physgrid(i,j)%lat + lonvals_phys(indx) = dyn_in%fvm(ie)%center_cart_physgrid(i,j)%lon + indx = indx + 1 + end do + end do + end do + + allocate(pmask_phys(fv_nphys*fv_nphys*nelemd)) + pmask_phys(:) = .true. + allocate(glob_ind(fv_nphys*fv_nphys*nelemd)) + + j = 1 + do ie = 1, nelemd + do i = 1, fv_nphys*fv_nphys + ! Create a global(ish) column index + glob_ind(j) = elem(ie)%GlobalId + j = j + 1 + end do + end do + + call analytic_ic_set_ic(vcoord, latvals_phys, lonvals_phys, glob_ind, & + PHIS_OUT=phis_phys_tmp, mask=pmask_phys) + + deallocate(latvals_phys) + deallocate(lonvals_phys) + deallocate(pmask_phys) + deallocate(glob_ind) + end if + + end if + + deallocate(pmask) + + ! Set PHIS in element objects + do ie = 1, nelemd + elem(ie)%state%phis = 0.0_r8 + indx = 1 + do j = 1, np + do i = 1, np + elem(ie)%state%phis(i,j) = phis_tmp(indx, ie) + indx = indx + 1 + end do + end do + end do + if (fv_nphys > 0) then + do ie = 1, nelemd + dyn_in%fvm(ie)%phis_physgrid = RESHAPE(phis_phys_tmp(:,ie),(/fv_nphys,fv_nphys/)) + end do + end if + + deallocate(phis_tmp) + if (fv_nphys > 0) then + deallocate(phis_phys_tmp) + end if + + ! boundary exchange to update the redundent columns in the element objects + do ie = 1, nelemd + kptr = 0 + call edgeVpack(edgebuf, elem(ie)%state%phis, 1, kptr, ie) + end do + if(iam < par%nprocs) then + call bndry_exchange(par, edgebuf, location=subname) + end if + do ie = 1, nelemd + kptr = 0 + call edgeVunpack(edgebuf, elem(ie)%state%phis,1,kptr,ie) + end do + + !Mark phis as initialized so that physics doesn't try to set + !the initial values itself: + call mark_as_initialized("geopotential_at_surface") + +end subroutine set_phis + +!======================================================================================== + +subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) + + type(file_desc_t), pointer :: file + type(element_t), pointer :: elem(:) + integer, intent(in) :: dyn_cols + character(len=*), intent(in) :: file_desc + logical, intent(in) :: dyn_ok ! .true. iff ncol_d is okay + character(len=*), intent(out) :: dimname + + integer :: ncol_did, ncol_size + integer :: ierr + integer :: ie, i, j + integer :: grid_id + integer :: indx + real(r8) :: dbuf2(npsq, nelemd) + logical :: found + character(len=max_fieldname_len) :: dimname2, coordname + + character(len=*), parameter :: subname = 'check_file_layout' + !---------------------------------------------------------------------------- + + ! Check that number of columns in IC file matches grid definition. + ! The dimension of the unstructured grid in the IC file can either be 'ncol' + ! or 'ncol_d'. Check for ncol_d first since if a file contains distinct GLL + ! and physics grids the GLL grid will use dimension ncol_d. 
+ ierr = pio_inq_dimid(file, 'ncol_d', ncol_did) + if (ierr /= PIO_NOERR) then + if (dyn_ok) then + ierr = pio_inq_dimid(file, 'ncol', ncol_did) + if (ierr /= PIO_NOERR) then + call endrun(subname//': ERROR: neither ncol nor ncol_d dimension found in ' & + //trim(file_desc)//' file') + end if + else + call endrun(trim(subname)//': ERROR: ncol dimension not found in '//trim(file_desc) & + //' file') + end if + end if + ierr = pio_inq_dimlen(file, ncol_did, ncol_size) + if (ncol_size /= dyn_cols) then + if (masterproc) then + write(iulog, '(a,2(a,i0))') trim(subname), ': ncol_size=', ncol_size, & + ' : dyn_cols=', dyn_cols + end if + call endrun(subname//': ERROR: dimension ncol size not same as in ncdata file') + end if + + ! The dimname that's passed to the read_dyn_var routines must match the + ! dimname that's in the GLL grid object definition. The mapping info used by + ! pio is constructed using the grid object. So this dimname is not necessarily + ! the one in the IC (or topo) file. + grid_id = cam_grid_id('GLL') + call cam_grid_get_dim_names(grid_id, dimname, dimname2) + + ! If coordinates come from an initial file containing only the GLL grid then the + ! the variable names will be lat/lon. On the other hand if the file contains both + ! GLL and a distinct physics grid, then the variable names will be lat_d/lon_d. + ! Check whether lat_d/lon_d are present and use them if they are. Otherwise use + ! lat/lon. + if (dyn_field_exists(file, 'lat_d', required=.false.)) then + coordname = 'lat_d' + else + coordname = 'lat' + end if + + !! Check to make sure file is in correct order + call read_dyn_var(coordname, file, dimname, dbuf2) + found = .true. + do ie = 1, nelemd + indx = 1 + do j = 1, np + do i = 1, np + if ((abs(dbuf2(indx,ie)) > 1.e-12_r8) .and. & + (abs((elem(ie)%spherep(i,j)%lat*rad2deg - dbuf2(indx,ie))/dbuf2(indx,ie)) > 1.0e-10_r8)) then + write(iulog, *) 'XXG ',iam,') ',ie,i,j,elem(ie)%spherep(i,j)%lat,dbuf2(indx,ie)*deg2rad + call shr_sys_flush(iulog) + found = .false. + end if + indx = indx + 1 + end do + end do + end do + if (.not. found) then + call endrun("ncdata file latitudes not in correct column order") + end if + + if (dyn_field_exists(file, 'lon_d', required=.false.)) then + coordname = 'lon_d' + else + coordname = 'lon' + end if + + call read_dyn_var(coordname, file, dimname, dbuf2) + do ie = 1, nelemd + indx = 1 + do j = 1, np + do i = 1, np + if ((abs(dbuf2(indx,ie)) > 1.e-12_r8) .and. & + (abs((elem(ie)%spherep(i,j)%lon*rad2deg - dbuf2(indx,ie))/dbuf2(indx,ie)) > 1.0e-10_r8)) then + write(iulog, *) 'XXG ',iam,') ',ie,i,j,elem(ie)%spherep(i,j)%lon,dbuf2(indx,ie)*deg2rad + call shr_sys_flush(iulog) + found = .false. + end if + indx = indx + 1 + end do + end do + end do + if (.not. found) then + call endrun("ncdata file longitudes not in correct column order") + end if +end subroutine check_file_layout + +!======================================================================================== + +logical function dyn_field_exists(fh, fieldname, required) + + use pio, only: var_desc_t, PIO_inq_varid + use pio, only: PIO_NOERR + + type(file_desc_t), intent(in) :: fh + character(len=*), intent(in) :: fieldname + logical, optional, intent(in) :: required + + ! Local variables + logical :: found + logical :: field_required + integer :: ret + type(var_desc_t) :: varid + character(len=128) :: errormsg + !-------------------------------------------------------------------------- + + if (present(required)) then + field_required = required + else + field_required = .true. 
+ end if + + ret = PIO_inq_varid(fh, trim(fieldname), varid) + found = (ret == PIO_NOERR) + if (.not. found) then + if (field_required) then + write(errormsg, *) trim(fieldname),' was not present in the input file.' + call endrun('DYN_FIELD_EXISTS: '//errormsg) + end if + end if + + dyn_field_exists = found + +end function dyn_field_exists + +!======================================================================================== + +subroutine read_dyn_field_2d(fieldname, fh, dimname, buffer) + + ! Dummy arguments + character(len=*), intent(in) :: fieldname + type(file_desc_t), intent(inout) :: fh + character(len=*), intent(in) :: dimname + real(r8), intent(inout) :: buffer(:, :) + + ! Local variables + logical :: found + !---------------------------------------------------------------------------- + + buffer = 0.0_r8 +! call infld(trim(fieldname), fh, dimname, 1, npsq, 1, nelemd, buffer, & +! found, gridname='GLL') !Remove if below works! -JN + call cam_read_field(trim(fieldname), fh, buffer, found, gridname='GLL') + if(.not. found) then + call endrun('READ_DYN_FIELD_2D: Could not find '//trim(fieldname)//' field on input datafile') + end if + + ! This code allows use of compiler option to set uninitialized values + ! to NaN. In that case infld can return NaNs where the element GLL points + ! are not "unique columns" + where (shr_infnan_isnan(buffer)) buffer = 0.0_r8 + +end subroutine read_dyn_field_2d + +!======================================================================================== + +subroutine read_dyn_field_3d(fieldname, fh, dimname, buffer) + + ! Dummy arguments + character(len=*), intent(in) :: fieldname + type(file_desc_t), intent(inout) :: fh + character(len=*), intent(in) :: dimname + real(r8), intent(inout) :: buffer(:,:,:) + + ! Local variables + logical :: found + !---------------------------------------------------------------------------- + + buffer = 0.0_r8 +! call infld(trim(fieldname), fh, dimname, 'lev', 1, npsq, 1, nlev, & +! 1, nelemd, buffer, found, gridname='GLL') !Remove if below works! -JN + call cam_read_field(trim(fieldname), fh, buffer, found, 'lev', (/1, nlev/), & + dim3_pos=2, gridname='GLL') + if(.not. found) then + call endrun('READ_DYN_FIELD_3D: Could not find '//trim(fieldname)//' field on input datafile') + end if + + ! This code allows use of compiler option to set uninitialized values + ! to NaN. In that case infld can return NaNs where the element GLL points + ! are not "unique columns" + where (shr_infnan_isnan(buffer)) buffer = 0.0_r8 + +end subroutine read_dyn_field_3d + +!======================================================================================== + +subroutine read_phys_field_2d(fieldname, fh, dimname, buffer) + + ! Dummy arguments + character(len=*), intent(in) :: fieldname + type(file_desc_t), intent(inout) :: fh + character(len=*), intent(in) :: dimname + real(r8), intent(inout) :: buffer(:, :) + + ! Local variables + logical :: found + !---------------------------------------------------------------------------- + +! call infld(trim(fieldname), fh, dimname, 1, fv_nphys**2, 1, nelemd, buffer, & +! found, gridname='physgrid_d') !Remove if below works! -JN + call cam_read_field(trim(fieldname), fh, buffer, found, gridname='physgrid_d') + if(.not. 
found) then + call endrun('READ_PHYS_FIELD_2D: Could not find '//trim(fieldname)//' field on input datafile') + end if + +end subroutine read_phys_field_2d + +!======================================================================================== + +subroutine map_phis_from_physgrid_to_gll(fvm,elem,phis_phys_tmp,phis_tmp,pmask) + + !SE dycore: + use hybrid_mod, only: get_loop_ranges, config_thread_region + use dimensions_mod, only: nhc_phys + use fvm_mapping, only: phys2dyn + use thread_mod, only: horz_num_threads + + type(element_t), intent(inout) :: elem(:) + type (fvm_struct), intent(in) :: fvm(:) + real(r8) , intent(in) :: phis_phys_tmp(fv_nphys**2,nelemd) !physgrid phis + real(r8) , intent(inout) :: phis_tmp(npsq,nelemd) !gll phis + logical , intent(in) :: pmask(npsq*nelemd) + + type(hybrid_t) :: hybrid + integer :: nets, nete, ie,i,j,indx + real(r8), allocatable :: fld_phys(:,:,:,:,:),fld_gll(:,:,:,:,:) + logical :: llimiter(1) + !---------------------------------------------------------------------------- + + !!$OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,ie) + !hybrid = config_thread_region(par,'horizontal') + hybrid = config_thread_region(par,'serial') + + call get_loop_ranges(hybrid, ibeg=nets, iend=nete) + + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete)) + allocate(fld_gll(np,np,1,1,nets:nete)) + fld_phys = 0.0_r8 + do ie = nets, nete + fld_phys(1:fv_nphys,1:fv_nphys,1,1,ie) = RESHAPE(phis_phys_tmp(:,ie),(/fv_nphys,fv_nphys/)) + end do + llimiter = .true. + call phys2dyn(hybrid,elem,fld_phys,fld_gll,nets,nete,1,1,fvm,llimiter,halo_filled=.false.) + do ie = nets,nete + indx = 1 + do j = 1, np + do i = 1, np + if (pmask(((ie - 1) * npsq) + indx)) then + phis_tmp(indx,ie) = fld_gll(i,j,1,1,ie) + else + phis_tmp(indx,ie) = 0.0_r8 + end if + indx = indx + 1 + end do + end do + end do + deallocate(fld_phys) + deallocate(fld_gll) + !!$OMP END PARALLEL +end subroutine map_phis_from_physgrid_to_gll + +!======================================================================================== + +!Un-comment once "outfld has been enabled in CAMDEN -JN: +#if 0 + +subroutine write_dyn_vars(dyn_out) + + type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + + character(len=fieldname_len) :: tfname + integer :: ie, m + !---------------------------------------------------------------------------- + + if (ntrac > 0) then + do ie = 1, nelemd + call outfld('dp_fvm', RESHAPE(dyn_out%fvm(ie)%dp_fvm(1:nc,1:nc,:), & + (/nc*nc,nlev/)), nc*nc, ie) + call outfld('PSDRY_fvm', RESHAPE(dyn_out%fvm(ie)%psc(1:nc,1:nc), & + (/nc*nc/)), nc*nc, ie) + do m = 1, ntrac + tfname = trim(cnst_name(m))//'_fvm' + call outfld(tfname, RESHAPE(dyn_out%fvm(ie)%c(1:nc,1:nc,:,m), & + (/nc*nc,nlev/)), nc*nc, ie) + + tfname = 'F'//trim(cnst_name(m))//'_fvm' + call outfld(tfname, RESHAPE(dyn_out%fvm(ie)%fc(1:nc,1:nc,:,m),& + (/nc*nc,nlev/)), nc*nc, ie) + end do + end do + end if + +end subroutine write_dyn_vars + +#endif + +!========================================================================================= +end module dyn_comp diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 new file mode 100644 index 00000000..87ef19f4 --- /dev/null +++ b/src/dynamics/se/dyn_grid.F90 @@ -0,0 +1,1255 @@ +module dyn_grid +!------------------------------------------------------------------------------- +! +! Define SE computational grids on the dynamics decomposition. +! + +! 
The grid used by the SE dynamics is called the GLL grid. It is +! decomposed into elements which correspond to "blocks" in the +! physics/dynamics coupler terminology. The columns in this grid are +! located at the Gauss-Lobatto-Legendre (GLL) quadrature points. The GLL +! grid will also be used by the physics if the CSLAM advection is not used. +! If CSLAM is used for tracer advection then it uses an FVM grid and the +! physics will either use the same FVM grid or an FVM grid with a different +! number of equal area subcells. The FVM grid used by the physics is +! referred to as the "physgrid". +! +! Module responsibilities: +! +! . Provide the physics/dynamics coupler (in module phys_grid) with data for the +! physics grid on the dynamics decomposition. +! +! . Create CAM grid objects that are used by the I/O functionality to read +! data from an unstructured grid format to the dynamics data structures, and +! to write from the dynamics data structures to unstructured grid format. The +! global column ordering for the unstructured grid is determined by the SE dycore. +! +!------------------------------------------------------------------------------- + +use shr_kind_mod, only: r8 => shr_kind_r8, shr_kind_cl +use spmd_utils, only: masterproc, iam, mpicom, mstrid=>masterprocid, & + npes, mpi_integer, mpi_real8 +!use constituents, only: pcnst +use physconst, only: pi +use cam_initfiles, only: initial_file_get_id +use physics_column_type, only: physics_column_t, kind_pcol +use cam_map_utils, only: iMap + +use cam_logfile, only: iulog +use cam_abortutils, only: endrun + +!SE dycore: +use dimensions_mod, only: globaluniquecols, nelem, nelemd, nelemdmax, & + ne, np, npsq, fv_nphys, nlev, nc, ntrac +use element_mod, only: element_t +use fvm_control_volume_mod, only: fvm_struct +use hybvcoord_mod, only: hvcoord_t +use prim_init, only: prim_init1 +use edge_mod, only: initEdgeBuffer +use edgetype_mod, only: EdgeBuffer_t +use time_mod, only: TimeLevel_t +use dof_mod, only: UniqueCoords, UniquePoints + +implicit none +private +save + +integer, parameter :: dyn_decomp = 101 ! The SE dynamics grid +integer, parameter :: fvm_decomp = 102 ! The FVM (CSLAM) grid +integer, parameter :: physgrid_d = 103 ! physics grid on dynamics decomp +integer, parameter :: ptimelevels = 2 + +type (TimeLevel_t) :: TimeLevel ! main time level struct (used by tracers) +type (hvcoord_t) :: hvcoord +type(element_t), pointer :: elem(:) => null() ! local GLL elements for this task +type(fvm_struct), pointer :: fvm(:) => null() ! local FVM elements for this task + +public :: & + dyn_decomp, & + ptimelevels, & + TimeLevel, & + hvcoord, & + elem, & + fvm, & + edgebuf + +public :: dyn_grid_init +public :: get_dyn_grid_info +public :: physgrid_copy_attributes_d + +!!XXgoldyXX: v try to remove? +public :: get_horiz_grid_dim_d +public :: dyn_grid_get_colndx ! get element block/column and MPI process indices +public :: get_dyn_grid_parm +!!XXgoldyXX: ^ try to remove? +public :: dyn_grid_get_elem_coords ! get coords of a specified block element + +! Namelist variables controlling grid writing. +! Read in dyn_readnl from dyn_se_inparm group. +character(len=16), public :: se_write_grid_file = 'no' +character(len=shr_kind_cl), public :: se_grid_filename = '' +logical, public :: se_write_gll_corners = .false. + +type block_global_data + integer :: UniquePtOffset ! global index of first column in element + integer :: NumUniqueP ! number of unique columns in element + integer :: LocalID ! local index of element in a task + integer :: Owner ! 
task id of element owner +end type block_global_data + +type(physics_column_t), pointer :: local_dyn_columns(:) => NULL() + +! number of global dynamics columns. Set by SE dycore init. +integer :: ngcols_d = 0 +! number of global elements. Set by SE dycore init. +integer :: nelem_d = 0 + +real(r8), parameter :: rad2deg = 180.0_r8/pi + +type(EdgeBuffer_t) :: edgebuf + +!============================================================================= +contains +!============================================================================= + +subroutine dyn_grid_init() + + ! Initialize SE grid, and decomposition. + + use hycoef, only: hycoef_init, hypi, hypm, nprlev, & + hyam, hybm, hyai, hybi, ps0 + use physconst, only: thermodynamic_active_species_num + use ref_pres, only: ref_pres_init !Brought in via Held-Suarez - JN + use spmd_utils, only: MPI_MAX, MPI_INTEGER, mpicom + use time_manager, only: get_nstep, get_step_size + use dp_mapping, only: dp_init, dp_write + use native_mapping, only: do_native_mapping, create_native_mapping_files + + !SE dycore: + use parallel_mod, only: par + use hybrid_mod, only: hybrid_t, init_loop_ranges, & + get_loop_ranges, config_thread_region + use control_mod, only: qsplit, rsplit + use time_mod, only: tstep, nsplit + use fvm_mod, only: fvm_init2, fvm_init3, fvm_pg_init + use dimensions_mod, only: irecons_tracer + use comp_gll_ctr_vol, only: gll_grid_write + + ! Local variables + + type(file_desc_t), pointer :: fh_ini + + integer :: qsize_local + integer :: k + + type(hybrid_t) :: hybrid + integer :: ierr + integer :: dtime + + real(r8), allocatable ::clat(:), clon(:), areaa(:) + integer :: nets, nete + + character(len=*), parameter :: sub = 'dyn_grid_init' + !---------------------------------------------------------------------------- + + ! Get file handle for initial file and first consistency check + fh_ini => initial_file_get_id() + + ! Initialize hybrid coordinate arrays + call hycoef_init(fh_ini, psdry=.true.) + + hvcoord%hyam = hyam + hvcoord%hyai = hyai + hvcoord%hybm = hybm + hvcoord%hybi = hybi + hvcoord%ps0 = ps0 + do k = 1, nlev + hvcoord%hybd(k) = hvcoord%hybi(k+1) - hvcoord%hybi(k) + end do + + ! Initialize reference pressures + call ref_pres_init(hypi, hypm, nprlev) + + if (iam < par%nprocs) then + + call prim_init1(elem, fvm, par, TimeLevel) + if (fv_nphys > 0) then + call dp_init(elem, fvm) + end if + + if (fv_nphys > 0) then + qsize_local = thermodynamic_active_species_num + 3 + else + qsize_local = pcnst + 3 + end if + + call initEdgeBuffer(par, edgebuf, elem, qsize_local*nlev, nthreads=1) + + else ! auxiliary processes + + globaluniquecols = 0 + nelem = 0 + nelemd = 0 + nelemdmax = 0 + endif + + ! nelemdmax is computed on the dycore comm, we need it globally. + ngcols_d = nelemdmax + call MPI_Allreduce(ngcols_d, nelemdmax, 1, MPI_INTEGER, MPI_MAX, mpicom, ierr) + ! All pes might not have the correct global grid size + call MPI_Allreduce(globaluniquecols, ngcols_d, 1, MPI_INTEGER, MPI_MAX, mpicom, ierr) + ! All pes might not have the correct number of elements + call MPI_Allreduce(nelem, nelem_d, 1, MPI_INTEGER, MPI_MAX, mpicom, ierr) + + ! nelemd (# of elements on this task) is set by prim_init1 + call init_loop_ranges(nelemd) + + ! Dynamics timestep + ! + ! Note: dtime = timestep for physics/dynamics coupling + ! tstep = the dynamics timestep: + dtime = get_step_size() + tstep = dtime / real(nsplit*qsplit*rsplit, r8) + TimeLevel%nstep = get_nstep()*nsplit*qsplit*rsplit + + ! initial SE (subcycled) nstep + TimeLevel%nstep0 = 0 + + ! 
Define the dynamics and physics grids on the dynamics decompostion. + ! Physics grid on the physics decomposition is defined in phys_grid_init. + call define_cam_grids() + + if (fv_nphys > 0) then + + ! ================================================ + ! finish fvm initialization + ! ================================================ + + if (iam < par%nprocs) then + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid, ibeg=nets, iend=nete) + + ! initialize halo coordinate variables for cslam and physgrid + call fvm_init2(elem, fvm, hybrid, nets, nete) + call fvm_pg_init(elem, fvm, hybrid, nets, nete, irecons_tracer) + call fvm_init3(elem, fvm, hybrid, nets, nete, irecons_tracer) + end if + + end if + + ! write grid and mapping files + if (se_write_gll_corners) then + call write_grid_mapping(par, elem) + end if + + if (trim(se_write_grid_file) /= "no") then + if (fv_nphys > 0) then + call dp_write(elem, fvm, trim(se_write_grid_file), trim(se_grid_filename)) + else + call gll_grid_write(elem, trim(se_write_grid_file), trim(se_grid_filename)) + end if + end if + + if (do_native_mapping) then + + allocate(areaA(ngcols_d)) + allocate(clat(ngcols_d),clon(ngcols_d)) + call get_horiz_grid_int(ngcols_d, clat_d_out=clat, clon_d_out=clon, area_d_out=areaA) + + ! Create mapping files using SE basis functions + call create_native_mapping_files(par, elem, 'native', ngcols_d, clat, clon, areaa) + call create_native_mapping_files(par, elem, 'bilin', ngcols_d, clat, clon, areaa) + + deallocate(areaa, clat, clon) + end if + + call mpi_barrier(mpicom, ierr) + +end subroutine dyn_grid_init + +!============================================================================== + +subroutine get_dyn_grid_info(hdim1_d, hdim2_d, num_lev, & + dycore_name, index_model_top_layer, index_surface_layer, dyn_columns) + use physconst, only: pi + use cam_abortutils, only: endrun + use spmd_utils, only: iam + ! Dummy arguments + integer, intent(out) :: hdim1_d ! # longitudes or grid size + integer, intent(out) :: hdim2_d ! # latitudes or 1 + integer, intent(out) :: num_lev ! # levels + character(len=*), intent(out) :: dycore_name + integer, intent(out) :: index_model_top_layer + integer, intent(out) :: index_surface_layer + type(physics_column_t), pointer :: dyn_columns(:) ! Phys col in Dyn decomp + ! Local variables + integer :: lindex + integer :: gindex + integer :: elem_ind, col_ind, ii, jj + integer :: num_local_cols + real(kind_pcol), parameter :: radtodeg = 180.0_kind_pcol / pi + real(kind_pcol), parameter :: degtorad = pi / 180.0_kind_pcol + character(len=*), parameter :: subname = 'get_dyn_grid_info' + + if (associated(dyn_columns)) then + call endrun(subname//': dyn_columns must be unassociated pointer') + end if + if (fv_nphys > 0) then ! physics uses an FVM grid + num_local_cols = nelemd * nc * nc + else + num_local_cols = 0 + do elem_ind = 1, nelemd + num_local_cols = num_local_cols + elem(elem_ind)%idxP%NumUniquePts + end do + end if + if (associated(local_dyn_columns)) then + ! Check for correct number of columns + if (size(local_dyn_columns) /= num_local_cols) then + call endrun(subname//': called with inconsistent column numbers') + end if + else + allocate(local_dyn_columns(num_local_cols)) + end if + dyn_columns => local_dyn_columns + hdim1_d = ngcols_d + hdim2_d = 1 + num_lev = nlev + dycore_name = 'SE' + index_model_top_layer = 1 + index_surface_layer = nlev + lindex = 0 + do elem_ind = 1, nelemd + if (fv_nphys > 0) then ! 
physics uses an FVM grid + do col_ind = 0, (nc * nc) - 1 + ii = MOD(col_ind, nc) + 1 + jj = col_ind / nc + dyn_columns(lindex)%lat_rad = real(fvm(elem_ind)%center_cart(ii,jj)%lat, kind_pcol) + dyn_columns(lindex)%lat_deg = dyn_columns(lindex)%lat_rad * radtodeg + dyn_columns(lindex)%lon_rad = real(fvm(elem_ind)%center_cart(ii,jj)%lon, kind_pcol) + dyn_columns(lindex)%lon_deg = dyn_columns(lindex)%lon_rad * radtodeg + dyn_columns(lindex)%area = real(fvm(elem_ind)%area_sphere_physgrid(ii,jj), kind_pcol) + dyn_columns(lindex)%weight = dyn_columns(lindex)%area + ! File decomposition + gindex = ((elem(elem_ind)%GlobalId-1) * nc * nc) + col_ind + dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! Dynamics decomposition + dyn_columns(lindex)%dyn_task = iam + dyn_columns(lindex)%local_dyn_block = elem_ind + dyn_columns(lindex)%global_dyn_block = elem(elem_ind)%GlobalId + allocate(dyn_columns(lindex)%dyn_block_index(1)) + dyn_columns(lindex)%dyn_block_index(1) = col_ind + end do + else + do col_ind = 1, elem(elem_ind)%idxP%NumUniquePts + lindex = lindex + 1 + ii = elem(elem_ind)%idxP%ia(col_ind) + jj = elem(elem_ind)%idxP%ja(col_ind) + + dyn_columns(lindex)%lat_rad = real(elem(elem_ind)%spherep(ii,jj)%lat, kind_pcol) + dyn_columns(lindex)%lat_deg = dyn_columns(lindex)%lat_rad * radtodeg + dyn_columns(lindex)%lon_rad = real(elem(elem_ind)%spherep(ii,jj)%lon, kind_pcol) + dyn_columns(lindex)%lon_deg = dyn_columns(lindex)%lon_rad * radtodeg + dyn_columns(lindex)%area = real(1.0_kind_pcol / elem(elem_ind)%rspheremp(ii,jj), kind_pcol) + dyn_columns(lindex)%weight = dyn_columns(lindex)%area + ! File decomposition + gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1 + dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! Dynamics decomposition + dyn_columns(lindex)%dyn_task = iam + dyn_columns(lindex)%local_dyn_block = elem_ind + dyn_columns(lindex)%global_dyn_block = elem(elem_ind)%GlobalId + allocate(dyn_columns(lindex)%dyn_block_index(1)) + dyn_columns(lindex)%dyn_block_index(1) = col_ind + end do + end if + end do + + end subroutine get_dyn_grid_info + +!============================================================================== + +subroutine get_horiz_grid_dim_d(hdim1_d,hdim2_d) + + ! Returns declared horizontal dimensions of computational grid. + ! For non-lon/lat grids, declare grid to be one-dimensional, + ! i.e., (ngcols_d x 1) + + !------------------------------Arguments-------------------------------- + integer, intent(out) :: hdim1_d ! first horizontal dimension + integer, intent(out), optional :: hdim2_d ! second horizontal dimension + !----------------------------------------------------------------------- + + if (fv_nphys > 0) then + hdim1_d = fv_nphys*fv_nphys*nelem_d + else + hdim1_d = ngcols_d + end if + if (present(hdim2_d)) then + hdim2_d = 1 + end if + +end subroutine get_horiz_grid_dim_d + +!========================================================================================= + +subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & + wght_d_out, lat_d_out, lon_d_out) + + ! Return global arrays of latitude and longitude (in radians), column + ! surface area (in radians squared) and surface integration weights for + ! global column indices that will be passed to/from physics + + ! arguments + integer, intent(in) :: nxy ! array sizes + + real(r8), intent(out), optional :: clat_d_out(:) ! 
column latitudes + real(r8), intent(out), optional :: clon_d_out(:) ! column longitudes + real(r8), intent(out), target, optional :: area_d_out(:) ! column surface + + real(r8), intent(out), target, optional :: wght_d_out(:) ! column integration weight + real(r8), intent(out), optional :: lat_d_out(:) ! column degree latitudes + real(r8), intent(out), optional :: lon_d_out(:) ! column degree longitudes + + ! local variables + real(r8), pointer :: area_d(:) + real(r8), pointer :: temp(:) + character(len=256) :: errormsg + character(len=*), parameter :: sub = 'get_horiz_grid_d' + !---------------------------------------------------------------------------- + + ! check that nxy is set to correct size for global arrays + if (fv_nphys > 0) then + if (nxy < fv_nphys*fv_nphys*nelem_d) then + write(errormsg, *) sub//': arrays too small; Passed', & + nxy, ', needs to be at least', fv_nphys*fv_nphys*nelem_d + call endrun(errormsg) + end if + else + if (nxy < ngcols_d) then + write(errormsg,*) sub//': arrays not large enough; ', & + 'Passed', nxy, ', needs to be at least', ngcols_d + call endrun(errormsg) + end if + end if + + if ( present(area_d_out) ) then + if (size(area_d_out) /= nxy) then + call endrun(sub//': bad area_d_out array size') + end if + area_d => area_d_out + call create_global_area(area_d) + + else if ( present(wght_d_out) ) then + if (size(wght_d_out) /= nxy) then + call endrun(sub//': bad wght_d_out array size') + end if + area_d => wght_d_out + call create_global_area(area_d) + + end if + + ! If one of area_d_out or wght_d_out was present, then it was computed + ! above. If they were *both* present, then do this: + if ( present(area_d_out) .and. present(wght_d_out) ) then + wght_d_out(:) = area_d_out(:) + end if + + if (present(clon_d_out)) then + if (size(clon_d_out) /= nxy) then + call endrun(sub//': bad clon_d_out array size in dyn_grid') + end if + end if + + if (present(clat_d_out)) then + + if (size(clat_d_out) /= nxy) then + call endrun('bad clat_d_out array size in dyn_grid') + end if + + if (present(clon_d_out)) then + call create_global_coords(clat_d_out, clon_d_out, lat_d_out, lon_d_out) + else + allocate(temp(nxy)) + call create_global_coords(clat_d_out, temp, lat_d_out, lon_d_out) + deallocate(temp) + end if + + else if (present(clon_d_out)) then + + allocate(temp(nxy)) + call create_global_coords(temp, clon_d_out, lat_d_out, lon_d_out) + deallocate(temp) + + end if + +end subroutine get_horiz_grid_int + +!========================================================================================= + +subroutine physgrid_copy_attributes_d(gridname, grid_attribute_names) + + ! create list of attributes for the physics grid that should be copied + ! from the corresponding grid object on the dynamics decomposition + + use cam_grid_support, only: max_hcoordname_len + + ! Dummy arguments + character(len=max_hcoordname_len), intent(out) :: gridname + character(len=max_hcoordname_len), pointer, intent(out) :: grid_attribute_names(:) + + if (fv_nphys > 0) then + gridname = 'physgrid_d' + allocate(grid_attribute_names(2)) + grid_attribute_names(1) = 'fv_nphys' + grid_attribute_names(2) = 'ne' + else + gridname = 'GLL' + allocate(grid_attribute_names(3)) + ! For standard CAM-SE, we need to copy the area attribute. + ! 
For physgrid, the physics grid will create area (GLL has area_d) + grid_attribute_names(1) = 'area' + grid_attribute_names(2) = 'np' + grid_attribute_names(3) = 'ne' + end if + +end subroutine physgrid_copy_attributes_d + +!========================================================================================= + +integer function get_dyn_grid_parm(name) result(ival) + + ! This function is in the process of being deprecated, but is still needed + ! as a dummy interface to satisfy external references from some chemistry routines. + + use pmgrid, only: plat, plev + + character(len=*), intent(in) :: name + !---------------------------------------------------------------------------- + + if (name.eq.'plat') then + ival = plat + else if(name.eq.'plon') then + if (fv_nphys>0) then + ival = fv_nphys*fv_nphys*nelem_d + else + ival = ngcols_d + end if + else if(name.eq.'plev') then + ival = plev + + else + ival = -1 + end if + +end function get_dyn_grid_parm + +!========================================================================================= + +subroutine dyn_grid_get_colndx(igcol, ncols, owners, col, lbk) + + ! For each global column index return the owning task. If the column is owned + ! by this task, then also return the local block number and column index in that + ! block. + ! + ! NOTE: this routine needs to be updated for the physgrid + + integer, intent(in) :: ncols + integer, intent(in) :: igcol(ncols) + integer, intent(out) :: owners(ncols) + integer, intent(out) :: col(ncols) + integer, intent(out) :: lbk(ncols) + + !---------------------------------------------------------------------------- + + owners = (igcol * 0) -1 ! Kill compiler warnings + col = -1 ! Kill compiler warnings + lbk = -1 ! Kill compiler warnings + call endrun('dyn_grid_get_colndx: not implemented for unstructured grids') + +end subroutine dyn_grid_get_colndx + +!========================================================================================= + +subroutine dyn_grid_get_elem_coords(ie, rlon, rlat, cdex) + + ! Returns coordinates of a specified block element of the dyn grid + ! + ! NB: This routine only uses the GLL points (i.e, it ignores the physics + ! grid). This is probably OK as current use is only for dyn_decomp + ! variables in history. + + integer, intent(in) :: ie ! block element index + + real(r8),optional, intent(out) :: rlon(:) ! longitudes of the columns in the element + real(r8),optional, intent(out) :: rlat(:) ! latitudes of the columns in the element + integer, optional, intent(out) :: cdex(:) ! 
global column index + + integer :: sb,eb, ii, i,j, icol, igcol + real(r8), allocatable :: clat(:), clon(:) + !---------------------------------------------------------------------------- + + if (fv_nphys > 0) then + call endrun('dyn_grid_get_colndx: not implemented for the FVM physics grid') + end if + + sb = elem(ie)%idxp%UniquePtOffset + eb = sb + elem(ie)%idxp%NumUniquePts-1 + + allocate( clat(sb:eb), clon(sb:eb) ) + call UniqueCoords( elem(ie)%idxP, elem(ie)%spherep, clat(sb:eb), clon(sb:eb) ) + + if (present(cdex)) cdex(:) = -1 + if (present(rlat)) rlat(:) = -999._r8 + if (present(rlon)) rlon(:) = -999._r8 + + do ii=1,elem(ie)%idxp%NumUniquePts + i=elem(ie)%idxp%ia(ii) + j=elem(ie)%idxp%ja(ii) + icol = i+(j-1)*np + igcol = elem(ie)%idxp%UniquePtoffset+ii-1 + if (present(cdex)) cdex(icol) = igcol + if (present(rlat)) rlat(icol) = clat( igcol ) + if (present(rlon)) rlon(icol) = clon( igcol ) + end do + + deallocate( clat, clon ) + +end subroutine dyn_grid_get_elem_coords + +!========================================================================================= +! Private routines. +!========================================================================================= + +subroutine define_cam_grids() + + ! Create grid objects on the dynamics decomposition for grids used by + ! the dycore. The decomposed grid object contains data for the elements + ! in each task and information to map that data to the global grid. + ! + ! Notes on dynamic memory management: + ! + ! . Coordinate values and the map passed to the horiz_coord_create + ! method are copied to the object. The memory may be deallocated + ! after the object is created. + ! + ! . The area values passed to cam_grid_attribute_register are only pointed + ! to by the attribute object, so that memory cannot be deallocated. But the + ! map is copied. + ! + ! . The grid_map passed to cam_grid_register is just pointed to. + ! Cannot be deallocated. + + use cam_grid_support, only: horiz_coord_t, horiz_coord_create + use cam_grid_support, only: cam_grid_register, cam_grid_attribute_register + + ! Local variables + integer :: i, ii, j, k, ie, mapind + character(len=8) :: latname, lonname, ncolname, areaname + + type(horiz_coord_t), pointer :: lat_coord + type(horiz_coord_t), pointer :: lon_coord + integer(iMap), pointer :: grid_map(:,:) + + real(r8), allocatable :: pelat_deg(:) ! pe-local latitudes (degrees) + real(r8), allocatable :: pelon_deg(:) ! pe-local longitudes (degrees) + real(r8), pointer :: pearea(:) => null() ! pe-local areas + real(r8) :: areaw(np,np) + integer(iMap) :: fdofP_local(npsq,nelemd) ! pe-local map for dynamics decomp + integer(iMap), allocatable :: pemap(:) ! pe-local map for PIO decomp + + integer :: ncols_fvm, ngcols_fvm + real(r8), allocatable :: fvm_coord(:) + real(r8), pointer :: fvm_area(:) + integer(iMap), pointer :: fvm_map(:) + + integer :: ncols_physgrid, ngcols_physgrid + real(r8), allocatable :: physgrid_coord(:) + real(r8), pointer :: physgrid_area(:) + integer(iMap), pointer :: physgrid_map(:) + !---------------------------------------------------------------------------- + + !----------------------- + ! Create GLL grid object + !----------------------- + + ! 
Calculate the mapping between element GLL points and file order + fdofp_local = 0_iMap + do ie = 1, nelemd + do ii = 1, elem(ie)%idxP%NumUniquePts + i = elem(ie)%idxP%ia(ii) + j = elem(ie)%idxP%ja(ii) + fdofp_local((np*(j-1))+i,ie) = elem(ie)%idxP%UniquePtoffset + ii - 1 + end do + end do + + allocate(pelat_deg(np*np*nelemd)) + allocate(pelon_deg(np*np*nelemd)) + allocate(pearea(np*np*nelemd)) + allocate(pemap(np*np*nelemd)) + + pemap = 0_iMap + ii = 1 + do ie = 1, nelemd + areaw = 1.0_r8 / elem(ie)%rspheremp(:,:) + pearea(ii:ii+npsq-1) = reshape(areaw, (/ np*np /)) + pemap(ii:ii+npsq-1) = fdofp_local(:,ie) + do j = 1, np + do i = 1, np + pelat_deg(ii) = elem(ie)%spherep(i,j)%lat * rad2deg + pelon_deg(ii) = elem(ie)%spherep(i,j)%lon * rad2deg + ii = ii + 1 + end do + end do + end do + + ! If using the physics grid then the GLL grid will use the names with + ! '_d' suffixes and the physics grid will use the unadorned names. + ! This allows fields on both the GLL and physics grids to be written to history + ! output files. + if (fv_nphys > 0) then + latname = 'lat_d' + lonname = 'lon_d' + ncolname = 'ncol_d' + areaname = 'area_d' + else + latname = 'lat' + lonname = 'lon' + ncolname = 'ncol' + areaname = 'area' + end if + lat_coord => horiz_coord_create(trim(latname), trim(ncolname), ngcols_d, & + 'latitude', 'degrees_north', 1, size(pelat_deg), pelat_deg, map=pemap) + lon_coord => horiz_coord_create(trim(lonname), trim(ncolname), ngcols_d, & + 'longitude', 'degrees_east', 1, size(pelon_deg), pelon_deg, map=pemap) + + ! Map for GLL grid + allocate(grid_map(3,npsq*nelemd)) + grid_map = 0_iMap + mapind = 1 + do j = 1, nelemd + do i = 1, npsq + grid_map(1, mapind) = i + grid_map(2, mapind) = j + grid_map(3, mapind) = pemap(mapind) + mapind = mapind + 1 + end do + end do + + ! The native SE GLL grid + call cam_grid_register('GLL', dyn_decomp, lat_coord, lon_coord, & + grid_map, block_indexed=.false., unstruct=.true.) + call cam_grid_attribute_register('GLL', trim(areaname), 'gll grid areas', & + trim(ncolname), pearea, map=pemap) + call cam_grid_attribute_register('GLL', 'np', '', np) + call cam_grid_attribute_register('GLL', 'ne', '', ne) + + ! Coordinate values and maps are copied into the coordinate and attribute objects. + ! Locally allocated storage is no longer needed. + deallocate(pelat_deg) + deallocate(pelon_deg) + deallocate(pemap) + + ! pearea cannot be deallocated as the attribute object is just pointing + ! to that memory. It can be nullified since the attribute object has + ! the reference. + nullify(pearea) + + ! grid_map cannot be deallocated as the cam_filemap_t object just points + ! to it. It can be nullified. + nullify(grid_map) + + !--------------------------------- + ! 
Create FVM grid object for CSLAM + !--------------------------------- + + if (ntrac > 0) then + + ncols_fvm = nc * nc * nelemd + ngcols_fvm = nc * nc * nelem_d + allocate(fvm_coord(ncols_fvm)) + allocate(fvm_map(ncols_fvm)) + allocate(fvm_area(ncols_fvm)) + + do ie = 1, nelemd + k = 1 + do j = 1, nc + do i = 1, nc + mapind = k + ((ie - 1) * nc * nc) + fvm_coord(mapind) = fvm(ie)%center_cart(i,j)%lon*rad2deg + fvm_map(mapind) = k + ((elem(ie)%GlobalId-1) * nc * nc) + fvm_area(mapind) = fvm(ie)%area_sphere(i,j) + k = k + 1 + end do + end do + end do + lon_coord => horiz_coord_create('lon_fvm', 'ncol_fvm', ngcols_fvm, & + 'longitude', 'degrees_east', 1, size(fvm_coord), fvm_coord, & + map=fvm_map) + + do ie = 1, nelemd + k = 1 + do j = 1, nc + do i = 1, nc + mapind = k + ((ie - 1) * nc * nc) + fvm_coord(mapind) = fvm(ie)%center_cart(i,j)%lat*rad2deg + k = k + 1 + end do + end do + end do + lat_coord => horiz_coord_create('lat_fvm', 'ncol_fvm', ngcols_fvm, & + 'latitude', 'degrees_north', 1, size(fvm_coord), fvm_coord, & + map=fvm_map) + + ! Map for FVM grid + allocate(grid_map(3, ncols_fvm)) + grid_map = 0_iMap + mapind = 1 + do j = 1, nelemd + do i = 1, nc*nc + grid_map(1, mapind) = i + grid_map(2, mapind) = j + grid_map(3, mapind) = fvm_map(mapind) + mapind = mapind + 1 + end do + end do + + ! create FVM (CSLAM) grid object + call cam_grid_register('FVM', fvm_decomp, lat_coord, lon_coord, & + grid_map, block_indexed=.false., unstruct=.true.) + call cam_grid_attribute_register('FVM', 'area_fvm', 'fvm grid areas', & + 'ncol_fvm', fvm_area, map=fvm_map) + call cam_grid_attribute_register('FVM', 'nc', '', nc) + call cam_grid_attribute_register('FVM', 'ne', '', ne) + + deallocate(fvm_coord) + deallocate(fvm_map) + nullify(fvm_area) + nullify(grid_map) + + end if + + !------------------------------------------------------------------ + ! Create grid object for physics grid on the dynamics decomposition + !------------------------------------------------------------------ + + if (fv_nphys > 0) then + + ncols_physgrid = fv_nphys * fv_nphys * nelemd + ngcols_physgrid = fv_nphys * fv_nphys * nelem_d + allocate(physgrid_coord(ncols_physgrid)) + allocate(physgrid_map(ncols_physgrid)) + allocate(physgrid_area(ncols_physgrid)) + + do ie = 1, nelemd + k = 1 + do j = 1, fv_nphys + do i = 1, fv_nphys + mapind = k + ((ie - 1) * fv_nphys * fv_nphys) + physgrid_coord(mapind) = fvm(ie)%center_cart_physgrid(i,j)%lon*rad2deg + physgrid_map(mapind) = k + ((elem(ie)%GlobalId-1) * fv_nphys * fv_nphys) + physgrid_area(mapind) = fvm(ie)%area_sphere_physgrid(i,j) + k = k + 1 + end do + end do + end do + lon_coord => horiz_coord_create('lon', 'ncol', ngcols_physgrid, & + 'longitude', 'degrees_east', 1, size(physgrid_coord), physgrid_coord, & + map=physgrid_map) + + do ie = 1, nelemd + k = 1 + do j = 1, fv_nphys + do i = 1, fv_nphys + mapind = k + ((ie - 1) * fv_nphys * fv_nphys) + physgrid_coord(mapind) = fvm(ie)%center_cart_physgrid(i,j)%lat*rad2deg + k = k + 1 + end do + end do + end do + lat_coord => horiz_coord_create('lat', 'ncol', ngcols_physgrid, & + 'latitude', 'degrees_north', 1, size(physgrid_coord), physgrid_coord, & + map=physgrid_map) + + ! Map for physics grid + allocate(grid_map(3, ncols_physgrid)) + grid_map = 0_iMap + mapind = 1 + do j = 1, nelemd + do i = 1, fv_nphys*fv_nphys + grid_map(1, mapind) = i + grid_map(2, mapind) = j + grid_map(3, mapind) = physgrid_map(mapind) + mapind = mapind + 1 + end do + end do + + ! 
create physics grid object + call cam_grid_register('physgrid_d', physgrid_d, lat_coord, lon_coord, & + grid_map, block_indexed=.false., unstruct=.true.) + call cam_grid_attribute_register('physgrid_d', 'area_physgrid', 'physics grid areas', & + 'ncol', physgrid_area, map=physgrid_map) + call cam_grid_attribute_register('physgrid_d', 'fv_nphys', '', fv_nphys) + call cam_grid_attribute_register('physgrid_d', 'ne', '', ne) + + deallocate(physgrid_coord) + deallocate(physgrid_map) + nullify(physgrid_area) + nullify(grid_map) + + end if + + nullify(lat_coord) ! Belongs to grid + nullify(lon_coord) ! Belongs to grid + +end subroutine define_cam_grids + +!======================================================================================== + +subroutine write_grid_mapping(par, elem) + + use cam_pio_utils, only: cam_pio_createfile, cam_pio_newdecomp + use pio, only: pio_def_dim, var_desc_t, pio_int, pio_def_var, & + pio_enddef, pio_closefile, io_desc_t, & + pio_write_darray, pio_freedecomp + + ! SE dycore: + use parallel_mod, only: parallel_t + use dof_mod, only: createmetadata + + ! arguments + type(parallel_t), intent(in) :: par + type(element_t), intent(in) :: elem(:) + + ! local variables + integer, parameter :: npm12 = (np-1)*(np-1) + + type(file_desc_t) :: nc + type(var_desc_t) :: vid + type(io_desc_t) :: iodesc + integer :: dim1, dim2, ierr, i, j, ie, cc, base, ii, jj + integer :: subelement_corners(npm12*nelemd,4) + integer :: dof(npm12*nelemd*4) + !---------------------------------------------------------------------------- + + ! Create a CS grid mapping file for postprocessing tools + + ! write meta data for physics on GLL nodes + call cam_pio_createfile(nc, 'SEMapping.nc', 0) + + ierr = pio_def_dim(nc, 'ncenters', npm12*nelem_d, dim1) + ierr = pio_def_dim(nc, 'ncorners', 4, dim2) + ierr = pio_def_var(nc, 'element_corners', PIO_INT, (/dim1,dim2/), vid) + + ierr = pio_enddef(nc) + call createmetadata(par, elem, subelement_corners) + + jj=0 + do cc = 0, 3 + do ie = 1, nelemd + base = ((elem(ie)%globalid-1)+cc*nelem_d)*npm12 + ii=0 + do j = 1, np-1 + do i = 1, np-1 + ii=ii+1 + jj=jj+1 + dof(jj) = base+ii + end do + end do + end do + end do + + call cam_pio_newdecomp(iodesc, (/nelem_d*npm12,4/), dof, pio_int) + + call pio_write_darray(nc, vid, iodesc, & + reshape(subelement_corners, (/nelemd*npm12*4/)), ierr) + + call pio_freedecomp(nc, iodesc) + + call pio_closefile(nc) + +end subroutine write_grid_mapping + +!========================================================================================= + +subroutine create_global_area(area_d) + use dp_mapping, only: dp_reorder, dp_allocate, dp_deallocate + + ! Gather global array of column areas for the physics grid, + ! reorder to global column order, then broadcast it to all tasks. + + ! Input variables + real(r8), pointer :: area_d(:) + + ! Local variables + real(r8) :: areaw(np,np) + real(r8), allocatable :: rbuf(:), dp_area(:,:) + integer :: rdispls(npes), recvcounts(npes) + integer :: ncol + integer :: ie, sb, eb, i, j, k + integer :: ierr + integer :: ibuf + character(len=*), parameter :: sub = 'create_global_area' + !---------------------------------------------------------------------------- + + if (masterproc) then + write(iulog, *) sub//': INFO: Non-scalable action: gathering global area in SE dycore.' + end if + + if (fv_nphys > 0) then ! physics uses an FVM grid + + ! first gather all data onto masterproc, in mpi task order (via + ! 
mpi_gatherv) then redorder into globalID order (via dp_reorder) + ncol = fv_nphys*fv_nphys*nelem_d + allocate(rbuf(ncol)) + allocate(dp_area(fv_nphys*fv_nphys,nelem_d)) + + do ie = 1, nelemd + k = 1 + do j = 1, fv_nphys + do i = 1, fv_nphys + dp_area(k,ie) = fvm(ie)%area_sphere_physgrid(i,j) + k = k + 1 + end do + end do + end do + + call mpi_gather(nelemd*fv_nphys*fv_nphys, 1, mpi_integer, recvcounts, 1, & + mpi_integer, mstrid, mpicom, ierr) + ! Figure global displacements + if (masterproc) then + rdispls(1) = 0 + do ie = 2, npes + rdispls(ie) = rdispls(ie-1) + recvcounts(ie-1) + end do + ! Check to make sure we counted correctly + if (rdispls(npes) + recvcounts(npes) /= ncol) then + call endrun(sub//': bad rdispls array size') + end if + end if + + ! Gather up the areas onto the masterproc + call mpi_gatherv(dp_area, fv_nphys*fv_nphys*nelemd, mpi_real8, rbuf, & + recvcounts, rdispls, mpi_real8, mstrid, mpicom, ierr) + + ! Reorder to global order + call dp_allocate(elem) + if (masterproc) call dp_reorder(rbuf, area_d) + call dp_deallocate() + + ! Send everyone else the data + call mpi_bcast(area_d, ncol, mpi_real8, mstrid, mpicom, ierr) + + deallocate(dp_area) + + else ! physics is on the GLL grid + + allocate(rbuf(ngcols_d)) + do ie = 1, nelemdmax + if (ie <= nelemd) then + rdispls(iam+1) = elem(ie)%idxp%UniquePtOffset - 1 + eb = rdispls(iam+1) + elem(ie)%idxp%NumUniquePts + recvcounts(iam+1) = elem(ie)%idxP%NumUniquePts + areaw = 1.0_r8 / elem(ie)%rspheremp(:,:) + call UniquePoints(elem(ie)%idxP, areaw, area_d(rdispls(iam+1)+1:eb)) + else + rdispls(iam+1) = 0 + recvcounts(iam+1) = 0 + end if + + ibuf = rdispls(iam+1) + call mpi_allgather(ibuf, 1, mpi_integer, rdispls, & + 1, mpi_integer, mpicom, ierr) + + ibuf = recvcounts(iam+1) + call mpi_allgather(ibuf, 1, mpi_integer, recvcounts, & + 1, mpi_integer, mpicom, ierr) + + sb = rdispls(iam+1) + 1 + eb = rdispls(iam+1) + recvcounts(iam+1) + + rbuf(1:recvcounts(iam+1)) = area_d(sb:eb) + call mpi_allgatherv(rbuf, recvcounts(iam+1), mpi_real8, area_d, & + recvcounts(:), rdispls(:), mpi_real8, mpicom, ierr) + end do + + end if + + deallocate(rbuf) + +end subroutine create_global_area + +!========================================================================================= + +subroutine create_global_coords(clat, clon, lat_out, lon_out) + use dp_mapping, only: dp_reorder, dp_allocate, dp_deallocate + + ! Gather global arrays of column coordinates for the physics grid, + ! reorder to global column order, then broadcast to all tasks. + + ! arguments + real(r8), intent(out) :: clat(:) + real(r8), intent(out) :: clon(:) + real(r8), optional, intent(out) :: lat_out(:) + real(r8), optional, intent(out) :: lon_out(:) + + ! Local variables + real(r8), allocatable :: rbuf(:), dp_lon(:,:), dp_lat(:,:) + integer :: rdispls(npes), recvcounts(npes) + integer :: ie, sb, eb, i, j, k + integer :: ierr + integer :: ibuf + integer :: ncol + character(len=*), parameter :: sub='create_global_coords' + !---------------------------------------------------------------------------- + + if (masterproc) then + write(iulog, *) sub//': INFO: Non-scalable action: Creating global coords in SE dycore.' + end if + + clat(:) = -iam + clon(:) = -iam + if (present(lon_out)) then + lon_out(:) = -iam + end if + if (present(lat_out)) then + lat_out(:) = -iam + end if + + if (fv_nphys > 0) then ! physics uses an FVM grid + + ! first gather all data onto masterproc, in mpi task order (via + ! 
mpi_gatherv) then redorder into globalID order (via dp_reorder) + + ncol = fv_nphys*fv_nphys*nelem_d + allocate(rbuf(ncol)) + allocate(dp_lon(fv_nphys*fv_nphys,nelem_d)) + allocate(dp_lat(fv_nphys*fv_nphys,nelem_d)) + + do ie = 1, nelemd + k = 1 + do j = 1, fv_nphys + do i = 1, fv_nphys + dp_lon(k,ie) = fvm(ie)%center_cart_physgrid(i,j)%lon ! radians + dp_lat(k,ie) = fvm(ie)%center_cart_physgrid(i,j)%lat + k = k + 1 + end do + end do + end do + + call mpi_gather(nelemd*fv_nphys*fv_nphys, 1, mpi_integer, recvcounts, & + 1, mpi_integer, mstrid, mpicom, ierr) + + ! Figure global displacements + if (masterproc) then + rdispls(1) = 0 + do ie = 2, npes + rdispls(ie) = rdispls(ie-1) + recvcounts(ie-1) + end do + ! Check to make sure we counted correctly + if (rdispls(npes) + recvcounts(npes) /= ncol) then + call endrun(sub//': bad rdispls array size') + end if + end if + + ! Gather up global latitudes + call mpi_gatherv(dp_lat, fv_nphys*fv_nphys*nelemd, mpi_real8, rbuf, & + recvcounts, rdispls, mpi_real8, mstrid, mpicom, ierr) + + ! Reorder to global order + call dp_allocate(elem) + if (masterproc) call dp_reorder(rbuf, clat) + + ! Send everyone else the data + call mpi_bcast(clat, ncol, mpi_real8, mstrid, mpicom, ierr) + + ! Gather up global longitudes + call mpi_gatherv(dp_lon, fv_nphys*fv_nphys*nelemd, mpi_real8, rbuf, & + recvcounts, rdispls, mpi_real8, mstrid, mpicom, ierr) + + ! Reorder to global order + if (masterproc) call dp_reorder(rbuf, clon) + call dp_deallocate() + + ! Send everyone else the data + call mpi_bcast(clon, ncol, mpi_real8, mstrid, mpicom, ierr) + + ! Create degree versions if requested + if (present(lat_out)) then + lat_out(:) = clat(:) * rad2deg + end if + if (present(lon_out)) then + lon_out(:) = clon(:) * rad2deg + end if + + deallocate(dp_lon) + deallocate(dp_lat) + + else ! physics uses the GLL grid + + allocate(rbuf(ngcols_d)) + + do ie = 1, nelemdmax + + if(ie <= nelemd) then + rdispls(iam+1) = elem(ie)%idxp%UniquePtOffset - 1 + eb = rdispls(iam+1) + elem(ie)%idxp%NumUniquePts + recvcounts(iam+1) = elem(ie)%idxP%NumUniquePts + + call UniqueCoords(elem(ie)%idxP, elem(ie)%spherep, & + clat(rdispls(iam+1)+1:eb), clon(rdispls(iam+1)+1:eb)) + + if (present(lat_out)) then + lat_out(rdispls(iam+1)+1:eb) = clat(rdispls(iam+1)+1:eb) * rad2deg + end if + + if (present(lon_out)) then + lon_out(rdispls(iam+1)+1:eb) = clon(rdispls(iam+1)+1:eb) * rad2deg + end if + + else + rdispls(iam+1) = 0 + recvcounts(iam+1) = 0 + end if + + ibuf = rdispls(iam+1) + call mpi_allgather(ibuf, 1, mpi_integer, rdispls, & + 1, mpi_integer, mpicom, ierr) + + ibuf = recvcounts(iam+1) + call mpi_allgather(ibuf, 1, mpi_integer, recvcounts, & + 1, mpi_integer, mpicom, ierr) + + sb = rdispls(iam+1) + 1 + eb = rdispls(iam+1) + recvcounts(iam+1) + + rbuf(1:recvcounts(iam+1)) = clat(sb:eb) ! whats going to happen if end=0? 
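+            ! Clarifying note on the question above (a descriptive comment, not part of
+            ! the original patch): when ie > nelemd on this task, recvcounts(iam+1) is 0,
+            ! so sb = eb + 1 and both rbuf(1:0) and clat(sb:eb) are zero-sized sections.
+            ! The assignment is then a no-op and this rank simply contributes no data to
+            ! the mpi_allgatherv call below, which is legal MPI behavior.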
+ call mpi_allgatherv(rbuf, recvcounts(iam+1), mpi_real8, clat, & + recvcounts(:), rdispls(:), mpi_real8, mpicom, ierr) + + if (present(lat_out)) then + rbuf(1:recvcounts(iam+1)) = lat_out(sb:eb) + call mpi_allgatherv(rbuf, recvcounts(iam+1), mpi_real8, lat_out, & + recvcounts(:), rdispls(:), mpi_real8, mpicom, ierr) + end if + + rbuf(1:recvcounts(iam+1)) = clon(sb:eb) + call mpi_allgatherv(rbuf, recvcounts(iam+1), mpi_real8, clon, & + recvcounts(:), rdispls(:), mpi_real8, mpicom, ierr) + + if (present(lon_out)) then + rbuf(1:recvcounts(iam+1)) = lon_out(sb:eb) + call mpi_allgatherv(rbuf, recvcounts(iam+1), mpi_real8, lon_out, & + recvcounts(:), rdispls(:), mpi_real8, mpicom, ierr) + end if + + end do ! ie = 1, nelemdmax + + end if ! (fv_nphys > 0) + +end subroutine create_global_coords + +!============================================================================== + +end module dyn_grid diff --git a/src/dynamics/se/native_mapping.F90 b/src/dynamics/se/native_mapping.F90 new file mode 100644 index 00000000..cb69049a --- /dev/null +++ b/src/dynamics/se/native_mapping.F90 @@ -0,0 +1,537 @@ +module native_mapping +! +! Create mapping files using the SE basis functions. This module looks for the namelist 'native_mapping' in +! file NLFileName (usually atm_in) and reads from it a list of up to maxoutgrids grid description files +! It then creates a grid mapping file from the currently defined SE grid to the grid described in each file +! using the SE basis functions. The output mapping file name is generated based on the SE model resolution +! and the input grid file name and ends in '_date_native.nc' +! + use cam_logfile, only : iulog + use shr_kind_mod, only : r8 => shr_kind_r8, shr_kind_cl + use shr_const_mod, only : pi=>shr_const_pi + use cam_abortutils, only : endrun + use spmd_utils, only : iam, masterproc, mpi_character, mpi_logical, mpi_integer, mpi_max, & + mpicom, mstrid=>masterprocid + + implicit none + private + public :: native_mapping_readnl, create_native_mapping_files, do_native_mapping + + integer, parameter :: maxoutgrids=5 + character(len=shr_kind_cl) :: native_mapping_outgrids(maxoutgrids) + logical, protected :: do_native_mapping + +!============================================================================================= +contains +!============================================================================================= + +subroutine native_mapping_readnl(NLFileName) + + use shr_nl_mod, only: find_group_name => shr_nl_find_group_name + use shr_file_mod, only: shr_file_getunit, shr_file_freeunit + + character(len=*), intent(in) :: NLFileName + + character(len=shr_kind_cl) :: mappingfile, fname + + namelist /native_mapping_nl/ native_mapping_outgrids + integer :: nf, unitn, ierr + logical :: exist + character(len=*), parameter :: sub="native_mapping_readnl" + !----------------------------------------------------------------------------- + + do_native_mapping=.false. + + do nf=1,maxoutgrids + native_mapping_outgrids(nf)='' + enddo + + if(masterproc) then + exist=.true. + write(iulog,*) sub//': Check for native_mapping_nl namelist in ',trim(nlfilename) + unitn = shr_file_getunit() + open( unitn, file=trim(nlfilename), status='old' ) + + call find_group_name(unitn, 'native_mapping_nl', status=ierr) + if(ierr/=0) then + write(iulog,*) sub//': No native_mapping_nl namelist found' + exist=.false. 
+ end if + if(exist) then + read(unitn, native_mapping_nl, iostat=ierr) + if(ierr/=0) then + call endrun(sub//': namelist read returns an error condition for native_mapping_nl') + end if + if(len_trim(native_mapping_outgrids(1))==0) exist=.false. + end if + close(unitn) + call shr_file_freeunit(unitn) + end if + + call mpi_bcast(exist, 1, mpi_logical, mstrid, mpicom, ierr) + if (ierr /= 0) call endrun(sub//": FATAL: mpi_bcast: exist") + + if(.not. exist) return + + call mpi_bcast(native_mapping_outgrids, maxoutgrids*shr_kind_cl, mpi_character, mstrid, mpicom, ierr) + if (ierr /= 0) call endrun(sub//": FATAL: mpi_bcast: native_mapping_outgrids") + + do_native_mapping=.true. + +end subroutine native_mapping_readnl + +!============================================================================================= + +subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, areaa) + + use cam_pio_utils, only : cam_pio_openfile, cam_pio_createfile + use pio, only : pio_noerr, pio_openfile, pio_createfile, pio_closefile, & + pio_get_var, pio_put_var, pio_write_darray,pio_int, pio_double, & + pio_def_var, pio_put_att, pio_global, file_desc_t, var_desc_t, & + io_desc_t, pio_internal_error,pio_inq_dimlen, pio_inq_varid, & + pio_get_att, pio_enddef, pio_bcast_error,pio_internal_error, & + pio_def_dim, pio_inq_dimid, pio_seterrorhandling, pio_initdecomp + + !SE dycore: + use parallel_mod, only : parallel_t, global_shared_buf, global_shared_sum + use global_norms_mod, only: wrap_repro_sum + use element_mod, only : element_t + use hybrid_mod, only : hybrid_t, config_thread_region + use quadrature_mod, only : quadrature_t, gauss, gausslobatto + use interpolate_mod, only : interpdata_t, cube_facepoint_ne, interpolate_scalar, set_interp_parameter, interp_init, & + get_interp_parameter + use coordinate_systems_mod, only : spherical_polar_t, cartesian2d_t + use dimensions_mod, only : nelemd, ne, np, npsq, nelem + use reduction_mod, only : ParallelMin,ParallelMax + use cube_mod, only : convert_gbl_index + use infnan, only : isnan + use dof_mod, only : CreateMetaData + use thread_mod, only: omp_get_thread_num + use datetime_mod, only: datetime + + + !use cam_history_support, only : fillvalue + + + type(parallel_t), intent(in) :: par + type(element_t), intent(in) :: elem(:) + character(len=*), intent(in) :: maptype + integer, intent(in) :: ncol + real(r8), intent(in) :: clat(ncol) + real(r8), intent(in) :: clon(ncol) + real(r8), intent(in) :: areaa(ncol) + + character(len=shr_kind_cl) :: mappingfile, fname + + + type(hybrid_t) :: hybrid + logical :: exist + + type (spherical_polar_t) :: sphere + type(file_desc_t) :: ogfile, agfile + type (interpdata_t) :: interpdata(nelemd) + integer :: ierr, dimid, npts, vid + real(r8), allocatable :: lat(:), lon(:) + integer :: i, ii, ie2, je2, ie, je, face_no, face_no2, k, j, n, ngrid, tpts, nf, number + real(r8) :: countx, count_max, count_total + integer :: fdofp(np,np,nelemd) + type (cartesian2D_t) :: cart + real(r8) :: f(np,np) + real(r8), allocatable :: h(:), h1d(:) + integer, allocatable :: grid_imask(:), row(:), col(:), ldof(:), dg_dims(:) + integer :: ns_dim, cnt, na_dim, nb_dim, sg_dim, dg_dim + type(var_desc_t) :: rowid, colid, sid, xca_id, yca_id, xcb_id, ycb_id, maskb_id, maska_id + type(var_desc_t) :: areaA_id, areaB_id, dg_id, sg_id + type(io_desc_t) :: iodesci, iodescd + character(len=12) :: unit_str + real(r8), allocatable :: areaB(:) + integer :: cntperelem_in(nelem), cntperelem_out(nelem) + integer :: ithr, dg_rank, substr1, substr2 + + 
type(interpdata_t), pointer :: mapping_interpolate(:) + character(len=8) :: cdate, ctime + integer :: olditype, oldnlat, oldnlon, itype + + !Remove once "official" CAMDEN fillvalue code has been developed -JN: + real(r8) :: fillvalue = 9.87e36_r8 + + if(.not. do_native_mapping) return + + if (maptype=='native') then + itype=0 + else if (maptype=='bilin') then + itype=1 + else + call endrun('bad interp_type') + endif + + + + + if(iam > par%nprocs) then + ! The special case of npes_se < npes_cam is not worth dealing with here + call endrun('Native mapping code requires npes_se==npes_cam') + end if + + + call interp_init() + + + oldnlon = get_interp_parameter('nlon') + oldnlat = get_interp_parameter('nlat') + olditype = get_interp_parameter('itype') + + call datetime(cdate, ctime) + + do nf=1,maxoutgrids + fname = native_mapping_outgrids(nf) + if(masterproc) then + write(iulog,*) 'looking for target grid = ',trim(fname) + endif + if(len_trim(fname)==0) cycle + inquire(file=fname,exist=exist) + if(.not. exist) then + write(iulog,*) 'WARNING: Could not find or open grid file ',fname + cycle + end if + if(masterproc) then + write(iulog,*) 'Creating ',trim(maptype),' mapping to grid ',fname + endif + call cam_pio_openfile( ogfile, fname, 0) + + ierr = pio_inq_dimid( ogfile, 'grid_size', dimid) + ierr = pio_inq_dimlen( ogfile, dimid, npts) + allocate(lat(npts), lon(npts), grid_imask(npts), areab(npts)) + + ierr = pio_inq_dimid( ogfile, 'grid_rank', dimid) + ierr = pio_inq_dimlen(ogfile, dimid, dg_rank) + allocate(dg_dims(dg_rank)) + ierr = pio_inq_varid( ogfile, 'grid_dims', vid) + ierr = pio_get_var( ogfile, vid, dg_dims) + + + ierr = pio_inq_varid( ogfile, 'grid_center_lat', vid) + ierr = pio_get_var(ogfile, vid, lat) + ierr = pio_get_att(ogfile, vid, 'units', unit_str) + + ierr = pio_inq_varid( ogfile, 'grid_center_lon', vid) + ierr = pio_get_var(ogfile, vid, lon) + + call pio_seterrorhandling(ogfile, PIO_BCAST_ERROR) + ierr = pio_inq_varid( ogfile, 'grid_area', vid) + call pio_seterrorhandling(ogfile, PIO_INTERNAL_ERROR) + if(ierr == PIO_NOERR) then + ierr = pio_get_var(ogfile, vid, areaB) + else + areaB=fillvalue + end if + + if(unit_str .eq. 'degrees') then + lat = lat * pi/180_r8 + lon = lon * pi/180_r8 + end if + + ierr = pio_inq_varid( ogfile, 'grid_imask', vid) + ierr = pio_get_var(ogfile, vid, grid_imask) + call pio_closefile(ogfile) + + do ie=1,nelemd + interpdata(ie)%n_interp=0 + end do + + call set_interp_parameter('itype',itype) ! itype=0 native, 1 for bilinear + if(lon(1)==lon(2)) then + call set_interp_parameter('nlon',dg_dims(1)) + call set_interp_parameter('nlat',dg_dims(2)) + else + call set_interp_parameter('nlon',dg_dims(2)) + call set_interp_parameter('nlat',dg_dims(1)) + end if + + + + + +! call setup_latlon_interp(elem, cam_interpolate, hybrid, 1, nelemd) + ! go through once, counting the number of points on each element + + sphere%r=1 + do i=1,npts + if(grid_imask(i)==1) then + sphere%lat=lat(i) + sphere%lon=lon(i) + call cube_facepoint_ne(sphere, ne, cart, number) ! new interface + if (number /= -1) then + do ii=1,nelemd + if (number == elem(ii)%vertex%number) then + interpdata(ii)%n_interp = interpdata(ii)%n_interp + 1 + exit + endif + enddo + endif + + + if(masterproc) then + if(mod(i,npts/10).eq.1) then + print *,'finished point ',i,' of ',npts + endif + end if + end if + enddo + + hybrid = config_thread_region(par,'serial') +! ithr=omp_get_thread_num() +! hybrid = hybrid_create(par,ithr,1) + + + + ! 
check if every point in interpolation grid was claimed by an element: + countx=sum(interpdata(1:nelemd)%n_interp) + global_shared_buf(1,1) = countx + call wrap_repro_sum(nvars=1, comm=hybrid%par%comm, nsize=1) + count_total = global_shared_sum(1) + tpts = sum(grid_imask) + if (count_total /= tpts ) then + write(iulog,*)__FILE__,__LINE__,iam, count_total, tpts, npts + call endrun('Error setting up interpolation grid count_total<>npts') + endif + + countx=maxval(interpdata(1:nelemd)%n_interp) + count_max = ParallelMax(countx,hybrid) + + if (masterproc) then + write(iulog,'(a,f8.1)') 'Average number of interpolation points per element: ',count_total/real(6*ne*ne) + write(iulog,'(a,f8.0)') 'Maximum number of interpolation points on any element: ',count_max + endif + + + ! allocate storage + do ii=1,nelemd + ngrid = interpdata(ii)%n_interp + allocate(interpdata(ii)%interp_xy( ngrid ) ) + allocate(interpdata(ii)%ilat( ngrid ) ) + allocate(interpdata(ii)%ilon( ngrid ) ) + interpdata(ii)%n_interp=0 ! reset counter + enddo + + ! now go through the list again, adding the coordinates + ! if this turns out to be slow, then it can be done in the loop above + ! but we have to allocate and possibly resize the interp_xy() array. + do i=1,npts + if(grid_imask(i)==1) then + sphere%lat=lat(i) + sphere%lon=lon(i) + call cube_facepoint_ne(sphere, ne, cart, number) ! new interface + if (number /= -1) then + do ii=1,nelemd + if (number == elem(ii)%vertex%number) then + ngrid = interpdata(ii)%n_interp + 1 + interpdata(ii)%n_interp = ngrid + interpdata(ii)%interp_xy( ngrid ) = cart + interpdata(ii)%ilon( ngrid ) = i + interpdata(ii)%ilat( ngrid ) = i + endif + enddo + endif + end if + end do + + + allocate(h(int(countx))) + allocate(h1d(int(countx)*npsq*nelemd)) + allocate(row(int(countx)*npsq*nelemd)) + allocate(col(int(countx)*npsq*nelemd)) + + row = 0 + col = 0 + + ngrid=0 + cntperelem_in=0 + call CreateMetaData(hybrid%par, elem, fdofp=fdofp) + + do ie=1,nelemd + ii=0 + do j=1,np + do i=1,np + ii=ii+1 + f = 0.0_R8 + f(i,j) = 1.0_R8 + h = 0 + call interpolate_scalar(interpdata(ie), f, np, 0, h(:)) + + do n=1,interpdata(ie)%n_interp + if(any(isnan(h ))) then + + call endrun('nan generated') + end if + if(h(n)/=0) then + ngrid=ngrid+1 + h1d(ngrid) = h(n) + row(ngrid) = interpdata(ie)%ilon(n) + col(ngrid) = fdofp(i,j,ie) + cntperelem_in(elem(ie)%Globalid)=cntperelem_in(elem(ie)%Globalid)+1 + end if + enddo + + enddo + end do + end do + + countx=ngrid + global_shared_buf(1,1) = countx + call wrap_repro_sum(nvars=1, comm=hybrid%par%comm, nsize=1) + count_total = global_shared_sum(1) + + + call mpi_allreduce(cntperelem_in, cntperelem_out, nelem, MPI_INTEGER, MPI_MAX, par%comm, ierr) + + + allocate(ldof(ngrid)) + ldof = 0 + ii=1 + do ie=1,nelemd + if(elem(ie)%GlobalID==1) then + cnt = 0 + else + cnt = sum(cntperelem_out(1:elem(ie)%globalid-1)) + endif + do i=1,cntperelem_out(elem(ie)%globalid) + ldof(ii) = cnt+i + ii=ii+1 + end do + end do + + deallocate(h) + + ngrid = int(count_total) + + substr1 = index(fname,'/',BACK=.true.) + substr2 = index(fname,'.nc',BACK=.true.) 
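+       ! Filename sketch (for reference): with ne=30, np=4, maptype='native', an
+       ! illustrative target file 'fv0.9x1.25.nc', and assuming datetime() returns
+       ! cdate in 'mm/dd/yy' form, the formats below yield a name such as
+       !   map_ne30np4_to_fv0.9x1.25_native_yymmdd.nc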
+ + if(ne<100) then + write(mappingfile,113) ne,np,fname(substr1+1:substr2-1),trim(maptype),cdate(7:8),cdate(1:2),cdate(4:5) + else if(ne<1000) then + write(mappingfile,114) ne,np,fname(substr1+1:substr2-1),trim(maptype),cdate(7:8),cdate(1:2),cdate(4:5) + else + write(mappingfile,115) ne,np,fname(substr1+1:substr2-1),trim(maptype),cdate(7:8),cdate(1:2),cdate(4:5) + end if + +113 format('map_ne',i2.2,'np',i1,'_to_',a,'_',a,'_',3a2,'.nc') +114 format('map_ne',i3.3,'np',i1,'_to_',a,'_',a,'_',3a2,'.nc') +115 format('map_ne',i4.4,'np',i1,'_to_',a,'_',a,'_',3a2,'.nc') + + call cam_pio_createfile( ogfile,mappingfile , 0) + + ierr = pio_def_dim( ogfile, 'n_a', ncol, na_dim) + ierr = pio_def_dim( ogfile, 'n_b', npts, nb_dim) + ierr = pio_def_dim( ogfile, 'n_s', ngrid, ns_dim) + + ierr = pio_def_dim( ogfile, 'src_grid_rank', 1, sg_dim) + ierr = pio_def_var( ogfile, 'src_grid_dims',pio_int, (/sg_dim/),sg_id) + + ierr = pio_def_dim( ogfile, 'dst_grid_rank',dg_rank, dg_dim) + ierr = pio_def_var( ogfile, 'dst_grid_dims',pio_int, (/dg_dim/),dg_id) + + + + + + ierr = pio_def_var( ogfile, 'col', pio_int, (/ns_dim/), colid) + ierr = pio_def_var( ogfile, 'row', pio_int, (/ns_dim/), rowid) + ierr = pio_def_var( ogfile, 'S', pio_double, (/ns_dim/), sid) + + ierr = pio_def_var( ogfile, 'xc_a', pio_double, (/na_dim/), xca_id) + ierr = pio_def_var( ogfile, 'yc_a', pio_double, (/na_dim/), yca_id) + + ierr = pio_def_var( ogfile, 'xc_b', pio_double, (/nb_dim/), xcb_id) + ierr = pio_def_var( ogfile, 'yc_b', pio_double, (/nb_dim/), ycb_id) + + ierr = pio_def_var( ogfile, 'area_a', pio_double, (/na_dim/), areaA_id) + ierr = pio_def_var( ogfile, 'area_b', pio_double, (/nb_dim/), areaB_id) + ierr = pio_put_att( ogfile, areaB_id, '_FillValue',fillvalue) + + ierr = pio_def_var( ogfile, 'mask_a', pio_int, (/na_dim/), maska_id) + ierr = pio_def_var( ogfile, 'mask_b', pio_int, (/nb_dim/), maskb_id) + + + + ierr = pio_put_att( ogfile, xca_id, 'units','radians') + ierr = pio_put_att( ogfile, yca_id, 'units','radians') + ierr = pio_put_att( ogfile, xcb_id, 'units','radians') + ierr = pio_put_att( ogfile, ycb_id, 'units','radians') + + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'title', 'SE NATIVE Regridding Weights') + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'normalization', 'none') + if (itype==0 ) then + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'map_method', 'Spectral-Element remapping') + else if (itype==1) then + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'map_method', 'Bilinear remapping') + endif + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'conventions', 'NCAR-CSM') + + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'grid_file_out', fname ) + ierr = pio_put_att( ogfile, PIO_GLOBAL, 'grid_file_atm', 'none - model generated') + + + ierr = pio_enddef ( ogfile ) + + ierr = pio_put_var(ogfile, sg_id, ncol) + ierr = pio_put_var(ogfile, dg_id, dg_dims(1:dg_rank)) + + + call pio_initdecomp( ogfile%iosystem, pio_int, (/ngrid/), ldof, iodesci) + call pio_initdecomp( ogfile%iosystem, pio_double, (/ngrid/), ldof, iodescd) + + call pio_write_darray(ogfile, colid, iodesci, col, ierr) + call pio_write_darray(ogfile, rowid, iodesci, row, ierr) + call pio_write_darray(ogfile, sid, iodescd, h1d, ierr) + + + ierr = pio_put_var(ogfile, xcb_id, lon) + ierr = pio_put_var(ogfile, ycb_id, lat) + + ierr = pio_put_var(ogfile, xca_id, clon) + ierr = pio_put_var(ogfile, yca_id, clat) + + ierr = pio_put_var(ogfile, maskb_id, grid_imask) + deallocate(grid_imask) + + ierr = pio_put_var(ogfile, areaA_id, areaA) + ierr = pio_put_var(ogfile, areaB_id, areaB) + 
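+    ! For reference: col, row, and S written above follow the usual SCRIP/ESMF
+    ! sparse-matrix convention, so applying the map to a source field amounts to
+    !   dst(row(n)) = dst(row(n)) + S(n)*src(col(n)),   n = 1, n_s
+    ! with n_a source (GLL) columns and n_b target grid points.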
deallocate(areaB) + + allocate(grid_imask(ncol)) + grid_imask=1 + + ierr = pio_put_var(ogfile, maska_id, grid_imask) + + call pio_closefile(ogfile) + + deallocate(grid_imask, lat,lon, h1d, col, row, dg_dims, ldof) + do ii=1,nelemd + if(associated(interpdata(ii)%interp_xy))then + deallocate(interpdata(ii)%interp_xy) + endif + if(associated(interpdata(ii)%ilat))then + deallocate(interpdata(ii)%ilat) + endif + if (associated(interpdata(ii)%ilon))then + deallocate(interpdata(ii)%ilon) + endif + end do + + + end do + + call set_interp_parameter('itype',olditype) + call set_interp_parameter('nlon',oldnlon) + call set_interp_parameter('nlat',oldnlat) + + + end subroutine create_native_mapping_files + + + + + +end module native_mapping diff --git a/src/dynamics/se/pmgrid.F90 b/src/dynamics/se/pmgrid.F90 new file mode 100644 index 00000000..fff3dbce --- /dev/null +++ b/src/dynamics/se/pmgrid.F90 @@ -0,0 +1,15 @@ +module pmgrid + +! PLON and PLAT do not correspond to the number of latitudes and longitudes in +! this version of dynamics. + +implicit none +save + +integer, parameter :: plev = PLEV ! number of vertical levels +integer, parameter :: plevp = plev + 1 + +integer, parameter :: plon = 1 +integer, parameter :: plat = 1 + +end module pmgrid diff --git a/src/dynamics/se/spmd_dyn.F90 b/src/dynamics/se/spmd_dyn.F90 new file mode 100644 index 00000000..f061f247 --- /dev/null +++ b/src/dynamics/se/spmd_dyn.F90 @@ -0,0 +1,34 @@ +module spmd_dyn + + !----------------------------------------------------------------------- + ! + ! Purpose: SPMD implementation of CAM SE finite element dynamics. + ! + !----------------------------------------------------------------------- + + implicit none + private + + public spmdbuf + + ! These variables are not used locally, but are set and used in phys_grid. + ! They probably should be moved there. + logical, public :: local_dp_map=.true. ! flag indicates that mapping between dynamics + ! and physics decompositions does not require + ! interprocess communication + integer, public :: block_buf_nrecs ! number of local grid points (lon,lat,lev) + ! in dynamics decomposition (including level 0) + integer, public :: chunk_buf_nrecs ! number of local grid points (lon,lat,lev) + ! in physics decomposition (including level 0) + ! assigned in phys_grid.F90 + +!======================================================================== +CONTAINS +!======================================================================== + +subroutine spmdbuf +end subroutine spmdbuf + +!======================================================================== + +end module spmd_dyn diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 new file mode 100644 index 00000000..5735e85d --- /dev/null +++ b/src/dynamics/se/stepon.F90 @@ -0,0 +1,420 @@ +module stepon + +use shr_kind_mod, only: r8 => SHR_KIND_R8 +use dyn_comp, only: dyn_import_t, dyn_export_t +use physics_types, only: physics_state, physics_tend +use spmd_utils, only: iam, mpicom +use perf_mod, only: t_startf, t_stopf, t_barrierf + +!SE dycore: +use parallel_mod, only: par + +implicit none +private +save + +public stepon_init +public stepon_run1 +public stepon_run2 +public stepon_run3 +public stepon_final + +!========================================================================================= +contains +!========================================================================================= + +subroutine stepon_init(dyn_in, dyn_out) + + ! Dummy arguments + type(dyn_import_t), intent(in) :: dyn_in ! 
Dynamics import container + type(dyn_export_t), intent(in) :: dyn_out ! Dynamics export container + +end subroutine stepon_init + +!========================================================================================= + +subroutine stepon_run1(dtime_out, phys_state, phys_tend, dyn_in, dyn_out) + + use time_manager, only: get_step_size + use cam_abortutils, only: endrun + use dp_coupling, only: d_p_coupling ! dynamics-physics coupling + + !SE dycore: + use time_mod, only: tstep ! dynamics timestep + + ! Dummy arguments + real(r8), intent(out) :: dtime_out ! Time-step (s) + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(physics_tend), intent(inout) :: phys_tend ! Physics tendency object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + !---------------------------------------------------------------------------- + + !Extract model time step in seconds from ESMF time manager: + dtime_out = get_step_size() + + !Ensure that the model and dynamics time-steps are positive values: + if (iam < par%nprocs) then + if (tstep <= 0) call endrun('stepon_run1: bad tstep') + if (dtime_out <= 0) call endrun('stepon_run1: bad dtime') + end if + + ! Synchronize all PEs and then transfer dynamics variables to physics: + call t_barrierf('sync_d_p_coupling', mpicom) + call t_startf('d_p_coupling') + ! Move data into phys_state structure. + call d_p_coupling(phys_state, phys_tend, dyn_out) + call t_stopf('d_p_coupling') + +end subroutine stepon_run1 + +!========================================================================================= + +subroutine stepon_run2(phys_state, phys_tend, dyn_in, dyn_out) + + !SE/CAM interface: + use dp_coupling, only: p_d_coupling + use dyn_grid, only: TimeLevel + + !SE dycore: + use time_mod, only: TimeLevel_Qdp + use control_mod, only: qsplit + use prim_advance_mod, only: calc_tot_energy_dynamics + + ! Dummy arguments + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(physics_tend), intent(inout) :: phys_tend ! Physics tendency object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + + ! Local variables + integer :: tl_f, tl_fQdp + !---------------------------------------------------------------------------- + + !Determine appropriate time values: + tl_f = TimeLevel%n0 ! timelevel which was adjusted by physics + call TimeLevel_Qdp(TimeLevel, qsplit, tl_fQdp) + + ! Synchronize all PEs and then transfer physics variables to dynamics: + call t_barrierf('sync_p_d_coupling', mpicom) + call t_startf('p_d_coupling') + ! copy from phys structures -> dynamics structures + call p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_fQdp) + call t_stopf('p_d_coupling') + + if (iam < par%nprocs) then + call calc_tot_energy_dynamics(dyn_in%elem,dyn_in%fvm, 1, nelemd, tl_f, tl_fQdp,'dED') + end if + +end subroutine stepon_run2 + +!========================================================================================= + +subroutine stepon_run3(dtime, cam_out, phys_state, dyn_in, dyn_out) + + use camsrfexch, only: cam_out_t + + !SE/CAM interface: + use dyn_comp, only: dyn_run + use dyn_grid, only: TimeLevel + use advect_tend, only: compute_adv_tends_xyz + + !SE dycore: + use time_mod, only: TimeLevel_Qdp + use control_mod, only: qsplit + + ! Dummy arguments + real(r8), intent(in) :: dtime ! 
Time-step + type(cam_out_t), intent(inout) :: cam_out ! Output from CAM to surface + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + + ! Local variables + integer :: tl_f, tl_fQdp + !-------------------------------------------------------------------------------------- + + ! Determine appropriate time values and + ! initalize advected constituent mixing ratios: + call t_startf('comp_adv_tends1') + tl_f = TimeLevel%n0 + call TimeLevel_Qdp(TimeLevel, qsplit, tl_fQdp) + call compute_adv_tends_xyz(dyn_in%elem,dyn_in%fvm,1,nelemd,tl_fQdp,tl_f) + call t_stopf('comp_adv_tends1') + + ! Synchronize all PEs and then run dynamics (dyn_run): + call t_barrierf('sync_dyn_run', mpicom) + call t_startf('dyn_run') + call dyn_run(dyn_out) + call t_stopf('dyn_run') + + ! Determine appropriate time values and + ! calculate constituent advection tendencies: + call t_startf('comp_adv_tends2') + tl_f = TimeLevel%n0 + call TimeLevel_Qdp(TimeLevel, qsplit, tl_fQdp) + call compute_adv_tends_xyz(dyn_in%elem,dyn_in%fvm,1,nelemd,tl_fQdp,tl_f) + call t_stopf('comp_adv_tends2') + +end subroutine stepon_run3 + +!========================================================================================= + +subroutine stepon_final(dyn_in, dyn_out) + + ! Dummy arguments + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + +end subroutine stepon_final + +!========================================================================================= + +!Remove once "outfld" is enabled in CAMDEN -JN: +#if 0 + +subroutine diag_dynvar_ic(elem, fvm) + !use constituents, only: cnst_type, cnst_name + use cam_history, only: write_inithist, outfld, hist_fld_active, fieldname_len + use dyn_grid, only: TimeLevel + !use physconst, only: get_sum_species, get_ps,thermodynamic_active_species_idx + !use physconst, only: thermodynamic_active_species_idx_dycore,get_dp_ref + use hycoef, only: hyai, hybi, ps0 + + !SE dycore: + use time_mod, only: TimeLevel_Qdp ! dynamics typestep + use control_mod, only: qsplit + use hybrid_mod, only: config_thread_region, get_loop_ranges + use hybrid_mod, only: hybrid_t + use dimensions_mod, only: np, npsq, nc, nhc, fv_nphys, qsize, ntrac, nlev + !use dimensions_mod, only: cnst_name_gll + use element_mod, only: element_t + use fvm_control_volume_mod, only: fvm_struct + use fvm_mapping, only: fvm2dyn + + ! Dummy arguments + type(element_t) , intent(in) :: elem(1:nelemd) + type(fvm_struct), intent(inout) :: fvm(:) + + ! Local variables + integer :: ie, i, j, k, m, m_cnst, nq + integer :: tl_f, tl_qdp + character(len=fieldname_len) :: tfname + + type(hybrid_t) :: hybrid + integer :: nets, nete + real(r8), allocatable :: ftmp(:,:,:) + real(r8), allocatable :: fld_fvm(:,:,:,:,:), fld_gll(:,:,:,:,:) + real(r8), allocatable :: fld_2d(:,:) + logical, allocatable :: llimiter(:) + real(r8) :: qtmp(np,np,nlev), dp_ref(np,np,nlev), ps_ref(np,np) + real(r8), allocatable :: factor_array(:,:,:) + !---------------------------------------------------------------------------- + + tl_f = timelevel%n0 + call TimeLevel_Qdp(TimeLevel, qsplit, tl_Qdp) + + allocate(ftmp(npsq,nlev,2)) + +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#if 0 + ! 
Output tracer fields for analysis of advection schemes + do m_cnst = 1, qsize + tfname = trim(cnst_name_gll(m_cnst))//'_gll' + if (hist_fld_active(tfname)) then + do ie = 1, nelemd + qtmp(:,:,:) = elem(ie)%state%Qdp(:,:,:,m_cnst,tl_qdp)/& + elem(ie)%state%dp3d(:,:,:,tl_f) + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,:,1) = elem(ie)%state%Qdp(i,j,:,m_cnst,tl_qdp)/& + elem(ie)%state%dp3d(i,j,:,tl_f) + end do + end do + call outfld(tfname, ftmp(:,:,1), npsq, ie) + end do + end if + end do + + do m_cnst = 1, qsize + tfname = trim(cnst_name_gll(m_cnst))//'dp_gll' + if (hist_fld_active(tfname)) then + do ie = 1, nelemd + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,:,1) = elem(ie)%state%Qdp(i,j,:,m_cnst,tl_qdp) + end do + end do + call outfld(tfname, ftmp(:,:,1), npsq, ie) + end do + end if + end do + +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#endif + + if (hist_fld_active('U_gll') .or. hist_fld_active('V_gll')) then + do ie = 1, nelemd + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,:,1) = elem(ie)%state%v(i,j,1,:,tl_f) + ftmp(i+(j-1)*np,:,2) = elem(ie)%state%v(i,j,2,:,tl_f) + end do + end do + call outfld('U_gll', ftmp(:,:,1), npsq, ie) + call outfld('V_gll', ftmp(:,:,2), npsq, ie) + end do + end if + + if (hist_fld_active('T_gll')) then + do ie = 1, nelemd + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,:,1) = elem(ie)%state%T(i,j,:,tl_f) + end do + end do + call outfld('T_gll', ftmp(:,:,1), npsq, ie) + end do + end if + + if (hist_fld_active('dp_ref_gll')) then + do ie = 1, nelemd + call get_dp_ref(hyai,hybi,ps0,1,np,1,np,1,nlev,elem(ie)%state%phis(:,:),dp_ref(:,:,:),ps_ref(:,:)) + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,:,1) = elem(ie)%state%dp3d(i,j,:,tl_f)/dp_ref(i,j,:) + end do + end do + call outfld('dp_ref_gll', ftmp(:,:,1), npsq, ie) + end do + end if + + if (hist_fld_active('PSDRY_gll')) then + do ie = 1, nelemd + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,1,1) = elem(ie)%state%psdry(i,j) + end do + end do + call outfld('PSDRY_gll', ftmp(:,1,1), npsq, ie) + end do + end if + + if (hist_fld_active('PS_gll')) then + allocate(fld_2d(np,np)) + do ie = 1, nelemd + call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),& + thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0) + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,1,1) = fld_2d(i,j) + end do + end do + call outfld('PS_gll', ftmp(:,1,1), npsq, ie) + end do + deallocate(fld_2d) + end if + + if (hist_fld_active('PHIS_gll')) then + do ie = 1, nelemd + call outfld('PHIS_gll', RESHAPE(elem(ie)%state%phis, (/np*np/)), np*np, ie) + end do + end if + + if (write_inithist()) then +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#if 0 + allocate(fld_2d(np,np)) + do ie = 1, nelemd + call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),& + thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0) + do j = 1, np + do i = 1, np + ftmp(i+(j-1)*np,1,1) = fld_2d(i,j) + end do + end do + call outfld('PS&IC', ftmp(:,1,1), npsq, ie) + end do + deallocate(fld_2d) + if (fv_nphys < 1) allocate(factor_array(np,np,nlev)) +#endif + + do ie = 1, nelemd + call outfld('T&IC', RESHAPE(elem(ie)%state%T(:,:,:,tl_f), (/npsq,nlev/)), npsq, ie) + call outfld('U&IC', RESHAPE(elem(ie)%state%v(:,:,1,:,tl_f), (/npsq,nlev/)), npsq, ie) + call outfld('V&IC', RESHAPE(elem(ie)%state%v(:,:,2,:,tl_f), (/npsq,nlev/)), npsq, ie) + +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#if 0 + if (fv_nphys < 1) then + call 
get_sum_species(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_qdp), & + thermodynamic_active_species_idx_dycore, factor_array,dp_dry=elem(ie)%state%dp3d(:,:,:,tl_f)) + factor_array(:,:,:) = 1.0_r8/factor_array(:,:,:) + do m_cnst = 1, qsize + if (cnst_type(m_cnst) == 'wet') then + call outfld(trim(cnst_name(m_cnst))//'&IC', & + RESHAPE(factor_array(:,:,:)*elem(ie)%state%Qdp(:,:,:,m_cnst,tl_qdp)/& + elem(ie)%state%dp3d(:,:,:,tl_f), (/npsq,nlev/)), npsq, ie) + else + call outfld(trim(cnst_name(m_cnst))//'&IC', & + RESHAPE(elem(ie)%state%Qdp(:,:,:,m_cnst,tl_qdp)/& + elem(ie)%state%dp3d(:,:,:,tl_f), (/npsq,nlev/)), npsq, ie) + end if + end do + end if +#endif + end do + +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#if 0 + if (fv_nphys > 0) then + !JMD $OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n) + !JMD hybrid = config_thread_region(par,'horizontal') + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid, ibeg=nets, iend=nete) + + allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete)) + allocate(fld_gll(np,np,nlev,ntrac,nets:nete)) + allocate(llimiter(ntrac)) + allocate(factor_array(nc,nc,nlev)) + llimiter = .true. + do ie = nets, nete + call get_sum_species(1,nc,1,nc,1,nlev,ntrac,fvm(ie)%c(1:nc,1:nc,:,:),thermodynamic_active_species_idx,factor_array) + factor_array(:,:,:) = 1.0_r8/factor_array(:,:,:) + do m_cnst = 1, ntrac + if (cnst_type(m_cnst) == 'wet') then + fld_fvm(1:nc,1:nc,:,m_cnst,ie) = fvm(ie)%c(1:nc,1:nc,:,m_cnst)*factor_array(:,:,:) + else + fld_fvm(1:nc,1:nc,:,m_cnst,ie) = fvm(ie)%c(1:nc,1:nc,:,m_cnst) + end if + end do + end do + + call fvm2dyn(fld_fvm, fld_gll, hybrid, nets, nete, nlev, ntrac, fvm(nets:nete), llimiter) + + do ie = nets, nete + do m_cnst = 1, ntrac + call outfld(trim(cnst_name(m_cnst))//'&IC', & + RESHAPE(fld_gll(:,:,:,m_cnst,ie), (/npsq,nlev/)), npsq, ie) + end do + end do + + deallocate(fld_fvm) + deallocate(fld_gll) + deallocate(llimiter) + end if + deallocate(factor_array) +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#endif + end if ! if (write_inithist) + + deallocate(ftmp) + +end subroutine diag_dynvar_ic + +!Remove once "outfld" is enabled in CAMDEN -JN: +#endif + +!========================================================================================= + +end module stepon diff --git a/src/dynamics/se/test_fvm_mapping.F90 b/src/dynamics/se/test_fvm_mapping.F90 new file mode 100644 index 00000000..262d4a12 --- /dev/null +++ b/src/dynamics/se/test_fvm_mapping.F90 @@ -0,0 +1,853 @@ +module test_fvm_mapping + use shr_kind_mod, only: r8=>shr_kind_r8 +! 
use cam_history, only: outfld + use physconst, only: pi + + !SE dycore: + use fvm_control_volume_mod, only: fvm_struct + use dimensions_mod, only: np, nelemd, nlev, npsq, ntrac + use element_mod, only: element_t + implicit none + private + + real(r8), parameter, private :: deg2rad = pi/180.0_r8 + real(r8), parameter, private :: psurf_moist = 100000.0_r8 !moist surface pressure + integer, parameter, private :: cl_idx = 12 + integer, parameter, private :: cl2_idx = 13 + real(r8), parameter, private :: cly_constant = 4.e-6_r8 + integer, parameter, private :: num_fnc = 26 + integer, parameter, private :: offset = 15 + + public :: test_mapping_overwrite_tendencies, test_mapping_addfld + public :: test_mapping_output_mapped_tendencies, test_mapping_overwrite_dyn_state + public :: test_mapping_output_phys_state +contains + + subroutine test_mapping_addfld + +#ifdef debug_coupling + use cam_history, only: addfld, add_default, horiz_only, register_vector_field + use constituents, only: cnst_get_ind,cnst_name + character(LEN=128) :: name + integer :: nq,m_cnst + + name = 'd2p_u_gll' + call addfld(trim(name), (/ 'lev' /), 'I','m/2','Exact zonal wind on GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_v_gll' + call addfld(trim(name), (/ 'lev' /), 'I','m/2','Exact meridional wind on GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_scalar_gll' + call addfld(trim(name), (/ 'lev' /), 'I','','Exact scalar on GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_u' + call addfld(trim(name), (/ 'lev' /), 'I','m/2','Zonal wind mapped to physics grid') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_u_err' + call addfld(trim(name), (/ 'lev' /), 'I','m/2','Error in zonal wind mapped to physics grid') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_v_err' + call addfld(trim(name), (/ 'lev' /), 'I','m/2','Error in meridional wind mapped to physics grid') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_v' + call addfld(trim(name), (/ 'lev' /), 'I','m/s','Meridional wind mapped to physics grid') + !call add_default (trim(name), 1, ' ') + + name = 'd2p_scalar' + call addfld(trim(name), (/ 'lev' /), 'I','','Scalar mapped to physics grid') + call add_default (trim(name), 1, ' ') + + name = 'd2p_scalar_err' + call addfld(trim(name), (/ 'lev' /), 'I','','Error in scalar mapped to physics grid') + call add_default (trim(name), 1, ' ') + + do nq=ntrac,ntrac + m_cnst = nq + name = 'f2p_'//trim(cnst_name(m_cnst))//'_fvm' + call addfld(trim(name), (/ 'lev' /), 'I','','Exact water tracer on fvm grid',gridname='FVM') + call add_default (trim(name), 1, ' ') + name = 'f2p_'//trim(cnst_name(m_cnst))//'_err' + call addfld(trim(name), (/ 'lev' /), 'I','','Error in water tracer on physics grid (mapped from fvm grid)') + call add_default (trim(name), 1, ' ') + name = 'f2p_'//trim(cnst_name(m_cnst))//'' + call addfld(trim(name), (/ 'lev' /), 'I','','Water tracer on physics grid (mapped from fvm grid') + call add_default (trim(name), 1, ' ') + ! + ! physgrid to gll (condensate loading tracers) + ! 
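+    ! Field-name prefixes used in this module (for reference):
+    !   d2p_* : dynamics (GLL) state mapped to the physics grid
+    !   p2d_* : physics tendencies/tracers mapped back to the GLL grid
+    !   f2p_* : fvm tracers mapped to the physics grid
+    !   p2f_* : physics tracers mapped to the fvm grid
+    ! Fields ending in '_err' hold the difference from the analytic test function.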
+ name = 'p2d_'//trim(cnst_name(m_cnst))//'' + call addfld(trim(name), (/ 'lev' /), 'I','','Water tracer on physics grid') + !call add_default (trim(name), 1, ' ') + name = 'p2d_'//trim(cnst_name(m_cnst))//'_gll' + call addfld(trim(name), (/ 'lev' /), 'I','','Water tracer on GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + name = 'p2d_'//trim(cnst_name(m_cnst))//'_err_gll' + call addfld(trim(name), (/ 'lev' /), 'I','','Error in water tracer mapped to GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + ! + ! physgrid to fvm (condensate loading tracers) + ! + name = 'p2f_'//trim(cnst_name(m_cnst))//'' + call addfld(trim(name), (/ 'lev' /), 'I','','Water tracer on physics grid') + call add_default (trim(name), 1, ' ') + name = 'p2f_'//trim(cnst_name(m_cnst))//'_fvm' + call addfld(trim(name), (/ 'lev' /), 'I','','Water tracer on FVM grid',gridname='FVM') + call add_default (trim(name), 1, ' ') + name = 'p2f_'//trim(cnst_name(m_cnst))//'_err_fvm' + call addfld(trim(name), (/ 'lev' /), 'I','','Error in water tracer mapped to FVM grid',gridname='FVM') + call add_default (trim(name), 1, ' ') + end do + ! + ! temperature tendency + ! + name = 'p2d_ptend' + call addfld(trim(name), (/ 'lev' /), 'I','','T tendency on physics grid') + !call add_default (trim(name), 1, ' ') + name = 'p2d_ptend_gll' + call addfld(trim(name), (/ 'lev' /), 'I','','T tendency on GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + name = 'p2d_ptend_err_gll' + call addfld(trim(name), (/ 'lev' /), 'I','','Error in T tendency mapped to GLL grid',gridname='GLL') + !call add_default (trim(name), 1, ' ') + + call addfld('p2d_u', (/ 'lev' /), 'I','m/2','Zonal wind on physics grid') + !call add_default ('p2d_u', 1, ' ') + call addfld('p2d_v', (/ 'lev' /), 'I','m/2','Meridional wind on physics grid') + !call add_default ('p2d_v', 1, ' ') + call addfld('p2d_u_gll', (/ 'lev' /), 'I','m/2','Zonal wind on physics grid',gridname='GLL') + !call add_default ('p2d_u_gll', 1, ' ') + call addfld('p2d_v_gll', (/ 'lev' /), 'I','m/2','Meridional wind on physics grid',gridname='GLL') + !call add_default ('p2d_v_gll', 1, ' ') + call addfld('p2d_u_gll_err', (/ 'lev' /), 'I','m/2','Error in zonal wind interpolation to GLL grid',gridname='GLL') + !call add_default ('p2d_u_gll_err', 1, ' ') + call addfld('p2d_v_gll_err', (/ 'lev' /), 'I','m/2','Error in meridional wind interpolation to GLL grid',& + gridname='GLL') + !call add_default ('p2d_v_gll_err', 1, ' ') + +! name = 'phys2dyn_'//trim(cnst_name(m_cnst))//'_physgrid' +! call outfld(trim(name),phys_state%q(:ncols,:,m_cnst),ncols,lchnk) +#endif + end subroutine test_mapping_addfld + + subroutine test_mapping_overwrite_tendencies(phys_state,phys_tend,ncols,q_prev,fvm) +! 
use constituents, only: cnst_get_ind,pcnst,cnst_name + use physics_types, only: physics_state, physics_tend + + !SE dycore: + use dimensions_mod, only: fv_nphys + + type(physics_state), intent(inout) :: phys_state + type(physics_tend), intent(inout) :: phys_tend + real(r8), dimension(:,:,:), intent(inout) :: q_prev + type(fvm_struct), intent(inout):: fvm(:) + integer, intent(in) :: ncols +#ifdef debug_coupling + integer :: icol,k + character(LEN=128) :: name + integer :: m_cnst, nq, ie + + q_prev(:,:,ntrac) = 0.0_r8 + do ie=1,nelemd +!xxx fvm(ie)%c(:,:,:,ntrac) = 0.0_r8 + end do + + phys_state%pdel(1:ncols,:) = phys_state%pdeldry(1:ncols,:) !make sure there is no conversion from wet to dry + do nq=ntrac,ntrac + m_cnst = nq + do icol=1,ncols + do k=1,num_fnc + phys_state%q(icol,k,m_cnst) = test_func(phys_state%lat(icol), phys_state%lon(icol), k, k) + end do + enddo + name = 'p2f_'//trim(cnst_name(m_cnst))//'' + call outfld(trim(name),phys_state%q(:ncols,:,m_cnst),ncols,lchnk) + name = 'p2d_'//trim(cnst_name(m_cnst))//'' + call outfld(trim(name),phys_state%q(:ncols,:,m_cnst),ncols,lchnk) + end do + + do icol=1,ncols + do k=ntrac,ntrac + phys_tend%dudt(icol,k) = test_func(phys_state%lat(icol), phys_state%lon(icol), k, k) + phys_tend%dvdt(icol,k) = test_func(phys_state%lat(icol), phys_state%lon(icol), k, k) + phys_tend%dtdt(icol,k) = test_func(phys_state%lat(icol), phys_state%lon(icol), k, k) + end do + enddo + name = 'p2d_u' + call outfld(trim(name),phys_tend%dudt(:ncols,:),ncols,lchnk) + name = 'p2d_v' + call outfld(trim(name),phys_tend%dvdt(:ncols,:),ncols,lchnk) + name = 'p2d_ptend' + call outfld(trim(name),phys_tend%dtdt(:ncols,:),ncols,lchnk) + + + do icol=1,ncols + do k=1,nlev +! phys_tend%dudt(icol,k) = 0.0_r8 +! phys_tend%dvdt(icol,k) = 0.0_r8 +! phys_tend%dtdt(icol,k) = 0.0_r8 + end do + enddo +#endif + end subroutine test_mapping_overwrite_tendencies + + subroutine test_mapping_output_mapped_tendencies(fvm,elem,nets,nete,tl_f,tl_qdp) +! use constituents, only: cnst_get_ind,cnst_name + + !SE dycore: + use dimensions_mod, only: fv_nphys,nlev,nc + + integer, intent(in) :: nets,nete,tl_f,tl_qdp + type(fvm_struct), intent(inout):: fvm(nets:nete) + type(element_t), intent(inout):: elem(nets:nete) ! 
pointer to dyn_out element array +#ifdef debug_coupling + integer :: ie,i,j,k + character(LEN=128) :: name + integer :: nq,m_cnst + real(r8) :: diff(nc,nc,nlev,ntrac) + diff = 0.0_r8 + do ie = nets,nete + call outfld('p2d_u_gll', RESHAPE(elem(ie)%derived%fm(:,:,1,:),(/npsq,nlev/)), npsq, ie) + call outfld('p2d_v_gll', RESHAPE(elem(ie)%derived%fm(:,:,2,:),(/npsq,nlev/)), npsq, ie) + call outfld('p2d_ptend_gll', RESHAPE(elem(ie)%derived%ft(:,:,:),(/npsq,nlev/)), npsq, ie) + do k=ntrac,ntrac + do j=1,np + do i=1,np + elem(ie)%derived%fm(i,j,1,k) = elem(ie)%derived%fm(i,j,1,k) -& + test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k,k) + elem(ie)%derived%fm(i,j,2,k) = elem(ie)%derived%fm(i,j,2,k) - & + test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k,k) + elem(ie)%derived%ft(i,j,k) = elem(ie)%derived%ft(i,j,k) - & + test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k,k) + end do + end do + end do + call outfld('p2d_u_gll_err' , RESHAPE(elem(ie)%derived%fm(:,:,1,:),(/npsq,nlev/)), npsq, ie) + call outfld('p2d_v_gll_err' , RESHAPE(elem(ie)%derived%fm(:,:,2,:),(/npsq,nlev/)), npsq, ie) + call outfld('p2d_ptend_err_gll', RESHAPE(elem(ie)%derived%ft(:,:,:),(/npsq,nlev/)), npsq, ie) + elem(ie)%derived%ft(:,:,:) = 0.0_r8 + end do + + do ie = nets,nete + do nq=ntrac,ntrac + m_cnst = nq + name = 'p2d_'//trim(cnst_name(m_cnst))//'_gll' + call outfld(TRIM(name), RESHAPE(elem(ie)%derived%fq(:,:,:,nq),(/npsq,nlev/)), npsq, ie) + ! call outfld(trim(name),& + ! RESHAPE(fvm(ie)%fc(1:nc,1:nc,:,m_cnst),& + ! (/nc*nc,nlev/)),nc*nc,ie) + do k=1,num_fnc + do j=1,np + do i=1,np + elem(ie)%derived%fq(i,j,k,nq) = elem(ie)%derived%fq(i,j,k,nq)-& + test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k, k) + end do + end do + end do + name = 'p2d_'//trim(cnst_name(m_cnst))//'_err_gll' + call outfld(TRIM(name), RESHAPE(elem(ie)%derived%fq(:,:,:,nq),(/npsq,nlev/)), npsq, ie) + end do + if (ntrac>0) then + do nq=ntrac,ntrac + m_cnst = nq + name = 'p2f_'//trim(cnst_name(m_cnst))//'_fvm' + ! + ! cly + ! +! k=num_tracer+1 +! fvm(ie)%fc(1:nc,1:nc,k,:) = fvm(ie)%fc(1:nc,1:nc,cl_idx,:)+& +! 2.0_r8*fvm(ie)%fc(1:nc,1:nc,cl2_idx,:) + call outfld(trim(name),& + RESHAPE(fvm(ie)%fc(1:nc,1:nc,:,m_cnst)/fvm(ie)%dp_fvm(1:nc,1:nc,:),& + (/nc*nc,nlev/)),nc*nc,ie) + do k=1,num_fnc + do j=1,nc + do i=1,nc + diff(i,j,k,m_cnst) = fvm(ie)%fc(i,j,k,m_cnst)/fvm(ie)%dp_fvm(i,j,k)-& + test_func(fvm(ie)%center_cart(i,j)%lat,fvm(ie)%center_cart(i,j)%lon, k, k) + end do + end do + end do + name = 'p2f_'//trim(cnst_name(m_cnst))//'_err_fvm' + call outfld(TRIM(name), RESHAPE(diff(:,:,:,m_cnst),(/nc*nc,nlev/)), nc*nc, ie) + + end do + endif + end do +#endif + end subroutine test_mapping_output_mapped_tendencies + + subroutine test_mapping_overwrite_dyn_state(elem,fvm) +! use constituents, only: cnst_name + + !SE dycore: + use fvm_control_volume_mod, only: fvm_struct + use dimensions_mod, only: nc,nhc + use hybrid_mod, only: get_loop_ranges, hybrid_t,config_thread_region + use control_mod, only: north, south, east, west, neast, nwest, seast, swest + use fvm_mod, only: fill_halo_fvm,ghostBufQnhc_h + use parallel_mod, only: par + + type (fvm_struct), intent(inout) :: fvm(:) + type(element_t), intent(inout) :: elem(:) ! 
pointer to dyn_out element array +#ifdef debug_coupling + integer :: i,j,k,ie,nq,m_cnst + character(LEN=128) :: name + integer :: nets,nete + type(hybrid_t) :: hybrid + + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid,ibeg=nets,iend=nete) + do ie=nets,nete + do nq=ntrac,ntrac + m_cnst = nq + name = 'f2p_'//trim(cnst_name(m_cnst))//'_fvm' + do k=1,num_fnc + do j=1,nc + do i=1,nc + fvm(ie)%c(i,j,k,m_cnst) = test_func(fvm(ie)%center_cart(i,j)%lat,fvm(ie)%center_cart(i,j)%lon, k, k) + end do + end do + end do + ! + ! cly + ! +! k=num_tracer+1 +! do j=1,nc +! do i=1,nc +! fvm(ie)%c(i,j,k,m_cnst) = test_func(fvm(ie)%center_cart(i,j)%lat,fvm(ie)%center_cart(i,j)%lon, k,cl_idx)+& +! 2.0_r8*test_func(fvm(ie)%center_cart(i,j)%lat,fvm(ie)%center_cart(i,j)%lon, k,cl2_idx) +! end do +! end do + call outfld(TRIM(name), RESHAPE(fvm(ie)%c(1:nc,1:nc,:,m_cnst),(/nc*nc,nlev/)), nc*nc, ie) + end do + + elem(ie)%state%Qdp(:,:,:,:,:) = 0.0_r8 !for testing the p2d map + do k=1,num_fnc + do j=1,np + do i=1,np + elem(ie)%state%v(i,j,1,k,:) = test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k, k ) + elem(ie)%state%v(i,j,2,k,:) = test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k, k) + end do + end do + end do + do k=1,num_fnc + do j=1,np + do i=1,np + elem(ie)%derived%omega(i,j,k) = test_func(elem(ie)%spherep(i,j)%lat, elem(ie)%spherep(i,j)%lon, k, k) + end do + end do + end do + call outfld('d2p_scalar_gll', RESHAPE(elem(ie)%derived%omega(:,:,:) ,(/npsq,nlev/)), npsq, ie) + call outfld('d2p_u_gll', RESHAPE(elem(ie)%state%v(:,:,1,:,1),(/npsq,nlev/)), npsq, ie) + call outfld('d2p_v_gll', RESHAPE(elem(ie)%state%v(:,:,2,:,1),(/npsq,nlev/)), npsq, ie) + end do + ! + ! do boundary exchange (this call should be indentical to call in prim_driver) + ! + call fill_halo_fvm(ghostBufQnhc_h,elem,fvm,hybrid,nets,nete,nhc,1,nlev,nlev) + do ie=nets,nete + if (fvm(ie)%cubeboundary>4) then + do k=ntrac,ntrac + select case(fvm(ie)%cubeboundary) + case (nwest) + fvm(ie)%c(0,nc+1,:,k) = fvm(ie)%c(1,nc+1,:,k) + case (swest) + fvm(ie)%c(0,0,:,k) = fvm(ie)%c(0,1,:,k) + case (seast) + fvm(ie)%c(nc+1,0,:,k) = fvm(ie)%c(0,nc,:,k) + case (neast) + fvm(ie)%c(nc+1,nc+1,:,k) = fvm(ie)%c(nc,nc+1,:,k) + end select + end do + end if + end do +! call fill_halo_fvm_noprealloc(elem,fvm,hybrid,nets,nete,nhc,1,nlev)!xxx nhr chould be a function of interp_method +#endif + end subroutine test_mapping_overwrite_dyn_state + + subroutine test_mapping_output_phys_state(phys_state,fvm) + use physics_types, only: physics_state +! use ppgrid, only: begchunk, endchunk, pver, pcols +! use constituents, only: cnst_get_ind,cnst_name + + type(physics_state), intent(inout) :: phys_state(begchunk:endchunk) + type(fvm_struct), pointer:: fvm(:) +#ifdef debug_coupling + integer :: lchnk, ncol,k,icol,m_cnst,nq,ie + character(LEN=128) :: name + + do ie=1,nelemd +!xxx fvm(ie)%c(:,:,:,ntrac) = 0.0_r8 + end do + + do lchnk = begchunk, endchunk + call outfld('d2p_scalar', phys_state(lchnk)%omega(1:pcols,1:pver), pcols, lchnk) + call outfld('d2p_u', phys_state(lchnk)%U(1:pcols,1:pver), pcols, lchnk) + call outfld('d2p_v', phys_state(lchnk)%V(1:pcols,1:pver), pcols, lchnk) + if (ntrac>0) then + do nq=ntrac,ntrac + m_cnst = nq + name = 'f2p_'//trim(cnst_name(m_cnst)) + ! + ! cly + ! + !phys_state(lchnk)%q(1:pcols,num_tracer+1,m_cnst)=phys_state(lchnk)%q(1:pcols,cl_idx,m_cnst)+& + ! 2.0_r8*phys_state(lchnk)%q(1:pcols,12,m_cnst) + call outfld(TRIM(name), phys_state(lchnk)%q(1:pcols,1:pver,m_cnst), pcols, lchnk) +! 
k=num_tracer+1 +! do icol=1,phys_state(lchnk)%ncol +! phys_state(lchnk)%q(icol,k,m_cnst) = phys_state(lchnk)%q(icol,cl_idx,m_cnst)+& +! 2.0_r8*phys_state(lchnk)%q(icol,cl2_idx,m_cnst)-& +! cly_constant +! end do + do k=1,num_fnc + do icol=1,phys_state(lchnk)%ncol + phys_state(lchnk)%q(icol,k,m_cnst) = phys_state(lchnk)%q(icol,k,m_cnst)& + -test_func(phys_state(lchnk)%lat(icol), phys_state(lchnk)%lon(icol), k,k) + end do + enddo + name = 'f2p_'//trim(cnst_name(m_cnst))//'_err' + call outfld(TRIM(name), phys_state(lchnk)%q(1:pcols,1:pver,m_cnst), pcols, lchnk) + phys_state(lchnk)%q(1:pcols,1:pver,m_cnst) = 0.0_r8 + end do + end if + end do + + + do lchnk = begchunk, endchunk + do k=1,nlev + do icol=1,phys_state(lchnk)%ncol + phys_state(lchnk)%U(icol,k) = phys_state(lchnk)%U(icol,k)& + -test_func(phys_state(lchnk)%lat(icol), phys_state(lchnk)%lon(icol), k, 9) + phys_state(lchnk)%V(icol,k) = phys_state(lchnk)%V(icol,k)& + -test_func(phys_state(lchnk)%lat(icol), phys_state(lchnk)%lon(icol), k,10) + end do + enddo + name = 'd2p_u_err' + call outfld(trim(name),phys_state(lchnk)%U(:pcols,:),pcols,lchnk) + name = 'd2p_v_err' + call outfld(trim(name),phys_state(lchnk)%V(:pcols,:),pcols,lchnk) + do k=1,num_fnc + do icol=1,phys_state(lchnk)%ncol + phys_state(lchnk)%omega(icol,k) = phys_state(lchnk)%omega(icol,k)& + -test_func(phys_state(lchnk)%lat(icol), phys_state(lchnk)%lon(icol), k,k) + end do + end do + name = 'd2p_scalar_err' + call outfld(trim(name),phys_state(lchnk)%omega(:pcols,:),pcols,lchnk) + end do +#endif + end subroutine test_mapping_output_phys_state + +#ifdef debug_coupling + function test_func(lat_in, lon_in, k, funcnum) result(fout) + use hycoef, only: hyai, hybi, hyam, hybm, ps0 + use shr_sys_mod, only: shr_sys_flush + use cam_abortutils, only: endrun + real(r8), intent(in) :: lon_in + real(r8), intent(in) :: lat_in + integer, intent(in) :: k + integer, intent(in) :: funcnum + real(r8) :: fout + real(r8) :: lon1,lat1,R0,Rg1,Rg2,lon2,lat2,cl,cl2 + real(r8) :: eta_c + + real(r8) :: radius = 10.0_r8 ! radius of the perturbation + real(r8) :: perturb_lon = 20.0_r8 ! longitudinal position, 20E + real(r8) :: perturb_lat = 40.0_r8 ! latitudinal position, 40N + real(r8) :: cos_tmp, sin_tmp, eta + real(r8) :: u_wind, v_wind, lat, lon, u_tmp, v_tmp + real(r8) :: rotation_angle + real(r8) :: det,r,k1,k2 + real(r8), parameter :: pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164_r8 + real(r8), parameter :: half_pi = pi*0.5_r8 + real(r8), parameter :: degrees_to_radians = pi/180.0_r8 + real(r8), parameter :: k1_lat_center = 20.d0*degrees_to_radians + real(r8), parameter :: k1_lon_center = 300.d0*degrees_to_radians + + lon = lon_in + lat = lat_in + + + select case(MOD(funcnum,8)+1) + case(1) + ! + ! Non-smooth scalar field (slotted cylinder) + ! + R0 = 0.5_r8 + lon1 = 5.0_r8 * PI / 6.0_r8 + lat1 = 0.0_r8 + Rg1 = acos(sin(lat1)*sin(lat)+cos(lat1)*cos(lat)*cos(lon-lon1)) + lon2 = 7.0_r8 * PI / 6.0_r8 + lat2 = 0.0_r8 + Rg2 = acos(sin(lat2)*sin(lat)+cos(lat2)*cos(lat)*cos(lon-lon2)) + + if ((Rg1 <= R0) .AND. (abs(lon-lon1) >= R0/6)) then + fout = 2.0_r8 + elseif ((Rg2 <= R0) .AND. (abs(lon-lon2) >= R0/6)) then + fout = 2.0_r8 + elseif ((Rg1 <= R0) .AND. (abs(lon-lon1) < R0/6) & + .AND. (lat-lat1 < -5.0_r8*R0/12.0_r8)) then + fout = 2.0_r8 + elseif ((Rg2 <= R0) .AND. (abs(lon-lon2) < R0/6) & + .AND. (lat-lat2 > 5.0_r8*R0/12.0_r8)) then + fout = 2.0_r8 + else + fout = 1.0_r8 + endif + case(2) + ! + ! Smooth Gaussian "ball" + ! + R0 = 10.0_r8 ! 
radius of the perturbation + lon1 = 20.0_r8*deg2rad ! longitudinal position, 20E + lat1 = 40.0_r8 *deg2rad ! latitudinal position, 40N + eta_c = 0.6_r8 + sin_tmp = SIN(lat1)*SIN(lat) + cos_tmp = COS(lat1)*COS(lat) + Rg1 = ACOS( sin_tmp + cos_tmp*COS(lon-lon1) ) ! great circle distance + eta = (hyam(k)*ps0 + hybm(k)*psurf_moist)/psurf_moist + fout = EXP(- ((Rg1*R0)**2 + ((eta-eta_c)/0.1_r8)**2)) + if (ABS(fout) < 1.0E-8_r8) then + fout = 0.0_r8 + end IF + case(3) + ! + ! + ! + fout = 0.5_r8 * ( tanh( 3.0_r8*abs(lat)-pi ) + 1.0_r8) + case(4) + fout = 2.0_r8+cos(5.0_r8+40*lon)!1.0e-8_r8 + fout = -0.5_r8-0.5_r8*(cos(16*lon)*(sin(2_r8*lat)**16)) + case(5) + ! + ! approximately Y^2_2 spherical harmonic + ! + fout = sin(lon)*cos(40*lat)!1.0e-8_r8 + fout = 0.5_r8*(cos(16*lon)*(sin(2_r8*lat)**16)) + case(6) + ! + ! approximately Y32_16 spherical harmonic + ! + fout = 0.5_r8 + 0.5_r8*(cos(16*lon)*(sin(2_r8*lat)**16)) + case(7) + fout = 2.0_r8 + lat + case(8) + fout = 2.0_r8 + cos(lon) + case(9) + rotation_angle = 45.0_r8*pi/180.0_r8 + CALL regrot(lon_in,lat_in,lon,lat,0.0_r8,-0.5_r8*pi+rotation_angle,1) + call Rossby_Haurwitz (lon, lat,u_wind, v_wind) + CALL turnwi(u_wind,v_wind,u_tmp,v_tmp,lon_in,lat_in,lon,lat,0.0_r8,-0.5_r8*pi+rotation_angle,-1) + fout = u_tmp + case(10) + rotation_angle = 45.0_r8*pi/180.0_r8 + CALL regrot(lon_in,lat_in,lon,lat,0.0_r8,-0.5_r8*pi+rotation_angle,1) + call Rossby_Haurwitz (lon, lat,u_wind, v_wind) + CALL turnwi(u_wind,v_wind,u_tmp,v_tmp,lon_in,lat_in,lon,lat,0.0_r8,-0.5_r8*pi+rotation_angle,-1) + fout = v_tmp + case(11) + fout = 1.0E-8_r8 + case(12) + ! + ! Terminator chemistry initial condition + ! + k1 = 1.0_r8*max(0.d0,sin(lat)*sin(k1_lat_center) + cos(lat)*cos(k1_lat_center)*cos(lon-k1_lon_center)) + k2 = 1._r8 + + r = k1 / (4._r8*k2) + det = sqrt(r*r + 2._r8*cly_constant*r) + + fout = (det-r) +! fout = cly_constant/2._r8 - (det-r)/2._r8 + case(13) + ! + ! Terminator chemistry initial condition + ! + k1 = 1.0_r8*max(0.d0,sin(lat)*sin(k1_lat_center) + cos(lat)*cos(k1_lat_center)*cos(lon-k1_lon_center)) + k2 = 1._r8 + + r = k1 / (4._r8*k2) + det = sqrt(r*r + 2._r8*cly_constant*r) + +! fout = (det-r) + fout = cly_constant/2._r8 - (det-r)/2._r8 + case default +! call endrun("Illegal funcnum_arg in test_func") + fout = 1.0_r8 + end select + end function test_func + + function test_wind(lat, lon, iwind) result(fout) + use cam_abortutils, only: endrun + real(r8), intent(in) :: lon + real(r8), intent(in) :: lat + integer, intent(in) :: iwind + + real(r8) :: fout + + + fout = 0 + end function test_wind + + + SUBROUTINE regrot(pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kcall) + use physconst, only: pi +! +!---------------------------------------------------------------------- +! +!* conversion between regular and rotated spherical coordinates. +!* +!* pxreg longitudes of the regular coordinates +!* pyreg latitudes of the regular coordinates +!* pxrot longitudes of the rotated coordinates +!* pyrot latitudes of the rotated coordinates +!* all coordinates given in degrees n (negative for s) +!* and degrees e (negative values for w) +!* pxcen regular longitude of the south pole of the rotated grid +!* pycen regular latitude of the south pole of the rotated grid +!* +!* kcall=-1: find regular as functions of rotated coordinates. +!* kcall= 1: find rotated as functions of regular coordinates. +! +!----------------------------------------------------------------------- +! + integer kxdim,kydim,kx,ky,kcall + real(r8) :: pxreg,pyreg,& + pxrot,pyrot,& + pxcen,pycen +! 
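+! Usage sketch (for reference): the rotated-pole cases (9 and 10) in test_func
+! call
+!   call regrot(lon_in, lat_in, lon, lat, 0.0_r8, -0.5_r8*pi+rotation_angle, 1)
+! to obtain rotated coordinates, evaluate the Rossby-Haurwitz winds there, and
+! then call turnwi with kcall=-1 to rotate the wind components back. Note that
+! the callers in this module pass angles in radians.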
+!----------------------------------------------------------------------- +! + real(r8) zsycen,zcycen,zxmxc,zsxmxc,zcxmxc,zsyreg,zcyreg, & + zsyrot,zcyrot,zcxrot,zsxrot,zpi,zpih + integer jy,jx + + zpih = pi*0.5_r8 +! + !---------------------------------------------------------------------- +! + zsycen = SIN((pycen+zpih)) + zcycen = COS((pycen+zpih)) +! + IF (kcall.eq.1) then +! + zxmxc = pxreg - pxcen + zsxmxc = SIN(zxmxc) + zcxmxc = COS(zxmxc) + zsyreg = SIN(pyreg) + zcyreg = COS(pyreg) + zsyrot = zcycen*zsyreg - zsycen*zcyreg*zcxmxc + zsyrot = max(zsyrot,-1.0_r8) + zsyrot = min(zsyrot,+1.0_r8) + ! + pyrot = ASIN(zsyrot) + ! + zcyrot = COS(pyrot) + zcxrot = (zcycen*zcyreg*zcxmxc +zsycen*zsyreg)/zcyrot + zcxrot = max(zcxrot,-1.0_r8) + zcxrot = min(zcxrot,+1.0_r8) + zsxrot = zcyreg*zsxmxc/zcyrot + ! + pxrot = ACOS(zcxrot) + ! + IF (zsxrot < 0.0_r8) then + pxrot = -pxrot + end IF + ! + ELSEIF (kcall.eq.-1) then + ! + zsxrot = SIN(pxrot) + zcxrot = COS(pxrot) + zsyrot = SIN(pyrot) + zcyrot = COS(pyrot) + zsyreg = zcycen*zsyrot + zsycen*zcyrot*zcxrot + zsyreg = max(zsyreg,-1.0_r8) + zsyreg = min(zsyreg,+1.0_r8) + ! + pyreg = ASIN(zsyreg) + ! + zcyreg = COS(pyreg) + zcxmxc = (zcycen*zcyrot*zcxrot -& + zsycen*zsyrot)/zcyreg + zcxmxc = max(zcxmxc,-1.0_r8) + zcxmxc = min(zcxmxc,+1.0_r8) + zsxmxc = zcyrot*zsxrot/zcyreg + zxmxc = ACOS(zcxmxc) + IF (zsxmxc < 0.0_r8) then + zxmxc = -zxmxc + end IF + ! + pxreg = zxmxc + pxcen + ! + ELSE + WRITE(6,'(1x,''invalid kcall in regrot'')') + STOP + ENDIF + END SUBROUTINE regrot + + SUBROUTINE turnwi(puarg,pvarg,pures,pvres,pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kcall) + use physconst, only: pi + ! + !----------------------------------------------------------------------- + ! + !* turn horizontal velocity components between regular and + !* rotated spherical coordinates. + ! + !* puarg : input u components + !* pvarg : input v components + !* pures : output u components + !* pvres : output v components + !* pa : transformation coefficients + !* pb : -"- + !* pc : -"- + !* pd : -"- + !* pxreg : regular longitudes + !* pyreg : regular latitudes + !* pxrot : rotated longitudes + !* pyrot : rotated latitudes + !* kxdim : dimension in the x (longitude) direction + !* kydim : dimension in the y (latitude) direction + !* kx : number of gridpoints in the x direction + !* ky : number of gridpoints in the y direction + !* pxcen : regular longitude of the south pole of the + !* transformed grid + !* pycen : regular latitude of the south pole of the + !* transformed grid + !* + !* kcall < 0 : find wind components in regular coordinates + !* from wind components in rotated coordinates + !* kcall > 0 : find wind components in rotated coordinates + !* from wind components in regular coordinates + !* note that all coordinates are given in degrees n and degrees e. + !* (negative values for s and w) + ! + !----------------------------------------------------------------------- + + integer kxdim,kydim,kx,ky,kcall + real(r8) puarg,pvarg, & + pures,pvres, & + pa, pb, & + pc, pd, & + pxreg,pyreg, & + pxrot,pyrot + real(r8) pxcen,pycen + ! + !----------------------------------------------------------------------- + ! + integer jy,jx + real(r8) zpih,zsyc,zcyc,zsxreg,zcxreg,zsyreg,zcyreg,zxmxc,& + zsxmxc,zcxmxc,zsxrot,zcxrot,zsyrot,zcyrot + ! + !----------------------------------------------------------------------- + ! + IF (kcall.eq.1) then + zpih = pi*0.5_r8 + zsyc = SIN(pycen+zpih) + zcyc = COS(pycen+zpih) + ! 
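+ ! For reference: the coefficients pa, pb, pc, pd computed below form the 2x2
+ ! rotation matrix applied to the wind components:
+ !   pures = pa*puarg + pb*pvarg
+ !   pvres = pc*puarg + pd*pvarg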
+ zsxreg = SIN(pxreg) + zcxreg = COS(pxreg) + zsyreg = SIN(pyreg) + zcyreg = COS(pyreg) + ! + zxmxc = pxreg - pxcen + zsxmxc = SIN(zxmxc) + zcxmxc = COS(zxmxc) + ! + zsxrot = SIN(pxrot) + zcxrot = COS(pxrot) + zsyrot = SIN(pyrot) + zcyrot = COS(pyrot) + ! + pa = zcyc*zsxmxc*zsxrot + zcxmxc*zcxrot + pb = zcyc*zcxmxc*zsyreg*zsxrot - zsyc*zcyreg*zsxrot - & + zsxmxc*zsyreg*zcxrot + pc = zsyc*zsxmxc/zcyrot + pd = (zsyc*zcxmxc*zsyreg + zcyc*zcyreg)/zcyrot + ! + pures = pa*puarg + pb*pvarg + pvres = pc*puarg + pd*pvarg + ELSEIF (kcall.eq.-1) then + zpih = pi*0.5_r8 + zsyc = SIN(pycen+zpih) + zcyc = COS(pycen+zpih) + ! + zsxreg = SIN(pxreg) + zcxreg = COS(pxreg) + zsyreg = SIN(pyreg) + zcyreg = COS(pyreg) + ! + zxmxc = pxreg - pxcen + zsxmxc = SIN(zxmxc) + zcxmxc = COS(zxmxc) + ! + zsxrot = SIN(pxrot) + zcxrot = COS(pxrot) + zsyrot = SIN(pyrot) + zcyrot = COS(pyrot) + ! + pa = zcxmxc*zcxrot + zcyc*zsxmxc*zsxrot + pb = zcyc*zsxmxc*zcxrot*zsyrot + zsyc*zsxmxc*zcyrot -& + zcxmxc*zsxrot*zsyrot + pc =-zsyc*zsxrot/zcyreg + pd = (zcyc*zcyrot - zsyc*zcxrot*zsyrot)/zcyreg + ! + pures = pa*puarg + pb*pvarg + pvres = pc*puarg + pd*pvarg + ELSE + write(6,'(1x,''invalid kcall in turnwi'')') + STOP + ENDIF + END SUBROUTINE turnwi + + SUBROUTINE Rossby_Haurwitz (lon, lat,u_wind, v_wind) + use physconst, only: rearth +!----------------------------------------------------------------------- +! input parameters +!----------------------------------------------------------------------- + real(r8), intent(in) :: lon, & ! longitude in radians + lat ! latitude in radians + ! both coefficients 'a' and 'b' are needed at the full model level +!----------------------------------------------------------------------- +! input parameters +!----------------------------------------------------------------------- + real(r8), intent(out) :: u_wind, & ! zonal wind in m/s + v_wind ! meridional wind in m/s + +!----------------------------------------------------------------------- +! test case parameters +!----------------------------------------------------------------------- + real(r8),parameter :: u0 = 50._r8, & ! reference wind + n = 4._r8 ! wavenumber + +!----------------------------------------------------------------------- +! local +!----------------------------------------------------------------------- + real(r8) :: tmp1, tmp2, tmp3, KK, MM + real(r8) :: sin_lat, cos_lat, sin_slat, cos_slat + +!----------------------------------------------------------------------- +! initialize the wind components +!----------------------------------------------------------------------- + MM = u0/(n*rearth) ! parameter M + KK = u0/(n*rearth) ! parameter K + + + cos_lat = cos(lat) + sin_lat = sin(lat) + tmp1 = rearth * MM * cos_lat + tmp2 = rearth * KK * cos_lat**(n-1._r8)*(n*sin_lat**2 - cos_lat**2) + tmp3 = -rearth * KK * n * cos_lat**(n-1._r8) * sin_lat + u_wind = tmp1 + tmp2 * cos(n*lon) + v_wind = tmp3 * sin(n*lon) + end subroutine Rossby_Haurwitz + +#endif +end module test_fvm_mapping diff --git a/src/dynamics/tests/dyn_tests_utils.F90 b/src/dynamics/tests/dyn_tests_utils.F90 new file mode 100644 index 00000000..3a3596b0 --- /dev/null +++ b/src/dynamics/tests/dyn_tests_utils.F90 @@ -0,0 +1,23 @@ +module dyn_tests_utils +!----------------------------------------------------------------------- +! +! Utility data (and code) for dynamics testing +! +! The public items in this module are items used both by internal code +! (e.g., analytic initial conditions) and by infrastructure which uses +! the internal code (e.g., read_inidat). 
They cannot be members of the +! internal code because that is conditionally compiled. +! +!----------------------------------------------------------------------- + + + implicit none + private + save + + integer, parameter :: vc_moist_pressure = 0 ! Moist pressure vertical coord + integer, parameter :: vc_dry_pressure = 1 ! Dry pressure vertical coord + integer, parameter :: vc_height = 2 ! Height vertical coord + public :: vc_moist_pressure, vc_dry_pressure, vc_height + +end module dyn_tests_utils diff --git a/src/dynamics/tests/inic_analytic.F90 b/src/dynamics/tests/inic_analytic.F90 new file mode 100644 index 00000000..bf19dff6 --- /dev/null +++ b/src/dynamics/tests/inic_analytic.F90 @@ -0,0 +1,623 @@ +module inic_analytic + + !----------------------------------------------------------------------- + ! + ! Purpose: Set analytic initial conditions based on input coordinates + ! + ! + !----------------------------------------------------------------------- + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8 + use cam_abortutils, only: endrun + use shr_sys_mod, only: shr_sys_flush + use inic_analytic_utils, only: analytic_ic_active, analytic_ic_type + + implicit none + private + + public :: analytic_ic_active ! forwarded from init_analytic_utils + public :: analytic_ic_set_ic ! Set analytic initial conditions + + interface analytic_ic_set_ic + module procedure dyn_set_inic_cblock + end interface analytic_ic_set_ic + + ! Private module variables + integer :: call_num = 0 + + ! Private interface +#ifdef ANALYTIC_IC + interface get_input_shape + module procedure get_input_shape_2d + module procedure get_input_shape_3d + end interface get_input_shape +#endif + +!============================================================================== +CONTAINS +!============================================================================== + + subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & + PS, PHIS_IN, PHIS_OUT, Q, m_cnst, mask, verbose) + use cam_initfiles, only: pertlim +#ifdef ANALYTIC_IC + use ic_held_suarez, only: hs94_set_ic + use ic_baroclinic, only: bc_wav_set_ic + use ic_baro_dry_jw06, only: bc_dry_jw06_set_ic + use ic_us_standard_atmosphere, only: us_std_atm_set_ic +#endif + use spmd_utils, only: masterproc + !----------------------------------------------------------------------- + ! + ! Purpose: Set analytic initial values for dynamics state variables + ! + !----------------------------------------------------------------------- + + ! Dummy arguments + integer , intent(in) :: vcoord ! See dyn_tests_utils + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + integer, intent(in) :: glob_ind(:) ! global column index + real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:) ! surface pressure + real(r8), optional, intent(in) :: PHIS_IN(:) ! surface geopotential + real(r8), optional, intent(out) :: PHIS_OUT(:) ! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! Only init where .true. + logical, optional, intent(in) :: verbose ! For internal use + + ! 
Local variables + logical :: verbose_use + logical, allocatable :: mask_use(:) + real(r8) :: pertval + integer, allocatable :: rndm_seed(:) + integer :: rndm_seed_sz + integer :: i, k + integer :: ncol, nlev + character(len=*), parameter :: subname = 'DYN_SET_INIC_COL' + +#ifdef ANALYTIC_IC + allocate(mask_use(size(latvals))) + if (present(mask)) then + if (size(mask_use) /= size(mask)) then + call endrun('cnst_init_default: input, mask, is wrong size') + end if + mask_use = mask + else + mask_use = .true. + end if + + if (present(verbose)) then + verbose_use = verbose + else + verbose_use = .true. + end if + + ! Basic size sanity checks + if (size(latvals) /= size(lonvals)) then + call endrun(subname//'latvals and lonvals must have same size') + end if + if (present(U)) then + if (size(U) > 0) then + call check_array_size(U(:,1), 'U', latvals, subname) + else + return + end if + end if + if (present(V)) then + if (size(V) > 0) then + call check_array_size(V(:,1), 'V', latvals, subname) + else + return + end if + end if + if (present(T)) then + if (size(T) > 0) then + call check_array_size(T(:,1), 'T', latvals, subname) + else + return + end if + end if + if (present(PS)) then + if (size(PS) > 0) then + call check_array_size(PS, 'PS', latvals, subname) + else + return + end if + end if + if (present(PHIS_IN)) then + if (size(PHIS_IN) > 0) then + call check_array_size(PHIS_IN, 'PHIS_IN', latvals, subname) + else + return + end if + end if + if (present(PHIS_OUT)) then + if (size(PHIS_OUT) > 0) then + call check_array_size(PHIS_OUT, 'PHIS_OUT', latvals, subname) + else + return + end if + end if + ! Some special checks on the tracer argument + if (present(Q)) then + if (.not. present(m_cnst)) then + call endrun(subname//'m_cnst is required if Q is present') + end if + if (size(Q, 3) /= size(m_cnst, 1)) then + call endrun(subname//': size of m_cnst must match last dimension of Q') + end if + if (size(Q) > 0) then + call check_array_size(Q(:,1,1), 'Q', latvals, subname) + else + return + end if + end if + select case(trim(analytic_ic_type)) + case('held_suarez_1994') + call hs94_set_ic(latvals, lonvals, U=U, V=V, T=T, PS=PS, PHIS=PHIS_OUT, & + Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) + + case('moist_baroclinic_wave_dcmip2016', 'dry_baroclinic_wave_dcmip2016') + call bc_wav_set_ic(vcoord, latvals, lonvals, U=U, V=V, T=T, PS=PS, & + PHIS=PHIS_OUT, Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) + + case('dry_baroclinic_wave_jw2006') + call bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U=U, V=V, T=T, PS=PS, & + PHIS=PHIS_OUT, Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) + + case('us_standard_atmosphere') + call us_std_atm_set_ic(latvals, lonvals, U=U, V=V, T=T, PS=PS, PHIS=PHIS_IN, & + Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) + + case default + call endrun(subname//': Unknown analytic_ic_type, "'//trim(analytic_ic_type)//'"') + end select + + ! Maybe peturb T initial conditions + if (present(T) .and. (pertlim /= 0.0_r8)) then + + ! Add random perturbation to temperature if required + if(masterproc .and. verbose_use) then + write(iulog,*) trim(subname), ': Adding random perturbation bounded by +/-', & + pertlim,' to initial temperature field' + end if + call random_seed(size=rndm_seed_sz) + allocate(rndm_seed(rndm_seed_sz)) + + ncol = size(T, 1) + nlev = size(T, 2) + do i = 1, ncol + if (mask_use(i)) then + ! 
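The temperature perturbation applied in this block is bounded by +/- pertlim and the random generator is seeded with the global column index, so the result does not depend on the MPI/block decomposition. A standalone sketch of that recipe (pertlim, the column index, and the base temperature are made-up values):

    program pertlim_demo
      implicit none
      integer,  parameter :: r8 = selected_real_kind(12)
      real(r8), parameter :: pertlim = 1.0e-3_r8   ! assumed perturbation bound
      integer,  parameter :: nlev = 5
      integer :: glob_col, rndm_seed_sz, k
      integer, allocatable :: rndm_seed(:)
      real(r8) :: pertval, t(nlev)

      t = 250.0_r8                 ! unperturbed column temperature
      glob_col = 1234              ! hypothetical global column index

      call random_seed(size=rndm_seed_sz)
      allocate(rndm_seed(rndm_seed_sz))
      rndm_seed(:) = glob_col      ! same column -> same perturbation, any task layout
      call random_seed(put=rndm_seed)

      do k = 1, nlev
        call random_number(pertval)
        pertval = 2.0_r8*pertlim*(0.5_r8 - pertval)   ! uniform in (-pertlim, pertlim)
        t(k) = t(k)*(1.0_r8 + pertval)
      end do
      write(*,'(a,5f12.6)') 'perturbed T: ', t
      deallocate(rndm_seed)
    end program pertlim_demo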
seed random_number generator based on global column index + rndm_seed(:) = glob_ind(i) + call random_seed(put=rndm_seed) + do k = 1, nlev + call random_number(pertval) + pertval = 2.0_r8 * pertlim * (0.5_r8 - pertval) + T(i,k) = T(i,k) * (1.0_r8 + pertval) + end do + end if + end do + + deallocate(rndm_seed) + end if + + ! To get different random seeds each time + call_num = call_num + 1 +#else + call endrun(subname//': analytic initial conditions are not enabled') +#endif + + end subroutine dyn_set_inic_col + + subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & + PS, PHIS_IN, PHIS_OUT, Q, m_cnst, mask) + !----------------------------------------------------------------------- + ! + ! Purpose: Set analytic initial values for dynamics state variables + ! + !----------------------------------------------------------------------- + + ! Dummy arguments + integer, intent(in) :: vcoord ! See dyn_tests_utils + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + integer, intent(in) :: glob_ind(:) ! global column index + real(r8), optional, intent(inout) :: U(:,:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:,:) ! surface pressure + real(r8), optional, intent(in) :: PHIS_IN(:,:)! surface geopotential + real(r8), optional, intent(out) :: PHIS_OUT(:,:)! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:,:) ! tracer (ncol,lev,blk,m) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! Only init where .true. + + ! Local variables + real(r8), allocatable :: lat_use(:) + integer :: i, bbeg, bend + integer :: size1, size2, size3 + integer :: nblks, blksize + logical :: verbose + character(len=4) :: mname + character(len=*), parameter :: subname = 'DYN_SET_INIC_CBLOCK' + +#ifdef ANALYTIC_IC + verbose = .true. ! So subroutines can report setting variables + ! Figure out what sort of blocks we have, all variables should be the same + size1 = -1 + mname = '' + if (present(U)) then + call get_input_shape(U, 'U', mname, size1, size2, size3, subname) + end if + if(present(V)) then + call get_input_shape(V, 'V', mname, size1, size2, size3, subname) + end if + if(present(T)) then + call get_input_shape(T, 'T', mname, size1, size2, size3, subname) + end if + if(present(Q)) then + call get_input_shape(Q(:,:,:,1), 'Q', mname, size1, size2, size3, subname) + end if + ! Need to do all 3-D variables before any 2-D variables + if(present(PS)) then + call get_input_shape(PS, 'PS', mname, size1, size2, size3, subname) + end if + if(present(PHIS_IN)) then + call get_input_shape(PHIS_IN, 'PHIS_IN', mname, size1, size2, size3, subname) + end if + if(present(PHIS_OUT)) then + call get_input_shape(PHIS_OUT, 'PHIS_OUT', mname, size1, size2, size3, subname) + end if + if (size1 < 0) then + call endrun(subname//': No state variables to initialize') + end if + if ((size(latvals) == size1*size3) .and. (size(lonvals) == size1*size3)) then + ! 
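The cblock wrapper below walks the flattened column arrays one block at a time: for nblks blocks of blksize columns, block i covers columns bbeg:bend. A tiny sketch of the same bookkeeping with made-up sizes:

    program block_slices
      implicit none
      integer, parameter :: nblks = 4, blksize = 6
      integer :: i, bbeg, bend

      bend = 0
      do i = 1, nblks
        bbeg = bend + 1
        bend = bbeg + blksize - 1
        write(*,'(a,i0,a,i0,a,i0)') 'block ', i, ': columns ', bbeg, ' to ', bend
      end do
    end program block_slices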
Case: unstructured with blocks in 3rd dim + if (size(glob_ind) /= size(latvals)) then + call endrun(subname//': there must be a global index for every column') + end if + nblks = size3 + blksize = size1 + bend = 0 + do i = 1, nblks + bbeg = bend + 1 + bend = bbeg + blksize - 1 + if (present(mask)) then + if (size(mask) /= size(latvals)) then + call endrun(subname//': incorrect mask size') + end if + if (present(U)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), U=U(:,:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), V=V(:,:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PS=PS(:,i), PHIS_IN=PHIS_IN(:,i), T=T(:,:,i), & + mask=mask(bbeg:bend), verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), T=T(:,:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PS=PS(:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), Q=Q(:,:,i,:), m_cnst=m_cnst, & + mask=mask(bbeg:bend), verbose=verbose) + end if + else + if (present(U)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), U=U(:,:,i), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), V=V(:,:,i), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,:,i), & + verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), T=T(:,:,i), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PS=PS(:,i), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), Q=Q(:,:,i,:), m_cnst=m_cnst, & + verbose=verbose) + end if + end if + verbose = .false. + end do + else if ((size(latvals) == size1*size2) .and. (size(lonvals) == size1*size2)) then + ! 
Case: unstructured with blocks in 2nd dim + if (size(glob_ind) /= size(latvals)) then + call endrun(subname//': there must be a global index for every column') + end if + nblks = size2 + blksize = size1 + bend = 0 + do i = 1, nblks + bbeg = bend + 1 + bend = bbeg + blksize - 1 + if (present(mask)) then + if (size(mask) /= size(latvals)) then + call endrun(subname//': incorrect mask size') + end if + if (present(U)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), U=U(:,i,:), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), V=V(:,i,:), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,i,:), & + mask=mask(bbeg:bend), verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), T=T(:,i,:), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PS=PS(:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), mask=mask(bbeg:bend), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), Q=Q(:,i,:,:), m_cnst=m_cnst, & + mask=mask(bbeg:bend), verbose=verbose) + end if + else + if (present(U)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), U=U(:,i,:), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), V=V(:,i,:), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,i,:), & + verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), T=T(:,i,:), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PS=PS(:,i), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), Q=Q(:,i,:,:), m_cnst=m_cnst, & + verbose=verbose) + end if + end if + verbose = .false. + end do + else if ((size(latvals) == size2) .and. (size(lonvals) == size1)) then + ! 
Case: lon,lat,lev + if (size(glob_ind) /= (size2 * size1)) then + call endrun(subname//': there must be a global index for every column') + end if + nblks = size2 + allocate(lat_use(size(lonvals))) + if (present(mask)) then + call endrun(subname//': mask not supported for lon/lat') + else + bend = 0 + do i = 1, nblks + bbeg = bend + 1 + bend = bbeg + size1 - 1 + lat_use = latvals(i) + if (present(U)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + U=U(:,i,:), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + V=V(:,i,:), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + PS=PS(:,i),T=T(:,i,:),PHIS_IN=PHIS_IN(:,i), verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + T=T(:,i,:), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + PS=PS(:,i), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + Q=Q(:,i,:,:), m_cnst=m_cnst, verbose=verbose) + end if + verbose = .false. + end do + end if + deallocate(lat_use) + else if ((size(latvals) == size3) .and. (size(lonvals) == size1)) then + if (size(glob_ind) /= (size3 * size1)) then + call endrun(subname//': there must be a global index for every column') + end if + ! Case: lon,lev,lat + nblks = size3 + allocate(lat_use(size(lonvals))) + if (present(mask)) then + call endrun(subname//': mask not supported for lon/lat') + else + bend = 0 + do i = 1, nblks + bbeg = bend + 1 + bend = bbeg + size1 - 1 + lat_use = latvals(i) + if (present(U)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + U=U(:,:,i), verbose=verbose) + end if + if (present(V)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + V=V(:,:,i), verbose=verbose) + end if + if (present(PS).and.present(PHIS_IN).and.present(T)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + T=T(:,:,i),PS=PS(:,i), PHIS_IN=PHIS_IN(:,i), verbose=verbose) + else + if (present(T)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + T=T(:,:,i), verbose=verbose) + end if + if (present(PS)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + PS=PS(:,i), verbose=verbose) + end if + if (present(PHIS_OUT)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) + end if + end if + if (present(Q)) then + call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & + Q=Q(:,:,i,:), m_cnst=m_cnst, verbose=verbose) + end if + verbose = .false. 
+ end do + end if + deallocate(lat_use) + else + call endrun(subname//': Unknown state variable layout') + end if +#else + call endrun(subname//': analytic initial conditions are not enabled') +#endif + end subroutine dyn_set_inic_cblock + +#ifdef ANALYTIC_IC + subroutine get_input_shape_2d(array, aname, sname, size1, size2, size3, es) + real(r8), intent(in) :: array(:,:) + character(len=*), intent(in) :: aname + character(len=*), intent(inout) :: sname + integer, intent(inout) :: size1 + integer, intent(inout) :: size2 + integer, intent(inout) :: size3 + character(len=*), intent(in) :: es + + if ((size1 < 0) .and. (size(array) == 0)) then + ! The shape has not yet been set, set it to zero + size1 = 0 + size2 = 0 + size3 = 0 + sname = trim(aname) + else if (size1 < 0) then + ! The shape has not yet been set, set it + size1 = size(array, 1) + size2 = size(array, 2) + size3 = 1 + sname = trim(aname) + else if ((size1 * size2 * size3) > 0) then + ! For 2-D variables, the second dimension is always the block size + ! However, since the shape may have been set by a 3-D variable, we + ! need to pass either possibility + if ( (size1 /= size(array, 1)) .or. & + ((size2 /= size(array, 2)) .and. (size3 /= size(array, 2)))) then + call endrun(trim(es)//': shape of '//trim(aname)//' does not match shape of '//trim(sname)) + end if + ! No else, we cannot compare to zero size master array + end if + + end subroutine get_input_shape_2d + + subroutine get_input_shape_3d(array, aname, sname, size1, size2, size3, es) + real(r8), intent(in) :: array(:,:,:) + character(len=*), intent(in) :: aname + character(len=*), intent(inout) :: sname + integer, intent(inout) :: size1 + integer, intent(inout) :: size2 + integer, intent(inout) :: size3 + character(len=*), intent(in) :: es + + if ((size1 < 0) .and. (size(array) == 0)) then + ! The shape has not yet been set, set it to zero + size1 = 0 + size2 = 0 + size3 = 0 + sname = trim(aname) + else if (size1 < 0) then + ! The shape has not yet been set, set it + size1 = size(array, 1) + size2 = size(array, 2) + size3 = size(array, 3) + sname = trim(aname) + else if ((size1 * size2 * size3) > 0) then + ! We have a shape, make sure array matches it + if ((size1 /= size(array, 1)) .or. (size2 /= size(array, 2)) .or. (size3 /= size(array, 3))) then + call endrun(trim(es)//': shape of '//trim(aname)//' does not match shape of '//trim(sname)) + end if + end if + ! No else, we cannot compare to zero size master array + end subroutine get_input_shape_3d + + subroutine check_array_size(array, aname, check, subname) + real(r8), intent(in) :: array(:) + character(len=*), intent(in) :: aname + real(r8), intent(in) :: check(:) + character(len=*), intent(in) :: subname + + if (size(array, 1) /= size(check, 1)) then + call endrun(trim(subname)//': '//trim(aname)//' has the wrong first dimension') + end if + + end subroutine check_array_size +#endif + +end module inic_analytic diff --git a/src/dynamics/tests/inic_analytic_utils.F90 b/src/dynamics/tests/inic_analytic_utils.F90 new file mode 100644 index 00000000..60091f05 --- /dev/null +++ b/src/dynamics/tests/inic_analytic_utils.F90 @@ -0,0 +1,132 @@ +module inic_analytic_utils + + !----------------------------------------------------------------------- + ! + ! Purpose: Set analytic initial conditions based on input coordinates + ! + ! 
+ !----------------------------------------------------------------------- + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8 + use cam_abortutils, only: endrun + use shr_sys_mod, only: shr_sys_flush + + implicit none + private + + ! Public interfaces + public :: analytic_ic_readnl ! Read dyn_test_nl namelist + public :: analytic_ic_active ! .true. if analytic IC should be set + public :: analytic_ic_is_moist ! .true. if IC are moist + + ! Private module variables + integer, parameter :: scheme_len = 32 + logical :: moist = .false. + + ! Protected resource + character(len=scheme_len), public, protected :: analytic_ic_type = 'none' + +!============================================================================== +CONTAINS +!============================================================================== + + logical function analytic_ic_active() + analytic_ic_active = (trim(analytic_ic_type) /= 'none') + end function analytic_ic_active + + logical function analytic_ic_is_moist() + analytic_ic_is_moist = moist + end function analytic_ic_is_moist + + subroutine analytic_ic_readnl(nlfile) + + use shr_nl_mod, only: find_group_name => shr_nl_find_group_name + use shr_file_mod, only: shr_file_getunit, shr_file_freeunit + use spmd_utils, only: masterproc, masterprocid, mpicom, mpi_character, mpi_logical + use shr_string_mod, only: shr_string_toLower + + ! Dummy argument + character(len=*), intent(in) :: nlfile ! filepath of namelist input file + + ! + ! Local variables + integer :: unitn, ierr + logical :: nl_not_found + character(len=128) :: msg + character(len=*), parameter :: subname = 'ANALYTIC_IC_READNL' + +#ifdef ANALYTIC_IC + ! History namelist items + namelist /analytic_ic_nl/ analytic_ic_type + + if (masterproc) then + unitn = shr_file_getunit() + open(unitn, file=trim(nlfile), status='old') + call find_group_name(unitn, 'analytic_ic_nl', status=ierr) + if (ierr == 0) then + nl_not_found = .false. + write(iulog, *) 'Read in analytic_ic_nl namelist from: ',trim(nlfile) + read(unitn, analytic_ic_nl, iostat=ierr) + if (ierr /= 0) then + write(msg, '(a,i0)') & + ': ERROR reading namelist, analytic_ic_nl, iostat = ', ierr + call endrun(subname//trim(msg)) + end if + else + nl_not_found = .true. + end if + close(unitn) + call shr_file_freeunit(unitn) + + analytic_ic_type = shr_string_toLower(analytic_ic_type) + end if + + ! Broadcast namelist variables + call mpi_bcast(analytic_ic_type, len(analytic_ic_type), mpi_character, masterprocid, mpicom, ierr) + call mpi_bcast(nl_not_found, 1, mpi_logical, masterprocid, mpicom, ierr) + + if (nl_not_found) then + ! If analytic IC functionality is turned on (via a configure switch), then + ! build-namelist supplies the namelist group. If not found then nothing + ! to do. + return + else + select case(trim(analytic_ic_type)) + case('held_suarez_1994') + msg = 'Dynamics state will be set to Held-Suarez (1994) initial conditions.' + case('moist_baroclinic_wave_dcmip2016') + moist = .true. + msg = 'Dynamics state will be set to a moist baroclinic wave initial condition used in DCMIP 2016.' + case('dry_baroclinic_wave_dcmip2016') + moist = .false. + msg = 'Dynamics state will be set to a dry baroclinic wave initial condition used in DCMIP 2016.' + case('dry_baroclinic_wave_jw2006') + moist = .false. + msg = 'Dynamics state will be set to a dry baroclinic wave initial condition as described in JW2006.' + case('us_standard_atmosphere') + moist = .false. 
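analytic_ic_readnl looks for the analytic_ic_nl group in the namelist input file and accepts only the values handled by its select case (held_suarez_1994, moist_baroclinic_wave_dcmip2016, dry_baroclinic_wave_dcmip2016, dry_baroclinic_wave_jw2006, us_standard_atmosphere). An example group as it might appear in atm_in; the particular choice is illustrative:

    &analytic_ic_nl
      analytic_ic_type = 'moist_baroclinic_wave_dcmip2016'
    /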
+ msg = 'static atmospheric state (u,v)=0, standard lapse rate for T, PS is hydrostatic equilibrium with topography.' + case('none') + msg = subname//': ERROR: analytic_ic_type must be set' + write(iulog, *) msg + call endrun(msg) + case default + msg = subname//': ERROR: analytic_ic_type not recognized: '//trim(analytic_ic_type) + write(iulog, *) msg + call endrun(msg) + end select + + end if + + ! Write out initial condition scheme info + if (masterproc) then + write(iulog, *) msg + end if +#else + analytic_ic_type = 'none' + moist = .false. +#endif + + end subroutine analytic_ic_readnl + +end module inic_analytic_utils diff --git a/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 new file mode 100644 index 00000000..a8f4a282 --- /dev/null +++ b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 @@ -0,0 +1,271 @@ +module ic_baro_dry_jw06 + !----------------------------------------------------------------------- + ! + ! Purpose: Set idealized initial conditions for the Jablonowski and + ! Williamson baroclinic instability test. + ! References: + ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Instability Test Case for + ! Atmospheric Model Dynamical Cores, Quart. J. Roy. Met. Soc., Vol. 132, 2943-2975 + ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Wave Test Case for Dynamical + ! Cores of General Circulation Models: Model Intercomparisons, + ! NCAR Technical Note NCAR/TN-469+STR, Boulder, CO, 89 pp. + ! + !----------------------------------------------------------------------- + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8 + use cam_abortutils, only: endrun + use spmd_utils, only: masterproc + use shr_sys_mod, only: shr_sys_flush + + use physconst, only : rair, cpair, gravit, rearth, pi, omega + use hycoef, only : hyai, hybi, hyam, hybm, ps0 + + implicit none + private + + !======================================================================= + ! JW06 Dry baroclinic wave test case parameters + !======================================================================= + real(r8), parameter, private :: & + eta_tropo = 0.2_r8, & ! tropopause level (hybrid vertical coordinate)) + u0 = 35._r8, & ! maximum jet speed 35 m/s + T0 = 288._r8, & ! horizontal mean T at the surface + p00 = 1.e5_r8, & ! surface pressure in Pa + eta0 = 0.252_r8, & ! center of jets (hybrid vertical coordinate) + radius = 10._r8, & ! reciprocal radius of the perturbation without the Earth's radius 'a' + perturbation_amplitude = 1._r8, & ! amplitude of u perturbation 1 m/s + perturbation_longitude = 20._r8, & ! longitudinal position, 20E + perturbation_latitude = 40._r8, & ! latitudinal position, 40N + eta_sfc = 1._r8, & ! hybrid value at the surface + delta_T = 480000._r8, & ! in K, parameter for T mean calculation + gamma = 0.005_r8 ! lapse rate in K/m + real(r8) :: a_omega, exponent + + real(r8), parameter :: deg2rad = pi/180._r8 ! conversion to radians + + ! Public interface + public :: bc_dry_jw06_set_ic + +contains + + subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & + Q, m_cnst, mask, verbose) + use dyn_tests_utils, only: vc_moist_pressure, vc_dry_pressure, vc_height + !use constituents, only: cnst_name + !use const_init, only: cnst_init_default + + !Remove once constituents are enabled -JN + use physics_types, only : ix_cld_liq, ix_rain + + !----------------------------------------------------------------------- + ! + ! 
Purpose: Set baroclinic wave initial values for dynamics state variables + ! + !----------------------------------------------------------------------- + + ! Dummy arguments + integer, intent(in) :: vcoord + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + ! z_k for vccord 1) + real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:) ! surface pressure + real(r8), optional, intent(out) :: PHIS(:) ! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! Only init where .true. + logical, optional, intent(in) :: verbose ! For internal use + ! Local variables + logical, allocatable :: mask_use(:) + logical :: verbose_use + logical :: lu,lv,lt,lq,l3d_vars + integer :: i, k, m + integer :: ncol + integer :: nlev + integer :: ncnst + character(len=*), parameter :: subname = 'BC_DRY_JW06_SET_IC' + real(r8) :: tmp + real(r8) :: r(size(latvals)) + real(r8) :: eta + real(r8) :: factor + real(r8) :: perturb_lon, perturb_lat + real(r8) :: phi_vertical + real(r8) :: u_wind(size(latvals)) + + a_omega = rearth*omega + exponent = rair*gamma/gravit + + allocate(mask_use(size(latvals))) + if (present(mask)) then + if (size(mask_use) /= size(mask)) then + call endrun(subname//': input, mask, is wrong size') + end if + mask_use = mask + else + mask_use = .true. + end if + + if (present(verbose)) then + verbose_use = verbose + else + verbose_use = .true. + end if + + ncol = size(latvals, 1) + nlev = -1 + + ! + ! We do not yet handle height-based vertical coordinates + if (vcoord == vc_height) then + call endrun(subname//': height-based vertical coordinate not currently supported') + end if + + ! + !******************************* + ! + ! initialize surface pressure + ! + !******************************* + ! + if (present(PS)) then + where(mask_use) + PS = p00 + end where + + if(masterproc .and. verbose_use) then + write(iulog,*) ' PS initialized by "',subname,'"' + end if + end if + ! + !******************************* + ! + ! Initialize PHIS + ! + !******************************* + ! + if (present(PHIS)) then + phis = 0.0_r8 + tmp = u0 * (cos((eta_sfc-eta0)*pi*0.5_r8))**1.5_r8 + where(mask_use) + PHIS(:) = ((-2._r8*(sin(latvals(:)))**6 * ((cos(latvals(:)))**2 + 1._r8/3._r8) + 10._r8/63._r8)*tmp & + + (8._r8/5._r8*(cos(latvals(:)))**3 * ((sin(latvals(:)))**2 + 2._r8/3._r8) - pi/4._r8)*a_omega)*tmp + end where + if(masterproc .and. verbose_use) then + write(iulog,*) ' PHIS initialized by "',subname,'"' + end if + end if + ! + !******************************* + ! + ! Initialize 3D vars + ! + ! + !******************************* + ! + lu = present(U) + lv = present(V) + lT = present(T) + lq = present(Q) + l3d_vars = lu .or. lv .or. lt .or.lq + nlev = -1 + if (l3d_vars) then + if (lu) nlev = size(U, 2) + if (lv) nlev = size(V, 2) + if (lt) nlev = size(T, 2) + if (lq) nlev = size(Q, 2) + + if (lu) then + do k = 1, nlev + perturb_lon = perturbation_longitude * deg2rad + perturb_lat = perturbation_latitude * deg2rad + phi_vertical = ((hyam(k)+hybm(k)) - eta0) *0.5_r8*pi + where(mask_use) + ! background wind + u_wind(:) = (cos(phi_vertical))**1.5_r8 * 4._r8 * u0 * (sin(latvals(:)))**2 * (cos(latvals(:)))**2 + ! 
great circle distance without radius 'a' + r(:) = acos( sin(perturb_lat)*sin(latvals(:)) + cos(perturb_lat)*cos(latvals(:))*cos(lonvals(:)-perturb_lon)) + ! background + perturbation wind + U(:,k) = perturbation_amplitude*exp(- (r(:)*radius)**2 ) + u_wind(:) + end where + end do + if(masterproc.and. verbose_use) then + write(iulog,*) ' U initialized by "',subname,'"' + end if + end if + if (lv) then + do k = 1, nlev + where(mask_use) + V(:,k) = 0.0_r8 + end where + end do + if(masterproc.and. verbose_use) then + write(iulog,*) ' V initialized by "',subname,'"' + end if + end if + if (lt) then + do k = 1, nlev + eta = hyam(k) + hybm(k) + ! background temperature + if (eta .ge. eta_tropo) then + tmp = T0*eta**exponent + else + tmp = T0*eta**exponent + delta_T*(eta_tropo-eta)**5 + endif + factor = eta*pi*u0/rair + phi_vertical = (eta - eta0) * 0.5_r8*pi + where(mask_use) + ! background temperature 'tmp' plus temperature deviation + T(:,k) = factor * 1.5_r8 * sin(phi_vertical) * (cos(phi_vertical))**0.5_r8 * & + ((-2._r8*(sin(latvals(:)))**6 * ((cos(latvals(:)))**2 + 1._r8/3._r8) + 10._r8/63._r8)* & + u0 * (cos(phi_vertical))**1.5_r8 + & + (8._r8/5._r8*(cos(latvals(:)))**3 * ((sin(latvals(:)))**2 + 2._r8/3._r8) - pi/4._r8)*a_omega*0.5_r8 ) + & + tmp + end where + enddo + if(masterproc.and. verbose_use) then + write(iulog,*) ' T initialized by "',subname,'"' + end if + end if + if (lq) then + do k = 1, nlev + where(mask_use) + Q(:,k,1) = 0.0_r8 + end where + end do +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if(masterproc.and. verbose_use) then + write(iulog,*) ' ', trim(cnst_name(m_cnst(1))), ' initialized by "',subname,'"' + end if +#endif + end if + end if + +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if (lq) then + ncnst = size(m_cnst, 1) + if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then + do m = 2, ncnst + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& + mask=mask_use, verbose=verbose_use, notfound=.false.) + end do + end if + end if +#else + if (lq) then + if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then + !Initialize cloud liquid and rain until constituent routines are enabled: + Q(:,:,ix_cld_liq) = 0.0_r8 + Q(:,:,ix_rain) = 0.0_r8 + end if + end if +#endif + + deallocate(mask_use) + + end subroutine bc_dry_jw06_set_ic + +end module ic_baro_dry_jw06 diff --git a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 new file mode 100644 index 00000000..e8b9c4c9 --- /dev/null +++ b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 @@ -0,0 +1,720 @@ +module ic_baroclinic + !----------------------------------------------------------------------- + ! + ! Purpose: Set idealized initial conditions for the Ullrich, Melvin, + ! Jablonowski and Staniforth (QJRMS, 2014) baroclinic + ! instability test. + ! + !----------------------------------------------------------------------- + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8 + use cam_abortutils, only: endrun + use spmd_utils, only: masterproc + + use physconst, only : rair, gravit, rearth, pi, omega, epsilo + use hycoef, only : hyai, hybi, hyam, hybm, ps0 + + !Remove once constituents are enabled -JN + use physics_types, only : ix_cld_liq, ix_rain + + implicit none + private + + real(r8), parameter :: deg2rad = pi/180.0_r8 + + !======================================================================= + ! 
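For reference, the dry JW06 background state assembled in ic_baro_dry_jw06 above can be summarized compactly; this restates the code, it is not an independent derivation. With hybrid level eta = A + B, eta_v = (eta - eta0)*pi/2, and latitude phi:

    u(\eta,\phi) = u_0 \cos^{3/2}\!\eta_v \, \sin^2(2\phi)
    \bar T(\eta) = T_0\,\eta^{R_d\gamma/g}                               \quad (\eta \ge \eta_t)
    \bar T(\eta) = T_0\,\eta^{R_d\gamma/g} + \Delta T\,(\eta_t-\eta)^5   \quad (\eta <  \eta_t)

with u0 = 35 m/s, T0 = 288 K, gamma = 0.005 K/m, eta_t = 0.2, and Delta T = 4.8e5 K. The zonal wind additionally carries the perturbation u' = exp(-(10 r)^2) m/s centred at (20E, 40N), where r is the great-circle distance in radians; the horizontal temperature deviation and the surface geopotential use the same latitude factors that appear in the PHIS and T loops above.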
Baroclinic wave test case parameters + !======================================================================= + real(r8), parameter, private :: Mvap = 0.608_r8 ! Ratio of molar mass dry air/water vapor + real(r8), parameter, private :: psurf_moist = 100000.0_r8 ! Moist surface pressure + + real(r8), parameter, private :: & + T0E = 310.0_r8, & ! Temperature at equatorial surface (K) + T0P = 240.0_r8, & ! Temperature at polar surface (K) + B = 2.0_r8, & ! Jet half-width parameter + KK = 3.0_r8, & ! Jet width parameter + lapse = 0.005_r8 ! Lapse rate parameter + + real(r8), parameter, private :: & + pertu0 = 0.5_r8, & ! SF Perturbation wind velocity (m/s) + pertr = 1.0_r8/6.0_r8, & ! SF Perturbation radius (Earth radii) + pertup = 1.0_r8, & ! Exp. perturbation wind velocity (m/s) + pertexpr = 0.1_r8, & ! Exp. perturbation radius (Earth radii) + pertlon = pi/9.0_r8, & ! Perturbation longitude + pertlat = 2.0_r8*pi/9.0_r8, & ! Perturbation latitude + pertz = 15000.0_r8, & ! Perturbation height cap + dxepsilon = 1.0e-5_r8 ! Small value for numerical derivatives + + real(r8), parameter, private :: & + moistqlat = 2.0_r8*pi/9.0_r8, & ! Humidity latitudinal width + moistqp = 34000.0_r8, & ! Humidity vertical pressure width + moistq0 = 0.018_r8 ! Maximum specific humidity + + real(r8), parameter, private :: & + eps = 1.0e-13_r8, & ! Iteration threshold + qv_min = 1.0e-12_r8 ! Min specific humidity value + + + integer, parameter :: deep = 0 ! Deep (1) or Shallow (0) test case + integer, parameter :: pertt = 0 ! 0: exponential, 1: streamfunction + real(r8), parameter :: bigx = 1.0 ! Factor for a reduced size earth + + ! + ! Gauss nodes and weights + ! + integer , parameter :: num_gauss = 10 + real(r8), parameter, dimension(num_gauss), private :: gaussx =(/& + -0.97390652851717_r8,-0.865063366689_r8,-0.67940956829902_r8,-0.4333953941292_r8,-0.14887433898163_r8,& + 0.14887433898163_r8,0.4333953941292_r8,0.679409568299_r8,0.86506336668898_r8,0.97390652851717_r8/) + + real(r8), parameter, dimension(num_gauss), private :: gaussw =(/& + 0.06667134430869_r8,0.1494513491506_r8,0.219086362516_r8,0.26926671931_r8,0.29552422471475_r8, & + 0.2955242247148_r8,0.26926671931_r8,0.21908636251598_r8,0.1494513491506_r8,0.0666713443087_r8/) + + ! Public interface + public :: bc_wav_set_ic + +contains + + subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & + Q, Z, m_cnst, mask, verbose) + use dyn_tests_utils, only: vc_moist_pressure, vc_dry_pressure, vc_height + !use constituents, only: cnst_name + !use const_init, only: cnst_init_default + use inic_analytic_utils, only: analytic_ic_is_moist + + !----------------------------------------------------------------------- + ! + ! Purpose: Set baroclinic wave initial values for dynamics state variables + ! + !----------------------------------------------------------------------- + + ! Dummy arguments + integer, intent(in) :: vcoord + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + ! z_k for vccord 1) + real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:) ! surface pressure + real(r8), optional, intent(out) :: PHIS(:) ! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) + real(r8), optional, intent(inout) :: Z(:,:) ! 
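The 10-point Gauss-Legendre nodes and weights tabulated in this module are used by weight_of_water_vapor_given_z and weight_of_dry_air_given_z to integrate the hydrostatic column weight between a height z and the model top (mapping [z, ztop] onto [-1, 1] with xm and xr). A quick standalone sanity check of the tabulated values: the weights should sum to 2, and the rule should integrate x**2 over [-1, 1] to 2/3.

    program gauss10_check
      implicit none
      integer,  parameter :: r8 = selected_real_kind(12)
      integer,  parameter :: ng = 10
      real(r8), parameter :: gaussx(ng) = [ &
        -0.97390652851717_r8, -0.865063366689_r8,  -0.67940956829902_r8, &
        -0.4333953941292_r8,  -0.14887433898163_r8, 0.14887433898163_r8, &
         0.4333953941292_r8,   0.679409568299_r8,   0.86506336668898_r8, &
         0.97390652851717_r8 ]
      real(r8), parameter :: gaussw(ng) = [ &
         0.06667134430869_r8, 0.1494513491506_r8,  0.219086362516_r8,   &
         0.26926671931_r8,    0.29552422471475_r8, 0.2955242247148_r8,  &
         0.26926671931_r8,    0.21908636251598_r8, 0.1494513491506_r8,  &
         0.0666713443087_r8 ]

      write(*,'(a,f14.10)') 'sum of weights (expect 2):     ', sum(gaussw)
      write(*,'(a,f14.10)') 'integral of x**2 (expect 2/3): ', sum(gaussw*gaussx**2)
    end program gauss10_check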
height (ncol, lev) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! only init where .true. + logical, optional, intent(in) :: verbose ! for internal use + ! Local variables + logical, allocatable :: mask_use(:) + logical :: verbose_use + integer :: i, k, m + integer :: ncol + integer :: nlev + integer :: ncnst + character(len=*), parameter :: subname = 'BC_WAV_SET_IC' + real(r8) :: ztop,ptop + real(r8) :: uk,vk,Tvk,qk,pk !mid-level state + real(r8) :: psurface + real(r8) :: wvp,qdry + logical :: lU, lV, lT, lQ, l3d_vars + logical :: cnst1_is_moisture + real(r8), allocatable :: pdry_half(:), pwet_half(:),zdry_half(:),zk(:) + real(r8), allocatable :: zlocal(:,:)! height of full level p for test tracer initialization + + if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then + ! + ! pressure-based vertical coordinate + ! + ptop = hyai(1) * ps0 + if (ptop > 1.0e5_r8) then + call endrun(subname//' ERROR: For iterate_z_given_pressure to work ptop must be less than 100hPa') + end if + ztop = iterate_z_given_pressure(ptop,.false.,ptop,0.0_r8,-1000._r8) !Find height of top pressure surface + else if (vcoord == vc_height) then + ! + ! height-based vertical coordinate + ! + call endrun(subname//' ERROR: z-based vertical coordinate not coded yet') + else + call endrun(subname//' ERROR: vcoord value out of range') + end if + + allocate(mask_use(size(latvals))) + if (present(mask)) then + if (size(mask_use) /= size(mask)) then + call endrun(subname//': input, mask, is wrong size') + end if + mask_use = mask + else + mask_use = .true. + end if + + if (present(verbose)) then + verbose_use = verbose + else + verbose_use = .true. + end if + + if(masterproc .and. verbose .and. present(PS)) then + write(iulog,*) subname, ': Model top (in km) is at z= ',ztop/1000.0_r8 + end if + + ncol = size(latvals, 1) + nlev = -1 + ! + !******************************* + ! + ! initialize surface pressure + ! + !******************************* + ! + if (present(PS)) then + if (vcoord == vc_moist_pressure) then + where(mask_use) + PS = psurf_moist + end where + else if(vcoord == vc_dry_pressure) then + ! + ! compute dry surface pressure (subtract water vapor in coloumn) + ! + do i=1,ncol + if (mask_use(i)) then + wvp = weight_of_water_vapor_given_z(0.0_r8,latvals(i),ztop) + ps(i) = psurf_moist-wvp + end if + end do + endif + + if(masterproc .and. verbose_use) then + write(iulog,*) ' PS initialized by "',subname,'"' + end if + end if + ! + !******************************* + ! + ! Initialize PHIS + ! + !******************************* + ! + if (present(PHIS)) then + PHIS = 0.0_r8 + if(masterproc .and. verbose_use) then + write(iulog,*) ' PHIS initialized by "',subname,'"' + end if + end if + ! + !******************************* + ! + ! Initialize 3D vars + ! + ! + !******************************* + ! + lu = present(U) + lv = present(V) + lT = present(T) + lq = present(Q) + l3d_vars = lu .or. lv .or. lt .or. lq + nlev = -1 + if (l3d_vars) then + if (lu) nlev = size(U, 2) + if (lv) nlev = size(V, 2) + if (lt) nlev = size(T, 2) + + if (lq) then + nlev = size(Q, 2) + ! check whether first constituent in Q is water vapor. + cnst1_is_moisture = m_cnst(1) == 1 + allocate(zlocal(size(Q, 1),nlev)) + end if + + allocate(zk(nlev)) + if ((lq.or.lt) .and. 
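For the dry-mass coordinate branch in this routine, the moist surface pressure is reduced by the weight of the water vapor column; the integral is evaluated further down by weight_of_water_vapor_given_z using the Gauss rule tabulated earlier. Restating the code, not an independent result:

    p_{s,\mathrm{dry}} = p_{s,\mathrm{moist}} - W_v(0), \qquad
    W_v(z) = \int_{z}^{z_{\mathrm{top}}} \frac{g\, q_v(z')\, p_{\mathrm{wet}}(z')}{R_d\, T_v(z')}\, dz'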
(vcoord == vc_dry_pressure)) then + allocate(pdry_half(nlev+1)) + allocate(pwet_half(nlev+1)) + allocate(zdry_half(nlev+1)) + end if + do i=1,ncol + if (mask_use(i)) then + if (vcoord == vc_moist_pressure) then + psurface = psurf_moist + wvp = -99 + else if (vcoord == vc_dry_pressure) then + ! + ! convert surface pressure to dry + ! + wvp = weight_of_water_vapor_given_z(0.0_r8,latvals(i),ztop) + psurface = psurf_moist-wvp + end if + + do k=1,nlev + ! compute pressure levels + pk = hyam(k)*ps0 + hybm(k)*psurface + ! find height of pressure surface + zk(k) = iterate_z_given_pressure(pk,(vcoord == vc_dry_pressure),ptop,latvals(i),ztop) + end do + + if (lq) then + if (present(Z)) then + zlocal(i,1:nlev) = Z(i,1:nlev) + else + zlocal(i,1:nlev) = zk(:) + end if + end if + + + do k=1,nlev + ! + ! wind components + ! + if (lu.or.lv) call uv_given_z(zk(k),uk,vk,latvals(i),lonvals(i)) + if (lu) U(i,k) = uk + if (lv) V(i,k) = vk + ! + ! temperature and moisture for moist vertical coordinates + ! + if ((lq.or.lt).and.(vcoord == vc_moist_pressure)) then + if (analytic_ic_is_moist()) then + pk = moist_pressure_given_z(zk(k),latvals(i)) + qk = qv_given_moist_pressure(pk,latvals(i)) + else + qk = 0.d0 + end if + if (lq .and. cnst1_is_moisture) Q(i,k,1) = qk + if (lt) then + tvk = Tv_given_z(zk(k),latvals(i)) + T(i,k) = tvk / (1.d0 + Mvap * qk) + end if + end if + end do + ! + ! temperature and moisture for dry-mass vertical coordinates + ! + if ((lq.or.lt).and. (vcoord==vc_dry_pressure)) then + ! + ! compute dry pressure vertical coordinate + ! + pdry_half(1) = hyai(1)*ps0 + hybi(1)*psurface + pwet_half(1) = pdry_half(1) + zdry_half(1) = ztop + do k=2,nlev+1 + pdry_half(k) = hyai(k)*ps0 + hybi(k)*psurface + ! find height of pressure surfaces corresponding moist pressure + zdry_half(k) = iterate_z_given_pressure(pdry_half(k),.true.,ptop,latvals(i),ztop) + pwet_half(k) = pdry_half(k)+weight_of_water_vapor_given_z(zdry_half(k),latvals(i),ztop) + end do + + do k=1,nlev + if (analytic_ic_is_moist()) then + qdry =((pwet_half(k+1)-pwet_half(k))/(pdry_half(k+1)-pdry_half(k)))-1.0_r8 + qdry = MAX(qdry,qv_min/(1.0_r8-qv_min)) + else + qdry = 0.0_r8 + end if + if (lq .and. cnst1_is_moisture) then + Q(i,k,1) = qdry + end if + if (lt) then + ! + ! convert virtual temperature to temperature + ! + tvk = Tv_given_z(zk(k),latvals(i)) + T(i,k) = tvk*(1.0_r8+qdry)/(1.0_r8+(1.0_r8/epsilo)*qdry) + end if + end do + end if + end if + end do + if(lu .and. masterproc.and. verbose_use) write(iulog,*) ' U initialized by "',subname,'"' + if(lv .and. masterproc.and. verbose_use) write(iulog,*) ' V initialized by "',subname,'"' + if(lt .and. masterproc.and. verbose_use) write(iulog,*) ' T initialized by "',subname,'"' +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if(lq .and. cnst1_is_moisture .and. masterproc.and. verbose_use) write(iulog,*) & + ' ', trim(cnst_name(m_cnst(1))), ' initialized by "',subname,'"' +#endif + end if + +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if (lq) then + ncnst = size(m_cnst, 1) + if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then + do m = 1, ncnst + + ! water vapor already done above + if (m_cnst(m) == 1) cycle + + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m),& + mask=mask_use, verbose=verbose_use, notfound=.false.,& + z=zlocal) + + end do + + end if ! vcoord + end if ! lq +#else + if (lq) then + if ((vcoord == vc_moist_pressure) .or. 
(vcoord == vc_dry_pressure)) then + !Initialize cloud liquid and rain until constituent routines are enabled: + Q(:,:,ix_cld_liq) = 0.0_r8 + Q(:,:,ix_rain) = 0.0_r8 + end if + end if +#endif + + deallocate(mask_use) + if (l3d_vars) then + deallocate(zk) + if ((lq.or.lt) .and. (vcoord == vc_dry_pressure)) then + deallocate(pdry_half) + deallocate(pwet_half) + deallocate(zdry_half) + end if + end if + end subroutine bc_wav_set_ic + + real(r8) FUNCTION iterate_z_given_pressure(p,ldry_mass_vertical_coordinates,ptop,lat,ztop) + + real(r8), INTENT(IN) :: & + p, &! Pressure (Pa) + ptop,&! Pressure (Pa) + lat,&! latitude + ztop + + logical, INTENT(IN) :: ldry_mass_vertical_coordinates + + integer :: ix + + real(r8) :: z0, z1, z2 + real(r8) :: p0, p1, p2 + z0 = 0.0_r8 + z1 = 10000.0_r8 + if (ldry_mass_vertical_coordinates) then + p0 = weight_of_dry_air_given_z(z0,ptop,lat,ztop) + p1 = weight_of_dry_air_given_z(z1,ptop,lat,ztop) + else + p0 = moist_pressure_given_z(z0,lat) + p1 = moist_pressure_given_z(z1,lat) + endif + + DO ix = 1, 1000 + z2 = z1 - (p1 - p) * (z1 - z0) / (p1 - p0) + if (ldry_mass_vertical_coordinates) then + p2 = weight_of_dry_air_given_z(z2,ptop,lat,ztop) + else + p2 = moist_pressure_given_z(z2,lat) + end if + + IF (ABS(p2 - p)/p < eps.or.ABS(z1-z2) 0.1_r8) then ! intialize q if p > 100 hPa + qv_given_moist_pressure = moistq0 * exp(- (lat/moistqlat)**4) & + * exp(- ((eta-1.0_r8)*psurf_moist/moistqp)**2) + else + qv_given_moist_pressure = qv_min ! above 100 hPa set q to 1e-12 to avoid supersaturation + endif + end if + END FUNCTION qv_given_moist_pressure + + real(r8) FUNCTION weight_of_water_vapor_given_z(z,lat, ztop) + use inic_analytic_utils, only: analytic_ic_is_moist + + real(r8), INTENT(IN) :: z,lat, ztop + real (r8) :: xm,xr,integral + real(r8) :: qv, z1, z2, Tv,pwet, ztmp + integer :: jgw + + if (.not. analytic_ic_is_moist()) then + ! + ! dry case + ! + weight_of_water_vapor_given_z = 0.0_r8 + else + z1=z + z2=ztop + xm=0.5_r8*(z1+z2) + xr=0.5_r8*(z2-z1) + integral=0 + do jgw=1,num_gauss + ztmp=xm+gaussx(jgw)*xr + pwet = moist_pressure_given_z(ztmp,lat); qv= qv_given_moist_pressure(pwet,lat);Tv= Tv_given_z(ztmp,lat) + integral=integral+gaussw(jgw)*gravit*pwet*qv/(Rair*Tv) + enddo + integral=0.5_r8*(z2-z1)*integral ! Scale the answer to the range of integration. + weight_of_water_vapor_given_z = integral + end if + end FUNCTION weight_of_water_vapor_given_z + + + real(r8) FUNCTION weight_of_dry_air_given_z(z,ptop,lat,ztop) + + real (r8), INTENT(IN) :: z,ptop, lat, ztop + real (r8) :: xm,xr,integral + real(r8) :: qv, z1, z2, Tv,pwet, ztmp + integer :: jgw + + z1=z + z2=ztop + xm=0.5*(z1+z2) + xr=0.5*(z2-z1) + integral=0 + do jgw=1,num_gauss + ztmp=xm+gaussx(jgw)*xr + pwet = moist_pressure_given_z(ztmp,lat); qv= qv_given_moist_pressure(pwet,lat);Tv= Tv_given_z(ztmp,lat) + integral=integral+gaussw(jgw)*gravit*pwet*(1-qv)/(Rair*Tv) + enddo + integral=0.5_r8*(z2-z1)*integral ! Scale the answer to the range of integration. + weight_of_dry_air_given_z = integral+ptop + end FUNCTION weight_of_dry_air_given_z + +end module ic_baroclinic diff --git a/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 new file mode 100644 index 00000000..2171925b --- /dev/null +++ b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 @@ -0,0 +1,163 @@ +module ic_held_suarez + + !----------------------------------------------------------------------- + ! + ! Purpose: Set Held-Suarez initial conditions based on input coordinates + ! 
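iterate_z_given_pressure above inverts the hydrostatic pressure profile with a secant update, z2 = z1 - (p(z1) - p_target)*(z1 - z0)/(p(z1) - p(z0)). The standalone sketch below applies the same update to a simple isothermal profile; the scale height, tolerance, and convergence test on both the pressure residual and the height increment are illustrative assumptions, not the test-case values.

    program secant_invert_p
      ! Invert p(z) = p00*exp(-z/H) for z given a target pressure, using the
      ! same secant update as iterate_z_given_pressure.
      implicit none
      integer,  parameter :: r8 = selected_real_kind(12)
      real(r8), parameter :: p00 = 1.0e5_r8, h_scale = 7.0e3_r8, eps = 1.0e-13_r8
      real(r8) :: p_target, z0, z1, z2, p0, p1, p2
      integer :: it

      p_target = 5.0e4_r8                 ! find the height of the 500 hPa surface
      z0 = 0.0_r8
      z1 = 10000.0_r8
      p0 = p00*exp(-z0/h_scale)
      p1 = p00*exp(-z1/h_scale)
      do it = 1, 1000
        z2 = z1 - (p1 - p_target)*(z1 - z0)/(p1 - p0)
        p2 = p00*exp(-z2/h_scale)
        if (abs(p2 - p_target)/p_target < eps .or. abs(z1 - z2) < eps) exit
        z0 = z1; p0 = p1
        z1 = z2; p1 = p2
      end do
      write(*,'(a,f12.4,a,i0,a)') 'z = ', z2, ' m after ', it, ' iterations'
      write(*,'(a,f12.4)')        'exact: ', h_scale*log(p00/p_target)
    end program secant_invert_p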
+ ! + !----------------------------------------------------------------------- + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8 + use cam_abortutils, only: endrun + use spmd_utils, only: masterproc + use shr_sys_mod, only: shr_sys_flush + + implicit none + private + + ! Public interface + public :: hs94_set_ic + +!============================================================================== +CONTAINS +!============================================================================== + + subroutine hs94_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & + Q, m_cnst, mask, verbose) + !use const_init, only: cnst_init_default + !use constituents, only: cnst_name + use physics_types, only: ix_cld_liq, ix_rain !Remove once constituents are enabled -JN + + !----------------------------------------------------------------------- + ! + ! Purpose: Set Held-Suarez initial values for dynamics state variables + ! + !----------------------------------------------------------------------- + + ! Dummy arguments + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:) ! surface pressure + real(r8), optional, intent(out) :: PHIS(:) ! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! Only init where .true. + logical, optional, intent(in) :: verbose ! For internal use + + ! Local variables + logical, allocatable :: mask_use(:) + logical :: verbose_use + integer :: i, k, m + integer :: ncol + integer :: nlev + integer :: ncnst + character(len=*), parameter :: subname = 'HS94_SET_IC' + + allocate(mask_use(size(latvals))) + if (present(mask)) then + if (size(mask_use) /= size(mask)) then + call endrun('cnst_init_default: input, mask, is wrong size') + end if + mask_use = mask + else + mask_use = .true. + end if + + if (present(verbose)) then + verbose_use = verbose + else + verbose_use = .true. + end if + + ncol = size(latvals, 1) + nlev = -1 + if (present(U)) then + nlev = size(U, 2) + do k = 1, nlev + where(mask_use) + U(:,k) = 0.0_r8 + end where + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' U initialized by "',subname,'"' + end if + end if + + if (present(V)) then + nlev = size(V, 2) + do k = 1, nlev + where(mask_use) + V(:,k) = 0.0_r8 + end where + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' V initialized by "',subname,'"' + end if + end if + + if (present(T)) then + nlev = size(T, 2) + do k = 1, nlev + where(mask_use) + T(:,k) = 250.0_r8 + end where + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' T initialized by "',subname,'"' + end if + end if + + if (present(PS)) then + where(mask_use) + PS = 100000.0_r8 + end where + if(masterproc .and. verbose_use) then + write(iulog,*) ' PS initialized by "',subname,'"' + end if + end if + + if (present(PHIS)) then + PHIS = 0.0_r8 + if(masterproc .and. verbose_use) then + write(iulog,*) ' PHIS initialized by "',subname,'"' + end if + end if + + if (present(Q)) then + nlev = size(Q, 2) + ncnst = size(m_cnst, 1) + do m = 1, ncnst + if (m_cnst(m) == 1) then + ! 
No water vapor in Held-Suarez + do k = 1, nlev + where(mask_use) + Q(:,k,m_cnst(m)) = 0.0_r8 + end where + end do +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if(masterproc .and. verbose_use) then + write(iulog,*) ' ', trim(cnst_name(m_cnst(m))), ' initialized by "',subname,'"' + end if + + else + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& + mask=mask_use, verbose=verbose_use, notfound=.false.) +#else + else + !Initialize cloud liquid and rain until constituent routines are enabled: + Q(:,:,ix_cld_liq) = 0.0_r8 + Q(:,:,ix_rain) = 0.0_r8 +#endif + end if + end do + end if + + deallocate(mask_use) + + end subroutine hs94_set_ic + +end module ic_held_suarez diff --git a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 new file mode 100644 index 00000000..45b6a27b --- /dev/null +++ b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 @@ -0,0 +1,186 @@ +module ic_us_standard_atmosphere + +!------------------------------------------------------------------------------- +! +! Set analytic initial conditions to be static (u=v=0) with temperature profile +! from the US standard atmosphere. +! +!------------------------------------------------------------------------------- + +use shr_kind_mod, only: r8 => shr_kind_r8 +use spmd_utils, only: masterproc + +use hycoef, only: ps0, hyam, hybm +use physconst, only: gravit +!use constituents, only: cnst_name +!use const_init, only: cnst_init_default + +use std_atm_profile, only: std_atm_pres, std_atm_height, std_atm_temp + +use cam_logfile, only: iulog +use cam_abortutils, only: endrun + +implicit none +private +save + +public :: us_std_atm_set_ic + +!========================================================================================= +CONTAINS +!========================================================================================= + +subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & + Q, m_cnst, mask, verbose) + + !---------------------------------------------------------------------------- + ! + ! Set initial values for static atmosphere with vertical profile from US + ! Standard Atmosphere. + ! + !---------------------------------------------------------------------------- + + ! Arguments + real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) + real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity + real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity + real(r8), optional, intent(inout) :: T(:,:) ! temperature + real(r8), optional, intent(inout) :: PS(:) ! surface pressure + real(r8), optional, intent(in) :: PHIS(:) ! surface geopotential + real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) + integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) + logical, optional, intent(in) :: mask(:) ! Only init where .true. + logical, optional, intent(in) :: verbose ! For internal use + + ! 
Local variables + logical, allocatable :: mask_use(:) + logical :: verbose_use + integer :: i, k, m + integer :: ncol + integer :: nlev + integer :: ncnst + character(len=*), parameter :: subname = 'us_std_atm_set_ic' + real(r8) :: psurf(1) + real(r8), allocatable :: pmid(:), zmid(:) + !---------------------------------------------------------------------------- + + ncol = size(latvals, 1) + allocate(mask_use(ncol)) + if (present(mask)) then + if (size(mask_use) /= size(mask)) then + call endrun(subname//': input, mask, is wrong size') + end if + mask_use = mask + else + mask_use = .true. + end if + + if (present(verbose)) then + verbose_use = verbose + else + verbose_use = .true. + end if + + nlev = -1 + if (present(U)) then + nlev = size(U, 2) + do k = 1, nlev + where(mask_use) + U(:,k) = 0.0_r8 + end where + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' U initialized by '//subname + end if + end if + + if (present(V)) then + nlev = size(V, 2) + do k = 1, nlev + where(mask_use) + V(:,k) = 0.0_r8 + end where + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' V initialized by '//subname + end if + end if + + if (present(T)) then + if (.not.present(PHIS)) then + call endrun(subname//': PHIS must be specified to initiallize T') + end if + nlev = size(T, 2) + allocate(pmid(nlev), zmid(nlev)) + do i = 1, ncol + if (mask_use(i)) then + ! get surface pressure + call std_atm_pres(PHIS(i:i)/gravit, psurf) + ! get pressure levels + do k = 1, nlev + pmid(k) = hyam(k)*ps0 + hybm(k)*psurf(1) + end do + ! get height of pressure level + call std_atm_height(pmid, zmid) + ! given height get temperature + call std_atm_temp(zmid, T(i,:)) + end if + end do + deallocate(pmid, zmid) + + if(masterproc .and. verbose_use) then + write(iulog,*) ' T initialized by "',subname,'"' + end if + end if + + if (present(PS)) then + if (.not.present(PHIS)) then + call endrun(subname//': PHIS must be specified to initiallize PS') + end if + + do i = 1, ncol + if (mask_use(i)) then + call std_atm_pres(PHIS(i:i)/gravit, PS(i:i)) + end if + end do + if(masterproc .and. verbose_use) then + write(iulog,*) ' PS initialized by "',subname,'"' + end if + end if + + if (present(Q)) then + nlev = size(Q, 2) + ncnst = size(m_cnst, 1) + do m = 1, ncnst + if (m_cnst(m) == 1) then + ! No water vapor in profile + do k = 1, nlev + where(mask_use) + Q(:,k,m_cnst(m)) = 0.0_r8 + end where + end do +!Un-comment once constituents are working in CAMDEN -JN: +#if 0 + if(masterproc .and. verbose_use) then + write(iulog,*) ' ', trim(cnst_name(m_cnst(m))), ' initialized by '//subname + end if + else + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& + mask=mask_use, verbose=verbose_use, notfound=.false.) 
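The T and PS initialization above chains three std_atm_profile calls: surface pressure from the surface height PHIS/g, mid-level pressures from the hybrid coefficients, heights of those pressures, and finally the standard-atmosphere temperature at those heights. A single-column sketch of that call sequence, assuming the CAM modules std_atm_profile and hycoef are available (the surface geopotential and gravit value here are placeholders; CAM takes gravit from physconst):

    program us_std_atm_column
      use std_atm_profile, only: std_atm_pres, std_atm_height, std_atm_temp
      use hycoef,          only: hyam, hybm, ps0
      implicit none
      integer,  parameter :: r8 = selected_real_kind(12)
      real(r8), parameter :: gravit = 9.80616_r8     ! assumed value for the sketch
      integer :: k, nlev
      real(r8) :: phis(1), psurf(1)
      real(r8), allocatable :: pmid(:), zmid(:), t(:)

      nlev = size(hyam)
      allocate(pmid(nlev), zmid(nlev), t(nlev))
      phis = 9.8e3_r8                                ! hypothetical surface geopotential (~1 km)
      call std_atm_pres(phis/gravit, psurf)          ! surface pressure at the surface height
      do k = 1, nlev
        pmid(k) = hyam(k)*ps0 + hybm(k)*psurf(1)     ! hybrid mid-level pressures
      end do
      call std_atm_height(pmid, zmid)                ! heights of those pressures
      call std_atm_temp(zmid, t)                     ! standard-atmosphere temperature
    end program us_std_atm_column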
+#else + else + !Initialize cloud liquid and rain until constituent routines are enabled: + Q(:,:,m_cnst(m)) = 0.0_r8 +#endif + end if + end do + end if + + deallocate(mask_use) + +end subroutine us_std_atm_set_ic + +!========================================================================================= + +end module ic_us_standard_atmosphere diff --git a/src/physics/utils/physics_column_type.F90 b/src/physics/utils/physics_column_type.F90 index 697e8248..b040d8f2 100644 --- a/src/physics/utils/physics_column_type.F90 +++ b/src/physics/utils/physics_column_type.F90 @@ -1,8 +1,7 @@ module physics_column_type use shr_kind_mod, only: r8 => shr_kind_r8 - use ccpp_kinds, only: kind_phys - + use ccpp_kinds, only: kind_pcol => kind_phys implicit none private @@ -14,17 +13,23 @@ module physics_column_type module procedure copy_phys_col end interface + !Physics column kind: + public kind_pcol + + !physics column fill value: + real(kind_pcol), parameter :: pcol_fill_val = -1e36_kind_pcol + !> \section arg_table_physics_column_t Argument Table !! \htmlinclude physics_column_t.html type, public :: physics_column_t ! A type to hold all grid and task information for a single physics column ! Column information - real(kind_phys) :: lat_rad = -HUGE(1.0_r8) ! Latitude in radians - real(kind_phys) :: lon_rad = -HUGE(1.0_r8) ! Longitude in radians - real(kind_phys) :: lat_deg = -HUGE(1.0_r8) ! Latitude in degrees - real(kind_phys) :: lon_deg = -HUGE(1.0_r8) ! Longitude in degrees - real(kind_phys) :: area = -1.0_r8 ! Column area - real(kind_phys) :: weight = -1.0_r8 ! Column integration weight + real(kind_pcol) :: lat_rad = pcol_fill_val ! Latitude in radians + real(kind_pcol) :: lon_rad = pcol_fill_val ! Longitude in radians + real(kind_pcol) :: lat_deg = pcol_fill_val ! Latitude in degrees + real(kind_pcol) :: lon_deg = pcol_fill_val ! Longitude in degrees + real(kind_pcol) :: area = pcol_fill_val ! Column area + real(kind_pcol) :: weight = pcol_fill_val ! Column integration weight ! File decomposition integer :: global_col_num = -1 ! Location on data file integer :: coord_indices(2) = -1 ! Global lon/lat (if used) diff --git a/src/physics/utils/physics_grid.F90 b/src/physics/utils/physics_grid.F90 index 8211456f..5ccbefc3 100644 --- a/src/physics/utils/physics_grid.F90 +++ b/src/physics/utils/physics_grid.F90 @@ -20,6 +20,7 @@ module physics_grid public :: get_area_p ! area of a physics column in radians squared public :: get_rlat_all_p ! latitudes of physics cols on task (radians) public :: get_rlon_all_p ! longitudes of physics cols on task (radians) + public :: get_dyn_col_p ! dynamics local blk number and blk offset(s) public :: global_index_p ! global column index of a physics column public :: local_index_p ! local column index of a physics column public :: get_grid_dims ! return grid dimensions @@ -516,6 +517,44 @@ end subroutine get_rlon_all_p !======================================================================== + subroutine get_dyn_col_p(index, blk_num, blk_ind) + use cam_logfile, only: iulog + use cam_abortutils, only: endrun + ! Return the dynamics local block number and block offset(s) for + ! the physics column indicated by . + + ! Dummy arguments + integer, intent(in) :: index ! index of local physics column + integer, intent(out) :: blk_num ! Local dynamics block index + integer, intent(out) :: blk_ind(:) ! Local dynamics block offset(s) + ! 
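get_dyn_col_p follows the pattern of the other physics_grid accessors: the caller passes a local physics column index and receives the dynamics block number plus block offset(s), with unused offset entries returned as -1. A hedged caller sketch (the routine name is hypothetical and the offset array is sized generously, since the required length depends on the dycore):

    subroutine print_dyn_location(icol)
      ! Assumes the physics grid has already been initialized.
      use physics_grid, only: get_dyn_col_p
      integer, intent(in) :: icol       ! local physics column index
      integer :: blk_num                ! dynamics block number
      integer :: blk_ind(2)             ! block offset(s); unused entries come back as -1

      call get_dyn_col_p(icol, blk_num, blk_ind)
      write(*,'(a,i0,a,i0,a,2(1x,i0))') 'column ', icol, ' -> block ', blk_num, &
           ', offsets:', blk_ind
    end subroutine print_dyn_location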
Local variables + integer :: off_size + character(len=128) :: errmsg + character(len=*), parameter :: subname = 'get_dyn_col_p_index: ' + + if (.not. phys_grid_initialized()) then + call endrun(subname//'physics grid not initialized') + else if ((index < 1) .or. (index > columns_on_task)) then + write(errmsg, '(a,2(a,i0))') subname, 'index (', index, & + ') out of range (1 to ', columns_on_task + write(iulog, *) trim(errmsg) + call endrun(trim(errmsg)) + else + off_size = SIZE(phys_columns(index)%dyn_block_index, 1) + if (SIZE(blk_ind, 1) < off_size) then + call endrun(subname//'blk_ind too small') + end if + blk_num = phys_columns(index)%local_dyn_block + blk_ind(1:off_size) = phys_columns(index)%dyn_block_index(1:off_size) + if (SIZE(blk_ind, 1) > off_size) then + blk_ind(off_size+1:) = -1 + end if + end if + + end subroutine get_dyn_col_p + + !======================================================================== + integer function global_index_p(index) use cam_logfile, only: iulog use cam_abortutils, only: endrun diff --git a/src/utils/hycoef.F90 b/src/utils/hycoef.F90 new file mode 100644 index 00000000..670ed2bd --- /dev/null +++ b/src/utils/hycoef.F90 @@ -0,0 +1,403 @@ +module hycoef + +use shr_kind_mod, only: r8 => shr_kind_r8 +use spmd_utils, only: masterproc +use pmgrid, only: plev, plevp +use cam_logfile, only: iulog +use cam_abortutils, only: endrun +use pio, only: file_desc_t, var_desc_t, & + pio_inq_dimid, pio_inq_dimlen, pio_inq_varid, & + pio_double, pio_def_dim, pio_def_var, & + pio_put_var, pio_get_var, & + pio_seterrorhandling, PIO_BCAST_ERROR, PIO_NOERR + +implicit none +private +save + +!----------------------------------------------------------------------- +! +! Purpose: Hybrid level definitions: p = a*p0 + b*ps +! interfaces p(k) = hyai(k)*ps0 + hybi(k)*ps +! midpoints p(k) = hyam(k)*ps0 + hybm(k)*ps +! +!----------------------------------------------------------------------- + +real(r8), public, target :: hyai(plevp) ! ps0 component of hybrid coordinate - interfaces +real(r8), public, target :: hyam(plev) ! ps0 component of hybrid coordinate - midpoints +real(r8), public, target :: hybi(plevp) ! ps component of hybrid coordinate - interfaces +real(r8), public, target :: hybm(plev) ! ps component of hybrid coordinate - midpoints + +real(r8), public :: etamid(plev) ! hybrid coordinate - midpoints + +real(r8), public :: hybd(plev) ! difference in b (hybi) across layers +real(r8), public :: hypi(plevp) ! reference pressures at interfaces +real(r8), public :: hypm(plev) ! reference pressures at midpoints +real(r8), public :: hypd(plev) ! reference pressure layer thickness +#ifdef planet_mars +real(r8), public, protected :: ps0 = 6.0e1_r8 ! Base state surface pressure (pascals) +real(r8), public, protected :: psr = 6.0e1_r8 ! Reference surface pressure (pascals) +#else +real(r8), public, protected :: ps0 = 1.0e5_r8 ! Base state surface pressure (pascals) +real(r8), public, protected :: psr = 1.0e5_r8 ! Reference surface pressure (pascals) +#endif +real(r8), target :: alev(plev) ! level values (pascals) for 'lev' coord +real(r8), target :: ailev(plevp) ! interface level values for 'ilev' coord + +integer, public :: nprlev ! 
number of pure pressure levels at top
+
+public hycoef_init
+
+type(var_desc_t) :: hyam_desc, hyai_desc, hybm_desc, hybi_desc, p0_desc
+public init_restart_hycoef, write_restart_hycoef
+
+!=======================================================================
+contains
+!=======================================================================
+
+subroutine hycoef_init(file, psdry)
+
+   !use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t
+
+   !-----------------------------------------------------------------------
+   !
+   ! Purpose:
+   ! Defines the locations of model interfaces from input data in the
+   ! hybrid coordinate scheme.  Actual pressure values of model level
+   ! interfaces are determined elsewhere from the fields set here.
+   !
+   ! Method:
+   !  the following fields are set:
+   !   hyai     fraction of reference pressure used for interface pressures
+   !   hyam     fraction of reference pressure used for midpoint pressures
+   !   hybi     fraction of surface pressure used for interface pressures
+   !   hybm     fraction of surface pressure used for midpoint pressures
+   !   hybd     difference of hybi's
+   !   hypi     reference state interface pressures
+   !   hypm     reference state midpoint pressures
+   !   hypd     reference state layer thicknesses
+   !   hypdln   reference state layer thicknesses (log p)
+   !   hyalph   distance from interface to level (used in integrals)
+   !   prsfac   log pressure extrapolation factor (used to compute psl)
+   !
+   ! Author: B. Boville
+   !
+   !-----------------------------------------------------------------------
+
+   ! arguments
+   type(file_desc_t), intent(inout) :: file
+   logical, optional, intent(in)    :: psdry  ! set true when coordinate is based
+                                              ! on dry surface pressure
+
+   ! local variables
+   integer  :: k             ! Level index
+   logical  :: dry_coord
+   real(r8) :: amean, bmean, atest, btest, eps
+!   type(formula_terms_t) :: formula_terms ! For the 'lev' and 'ilev' coords
+   !-----------------------------------------------------------------------
+
+   ! check for dry pressure coordinate (default is moist)
+   dry_coord = .false.
+   if (present(psdry)) dry_coord = psdry
+
+   ! read hybrid coefficients
+   call hycoef_read(file)
+
+   ! Set layer locations
+   nprlev = 0
+   do k=1,plev
+
+      ! Interfaces.  Set nprlev to the interface above, the first time a
+      ! nonzero surface pressure contribution is found.  "nprlev"
+      ! identifies the lowest pure pressure interface.
+
+      if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1
+   end do
+
+   ! Set nprlev if no nonzero b's have been found.  All interfaces are
+   ! pure pressure.  A pure pressure model requires other changes as well.
+   if (nprlev==0) nprlev = plev + 2
+
+   ! Set delta sigma part of layer thickness and reference state midpoint
+   ! pressures
+   do k=1,plev
+      hybd(k) = hybi(k+1) - hybi(k)
+      hypm(k) = hyam(k)*ps0 + hybm(k)*psr
+      etamid(k) = hyam(k) + hybm(k)
+   end do
+
+   ! Reference state interface pressures
+   do k=1,plevp
+      hypi(k) = hyai(k)*ps0 + hybi(k)*psr
+   end do
+
+   ! Reference state layer thicknesses
+   do k=1,plev
+      hypd(k) = hypi(k+1) - hypi(k)
+   end do
+
+   ! Test that A's and B's at full levels are arithmetic means of A's and
+   ! B's at interfaces
+   eps = 1.e-05_r8
+   do k = 1,plev
+      amean = ( hyai(k+1) + hyai(k) )*0.5_r8
+      bmean = ( hybi(k+1) + hybi(k) )*0.5_r8
+      if(amean == 0._r8 .and. hyam(k) == 0._r8) then
+         atest = 0._r8
+      else
+         atest = abs( amean - hyam(k) )/ ( 0.5_r8*( abs(amean + hyam(k)) ) )
+      endif
+      if(bmean == 0._r8 .and. 
hybm(k) == 0._r8) then + btest = 0._r8 + else + btest = abs( bmean - hybm(k) )/ ( 0.5_r8*( abs(bmean + hybm(k)) ) ) + endif + if (atest > eps) then + if (masterproc) then + write(iulog,9850) + write(iulog,*)'k,atest,eps=',k,atest,eps + end if + end if + + if (btest > eps) then + if (masterproc) then + write(iulog,9850) + write(iulog,*)'k,btest,eps=',k,btest,eps + end if + end if + end do + + ! Add the information for the 'lev' and 'ilev' mdim history coordinates + ! + ! The hybrid coordinate used by the SE dycore is based on a dry surface + ! pressure. Hence it is the dry pressure rather than actual pressure + ! that is computed by the formula_terms attribute. This coordinate is + ! not described by the formula + ! atmosphere_hybrid_sigma_pressure_coordinate since the formula + ! associated with that name uses actual pressure values. Furthermore, + ! the actual pressure field cannot be reconstructed from the hybrid + ! coefficients and the surface pressure field. Hence in the case of a + ! dry coordinate we add neither the standard_name nor the formula_terms + ! attributes to the lev and ilev coordinates. + + ! 0.01 converts Pascals to millibars + alev(:plev) = 0.01_r8*ps0*(hyam(:plev) + hybm(:plev)) + ailev(:plevp) = 0.01_r8*ps0*(hyai(:plevp) + hybi(:plevp)) + +!Undo once history output has been developed -JN: +#if 0 + + if (dry_coord) then + call add_vert_coord('lev', plev, & + 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & + positive='down') + call add_hist_coord('hyam', plev, & + 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') + call add_hist_coord('hybm', plev, & + 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') + else + + formula_terms%a_name = 'hyam' + formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' + formula_terms%a_values => hyam + formula_terms%b_name = 'hybm' + formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' + formula_terms%b_values => hybm + formula_terms%p0_name = 'P0' + formula_terms%p0_long_name = 'reference pressure' + formula_terms%p0_units = 'Pa' + formula_terms%p0_value = ps0 + formula_terms%ps_name = 'PS' + + call add_vert_coord('lev', plev, & + 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & + positive='down', & + standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & + formula_terms=formula_terms) + end if + + if (dry_coord) then + call add_vert_coord('ilev', plevp, & + 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & + positive='down') + call add_hist_coord('hyai', plevp, & + 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') + call add_hist_coord('hybi', plevp, & + 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') + else + formula_terms%a_name = 'hyai' + formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' + formula_terms%a_values => hyai + formula_terms%b_name = 'hybi' + formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' + formula_terms%b_values => hybi + formula_terms%p0_name = 'P0' + formula_terms%p0_long_name = 'reference pressure' + formula_terms%p0_units = 'Pa' + formula_terms%p0_value = ps0 + formula_terms%ps_name = 'PS' + + call add_vert_coord('ilev', plevp, & + 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & + positive='down', & + standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & + formula_terms=formula_terms) + end if + +!Undo once history output has been developed -JN: +#endif + + if (masterproc) then + write(iulog,'(a)')' Layer 
Locations (*1000) ' + do k=1,plev + write(iulog,9800)k,hyai(k),hybi(k),hyai(k)+hybi(k) + write(iulog,9810) hyam(k), hybm(k), hyam(k)+hybm(k) + end do + + write(iulog,9800)plevp,hyai(plevp),hybi(plevp),hyai(plevp)+hybi(plevp) + write(iulog,9820) + do k=1,plev + write(iulog,9830) k, hypi(k) + write(iulog,9840) hypm(k), hypd(k) + end do + write(iulog,9830) plevp, hypi(plevp) + end if + +9800 format( 1x, i3, 3p, 3(f10.4,10x) ) +9810 format( 1x, 3x, 3p, 3(10x,f10.4) ) +9820 format(1x,'reference pressures (Pa)') +9830 format(1x,i3,f15.4) +9840 format(1x,3x,15x,2f15.4) +9850 format('HYCOEF: A and/or B vertical level coefficients at full',/, & + ' levels are not the arithmetic mean of half-level values') + +end subroutine hycoef_init + +!======================================================================= + +subroutine init_restart_hycoef(File, vdimids) + + type(file_desc_t), intent(inout) :: File + integer, intent(out) :: vdimids(:) + + ! PIO traps errors internally, no need to check ierr + + integer :: ierr + + ierr = PIO_Def_Dim(File, 'lev', plev, vdimids(1)) + ierr = PIO_Def_Dim(File, 'ilev', plevp, vdimids(2)) + + ierr = pio_def_var(File, 'hyai', pio_double, vdimids(2:2), hyai_desc) + ierr = pio_def_var(File, 'hyam', pio_double, vdimids(1:1), hyam_desc) + ierr = pio_def_var(File, 'hybi', pio_double, vdimids(2:2), hybi_desc) + ierr = pio_def_var(File, 'hybm', pio_double, vdimids(1:1), hybm_desc) + + ierr = pio_def_var(File, 'P0', pio_double, p0_desc) + +end subroutine init_restart_hycoef + +!======================================================================= + +subroutine write_restart_hycoef(file) + + type(file_desc_t), intent(inout) :: File + + ! PIO traps errors internally, no need to check ierr + + integer :: ierr + + ierr = pio_put_var(File, hyai_desc, hyai) + ierr = pio_put_var(File, hyam_desc, hyam) + ierr = pio_put_var(File, hybi_desc, hybi) + ierr = pio_put_var(File, hybm_desc, hybm) + + ierr = pio_put_var(File, p0_desc, ps0) + +end subroutine write_restart_hycoef + +!======================================================================= + +subroutine hycoef_read(File) + + ! This code is used both for initial and restart reading. + + type(file_desc_t), intent(inout) :: File + + integer :: flev, filev, lev_dimid, ierr + integer :: pio_errtype + + type(var_desc_t) :: p0_desc + + character(len=*), parameter :: routine = 'hycoef_read' + !---------------------------------------------------------------------------- + + ! PIO traps errors internally, no need to check ierr + + ierr = PIO_Inq_DimID(File, 'lev', lev_dimid) + ierr = PIO_Inq_dimlen(File, lev_dimid, flev) + if (plev /= flev) then + write(iulog,*) routine//': ERROR: file lev does not match model. 
lev (file, model):',flev, plev + call endrun(routine//': ERROR: file lev does not match model.') + end if + + ierr = PIO_Inq_DimID(File, 'ilev', lev_dimid) + ierr = PIO_Inq_dimlen(File, lev_dimid, filev) + if (plevp /= filev) then + write(iulog,*) routine//':ERROR: file ilev does not match model plevp (file, model):',filev, plevp + call endrun(routine//':ERROR: file ilev does not match model.') + end if + + ierr = pio_inq_varid(File, 'hyai', hyai_desc) + ierr = pio_inq_varid(File, 'hyam', hyam_desc) + ierr = pio_inq_varid(File, 'hybi', hybi_desc) + ierr = pio_inq_varid(File, 'hybm', hybm_desc) + + ierr = pio_get_var(File, hyai_desc, hyai) + ierr = pio_get_var(File, hybi_desc, hybi) + ierr = pio_get_var(File, hyam_desc, hyam) + ierr = pio_get_var(File, hybm_desc, hybm) + + if (masterproc) then + write(iulog,*) routine//': read hyai, hybi, hyam, hybm' + end if + + ! Check whether file contains value for P0. If it does then use it + + ! Set PIO to return error codes. + call pio_seterrorhandling(file, PIO_BCAST_ERROR, pio_errtype) + + ierr = pio_inq_varid(file, 'P0', p0_desc) + if (ierr == PIO_NOERR) then + ierr = pio_get_var(file, p0_desc, ps0) + if (ierr /= PIO_NOERR) then + call endrun(routine//': reading P0.') + end if + psr = ps0 + + if (masterproc) then + write(iulog,*) routine//': read P0 value: ', ps0 + end if + + end if + + ! Put the error handling back the way it was + call pio_seterrorhandling(file, pio_errtype) + +#if ( defined OFFLINE_DYN ) + ! make sure top interface is non zero for fv dycore + if (hyai(1) .eq. 0._r8) then + if (hybm(1) .ne. 0.0_r8) then + hyai(1) = hybm(1)*1.e-2_r8 + else if (hyam(1) .ne. 0.0_r8) then + hyai(1) = hyam(1)*1.e-2_r8 + else + call endrun('Not able to set hyai(1) to non-zero.') + end if + end if +#endif + +end subroutine hycoef_read + +!======================================================================= + +end module hycoef From 7e6a42dd28f3133575a36635ab86308e3b519fdb Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 13 Jan 2021 13:37:41 -0700 Subject: [PATCH 05/45] Update SE dycore code to head of ESCOMP/CAM cam_development branch. --- src/dynamics/se/dp_coupling.F90 | 25 +-- src/dynamics/se/dyn_comp.F90 | 132 ++++++++-------- src/dynamics/se/dyn_grid.F90 | 264 +++++++++++++++++++++++--------- src/utils/cam_field_read.F90 | 12 +- src/utils/cam_pio_utils.F90 | 107 ++++++++++++- 5 files changed, 373 insertions(+), 167 deletions(-) diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 5be7176e..7e2b592f 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -10,7 +10,7 @@ module dp_coupling use constituents, only: pcnst use spmd_dyn, only: local_dp_map -use spmd_utils, only: mpicom, iam +use spmd_utils, only: iam use dyn_grid, only: TimeLevel, edgebuf use dyn_comp, only: dyn_export_t, dyn_import_t @@ -21,7 +21,6 @@ module dp_coupling use dp_mapping, only: nphys_pts -use cam_logfile, only: iulog use perf_mod, only: t_startf, t_stopf, t_barrierf use cam_abortutils, only: endrun @@ -33,7 +32,6 @@ module dp_coupling use dof_mod, only: UniquePoints, PutUniquePoints use element_mod, only: element_t -use fvm_control_volume_mod, only: fvm_struct implicit none private @@ -91,18 +89,13 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) !real (kind=r8), allocatable :: frontgf_phys(:,:,:) !real (kind=r8), allocatable :: frontga_phys(:,:,:) - integer :: ncols,i,j,ierr,k,iv + integer :: ncols,ierr integer :: col_ind, blk_ind(1), m, m_cnst - integer :: tsize ! 
amount of data per grid point passed to physics - integer, allocatable :: bpter(:,:) ! offsets into block buffer for packing data - integer :: cpter(pcols,0:pver) ! offsets into chunk buffer for unpacking data integer :: nphys - real(r8), allocatable :: bbuffer(:), cbuffer(:) ! transpose buffers real(r8), allocatable :: qgll(:,:,:,:) real(r8) :: inv_dp3d(np,np,nlev) integer :: tl_f, tl_qdp_np0, tl_qdp_np1 - logical :: lmono !---------------------------------------------------------------------------- if (.not. local_dp_map) then @@ -575,9 +568,8 @@ subroutine derived_phys_dry(phys_state, phys_tend) use phys_control, only: waccmx_is use geopotential_t, only: geopotential_t ! use check_energy, only: check_energy_timestep_init - use hycoef, only: hyai, hybi, ps0 + use hycoef, only: hyai, ps0 use shr_vmath_mod, only: shr_vmath_log - use gmean_mod, only: gmean ! use qneg_module, only: qneg3 use dyn_comp, only: ixo, ixo2, ixh, ixh2 @@ -586,14 +578,6 @@ subroutine derived_phys_dry(phys_state, phys_tend) type(physics_tend ), intent(inout) :: phys_tend ! local variables - real(r8) :: qbot ! bottom level q before change - real(r8) :: qbotm1 ! bottom-1 level q before change - real(r8) :: dqreq ! q change at pver-1 required to remove q 0') end if @@ -1245,13 +1248,13 @@ subroutine read_inidat(dyn_in) ! Set mask to indicate which columns are active nullify(ldof) - call cam_grid_get_gcid(cam_grid_id('GLL'), ldof) + call cam_grid_get_gcid(cam_grid_id((ini_grid_name), ldof) allocate(pmask(npsq*nelemd)) pmask(:) = (ldof /= 0) ! lat/lon needed in radians - latvals_deg => cam_grid_get_latvals(cam_grid_id('GLL')) - lonvals_deg => cam_grid_get_lonvals(cam_grid_id('GLL')) + latvals_deg => cam_grid_get_latvals(cam_grid_id(ini_grid_name)) + lonvals_deg => cam_grid_get_lonvals(cam_grid_id(ini_grid_name)) allocate(latvals(np*np*nelemd)) allocate(lonvals(np*np*nelemd)) latvals(:) = latvals_deg(:)*deg2rad @@ -1262,7 +1265,7 @@ subroutine read_inidat(dyn_in) ! The grid name is defined in dyn_grid::define_cam_grids. ! Get the number of columns in the global GLL grid. - call cam_grid_dimensions('GLL', dims) + call cam_grid_dimensions(ini_grid_name, dims) dyn_cols = dims(1) ! Set ICs. Either from analytic expressions or read from file. @@ -2075,6 +2078,9 @@ end subroutine set_phis subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) + ! This routine is only called when data will be read from the initial file. It is not + ! called when the initial file is only supplying vertical coordinate info. + type(file_desc_t), pointer :: file type(element_t), pointer :: elem(:) integer, intent(in) :: dyn_cols @@ -2095,22 +2101,15 @@ subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) !---------------------------------------------------------------------------- ! Check that number of columns in IC file matches grid definition. - ! The dimension of the unstructured grid in the IC file can either be 'ncol' - ! or 'ncol_d'. Check for ncol_d first since if a file contains distinct GLL - ! and physics grids the GLL grid will use dimension ncol_d. 
- ierr = pio_inq_dimid(file, 'ncol_d', ncol_did) + + call cam_grid_get_dim_names(cam_grid_id(ini_grid_name), dimname, dimname2) + + ierr = pio_inq_dimid(file, trim(dimname), ncol_did) if (ierr /= PIO_NOERR) then - if (dyn_ok) then - ierr = pio_inq_dimid(file, 'ncol', ncol_did) - if (ierr /= PIO_NOERR) then - call endrun(subname//': ERROR: neither ncol nor ncol_d dimension found in ' & - //trim(file_desc)//' file') - end if - else - call endrun(trim(subname)//': ERROR: ncol dimension not found in '//trim(file_desc) & - //' file') - end if + call endrun(subname//': ERROR: either ncol or ncol_d dimension not found in ' & + //trim(file_desc)//' file') end if + ierr = pio_inq_dimlen(file, ncol_did, ncol_size) if (ncol_size /= dyn_cols) then if (masterproc) then @@ -2120,36 +2119,31 @@ subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) call endrun(subname//': ERROR: dimension ncol size not same as in ncdata file') end if - ! The dimname that's passed to the read_dyn_var routines must match the - ! dimname that's in the GLL grid object definition. The mapping info used by - ! pio is constructed using the grid object. So this dimname is not necessarily - ! the one in the IC (or topo) file. - grid_id = cam_grid_id('GLL') - call cam_grid_get_dim_names(grid_id, dimname, dimname2) - - ! If coordinates come from an initial file containing only the GLL grid then the - ! the variable names will be lat/lon. On the other hand if the file contains both - ! GLL and a distinct physics grid, then the variable names will be lat_d/lon_d. - ! Check whether lat_d/lon_d are present and use them if they are. Otherwise use - ! lat/lon. - if (dyn_field_exists(file, 'lat_d', required=.false.)) then - coordname = 'lat_d' - else + ! Set coordinate name associated with dimname. + if (dimname == 'ncol') then coordname = 'lat' + else + coordname = 'lat_d' end if - !! Check to make sure file is in correct order + !! Check to make sure file is in correct order call read_dyn_var(coordname, file, dimname, dbuf2) found = .true. do ie = 1, nelemd indx = 1 do j = 1, np do i = 1, np - if ((abs(dbuf2(indx,ie)) > 1.e-12_r8) .and. & - (abs((elem(ie)%spherep(i,j)%lat*rad2deg - dbuf2(indx,ie))/dbuf2(indx,ie)) > 1.0e-10_r8)) then - write(iulog, *) 'XXG ',iam,') ',ie,i,j,elem(ie)%spherep(i,j)%lat,dbuf2(indx,ie)*deg2rad - call shr_sys_flush(iulog) - found = .false. + if (abs(dbuf2(indx,ie)) > 1.e-12_r8) then + if (abs((elem(ie)%spherep(i,j)%lat*rad2deg - dbuf2(indx,ie)) / & + dbuf2(indx,ie)) > 1.0e-10_r8) then + write(iulog, '(2a,4(i0,a),f11.5,a,f11.5)') & + "ncdata file latitudes not in correct column order", & + ' on task ', iam, ': elem(', ie, ')%spherep(', i, & + ', ', j, ')%lat = ', elem(ie)%spherep(i,j)%lat, & + ' /= ', dbuf2(indx, ie)*deg2rad + call shr_sys_flush(iulog) + found = .false. + end if end if indx = indx + 1 end do @@ -2159,10 +2153,10 @@ subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) call endrun("ncdata file latitudes not in correct column order") end if - if (dyn_field_exists(file, 'lon_d', required=.false.)) then - coordname = 'lon_d' - else + if (dimname == 'ncol') then coordname = 'lon' + else + coordname = 'lon_d' end if call read_dyn_var(coordname, file, dimname, dbuf2) @@ -2170,11 +2164,17 @@ subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) indx = 1 do j = 1, np do i = 1, np - if ((abs(dbuf2(indx,ie)) > 1.e-12_r8) .and. 
& - (abs((elem(ie)%spherep(i,j)%lon*rad2deg - dbuf2(indx,ie))/dbuf2(indx,ie)) > 1.0e-10_r8)) then - write(iulog, *) 'XXG ',iam,') ',ie,i,j,elem(ie)%spherep(i,j)%lon,dbuf2(indx,ie)*deg2rad - call shr_sys_flush(iulog) - found = .false. + if (abs(dbuf2(indx,ie)) > 1.e-12_r8) then + if (abs((elem(ie)%spherep(i,j)%lon*rad2deg - dbuf2(indx,ie)) / & + dbuf2(indx,ie)) > 1.0e-10_r8) then + write(iulog, '(2a,4(i0,a),f11.5,a,f11.5)') & + "ncdata file longitudes not in correct column order", & + ' on task ', iam, ': elem(', ie, ')%spherep(', i, & + ', ', j, ')%lon = ', elem(ie)%spherep(i,j)%lon, & + ' /= ', dbuf2(indx, ie)*deg2rad + call shr_sys_flush(iulog) + found = .false. + end if end if indx = indx + 1 end do @@ -2183,6 +2183,7 @@ subroutine check_file_layout(file, elem, dyn_cols, file_desc, dyn_ok, dimname) if (.not. found) then call endrun("ncdata file longitudes not in correct column order") end if + end subroutine check_file_layout !======================================================================================== @@ -2235,20 +2236,23 @@ subroutine read_dyn_field_2d(fieldname, fh, dimname, buffer) ! Local variables logical :: found + real(r8) :: fillvalue !---------------------------------------------------------------------------- buffer = 0.0_r8 ! call infld(trim(fieldname), fh, dimname, 1, npsq, 1, nelemd, buffer, & ! found, gridname='GLL') !Remove if below works! -JN - call cam_read_field(trim(fieldname), fh, buffer, found, gridname='GLL') + call cam_read_field(trim(fieldname), fh, buffer, found, & + gridname=ini_grid_name, fillvalue=fillvalue) if(.not. found) then call endrun('READ_DYN_FIELD_2D: Could not find '//trim(fieldname)//' field on input datafile') end if ! This code allows use of compiler option to set uninitialized values - ! to NaN. In that case infld can return NaNs where the element GLL points - ! are not "unique columns" - where (shr_infnan_isnan(buffer)) buffer = 0.0_r8 + ! to NaN. In that case cam_read_feild can return NaNs where the element + ! GLL points are not "unique columns". + ! Set NaNs or fillvalue points to zero: + where (shr_infnan_isnan(buffer) .or. (buffer==fillvalue)) buffer = 0.0_r8 end subroutine read_dyn_field_2d @@ -2264,21 +2268,23 @@ subroutine read_dyn_field_3d(fieldname, fh, dimname, buffer) ! Local variables logical :: found + real(r8) :: fillvalue !---------------------------------------------------------------------------- buffer = 0.0_r8 ! call infld(trim(fieldname), fh, dimname, 'lev', 1, npsq, 1, nlev, & ! 1, nelemd, buffer, found, gridname='GLL') !Remove if below works! -JN call cam_read_field(trim(fieldname), fh, buffer, found, 'lev', (/1, nlev/), & - dim3_pos=2, gridname='GLL') + dim3_pos=2, gridname=ini_gird_name, fillvalue=fillvalue) if(.not. found) then call endrun('READ_DYN_FIELD_3D: Could not find '//trim(fieldname)//' field on input datafile') end if ! This code allows use of compiler option to set uninitialized values - ! to NaN. In that case infld can return NaNs where the element GLL points - ! are not "unique columns" - where (shr_infnan_isnan(buffer)) buffer = 0.0_r8 + ! to NaN. In that case infld can return NaNs where the element GLL + ! points are not "unique columns". + ! Set NaNs or fillvalue points to zero: + where (shr_infnan_isnan(buffer) .or. 
(buffer==fillvalue) buffer = 0.0_r8 end subroutine read_dyn_field_3d diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 87ef19f4..dd682883 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -57,6 +57,10 @@ module dyn_grid integer, parameter :: dyn_decomp = 101 ! The SE dynamics grid integer, parameter :: fvm_decomp = 102 ! The FVM (CSLAM) grid integer, parameter :: physgrid_d = 103 ! physics grid on dynamics decomp +integer, parameter :: ini_decomp = 104 ! alternate dynamics grid for reading initial file + +character(len=3), protected :: ini_grid_name + integer, parameter :: ptimelevels = 2 type (TimeLevel_t) :: TimeLevel ! main time level struct (used by tracers) @@ -64,13 +68,14 @@ module dyn_grid type(element_t), pointer :: elem(:) => null() ! local GLL elements for this task type(fvm_struct), pointer :: fvm(:) => null() ! local FVM elements for this task -public :: & - dyn_decomp, & - ptimelevels, & - TimeLevel, & - hvcoord, & - elem, & - fvm, & +public :: & + dyn_decomp, & + ini_grid_name, & + ptimelevels, & + TimeLevel, & + hvcoord, & + elem, & + fvm, & edgebuf public :: dyn_grid_init @@ -90,6 +95,9 @@ module dyn_grid character(len=shr_kind_cl), public :: se_grid_filename = '' logical, public :: se_write_gll_corners = .false. +! Name of horizontal grid dimension in initial file. +character(len=6) :: ini_grid_hdim_name = ' ' + type block_global_data integer :: UniquePtOffset ! global index of first column in element integer :: NumUniqueP ! number of unique columns in element @@ -97,7 +105,7 @@ module dyn_grid integer :: Owner ! task id of element owner end type block_global_data -type(physics_column_t), pointer :: local_dyn_columns(:) => NULL() +type(physics_column_t), allocatable, target :: local_dyn_columns(:) ! number of global dynamics columns. Set by SE dycore init. integer :: ngcols_d = 0 @@ -215,6 +223,9 @@ subroutine dyn_grid_init() ! initial SE (subcycled) nstep TimeLevel%nstep0 = 0 + ! determine whether initial file uses 'ncol' or 'ncol_d' + call get_hdim_name(fh_ini, ini_grid_hdim_name) + ! Define the dynamics and physics grids on the dynamics decompostion. ! Physics grid on the physics decomposition is defined in phys_grid_init. call define_cam_grids() @@ -270,100 +281,133 @@ end subroutine dyn_grid_init !============================================================================== subroutine get_dyn_grid_info(hdim1_d, hdim2_d, num_lev, & - dycore_name, index_model_top_layer, index_surface_layer, dyn_columns) - use physconst, only: pi - use cam_abortutils, only: endrun - use spmd_utils, only: iam + index_model_top_layer, index_surface_layer, unstructured, dyn_columns) + + use physconst, only: pi + use cam_abortutils, only: endrun + use spmd_utils, only: iam + + !SE dycore: + use coordinate_systems_mod, only: spherical_polar_t + ! Dummy arguments integer, intent(out) :: hdim1_d ! # longitudes or grid size integer, intent(out) :: hdim2_d ! # latitudes or 1 integer, intent(out) :: num_lev ! # levels - character(len=*), intent(out) :: dycore_name integer, intent(out) :: index_model_top_layer integer, intent(out) :: index_surface_layer - type(physics_column_t), pointer :: dyn_columns(:) ! Phys col in Dyn decomp + logical, intent(out) :: unstructured + ! dyn_columns will contain a copy of the physics column info local to this + ! dynamics task + type(physics_column_t), allocatable, intent(out) :: dyn_columns(:) ! 
Local variables integer :: lindex integer :: gindex integer :: elem_ind, col_ind, ii, jj integer :: num_local_cols + type(spherical_polar_t) :: coord + real(r8) :: dcoord real(kind_pcol), parameter :: radtodeg = 180.0_kind_pcol / pi real(kind_pcol), parameter :: degtorad = pi / 180.0_kind_pcol character(len=*), parameter :: subname = 'get_dyn_grid_info' - if (associated(dyn_columns)) then - call endrun(subname//': dyn_columns must be unassociated pointer') - end if + unstructured = .true. ! SE is an unstructured dycore + if (fv_nphys > 0) then ! physics uses an FVM grid - num_local_cols = nelemd * nc * nc + num_local_cols = nelemd * fv_nphys * fv_nphys else num_local_cols = 0 do elem_ind = 1, nelemd num_local_cols = num_local_cols + elem(elem_ind)%idxP%NumUniquePts end do end if - if (associated(local_dyn_columns)) then + if (allocated(local_dyn_columns)) then ! Check for correct number of columns if (size(local_dyn_columns) /= num_local_cols) then call endrun(subname//': called with inconsistent column numbers') end if else allocate(local_dyn_columns(num_local_cols)) - end if - dyn_columns => local_dyn_columns - hdim1_d = ngcols_d - hdim2_d = 1 - num_lev = nlev - dycore_name = 'SE' - index_model_top_layer = 1 - index_surface_layer = nlev - lindex = 0 - do elem_ind = 1, nelemd if (fv_nphys > 0) then ! physics uses an FVM grid - do col_ind = 0, (nc * nc) - 1 - ii = MOD(col_ind, nc) + 1 - jj = col_ind / nc - dyn_columns(lindex)%lat_rad = real(fvm(elem_ind)%center_cart(ii,jj)%lat, kind_pcol) - dyn_columns(lindex)%lat_deg = dyn_columns(lindex)%lat_rad * radtodeg - dyn_columns(lindex)%lon_rad = real(fvm(elem_ind)%center_cart(ii,jj)%lon, kind_pcol) - dyn_columns(lindex)%lon_deg = dyn_columns(lindex)%lon_rad * radtodeg - dyn_columns(lindex)%area = real(fvm(elem_ind)%area_sphere_physgrid(ii,jj), kind_pcol) - dyn_columns(lindex)%weight = dyn_columns(lindex)%area - ! File decomposition - gindex = ((elem(elem_ind)%GlobalId-1) * nc * nc) + col_ind - dyn_columns(lindex)%global_col_num = gindex - ! Note, coord_indices not used for unstructured dycores - ! Dynamics decomposition - dyn_columns(lindex)%dyn_task = iam - dyn_columns(lindex)%local_dyn_block = elem_ind - dyn_columns(lindex)%global_dyn_block = elem(elem_ind)%GlobalId - allocate(dyn_columns(lindex)%dyn_block_index(1)) - dyn_columns(lindex)%dyn_block_index(1) = col_ind - end do + hdim1_d = nelem * fv_nphys * fv_nphys else - do col_ind = 1, elem(elem_ind)%idxP%NumUniquePts - lindex = lindex + 1 - ii = elem(elem_ind)%idxP%ia(col_ind) - jj = elem(elem_ind)%idxP%ja(col_ind) - - dyn_columns(lindex)%lat_rad = real(elem(elem_ind)%spherep(ii,jj)%lat, kind_pcol) - dyn_columns(lindex)%lat_deg = dyn_columns(lindex)%lat_rad * radtodeg - dyn_columns(lindex)%lon_rad = real(elem(elem_ind)%spherep(ii,jj)%lon, kind_pcol) - dyn_columns(lindex)%lon_deg = dyn_columns(lindex)%lon_rad * radtodeg - dyn_columns(lindex)%area = real(1.0_kind_pcol / elem(elem_ind)%rspheremp(ii,jj), kind_pcol) - dyn_columns(lindex)%weight = dyn_columns(lindex)%area - ! File decomposition - gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1 - dyn_columns(lindex)%global_col_num = gindex - ! Note, coord_indices not used for unstructured dycores - ! 
Dynamics decomposition - dyn_columns(lindex)%dyn_task = iam - dyn_columns(lindex)%local_dyn_block = elem_ind - dyn_columns(lindex)%global_dyn_block = elem(elem_ind)%GlobalId - allocate(dyn_columns(lindex)%dyn_block_index(1)) - dyn_columns(lindex)%dyn_block_index(1) = col_ind - end do + hdim1_d = ngcols_d end if + hdim2_d = 1 + num_lev = nlev + index_model_top_layer = 1 + index_surface_layer = nlev + lindex = 0 + do elem_ind = 1, nelemd + if (fv_nphys > 0) then ! physics uses an FVM grid + do col_ind = 0, (fv_nphys * fv_nphys) - 1 + lindex = lindex + 1 + ii = MOD(col_ind, fv_nphys) + 1 + jj = (col_ind / fv_nphys) + 1 + coord = fvm(elem_ind)%center_cart_physgrid(ii, jj) + local_dyn_columns(lindex)%lat_rad = coord%lat + dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg + local_dyn_columns(lindex)%lat_deg = dcoord + local_dyn_columns(lindex)%lon_rad = coord%lon + dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg + local_dyn_columns(lindex)%lon_deg = dcoord + local_dyn_columns(lindex)%area = & + fvm(elem_ind)%area_sphere_physgrid(ii,jj) + local_dyn_columns(lindex)%weight = & + local_dyn_columns(lindex)%area + ! File decomposition + gindex = ((elem(elem_ind)%GlobalId-1) * fv_nphys * fv_nphys) + & + col_ind + 1 + local_dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! Dynamics decomposition + local_dyn_columns(lindex)%dyn_task = iam + local_dyn_columns(lindex)%local_dyn_block = elem_ind + local_dyn_columns(lindex)%global_dyn_block = & + elem(elem_ind)%GlobalId + allocate(local_dyn_columns(lindex)%dyn_block_index(1)) + local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + 1 + end do + else + do col_ind = 1, elem(elem_ind)%idxP%NumUniquePts + lindex = lindex + 1 + ii = elem(elem_ind)%idxP%ia(col_ind) + jj = elem(elem_ind)%idxP%ja(col_ind) + + dcoord = elem(elem_ind)%spherep(ii,jj)%lat + local_dyn_columns(lindex)%lat_rad = dcoord + dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg + local_dyn_columns(lindex)%lat_deg = dcoord + dcoord = elem(elem_ind)%spherep(ii,jj)%lon + local_dyn_columns(lindex)%lon_rad = dcoord + dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg + local_dyn_columns(lindex)%lon_deg = dcoord + local_dyn_columns(lindex)%area = & + 1.0_r8 / elem(elem_ind)%rspheremp(ii,jj) + local_dyn_columns(lindex)%weight = local_dyn_columns(lindex)%area + ! File decomposition + gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1 + local_dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! Dynamics decomposition + local_dyn_columns(lindex)%dyn_task = iam + local_dyn_columns(lindex)%local_dyn_block = elem_ind + local_dyn_columns(lindex)%global_dyn_block = & + elem(elem_ind)%GlobalId + allocate(local_dyn_columns(lindex)%dyn_block_index(1)) + local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + end do + end if + end do + end if + + ! Copy the information to the output array + if (allocated(dyn_columns)) then + deallocate(dyn_columns) + end if + allocate(dyn_columns(lindex)) + do lindex = 1, num_local_cols + dyn_columns(lindex) = local_dyn_columns(lindex) end do end subroutine get_dyn_grid_info @@ -415,8 +459,8 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & ! 
local variables real(r8), pointer :: area_d(:) real(r8), pointer :: temp(:) - character(len=256) :: errormsg - character(len=*), parameter :: sub = 'get_horiz_grid_d' + character(len=shr_kind_cl) :: errormsg + character(len=*), parameter :: sub = 'get_horiz_grid_int' !---------------------------------------------------------------------------- ! check that nxy is set to correct size for global arrays @@ -622,6 +666,58 @@ end subroutine dyn_grid_get_elem_coords ! Private routines. !========================================================================================= +subroutine get_hdim_name(fh_ini, ini_grid_hdim_name) + use pio, only: pio_inq_dimid, pio_seterrorhandling + use pio, only: PIO_BCAST_ERROR, PIO_NOERR + + ! Determine whether the initial file uses 'ncol' or 'ncol_d' horizontal + ! dimension in the unstructured grid. It is also possible when using + ! analytic initial conditions that the initial file only contains + ! vertical coordinates. + ! Return 'none' if that is the case. + + ! Arguments + type(file_desc_t), pointer :: fh_ini + character(len=6), intent(out) :: ini_grid_hdim_name ! horizontal dimension name + + ! local variables + integer :: ierr, pio_errtype + integer :: ncol_did + + character(len=*), parameter :: sub = 'get_hdim_name' + !---------------------------------------------------------------------------- + + ! Set PIO to return error flags. + call pio_seterrorhandling(fh_ini, PIO_BCAST_ERROR, pio_errtype) + + ! Check for ncol_d first just in case the initial file also contains fields on + ! the physics grid. + ierr = pio_inq_dimid(fh_ini, 'ncol_d', ncol_did) + if (ierr == PIO_NOERR) then + + ini_grid_hdim_name = 'ncol_d' + + else + + ! if 'ncol_d' not in file, check for 'ncol' + ierr = pio_inq_dimid(fh_ini, 'ncol', ncol_did) + + if (ierr == PIO_NOERR) then + + ini_grid_hdim_name = 'ncol' + + else + + ini_grid_hdim_name = 'none' + + end if + end if + + ! Return PIO to previous error handling. + call pio_seterrorhandling(fh_ini, pio_errtype) + +end subroutine get_hdim_name + subroutine define_cam_grids() ! Create grid objects on the dynamics decomposition for grids used by @@ -644,6 +740,9 @@ subroutine define_cam_grids() use cam_grid_support, only: horiz_coord_t, horiz_coord_create use cam_grid_support, only: cam_grid_register, cam_grid_attribute_register + !SE dycore: + use dimensions_mod, only: nc + ! Local variables integer :: i, ii, j, k, ie, mapind character(len=8) :: latname, lonname, ncolname, areaname @@ -745,6 +844,25 @@ subroutine define_cam_grids() call cam_grid_attribute_register('GLL', 'np', '', np) call cam_grid_attribute_register('GLL', 'ne', '', ne) + ! With CSLAM if the initial file uses the horizontal dimension 'ncol' rather than + ! 'ncol_d' then we need a grid object with the names ncol,lat,lon to read it. + ! Create that grid object here if it's needed. + if (fv_nphys > 0 .and. ini_grid_hdim_name == 'ncol') then + + lat_coord => horiz_coord_create('lat', 'ncol', ngcols_d, & + 'latitude', 'degrees_north', 1, size(pelat_deg), pelat_deg, map=pemap) + lon_coord => horiz_coord_create('lon', 'ncol', ngcols_d, & + 'longitude', 'degrees_east', 1, size(pelon_deg), pelon_deg, map=pemap) + + call cam_grid_register('INI', ini_decomp, lat_coord, lon_coord, & + grid_map, block_indexed=.false., unstruct=.true.) + + ini_grid_name = 'INI' + else + ! The dyn_decomp grid can be used to read the initial file. + ini_grid_name = 'GLL' + end if + ! Coordinate values and maps are copied into the coordinate and attribute objects. ! 
Locally allocated storage is no longer needed. deallocate(pelat_deg) diff --git a/src/utils/cam_field_read.F90 b/src/utils/cam_field_read.F90 index 97b08995..a07084cf 100644 --- a/src/utils/cam_field_read.F90 +++ b/src/utils/cam_field_read.F90 @@ -186,12 +186,12 @@ subroutine infld_real8_1d(varname, ncid, field, readvar, gridname, & ! Local array, is 1D ! - use pio, only: pio_read_darray + use pio, only: pio_read_darray, PIO_NOERR use pio, only: PIO_MAX_NAME, pio_inq_dimname use cam_grid_support, only: cam_grid_get_decomp, cam_grid_is_unstructured use cam_grid_support, only: cam_grid_dimensions use cam_grid_support, only: cam_grid_is_block_indexed - use cam_pio_utils, only: cam_pio_check_var + use cam_pio_utils, only: cam_pio_check_var, cam_pio_inq_var_fill ! Dummy arguments character(len=*), intent(in) :: varname ! variable name @@ -396,11 +396,11 @@ subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & ! Local array, is 2D ! - use pio, only: pio_read_darray + use pio, only: pio_read_darray, PIO_NOERR use pio, only: PIO_MAX_NAME, pio_inq_dimname use cam_grid_support, only: cam_grid_get_decomp, cam_grid_is_unstructured use cam_grid_support, only: cam_grid_dimensions,cam_grid_is_block_indexed - use cam_pio_utils, only: cam_pio_check_var + use cam_pio_utils, only: cam_pio_check_var, cam_pio_inq_var_fill ! Dummy arguments character(len=*), intent(in) :: varname ! variable name @@ -654,11 +654,11 @@ subroutine infld_real8_3d(varname, ncid, field, readvar, dim3name, & ! Local array, is 3D ! - use pio, only: pio_read_darray + use pio, only: pio_read_darray, PIO_NOERR use pio, only: PIO_MAX_NAME, pio_inq_dimname use cam_grid_support, only: cam_grid_get_decomp, cam_grid_is_unstructured use cam_grid_support, only: cam_grid_dimensions,cam_grid_is_block_indexed - use cam_pio_utils, only: cam_pio_check_var + use cam_pio_utils, only: cam_pio_check_var, cam_pio_inq_var_fill ! Dummy arguments character(len=*), intent(in) :: varname ! variable name diff --git a/src/utils/cam_pio_utils.F90 b/src/utils/cam_pio_utils.F90 index 15740697..48f2dcf7 100644 --- a/src/utils/cam_pio_utils.F90 +++ b/src/utils/cam_pio_utils.F90 @@ -1,7 +1,7 @@ ! Utility functions in support of PIO io interface module cam_pio_utils - use shr_kind_mod, only: r8=>shr_kind_r8 + use shr_kind_mod, only: r4=>shr_kind_r4, r8=>shr_kind_r8 use shr_sys_mod, only: shr_sys_flush use cam_logfile, only: iulog, debug_output, DEBUGOUT_NONE, DEBUGOUT_INFO use perf_mod, only: t_startf, t_stopf @@ -20,6 +20,8 @@ module cam_pio_utils public :: init_pio_subsystem ! called from cam_comp public :: cam_pio_get_decomp ! Find an existing decomp or create a new one public :: cam_pio_handle_error ! If error, print a custom error message + public :: cam_pio_set_fill ! Set the PIO fill value to PIO_FILL + public :: cam_pio_inq_var_fill ! 
Return the buffer fill value public :: cam_permute_array public :: calc_permutation @@ -82,6 +84,12 @@ module cam_pio_utils module procedure cam_pio_find_var_array end interface cam_pio_find_var + interface cam_pio_inq_var_fill + module procedure inq_var_fill_i4 + module procedure inq_var_fill_r4 + module procedure inq_var_fill_r8 + end interface cam_pio_inq_var_fill + interface calc_permutation module procedure calc_permutation_int module procedure calc_permutation_char @@ -637,8 +645,7 @@ subroutine cam_pio_newdecomp(iodesc, dims, dof, dtype) strt = strt + 8 end do end if - call pio_initdecomp(pio_subsystem, dtype, dims, dof, iodesc, & - rearr=pio_rearranger) + call pio_initdecomp(pio_subsystem, dtype, dims, dof, iodesc) end subroutine cam_pio_newdecomp @@ -1292,6 +1299,100 @@ logical function cam_pio_fileexists(fname) end function cam_pio_fileexists + integer function cam_pio_set_fill(File, fillmode, old_mode) result(ierr) + +#ifdef PIO2 + use pio, only: PIO_FILL, pio_set_fill +#endif + ! Dummy arguments + type(File_desc_t), intent(in) :: File + integer, optional, intent(in) :: fillmode + integer, optional, intent(out) :: old_mode + ! Local variables + integer :: oldfill + integer :: fillval + +#ifdef PIO2 + if (present(fillmode)) then + fillval = fillmode + else + fillval = PIO_FILL + end if + ierr = pio_set_fill(File, fillval, oldfill) + if (present(old_mode)) then + old_mode = oldfill + end if +#else + ierr = 0 + if (present(old_mode)) then + old_mode = 0 + end if +#endif + end function cam_pio_set_fill + + integer function inq_var_fill_i4(File, vdesc, fillvalue, no_fill) result(ierr) +#ifdef PIO2 + use pio, only: pio_inq_var_fill +#endif + use pio, only: PIO_NOERR + + type(File_desc_t), intent(in) :: File + type(var_desc_t), intent(in) :: vdesc + ! fillvalue needs to not be optional to avoid ambiguity + integer, target, intent(out) :: fillvalue + integer, optional, intent(out) :: no_fill + +#ifdef PIO2 + ierr = pio_inq_var_fill(File, vdesc, no_fill, fillvalue) +#else + ierr = PIO_NOERR + fillvalue = 0 +#endif + + end function inq_var_fill_i4 + + integer function inq_var_fill_r4(File, vdesc, fillvalue, no_fill) result(ierr) +#ifdef PIO2 + use pio, only: pio_inq_var_fill +#endif + use pio, only: PIO_NOERR + + type(File_desc_t), intent(in) :: File + type(var_desc_t), intent(in) :: vdesc + ! fillvalue needs to not be optional to avoid ambiguity + real(r4), target, intent(out) :: fillvalue + integer, optional, intent(out) :: no_fill + +#ifdef PIO2 + ierr = pio_inq_var_fill(File, vdesc, no_fill, fillvalue) +#else + ierr = PIO_NOERR + fillvalue = 0.0_R4 +#endif + + end function inq_var_fill_r4 + + integer function inq_var_fill_r8(File, vdesc, fillvalue, no_fill) result(ierr) +#ifdef PIO2 + use pio, only: pio_inq_var_fill +#endif + use pio, only: PIO_NOERR + + type(File_desc_t), intent(in) :: File + type(var_desc_t), intent(in) :: vdesc + ! fillvalue needs to not be optional to avoid ambiguity + real(r8), target, intent(out) :: fillvalue + integer, optional, intent(out) :: no_fill + +#ifdef PIO2 + ierr = pio_inq_var_fill(File, vdesc, no_fill, fillvalue) +#else + ierr = PIO_NOERR + fillvalue = 0.0_R8 +#endif + + end function inq_var_fill_r8 + subroutine find_dump_filename(fieldname, filename) ! Dummy arguments From 1884f9631b5c7598eb2d4d4ebf5b21bb3346e456 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 14 Jan 2021 15:42:45 -0700 Subject: [PATCH 06/45] Perform miscellaneous code cleanup. 
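For reference, the fill-value helpers added to cam_pio_utils in the preceding
patch are expected to be used roughly as follows (a minimal sketch: the
subroutine name, the already-open file handle, the variable descriptor, and
the zero fallback are assumptions for illustration, not part of the patch):

subroutine read_with_fill_sketch(fh, vdesc, buffer)
   use shr_kind_mod,  only: r8 => shr_kind_r8
   use pio,           only: file_desc_t, var_desc_t, PIO_NOERR
   use cam_pio_utils, only: cam_pio_inq_var_fill

   type(file_desc_t), intent(inout) :: fh        ! assumed: an open PIO file
   type(var_desc_t),  intent(in)    :: vdesc     ! assumed: an inquired variable
   real(r8),          intent(inout) :: buffer(:) ! data already read elsewhere

   real(r8) :: fillvalue
   integer  :: ierr

   ! Recover the fill value stored with the variable (PIO1 builds return a default):
   ierr = cam_pio_inq_var_fill(fh, vdesc, fillvalue)
   if (ierr /= PIO_NOERR) then
      fillvalue = 0.0_r8   ! assumed fallback for this sketch
   end if

   ! Mask out filled (missing) points, as read_dyn_field_2d/3d do with the
   ! fillvalue returned by cam_read_field:
   where (buffer == fillvalue) buffer = 0.0_r8
end subroutine read_with_fill_sketch
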
--- cime_config/cam_config.py | 2 -- src/dynamics/se/dyn_grid.F90 | 29 ----------------------------- src/dynamics/se/pmgrid.F90 | 3 --- 3 files changed, 34 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 59426f0d..bb7ea114 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -773,10 +773,8 @@ def __init__(self, case, case_log): # Set number of vertical levels if case_nlev: - # Save variable for CPPDEFs nlev = case_nlev else: - # Save variable for CPPDEFs nlev = 30 # Add vertical levels to configure object diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index dd682883..4af58be7 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -562,35 +562,6 @@ end subroutine physgrid_copy_attributes_d !========================================================================================= -integer function get_dyn_grid_parm(name) result(ival) - - ! This function is in the process of being deprecated, but is still needed - ! as a dummy interface to satisfy external references from some chemistry routines. - - use pmgrid, only: plat, plev - - character(len=*), intent(in) :: name - !---------------------------------------------------------------------------- - - if (name.eq.'plat') then - ival = plat - else if(name.eq.'plon') then - if (fv_nphys>0) then - ival = fv_nphys*fv_nphys*nelem_d - else - ival = ngcols_d - end if - else if(name.eq.'plev') then - ival = plev - - else - ival = -1 - end if - -end function get_dyn_grid_parm - -!========================================================================================= - subroutine dyn_grid_get_colndx(igcol, ncols, owners, col, lbk) ! For each global column index return the owning task. If the column is owned diff --git a/src/dynamics/se/pmgrid.F90 b/src/dynamics/se/pmgrid.F90 index fff3dbce..87f7908b 100644 --- a/src/dynamics/se/pmgrid.F90 +++ b/src/dynamics/se/pmgrid.F90 @@ -9,7 +9,4 @@ module pmgrid integer, parameter :: plev = PLEV ! number of vertical levels integer, parameter :: plevp = plev + 1 -integer, parameter :: plon = 1 -integer, parameter :: plat = 1 - end module pmgrid From 6a8570453e25b27f1358371779d4d916a6f14619 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 20 Jan 2021 14:03:07 -0700 Subject: [PATCH 07/45] Add ability to set CPP definitions. Also fix python and meta file bugs. 
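The CPP macros collected by the new ConfigCAM.add_cppdef() method are handed
to the Fortran build through USER_CPPDEFS (see the buildlib change below) and
are consumed by the preprocessor in the usual way. A minimal sketch of the
consuming side (the module and subroutine names and the ANALYTIC_IC guard are
illustrative assumptions; the PLEV usage mirrors pmgrid.F90):

module cppdef_usage_sketch
   implicit none
   ! PLEV arrives on the compile line as -DPLEV=<nlev>:
   integer, parameter :: plev = PLEV
contains
   subroutine report_init_mode()
      ! ANALYTIC_IC is defined (with no value) only when analytic initial
      ! conditions were requested in CAM_CONFIG_OPTS (assumed guard):
#ifdef ANALYTIC_IC
      write(*,*) 'analytic initial conditions enabled, plev = ', plev
#else
      write(*,*) 'reading initial conditions from file, plev = ', plev
#endif
   end subroutine report_init_mode
end module cppdef_usage_sketch
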
--- cime_config/buildlib | 4 +++ cime_config/cam_autogen.py | 9 +++-- cime_config/cam_config.py | 71 +++++++++++++++++++++++++++++++++----- src/data/physconst.F90 | 1 - 4 files changed, 73 insertions(+), 12 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index aa993e99..f44fd3a0 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -132,6 +132,10 @@ def _build_cam(): .format(gmake, gmake_j, complib, makefile, get_standard_makefile_args(case)) + # Add C Pre-Processor (CPP) definitions, if present: + if config.cpp_defs: + cmd += " USER_CPPDEFS='{}'".format(' '.join(config.cpp_defs)) + retcode, out, err = run_cmd(cmd) _LOGGER.info("%s: \n\n output:\n %s \n\n err:\n\n%s\n", cmd, out, err) expect(retcode == 0, "Command {} failed with rc={}".format(cmd, retcode)) diff --git a/cime_config/cam_autogen.py b/cime_config/cam_autogen.py index 6d721d8a..48381ffa 100644 --- a/cime_config/cam_autogen.py +++ b/cime_config/cam_autogen.py @@ -464,9 +464,12 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n #the host model files list for use by CCPP's capgen: host_files.append(reg_file.file_path) + # Convert preproc defs to string: + preproc_cache_str = ', '.join(preproc_defs) + if os.path.exists(genccpp_dir): do_gen_ccpp = force or build_cache.ccpp_mismatch(sdfs, scheme_files, - preproc_defs, + preproc_cache_str, kind_phys) else: os.makedirs(genccpp_dir) @@ -481,7 +484,7 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n _LOGGER.debug(" host files: %s", ", ".join(host_files)) _LOGGER.debug(" scheme files: %s", ', '.join(scheme_files)) _LOGGER.debug(" suite definition files: %s", ', '.join(sdfs)) - _LOGGER.debug(" preproc defs: %s", ', '.join(preproc_defs)) + _LOGGER.debug(" preproc defs: %s", preproc_cache_str) _LOGGER.debug(" output directory: '%s'", genccpp_dir) _LOGGER.debug(" kind_phys: '%s'", kind_phys) @@ -492,7 +495,7 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n host_name, kind_phys, force_overwrite, _LOGGER) # save build details in the build cache - build_cache.update_ccpp(sdfs, scheme_files, preproc_defs, kind_phys) + build_cache.update_ccpp(sdfs, scheme_files, preproc_cache_str, kind_phys) ##XXgoldyXX: v Temporary fix: Copy CCPP Framework source code into ##XXgoldyXX: v generated code directory request = DatatableReport("utility_files") diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index bb7ea114..11a3873f 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -570,13 +570,21 @@ def __init__(self, case, case_log): case_ny = case.get_value("ATM_NY") # Number of y-dimension grid-points (latitudes) comp_ocn = case.get_value("COMP_OCN") # CESM ocean component exeroot = case.get_value("EXEROOT") # model executable path + nthrds = case.get_value("NTHRDS_ATM") # number of model OpenMP threads # Save case variables needed for code auto-generation: self.__atm_root = case.get_value("COMP_ROOT_DIR_ATM") self.__caseroot = case.get_value("CASEROOT") self.__bldroot = os.path.join(exeroot, "atm", "obj") - self.__cppdefs = case.get_value('CAM_CPPDEFS') - self.__atm_name = case.get_value('COMP_ATM') + self.__atm_name = case.get_value("COMP_ATM") + + # Save CPP definitions as a list: + self.__cppdefs = case.get_value("CAM_CPPDEFS").split() + + # If only "UNSET" is present in the list, then convert to + # empty list: + if len(self.__cppdefs) == 1 and "UNSET" in self.__cppdefs: + self.__cppdefs = list() # The following translation is 
hard-wired for backwards compatibility # to support the differences between how the config_grids specifies the @@ -706,6 +714,18 @@ def __init__(self, case, case_log): self.__nml_groups.append("air_composition_nl") self.__nml_groups.append("dyn_se_inparm") + # Add required CPP definitons: + self.add_cppdef("_MPI") + self.add_cppdef("SPMD") + + # Add OpenMP CCP definitions, if needed: + if nthrds > 1: + self.add_cppdef("_OPENMP") + + # Add CSLAM CPP definition, if needed: + if atm_grid.find("pg") != -1: + self.add_cppdef("FVM_TRACERS") + elif fv3_grid_re.match(atm_grid) is not None: # Dynamical core self.create_config("dyn", dyn_desc, "fv3", @@ -777,6 +797,9 @@ def __init__(self, case, case_log): else: nlev = 30 + # Add vertical levels CPP definition (REMOVE ONCE HELD-SUAREZ PR IS MERGED!): + self.add_cppdef("PLEV", value=nlev) + # Add vertical levels to configure object nlev_desc = "Number of vertical levels." self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) @@ -813,13 +836,17 @@ def __init__(self, case, case_log): # Set initial and/or boundary conditions #--------------------------------------- - #Check if user specified Analytic Initial Conditions (ICs): + # Check if user specified Analytic Initial Conditions (ICs): if user_config_opts.analytic_ic: - #Set "analytic_ic" to True (1): + # Set "analytic_ic" to True (1): analy_ic_val = 1 #Use Analytic ICs - #Add analytic_ic to namelist group list: + # Add analytic_ic to namelist group list: self.__nml_groups.append("analytic_ic_nl") + + #Add new CPP definition: + self.add_cppdef("ANALYTIC_IC") + else: analy_ic_val = 0 #Don't use Analytic ICs @@ -870,14 +897,18 @@ def __init__(self, case, case_log): # and namelist groups list without underscores @property def config_dict(self): - """Return the configure dictionary of this object""" + """Return the configure dictionary of this object.""" return self.__config_dict @property def nml_groups(self): - """Return the namelist groups list of this object""" + """Return the namelist groups list of this object.""" return self.__nml_groups + @property + def cpp_defs(self): + """Return the CPP definitions list of this object.""" + return self.__cppdefs #++++++++++++++++++++++ # ConfigCAM functions @@ -1004,6 +1035,10 @@ def print_all(self, case_log): # Print variable to logger self.print_config(obj_name, case_log) + # Also print CPP definitions, if any: + if self.__cppdefs: + case_log.debug("\nCAM CPP Defs: {}".format(" ".join(self.__cppdefs))) + # Print additional separator (to help seperate this output from # additional CIME output) case_log.debug("-----------------------------") @@ -1034,10 +1069,30 @@ def set_value(self, obj_name, val): #++++++++++++++++++++++++ + def add_cppdef(self, cppname, value=None): + + """ + Add a CPP definition value to be used during the + building of the model. + """ + + # Check if input value is a logical: + if value is None: + # Create CPP flag string with no equals sign: + cpp_str = "-D{}".format(cppname.upper()) + else: + # Create CPP definition flag string: + cpp_str = "-D{}={}".format(cppname.upper(), value) + + # Add string to CPP definition list: + self.__cppdefs.append(cpp_str) + + #++++++++++++++++++++++++ + def get_value(self, obj_name): """ - return value for specified configure object. + Return value for specified configure object. 
""" # First check that the given object name exists in the dictionary diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 32a1e935..9239fc3e 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -179,7 +179,6 @@ module physconst ! standard dry air (constant composition) real(kind_phys) :: mmro2, mmrn2 ! Mass mixing ratios of O2 and N2 - real(kind_phys) :: o2_mwi, n2_mwi ! Inverse molecular weights real(kind_phys) :: mbar ! Mean mass at mid level ! coefficients in expressions for molecular diffusion coefficients From e194d6693b135edc956cfd7849a4ceb9a6041c28 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Fri, 22 Jan 2021 14:53:35 -0700 Subject: [PATCH 08/45] Add cam_development updates to physics_grid, and implement 'model_grid_init' in SE dycore. --- src/dynamics/se/dyn_grid.F90 | 325 ++++++++++++++++------------- src/physics/utils/physics_grid.F90 | 276 ++++++++++++------------ 2 files changed, 325 insertions(+), 276 deletions(-) diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 4af58be7..49a5a508 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -37,6 +37,7 @@ module dyn_grid use cam_logfile, only: iulog use cam_abortutils, only: endrun +use string_utils, only: to_str !SE dycore: use dimensions_mod, only: globaluniquecols, nelem, nelemd, nelemdmax, & @@ -78,9 +79,8 @@ module dyn_grid fvm, & edgebuf -public :: dyn_grid_init +public :: model_grid_init public :: get_dyn_grid_info -public :: physgrid_copy_attributes_d !!XXgoldyXX: v try to remove? public :: get_horiz_grid_dim_d @@ -105,7 +105,7 @@ module dyn_grid integer :: Owner ! task id of element owner end type block_global_data -type(physics_column_t), allocatable, target :: local_dyn_columns(:) +type(physics_column_t), allocatable :: local_dyn_columns(:) ! number of global dynamics columns. Set by SE dycore init. integer :: ngcols_d = 0 @@ -120,9 +120,11 @@ module dyn_grid contains !============================================================================= -subroutine dyn_grid_init() +subroutine model_grid_init() - ! Initialize SE grid, and decomposition. + ! Initializes the SE grid and decomposition, + ! and then initializes the physics grid and + ! decomposition based on the dynamics (SE) grid. use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 @@ -154,10 +156,15 @@ subroutine dyn_grid_init() integer :: ierr integer :: dtime - real(r8), allocatable ::clat(:), clon(:), areaa(:) + real(r8), allocatable :: clat(:), clon(:), areaa(:) integer :: nets, nete - character(len=*), parameter :: sub = 'dyn_grid_init' + ! Variables needed for physics grid initialization: + integer :: num_local_columns + integer :: hdim1_d ! # longitudes or grid size + + + character(len=*), parameter :: sub = 'model_grid_init' !---------------------------------------------------------------------------- ! 
Get file handle for initial file and first consistency check @@ -263,8 +270,23 @@ subroutine dyn_grid_init() if (do_native_mapping) then - allocate(areaA(ngcols_d)) - allocate(clat(ngcols_d),clon(ngcols_d)) + allocate(areaA(ngcols_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate areaA(ngcols_d) failed with stat: '//& + to_str(ierr)) + end if + + allocate(clat(ngcols_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate clat(ngcols_d) failed with stat: '//& + to_str(ierr)) + end if + allocate(clon(ngcols_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate clon(ngcols_d) failed with stat: '//& + to_str(ierr)) + end if + call get_horiz_grid_int(ngcols_d, clat_d_out=clat, clon_d_out=clon, area_d_out=areaA) ! Create mapping files using SE basis functions @@ -274,14 +296,84 @@ subroutine dyn_grid_init() deallocate(areaa, clat, clon) end if + ! Calculate number of of local columns: + if (fv_nphys > 0) then ! physics uses an FVM grid + num_local_columns = nelemd * fv_nphys * fv_nphys + else + num_local_columns = 0 + do elem_ind = 1, nelemd + num_local_columns = num_local_columns + elem(elem_ind)%idxP%NumUniquePts + end do + end if + + ! Allocate local_dyn_columns structure if not already allocated: + if (.not.allocated(local_dyn_columns)) then + allocate(local_dyn_columns(num_local_columns), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate local_dyn_columns(num_local_columns) '//& + 'failed with stat: '//to_str(ierr)) + end if + end if + + ! Set local_dyn_columns values: + call set_dyn_col_values() + + ! Calculate horizontal dimensions (needed for physics grid): + if (fv_nphys > 0) then ! physics uses an FVM grid + hdim1_d = nelem * fv_nphys * fv_nphys + else + hdim1_d = ngcols_d + end if + + ! Determine grid name and attributes: + if (fv_nphys > 0) then + gridname = 'physgrid_d' + + allocate(grid_attribute_names(2), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_attribute_names(2) failed with stat: '//& + to_str(ierr)) + end if + + grid_attribute_names(1) = 'fv_nphys' + grid_attribute_names(2) = 'ne' + else + gridname = 'GLL' + + allocate(grid_attribute_names(3), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_attribute_names(3) failed with stat: '//& + to_str(ierr)) + end if + + ! For standard CAM-SE, we need to copy the area attribute. + ! For physgrid, the physics grid will create area (GLL has area_d) + grid_attribute_names(1) = 'area' + grid_attribute_names(2) = 'np' + grid_attribute_names(3) = 'ne' + end if + + ! Initialize physics grid decomposition: + call phys_grid_init(hdim1_d, 1, nlev, 'SE', & + 1, nlev, local_dyn_columns, gridname, & + grid_attribute_names) + + ! Deallocate grid_attirbute_names, as it is no longer needed: + deallocate(grid_attribute_names) + + ! Deallocate dyn_columns, as it is now stored in the + ! global phys_columns structure: + deallocate(local_dyn_columns) + + ! Make sure all tasks finish initialization + ! 
before continuing on with the run: call mpi_barrier(mpicom, ierr) -end subroutine dyn_grid_init +end subroutine model_grid_init !============================================================================== -subroutine get_dyn_grid_info(hdim1_d, hdim2_d, num_lev, & - index_model_top_layer, index_surface_layer, unstructured, dyn_columns) +subroutine set_dyn_col_values() use physconst, only: pi use cam_abortutils, only: endrun @@ -290,127 +382,96 @@ subroutine get_dyn_grid_info(hdim1_d, hdim2_d, num_lev, & !SE dycore: use coordinate_systems_mod, only: spherical_polar_t - ! Dummy arguments - integer, intent(out) :: hdim1_d ! # longitudes or grid size - integer, intent(out) :: hdim2_d ! # latitudes or 1 - integer, intent(out) :: num_lev ! # levels - integer, intent(out) :: index_model_top_layer - integer, intent(out) :: index_surface_layer - logical, intent(out) :: unstructured - ! dyn_columns will contain a copy of the physics column info local to this - ! dynamics task - type(physics_column_t), allocatable, intent(out) :: dyn_columns(:) ! Local variables integer :: lindex integer :: gindex integer :: elem_ind, col_ind, ii, jj integer :: num_local_cols + integer :: ierr type(spherical_polar_t) :: coord real(r8) :: dcoord real(kind_pcol), parameter :: radtodeg = 180.0_kind_pcol / pi real(kind_pcol), parameter :: degtorad = pi / 180.0_kind_pcol character(len=*), parameter :: subname = 'get_dyn_grid_info' - unstructured = .true. ! SE is an unstructured dycore - - if (fv_nphys > 0) then ! physics uses an FVM grid - num_local_cols = nelemd * fv_nphys * fv_nphys - else - num_local_cols = 0 - do elem_ind = 1, nelemd - num_local_cols = num_local_cols + elem(elem_ind)%idxP%NumUniquePts - end do - end if - if (allocated(local_dyn_columns)) then - ! Check for correct number of columns - if (size(local_dyn_columns) /= num_local_cols) then - call endrun(subname//': called with inconsistent column numbers') - end if - else - allocate(local_dyn_columns(num_local_cols)) + lindex = 0 + do elem_ind = 1, nelemd if (fv_nphys > 0) then ! physics uses an FVM grid - hdim1_d = nelem * fv_nphys * fv_nphys + do col_ind = 0, (fv_nphys * fv_nphys) - 1 + lindex = lindex + 1 + ii = MOD(col_ind, fv_nphys) + 1 + jj = (col_ind / fv_nphys) + 1 + coord = fvm(elem_ind)%center_cart_physgrid(ii, jj) + local_dyn_columns(lindex)%lat_rad = coord%lat + dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg + local_dyn_columns(lindex)%lat_deg = dcoord + local_dyn_columns(lindex)%lon_rad = coord%lon + dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg + local_dyn_columns(lindex)%lon_deg = dcoord + local_dyn_columns(lindex)%area = & + fvm(elem_ind)%area_sphere_physgrid(ii,jj) + local_dyn_columns(lindex)%weight = & + local_dyn_columns(lindex)%area + ! File decomposition + gindex = ((elem(elem_ind)%GlobalId-1) * fv_nphys * fv_nphys) + & + col_ind + 1 + local_dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! 
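For reference, the flattened column index used in the fv_nphys branch above maps to an (ii, jj) pair within the element and to a 1-based global column number exactly as in the Fortran loop. A minimal standalone Python sketch of that arithmetic (fv_nphys and the element GlobalId below are illustrative values, not taken from a real run):

# Sketch of the physgrid column indexing used in set_dyn_col_values (illustrative values only).
fv_nphys = 3            # assumed number of FVM physics points per element edge
global_id = 7           # assumed 1-based global element ID

for col_ind in range(fv_nphys * fv_nphys):             # 0-based flattened index within the element
    ii = (col_ind % fv_nphys) + 1                      # 1-based i index within the element
    jj = (col_ind // fv_nphys) + 1                     # 1-based j index within the element
    gindex = (global_id - 1) * fv_nphys * fv_nphys + col_ind + 1   # 1-based global column number
    print(col_ind, ii, jj, gindex)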
Dynamics decomposition + local_dyn_columns(lindex)%dyn_task = iam + local_dyn_columns(lindex)%local_dyn_block = elem_ind + local_dyn_columns(lindex)%global_dyn_block = & + elem(elem_ind)%GlobalId + + allocate(local_dyn_columns(lindex)%dyn_block_index(1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate local_dyn_columns('//& + to_str(lindex)//')%dyn_block_index(1)'//& + ' failed with stat: '//to_str(ierr)) + end if + + local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + 1 + end do else - hdim1_d = ngcols_d - end if - hdim2_d = 1 - num_lev = nlev - index_model_top_layer = 1 - index_surface_layer = nlev - lindex = 0 - do elem_ind = 1, nelemd - if (fv_nphys > 0) then ! physics uses an FVM grid - do col_ind = 0, (fv_nphys * fv_nphys) - 1 - lindex = lindex + 1 - ii = MOD(col_ind, fv_nphys) + 1 - jj = (col_ind / fv_nphys) + 1 - coord = fvm(elem_ind)%center_cart_physgrid(ii, jj) - local_dyn_columns(lindex)%lat_rad = coord%lat - dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg - local_dyn_columns(lindex)%lat_deg = dcoord - local_dyn_columns(lindex)%lon_rad = coord%lon - dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg - local_dyn_columns(lindex)%lon_deg = dcoord - local_dyn_columns(lindex)%area = & - fvm(elem_ind)%area_sphere_physgrid(ii,jj) - local_dyn_columns(lindex)%weight = & - local_dyn_columns(lindex)%area - ! File decomposition - gindex = ((elem(elem_ind)%GlobalId-1) * fv_nphys * fv_nphys) + & - col_ind + 1 - local_dyn_columns(lindex)%global_col_num = gindex - ! Note, coord_indices not used for unstructured dycores - ! Dynamics decomposition - local_dyn_columns(lindex)%dyn_task = iam - local_dyn_columns(lindex)%local_dyn_block = elem_ind - local_dyn_columns(lindex)%global_dyn_block = & - elem(elem_ind)%GlobalId - allocate(local_dyn_columns(lindex)%dyn_block_index(1)) - local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + 1 - end do - else - do col_ind = 1, elem(elem_ind)%idxP%NumUniquePts - lindex = lindex + 1 - ii = elem(elem_ind)%idxP%ia(col_ind) - jj = elem(elem_ind)%idxP%ja(col_ind) - - dcoord = elem(elem_ind)%spherep(ii,jj)%lat - local_dyn_columns(lindex)%lat_rad = dcoord - dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg - local_dyn_columns(lindex)%lat_deg = dcoord - dcoord = elem(elem_ind)%spherep(ii,jj)%lon - local_dyn_columns(lindex)%lon_rad = dcoord - dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg - local_dyn_columns(lindex)%lon_deg = dcoord - local_dyn_columns(lindex)%area = & - 1.0_r8 / elem(elem_ind)%rspheremp(ii,jj) - local_dyn_columns(lindex)%weight = local_dyn_columns(lindex)%area - ! File decomposition - gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1 - local_dyn_columns(lindex)%global_col_num = gindex - ! Note, coord_indices not used for unstructured dycores - ! 
Dynamics decomposition - local_dyn_columns(lindex)%dyn_task = iam - local_dyn_columns(lindex)%local_dyn_block = elem_ind - local_dyn_columns(lindex)%global_dyn_block = & - elem(elem_ind)%GlobalId - allocate(local_dyn_columns(lindex)%dyn_block_index(1)) - local_dyn_columns(lindex)%dyn_block_index(1) = col_ind - end do - end if - end do - end if + do col_ind = 1, elem(elem_ind)%idxP%NumUniquePts + lindex = lindex + 1 + ii = elem(elem_ind)%idxP%ia(col_ind) + jj = elem(elem_ind)%idxP%ja(col_ind) + + dcoord = elem(elem_ind)%spherep(ii,jj)%lat + local_dyn_columns(lindex)%lat_rad = dcoord + dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg + local_dyn_columns(lindex)%lat_deg = dcoord + dcoord = elem(elem_ind)%spherep(ii,jj)%lon + local_dyn_columns(lindex)%lon_rad = dcoord + dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg + local_dyn_columns(lindex)%lon_deg = dcoord + local_dyn_columns(lindex)%area = & + 1.0_r8 / elem(elem_ind)%rspheremp(ii,jj) + local_dyn_columns(lindex)%weight = local_dyn_columns(lindex)%area + ! File decomposition + gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1 + local_dyn_columns(lindex)%global_col_num = gindex + ! Note, coord_indices not used for unstructured dycores + ! Dynamics decomposition + local_dyn_columns(lindex)%dyn_task = iam + local_dyn_columns(lindex)%local_dyn_block = elem_ind + local_dyn_columns(lindex)%global_dyn_block = & + elem(elem_ind)%GlobalId + + allocate(local_dyn_columns(lindex)%dyn_block_index(1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate local_dyn_columns('//& + to_str(lindex)//')%dyn_block_index(1)'//& + ' failed with stat: '//to_str(ierr)) + end if - ! Copy the information to the output array - if (allocated(dyn_columns)) then - deallocate(dyn_columns) - end if - allocate(dyn_columns(lindex)) - do lindex = 1, num_local_cols - dyn_columns(lindex) = local_dyn_columns(lindex) + local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + end do + end if end do - end subroutine get_dyn_grid_info + end subroutine set_dyn_col_values !============================================================================== @@ -532,36 +593,6 @@ end subroutine get_horiz_grid_int !========================================================================================= -subroutine physgrid_copy_attributes_d(gridname, grid_attribute_names) - - ! create list of attributes for the physics grid that should be copied - ! from the corresponding grid object on the dynamics decomposition - - use cam_grid_support, only: max_hcoordname_len - - ! Dummy arguments - character(len=max_hcoordname_len), intent(out) :: gridname - character(len=max_hcoordname_len), pointer, intent(out) :: grid_attribute_names(:) - - if (fv_nphys > 0) then - gridname = 'physgrid_d' - allocate(grid_attribute_names(2)) - grid_attribute_names(1) = 'fv_nphys' - grid_attribute_names(2) = 'ne' - else - gridname = 'GLL' - allocate(grid_attribute_names(3)) - ! For standard CAM-SE, we need to copy the area attribute. - ! For physgrid, the physics grid will create area (GLL has area_d) - grid_attribute_names(1) = 'area' - grid_attribute_names(2) = 'np' - grid_attribute_names(3) = 'ne' - end if - -end subroutine physgrid_copy_attributes_d - -!========================================================================================= - subroutine dyn_grid_get_colndx(igcol, ncols, owners, col, lbk) ! For each global column index return the owning task. 
If the column is owned diff --git a/src/physics/utils/physics_grid.F90 b/src/physics/utils/physics_grid.F90 index 5ccbefc3..df99fc40 100644 --- a/src/physics/utils/physics_grid.F90 +++ b/src/physics/utils/physics_grid.F90 @@ -1,5 +1,26 @@ module physics_grid +!------------------------------------------------------------------------------ +! +! The phys_grid module represents the host model physics decomposition. +! +! phys_grid_init receives the physics column info (area, weight, centers) +! from the dycore. +! The routine then creates the physics decomposition which +! is the arrangement of columns across the atmosphere model's +! MPI tasks as well as the arrangement into groups to +! facilitate efficient threading. +! The routine then creates a grid object to allow for data +! to be read into and written from this decomposition. +! The phys_grid module also provides interfaces for retrieving information +! about the decomposition +! +! Note: This current implementation does not perform load balancing, +! physics columns ae always on the same task as the corresponding +! column received from the dycore. +! +!------------------------------------------------------------------------------ + use shr_kind_mod, only: r8 => shr_kind_r8 use ccpp_kinds, only: kind_phys use physics_column_type, only: physics_column_t, assignment(=) @@ -39,6 +60,9 @@ module physics_grid ! Physics decomposition information type(physics_column_t), protected, public, allocatable :: phys_columns(:) + ! Memory debugging control + logical :: calc_memory_increase = .false. + ! These variables are last to provide a limited table to search !> \section arg_table_physics_grid Argument Table @@ -63,9 +87,10 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & dyn_columns, dyn_gridname, dyn_attributes) ! use mpi, only: MPI_reduce ! XXgoldyXX: Should this work? - use mpi, only: MPI_INTEGER, MPI_MIN + use mpi, only: MPI_INTEGER, MPI_REAL8, MPI_MIN, MPI_MAX + use shr_mem_mod, only: shr_mem_getusage use cam_abortutils, only: endrun, check_allocate - use spmd_utils, only: npes, mpicom + use spmd_utils, only: npes, mpicom, masterprocid, masterproc use string_utils, only: to_str use cam_grid_support, only: cam_grid_register, cam_grid_attribute_register use cam_grid_support, only: iMap @@ -95,9 +120,11 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & type(horiz_coord_t), pointer :: lat_coord type(horiz_coord_t), pointer :: lon_coord real(r8), pointer :: area_d(:) + real(r8) :: mem_hw_beg, mem_hw_end + real(r8) :: mem_beg, mem_end logical :: unstructured real(r8) :: temp ! For MPI - integer :: ierr ! For MPI + integer :: ierr ! For error codes character(len=*), parameter :: subname = 'phys_grid_init' @@ -108,6 +135,10 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & nullify(lon_coord) nullify(area_d) + if (calc_memory_increase) then + call shr_mem_getusage(mem_hw_beg, mem_beg) + end if + ! Check that the physics grid is not already initialized: if (phys_grid_initialized) then call endrun(subname//": Physics grid is already initialized.") @@ -283,22 +314,26 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & dyn_attributes(index)) end do - if ((.not. cam_grid_attr_exists('physgrid', 'area')) .and. & - unstructured) then - ! Physgrid always needs an area attribute. If we did not inherit one - ! from the dycore (i.e., physics and dynamics are on different - ! grids), create that attribute here (Note, a separate physics - ! 
grid is only supported for unstructured grids). - allocate(area_d(columns_on_task), stat=ierr) - call check_allocate(ierr, subname, 'area_d(columns_on_task)', & - file=__FILE__, line=__LINE__) + if (.not. cam_grid_attr_exists('physgrid', 'area')) then + ! Physgird always needs an area attribute + if (unstructured) then + ! Physgrid always needs an area attribute. If we did not inherit one + ! from the dycore (i.e., physics and dynamics are on different + ! grids), create that attribute here (Note, a separate physics + ! grid is only supported for unstructured grids). + allocate(area_d(columns_on_task), stat=ierr) + call check_allocate(ierr, subname, 'area_d(columns_on_task)', & + file=__FILE__, line=__LINE__) - do col_index = 1, columns_on_task - area_d(col_index) = phys_columns(col_index)%area - end do - call cam_grid_attribute_register('physgrid', 'area', & - 'physics column areas', 'ncol', area_d, map=grid_map(3,:)) - nullify(area_d) ! Belongs to attribute now + do col_index = 1, columns_on_task + area_d(col_index) = phys_columns(col_index)%area + end do + call cam_grid_attribute_register('physgrid', 'area', & + 'physics column areas', 'ncol', area_d, map=grid_map(3,:)) + nullify(area_d) ! Belongs to attribute now + else + call endrun(subname//"No 'area' attribute from dycore") + end if end if ! Cleanup pointers (they belong to the grid now) nullify(grid_map) @@ -313,6 +348,25 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & call t_stopf("phys_grid_init") call t_adj_detailf(+2) + ! Calculate memory usage stats if requested: + if (calc_memory_increase) then + call shr_mem_getusage(mem_hw_end, mem_end) + temp = mem_end - mem_beg + call MPI_reduce(temp, mem_end, 1, MPI_REAL8, MPI_MAX, masterprocid, & + mpicom, ierr) + if (masterproc) then + write(iulog, *) 'phys_grid_init: Increase in memory usage = ', & + mem_end, ' (MB)' + end if + temp = mem_hw_end - mem_hw_beg + call MPI_reduce(temp, mem_hw_end, 1, MPI_REAL8, MPI_MAX, & + masterprocid, mpicom, ierr) + if (masterproc) then + write(iulog, *) subname, 'Increase in memory highwater = ', & + mem_end, ' (MB)' + end if + end if + end subroutine phys_grid_init !======================================================================== @@ -328,16 +382,10 @@ real(r8) function get_dlat_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_dlat_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task - write(iulog, *) errmsg - call endrun(errmsg) - else - get_dlat_p = phys_columns(index)%lat_deg - end if + ! Check that input is valid: + call check_phys_input(index) + + get_dlat_p = phys_columns(index)%lat_deg end function get_dlat_p @@ -354,16 +402,10 @@ real(r8) function get_dlon_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_dlon_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task - write(iulog, *) errmsg - call endrun(errmsg) - else - get_dlon_p = phys_columns(index)%lon_deg - end if + ! 
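The memory accounting added to phys_grid_init follows a simple before/after pattern: record usage, run the allocation-heavy initialization, record again, and reduce the per-task increase with MPI_MAX so only the largest growth is reported on the master task. A minimal serial Python sketch of the same pattern (not CAM code; the standard Unix resource module stands in for shr_mem_getusage, and the MPI reduction is omitted):

# Serial sketch of the "measure memory growth around an init step" pattern (illustrative only).
import resource

def rss_highwater():
    # Process high-water mark; kilobytes on Linux, bytes on macOS.
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

before = rss_highwater()
workspace = [float(i) for i in range(1_000_000)]   # stand-in for grid/decomposition allocations
after = rss_highwater()
print("Increase in memory highwater:", after - before)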
Check that input is valid: + call check_phys_input(index) + + get_dlon_p = phys_columns(index)%lon_deg end function get_dlon_p @@ -380,16 +422,10 @@ real(r8) function get_rlat_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_rlat_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task, ')' - write(iulog, *) errmsg - call endrun(errmsg) - else - get_rlat_p = phys_columns(index)%lat_rad - end if + ! Check that input is valid: + call check_phys_input(index) + + get_rlat_p = phys_columns(index)%lat_rad end function get_rlat_p @@ -406,16 +442,10 @@ real(r8) function get_rlon_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_rlon_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task, ')' - write(iulog, *) errmsg - call endrun(errmsg) - else - get_rlon_p = phys_columns(index)%lon_rad - end if + ! Check that input is valid: + call check_phys_input(index) + + get_rlon_p = phys_columns(index)%lon_rad end function get_rlon_p @@ -432,16 +462,10 @@ real(r8) function get_area_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_area_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task, ')' - write(iulog, *) errmsg - call endrun(errmsg) - else - get_area_p = phys_columns(index)%area - end if + ! Check that input is valid: + call check_phys_input(index) + + get_area_p = phys_columns(index)%area end function get_area_p @@ -465,18 +489,13 @@ subroutine get_rlat_all_p(rlatdim, rlats) character(len=*), parameter :: subname = 'get_rlat_all_p: ' !----------------------------------------------------------------------- - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((rlatdim < 1) .or. (rlatdim > columns_on_task)) then - write(errmsg, '(a,3(a,i0))') subname, 'dimension provided (', rlatdim, & - ') out of range (1 to ', columns_on_task, ')' - write(iulog, *) trim(errmsg) - call endrun(trim(errmsg)) - else - do index = 1, rlatdim - rlats(index) = phys_columns(index)%lat_rad - end do - end if + + ! Check that input is valid: + call check_phys_input(rlatdim) + + do index = 1, rlatdim + rlats(index) = phys_columns(index)%lat_rad + end do end subroutine get_rlat_all_p @@ -500,18 +519,13 @@ subroutine get_rlon_all_p(rlondim, rlons) character(len=*), parameter :: subname = 'get_rlon_all_p: ' !----------------------------------------------------------------------- - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((rlondim < 1) .or. (rlondim > columns_on_task)) then - write(errmsg, '(a,3(a,i0))') subname, 'dimension provided (', rlondim, & - ') out of range (1 to ', columns_on_task, ')' - write(iulog, *) trim(errmsg) - call endrun(trim(errmsg)) - else - do index = 1, rlondim - rlons(index) = phys_columns(index)%lon_rad - end do - end if + + ! 
Check that input is valid: + call check_phys_input(rlondim) + + do index = 1, rlondim + rlons(index) = phys_columns(index)%lon_rad + end do end subroutine get_rlon_all_p @@ -532,23 +546,17 @@ subroutine get_dyn_col_p(index, blk_num, blk_ind) character(len=128) :: errmsg character(len=*), parameter :: subname = 'get_dyn_col_p_index: ' - if (.not. phys_grid_initialized()) then - call endrun(subname//'physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, 'index (', index, & - ') out of range (1 to ', columns_on_task - write(iulog, *) trim(errmsg) - call endrun(trim(errmsg)) - else - off_size = SIZE(phys_columns(index)%dyn_block_index, 1) - if (SIZE(blk_ind, 1) < off_size) then - call endrun(subname//'blk_ind too small') - end if - blk_num = phys_columns(index)%local_dyn_block - blk_ind(1:off_size) = phys_columns(index)%dyn_block_index(1:off_size) - if (SIZE(blk_ind, 1) > off_size) then - blk_ind(off_size+1:) = -1 - end if + ! Check that input is valid: + call check_phys_input(index) + + off_size = SIZE(phys_columns(index)%dyn_block_index, 1) + if (SIZE(blk_ind, 1) < off_size) then + call endrun(subname//'blk_ind too small') + end if + blk_num = phys_columns(index)%local_dyn_block + blk_ind(1:off_size) = phys_columns(index)%dyn_block_index(1:off_size) + if (SIZE(blk_ind, 1) > off_size) then + blk_ind(off_size+1:) = -1 end if end subroutine get_dyn_col_p @@ -566,16 +574,10 @@ integer function global_index_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'global_index_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task - write(iulog, *) errmsg - call endrun(errmsg) - else - global_index_p = phys_columns(index)%global_col_num - end if + ! Check that input is valid: + call check_phys_input(index) + + global_index_p = phys_columns(index)%global_col_num end function global_index_p @@ -590,16 +592,10 @@ integer function local_index_p(index) character(len=128) :: errmsg character(len=*), parameter :: subname = 'local_index_p' - if (.not. phys_grid_initialized) then - call endrun(subname//': physics grid not initialized') - else if ((index < 1) .or. (index > columns_on_task)) then - write(errmsg, '(a,2(a,i0))') subname, ': index (', index, & - ') out of range (1 to ', columns_on_task - write(iulog, *) errmsg - call endrun(errmsg) - else - local_index_p = phys_columns(index)%phys_chunk_index - end if + ! Check that input is valid: + call check_phys_input(index) + + local_index_p = phys_columns(index)%phys_chunk_index end function local_index_p @@ -619,4 +615,26 @@ subroutine get_grid_dims(hdim1_d_out, hdim2_d_out) end subroutine get_grid_dims + !======================================================================== + + subroutine check_phys_input(subname, index_val) + use cam_abortutils, only: endrun + use string_utils, only: to_str + ! Check that the physics grid is initialized, and that the + ! user-provided index value is within an acceptable range. + ! If either check fails then end the model simulation. + + character(len=*), intent(in) :: subname !Calling subroutine name + integer, intent(in) :: index_val !User-specified index value + + ! Check if physics grid is initialized, + ! if so, then check that index value is within bounds: + if (.not. 
phys_grid_initialized) then + call endrun(subname//': physics grid not initialized') + else if ((index_val < 1) .or. (index_val > columns_on_task)) then + call endrun(subname//': index ('//to_str(index_val)//& + ') out of range (1 to '//& + to_str(columns_on_task)//')') + end if + end module physics_grid From a34d9ac3afee804c4ad1c2e37937510537b9d84b Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 4 Feb 2021 09:14:02 -0700 Subject: [PATCH 09/45] Make modifications necessary for model to compile with SE dycore. --- cime_config/buildlib | 14 + cime_config/cam_config.py | 3 + src/data/physconst.F90 | 708 +++++++++++++++++- src/data/registry.xml | 14 +- src/data/registry_v1_0.xsd | 14 +- src/dynamics/se/dp_coupling.F90 | 31 +- src/dynamics/se/dp_mapping.F90 | 5 +- src/dynamics/se/dycore/bndry_mod.F90 | 12 +- .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 2 +- src/dynamics/se/dycore/dimensions_mod.F90 | 58 +- src/dynamics/se/dycore/dof_mod.F90 | 4 +- src/dynamics/se/dycore/edge_mod.F90 | 2 +- .../se/dycore/fvm_consistent_se_cslam.F90 | 19 +- src/dynamics/se/dycore/interpolate_mod.F90 | 2 +- src/dynamics/se/dycore/parallel_mod.F90 | 9 +- src/dynamics/se/dycore/prim_advance_mod.F90 | 31 +- src/dynamics/se/dycore/prim_driver_mod.F90 | 15 +- src/dynamics/se/dycore/prim_init.F90 | 4 +- src/dynamics/se/dycore/prim_state_mod.F90 | 28 +- src/dynamics/se/dycore/reduction_mod.F90 | 4 +- src/dynamics/se/dycore/schedule_mod.F90 | 2 +- src/dynamics/se/dyn_comp.F90 | 22 +- src/dynamics/se/dyn_grid.F90 | 33 +- src/dynamics/se/native_mapping.F90 | 19 +- src/dynamics/se/stepon.F90 | 1 + src/dynamics/se/test_fvm_mapping.F90 | 2 +- src/dynamics/tests/inic_analytic_utils.F90 | 7 +- .../initial_conditions/ic_us_standard_atm.F90 | 8 +- src/dynamics/utils/hycoef.F90 | 279 +++---- src/physics/utils/physics_grid.F90 | 25 +- src/utils/datetime.F90 | 53 ++ src/utils/hycoef.F90 | 403 ---------- src/utils/std_atm_profile.F90 | 166 ++++ src/utils/string_utils.F90 | 48 ++ 34 files changed, 1309 insertions(+), 738 deletions(-) create mode 100644 src/utils/datetime.F90 delete mode 100644 src/utils/hycoef.F90 create mode 100644 src/utils/std_atm_profile.F90 diff --git a/cime_config/buildlib b/cime_config/buildlib index f44fd3a0..bfe6a23a 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -103,6 +103,20 @@ def _build_cam(): paths.append(path) # End if # End for + # Add main dynamics directory: + dyn_dir = os.path.join(atm_root, "src", "dynamics", dycore) + paths.append(dyn_dir) + # Add IC source directories if using a non-null dycore: + if dycore != "none": + paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) + paths.append(os.path.join(atm_root, "src", "dynamics", "tests", + "initial_conditions")) + # Add any necessary dycore sub-directories: + for root, direcs, _ in os.walk(dyn_dir): + for direc in direcs: + dyn_subdir = os.path.join(root, direc) + if dyn_subdir not in paths: + paths.append(dyn_subdir) #If using the CMEPS/NUOPC coupler, then add additional path: if case.get_value("COMP_INTERFACE") == "nuopc": paths.append(os.path.join(__CIMEROOT, "src", "drivers", diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 11a3873f..1b81f42c 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -821,6 +821,9 @@ def __init__(self, case, case_log): csnp_desc = "Number of points on each edge of the elements in a cubed sphere grid." 
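The buildlib change above gathers the active dycore's source directories, including every nested subdirectory, into the source path list. A minimal standalone sketch of that walk, with atm_root and dycore as placeholder values rather than values read from a CIME case:

# Sketch of the dycore source-path collection done in buildlib (placeholder paths only).
import os

atm_root = "/path/to/CAM"     # placeholder for the CAM source root
dycore = "se"                 # placeholder for the configured dycore

paths = [os.path.join(atm_root, "src", "dynamics", dycore)]
if dycore != "none":
    paths.append(os.path.join(atm_root, "src", "dynamics", "tests"))
    paths.append(os.path.join(atm_root, "src", "dynamics", "tests", "initial_conditions"))
for root, direcs, _ in os.walk(paths[0]):
    for direc in direcs:
        dyn_subdir = os.path.join(root, direc)
        if dyn_subdir not in paths:
            paths.append(dyn_subdir)
print("\n".join(paths))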
self.create_config("csnp", csnp_desc, csnp_val) + # Add number of points (NP) CPP definition: + self.add_cppdef("NP", csnp_val) + else: # Add number of latitudes in grid to configure object nlat_desc = "Number of unique latitude points in rectangular lat/lon" \ diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 9239fc3e..2d44900b 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -28,12 +28,11 @@ module physconst private save - public :: physconst_readnl - public :: physconst_init - public :: composition_init - public :: physconst_update - public :: physconst_calc_kappav - public :: get_cp + public :: physconst_readnl + public :: physconst_init + public :: physconst_update + public :: physconst_calc_kappav + public :: composition_init ! ! subroutines to compute thermodynamic quantities @@ -41,7 +40,23 @@ module physconst ! doi: 10.1029/2017MS001257 ! + public :: get_dp ! pressure level thickness from dry dp and dry mixing ratios + public :: get_pmid_from_dp ! full level pressure from dp (approximation depends on dycore) + public :: get_thermal_energy ! thermal energy quantity = dp*cp*T + public :: get_virtual_temp ! virtual temperature + public :: get_cp ! (generalized) heat capacity + public :: get_cp_dry ! (generalized) heat capacity for dry air + public :: get_gz_given_dp_Tv_Rdry ! geopotential (with dp,dry R and Tv as input) + public :: get_R_dry ! (generalized) dry air gas constant + public :: get_kappa_dry ! (generalized) dry kappa = R_dry/cp_dry + public :: get_dp_ref ! reference pressure layer thickness (include topography) public :: get_molecular_diff_coef ! molecular diffusion and thermal conductivity + public :: get_rho_dry ! dry densisty from temperature (temp) and pressure (dp_dry and tracer) + public :: get_exner ! Exner pressure + + public :: get_molecular_diff_coef_reference ! reference vertical profile of density, molecular diffusion & + ! and thermal conductivity + !> \section arg_table_physconst Argument Table !! \htmlinclude physconst.html @@ -175,7 +190,7 @@ module physconst real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_R(:) real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_mwi(:)!inverse molecular weights dry air real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_kv(:) !molecular diffusion - real(kind_phsy), allocatable, protected, public :: thermodynamic_active_species_kc(:) !thermal conductivity + real(kind_phys), allocatable, protected, public :: thermodynamic_active_species_kc(:) !thermal conductivity ! standard dry air (constant composition) real(kind_phys) :: mmro2, mmrn2 ! Mass mixing ratios of O2 and N2 @@ -224,7 +239,7 @@ subroutine physconst_readnl(nlfile) use shr_nl_mod, only: find_group_name => shr_nl_find_group_name use shr_flux_mod, only: shr_flux_adjust_constants ! use mpi, only: mpi_bcast !!XXgoldyXX: Why not? - use mpi, only: mpi_real8 + use mpi, only: mpi_real8, mpi_character use spmd_utils, only: masterproc, masterprocid, mpicom, npes use cam_logfile, only: iulog @@ -232,7 +247,7 @@ subroutine physconst_readnl(nlfile) character(len=*), intent(in) :: nlfile ! 
Local variables - integer :: unitn, ierr + integer :: unitn, ierr, i character(len=*), parameter :: subname = 'physconst_readnl' logical :: newg, newsday, newmwh2o, newcpwv logical :: newmwdry, newcpair, newrearth, newtmelt, newomega @@ -563,6 +578,8 @@ subroutine composition_init() if (ix<1) then write(iulog, *) subname//' dry air component not found: ', dry_air_species(dry_air_species_num) call endrun(subname // ':: dry air component not found') +!Un-comment once constituents are enabled -JN: +#if 0 else mw = 2.0_kind_phys*cnst_mw(ix) icnst = dry_air_species_num @@ -573,6 +590,7 @@ subroutine composition_init() thermodynamic_active_species_mwi(icnst) = 1.0_kind_phys/mw thermodynamic_active_species_kv(icnst) = 3.42_kind_phys thermodynamic_active_species_kc(icnst) = 56._kind_phys +#endif end if ! ! if last major species is not N2 then add code here @@ -610,6 +628,8 @@ subroutine composition_init() if (ix<1) then write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) call endrun(subname // ':: dry air component not found') +!Un-comment once constituents are enabled -JN: +#if 0 else mw = cnst_mw(ix) thermodynamic_active_species_idx(icnst) = ix @@ -620,6 +640,7 @@ subroutine composition_init() thermodynamic_active_species_kv(icnst) = 3.9_kind_phys thermodynamic_active_species_kc(icnst) = 75.9_kind_phys icnst = icnst+1 +#endif end if ! ! O2 @@ -630,6 +651,8 @@ subroutine composition_init() if (ix<1) then write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) call endrun(subname // ':: dry air component not found') +!Un-comment once constituents are enabled -JN: +#if 0 else mw = cnst_mw(ix) thermodynamic_active_species_idx(icnst) = ix @@ -640,6 +663,7 @@ subroutine composition_init() thermodynamic_active_species_kv(icnst) = 4.03_kind_phys thermodynamic_active_species_kc(icnst) = 56._kind_phys icnst = icnst+1 +#endif end if ! ! H @@ -650,6 +674,8 @@ subroutine composition_init() if (ix<1) then write(iulog, *) subname//' dry air component not found: ', dry_air_species(i) call endrun(subname // ':: dry air component not found') +!Un-comment once constituents are enabled -JN: +#if 0 else mw = cnst_mw(ix) thermodynamic_active_species_idx(icnst) = ix @@ -660,6 +686,7 @@ subroutine composition_init() thermodynamic_active_species_kv(icnst) = 0.0_kind_phys thermodynamic_active_species_kc(icnst) = 0.0_kind_phys icnst = icnst+1 +#endif end if ! ! If support for more major species is to be included add code here @@ -784,8 +811,8 @@ subroutine composition_init() if (ix<1) then write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i) call endrun(subname // ':: moist air component not found') + else - mw = cnst_mw(ix) thermodynamic_active_species_idx(icnst) = ix thermodynamic_active_species_cp (icnst) = cpice thermodynamic_active_species_cv (icnst) = cpice @@ -981,7 +1008,245 @@ subroutine physconst_calc_kappav(i0,i1,j0,j1,k0,k1,ntotq,tracer,kappav,cpv) !!XXgoldyXX: ^ until we get constituents figured out in CCPP end subroutine physconst_calc_kappav + ! + !**************************************************************************************************************** + ! + ! Compute pressure level thickness from dry pressure and thermodynamic active species mixing ratios + ! + ! Tracer can either be in units of dry mixing ratio (mixing_ratio=1) or "mass" (=m*dp_dry) (mixing_ratio=2) + ! + !**************************************************************************************************************** + ! 
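The relation implemented by get_dp below can be stated compactly: with dry layer thickness dp_dry and water-species dry mixing ratios m_q, the moist thickness is dp = dp_dry*(1 + sum_q m_q) (equivalently dp = dp_dry plus the sum of the tracer masses m_q*dp_dry), and the surface pressure follows as ps = ptop plus the column sum of dp. A minimal single-column Python sketch with made-up layer values (not CAM data):

# Single-column sketch of the dp and ps relations implemented by get_dp (made-up numbers).
ptop = 200.0                              # model-top pressure [Pa], illustrative
dp_dry = [800.0, 1500.0, 3000.0]          # dry layer thicknesses [Pa], illustrative
q_wv = [1.0e-3, 3.0e-3, 8.0e-3]           # water vapour dry mixing ratios, illustrative

# mixing_ratio == 1 case: tracers are dry mixing ratios
dp = [d * (1.0 + q) for d, q in zip(dp_dry, q_wv)]

# surface pressure is the model-top pressure plus the column sum of dp
ps = ptop + sum(dp)
print(dp, ps)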
+ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop) + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac ! array bounds + real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(r8), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness + real(r8), optional,intent(out) :: ps(i0:i1,j0:j1) ! surface pressure (if ps present then ptop + ! must be present) + real(r8), optional,intent(in) :: ptop ! pressure at model top + + integer :: i,j,k,m_cnst,nq + + dp = dp_dry + if (mixing_ratio==1) then + do nq=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = active_species_idx(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + dp(i,j,k) = dp(i,j,k) + dp_dry(i,j,k)*tracer(i,j,k,m_cnst) + end do + end do + end do + end do + else + do nq=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = active_species_idx(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + dp(i,j,k) = dp(i,j,k) + tracer(i,j,k,m_cnst) + end do + end do + end do + end do + end if + if (present(ps)) then + if (present(ptop)) then + ps = ptop + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + ps(i,j) = ps(i,j)+dp(i,j,k) + end do + end do + end do + else + call endrun('get_dp: if ps is present ptop must be present') + end if + end if + end subroutine get_dp + ! + !************************************************************************************************************************* + ! + ! compute mid-level (full level) pressure from dry pressure and water tracers + ! + !************************************************************************************************************************* + ! + subroutine get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, & + dp_dry, ptop, pmid, pint, dp) + + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! array bounds + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness + real(r8), intent(in) :: ptop ! model top pressure + real(r8), intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure + real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,nlev+1) ! half-level pressure + real(r8), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) ! presure level thickness + + real(r8) :: dp_local(i0:i1,j0:j1,nlev) ! local pressure level thickness + real(r8) :: pint_local(i0:i1,j0:j1,nlev+1) ! 
local interface pressure + integer :: k + + call get_dp(i0,i1,j0,j1,1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp_local) + pint_local(:,:,1) = ptop + do k=2,nlev+1 + pint_local(:,:,k) = dp_local(:,:,k-1)+pint_local(:,:,k-1) + end do + + call get_pmid_from_dp(i0,i1,j0,j1,1,nlev,dp_local,ptop,pmid,pint_local) + + if (present(pint)) pint=pint_local + if (present(dp)) dp=dp_local + end subroutine get_pmid_from_dpdry + ! + !************************************************************************************************************************* + ! + ! compute mid-level (full level) pressure + ! + !************************************************************************************************************************* + ! + subroutine get_pmid_from_dp(i0,i1,j0,j1,k0,k1,dp,ptop,pmid,pint) + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + use physics_types, only: dycore_gz_log_calc + + integer, intent(in) :: i0,i1,j0,j1,k0,k1 ! array bounds + real(r8), intent(in) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(r8), intent(in) :: ptop ! pressure at model top + real(r8), intent(out) :: pmid(i0:i1,j0:j1,k0:k1) ! mid (full) level pressure + real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,k0:k1+1) ! pressure at interfaces (half levels) + + real(r8) :: pint_local(i0:i1,j0:j1,k0:k1+1) + integer :: k + + pint_local(:,:,k0) = ptop + do k=k0+1,k1+1 + pint_local(:,:,k) = dp(:,:,k-1)+pint_local(:,:,k-1) + end do + + if (dycore_gz_log_calc) then + do k=k0,k1 + pmid(:,:,k) = dp(:,:,k)/(log(pint_local(:,:,k+1))-log(pint_local(:,:,k))) + end do + else + do k=k0,k1 + pmid(:,:,k) = 0.5_r8*(pint_local(:,:,k)+pint_local(:,:,k+1)) + end do + end if + if (present(pint)) pint=pint_local + end subroutine get_pmid_from_dp + ! + !**************************************************************************************************************** + ! + ! Compute Exner pressure + ! + !**************************************************************************************************************** + ! + subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,& + dp_dry,ptop,p00,inv_exner,exner,poverp0) + + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! index bounds + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness + real(r8), intent(in) :: ptop ! pressure at model top + real(r8), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa) + logical , intent(in) :: inv_exner ! logical for outputting inverse Exner or Exner pressure + real(r8), intent(out) :: exner(i0:i1,j0:j1,nlev) + real(r8), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev)! for efficiency when a routine needs this variable + + real(r8) :: pmid(i0:i1,j0:j1,nlev),kappa_dry(i0:i1,j0:j1,nlev) + ! + ! compute mid level pressure + ! + call get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,ptop,pmid) + ! + ! compute kappa = Rd/cpd + ! 
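For reference, the pressure construction used in this routine reduces to three small formulas: interface pressures accumulate downward from ptop, the mid-level pressure is either the arithmetic mean of the bounding interfaces or dp/dln(p) when the log-based option is active, and the inverse Exner function is (p00/pmid)**kappa. A minimal single-column Python sketch with made-up inputs (kappa is held constant here, whereas the code computes a generalized kappa_dry):

# Single-column sketch of pint, pmid and the Exner function (made-up values, not CAM data).
import math

ptop, p00, kappa = 200.0, 1.0e5, 287.0 / 1004.0    # illustrative constants
dp = [800.0, 1500.0, 3000.0]                        # layer thicknesses [Pa], illustrative

pint = [ptop]
for d in dp:
    pint.append(pint[-1] + d)                       # accumulate interface pressures downward

pmid_arith = [0.5 * (pint[k] + pint[k + 1]) for k in range(len(dp))]
pmid_log = [dp[k] / (math.log(pint[k + 1]) - math.log(pint[k])) for k in range(len(dp))]

inv_exner = [(p00 / p) ** kappa for p in pmid_arith]
print(pmid_arith, pmid_log, inv_exner)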
+ if (mixing_ratio==1) then + call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry) + else + call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry,1.0_r8/dp_dry) + end if + if (inv_exner) then + exner(:,:,:) = (p00/pmid(:,:,:))**kappa_dry(:,:,:) + else + exner(:,:,:) = (pmid(:,:,:)/p00)**kappa_dry(:,:,:) + end if + if (present(poverp0)) poverp0=pmid(:,:,:)/p00 + end subroutine get_exner + ! + !**************************************************************************************************************** + ! + ! Compute geopotential from pressure level thickness and virtual temperature + ! + !**************************************************************************************************************** + ! + subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pmid) + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + use physics_types, only: dycore_gz_log_calc + + integer, intent(in) :: i0,i1,j0,j1,nlev ! array bounds + real(r8), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness + real(r8), intent(in) :: T_v (i0:i1,j0:j1,nlev) ! virtual temperature + real(r8), intent(in) :: R_dry(i0:i1,j0:j1,nlev) ! R dry + real(r8), intent(in) :: phis (i0:i1,j0:j1) ! surface geopotential + real(r8), intent(in) :: ptop ! model top presure + real(r8), intent(out) :: gz(i0:i1,j0:j1,nlev) ! geopotential + real(r8), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure + + + real(r8), dimension(i0:i1,j0:j1,nlev) :: pmid_local + real(r8), dimension(i0:i1,j0:j1,nlev+1) :: pint + real(r8), dimension(i0:i1,j0:j1) :: gzh, Rdry_tv + integer :: k + + call get_pmid_from_dp(i0,i1,j0,j1,1,nlev,dp,ptop,pmid_local,pint) + + ! + ! integrate hydrostatic eqn + ! + gzh = phis + if (dycore_gz_log_calc) then + do k=nlev,1,-1 + Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k) + gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*(1.0_r8-pint(:,:,k)/pmid_local(:,:,k)) + gzh(:,:) = gzh(:,:) + Rdry_tv(:,:)*(log(pint(:,:,k+1))-log(pint(:,:,k))) + end do + else + do k=nlev,1,-1 + Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k) + gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*0.5_r8*dp(:,:,k)/pmid_local(:,:,k) + gzh(:,:) = gzh(:,:) + Rdry_tv(:,:)*dp(:,:,k)/pmid_local(:,:,k) + end do + end if + if (present(pmid)) pmid=pmid_local + end subroutine get_gz_given_dp_Tv_Rdry ! !************************************************************************************************************************* ! @@ -1117,6 +1382,47 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp end if end subroutine get_molecular_diff_coef ! + !************************************************************************************************************************* + ! + ! compute reference vertical profile of density, molecular diffusion and thermal conductivity + ! + !************************************************************************************************************************* + ! + subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvis_ref,kmcnd_ref,rho_ref) + + !Given that this routine is only used with the dycore structures, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + + ! 
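For reference, the hydrostatic build-up of geopotential in get_gz_given_dp_Tv_Rdry integrates R_dry*T_v*dp/pmid upward from the surface, with the mid-level value offset by half a layer. A minimal single-column Python sketch of the non-log branch only, with made-up inputs:

# Single-column sketch of the upward hydrostatic integration in get_gz_given_dp_Tv_Rdry
# (all numbers are illustrative; only the non-log branch is shown).
R_dry = [287.0, 287.0, 287.0]            # dry gas constant per layer
T_v = [220.0, 260.0, 285.0]              # virtual temperature per layer, top to bottom
dp = [800.0, 1500.0, 3000.0]             # layer thickness [Pa], top to bottom
pmid = [600.0, 1750.0, 4000.0]           # mid-level pressure [Pa], consistent with ptop = 200 Pa
phis = 5000.0                            # surface geopotential [m2 s-2]

gz = [0.0] * len(dp)
gzh = phis                               # geopotential at the lower interface of the current layer
for k in reversed(range(len(dp))):       # integrate from the surface upward
    RdTv = R_dry[k] * T_v[k]
    gz[k] = gzh + RdTv * 0.5 * dp[k] / pmid[k]
    gzh = gzh + RdTv * dp[k] / pmid[k]
print(gz)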
args + integer, intent(in) :: k0,k1 !min/max vertical index + real(r8), intent(in) :: tref !reference temperature + real(r8), intent(in) :: press(k0:k1) !pressure + real(r8), intent(in) :: sponge_factor(k0:k1) !multiply kmvis and kmcnd with sponge_factor (for sponge layer) + real(r8), intent(out) :: kmvis_ref(k0:k1) !reference molecular diffusion coefficient + real(r8), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient + real(r8), intent(out) :: rho_ref(k0:k1) !reference density + + ! local vars + integer :: k + + !-------------------------------------------- + ! Set constants needed for updates + !-------------------------------------------- + + do k=k0,k1 + rho_ref(k) = press(k)/(tref*Rair) !ideal gas law for dry air + kmvis_ref(k) = sponge_factor(k)* & + (kv1*mmro2*o2_mwi + & + kv2*mmrn2*n2_mwi)*mbar* & + tref**kv4 * 1.e-7_r8 + kmcnd_ref(k) = sponge_factor(k)* & + (kc1*mmro2*o2_mwi + & + kc2*mmrn2*n2_mwi)*mbar* & + tref**kc4 * 1.e-5_r8 + end do + end subroutine get_molecular_diff_coef_reference + ! !**************************************************************************************************************** ! ! Compute dry air heaet capacity under constant pressure @@ -1170,6 +1476,186 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec end if end subroutine get_cp_dry ! + !**************************************************************************************************************** + ! + ! Compute generalized dry air gas constant R + ! + !**************************************************************************************************************** + ! + subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry,fact) + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac !array boundas + real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)!tracer array + integer, intent(in) :: active_species_idx_dycore(:) !index of active species in tracer + real(r8), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R + real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio + + integer :: i,j,k,m_cnst,nq + real(r8):: factor(i0:i1,j0:j1,k0_trac:k1_trac), residual(i0:i1,j0:j1,k0:k1), mm + if (dry_air_species_num==0) then + ! + ! dry air not species dependent + ! + R_dry = rair + else + if (present(fact)) then + factor = fact(:,:,:) + else + factor = 1.0_r8 + endif + + R_dry = 0.0_r8 + residual = 1.0_r8 + do nq=1,dry_air_species_num-1 + m_cnst = active_species_idx_dycore(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + mm = tracer(i,j,k,m_cnst)*factor(i,j,k) + R_dry(i,j,k) = R_dry(i,j,k)+thermodynamic_active_species_R(nq)*mm + residual(i,j,k) = residual(i,j,k) - mm + end do + end do + end do + end do + ! + ! last dry air constituent derived from the others + ! + nq = dry_air_species_num + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + R_dry(i,j,k) = R_dry(i,j,k)+thermodynamic_active_species_R(nq)*residual(i,j,k) + end do + end do + end do + end if + end subroutine get_R_dry + ! + !**************************************************************************************************************** + ! + ! g*compute thermal energy = cp*T*dp, where dp is pressure level thickness, cp is generalized cp and T temperature + ! + ! Note:tracer is in units of m*dp_dry ("mass") + ! 
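The generalized dry-air gas constant assembled in get_R_dry above is a mixing-ratio-weighted sum of the species gas constants, with the last major dry-air species treated as the residual so the mixing ratios close to one. A single-point Python sketch of that weighting (species gas constants and mixing ratios are invented placeholders):

# Single-point sketch of the residual-species weighting used for the generalized dry-air R
# (all values below are illustrative, not CAM constituent data).
species_R = [287.0, 260.0, 297.0]   # per-species gas constants; the last entry is the residual species
mixing_ratio = [0.20, 0.05]         # mixing ratios of all species except the residual one

residual = 1.0 - sum(mixing_ratio)  # whatever is left belongs to the last species
R_dry = sum(R * m for R, m in zip(species_R[:-1], mixing_ratio)) + species_R[-1] * residual
print(R_dry)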
+ !**************************************************************************************************************** + ! + subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy, & + active_species_idx_dycore) + + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(r8), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,ntrac)!tracer array (mass weighted) + real(r8), intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature + real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry presure level thickness + real(r8), optional, intent(out):: thermal_energy(i0:i1,j0:j1,k0:k1) !thermal energy in each column: sum cp*T*dp + ! + ! array of indicies for index of thermodynamic active species in dycore tracer array + ! (if different from physics index) + ! + integer, optional, dimension(:) :: active_species_idx_dycore + + ! local vars + integer :: nq, itrac + integer, dimension(thermodynamic_active_species_num) :: idx_local + ! + ! some sanity checks + ! + if (present(active_species_idx_dycore)) then + idx_local = active_species_idx_dycore + else + idx_local = thermodynamic_active_species_idx + end if + ! + ! "mass-weighted" cp (dp must be dry) + ! + if (dry_air_species_num==0) then + thermal_energy(:,:,:) = thermodynamic_active_species_cp(0)*dp_dry(:,:,:) + else + call get_cp_dry(i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer_mass,idx_local,thermal_energy,fact=1.0_r8/dp_dry(:,:,:)) + thermal_energy(:,:,:) = thermal_energy(:,:,:)*dp_dry(:,:,:) + end if + ! + ! tracer is in units of m*dp ("mass"), where m is dry mixing ratio and dry pressure level thickness + ! + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = idx_local(nq) + thermal_energy(:,:,:) = thermal_energy(:,:,:)+thermodynamic_active_species_cp(nq)*tracer_mass(:,:,:,itrac) + end do + thermal_energy(:,:,:) = thermal_energy(:,:,:)*temp(:,:,:) + end subroutine get_thermal_energy + ! + !**************************************************************************************************************** + ! + ! Compute virtual temperature T_v + ! + ! tracer is in units of dry mixing ratio unless optional argument dp_dry is present in which case tracer is + ! in units of "mass" (=m*dp) + ! + ! If temperature is not supplied then just return factor that T needs to be multiplied by to get T_v + ! + !**************************************************************************************************************** + ! + subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q, & + active_species_idx_dycore) + + !Given that this routine is only used by the dycore, + !the "r8" kind is used instead of "kind_phys": + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_logfile, only: iulog + ! args + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) !tracer array + real(r8), intent(out) :: T_v(i0:i1,j0:j1,k0:k1) !virtual temperature + real(r8), optional, intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature + real(r8), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry pressure level thickness + real(r8), optional,intent(out) :: sum_q(i0:i1,j0:j1,k0:k1) !sum tracer + ! + ! array of indicies for index of thermodynamic active species in dycore tracer array + ! (if different from physics index) + ! + integer, optional, intent(in) :: active_species_idx_dycore(:) + + ! 
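The virtual temperature assembled in get_virtual_temp generalizes the familiar moist form to an arbitrary set of water species: with dry mixing ratios m_q, T_v = T*(R_dry + sum_q R_q*m_q)/(R_dry*(1 + sum_q m_q)). A single-point Python sketch with water vapour only and invented values:

# Single-point sketch of the generalized virtual temperature in get_virtual_temp
# (gas constants, mixing ratio and temperature below are illustrative).
Rd = 287.0                  # generalized dry-air gas constant
Rv = 461.5                  # water vapour gas constant
qv = 8.0e-3                 # water vapour dry mixing ratio
T = 285.0                   # temperature [K]

sum_species = 1.0 + qv      # condensed water species would add to this sum as well
Tv = T * (Rd + Rv * qv) / (Rd * sum_species)
print(Tv)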
local vars + integer :: itrac,nq + real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, factor, Rd + integer, dimension(thermodynamic_active_species_num) :: idx_local,idx + if (present(active_species_idx_dycore)) then + idx_local = active_species_idx_dycore + else + idx_local = thermodynamic_active_species_idx + end if + + if (present(dp_dry)) then + factor = 1.0_r8/dp_dry + else + factor = 1.0_r8 + end if + + sum_species = 1.0_r8 !all dry air species sum to 1 + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = idx_local(nq) + sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:) + end do + + call get_R_dry (i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer,idx_local,Rd,fact=factor) + t_v(:,:,:) = Rd(:,:,:) + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = idx_local(nq) + t_v(:,:,:) = t_v(:,:,:)+thermodynamic_active_species_R(nq)*tracer(:,:,:,itrac)*factor(:,:,:) + end do + if (present(temp)) then + t_v(:,:,:) = t_v(:,:,:)*temp(:,:,:)/(Rd(:,:,:)*sum_species) + else + t_v(:,:,:) = t_v(:,:,:)/(Rd(:,:,:)*sum_species) + end if + if (present(sum_q)) sum_q=sum_species + end subroutine get_virtual_temp + ! !************************************************************************************************************************* ! ! Compute generalized heat capacity at constant pressure @@ -1192,7 +1678,7 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species ! local vars integer :: nq,i,j,k, itrac - real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, sum_cp, factor + real(kind_phys), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, sum_cp, factor integer, dimension(thermodynamic_active_species_num) :: idx_local if (present(active_species_idx_dycore)) then @@ -1229,5 +1715,205 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species end if end subroutine get_cp + ! + !************************************************************************************************************************* + ! + ! compute reference pressure levels + ! + !************************************************************************************************************************* + ! + subroutine get_dp_ref(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,k0,k1 + real(r8), intent(in) :: hyai(k0:k1+1),hybi(k0:k1+1),ps0 + real(r8), intent(in) :: phis(i0:i1,j0:j1) + real(r8), intent(out) :: dp_ref(i0:i1,j0:j1,k0:k1) + real(r8), intent(out) :: ps_ref(i0:i1,j0:j1) + integer :: k + ! + ! use static reference pressure (hydrostatic balance incl. effect of topography) + ! + ps_ref(:,:) = ps0*exp(-phis(:,:)/(Rair*Tref)) + do k=k0,k1 + dp_ref(:,:,k) = ((hyai(k+1)-hyai(k))*ps0 + (hybi(k+1)-hybi(k))*ps_ref(:,:)) + end do + end subroutine get_dp_ref + ! + !************************************************************************************************************************* + ! + ! compute dry densisty from temperature (temp) and pressure (dp_dry and tracer) + ! + !************************************************************************************************************************* + ! + subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_mass,& + rho_dry, rhoi_dry,active_species_idx_dycore,pint_out,pmid_out) + + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + ! 
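The reference state built by get_dp_ref combines an isothermal, hydrostatic surface pressure over topography, ps_ref = ps0*exp(-phis/(Rair*Tref)), with the usual hybrid-coefficient layer thickness dp_ref(k) = (hyai(k+1)-hyai(k))*ps0 + (hybi(k+1)-hybi(k))*ps_ref. A minimal single-column Python sketch (the hybrid coefficients and phis below are illustrative placeholders, not a real CAM vertical grid):

# Single-column sketch of the reference surface pressure and layer thickness in get_dp_ref
# (hybrid coefficients, phis, Rair and Tref below are illustrative placeholders).
import math

Rair, Tref, ps0 = 287.0, 288.0, 1.0e5
phis = 5000.0                                    # surface geopotential [m2 s-2], illustrative
hyai = [0.002, 0.10, 0.40, 0.00]                 # interface hybrid A coefficients, illustrative
hybi = [0.000, 0.05, 0.45, 1.00]                 # interface hybrid B coefficients, illustrative

ps_ref = ps0 * math.exp(-phis / (Rair * Tref))   # isothermal hydrostatic balance over topography
dp_ref = [(hyai[k + 1] - hyai[k]) * ps0 + (hybi[k + 1] - hybi[k]) * ps_ref
          for k in range(len(hyai) - 1)]
print(ps_ref, dp_ref)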
args + integer, intent(in) :: i0,i1,j0,j1,k1,ntrac,nlev + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,ntrac) ! Tracer array + real(r8), intent(in) :: temp(i0:i1,j0:j1,1:nlev) ! Temperature + real(r8), intent(in) :: ptop + real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) + logical, intent(in) :: tracer_mass + real(r8), optional,intent(out) :: rho_dry(i0:i1,j0:j1,1:k1) + real(r8), optional,intent(out) :: rhoi_dry(i0:i1,j0:j1,1:k1+1) + ! + ! array of indicies for index of thermodynamic active species in dycore tracer array + ! (if different from physics index) + ! + integer, optional, intent(in) :: active_species_idx_dycore(:) + real(r8),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1) + real(r8),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1) + + ! local vars + integer :: i,j,k + real(r8), dimension(i0:i1,j0:j1,1:k1) :: pmid + real(r8):: pint(i0:i1,j0:j1,1:k1+1) + real(r8), allocatable :: R_dry(:,:,:) + integer, dimension(thermodynamic_active_species_num):: idx_local + + if (present(active_species_idx_dycore)) then + idx_local = active_species_idx_dycore + else + idx_local = thermodynamic_active_species_idx + end if + ! + ! we assume that air is dry where molecular viscosity may be significant + ! + call get_pmid_from_dp(i0,i1,j0,j1,1,k1,dp_dry,ptop,pmid,pint=pint) + if (present(pint_out)) pint_out=pint + if (present(pint_out)) pmid_out=pmid + if (present(rhoi_dry)) then + allocate(R_dry(i0:i1,j0:j1,1:k1+1)) + if (tracer_mass) then + call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) + else + call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry) + end if + do k=2,k1+1 + rhoi_dry(i0:i1,j0:j1,k) = 0.5_r8*(temp(i0:i1,j0:j1,k)+temp(i0:i1,j0:j1,k-1))!could be more accurate! + rhoi_dry(i0:i1,j0:j1,k) = pint(i0:i1,j0:j1,k)/(rhoi_dry(i0:i1,j0:j1,k)*R_dry(i0:i1,j0:j1,k)) !ideal gas law for dry air + end do + ! + ! extrapolate top level value + ! + k=1 + rhoi_dry(i0:i1,j0:j1,k) = 1.5_r8*(temp(i0:i1,j0:j1,1)-0.5_r8*temp(i0:i1,j0:j1,2)) + rhoi_dry(i0:i1,j0:j1,k) = pint(i0:i1,j0:j1,1)/(rhoi_dry(i0:i1,j0:j1,k)*R_dry(i0:i1,j0:j1,k)) !ideal gas law for dry air + deallocate(R_dry) + end if + if (present(rho_dry)) then + allocate(R_dry(i0:i1,j0:j1,1:k1)) + if (tracer_mass) then + call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) + else + call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry) + end if + do k=1,k1 + do j=j0,j1 + do i=i0,i1 + rho_dry(i,j,k) = pmid(i,j,k)/(temp(i,j,k)*R_dry(i,j,k)) !ideal gas law for dry air + end do + end do + end do + end if + end subroutine get_rho_dry + ! + !************************************************************************************************************************* + ! + ! compute molecular weight dry air + ! + !************************************************************************************************************************* + ! 
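The routine below computes the mean dry-air molecular weight as the inverse of the mixing-ratio-weighted sum of inverse molecular weights, again closing the budget with a residual species. A single-point Python sketch with invented molecular weights and mixing ratios:

# Single-point sketch of the mean dry-air molecular weight computed by get_mbarv
# (molecular weights and mixing ratios below are illustrative placeholders).
species_mw = [16.0, 32.0, 28.0]     # molecular weights [g mol-1]; the last entry is the residual species
mixing_ratio = [0.10, 0.20]         # mixing ratios of all species except the residual one

residual = 1.0 - sum(mixing_ratio)
inv_mbar = sum(m / mw for mw, m in zip(species_mw[:-1], mixing_ratio)) + residual / species_mw[-1]
mbar = 1.0 / inv_mbar
print(mbar)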
+ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbarv,fact) + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac, nlev + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array + integer, intent(in) :: active_species_idx(:) !index of active species in tracer + real(r8), intent(out) :: mbarv(i0:i1,j0:j1,k0:k1) !molecular weight of dry air + real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio + + integer :: i,j,k,m_cnst,nq + real(r8):: factor(i0:i1,j0:j1,k0:k1), residual(i0:i1,j0:j1,k0:k1), mm + ! + ! dry air not species dependent + ! + if (dry_air_species_num==0) then + mbarv = mwdry + else + if (present(fact)) then + factor = fact(:,:,:) + else + factor = 1.0_r8 + endif + + mbarv = 0.0_r8 + residual = 1.0_r8 + do nq=1,dry_air_species_num-1 + m_cnst = active_species_idx(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + mm = tracer(i,j,k,m_cnst)*factor(i,j,k) + mbarv(i,j,k) = mbarv(i,j,k)+thermodynamic_active_species_mwi(nq)*mm + residual(i,j,k) = residual(i,j,k) - mm + end do + end do + end do + end do + nq = dry_air_species_num + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + mbarv(i,j,k) = mbarv(i,j,k)+thermodynamic_active_species_mwi(nq)*residual(i,j,k) + end do + end do + end do + mbarv(i0:i1,j0:j1,k0:k1) = 1.0_r8/mbarv(i0:i1,j0:j1,k0:k1) + end if + end subroutine get_mbarv + ! + !************************************************************************************************************************* + ! + ! compute generalized kappa =Rdry/cpdry + ! + !************************************************************************************************************************* + ! + subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,fact) + !Only called by the dycore, so "r8" kind is used: + use shr_kind_mod, only: r8=>shr_kind_r8 + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,nlev + real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array + integer, intent(in) :: active_species_idx(:) !index of thermodynamic active tracers + real(r8), intent(out) :: kappa_dry(i0:i1,j0:j1,k0:k1) !kappa dry + real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio + ! + real(r8), allocatable, dimension(:,:,:) :: cp_dry,R_dry + ! + ! dry air not species dependent + if (dry_air_species_num==0) then + kappa_dry= rair/cpair + else + allocate(R_dry(i0:i1,j0:j1,k0:k1)) + allocate(cp_dry(i0:i1,j0:j1,k0:k1)) + if (present(fact)) then + call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry,fact=fact) + call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry,fact=fact) + else + call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry) + call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry) + end if + kappa_dry = R_dry/cp_dry + deallocate(R_dry,cp_dry) + end if + end subroutine get_kappa_dry end module physconst diff --git a/src/data/registry.xml b/src/data/registry.xml index ea665fa1..37d6dfed 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -270,21 +270,19 @@ flag indicating if vertical coordinate is lagrangian .false. + + .false. + .true. 
+ - geopotential_at_surface air_temperature x_wind y_wind lagrangian_tendency_of_air_pressure dry_static_energy_content_of_atmosphere_layer - constituent_mixing_ratio diff --git a/src/data/registry_v1_0.xsd b/src/data/registry_v1_0.xsd index 6aebc7f5..7efb8c37 100644 --- a/src/data/registry_v1_0.xsd +++ b/src/data/registry_v1_0.xsd @@ -118,10 +118,18 @@ + + + + + + + + - + - + @@ -153,7 +161,7 @@ - + diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 7e2b592f..6ef65607 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -45,7 +45,7 @@ module dp_coupling CONTAINS !========================================================================================= -subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) +subroutine d_p_coupling(phys_state, phys_tend, dyn_out) ! Convert the dynamics output state into the physics input state. ! Note that all pressures and tracer mixing ratios coming from the dycore are based on @@ -251,7 +251,7 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) do m = 1, pcnst do ilyr = 1, pver - phys_state(lchnk)%q(icol, ilyr,m) = real(q_tmp(blk_ind(1), ilyr,m, ie), kind_phys) + phys_state%q(icol, ilyr,m) = real(q_tmp(blk_ind(1), ilyr,m, ie), kind_phys) end do end do end do @@ -419,7 +419,7 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) uv_tmp(blk_ind(1),2,ilyr,ie) = real(phys_tend%dvdt(icol,ilyr), r8) do m = 1, pcnst dq_tmp(blk_ind(1),ilyr,m,ie) = & - (real(phys_state(lchnk)%q(icol,ilyr,m), r8) - q_prev(icol,ilyr,m,lchnk)) + (real(phys_state%q(icol,ilyr,m), r8) - q_prev(icol,ilyr,m)) end do end do end do @@ -562,14 +562,15 @@ subroutine derived_phys_dry(phys_state, phys_tend) use physics_types, only: psdry, pint, lnpint, pintdry, lnpintdry use physics_types, only: pdel, rpdel, pdeldry, rpdeldry use physics_types, only: pmid, lnpmid, pmiddry, lnpmiddry - use physics_types, only: exner, zi, zm + use physics_types, only: exner, zi, zm, ps, lagrangian_vertical use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv - use phys_control, only: waccmx_is - use geopotential_t, only: geopotential_t +! use phys_control, only: waccmx_is + use geopotential_t, only: geopotential_t_run ! use check_energy, only: check_energy_timestep_init use hycoef, only: hyai, ps0 use shr_vmath_mod, only: shr_vmath_log + use shr_kind_mod, only: shr_kind_cx ! use qneg_module, only: qneg3 use dyn_comp, only: ixo, ixo2, ixh, ixh2 @@ -583,6 +584,10 @@ subroutine derived_phys_dry(phys_state, phys_tend) integer :: m, i, k + !Needed for "geopotential_t" CCPP scheme: + integer :: errflg + character(len=shr_kind_cx) :: errmsg + !-------------------------------------------- ! Variables needed for WACCM-X !-------------------------------------------- @@ -690,7 +695,7 @@ subroutine derived_phys_dry(phys_state, phys_tend) phys_state%q(i,k,ix_qv) = factor_array(i,k)*phys_state%q(i,k,ix_qv) phys_state%q(i,k,ix_cld_liq) = factor_array(i,k)*phys_state%q(i,k,ix_cld_liq) phys_state%q(i,k,ix_rain) = factor_array(i,k)*phys_state%q(i,k,ix_rain) - end do + end do end do #endif @@ -753,9 +758,11 @@ subroutine derived_phys_dry(phys_state, phys_tend) ! phys_state%t , phys_state%q(:,:,ix_qv), rairv, gravit, zvirv , & ! 
phys_state%zi , phys_state%zm , ncol ) - call geopotential_t(lnpint, lnpmid, pint, pmid, pdel, rpdel, & - phys_state%t, phys_state%q(:,:,ix_qv), rairv, & - gravit, zvirv, zi, zm, pcols) + call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & + pverp, 1, lnpint, pint, pmid, pdel, & + rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & + rairv, gravit, zvirv, zi, zm, pcols, & + errflg, errmsg) !NOTE: Should dry static energy be done in CCPP physics suite? -JN: @@ -763,7 +770,7 @@ subroutine derived_phys_dry(phys_state, phys_tend) do k = 1, pver do i = 1, pcols phys_state%s(i,k) = cpair*phys_state%t(i,k) & - + gravit*phys_state%zm(i,k) + phys_state%phis(i) + + gravit*zm(i,k) + phys_state%phis(i) end do end do @@ -800,7 +807,7 @@ subroutine thermodynamic_consistency(phys_state, phys_tend, ncols, pver) type(physics_tend ), intent(inout) :: phys_tend integer, intent(in) :: ncols, pver - real(r8):: inv_cp(ncols,pver) + real(kind_phys) :: inv_cp(ncols,pver) !---------------------------------------------------------------------------- if (lcp_moist.and.phys_dyn_cp==1) then diff --git a/src/dynamics/se/dp_mapping.F90 b/src/dynamics/se/dp_mapping.F90 index 41eb948c..96fedd96 100644 --- a/src/dynamics/se/dp_mapping.F90 +++ b/src/dynamics/se/dp_mapping.F90 @@ -117,7 +117,8 @@ end subroutine dp_reorder subroutine dp_allocate(elem) use spmd_utils, only: masterproc, masterprocid, npes - use spmd_utils, only: mpicom, mpi_integer + use spmd_utils, only: mpicom + use mpi, only: mpi_integer !SE dycore: use dimensions_mod, only: nelem, nelemd @@ -176,13 +177,13 @@ end subroutine dp_deallocate !!! subroutine dp_write(elem, fvm, grid_format, filename_in) + use mpi, only: mpi_integer, mpi_real8 use cam_abortutils, only: endrun use netcdf, only: nf90_create, nf90_close, nf90_enddef use netcdf, only: nf90_def_dim, nf90_def_var, nf90_put_var use netcdf, only: nf90_double, nf90_int, nf90_put_att use netcdf, only: nf90_noerr, nf90_strerror, nf90_clobber use spmd_utils, only: masterproc, masterprocid, mpicom, npes - use spmd_utils, only: mpi_integer, mpi_real8 use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush diff --git a/src/dynamics/se/dycore/bndry_mod.F90 b/src/dynamics/se/dycore/bndry_mod.F90 index 84dc8a45..c0079489 100644 --- a/src/dynamics/se/dycore/bndry_mod.F90 +++ b/src/dynamics/se/dycore/bndry_mod.F90 @@ -47,9 +47,9 @@ subroutine bndry_exchange_a2a(par,nthreads,ithr,buffer,location) use schedtype_mod, only: schedule_t, cycle_t, schedule use thread_mod, only: omp_in_parallel, omp_get_thread_num use perf_mod, only: t_startf, t_stopf - use spmd_utils, only: mpi_real8, mpi_success use parallel_mod, only: parallel_t use perf_mod, only: t_startf, t_stopf + use mpi, only: mpi_real8, mpi_success type (parallel_t) :: par integer, intent(in) :: nthreads @@ -156,9 +156,9 @@ subroutine bndry_exchange_a2ao(par,nthreads,ithr,buffer,location) use schedtype_mod, only : schedule_t, cycle_t, schedule use thread_mod, only : omp_in_parallel, omp_get_thread_num use perf_mod, only : t_startf, t_stopf - use spmd_utils, only: mpi_real8, mpi_success, mpi_status_size use parallel_mod, only: parallel_t use perf_mod, only : t_startf, t_stopf + use mpi, only: mpi_real8, mpi_success, mpi_status_size type (parallel_t) :: par integer, intent(in) :: nthreads @@ -236,9 +236,9 @@ subroutine bndry_exchange_p2p(par,nthreads,ithr,buffer,location) use edgetype_mod, only: Edgebuffer_t use schedtype_mod, only: schedule_t, cycle_t, schedule use thread_mod, only: omp_in_parallel, omp_get_thread_num - use spmd_utils, only: 
mpi_real8, mpi_success use parallel_mod, only: parallel_t use perf_mod, only: t_startf, t_stopf + use mpi, only: mpi_real8, mpi_success type (parallel_t) :: par integer, intent(in) :: nthreads @@ -334,8 +334,8 @@ subroutine bndry_exchange_p2p_start(par,nthreads,ithr,buffer,location) use edgetype_mod, only: Edgebuffer_t use schedtype_mod, only: schedule_t, cycle_t, schedule use thread_mod, only: omp_in_parallel, omp_get_thread_num - use spmd_utils, only: mpi_real8, mpi_success use parallel_mod, only: parallel_t + use mpi, only: mpi_real8, mpi_success type (parallel_t) :: par integer, intent(in) :: nthreads @@ -463,7 +463,7 @@ subroutine long_bndry_exchange_nonth(par,buffer) use schedtype_mod, only: schedule_t, cycle_t, schedule use thread_mod, only: omp_in_parallel use parallel_mod, only: parallel_t, status, srequest, rrequest - use spmd_utils, only: mpi_integer, mpi_success + use mpi, only: mpi_integer, mpi_success type (parallel_t) :: par type (LongEdgeBuffer_t) :: buffer @@ -855,7 +855,7 @@ subroutine ghost_exchangeVfull(par,ithr,buffer) use schedtype_mod, only : schedule_t, cycle_t, schedule use dimensions_mod, only: nelemd use parallel_mod, only : status, srequest, rrequest, parallel_t - use spmd_utils, only: mpi_integer, mpi_success,mpi_real8 + use mpi, only: mpi_integer, mpi_success,mpi_real8 implicit none type (parallel_t) :: par diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index 6c2760b6..c16e2410 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -8,7 +8,7 @@ module comp_gll_ctr_vol use shr_sys_mod, only: shr_sys_flush use global_norms_mod, only: wrap_repro_sum use physconst, only: pi - use infnan, only: isnan + use shr_infnan_mod, only: isnan=>shr_infnan_isnan use coordinate_systems_mod, only: cartesian3d_t, cartesian2d_t use coordinate_systems_mod, only: spherical_polar_t, change_coordinates diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 index a012c761..046fa2f8 100644 --- a/src/dynamics/se/dycore/dimensions_mod.F90 +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -1,21 +1,23 @@ module dimensions_mod use shr_kind_mod, only: r8=>shr_kind_r8 -#ifdef FVM_TRACERS - use constituents, only: ntrac_d=>pcnst ! _EXTERNAL -#else - use constituents, only: qsize_d=>pcnst ! _EXTERNAL -#endif +!Un-comment or modify once a formal plan for pcnst has been developed -JN: +!#ifdef FVM_TRACERS +! use constituents, only: ntrac_d=>pcnst ! _EXTERNAL +!#else +! use constituents, only: qsize_d=>pcnst ! _EXTERNAL +!#endif implicit none private -! set MAX number of tracers. actual number of tracers is a run time argument +! set MAX number of tracers. actual number of tracers is a run time argument #ifdef FVM_TRACERS - integer, parameter :: qsize_d =10 ! SE tracers (currently SE supports 10 condensate loading tracers) + integer, parameter :: qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) + integer, parameter :: ntrac_d = 3 ! Needed until pcnst is resolved. -JN #else - integer, parameter :: ntrac_d = 0 ! No fvm tracers if CSLAM is off + integer, parameter :: ntrac_d = 0 ! No fvm tracers if CSLAM is off + integer, parameter :: qsize_d = 3 ! Needed until pcnst is resolved. -JN #endif - ! ! The variables below hold indices of water vapor and condensate loading tracers as well as ! 
associated heat capacities (initialized in dyn_init): @@ -37,8 +39,8 @@ module dimensions_mod ! .false.: force dycore to use cpd (cp dry) instead of moist cp ! .true. : use moist cp in dycore ! - logical , public :: lcp_moist = .true. - + logical , public :: lcp_moist = .true. + integer, parameter, public :: np = NP integer, parameter, public :: nc = 3 !cslam resolution integer , public :: fv_nphys !physics-grid resolution - the "MAX" is so that the code compiles with NC=0 @@ -48,8 +50,8 @@ module dimensions_mod ! ! hyperviscosity is applied on approximate pressure levels ! Similar to CAM-EUL; see CAM5 scietific documentation (Note TN-486), equation (3.09), page 58. - ! - logical, public :: hypervis_dynamic_ref_state = .false. + ! + logical, public :: hypervis_dynamic_ref_state = .false. ! fvm dimensions: logical, public :: lprint!for debugging integer, parameter, public :: ngpc=3 !number of Gausspoints for the fvm integral approximation !phl change from 4 @@ -67,41 +69,41 @@ module dimensions_mod logical, public :: large_Courant_incr integer, public :: kmin_jet,kmax_jet !min and max level index for the jet - integer, public :: fvm_supercycling + integer, public :: fvm_supercycling integer, public :: fvm_supercycling_jet integer, allocatable, public :: kord_tr(:), kord_tr_cslam(:) - + real(r8), public :: nu_scale_top(PLEV)! scaling of del2 viscosity in sopnge layer (initialized in dyn_comp) - real(r8), public :: nu_lev(PLEV) + real(r8), public :: nu_lev(PLEV) real(r8), public :: otau(PLEV) integer, public :: ksponge_end ! sponge is active k=1,ksponge_end real(r8), public :: nu_div_lev(PLEV) = 1.0_r8 ! scaling of viscosity in sponge layer ! (set in prim_state; if applicable) - real(r8), public :: kmvis_ref(PLEV) !reference profiles for molecular diffusion - real(r8), public :: kmcnd_ref(PLEV) !reference profiles for molecular diffusion + real(r8), public :: kmvis_ref(PLEV) !reference profiles for molecular diffusion + real(r8), public :: kmcnd_ref(PLEV) !reference profiles for molecular diffusion real(r8), public :: rho_ref(PLEV) !reference profiles for rho real(r8), public :: km_sponge_factor(PLEV) !scaling for molecular diffusion (when used as sponge) - real(r8), public :: kmvisi_ref(PLEV+1) !reference profiles for molecular diffusion - real(r8), public :: kmcndi_ref(PLEV+1) !reference profiles for molecular diffusion + real(r8), public :: kmvisi_ref(PLEV+1) !reference profiles for molecular diffusion + real(r8), public :: kmcndi_ref(PLEV+1) !reference profiles for molecular diffusion real(r8), public :: rhoi_ref(PLEV+1) !reference profiles for rho - integer, public :: nhc_phys - integer, public :: nhe_phys - integer, public :: nhr_phys - integer, public :: ns_phys + integer, public :: nhc_phys + integer, public :: nhe_phys + integer, public :: nhr_phys + integer, public :: ns_phys - integer, public :: npdg = 0 ! dg degree for hybrid cg/dg element 0=disabled + integer, public :: npdg = 0 ! dg degree for hybrid cg/dg element 0=disabled integer, parameter, public :: npsq = np*np integer, parameter, public :: nlev=PLEV integer, parameter, public :: nlevp=nlev+1 -! params for a mesh +! params for a mesh ! integer, public, parameter :: max_elements_attached_to_node = 7 -! integer, public, parameter :: s_nv = 2*max_elements_attached_to_node +! integer, public, parameter :: s_nv = 2*max_elements_attached_to_node !default for non-refined mesh (note that these are *not* parameters now) integer, public :: max_elements_attached_to_node = 4 @@ -127,7 +129,7 @@ subroutine set_mesh_dimensions() ! 
new "params" max_elements_attached_to_node = 7 ! variable resolution - s_nv = 2*max_elements_attached_to_node + s_nv = 2*max_elements_attached_to_node !recalculate these max_corner_elem = max_elements_attached_to_node-3 diff --git a/src/dynamics/se/dycore/dof_mod.F90 b/src/dynamics/se/dycore/dof_mod.F90 index 4b33c278..c6c97741 100644 --- a/src/dynamics/se/dycore/dof_mod.F90 +++ b/src/dynamics/se/dycore/dof_mod.F90 @@ -1,9 +1,9 @@ module dof_mod use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 + use mpi, only: mpi_integer use dimensions_mod, only: np, npsq, nelem, nelemd use quadrature_mod, only: quadrature_t use element_mod, only: element_t,index_t - use spmd_utils, only: mpi_integer use parallel_mod, only: parallel_t use edge_mod, only: initedgebuffer,freeedgebuffer, & longedgevpack, longedgevunpackmin @@ -270,7 +270,7 @@ subroutine putUniquePoints4D(idxUnique,d3,d4,src,dest) end subroutine putUniquePoints4D subroutine SetElemOffset(par,elem,GlobalUniqueColsP) - use spmd_utils, only : mpi_sum + use mpi, only: mpi_sum type (parallel_t) :: par type (element_t) :: elem(:) diff --git a/src/dynamics/se/dycore/edge_mod.F90 b/src/dynamics/se/dycore/edge_mod.F90 index 7fa1e146..c939ce5b 100644 --- a/src/dynamics/se/dycore/edge_mod.F90 +++ b/src/dynamics/se/dycore/edge_mod.F90 @@ -15,7 +15,7 @@ module edge_mod Longedgebuffer_t, initedgebuffer_callid, Ghostbuffer3D_t use element_mod, only: element_t use gbarrier_mod, only: gbarrier_init, gbarrier_delete - use spmd_utils, only: mpi_real8, mpi_integer, mpi_info_null, mpi_success + use mpi, only: mpi_real8, mpi_integer, mpi_info_null, mpi_success implicit none private diff --git a/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 b/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 index ede1f440..20391710 100644 --- a/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 +++ b/src/dynamics/se/dycore/fvm_consistent_se_cslam.F90 @@ -10,7 +10,7 @@ module fvm_consistent_se_cslam use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use hybrid_mod, only: hybrid_t, config_thread_region, get_loop_ranges, threadOwnsVertLevel - use perf_mod, only: t_startf, t_stopf + use perf_mod, only: t_startf, t_stopf implicit none private save @@ -36,10 +36,11 @@ subroutine run_consistent_se_cslam(elem,fvm,hybrid,dt_fvm,tl,nets,nete,hvcoord,& use fvm_reconstruction_mod, only: reconstruction use fvm_analytic_mod , only: gauss_points use edge_mod , only: ghostpack, ghostunpack - use edgetype_mod , only: edgebuffer_t + use edgetype_mod , only: edgebuffer_t use bndry_mod , only: ghost_exchange use hybvcoord_mod , only: hvcoord_t - use constituents , only: qmin +!Un-comment once constituents are enabled -JN: +! 
use constituents , only: qmin use dimensions_mod , only: large_Courant_incr,irecons_tracer_lev use thread_mod , only: vert_num_threads, omp_set_nested implicit none @@ -199,7 +200,7 @@ subroutine run_consistent_se_cslam(elem,fvm,hybrid,dt_fvm,tl,nets,nete,hvcoord,& call fill_halo_fvm(ghostbufQ1,elem,fvm,hybridnew,nets,nete,1,kmin_jet_local,kmax_jet_local,klev,active=ActiveJetThread) !call t_stopf('fvm:fill_halo_fvm:large_Courant') !call t_startf('fvm:large_Courant_number_increment') - if(ActiveJetThread) then + if(ActiveJetThread) then do k=kmin_jet_local,kmax_jet_local !1,nlev do ie=nets,nete call large_courant_number_increment(fvm(ie),k) @@ -220,14 +221,16 @@ subroutine run_consistent_se_cslam(elem,fvm,hybrid,dt_fvm,tl,nets,nete,hvcoord,& inv_dp_area(i,j) = 1.0_r8/fvm(ie)%dp_fvm(i,j,k) end do end do - + do itr=1,ntrac do j=1,nc do i=1,nc ! convert to mixing ratio fvm(ie)%c(i,j,k,itr) = fvm(ie)%c(i,j,k,itr)*inv_dp_area(i,j) ! remove round-off undershoots - fvm(ie)%c(i,j,k,itr) = MAX(fvm(ie)%c(i,j,k,itr),qmin(itr)) + !fvm(ie)%c(i,j,k,itr) = MAX(fvm(ie)%c(i,j,k,itr),qmin(itr)) +!Remove once constituents are enabled and ucomment above line -JN: + fvm(ie)%c(i,j,k,itr) = MAX(fvm(ie)%c(i,j,k,itr), 0._r8) end do end do end do @@ -246,7 +249,7 @@ subroutine run_consistent_se_cslam(elem,fvm,hybrid,dt_fvm,tl,nets,nete,hvcoord,& end do end do !call t_stopf('fvm:end_of_reconstruct_subroutine') - !$OMP END PARALLEL + !$OMP END PARALLEL call omp_set_nested(.false.) end subroutine run_consistent_se_cslam @@ -280,7 +283,7 @@ subroutine swept_flux(elem,fvm,ilev,ctracer,irecons_tracer_actual,gsweights,gspt REAL(KIND=r8), dimension(num_area) :: dp_area real (kind=r8) :: dp(1-nhc:nc+nhc,1-nhc:nc+nhc) - + logical :: tl1,tl2,tr1,tr2 integer, dimension(4), parameter :: imin_side = (/1 ,0 ,1 ,1 /) diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 index 10716fb3..c6b1ce0b 100644 --- a/src/dynamics/se/dycore/interpolate_mod.F90 +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -1,5 +1,6 @@ module interpolate_mod use shr_kind_mod, only: r8=>shr_kind_r8 + use mpi, only: MPI_MAX, MPI_SUM, MPI_MIN, mpi_real8, MPI_integer use element_mod, only: element_t use dimensions_mod, only: np, ne, nelemd, nc, nhe, nhc use quadrature_mod, only: quadrature_t, legendre, quad_norm @@ -10,7 +11,6 @@ module interpolate_mod use quadrature_mod, only: quadrature_t, gauss, gausslobatto use parallel_mod, only: syncmp, parallel_t use cam_abortutils, only: endrun - use spmd_utils, only: MPI_MAX, MPI_SUM, MPI_MIN, mpi_real8, MPI_integer use cube_mod, only: convert_gbl_index, dmap, ref2sphere use mesh_mod, only: MeshUseMeshFile use control_mod, only: cubed_sphere_map diff --git a/src/dynamics/se/dycore/parallel_mod.F90 b/src/dynamics/se/dycore/parallel_mod.F90 index f7dc0fa7..88b96388 100644 --- a/src/dynamics/se/dycore/parallel_mod.F90 +++ b/src/dynamics/se/dycore/parallel_mod.F90 @@ -4,7 +4,7 @@ module parallel_mod ! --------------------------- use dimensions_mod, only : nmpi_per_node, nlev, qsize_d, ntrac_d ! 
--------------------------- - use spmd_utils, only: MPI_STATUS_SIZE, MPI_MAX_ERROR_STRING, MPI_TAG_UB + use mpi, only: MPI_STATUS_SIZE, MPI_MAX_ERROR_STRING, MPI_TAG_UB implicit none private @@ -110,8 +110,9 @@ end subroutine copy_par function initmpi(npes_homme) result(par) use cam_logfile, only: iulog use cam_abortutils, only: endrun - use spmd_utils, only: mpicom, MPI_COMM_NULL, MPI_MAX_PROCESSOR_NAME - use spmd_utils, only: MPI_CHARACTER, MPI_INTEGER, MPI_BAND, iam, npes + use spmd_utils, only: mpicom, iam, npes + use mpi, only: MPI_COMM_NULL, MPI_MAX_PROCESSOR_NAME + use mpi, only: MPI_CHARACTER, MPI_INTEGER, MPI_BAND integer, intent(in) :: npes_homme @@ -227,7 +228,7 @@ end function initmpi ! ===================================== subroutine syncmp(par) use cam_abortutils, only: endrun - use spmd_utils, only: MPI_MAX_ERROR_STRING, MPI_ERROR + use mpi, only: MPI_MAX_ERROR_STRING, MPI_ERROR type (parallel_t), intent(in) :: par diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index ca9c1253..2812905d 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -469,7 +469,8 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, use fvm_control_volume_mod, only: fvm_struct use physconst, only: thermodynamic_active_species_idx_dycore use physconst, only: get_molecular_diff_coef,get_rho_dry - use cam_history, only: outfld, hist_fld_active +!Un-comment once history output has been resolved in CAMDEN -JN: +! use cam_history, only: outfld, hist_fld_active type (hybrid_t) , intent(in) :: hybrid type (element_t) , intent(inout), target :: elem(:) @@ -834,6 +835,8 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, ! ! diagnostics ! +!Un-comment once history outputs are enabled -JN: +#if 0 if (hist_fld_active('nu_kmvis')) then do ie=nets,nete tmp_kmvis = 0.0_r8 @@ -861,7 +864,7 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, call outfld('nu_kmcnd_dp',RESHAPE(tmp_kmcnd(:,:,:), (/npsq,nlev/)), npsq, ie) end do end if - +#endif ! ! scale by reference value ! @@ -1535,8 +1538,9 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf use dimensions_mod, only: npsq,nlev,np,lcp_moist,nc,ntrac,qsize use physconst, only: gravit, cpair, rearth,omega use element_mod, only: element_t - use cam_history, only: outfld, hist_fld_active - use constituents, only: cnst_get_ind +!Un-comment once constituents and history outputs are enabled -JN: +! use cam_history, only: outfld, hist_fld_active +! 
use constituents, only: cnst_get_ind use string_utils, only: strlist_get_ind use hycoef, only: hyai, ps0 use fvm_control_volume_mod, only: fvm_struct @@ -1577,6 +1581,9 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf !----------------------------------------------------------------------- +!Un-comment once history outputs are enabled -JN: +#if 0 + name_out1 = 'SE_' //trim(outfld_name_suffix) name_out2 = 'KE_' //trim(outfld_name_suffix) name_out3 = 'WV_' //trim(outfld_name_suffix) @@ -1713,13 +1720,15 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf end do end if +#endif end subroutine calc_tot_energy_dynamics subroutine output_qdp_var_dynamics(qdp,nx,num_trac,nets,nete,outfld_name) use dimensions_mod, only: nlev,ntrac - use cam_history , only: outfld, hist_fld_active - use constituents , only: cnst_get_ind +!Un-comment once constituents and history outputs are enabled -JN: +! use cam_history , only: outfld, hist_fld_active +! use constituents , only: cnst_get_ind !------------------------------Arguments-------------------------------- integer ,intent(in) :: nx,num_trac,nets,nete @@ -1733,6 +1742,9 @@ subroutine output_qdp_var_dynamics(qdp,nx,num_trac,nets,nete,outfld_name) character(len=16) :: name_out1,name_out2,name_out3,name_out4 !----------------------------------------------------------------------- +!Un-comment once history outputs are enabled -JN: +#if 0 + name_out1 = 'WV_' //trim(outfld_name) name_out2 = 'WI_' //trim(outfld_name) name_out3 = 'WL_' //trim(outfld_name) @@ -1752,6 +1764,7 @@ subroutine output_qdp_var_dynamics(qdp,nx,num_trac,nets,nete,outfld_name) if (ixtt>0 ) call util_function(qdp(:,:,:,ixtt ,ie),nx,nlev,name_out4,ie) end do end if +#endif end subroutine output_qdp_var_dynamics ! @@ -1759,13 +1772,16 @@ end subroutine output_qdp_var_dynamics ! subroutine util_function(f_in,nx,nz,name_out,ie) use physconst, only: gravit - use cam_history, only: outfld, hist_fld_active +!Un-comment once history outputs are enabled -JN: +! use cam_history, only: outfld, hist_fld_active integer, intent(in) :: nx,nz,ie real(kind=r8), intent(in) :: f_in(nx,nx,nz) character(len=16), intent(in) :: name_out real(kind=r8) :: f_out(nx*nx) integer :: i,j,k real(kind=r8) :: inv_g +!Un-comment once history outputs are enabled -JN: +#if 0 if (hist_fld_active(name_out)) then f_out = 0.0_r8 inv_g = 1.0_r8/gravit @@ -1779,6 +1795,7 @@ subroutine util_function(f_in,nx,nz,name_out,ie) f_out = f_out*inv_g call outfld(name_out,f_out,nx*nx,ie) end if +#endif end subroutine util_function subroutine compute_omega(hybrid,n0,qn0,elem,deriv,nets,nete,dt,hvcoord) diff --git a/src/dynamics/se/dycore/prim_driver_mod.F90 b/src/dynamics/se/dycore/prim_driver_mod.F90 index ffc010d1..0e47b8a2 100644 --- a/src/dynamics/se/dycore/prim_driver_mod.F90 +++ b/src/dynamics/se/dycore/prim_driver_mod.F90 @@ -564,7 +564,7 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) ! call Prim_Advec_Tracers_fvm(elem,fvm,hvcoord,hybrid,& dt_q,tl,nets,nete,ghostBufQnhcJet_h,ghostBufQ1_h, ghostBufFluxJet_h,kmin_jet,kmax_jet) - end if + end if #ifdef waccm_debug do ie=nets,nete @@ -594,7 +594,9 @@ subroutine prim_set_dry_mass(elem, hvcoord,initial_global_ave_dry_ps,q) use element_mod, only: element_t use hybvcoord_mod , only: hvcoord_t use dimensions_mod, only: nelemd, nlev, np - use constituents, only: cnst_type, qmin, pcnst +!Un-comment once constitutents are enabled -JN: +! 
use constituents, only: cnst_type, qmin, pcnst + use constituents, only: pcnst use cam_logfile, only: iulog use spmd_utils, only: masterproc @@ -628,16 +630,19 @@ subroutine prim_set_dry_mass(elem, hvcoord,initial_global_ave_dry_ps,q) ! and conserve mixing ratio (not mass) of 'dry' tracers ! do m_cnst=1,pcnst - if (cnst_type(m_cnst).ne.'dry') then +!Un-comment once constitutents are enabled -JN: +! if (cnst_type(m_cnst).ne.'dry') then do k=1,nlev do j = 1,np do i = 1,np q(i,j,k,ie,m_cnst) = q(i,j,k,ie,m_cnst)*factor(i,j,k) - q(i,j,k,ie,m_cnst) = max(qmin(m_cnst),q(i,j,k,ie,m_cnst)) +!Un-comment once constitutents are enabled -JN: +! q(i,j,k,ie,m_cnst) = max(qmin(m_cnst),q(i,j,k,ie,m_cnst)) + q(i,j,k,ie,m_cnst) = max(0._r8,q(i,j,k,ie,m_cnst)) end do end do end do - end if +! end if end do end do if (masterproc) then diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index afbd9486..450bbafb 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -21,6 +21,8 @@ module prim_init subroutine prim_init1(elem, fvm, par, Tl) use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush + use shr_infnan_mod, only: nan=>shr_infnan_nan, assignment(=) + use mpi, only: mpi_integer, mpi_max use thread_mod, only: max_num_threads use dimensions_mod, only: np, nlev, nelem, nelemd, nelemdmax use dimensions_mod, only: GlobalUniqueCols, fv_nphys,irecons_tracer @@ -44,7 +46,6 @@ subroutine prim_init1(elem, fvm, par, Tl) use schedule_mod, only: genEdgeSched use prim_advection_mod, only: prim_advec_init1 use cam_abortutils, only: endrun - use spmd_utils, only: mpi_integer, mpi_max use parallel_mod, only: parallel_t, syncmp, global_shared_buf, nrepro_vars use spacecurve_mod, only: genspacepart use dof_mod, only: global_dof, CreateUniqueIndex, SetElemOffset @@ -52,7 +53,6 @@ subroutine prim_init1(elem, fvm, par, Tl) use physconst, only: pi use reduction_mod, only: red_min, red_max, red_max_int, red_flops use reduction_mod, only: red_sum, red_sum_int, initreductionbuffer - use infnan, only: nan, assignment(=) use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc use fvm_analytic_mod, only: compute_basic_coordinate_vars use fvm_control_volume_mod, only: fvm_struct, allocate_physgrid_vars diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 index 4c845ba0..e09664fb 100644 --- a/src/dynamics/se/dycore/prim_state_mod.F90 +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -20,7 +20,9 @@ module prim_state_mod subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) use dimensions_mod, only: ntrac - use constituents, only: cnst_name +!Un-comment once constitutents are enabled -JN: +! use constituents, only: cnst_name + use string_utils, only: to_str !Remove once constituents are enabled -JN use physconst, only: thermodynamic_active_species_idx_dycore, dry_air_species_num use physconst, only: thermodynamic_active_species_num,thermodynamic_active_species_idx use cam_control_mod, only: initial_run @@ -31,7 +33,7 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) type (TimeLevel_t), target, intent(in) :: tl type (hybrid_t), intent(in) :: hybrid integer, intent(in) :: nets,nete - type(fvm_struct), intent(inout) :: fvm(:) + type(fvm_struct), intent(inout) :: fvm(:) real (kind=r8), optional, intent(in) :: omega_cn(2,nets:nete) ! Local variables... 
integer :: k,ie,m_cnst @@ -117,7 +119,7 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) nm2 = nm+statediag_numtrac!number of vars after tracers end if - do ie=nets,nete + do ie=nets,nete min_local(ie,1) = MINVAL(elem(ie)%state%v(:,:,1,:,n0)) max_local(ie,1) = MAXVAL(elem(ie)%state%v(:,:,1,:,n0)) min_local(ie,2) = MINVAL(elem(ie)%state%v(:,:,2,:,n0)) @@ -141,9 +143,11 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) min_local(ie,8) = MINVAL(elem(ie)%state%psdry(:,:)) max_local(ie,8) = MAXVAL(elem(ie)%state%psdry(:,:)) min_local(ie,9) = MINVAL(moist_ps(:,:,ie)) - max_local(ie,9) = MAXVAL(moist_ps(:,:,ie)) + max_local(ie,9) = MAXVAL(moist_ps(:,:,ie)) do q=1,statediag_numtrac - varname(nm+q) = TRIM(cnst_name(q)) +!Un-comment once constitutents are enabled -JN: + !varname(nm+q) = TRIM(cnst_name(q)) + varname(nm+q) = "tracer_"//to_str(q) !remove once constituents are enabled -JN min_local(ie,nm+q) = MINVAL(fvm(ie)%c(1:nc,1:nc,:,q)) max_local(ie,nm+q) = MAXVAL(fvm(ie)%c(1:nc,1:nc,:,q)) end do @@ -151,9 +155,11 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) min_local(ie,6) = MINVAL(elem(ie)%state%psdry(:,:)) max_local(ie,6) = MAXVAL(elem(ie)%state%psdry(:,:)) min_local(ie,7) = MINVAL(moist_ps(:,:,ie)) - max_local(ie,7) = MAXVAL(moist_ps(:,:,ie)) + max_local(ie,7) = MAXVAL(moist_ps(:,:,ie)) do q=1,statediag_numtrac - varname(nm+q) = TRIM(cnst_name(q)) +!Un-comment once constitutents are enabled -JN: + !varname(nm+q) = TRIM(cnst_name(q)) + varname(nm+q) = "tracer_"//to_str(q) !remove once constituents are enabled -JN tmp_q = elem(ie)%state%Qdp(:,:,:,q,n0_qdp)/elem(ie)%state%dp3d(:,:,:,n0) min_local(ie,nm+q) = MINVAL(tmp_q) max_local(ie,nm+q) = MAXVAL(tmp_q) @@ -170,13 +176,17 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) max_local(ie,nm2+2) = MAXVAL(elem(ie)%derived%FM(:,:,:,:)) if (ntrac>0) then do q=1,statediag_numtrac - varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) +!Un-comment once constitutents are enabled -JN: + !varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) + varname(nm2+2+q) = "Ftracer_"//to_str(q) !remove once constituents are enabled -JN min_local(ie,nm2+2+q) = MINVAL(fvm(ie)%fc(1:nc,1:nc,:,q)) max_local(ie,nm2+2+q) = MAXVAL(fvm(ie)%fc(1:nc,1:nc,:,q)) end do else do q=1,statediag_numtrac - varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) +!Un-comment once constitutents are enabled -JN: + !varname(nm2+2+q) = TRIM('F'//TRIM(cnst_name(q))) + varname(nm2+2+q) = "Ftracer_"//to_str(q) !remove once constituents are enabled -JN tmp_q = elem(ie)%derived%FQ(:,:,:,q) min_local(ie,nm2+2+q) = MINVAL(tmp_q) max_local(ie,nm2+2+q) = MAXVAL(tmp_q) diff --git a/src/dynamics/se/dycore/reduction_mod.F90 b/src/dynamics/se/dycore/reduction_mod.F90 index 3f8afbc3..b5cbdb13 100644 --- a/src/dynamics/se/dycore/reduction_mod.F90 +++ b/src/dynamics/se/dycore/reduction_mod.F90 @@ -1,7 +1,7 @@ module reduction_mod use shr_kind_mod, only: r8=>shr_kind_r8 - use spmd_utils, only: mpi_sum, mpi_min, mpi_max, mpi_real8, mpi_integer - use spmd_utils, only: mpi_success + use mpi, only: mpi_sum, mpi_min, mpi_max, mpi_real8, mpi_integer + use mpi, only: mpi_success use cam_abortutils, only: endrun implicit none diff --git a/src/dynamics/se/dycore/schedule_mod.F90 b/src/dynamics/se/dycore/schedule_mod.F90 index cabdcbb7..f952c821 100644 --- a/src/dynamics/se/dycore/schedule_mod.F90 +++ b/src/dynamics/se/dycore/schedule_mod.F90 @@ -34,12 +34,12 @@ module schedule_mod contains subroutine genEdgeSched(par,elem, 
PartNumber,LSchedule,MetaVertex) + use mpi, only: mpi_status_size, mpi_info_null, mpi_success use element_mod, only: element_t use metagraph_mod, only: metavertex_t use dimensions_mod, only: nelem, max_neigh_edges use gridgraph_mod, only: gridvertex_t, gridedge_t, assignment ( = ) use cam_abortutils, only: endrun - use spmd_utils, only: mpi_status_size, mpi_info_null, mpi_success use parallel_mod, only: nComPoints, rrequest, srequest, status, npackpoints type(parallel_t), intent(inout) :: par diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index db96e089..2d298f15 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -57,7 +57,6 @@ module dyn_comp dyn_import_t, & dyn_export_t, & dyn_readnl, & - dyn_register, & dyn_init, & dyn_run, & dyn_final @@ -94,16 +93,18 @@ module dyn_comp real(r8), parameter :: rad2deg = 180.0_r8 / pi real(r8), parameter :: deg2rad = pi / 180.0_r8 +integer, parameter :: max_fieldname_len = 27 !Remove once history is enabled -JN + !=============================================================================== contains !=============================================================================== subroutine dyn_readnl(NLFileName) + use mpi, only: mpi_real8, mpi_integer, mpi_character, mpi_logical use physconst, only: thermodynamic_active_species_num use shr_nl_mod, only: find_group_name => shr_nl_find_group_name use shr_file_mod, only: shr_file_getunit, shr_file_freeunit use spmd_utils, only: masterproc, masterprocid, mpicom, npes - use spmd_utils, only: mpi_real8, mpi_integer, mpi_character, mpi_logical use dyn_grid, only: se_write_grid_file, se_grid_filename, se_write_gll_corners use dp_mapping, only: nphys_pts use native_mapping, only: native_mapping_readnl @@ -579,6 +580,8 @@ subroutine dyn_init(dyn_in, dyn_out) !use cam_history, only: addfld, add_default, horiz_only, register_vector_field !use gravity_waves_sources, only: gws_init + use physics_types, only: ix_qv, ix_cld_liq !Use until constituents are fully-enabled -JN + !SE dycore: use prim_advance_mod, only: prim_advance_init use thread_mod, only: horz_num_threads @@ -1007,7 +1010,9 @@ subroutine dyn_run(dyn_state) if (iam >= par%nprocs) return - ldiag = hist_fld_active('ABS_dPSdt') +!Un-comment once history output is enabled -JN +! ldiag = hist_fld_active('ABS_dPSdt') + ldiag = .false. if (ldiag) then allocate(ps_before(np,np,nelemd)) allocate(abs_ps_tend(np,np,nelemd)) @@ -1248,7 +1253,7 @@ subroutine read_inidat(dyn_in) ! Set mask to indicate which columns are active nullify(ldof) - call cam_grid_get_gcid(cam_grid_id((ini_grid_name), ldof) + call cam_grid_get_gcid(cam_grid_id(ini_grid_name), ldof) allocate(pmask(npsq*nelemd)) pmask(:) = (ldof /= 0) @@ -1622,7 +1627,8 @@ subroutine read_inidat(dyn_in) factor_array(:,:,:,:) = 1.0_r8/factor_array(:,:,:,:) do m_cnst = 1, pcnst - if (cnst_type(m_cnst) == 'wet') then +!Un-comment once constituents are enabled -JN: +! if (cnst_type(m_cnst) == 'wet') then do ie = 1, nelemd do k = 1, nlev do j = 1, np @@ -1640,7 +1646,7 @@ subroutine read_inidat(dyn_in) end do end do end do - end if +! end if end do ! initialize dp3d and qdp @@ -2275,7 +2281,7 @@ subroutine read_dyn_field_3d(fieldname, fh, dimname, buffer) ! call infld(trim(fieldname), fh, dimname, 'lev', 1, npsq, 1, nlev, & ! 1, nelemd, buffer, found, gridname='GLL') !Remove if below works! 
-JN call cam_read_field(trim(fieldname), fh, buffer, found, 'lev', (/1, nlev/), & - dim3_pos=2, gridname=ini_gird_name, fillvalue=fillvalue) + dim3_pos=2, gridname=ini_grid_name, fillvalue=fillvalue) if(.not. found) then call endrun('READ_DYN_FIELD_3D: Could not find '//trim(fieldname)//' field on input datafile') end if @@ -2284,7 +2290,7 @@ subroutine read_dyn_field_3d(fieldname, fh, dimname, buffer) ! to NaN. In that case infld can return NaNs where the element GLL ! points are not "unique columns". ! Set NaNs or fillvalue points to zero: - where (shr_infnan_isnan(buffer) .or. (buffer==fillvalue) buffer = 0.0_r8 + where (shr_infnan_isnan(buffer) .or. (buffer==fillvalue)) buffer = 0.0_r8 end subroutine read_dyn_field_3d diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 49a5a508..55306505 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -26,10 +26,12 @@ module dyn_grid ! !------------------------------------------------------------------------------- +use mpi, only: mpi_integer, mpi_real8 +use pio, only: file_desc_t use shr_kind_mod, only: r8 => shr_kind_r8, shr_kind_cl use spmd_utils, only: masterproc, iam, mpicom, mstrid=>masterprocid, & - npes, mpi_integer, mpi_real8 -!use constituents, only: pcnst + npes +use constituents, only: pcnst use physconst, only: pi use cam_initfiles, only: initial_file_get_id use physics_column_type, only: physics_column_t, kind_pcol @@ -80,12 +82,10 @@ module dyn_grid edgebuf public :: model_grid_init -public :: get_dyn_grid_info !!XXgoldyXX: v try to remove? public :: get_horiz_grid_dim_d public :: dyn_grid_get_colndx ! get element block/column and MPI process indices -public :: get_dyn_grid_parm !!XXgoldyXX: ^ try to remove? public :: dyn_grid_get_elem_coords ! get coords of a specified block element @@ -126,14 +126,17 @@ subroutine model_grid_init() ! and then initializes the physics grid and ! decomposition based on the dynamics (SE) grid. + use mpi, only: mpi_max use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 use physconst, only: thermodynamic_active_species_num - use ref_pres, only: ref_pres_init !Brought in via Held-Suarez - JN - use spmd_utils, only: MPI_MAX, MPI_INTEGER, mpicom + use ref_pres, only: ref_pres_init + use pmgrid, only: plev, plevp !Remove once phys_vert_coord is enabled!! -JN use time_manager, only: get_nstep, get_step_size use dp_mapping, only: dp_init, dp_write use native_mapping, only: do_native_mapping, create_native_mapping_files + use cam_grid_support, only: hclen=>max_hcoordname_len + use physics_grid, only: phys_grid_init !SE dycore: use parallel_mod, only: par @@ -150,7 +153,7 @@ subroutine model_grid_init() type(file_desc_t), pointer :: fh_ini integer :: qsize_local - integer :: k + integer :: k, elem_ind type(hybrid_t) :: hybrid integer :: ierr @@ -162,9 +165,11 @@ subroutine model_grid_init() ! Variables needed for physics grid initialization: integer :: num_local_columns integer :: hdim1_d ! # longitudes or grid size + character(len=hclen) :: gridname + character(len=hclen), allocatable :: grid_attribute_names(:) - character(len=*), parameter :: sub = 'model_grid_init' + character(len=*), parameter :: subname = 'model_grid_init' !---------------------------------------------------------------------------- ! Get file handle for initial file and first consistency check @@ -183,7 +188,7 @@ subroutine model_grid_init() end do ! 
Initialize reference pressures - call ref_pres_init(hypi, hypm, nprlev) + call ref_pres_init(plev, plevp, hypi, hypm, nprlev) if (iam < par%nprocs) then @@ -377,7 +382,6 @@ subroutine set_dyn_col_values() use physconst, only: pi use cam_abortutils, only: endrun - use spmd_utils, only: iam !SE dycore: use coordinate_systems_mod, only: spherical_polar_t @@ -392,7 +396,7 @@ subroutine set_dyn_col_values() real(r8) :: dcoord real(kind_pcol), parameter :: radtodeg = 180.0_kind_pcol / pi real(kind_pcol), parameter :: degtorad = pi / 180.0_kind_pcol - character(len=*), parameter :: subname = 'get_dyn_grid_info' + character(len=*), parameter :: subname = 'set_dyn_col_values' lindex = 0 do elem_ind = 1, nelemd @@ -1032,7 +1036,8 @@ subroutine write_grid_mapping(par, elem) use cam_pio_utils, only: cam_pio_createfile, cam_pio_newdecomp use pio, only: pio_def_dim, var_desc_t, pio_int, pio_def_var, & pio_enddef, pio_closefile, io_desc_t, & - pio_write_darray, pio_freedecomp + pio_write_darray, pio_freedecomp, & + pio_offset_kind ! SE dycore: use parallel_mod, only: parallel_t @@ -1047,10 +1052,10 @@ subroutine write_grid_mapping(par, elem) type(file_desc_t) :: nc type(var_desc_t) :: vid - type(io_desc_t) :: iodesc + type(io_desc_t), pointer :: iodesc integer :: dim1, dim2, ierr, i, j, ie, cc, base, ii, jj integer :: subelement_corners(npm12*nelemd,4) - integer :: dof(npm12*nelemd*4) + integer(kind=pio_offset_kind) :: dof(npm12*nelemd*4) !---------------------------------------------------------------------------- ! Create a CS grid mapping file for postprocessing tools diff --git a/src/dynamics/se/native_mapping.F90 b/src/dynamics/se/native_mapping.F90 index cb69049a..dba52916 100644 --- a/src/dynamics/se/native_mapping.F90 +++ b/src/dynamics/se/native_mapping.F90 @@ -6,12 +6,12 @@ module native_mapping ! using the SE basis functions. The output mapping file name is generated based on the SE model resolution ! and the input grid file name and ends in '_date_native.nc' ! 
- use cam_logfile, only : iulog - use shr_kind_mod, only : r8 => shr_kind_r8, shr_kind_cl - use shr_const_mod, only : pi=>shr_const_pi - use cam_abortutils, only : endrun - use spmd_utils, only : iam, masterproc, mpi_character, mpi_logical, mpi_integer, mpi_max, & - mpicom, mstrid=>masterprocid + use mpi, only: mpi_character, mpi_logical, mpi_integer, mpi_max + use cam_logfile, only: iulog + use shr_kind_mod, only: r8 => shr_kind_r8, shr_kind_cl + use shr_const_mod, only: pi=>shr_const_pi + use cam_abortutils, only: endrun + use spmd_utils, only: iam, masterproc, mpicom, mstrid=>masterprocid implicit none private @@ -84,8 +84,10 @@ end subroutine native_mapping_readnl subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, areaa) - use cam_pio_utils, only : cam_pio_openfile, cam_pio_createfile - use pio, only : pio_noerr, pio_openfile, pio_createfile, pio_closefile, & + use shr_infnan_mod, only: isnan=>shr_infnan_isnan + use cam_pio_utils, only: cam_pio_openfile, cam_pio_createfile + + use pio, only: pio_noerr, pio_openfile, pio_createfile, pio_closefile, & pio_get_var, pio_put_var, pio_write_darray,pio_int, pio_double, & pio_def_var, pio_put_att, pio_global, file_desc_t, var_desc_t, & io_desc_t, pio_internal_error,pio_inq_dimlen, pio_inq_varid, & @@ -104,7 +106,6 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are use dimensions_mod, only : nelemd, ne, np, npsq, nelem use reduction_mod, only : ParallelMin,ParallelMax use cube_mod, only : convert_gbl_index - use infnan, only : isnan use dof_mod, only : CreateMetaData use thread_mod, only: omp_get_thread_num use datetime_mod, only: datetime diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 index 5735e85d..18768a0f 100644 --- a/src/dynamics/se/stepon.F90 +++ b/src/dynamics/se/stepon.F90 @@ -8,6 +8,7 @@ module stepon !SE dycore: use parallel_mod, only: par +use dimensions_mod, only: nelemd implicit none private diff --git a/src/dynamics/se/test_fvm_mapping.F90 b/src/dynamics/se/test_fvm_mapping.F90 index 262d4a12..fc2bfb34 100644 --- a/src/dynamics/se/test_fvm_mapping.F90 +++ b/src/dynamics/se/test_fvm_mapping.F90 @@ -376,7 +376,7 @@ subroutine test_mapping_output_phys_state(phys_state,fvm) ! use ppgrid, only: begchunk, endchunk, pver, pcols ! use constituents, only: cnst_get_ind,cnst_name - type(physics_state), intent(inout) :: phys_state(begchunk:endchunk) + type(physics_state), intent(inout) :: phys_state type(fvm_struct), pointer:: fvm(:) #ifdef debug_coupling integer :: lchnk, ncol,k,icol,m_cnst,nq,ie diff --git a/src/dynamics/tests/inic_analytic_utils.F90 b/src/dynamics/tests/inic_analytic_utils.F90 index 60091f05..f40069cf 100644 --- a/src/dynamics/tests/inic_analytic_utils.F90 +++ b/src/dynamics/tests/inic_analytic_utils.F90 @@ -40,9 +40,10 @@ end function analytic_ic_is_moist subroutine analytic_ic_readnl(nlfile) + use mpi, only: MPI_CHARACTER, MPI_LOGICAL use shr_nl_mod, only: find_group_name => shr_nl_find_group_name use shr_file_mod, only: shr_file_getunit, shr_file_freeunit - use spmd_utils, only: masterproc, masterprocid, mpicom, mpi_character, mpi_logical + use spmd_utils, only: masterproc, masterprocid, mpicom use shr_string_mod, only: shr_string_toLower ! Dummy argument @@ -82,8 +83,8 @@ subroutine analytic_ic_readnl(nlfile) end if ! 
Broadcast namelist variables - call mpi_bcast(analytic_ic_type, len(analytic_ic_type), mpi_character, masterprocid, mpicom, ierr) - call mpi_bcast(nl_not_found, 1, mpi_logical, masterprocid, mpicom, ierr) + call mpi_bcast(analytic_ic_type, len(analytic_ic_type), MPI_CHARACTER, masterprocid, mpicom, ierr) + call mpi_bcast(nl_not_found, 1, MPI_LOGICAL, masterprocid, mpicom, ierr) if (nl_not_found) then ! If analytic IC functionality is turned on (via a configure switch), then diff --git a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 index 45b6a27b..760d3537 100644 --- a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 +++ b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 @@ -32,11 +32,11 @@ module ic_us_standard_atmosphere subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & Q, m_cnst, mask, verbose) - + !---------------------------------------------------------------------------- ! ! Set initial values for static atmosphere with vertical profile from US - ! Standard Atmosphere. + ! Standard Atmosphere. ! !---------------------------------------------------------------------------- @@ -121,7 +121,7 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & do k = 1, nlev pmid(k) = hyam(k)*ps0 + hybm(k)*psurf(1) end do - ! get height of pressure level + ! get height of pressure level call std_atm_height(pmid, zmid) ! given height get temperature call std_atm_temp(zmid, T(i,:)) @@ -138,7 +138,7 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & if (.not.present(PHIS)) then call endrun(subname//': PHIS must be specified to initiallize PS') end if - + do i = 1, ncol if (mask_use(i)) then call std_atm_pres(PHIS(i:i)/gravit, PS(i:i)) diff --git a/src/dynamics/utils/hycoef.F90 b/src/dynamics/utils/hycoef.F90 index fdac5cfa..670ed2bd 100644 --- a/src/dynamics/utils/hycoef.F90 +++ b/src/dynamics/utils/hycoef.F90 @@ -2,7 +2,7 @@ module hycoef use shr_kind_mod, only: r8 => shr_kind_r8 use spmd_utils, only: masterproc -use vert_coord, only: pver, pverp +use pmgrid, only: plev, plevp use cam_logfile, only: iulog use cam_abortutils, only: endrun use pio, only: file_desc_t, var_desc_t, & @@ -23,25 +23,28 @@ module hycoef ! !----------------------------------------------------------------------- -real(r8), public, allocatable, target :: hyai(:) ! ps0 component of hybrid coordinate - interfaces -real(r8), public, allocatable, target :: hyam(:) ! ps0 component of hybrid coordinate - midpoints -real(r8), public, allocatable, target :: hybi(:) ! ps component of hybrid coordinate - interfaces -real(r8), public, allocatable, target :: hybm(:) ! ps component of hybrid coordinate - midpoints - -real(r8), public, allocatable :: etamid(:) ! hybrid coordinate - midpoints - -real(r8), public, allocatable :: hybd(:) ! difference in b (hybi) across layers -real(r8), public, allocatable :: hypi(:) ! reference pressures at interfaces -real(r8), public, allocatable :: hypm(:) ! reference pressures at midpoints -real(r8), public, allocatable :: hypd(:) ! reference pressure layer thickness - -real(r8), public, protected :: ps0 ! Base state surface pressure (pascals) -real(r8), public, protected :: psr ! Reference surface pressure (pascals) - -real(r8), allocatable, target :: alev(:) ! level values (pascals) for 'lev' coord -real(r8), allocatable, target :: ailev(:) ! interface level values for 'ilev' coord +real(r8), public, target :: hyai(plevp) ! 
ps0 component of hybrid coordinate - interfaces +real(r8), public, target :: hyam(plev) ! ps0 component of hybrid coordinate - midpoints +real(r8), public, target :: hybi(plevp) ! ps component of hybrid coordinate - interfaces +real(r8), public, target :: hybm(plev) ! ps component of hybrid coordinate - midpoints + +real(r8), public :: etamid(plev) ! hybrid coordinate - midpoints + +real(r8), public :: hybd(plev) ! difference in b (hybi) across layers +real(r8), public :: hypi(plevp) ! reference pressures at interfaces +real(r8), public :: hypm(plev) ! reference pressures at midpoints +real(r8), public :: hypd(plev) ! reference pressure layer thickness +#ifdef planet_mars +real(r8), public, protected :: ps0 = 6.0e1_r8 ! Base state surface pressure (pascals) +real(r8), public, protected :: psr = 6.0e1_r8 ! Reference surface pressure (pascals) +#else +real(r8), public, protected :: ps0 = 1.0e5_r8 ! Base state surface pressure (pascals) +real(r8), public, protected :: psr = 1.0e5_r8 ! Reference surface pressure (pascals) +#endif +real(r8), target :: alev(plev) ! level values (pascals) for 'lev' coord +real(r8), target :: ailev(plevp) ! interface level values for 'ilev' coord -integer, public :: nprlev ! number of pure pressure levels at top +integer, public :: nprlev ! number of pure pressure levels at top public hycoef_init @@ -54,9 +57,7 @@ module hycoef subroutine hycoef_init(file, psdry) -! use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t - use physconst, only: ps_base, ps_ref - use string_utils, only: to_str + !use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t !----------------------------------------------------------------------- ! @@ -86,80 +87,15 @@ subroutine hycoef_init(file, psdry) ! arguments type(file_desc_t), intent(inout) :: file logical, optional, intent(in) :: psdry ! set true when coordinate is based - ! on dry surface pressure + ! on dry surface pressure ! local variables integer :: k ! Level index - integer :: iret ! Return status integer logical :: dry_coord real(r8) :: amean, bmean, atest, btest, eps ! type(formula_terms_t) :: formula_terms ! For the 'lev' and 'ilev' coords - - character(len=*), parameter :: subname = 'hycoef_init' - !----------------------------------------------------------------------- - ! Initalize reference pressures: - ps0 = real(ps_base, r8) ! Base state surface pressure (pascals) - psr = real(ps_ref, r8) ! Reference surface pressure (pascals) - - ! 
Allocate public variables: - - allocate(hyai(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hyai(pverp) failed with stat: '//to_str(iret)) - end if - - allocate(hyam(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hyam(pver) failed with stat: '//to_str(iret)) - end if - - allocate(hybi(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hybi(pverp) failed with stat: '//to_str(iret)) - end if - - allocate(hybm(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hybm(pver) failed with stat: '//to_str(iret)) - end if - - allocate(etamid(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate etamid(pver) failed with stat: '//to_str(iret)) - end if - - allocate(hybd(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hybd(pver) failed with stat: '//to_str(iret)) - end if - - allocate(hypi(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hypi(pverp) failed with stat: '//to_str(iret)) - end if - - allocate(hypm(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hypm(pver) failed with stat: '//to_str(iret)) - end if - - allocate(hypd(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate hypd(pver) failed with stat: '//to_str(iret)) - end if - - allocate(alev(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate alev(pver) failed with stat: '//to_str(iret)) - end if - - allocate(ailev(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ailev(pverp) failed with stat: '//to_str(iret)) - end if - ! check for dry pressure coordinate (default is moist) dry_coord = .false. if (present(psdry)) dry_coord = psdry @@ -169,46 +105,41 @@ subroutine hycoef_init(file, psdry) ! Set layer locations nprlev = 0 - do k=1,pver + do k=1,plev ! Interfaces. Set nprlev to the interface above, the first time a ! nonzero surface pressure contribution is found. "nprlev" ! identifies the lowest pure pressure interface. -! Remove this line once determine its replacement doesn't cause answer changes -! if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1 - if (hybi(k) /= 0.0_r8) then - nprlev = k - 1 - exit - end if + if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1 end do ! Set nprlev if no nonzero b's have been found. All interfaces are ! pure pressure. A pure pressure model requires other changes as well. - if (nprlev==0) nprlev = pver + 2 + if (nprlev==0) nprlev = plev + 2 ! Set delta sigma part of layer thickness and reference state midpoint ! pressures - do k=1,pver + do k=1,plev hybd(k) = hybi(k+1) - hybi(k) hypm(k) = hyam(k)*ps0 + hybm(k)*psr etamid(k) = hyam(k) + hybm(k) end do ! Reference state interface pressures - do k=1,pverp + do k=1,plevp hypi(k) = hyai(k)*ps0 + hybi(k)*psr end do ! Reference state layer thicknesses - do k=1,pver + do k=1,plev hypd(k) = hypi(k+1) - hypi(k) end do ! Test that A's and B's at full levels are arithmetic means of A's and ! B's at interfaces eps = 1.e-05_r8 - do k = 1,pver + do k = 1,plev amean = ( hyai(k+1) + hyai(k) )*0.5_r8 bmean = ( hybi(k+1) + hybi(k) )*0.5_r8 if(amean == 0._r8 .and. hyam(k) == 0._r8) then @@ -250,82 +181,86 @@ subroutine hycoef_init(file, psdry) ! attributes to the lev and ilev coordinates. ! 0.01 converts Pascals to millibars - alev(:pver) = 0.01_r8*ps0*(hyam(:pver) + hybm(:pver)) - ailev(:pverp) = 0.01_r8*ps0*(hyai(:pverp) + hybi(:pverp)) - -! -------------------- -! 
THIS CODE BLOCK TEMPORARILY COMMENTED OUT UNTIL HISTORY OUTPUT IS ENABLED -! if (dry_coord) then -! call add_vert_coord('lev', plev, & -! 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & -! positive='down') -! call add_hist_coord('hyam', plev, & -! 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') -! call add_hist_coord('hybm', plev, & -! 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') -! else -! -! formula_terms%a_name = 'hyam' -! formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' -! formula_terms%a_values => hyam -! formula_terms%b_name = 'hybm' -! formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' -! formula_terms%b_values => hybm -! formula_terms%p0_name = 'P0' -! formula_terms%p0_long_name = 'reference pressure' -! formula_terms%p0_units = 'Pa' -! formula_terms%p0_value = ps0 -! formula_terms%ps_name = 'PS' -! -! call add_vert_coord('lev', plev, & -! 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & -! positive='down', & -! standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & -! formula_terms=formula_terms) -! end if -! -! if (dry_coord) then -! call add_vert_coord('ilev', plevp, & -! 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & -! positive='down') -! call add_hist_coord('hyai', plevp, & -! 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') -! call add_hist_coord('hybi', plevp, & -! 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') -! else -! formula_terms%a_name = 'hyai' -! formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' -! formula_terms%a_values => hyai -! formula_terms%b_name = 'hybi' -! formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' -! formula_terms%b_values => hybi -! formula_terms%p0_name = 'P0' -! formula_terms%p0_long_name = 'reference pressure' -! formula_terms%p0_units = 'Pa' -! formula_terms%p0_value = ps0 -! formula_terms%ps_name = 'PS' -! -! call add_vert_coord('ilev', plevp, & -! 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & -! positive='down', & -! standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & -! formula_terms=formula_terms) -! end if -! 
+ alev(:plev) = 0.01_r8*ps0*(hyam(:plev) + hybm(:plev)) + ailev(:plevp) = 0.01_r8*ps0*(hyai(:plevp) + hybi(:plevp)) + +!Undo once history output has been developed -JN: +#if 0 + + if (dry_coord) then + call add_vert_coord('lev', plev, & + 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & + positive='down') + call add_hist_coord('hyam', plev, & + 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') + call add_hist_coord('hybm', plev, & + 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') + else + + formula_terms%a_name = 'hyam' + formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' + formula_terms%a_values => hyam + formula_terms%b_name = 'hybm' + formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' + formula_terms%b_values => hybm + formula_terms%p0_name = 'P0' + formula_terms%p0_long_name = 'reference pressure' + formula_terms%p0_units = 'Pa' + formula_terms%p0_value = ps0 + formula_terms%ps_name = 'PS' + + call add_vert_coord('lev', plev, & + 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & + positive='down', & + standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & + formula_terms=formula_terms) + end if + + if (dry_coord) then + call add_vert_coord('ilev', plevp, & + 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & + positive='down') + call add_hist_coord('hyai', plevp, & + 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') + call add_hist_coord('hybi', plevp, & + 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') + else + formula_terms%a_name = 'hyai' + formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' + formula_terms%a_values => hyai + formula_terms%b_name = 'hybi' + formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' + formula_terms%b_values => hybi + formula_terms%p0_name = 'P0' + formula_terms%p0_long_name = 'reference pressure' + formula_terms%p0_units = 'Pa' + formula_terms%p0_value = ps0 + formula_terms%ps_name = 'PS' + + call add_vert_coord('ilev', plevp, & + 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & + positive='down', & + standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & + formula_terms=formula_terms) + end if + +!Undo once history output has been developed -JN: +#endif + if (masterproc) then write(iulog,'(a)')' Layer Locations (*1000) ' - do k=1,pver + do k=1,plev write(iulog,9800)k,hyai(k),hybi(k),hyai(k)+hybi(k) write(iulog,9810) hyam(k), hybm(k), hyam(k)+hybm(k) end do - write(iulog,9800)pverp,hyai(pverp),hybi(pverp),hyai(pverp)+hybi(pverp) + write(iulog,9800)plevp,hyai(plevp),hybi(plevp),hyai(plevp)+hybi(plevp) write(iulog,9820) - do k=1,pver + do k=1,plev write(iulog,9830) k, hypi(k) write(iulog,9840) hypm(k), hypd(k) end do - write(iulog,9830) pverp, hypi(pverp) + write(iulog,9830) plevp, hypi(plevp) end if 9800 format( 1x, i3, 3p, 3(f10.4,10x) ) @@ -349,8 +284,8 @@ subroutine init_restart_hycoef(File, vdimids) integer :: ierr - ierr = PIO_Def_Dim(File, 'lev', pver, vdimids(1)) - ierr = PIO_Def_Dim(File, 'ilev', pverp, vdimids(2)) + ierr = PIO_Def_Dim(File, 'lev', plev, vdimids(1)) + ierr = PIO_Def_Dim(File, 'ilev', plevp, vdimids(2)) ierr = pio_def_var(File, 'hyai', pio_double, vdimids(2:2), hyai_desc) ierr = pio_def_var(File, 'hyam', pio_double, vdimids(1:1), hyam_desc) @@ -400,15 +335,15 @@ subroutine hycoef_read(File) ierr = PIO_Inq_DimID(File, 'lev', lev_dimid) ierr = PIO_Inq_dimlen(File, lev_dimid, flev) - if (pver /= flev) then - write(iulog,*) 
routine//': ERROR: file lev does not match model. lev (file, model):',flev, pver + if (plev /= flev) then + write(iulog,*) routine//': ERROR: file lev does not match model. lev (file, model):',flev, plev call endrun(routine//': ERROR: file lev does not match model.') end if ierr = PIO_Inq_DimID(File, 'ilev', lev_dimid) ierr = PIO_Inq_dimlen(File, lev_dimid, filev) - if (pverp /= filev) then - write(iulog,*) routine//':ERROR: file ilev does not match model ilev (file, model):',filev, pverp + if (plevp /= filev) then + write(iulog,*) routine//':ERROR: file ilev does not match model plevp (file, model):',filev, plevp call endrun(routine//':ERROR: file ilev does not match model.') end if diff --git a/src/physics/utils/physics_grid.F90 b/src/physics/utils/physics_grid.F90 index df99fc40..a0af8c72 100644 --- a/src/physics/utils/physics_grid.F90 +++ b/src/physics/utils/physics_grid.F90 @@ -90,10 +90,11 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & use mpi, only: MPI_INTEGER, MPI_REAL8, MPI_MIN, MPI_MAX use shr_mem_mod, only: shr_mem_getusage use cam_abortutils, only: endrun, check_allocate + use cam_logfile, only: iulog use spmd_utils, only: npes, mpicom, masterprocid, masterproc use string_utils, only: to_str + use cam_map_utils, only: iMap use cam_grid_support, only: cam_grid_register, cam_grid_attribute_register - use cam_grid_support, only: iMap use cam_grid_support, only: horiz_coord_t, horiz_coord_create use cam_grid_support, only: cam_grid_attribute_copy, cam_grid_attr_exists @@ -383,7 +384,7 @@ real(r8) function get_dlat_p(index) character(len=*), parameter :: subname = 'get_dlat_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) get_dlat_p = phys_columns(index)%lat_deg @@ -403,7 +404,7 @@ real(r8) function get_dlon_p(index) character(len=*), parameter :: subname = 'get_dlon_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) get_dlon_p = phys_columns(index)%lon_deg @@ -423,7 +424,7 @@ real(r8) function get_rlat_p(index) character(len=*), parameter :: subname = 'get_rlat_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) get_rlat_p = phys_columns(index)%lat_rad @@ -443,7 +444,7 @@ real(r8) function get_rlon_p(index) character(len=*), parameter :: subname = 'get_rlon_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) get_rlon_p = phys_columns(index)%lon_rad @@ -463,7 +464,7 @@ real(r8) function get_area_p(index) character(len=*), parameter :: subname = 'get_area_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) get_area_p = phys_columns(index)%area @@ -491,7 +492,7 @@ subroutine get_rlat_all_p(rlatdim, rlats) !----------------------------------------------------------------------- ! Check that input is valid: - call check_phys_input(rlatdim) + call check_phys_input(subname, rlatdim) do index = 1, rlatdim rlats(index) = phys_columns(index)%lat_rad @@ -521,7 +522,7 @@ subroutine get_rlon_all_p(rlondim, rlons) !----------------------------------------------------------------------- ! Check that input is valid: - call check_phys_input(rlondim) + call check_phys_input(subname, rlondim) do index = 1, rlondim rlons(index) = phys_columns(index)%lon_rad @@ -547,7 +548,7 @@ subroutine get_dyn_col_p(index, blk_num, blk_ind) character(len=*), parameter :: subname = 'get_dyn_col_p_index: ' ! 
Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) off_size = SIZE(phys_columns(index)%dyn_block_index, 1) if (SIZE(blk_ind, 1) < off_size) then @@ -575,7 +576,7 @@ integer function global_index_p(index) character(len=*), parameter :: subname = 'global_index_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) global_index_p = phys_columns(index)%global_col_num @@ -593,7 +594,7 @@ integer function local_index_p(index) character(len=*), parameter :: subname = 'local_index_p' ! Check that input is valid: - call check_phys_input(index) + call check_phys_input(subname, index) local_index_p = phys_columns(index)%phys_chunk_index @@ -637,4 +638,6 @@ subroutine check_phys_input(subname, index_val) to_str(columns_on_task)//')') end if + end subroutine check_phys_input + end module physics_grid diff --git a/src/utils/datetime.F90 b/src/utils/datetime.F90 new file mode 100644 index 00000000..fa667d53 --- /dev/null +++ b/src/utils/datetime.F90 @@ -0,0 +1,53 @@ +module datetime_mod + +implicit none + +private + +public :: datetime + +contains + + subroutine datetime(cdate, ctime) +!----------------------------------------------------------------------- +! +! Purpose: +! +! A generic Date and Time routine +! +! Author: CCM Core group +! +!----------------------------------------------------------------------- +! +! $Id$ +! +!----------------------------------------------------------------------- + implicit none +!----------------------------------------------------------------------- +! +!-----------------------------Arguments--------------------------------- + character , intent(out) :: cdate*8 + character , intent(out) :: ctime*8 +!----------------------------------------------------------------------- +! +!---------------------------Local Variables------------------------------ + integer, dimension(8) :: values + character :: date*8, time*10, zone*5 +!----------------------------------------------------------------------- + + call date_and_time (date, time, zone, values) + cdate(1:2) = date(5:6) + cdate(3:3) = '/' + cdate(4:5) = date(7:8) + cdate(6:6) = '/' + cdate(7:8) = date(3:4) + ctime(1:2) = time(1:2) + ctime(3:3) = ':' + ctime(4:5) = time(3:4) + ctime(6:6) = ':' + ctime(7:8) = time(5:6) + + return + end subroutine datetime + +end module datetime_mod diff --git a/src/utils/hycoef.F90 b/src/utils/hycoef.F90 deleted file mode 100644 index 670ed2bd..00000000 --- a/src/utils/hycoef.F90 +++ /dev/null @@ -1,403 +0,0 @@ -module hycoef - -use shr_kind_mod, only: r8 => shr_kind_r8 -use spmd_utils, only: masterproc -use pmgrid, only: plev, plevp -use cam_logfile, only: iulog -use cam_abortutils, only: endrun -use pio, only: file_desc_t, var_desc_t, & - pio_inq_dimid, pio_inq_dimlen, pio_inq_varid, & - pio_double, pio_def_dim, pio_def_var, & - pio_put_var, pio_get_var, & - pio_seterrorhandling, PIO_BCAST_ERROR, PIO_NOERR - -implicit none -private -save - -!----------------------------------------------------------------------- -! -! Purpose: Hybrid level definitions: p = a*p0 + b*ps -! interfaces p(k) = hyai(k)*ps0 + hybi(k)*ps -! midpoints p(k) = hyam(k)*ps0 + hybm(k)*ps -! -!----------------------------------------------------------------------- - -real(r8), public, target :: hyai(plevp) ! ps0 component of hybrid coordinate - interfaces -real(r8), public, target :: hyam(plev) ! ps0 component of hybrid coordinate - midpoints -real(r8), public, target :: hybi(plevp) ! 
ps component of hybrid coordinate - interfaces -real(r8), public, target :: hybm(plev) ! ps component of hybrid coordinate - midpoints - -real(r8), public :: etamid(plev) ! hybrid coordinate - midpoints - -real(r8), public :: hybd(plev) ! difference in b (hybi) across layers -real(r8), public :: hypi(plevp) ! reference pressures at interfaces -real(r8), public :: hypm(plev) ! reference pressures at midpoints -real(r8), public :: hypd(plev) ! reference pressure layer thickness -#ifdef planet_mars -real(r8), public, protected :: ps0 = 6.0e1_r8 ! Base state surface pressure (pascals) -real(r8), public, protected :: psr = 6.0e1_r8 ! Reference surface pressure (pascals) -#else -real(r8), public, protected :: ps0 = 1.0e5_r8 ! Base state surface pressure (pascals) -real(r8), public, protected :: psr = 1.0e5_r8 ! Reference surface pressure (pascals) -#endif -real(r8), target :: alev(plev) ! level values (pascals) for 'lev' coord -real(r8), target :: ailev(plevp) ! interface level values for 'ilev' coord - -integer, public :: nprlev ! number of pure pressure levels at top - -public hycoef_init - -type(var_desc_t) :: hyam_desc, hyai_desc, hybm_desc, hybi_desc, p0_desc -public init_restart_hycoef, write_restart_hycoef - -!======================================================================= -contains -!======================================================================= - -subroutine hycoef_init(file, psdry) - - !use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t - - !----------------------------------------------------------------------- - ! - ! Purpose: - ! Defines the locations of model interfaces from input data in the - ! hybrid coordinate scheme. Actual pressure values of model level - ! interfaces are determined elsewhere from the fields set here. - ! - ! Method: - ! the following fields are set: - ! hyai fraction of reference pressure used for interface pressures - ! hyam fraction of reference pressure used for midpoint pressures - ! hybi fraction of surface pressure used for interface pressures - ! hybm fraction of surface pressure used for midpoint pressures - ! hybd difference of hybi's - ! hypi reference state interface pressures - ! hypm reference state midpoint pressures - ! hypd reference state layer thicknesses - ! hypdln reference state layer thicknesses (log p) - ! hyalph distance from interface to level (used in integrals) - ! prsfac log pressure extrapolation factor (used to compute psl) - ! - ! Author: B. Boville - ! - !----------------------------------------------------------------------- - - ! arguments - type(file_desc_t), intent(inout) :: file - logical, optional, intent(in) :: psdry ! set true when coordinate is based - ! on dry surface pressure - - ! local variables - integer :: k ! Level index - logical :: dry_coord - real(r8) :: amean, bmean, atest, btest, eps -! type(formula_terms_t) :: formula_terms ! For the 'lev' and 'ilev' coords - !----------------------------------------------------------------------- - - ! check for dry pressure coordinate (default is moist) - dry_coord = .false. - if (present(psdry)) dry_coord = psdry - - ! read hybrid coeficients - call hycoef_read(file) - - ! Set layer locations - nprlev = 0 - do k=1,plev - - ! Interfaces. Set nprlev to the interface above, the first time a - ! nonzero surface pressure contribution is found. "nprlev" - ! identifies the lowest pure pressure interface. - - if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1 - end do - - ! Set nprlev if no nonzero b's have been found. 
All interfaces are - ! pure pressure. A pure pressure model requires other changes as well. - if (nprlev==0) nprlev = plev + 2 - - ! Set delta sigma part of layer thickness and reference state midpoint - ! pressures - do k=1,plev - hybd(k) = hybi(k+1) - hybi(k) - hypm(k) = hyam(k)*ps0 + hybm(k)*psr - etamid(k) = hyam(k) + hybm(k) - end do - - ! Reference state interface pressures - do k=1,plevp - hypi(k) = hyai(k)*ps0 + hybi(k)*psr - end do - - ! Reference state layer thicknesses - do k=1,plev - hypd(k) = hypi(k+1) - hypi(k) - end do - - ! Test that A's and B's at full levels are arithmetic means of A's and - ! B's at interfaces - eps = 1.e-05_r8 - do k = 1,plev - amean = ( hyai(k+1) + hyai(k) )*0.5_r8 - bmean = ( hybi(k+1) + hybi(k) )*0.5_r8 - if(amean == 0._r8 .and. hyam(k) == 0._r8) then - atest = 0._r8 - else - atest = abs( amean - hyam(k) )/ ( 0.5_r8*( abs(amean + hyam(k)) ) ) - endif - if(bmean == 0._r8 .and. hybm(k) == 0._r8) then - btest = 0._r8 - else - btest = abs( bmean - hybm(k) )/ ( 0.5_r8*( abs(bmean + hybm(k)) ) ) - endif - if (atest > eps) then - if (masterproc) then - write(iulog,9850) - write(iulog,*)'k,atest,eps=',k,atest,eps - end if - end if - - if (btest > eps) then - if (masterproc) then - write(iulog,9850) - write(iulog,*)'k,btest,eps=',k,btest,eps - end if - end if - end do - - ! Add the information for the 'lev' and 'ilev' mdim history coordinates - ! - ! The hybrid coordinate used by the SE dycore is based on a dry surface - ! pressure. Hence it is the dry pressure rather than actual pressure - ! that is computed by the formula_terms attribute. This coordinate is - ! not described by the formula - ! atmosphere_hybrid_sigma_pressure_coordinate since the formula - ! associated with that name uses actual pressure values. Furthermore, - ! the actual pressure field cannot be reconstructed from the hybrid - ! coefficients and the surface pressure field. Hence in the case of a - ! dry coordinate we add neither the standard_name nor the formula_terms - ! attributes to the lev and ilev coordinates. - - ! 
0.01 converts Pascals to millibars - alev(:plev) = 0.01_r8*ps0*(hyam(:plev) + hybm(:plev)) - ailev(:plevp) = 0.01_r8*ps0*(hyai(:plevp) + hybi(:plevp)) - -!Undo once history output has been developed -JN: -#if 0 - - if (dry_coord) then - call add_vert_coord('lev', plev, & - 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & - positive='down') - call add_hist_coord('hyam', plev, & - 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') - call add_hist_coord('hybm', plev, & - 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') - else - - formula_terms%a_name = 'hyam' - formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' - formula_terms%a_values => hyam - formula_terms%b_name = 'hybm' - formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' - formula_terms%b_values => hybm - formula_terms%p0_name = 'P0' - formula_terms%p0_long_name = 'reference pressure' - formula_terms%p0_units = 'Pa' - formula_terms%p0_value = ps0 - formula_terms%ps_name = 'PS' - - call add_vert_coord('lev', plev, & - 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & - positive='down', & - standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & - formula_terms=formula_terms) - end if - - if (dry_coord) then - call add_vert_coord('ilev', plevp, & - 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & - positive='down') - call add_hist_coord('hyai', plevp, & - 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') - call add_hist_coord('hybi', plevp, & - 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') - else - formula_terms%a_name = 'hyai' - formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' - formula_terms%a_values => hyai - formula_terms%b_name = 'hybi' - formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' - formula_terms%b_values => hybi - formula_terms%p0_name = 'P0' - formula_terms%p0_long_name = 'reference pressure' - formula_terms%p0_units = 'Pa' - formula_terms%p0_value = ps0 - formula_terms%ps_name = 'PS' - - call add_vert_coord('ilev', plevp, & - 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & - positive='down', & - standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & - formula_terms=formula_terms) - end if - -!Undo once history output has been developed -JN: -#endif - - if (masterproc) then - write(iulog,'(a)')' Layer Locations (*1000) ' - do k=1,plev - write(iulog,9800)k,hyai(k),hybi(k),hyai(k)+hybi(k) - write(iulog,9810) hyam(k), hybm(k), hyam(k)+hybm(k) - end do - - write(iulog,9800)plevp,hyai(plevp),hybi(plevp),hyai(plevp)+hybi(plevp) - write(iulog,9820) - do k=1,plev - write(iulog,9830) k, hypi(k) - write(iulog,9840) hypm(k), hypd(k) - end do - write(iulog,9830) plevp, hypi(plevp) - end if - -9800 format( 1x, i3, 3p, 3(f10.4,10x) ) -9810 format( 1x, 3x, 3p, 3(10x,f10.4) ) -9820 format(1x,'reference pressures (Pa)') -9830 format(1x,i3,f15.4) -9840 format(1x,3x,15x,2f15.4) -9850 format('HYCOEF: A and/or B vertical level coefficients at full',/, & - ' levels are not the arithmetic mean of half-level values') - -end subroutine hycoef_init - -!======================================================================= - -subroutine init_restart_hycoef(File, vdimids) - - type(file_desc_t), intent(inout) :: File - integer, intent(out) :: vdimids(:) - - ! 
PIO traps errors internally, no need to check ierr - - integer :: ierr - - ierr = PIO_Def_Dim(File, 'lev', plev, vdimids(1)) - ierr = PIO_Def_Dim(File, 'ilev', plevp, vdimids(2)) - - ierr = pio_def_var(File, 'hyai', pio_double, vdimids(2:2), hyai_desc) - ierr = pio_def_var(File, 'hyam', pio_double, vdimids(1:1), hyam_desc) - ierr = pio_def_var(File, 'hybi', pio_double, vdimids(2:2), hybi_desc) - ierr = pio_def_var(File, 'hybm', pio_double, vdimids(1:1), hybm_desc) - - ierr = pio_def_var(File, 'P0', pio_double, p0_desc) - -end subroutine init_restart_hycoef - -!======================================================================= - -subroutine write_restart_hycoef(file) - - type(file_desc_t), intent(inout) :: File - - ! PIO traps errors internally, no need to check ierr - - integer :: ierr - - ierr = pio_put_var(File, hyai_desc, hyai) - ierr = pio_put_var(File, hyam_desc, hyam) - ierr = pio_put_var(File, hybi_desc, hybi) - ierr = pio_put_var(File, hybm_desc, hybm) - - ierr = pio_put_var(File, p0_desc, ps0) - -end subroutine write_restart_hycoef - -!======================================================================= - -subroutine hycoef_read(File) - - ! This code is used both for initial and restart reading. - - type(file_desc_t), intent(inout) :: File - - integer :: flev, filev, lev_dimid, ierr - integer :: pio_errtype - - type(var_desc_t) :: p0_desc - - character(len=*), parameter :: routine = 'hycoef_read' - !---------------------------------------------------------------------------- - - ! PIO traps errors internally, no need to check ierr - - ierr = PIO_Inq_DimID(File, 'lev', lev_dimid) - ierr = PIO_Inq_dimlen(File, lev_dimid, flev) - if (plev /= flev) then - write(iulog,*) routine//': ERROR: file lev does not match model. lev (file, model):',flev, plev - call endrun(routine//': ERROR: file lev does not match model.') - end if - - ierr = PIO_Inq_DimID(File, 'ilev', lev_dimid) - ierr = PIO_Inq_dimlen(File, lev_dimid, filev) - if (plevp /= filev) then - write(iulog,*) routine//':ERROR: file ilev does not match model plevp (file, model):',filev, plevp - call endrun(routine//':ERROR: file ilev does not match model.') - end if - - ierr = pio_inq_varid(File, 'hyai', hyai_desc) - ierr = pio_inq_varid(File, 'hyam', hyam_desc) - ierr = pio_inq_varid(File, 'hybi', hybi_desc) - ierr = pio_inq_varid(File, 'hybm', hybm_desc) - - ierr = pio_get_var(File, hyai_desc, hyai) - ierr = pio_get_var(File, hybi_desc, hybi) - ierr = pio_get_var(File, hyam_desc, hyam) - ierr = pio_get_var(File, hybm_desc, hybm) - - if (masterproc) then - write(iulog,*) routine//': read hyai, hybi, hyam, hybm' - end if - - ! Check whether file contains value for P0. If it does then use it - - ! Set PIO to return error codes. - call pio_seterrorhandling(file, PIO_BCAST_ERROR, pio_errtype) - - ierr = pio_inq_varid(file, 'P0', p0_desc) - if (ierr == PIO_NOERR) then - ierr = pio_get_var(file, p0_desc, ps0) - if (ierr /= PIO_NOERR) then - call endrun(routine//': reading P0.') - end if - psr = ps0 - - if (masterproc) then - write(iulog,*) routine//': read P0 value: ', ps0 - end if - - end if - - ! Put the error handling back the way it was - call pio_seterrorhandling(file, pio_errtype) - -#if ( defined OFFLINE_DYN ) - ! make sure top interface is non zero for fv dycore - if (hyai(1) .eq. 0._r8) then - if (hybm(1) .ne. 0.0_r8) then - hyai(1) = hybm(1)*1.e-2_r8 - else if (hyam(1) .ne. 
0.0_r8) then - hyai(1) = hyam(1)*1.e-2_r8 - else - call endrun('Not able to set hyai(1) to non-zero.') - end if - end if -#endif - -end subroutine hycoef_read - -!======================================================================= - -end module hycoef diff --git a/src/utils/std_atm_profile.F90 b/src/utils/std_atm_profile.F90 new file mode 100644 index 00000000..04378085 --- /dev/null +++ b/src/utils/std_atm_profile.F90 @@ -0,0 +1,166 @@ +module std_atm_profile + +!------------------------------------------------------------------------------- +! +! The barometric formula for U.S. Standard Atmosphere is valid up to 86 km. +! see https://en.wikipedia.org/wiki/Barometric_formula. +! +! N.B. The extension above 86 km is using data from Hanli. It is not complete +! since the hardcoded parameter (c1) needs adjustment above 86 km. +! +!------------------------------------------------------------------------------- + +use shr_kind_mod, only: r8 => shr_kind_r8 +use cam_logfile, only: iulog +use cam_abortutils, only: endrun + +implicit none +private +save + +public :: & + std_atm_pres, & ! compute pressure given height + std_atm_height, & ! compute height given pressure + std_atm_temp ! compute temperature given height + +! Parameters for barometric formula for U.S. Standard Atmosphere. + +integer, parameter :: nreg = 15 ! number of regions + +real(r8), parameter :: hb(nreg) = & ! height at bottom of layer (m) + (/0.0_r8, 1.1e4_r8, 2.0e4_r8, 3.2e4_r8, 4.7e4_r8, 5.1e4_r8, 7.1e4_r8, 8.6e4_r8, & + 9.1e4_r8, 1.1e5_r8, 1.2e5_r8, 1.5e5_r8, 2.0e5_r8, 3.0e5_r8, 7.e5_r8/) + +real(r8), parameter :: pb(nreg) = & ! standard pressure (Pa) + (/101325._r8, 22632.1_r8, 5474.89_r8, 868.02_r8, 110.91_r8, 66.94_r8, 3.96_r8, 3.7e-1_r8, & + 1.5e-1_r8, 7.1e-3_r8, 2.5e-3_r8, 4.5e-4_r8, 8.47e-5_r8, 8.77e-6_r8, 3.19e-8_r8/) + +real(r8), parameter :: tb(nreg) = & ! standard temperature (K) + (/288.15_r8, 216.65_r8, 216.65_r8, 228.65_r8, 270.65_r8, 270.65_r8, 214.65_r8, 186.87_r8, & + 186.87_r8, 240._r8, 360._r8, 634.39_r8, 854.56_r8, 976.01_r8, 1.e3_r8/) + +real(r8), parameter :: lb(nreg) = & ! temperature lapse rate (K/m) + (/-0.0065_r8, 0.0_r8, 0.001_r8, 0.0028_r8, 0.0_r8, -0.0028_r8, -0.001852_r8, 0.0_r8, & + 2.796e-3_r8, 0.012_r8, 9.15e-3_r8, 4.4e-3_r8, 1.21e-3_r8, 6.e-5_r8, 0.0_r8/) + +real(r8), parameter :: rg = 8.3144598_r8 ! universal gas constant (J/mol/K) +real(r8), parameter :: g0 = 9.80665_r8 ! gravitational acceleration (m/s^2) +real(r8), parameter :: mw = 0.0289644_r8 ! molar mass of dry air (kg/mol) +real(r8), parameter :: c1 = g0*mw/rg + +!========================================================================================= +CONTAINS +!========================================================================================= + +subroutine std_atm_pres(height, pstd) + + ! arguments + real(r8), intent(in) :: height(:) ! height above sea level in meters + real(r8), intent(out) :: pstd(:) ! std pressure in Pa + + integer :: i, ii, k, nlev + character(len=*), parameter :: routine = 'std_atm_pres' + !---------------------------------------------------------------------------- + + nlev = size(height) + do k = 1, nlev + if (height(k) < 0.0_r8) then + ! Extrapolate below mean sea level using troposphere lapse rate. + ii = 1 + else + ! 
find region containing height + find_region: do i = nreg, 1, -1 + if (height(k) >= hb(i)) then + ii = i + exit find_region + end if + end do find_region + end if + + if (lb(ii) /= 0._r8) then + pstd(k) = pb(ii) * ( tb(ii) / (tb(ii) + lb(ii)*(height(k) - hb(ii)) ) )**(c1/lb(ii)) + else + pstd(k) = pb(ii) * exp( -c1*(height(k) - hb(ii))/tb(ii) ) + end if + + end do + +end subroutine std_atm_pres + +!========================================================================================= + +subroutine std_atm_height(pstd, height) + + ! arguments + real(r8), intent(in) :: pstd(:) ! std pressure in Pa + real(r8), intent(out) :: height(:) ! height above sea level in meters + + integer :: i, ii, k, nlev + logical :: found_region + character(len=*), parameter :: routine = 'std_atm_height' + !---------------------------------------------------------------------------- + + nlev = size(height) + do k = 1, nlev + + if (pstd(k) <= pb(nreg)) then + ii = nreg + else if (pstd(k) > pb(1)) then + ii = 1 + else + ! find region containing pressure + find_region: do i = 2, nreg + if (pstd(k) > pb(i)) then + ii = i - 1 + exit find_region + end if + end do find_region + end if + + if (lb(ii) /= 0._r8) then + height(k) = hb(ii) + (tb(ii)/lb(ii)) * ( (pb(ii)/pstd(k))**(lb(ii)/c1) - 1._r8 ) + else + height(k) = hb(ii) + (tb(ii)/c1)*log(pb(ii)/pstd(k)) + end if + end do + +end subroutine std_atm_height + +!========================================================================================= + +subroutine std_atm_temp(height, temp) + + ! arguments + real(r8), intent(in) :: height(:) ! std pressure in Pa + real(r8), intent(out) :: temp(:) ! temperature + + ! local vars + integer :: i, ii, k, nlev + character(len=*), parameter :: routine = 'std_atm_temp' + !---------------------------------------------------------------------------- + + nlev = size(height) + do k = 1, nlev + if (height(k) < 0.0_r8) then + ii = 1 + else + ! find region containing height + find_region: do i = nreg, 1, -1 + if (height(k) >= hb(i)) then + ii = i + exit find_region + end if + end do find_region + end if + + if (lb(ii) /= 0._r8) then + temp(k) = tb(ii) + lb(ii)*(height(k) - hb(ii)) + else + temp(k) = tb(ii) + end if + + end do + +end subroutine std_atm_temp + +end module std_atm_profile diff --git a/src/utils/string_utils.F90 b/src/utils/string_utils.F90 index 7751da69..30f730e8 100644 --- a/src/utils/string_utils.F90 +++ b/src/utils/string_utils.F90 @@ -10,6 +10,7 @@ module string_utils public :: to_upper ! Convert character string to upper case public :: to_lower ! Convert character string to lower case + public :: strlist_get_ind ! find string in a list of strings and return its index public :: increment_string ! increments a string public :: last_sig_char ! Position of last significant character in string public :: to_str ! convert integer to left justified string @@ -20,6 +21,53 @@ module string_utils CONTAINS + !========================================================================================= + + subroutine strlist_get_ind(strlist, str, ind, abort) + + ! Get the index of a given string in a list of strings. Optional abort argument + ! allows returning control to caller when the string is not found. Default + ! behavior is to call endrun when string is not found. + + use cam_logfile, only: iulog + use cam_abortutils, only: endrun + + ! Arguments + character(len=*), intent(in) :: strlist(:) ! list of strings + character(len=*), intent(in) :: str ! string to search for + integer, intent(out) :: ind ! 
index of str in strlist + logical, optional, intent(in) :: abort ! flag controlling abort + + ! Local variables + integer :: m + logical :: abort_on_error + character(len=*), parameter :: sub='strlist_get_ind' + !---------------------------------------------------------------------------- + + ! Find string in list + do m = 1, size(strlist) + if (str == strlist(m)) then + ind = m + return + end if + end do + + ! String not found + abort_on_error = .true. + if (present(abort)) abort_on_error = abort + + if (abort_on_error) then + write(iulog, *) sub//': FATAL: string:', trim(str), ' not found in list:', strlist(:) + call endrun(sub//': FATAL: string not found') + end if + + ! error return + ind = -1 + + end subroutine strlist_get_ind + + !========================================================================================= + integer function increment_string(str, increment) !----------------------------------------------------------------------- ! ... Increment a string whose ending characters are digits. From 31f8be06d05e2c652012cb125b9ee9cb0d676584 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 17 Mar 2021 14:09:04 -0600 Subject: [PATCH 10/45] Remove pmgrid.F90 and replace with vert_coord module. --- cime_config/cam_config.py | 38 +---- src/dynamics/se/dp_coupling.F90 | 2 +- src/dynamics/se/dyn_grid.F90 | 11 +- src/dynamics/se/pmgrid.F90 | 12 -- src/dynamics/utils/hycoef.F90 | 279 ++++++++++++++++++++------------ 5 files changed, 187 insertions(+), 155 deletions(-) delete mode 100644 src/dynamics/se/pmgrid.F90 diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 1b81f42c..830dc244 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -636,30 +636,6 @@ def __init__(self, case, case_log): 'vert_coord_nl', 'ref_pres_nl'] - #---------------------------------------- - # Set CAM grid variables (nlat,nlon,nlev) - #---------------------------------------- - - # Set number of vertical levels - if case_nlev: - nlev = case_nlev - else: - nlev = '30' # Default value - - # Add vertical levels to configure object - nlev_desc = "Number of vertical levels." - self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) - - # Add number of latitudes in grid to configure object - nlat_desc = "Number of unique latitude points in rectangular lat/lon" \ - " grid.\nSet to 1 (one) for unstructured grids." - self.create_config("nlat", nlat_desc, case_ny) - - # Add number of longitudes in grid to configure object - nlon_desc = "Number of unique longitude points in rectangular lat/lon" \ - " grid.\nTotal number of columns for unstructured grids." - self.create_config("nlon", nlon_desc, case_nx) - #------------------------ # Set CAM physics columns #------------------------ @@ -787,23 +763,23 @@ def __init__(self, case, case_log): raise CamConfigValError(emsg.format(user_dyn_opt, dyn)) # End if - #-------------------------------------------------------- - # Set CAM grid variables (nlev and horizontal dimensions) - #-------------------------------------------------------- + #---------------------------------------- + # Set CAM grid variables (nlat,nlon,nlev) + #---------------------------------------- # Set number of vertical levels if case_nlev: nlev = case_nlev else: - nlev = 30 - - # Add vertical levels CPP definition (REMOVE ONCE HELD-SUAREZ PR IS MERGED!): - self.add_cppdef("PLEV", value=nlev) + nlev = '30' # Default value # Add vertical levels to configure object nlev_desc = "Number of vertical levels." 
self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) + # Add vertical levels CPP definition (REMOVE ONCE SE DIMENSIONS_MOD HAS INIT SUBROUTINE): + self.add_cppdef("PLEV", value=nlev) + #Set horizontal dimension variables: if dyn == "se": # Extract cubed-sphere grid values from hgrid string: diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 6ef65607..089e5cef 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -17,7 +17,7 @@ module dp_coupling use physics_types, only: physics_state, physics_tend use physics_types, only: ix_qv, ix_cld_liq, ix_rain !Remove once constituents are enabled use physics_grid, only: pcols => columns_on_task, get_dyn_col_p -use physics_grid, only: pver, pverp +use vert_coord, only: pver, pverp use dp_mapping, only: nphys_pts diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 55306505..c901ace0 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -127,11 +127,11 @@ subroutine model_grid_init() ! decomposition based on the dynamics (SE) grid. use mpi, only: mpi_max + use vert_coord, only: vert_coord_init, pver use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 use physconst, only: thermodynamic_active_species_num use ref_pres, only: ref_pres_init - use pmgrid, only: plev, plevp !Remove once phys_vert_coord is enabled!! -JN use time_manager, only: get_nstep, get_step_size use dp_mapping, only: dp_init, dp_write use native_mapping, only: do_native_mapping, create_native_mapping_files @@ -175,6 +175,9 @@ subroutine model_grid_init() ! Get file handle for initial file and first consistency check fh_ini => initial_file_get_id() + ! Set vertical coordinate information not provided by namelist: + call vert_coord_init(1, pver) + ! Initialize hybrid coordinate arrays call hycoef_init(fh_ini, psdry=.true.) @@ -188,7 +191,7 @@ subroutine model_grid_init() end do ! Initialize reference pressures - call ref_pres_init(plev, plevp, hypi, hypm, nprlev) + call ref_pres_init(hypi, hypm, nprlev) if (iam < par%nprocs) then @@ -359,8 +362,8 @@ subroutine model_grid_init() end if ! Initialize physics grid decomposition: - call phys_grid_init(hdim1_d, 1, nlev, 'SE', & - 1, nlev, local_dyn_columns, gridname, & + call phys_grid_init(hdim1_d, 1, 'SE', & + local_dyn_columns, gridname, & grid_attribute_names) ! Deallocate grid_attirbute_names, as it is no longer needed: diff --git a/src/dynamics/se/pmgrid.F90 b/src/dynamics/se/pmgrid.F90 deleted file mode 100644 index 87f7908b..00000000 --- a/src/dynamics/se/pmgrid.F90 +++ /dev/null @@ -1,12 +0,0 @@ -module pmgrid - -! PLON and PLAT do not correspond to the number of latitudes and longitudes in -! this version of dynamics. - -implicit none -save - -integer, parameter :: plev = PLEV ! number of vertical levels -integer, parameter :: plevp = plev + 1 - -end module pmgrid diff --git a/src/dynamics/utils/hycoef.F90 b/src/dynamics/utils/hycoef.F90 index 670ed2bd..fdac5cfa 100644 --- a/src/dynamics/utils/hycoef.F90 +++ b/src/dynamics/utils/hycoef.F90 @@ -2,7 +2,7 @@ module hycoef use shr_kind_mod, only: r8 => shr_kind_r8 use spmd_utils, only: masterproc -use pmgrid, only: plev, plevp +use vert_coord, only: pver, pverp use cam_logfile, only: iulog use cam_abortutils, only: endrun use pio, only: file_desc_t, var_desc_t, & @@ -23,28 +23,25 @@ module hycoef ! !----------------------------------------------------------------------- -real(r8), public, target :: hyai(plevp) ! 
ps0 component of hybrid coordinate - interfaces -real(r8), public, target :: hyam(plev) ! ps0 component of hybrid coordinate - midpoints -real(r8), public, target :: hybi(plevp) ! ps component of hybrid coordinate - interfaces -real(r8), public, target :: hybm(plev) ! ps component of hybrid coordinate - midpoints - -real(r8), public :: etamid(plev) ! hybrid coordinate - midpoints - -real(r8), public :: hybd(plev) ! difference in b (hybi) across layers -real(r8), public :: hypi(plevp) ! reference pressures at interfaces -real(r8), public :: hypm(plev) ! reference pressures at midpoints -real(r8), public :: hypd(plev) ! reference pressure layer thickness -#ifdef planet_mars -real(r8), public, protected :: ps0 = 6.0e1_r8 ! Base state surface pressure (pascals) -real(r8), public, protected :: psr = 6.0e1_r8 ! Reference surface pressure (pascals) -#else -real(r8), public, protected :: ps0 = 1.0e5_r8 ! Base state surface pressure (pascals) -real(r8), public, protected :: psr = 1.0e5_r8 ! Reference surface pressure (pascals) -#endif -real(r8), target :: alev(plev) ! level values (pascals) for 'lev' coord -real(r8), target :: ailev(plevp) ! interface level values for 'ilev' coord +real(r8), public, allocatable, target :: hyai(:) ! ps0 component of hybrid coordinate - interfaces +real(r8), public, allocatable, target :: hyam(:) ! ps0 component of hybrid coordinate - midpoints +real(r8), public, allocatable, target :: hybi(:) ! ps component of hybrid coordinate - interfaces +real(r8), public, allocatable, target :: hybm(:) ! ps component of hybrid coordinate - midpoints + +real(r8), public, allocatable :: etamid(:) ! hybrid coordinate - midpoints + +real(r8), public, allocatable :: hybd(:) ! difference in b (hybi) across layers +real(r8), public, allocatable :: hypi(:) ! reference pressures at interfaces +real(r8), public, allocatable :: hypm(:) ! reference pressures at midpoints +real(r8), public, allocatable :: hypd(:) ! reference pressure layer thickness -integer, public :: nprlev ! number of pure pressure levels at top +real(r8), public, protected :: ps0 ! Base state surface pressure (pascals) +real(r8), public, protected :: psr ! Reference surface pressure (pascals) + +real(r8), allocatable, target :: alev(:) ! level values (pascals) for 'lev' coord +real(r8), allocatable, target :: ailev(:) ! interface level values for 'ilev' coord + +integer, public :: nprlev ! number of pure pressure levels at top public hycoef_init @@ -57,7 +54,9 @@ module hycoef subroutine hycoef_init(file, psdry) - !use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t +! use cam_history_support, only: add_hist_coord, add_vert_coord, formula_terms_t + use physconst, only: ps_base, ps_ref + use string_utils, only: to_str !----------------------------------------------------------------------- ! @@ -87,15 +86,80 @@ subroutine hycoef_init(file, psdry) ! arguments type(file_desc_t), intent(inout) :: file logical, optional, intent(in) :: psdry ! set true when coordinate is based - ! on dry surface pressure + ! on dry surface pressure ! local variables integer :: k ! Level index + integer :: iret ! Return status integer logical :: dry_coord real(r8) :: amean, bmean, atest, btest, eps ! type(formula_terms_t) :: formula_terms ! For the 'lev' and 'ilev' coords + + character(len=*), parameter :: subname = 'hycoef_init' + !----------------------------------------------------------------------- + ! Initalize reference pressures: + ps0 = real(ps_base, r8) ! 
Base state surface pressure (pascals) + psr = real(ps_ref, r8) ! Reference surface pressure (pascals) + + ! Allocate public variables: + + allocate(hyai(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hyai(pverp) failed with stat: '//to_str(iret)) + end if + + allocate(hyam(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hyam(pver) failed with stat: '//to_str(iret)) + end if + + allocate(hybi(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hybi(pverp) failed with stat: '//to_str(iret)) + end if + + allocate(hybm(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hybm(pver) failed with stat: '//to_str(iret)) + end if + + allocate(etamid(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate etamid(pver) failed with stat: '//to_str(iret)) + end if + + allocate(hybd(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hybd(pver) failed with stat: '//to_str(iret)) + end if + + allocate(hypi(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hypi(pverp) failed with stat: '//to_str(iret)) + end if + + allocate(hypm(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hypm(pver) failed with stat: '//to_str(iret)) + end if + + allocate(hypd(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate hypd(pver) failed with stat: '//to_str(iret)) + end if + + allocate(alev(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate alev(pver) failed with stat: '//to_str(iret)) + end if + + allocate(ailev(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate ailev(pverp) failed with stat: '//to_str(iret)) + end if + ! check for dry pressure coordinate (default is moist) dry_coord = .false. if (present(psdry)) dry_coord = psdry @@ -105,41 +169,46 @@ subroutine hycoef_init(file, psdry) ! Set layer locations nprlev = 0 - do k=1,plev + do k=1,pver ! Interfaces. Set nprlev to the interface above, the first time a ! nonzero surface pressure contribution is found. "nprlev" ! identifies the lowest pure pressure interface. - if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1 +! Remove this line once determine its replacement doesn't cause answer changes +! if (nprlev==0 .and. hybi(k).ne.0.0_r8) nprlev = k - 1 + if (hybi(k) /= 0.0_r8) then + nprlev = k - 1 + exit + end if end do ! Set nprlev if no nonzero b's have been found. All interfaces are ! pure pressure. A pure pressure model requires other changes as well. - if (nprlev==0) nprlev = plev + 2 + if (nprlev==0) nprlev = pver + 2 ! Set delta sigma part of layer thickness and reference state midpoint ! pressures - do k=1,plev + do k=1,pver hybd(k) = hybi(k+1) - hybi(k) hypm(k) = hyam(k)*ps0 + hybm(k)*psr etamid(k) = hyam(k) + hybm(k) end do ! Reference state interface pressures - do k=1,plevp + do k=1,pverp hypi(k) = hyai(k)*ps0 + hybi(k)*psr end do ! Reference state layer thicknesses - do k=1,plev + do k=1,pver hypd(k) = hypi(k+1) - hypi(k) end do ! Test that A's and B's at full levels are arithmetic means of A's and ! B's at interfaces eps = 1.e-05_r8 - do k = 1,plev + do k = 1,pver amean = ( hyai(k+1) + hyai(k) )*0.5_r8 bmean = ( hybi(k+1) + hybi(k) )*0.5_r8 if(amean == 0._r8 .and. hyam(k) == 0._r8) then @@ -181,86 +250,82 @@ subroutine hycoef_init(file, psdry) ! attributes to the lev and ilev coordinates. ! 
0.01 converts Pascals to millibars - alev(:plev) = 0.01_r8*ps0*(hyam(:plev) + hybm(:plev)) - ailev(:plevp) = 0.01_r8*ps0*(hyai(:plevp) + hybi(:plevp)) - -!Undo once history output has been developed -JN: -#if 0 - - if (dry_coord) then - call add_vert_coord('lev', plev, & - 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & - positive='down') - call add_hist_coord('hyam', plev, & - 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') - call add_hist_coord('hybm', plev, & - 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') - else - - formula_terms%a_name = 'hyam' - formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' - formula_terms%a_values => hyam - formula_terms%b_name = 'hybm' - formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' - formula_terms%b_values => hybm - formula_terms%p0_name = 'P0' - formula_terms%p0_long_name = 'reference pressure' - formula_terms%p0_units = 'Pa' - formula_terms%p0_value = ps0 - formula_terms%ps_name = 'PS' - - call add_vert_coord('lev', plev, & - 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & - positive='down', & - standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & - formula_terms=formula_terms) - end if - - if (dry_coord) then - call add_vert_coord('ilev', plevp, & - 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & - positive='down') - call add_hist_coord('hyai', plevp, & - 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') - call add_hist_coord('hybi', plevp, & - 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') - else - formula_terms%a_name = 'hyai' - formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' - formula_terms%a_values => hyai - formula_terms%b_name = 'hybi' - formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' - formula_terms%b_values => hybi - formula_terms%p0_name = 'P0' - formula_terms%p0_long_name = 'reference pressure' - formula_terms%p0_units = 'Pa' - formula_terms%p0_value = ps0 - formula_terms%ps_name = 'PS' - - call add_vert_coord('ilev', plevp, & - 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & - positive='down', & - standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & - formula_terms=formula_terms) - end if - -!Undo once history output has been developed -JN: -#endif - + alev(:pver) = 0.01_r8*ps0*(hyam(:pver) + hybm(:pver)) + ailev(:pverp) = 0.01_r8*ps0*(hyai(:pverp) + hybi(:pverp)) + +! -------------------- +! THIS CODE BLOCK TEMPORARILY COMMENTED OUT UNTIL HISTORY OUTPUT IS ENABLED +! if (dry_coord) then +! call add_vert_coord('lev', plev, & +! 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & +! positive='down') +! call add_hist_coord('hyam', plev, & +! 'hybrid A coefficient at layer midpoints', '1', hyam, dimname='lev') +! call add_hist_coord('hybm', plev, & +! 'hybrid B coefficient at layer midpoints', '1', hybm, dimname='lev') +! else +! +! formula_terms%a_name = 'hyam' +! formula_terms%a_long_name = 'hybrid A coefficient at layer midpoints' +! formula_terms%a_values => hyam +! formula_terms%b_name = 'hybm' +! formula_terms%b_long_name = 'hybrid B coefficient at layer midpoints' +! formula_terms%b_values => hybm +! formula_terms%p0_name = 'P0' +! formula_terms%p0_long_name = 'reference pressure' +! formula_terms%p0_units = 'Pa' +! formula_terms%p0_value = ps0 +! formula_terms%ps_name = 'PS' +! +! call add_vert_coord('lev', plev, & +! 'hybrid level at midpoints (1000*(A+B))', 'hPa', alev, & +! positive='down', & +! 
standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & +! formula_terms=formula_terms) +! end if +! +! if (dry_coord) then +! call add_vert_coord('ilev', plevp, & +! 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & +! positive='down') +! call add_hist_coord('hyai', plevp, & +! 'hybrid A coefficient at layer interfaces', '1', hyai, dimname='ilev') +! call add_hist_coord('hybi', plevp, & +! 'hybrid B coefficient at layer interfaces', '1', hybi, dimname='ilev') +! else +! formula_terms%a_name = 'hyai' +! formula_terms%a_long_name = 'hybrid A coefficient at layer interfaces' +! formula_terms%a_values => hyai +! formula_terms%b_name = 'hybi' +! formula_terms%b_long_name = 'hybrid B coefficient at layer interfaces' +! formula_terms%b_values => hybi +! formula_terms%p0_name = 'P0' +! formula_terms%p0_long_name = 'reference pressure' +! formula_terms%p0_units = 'Pa' +! formula_terms%p0_value = ps0 +! formula_terms%ps_name = 'PS' +! +! call add_vert_coord('ilev', plevp, & +! 'hybrid level at interfaces (1000*(A+B))', 'hPa', ailev, & +! positive='down', & +! standard_name='atmosphere_hybrid_sigma_pressure_coordinate', & +! formula_terms=formula_terms) +! end if +! if (masterproc) then write(iulog,'(a)')' Layer Locations (*1000) ' - do k=1,plev + do k=1,pver write(iulog,9800)k,hyai(k),hybi(k),hyai(k)+hybi(k) write(iulog,9810) hyam(k), hybm(k), hyam(k)+hybm(k) end do - write(iulog,9800)plevp,hyai(plevp),hybi(plevp),hyai(plevp)+hybi(plevp) + write(iulog,9800)pverp,hyai(pverp),hybi(pverp),hyai(pverp)+hybi(pverp) write(iulog,9820) - do k=1,plev + do k=1,pver write(iulog,9830) k, hypi(k) write(iulog,9840) hypm(k), hypd(k) end do - write(iulog,9830) plevp, hypi(plevp) + write(iulog,9830) pverp, hypi(pverp) end if 9800 format( 1x, i3, 3p, 3(f10.4,10x) ) @@ -284,8 +349,8 @@ subroutine init_restart_hycoef(File, vdimids) integer :: ierr - ierr = PIO_Def_Dim(File, 'lev', plev, vdimids(1)) - ierr = PIO_Def_Dim(File, 'ilev', plevp, vdimids(2)) + ierr = PIO_Def_Dim(File, 'lev', pver, vdimids(1)) + ierr = PIO_Def_Dim(File, 'ilev', pverp, vdimids(2)) ierr = pio_def_var(File, 'hyai', pio_double, vdimids(2:2), hyai_desc) ierr = pio_def_var(File, 'hyam', pio_double, vdimids(1:1), hyam_desc) @@ -335,15 +400,15 @@ subroutine hycoef_read(File) ierr = PIO_Inq_DimID(File, 'lev', lev_dimid) ierr = PIO_Inq_dimlen(File, lev_dimid, flev) - if (plev /= flev) then - write(iulog,*) routine//': ERROR: file lev does not match model. lev (file, model):',flev, plev + if (pver /= flev) then + write(iulog,*) routine//': ERROR: file lev does not match model. lev (file, model):',flev, pver call endrun(routine//': ERROR: file lev does not match model.') end if ierr = PIO_Inq_DimID(File, 'ilev', lev_dimid) ierr = PIO_Inq_dimlen(File, lev_dimid, filev) - if (plevp /= filev) then - write(iulog,*) routine//':ERROR: file ilev does not match model plevp (file, model):',filev, plevp + if (pverp /= filev) then + write(iulog,*) routine//':ERROR: file ilev does not match model ilev (file, model):',filev, pverp call endrun(routine//':ERROR: file ilev does not match model.') end if From d87fb559a3acc695a3eea70c0aef14d12ee0a012 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Tue, 30 Mar 2021 12:31:25 -0600 Subject: [PATCH 11/45] Allow SE dycore to use run-time configured vertical levels and tracer numbers. 
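The common pattern in this patch is to replace compile-time dimensions
(the PLEV CPP define and the pcnst-based qsize_d/ntrac_d parameters)
with run-time sizing.  The sketch below is illustrative only: the module
and routine names example_mod/example_init do not exist in CAM, while
pver (vert_coord), endrun (cam_abortutils), and to_str (string_utils)
are the existing helpers this patch uses for the real conversions.

    module example_mod
      use shr_kind_mod, only: r8 => shr_kind_r8
      implicit none
      private
      save

      ! Before: real(r8), public :: nu_lev(PLEV)   (PLEV fixed at compile time)
      ! After:  the array is allocated once the run-time level count is known.
      real(r8), public, allocatable :: nu_lev(:)

      public :: example_init

    contains

      subroutine example_init()
        use vert_coord,     only: pver     ! run-time number of vertical levels
        use cam_abortutils, only: endrun
        use string_utils,   only: to_str

        integer :: iret

        ! Size the level-dimensioned array and abort with a clear message on failure:
        allocate(nu_lev(pver), stat=iret)
        if (iret /= 0) then
          call endrun('example_init: allocate nu_lev(pver) failed with stat: '//to_str(iret))
        end if
      end subroutine example_init

    end module example_mod

dimensions_mod_init() applies this pattern to each PLEV-dimensioned
array in dimensions_mod; it is expected to be called from the SE dycore
initialization after vert_coord_init has set pver and pverp.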
--- cime_config/cam_config.py | 3 - src/dynamics/se/dp_mapping.F90 | 9 +- src/dynamics/se/dycore/dimensions_mod.F90 | 175 +++-- src/dynamics/se/dycore/element_mod.F90 | 627 ++++++++++++++++-- .../se/dycore/fvm_control_volume_mod.F90 | 504 +++++++++++--- src/dynamics/se/dycore/hybvcoord_mod.F90 | 23 +- src/dynamics/se/dycore/parallel_mod.F90 | 34 +- src/dynamics/se/dycore/prim_init.F90 | 6 +- src/dynamics/se/dycore/prim_state_mod.F90 | 85 ++- src/dynamics/se/dycore/vertremap_mod.F90 | 1 - src/dynamics/se/dyn_grid.F90 | 53 +- 11 files changed, 1277 insertions(+), 243 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 830dc244..26b1e157 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -777,9 +777,6 @@ def __init__(self, case, case_log): nlev_desc = "Number of vertical levels." self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) - # Add vertical levels CPP definition (REMOVE ONCE SE DIMENSIONS_MOD HAS INIT SUBROUTINE): - self.add_cppdef("PLEV", value=nlev) - #Set horizontal dimension variables: if dyn == "se": # Extract cubed-sphere grid values from hgrid string: diff --git a/src/dynamics/se/dp_mapping.F90 b/src/dynamics/se/dp_mapping.F90 index 96fedd96..b3ff8850 100644 --- a/src/dynamics/se/dp_mapping.F90 +++ b/src/dynamics/se/dp_mapping.F90 @@ -4,7 +4,7 @@ module dp_mapping use shr_const_mod, only: pi => shr_const_pi !SE dycore: - use dimensions_mod, only: np, npsq, fv_nphys + use dimensions_mod, only: np, fv_nphys use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl use coordinate_systems_mod, only: spherical_polar_t use fvm_control_volume_mod, only: fvm_struct @@ -23,7 +23,7 @@ module dp_mapping ! no physgrid: nphys_pts = npsq (physics on GLL grid) ! physgrid: nphys_pts = nphys2 (physics on CSLAM grid) ! Value is set when se_fv_nphys namelist variable is read - integer, public :: nphys_pts = npsq + integer, public :: nphys_pts ! NOTE: dp_gid() is in space filling curve rank order ! all other global arrays are in block id (global id) order @@ -52,12 +52,15 @@ subroutine dp_init(elem,fvm) use cam_logfile, only: iulog !SE dycore: - use dimensions_mod, only: nelemd, nc, irecons_tracer + use dimensions_mod, only: nelemd, nc, irecons_tracer, npsq use element_mod, only: element_t type(element_t) , dimension(nelemd), intent(in) :: elem type (fvm_struct), dimension(nelemd), intent(in) :: fvm + !Initialize total number of physics points per spectral element: + nphys_pts = npsq + num_weights_phys2fvm = 0 num_weights_fvm2phys = 0 if (fv_nphys>0) then diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 index 046fa2f8..383af01f 100644 --- a/src/dynamics/se/dycore/dimensions_mod.F90 +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -1,23 +1,9 @@ module dimensions_mod use shr_kind_mod, only: r8=>shr_kind_r8 -!Un-comment or modify once a formal plan for pcnst has been developed -JN: -!#ifdef FVM_TRACERS -! use constituents, only: ntrac_d=>pcnst ! _EXTERNAL -!#else -! use constituents, only: qsize_d=>pcnst ! _EXTERNAL -!#endif implicit none private -! set MAX number of tracers. actual number of tracers is a run time argument -#ifdef FVM_TRACERS - integer, parameter :: qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) - integer, parameter :: ntrac_d = 3 ! Needed until pcnst is resolved. -JN -#else - integer, parameter :: ntrac_d = 0 ! No fvm tracers if CSLAM is off - integer, parameter :: qsize_d = 3 ! Needed until pcnst is resolved. -JN -#endif ! ! 
The variables below hold indices of water vapor and condensate loading tracers as well as ! associated heat capacities (initialized in dyn_init): @@ -42,11 +28,14 @@ module dimensions_mod logical , public :: lcp_moist = .true. integer, parameter, public :: np = NP - integer, parameter, public :: nc = 3 !cslam resolution - integer , public :: fv_nphys !physics-grid resolution - the "MAX" is so that the code compiles with NC=0 + integer, parameter, public :: nc = 3 !cslam resolution + integer , public :: fv_nphys !physics-grid resolution - the "MAX" is so that the code compiles with NC=0 - integer :: ntrac = 0 !ntrac is set in dyn_comp - integer :: qsize = 0 !qsize is set in dyn_comp + integer, public, protected :: qsize_d !SE tracer dimension size + integer, public, protected :: ntrac_d !FVM tracer dimension size + + integer, public :: ntrac = 0 !ntrac is set in dyn_comp + integer, public :: qsize = 0 !qsize is set in dyn_comp ! ! hyperviscosity is applied on approximate pressure levels ! Similar to CAM-EUL; see CAM5 scietific documentation (Note TN-486), equation (3.09), page 58. @@ -56,7 +45,7 @@ module dimensions_mod logical, public :: lprint!for debugging integer, parameter, public :: ngpc=3 !number of Gausspoints for the fvm integral approximation !phl change from 4 integer, parameter, public :: irecons_tracer=6!=1 is PCoM, =3 is PLM, =6 is PPM for tracer reconstruction - integer, public :: irecons_tracer_lev(PLEV) + integer, allocatable, public :: irecons_tracer_lev(:) integer, parameter, public :: nhe=1 !Max. Courant number integer, parameter, public :: nhr=2 !halo width needed for reconstruction - phl integer, parameter, public :: nht=nhe+nhr !total halo width where reconstruction is needed (nht<=nc) - phl @@ -74,20 +63,20 @@ module dimensions_mod integer, allocatable, public :: kord_tr(:), kord_tr_cslam(:) - real(r8), public :: nu_scale_top(PLEV)! scaling of del2 viscosity in sopnge layer (initialized in dyn_comp) - real(r8), public :: nu_lev(PLEV) - real(r8), public :: otau(PLEV) + real(r8), allocatable, public :: nu_scale_top(:) ! scaling of del2 viscosity in sponge layer (initialized in dyn_comp) + real(r8), allocatable, public :: nu_lev(:) + real(r8), allocatable, public :: otau(:) + integer, public :: ksponge_end ! sponge is active k=1,ksponge_end - real(r8), public :: nu_div_lev(PLEV) = 1.0_r8 ! scaling of viscosity in sponge layer - ! (set in prim_state; if applicable) - real(r8), public :: kmvis_ref(PLEV) !reference profiles for molecular diffusion - real(r8), public :: kmcnd_ref(PLEV) !reference profiles for molecular diffusion - real(r8), public :: rho_ref(PLEV) !reference profiles for rho - real(r8), public :: km_sponge_factor(PLEV) !scaling for molecular diffusion (when used as sponge) - real(r8), public :: kmvisi_ref(PLEV+1) !reference profiles for molecular diffusion - real(r8), public :: kmcndi_ref(PLEV+1) !reference profiles for molecular diffusion - real(r8), public :: rhoi_ref(PLEV+1) !reference profiles for rho + real (r8), allocatable, public :: nu_div_lev(:) ! 
scaling of viscosity in sponge layer + real(r8), allocatable, public :: kmvis_ref(:) !reference profiles for molecular diffusion + real(r8), allocatable, public :: kmcnd_ref(:) !reference profiles for molecular diffusion + real(r8), allocatable, public :: rho_ref(:) !reference profiles for rho + real(r8), allocatable, public :: km_sponge_factor(:) !scaling for molecular diffusion (when used as sponge) + real(r8), allocatable, public :: kmvisi_ref(:) !reference profiles for molecular diffusion + real(r8), allocatable, public :: kmcndi_ref(:) !reference profiles for molecular diffusion + real(r8), allocatable, public :: rhoi_ref(:) !reference profiles for rho integer, public :: nhc_phys integer, public :: nhe_phys @@ -96,10 +85,9 @@ module dimensions_mod integer, public :: npdg = 0 ! dg degree for hybrid cg/dg element 0=disabled - integer, parameter, public :: npsq = np*np - integer, parameter, public :: nlev=PLEV - integer, parameter, public :: nlevp=nlev+1 - + integer, public, protected :: npsq + integer, public, protected :: nlev + integer, public, protected :: nlevp ! params for a mesh ! integer, public, parameter :: max_elements_attached_to_node = 7 @@ -111,8 +99,6 @@ module dimensions_mod integer, public :: max_corner_elem = 1 !max_elements_attached_to_node-3 integer, public :: max_neigh_edges = 8 !4 + 4*max_corner_elem - public :: qsize,qsize_d,ntrac_d,ntrac - integer, public :: ne integer, public :: nelem ! total number of elements integer, public :: nelemd ! number of elements per MPI task @@ -121,9 +107,123 @@ module dimensions_mod integer, public :: nnodes,npart,nmpi_per_node integer, public :: GlobalUniqueCols + !Public subroutines + public :: dimensions_mod_init public :: set_mesh_dimensions +!============================================================================== contains +!============================================================================== + + subroutine dimensions_mod_init() + + ! Allocate and initalize the relevant SE dycore dimension variables. + + use vert_coord, only: pver, pverp + use constituents, only: pcnst + use cam_abortutils, only: endrun + use string_utils, only: to_str + + ! Local variables: + + integer :: iret + + character(len=*), parameter :: subname = 'dimensions_mod_init' + + ! Set tracer dimension variables: + +#ifdef FVM_TRACERS + qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) + ntrac_d = pcnst +#else + qsize_d = pcnst + ntrac_d = 0 ! No fvm tracers if CSLAM is off +#endif + + ! Set grid dimension variables: + + npsq = np*np + nlev = pver + nlevp = pverp + + ! 
Allocate vertically-dimensioned variables: + + allocate(irecons_tracer_lev(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate irecons_tracer_lev(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(nu_scale_top(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate nu_scale_top(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(nu_lev(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate nu_lev(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(otau(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate otau(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(nu_div_lev(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate nu_div_lev(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(kmvis_ref(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kmvis_ref(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(kmcnd_ref(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kmcnd_ref(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(rho_ref(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate rho_ref(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(km_sponge_factor(pver), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate km_sponge_factor(pver) failed with stat: '//& + to_str(iret)) + end if + + allocate(kmvisi_ref(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kmvisi_ref(pverp) failed with stat: '//& + to_str(iret)) + end if + + allocate(kmcndi_ref(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kmcndi_ref(pverp) failed with stat: '//& + to_str(iret)) + end if + + allocate(rhoi_ref(pverp), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate rhoi_ref(pverp) failed with stat: '//& + to_str(iret)) + end if + + + end subroutine dimensions_mod_init + +!============================================================================== subroutine set_mesh_dimensions() @@ -138,6 +238,7 @@ subroutine set_mesh_dimensions() end subroutine set_mesh_dimensions +!============================================================================== end module dimensions_mod diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 index 422799b8..c72f417d 100644 --- a/src/dynamics/se/dycore/element_mod.F90 +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -2,9 +2,10 @@ module element_mod use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 use coordinate_systems_mod, only: spherical_polar_t, cartesian2D_t, cartesian3D_t, distance - use dimensions_mod, only: np, nc, npsq, nlev, nlevp, qsize_d, max_neigh_edges,ntrac_d use edgetype_mod, only: edgedescriptor_t use gridgraph_mod, only: gridvertex_t + use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -21,12 +22,12 @@ module element_mod ! vertically-lagrangian code advects dp3d instead of ps ! tracers Q, Qdp always use 2 level time scheme - real (kind=r8) :: v (np,np,2,nlev,timelevels) ! velocity - real (kind=r8) :: T (np,np,nlev,timelevels) ! temperature - real (kind=r8) :: dp3d (np,np,nlev,timelevels) ! dry delta p on levels - real (kind=r8) :: psdry (np,np) ! dry surface pressure - real (kind=r8) :: phis (np,np) ! surface geopotential (prescribed) - real (kind=r8) :: Qdp (np,np,nlev,qsize_d,2) ! 
Tracer mass + real(kind=r8), allocatable :: v(:,:,:,:,:) ! velocity + real(kind=r8), allocatable :: T(:,:,:,:) ! temperature + real(kind=r8), allocatable :: dp3d(:,:,:,:) ! dry delta p on levels + real(kind=r8), allocatable :: psdry(:,:) ! dry surface pressure + real(kind=r8), allocatable :: phis(:,:) ! surface geopotential (prescribed) + real(kind=r8), allocatable :: Qdp(:,:,:,:,:) ! Tracer mass end type elem_state_t @@ -35,44 +36,44 @@ module element_mod ! ! storage for subcycling tracers/dynamics ! - real (kind=r8) :: vn0 (np,np,2,nlev) ! velocity for SE tracer advection - real (kind=r8) :: dpdiss_biharmonic(np,np,nlev) ! mean dp dissipation tendency, if nu_p>0 - real (kind=r8) :: dpdiss_ave(np,np,nlev) ! mean dp used to compute psdiss_tens + real(kind=r8), allocatable :: vn0(:,:,:,:) ! velocity for SE tracer advection + real(kind=r8), allocatable :: dpdiss_biharmonic(:,:,:) ! mean dp dissipation tendency, if nu_p>0 + real(kind=r8), allocatable :: dpdiss_ave(:,:,:) ! mean dp used to compute psdiss_tens ! diagnostics for explicit timestep - real (kind=r8) :: phi(np,np,nlev) ! geopotential - real (kind=r8) :: omega(np,np,nlev) ! vertical velocity + real(kind=r8), allocatable :: phi(:,:,:) ! geopotential + real(kind=r8), allocatable :: omega(:,:,:) ! vertical velocity ! semi-implicit diagnostics: computed in explict-component, reused in Helmholtz-component. - real (kind=r8) :: zeta(np,np,nlev) ! relative vorticity - real (kind=r8) :: div(np,np,nlev,timelevels) ! divergence + real(kind=r8), allocatable :: zeta(:,:,:) ! relative vorticity + real(kind=r8), allocatable :: div(:,:,:,:) ! divergence ! tracer advection fields used for consistency and limiters - real (kind=r8) :: dp(np,np,nlev) ! for dp_tracers at physics timestep - real (kind=r8) :: divdp(np,np,nlev) ! divergence of dp - real (kind=r8) :: divdp_proj(np,np,nlev) ! DSSed divdp - real (kind=r8) :: mass(MAX(qsize_d,ntrac_d)+9) ! total tracer mass for diagnostics + real(kind=r8), allocatable :: dp(:,:,:) ! for dp_tracers at physics timestep + real(kind=r8), allocatable :: divdp(:,:,:) ! divergence of dp + real(kind=r8), allocatable :: divdp_proj(:,:,:) ! DSSed divdp + real(kind=r8), allocatable :: mass(:) ! total tracer mass for diagnostics ! forcing terms for CAM - real (kind=r8) :: FQ(np,np,nlev,qsize_d) ! tracer forcing - real (kind=r8) :: FM(np,np,2,nlev) ! momentum forcing - real (kind=r8) :: FDP(np,np,nlev) ! save full updated dp right after physics - real (kind=r8) :: FT(np,np,nlev) ! temperature forcing - real (kind=r8) :: etadot_prescribed(np,np,nlevp) ! prescribed vertical tendency - real (kind=r8) :: u_met(np,np,nlev) ! zonal component of prescribed meteorology winds - real (kind=r8) :: dudt_met(np,np,nlev) ! rate of change of zonal component of prescribed meteorology winds - real (kind=r8) :: v_met(np,np,nlev) ! meridional component of prescribed meteorology winds - real (kind=r8) :: dvdt_met(np,np,nlev) ! rate of change of meridional component of prescribed meteorology winds - real (kind=r8) :: T_met(np,np,nlev) ! prescribed meteorology temperature - real (kind=r8) :: dTdt_met(np,np,nlev) ! rate of change of prescribed meteorology temperature - real (kind=r8) :: ps_met(np,np) ! surface pressure of prescribed meteorology - real (kind=r8) :: dpsdt_met(np,np) ! rate of change of surface pressure of prescribed meteorology - real (kind=r8) :: nudge_factor(np,np,nlev) ! nudging factor (prescribed) - real (kind=r8) :: Utnd(npsq,nlev) ! 
accumulated U tendency due to nudging towards prescribed met - real (kind=r8) :: Vtnd(npsq,nlev) ! accumulated V tendency due to nudging towards prescribed met - real (kind=r8) :: Ttnd(npsq,nlev) ! accumulated T tendency due to nudging towards prescribed met - - real (kind=r8) :: pecnd(np,np,nlev) ! pressure perturbation from condensate + real(kind=r8), allocatable :: FQ(:,:,:,:) ! tracer forcing + real(kind=r8), allocatable :: FM(:,:,:,:) ! momentum forcing + real(kind=r8), allocatable :: FDP(:,:,:) ! save full updated dp right after physics + real(kind=r8), allocatable :: FT(:,:,:) ! temperature forcing + real(kind=r8), allocatable :: etadot_prescribed(:,:,:) ! prescribed vertical tendency + real(kind=r8), allocatable :: u_met(:,:,:) ! zonal component of prescribed meteorology winds + real(kind=r8), allocatable :: dudt_met(:,:,:) ! rate of change of zonal component of prescribed meteorology winds + real(kind=r8), allocatable :: v_met(:,:,:) ! meridional component of prescribed meteorology winds + real(kind=r8), allocatable :: dvdt_met(:,:,:) ! rate of change of meridional component of prescribed meteorology winds + real(kind=r8), allocatable :: T_met(:,:,:) ! prescribed meteorology temperature + real(kind=r8), allocatable :: dTdt_met(:,:,:) ! rate of change of prescribed meteorology temperature + real(kind=r8), allocatable :: ps_met(:,:) ! surface pressure of prescribed meteorology + real(kind=r8), allocatable :: dpsdt_met(:,:) ! rate of change of surface pressure of prescribed meteorology + real(kind=r8), allocatable :: nudge_factor(:,:,:) ! nudging factor (prescribed) + real(kind=r8), allocatable :: Utnd(:,:) ! accumulated U tendency due to nudging towards prescribed met + real(kind=r8), allocatable :: Vtnd(:,:) ! accumulated V tendency due to nudging towards prescribed met + real(kind=r8), allocatable :: Ttnd(:,:) ! accumulated T tendency due to nudging towards prescribed met + + real(kind=r8), allocatable :: pecnd(:,:,:) ! pressure perturbation from condensate end type derived_state_t @@ -94,7 +95,7 @@ module element_mod ! ============= DATA-STRUCTURES COMMON TO ALL SOLVERS ================ type, public :: index_t - integer :: ia(npsq),ja(npsq) + integer, allocatable :: ia(:),ja(:) integer :: is,ie integer :: NumUniquePts integer :: UniquePtOffset @@ -106,43 +107,43 @@ module element_mod integer :: GlobalId ! Coordinate values of element points - type (spherical_polar_t) :: spherep(np,np) ! Spherical coords of GLL points + type(spherical_polar_t), allocatable :: spherep(:,:) ! Spherical coords of GLL points ! Equ-angular gnomonic projection coordinates - type (cartesian2D_t) :: cartp(np,np) ! gnomonic coords of GLL points - type (cartesian2D_t) :: corners(4) ! gnomonic coords of element corners - real (kind=r8) :: u2qmap(4,2) ! bilinear map from ref element to quad in cubedsphere coordinates + type(cartesian2D_t), allocatable :: cartp(:,:) ! gnomonic coords of GLL points + type(cartesian2D_t) :: corners(4) ! gnomonic coords of element corners + real(kind=r8) :: u2qmap(4,2) ! bilinear map from ref element to quad in cubedsphere coordinates ! SHOULD BE REMOVED ! 3D cartesian coordinates - type (cartesian3D_t) :: corners3D(4) + type(cartesian3D_t) :: corners3D(4) ! Element diagnostics - real (kind=r8) :: area ! Area of element - real (kind=r8) :: normDinv ! some type of norm of Dinv used for CFL - real (kind=r8) :: dx_short ! short length scale in km - real (kind=r8) :: dx_long ! long length scale in km + real(kind=r8) :: area ! Area of element + real(kind=r8) :: normDinv ! 
some type of norm of Dinv used for CFL + real(kind=r8) :: dx_short ! short length scale in km + real(kind=r8) :: dx_long ! long length scale in km - real (kind=r8) :: variable_hyperviscosity(np,np) ! hyperviscosity based on above - real (kind=r8) :: hv_courant ! hyperviscosity courant number - real (kind=r8) :: tensorVisc(np,np,2,2) !og, matrix V for tensor viscosity + real(kind=r8), allocatable :: variable_hyperviscosity(:,:) ! hyperviscosity based on above + real(kind=r8) :: hv_courant ! hyperviscosity courant number + real(kind=r8), allocatable :: tensorVisc(:,:,:,:) !og, matrix V for tensor viscosity ! Edge connectivity information ! integer :: node_numbers(4) ! integer :: node_multiplicity(4) ! number of elements sharing corner node - type (GridVertex_t) :: vertex ! element grid vertex information - type (EdgeDescriptor_t) :: desc + type(GridVertex_t) :: vertex ! element grid vertex information + type(EdgeDescriptor_t) :: desc - type (elem_state_t) :: state + type(elem_state_t) :: state - type (derived_state_t) :: derived + type(derived_state_t) :: derived ! Metric terms - real (kind=r8) :: met(np,np,2,2) ! metric tensor on velocity and pressure grid - real (kind=r8) :: metinv(np,np,2,2) ! metric tensor on velocity and pressure grid - real (kind=r8) :: metdet(np,np) ! g = SQRT(det(g_ij)) on velocity and pressure grid - real (kind=r8) :: rmetdet(np,np) ! 1/metdet on velocity pressure grid - real (kind=r8) :: D(np,np,2,2) ! Map covariant field on cube to vector field on the sphere - real (kind=r8) :: Dinv(np,np,2,2) ! Map vector field on the sphere to covariant v on cube + real(kind=r8), allocatable :: met(:,:,:,:) ! metric tensor on velocity and pressure grid + real(kind=r8), allocatable :: metinv(:,:,:,:) ! metric tensor on velocity and pressure grid + real(kind=r8), allocatable :: metdet(:,:) ! g = SQRT(det(g_ij)) on velocity and pressure grid + real(kind=r8), allocatable :: rmetdet(:,:) ! 1/metdet on velocity pressure grid + real(kind=r8), allocatable :: D(:,:,:,:) ! Map covariant field on cube to vector field on the sphere + real(kind=r8), allocatable :: Dinv(:,:,:,:) ! Map vector field on the sphere to covariant v on cube ! Mass flux across the sides of each sub-element. @@ -179,28 +180,28 @@ module element_mod ! | (1,1,1) | | | (4,1,1) | ! --------------------------------------------------------------- ! First Coordinate -------> - real (kind=r8) :: sub_elem_mass_flux(nc,nc,4,nlev) + real(kind=r8), allocatable :: sub_elem_mass_flux(:,:,:,:) ! Convert vector fields from spherical to rectangular components ! The transpose of this operation is its pseudoinverse. - real (kind=r8) :: vec_sphere2cart(np,np,3,2) + real(kind=r8), allocatable :: vec_sphere2cart(:,:,:,:) ! Mass matrix terms for an element on a cube face - real (kind=r8) :: mp(np,np) ! mass matrix on v and p grid - real (kind=r8) :: rmp(np,np) ! inverse mass matrix on v and p grid + real(kind=r8), allocatable :: mp(:,:) ! mass matrix on v and p grid + real(kind=r8), allocatable :: rmp(:,:) ! inverse mass matrix on v and p grid ! Mass matrix terms for an element on the sphere ! This mass matrix is used when solving the equations in weak form ! with the natural (surface area of the sphere) inner product - real (kind=r8) :: spheremp(np,np) ! mass matrix on v and p grid - real (kind=r8) :: rspheremp(np,np) ! inverse mass matrix on v and p grid + real(kind=r8), allocatable :: spheremp(:,:) ! mass matrix on v and p grid + real(kind=r8), allocatable :: rspheremp(:,:) ! 
inverse mass matrix on v and p grid
-    integer(i8) :: gdofP(np,np)        ! global degree of freedom (P-grid)
-
-    real (kind=r8) :: fcor(np,np)      ! Coreolis term
+    integer(i8), allocatable :: gdofP(:,:)     ! global degree of freedom (P-grid)
+
+    real(kind=r8), allocatable :: fcor(:,:)    ! Coriolis term
-    type (index_t) :: idxP
-    type (index_t),pointer :: idxV
+    type(index_t) :: idxP
+    type(index_t), pointer :: idxV
     integer :: FaceNum
     ! force element_t to be a multiple of 8 bytes.
@@ -216,12 +217,17 @@ module element_mod
   public :: element_var_coordinates3D
   public :: GetColumnIdP,GetColumnIdV
   public :: allocate_element_desc
+  public :: allocate_element_dims
   public :: PrintElem
+!==============================================================================
 contains
+!==============================================================================
   subroutine PrintElem(arr)
+    use dimensions_mod, only: np
+
     real(kind=r8) :: arr(:,:)
     integer :: i,j
@@ -350,21 +356,70 @@ end function element_var_coordinates3d
  !___________________________________________________________________
  subroutine allocate_element_desc(elem)
+    use dimensions_mod, only: max_neigh_edges
+
    type (element_t), intent(inout) :: elem(:)
-    integer                         :: num, j,i
+    integer                         :: num, j, i, iret
+
+    character(len=*), parameter :: subname = 'allocate_element_desc (SE)'
    num = SIZE(elem)
    do j=1,num
-       allocate(elem(j)%desc%putmapP(max_neigh_edges))
+       allocate(elem(j)%desc%putmapP(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%putmapP(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%getmapP(max_neigh_edges))
+       allocate(elem(j)%desc%getmapP(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%getmapP(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges))
+       allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%putmapP_ghost(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges))
+       allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%getmapP_ghost(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%putmapS(max_neigh_edges))
+       allocate(elem(j)%desc%putmapS(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%putmapS(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%getmapS(max_neigh_edges))
+       allocate(elem(j)%desc%getmapS(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%getmapS(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%reverse(max_neigh_edges))
+       allocate(elem(j)%desc%reverse(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%reverse(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%globalID(max_neigh_edges))
+       allocate(elem(j)%desc%globalID(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%globalID(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
-       allocate(elem(j)%desc%loc2buf(max_neigh_edges))
+       allocate(elem(j)%desc%loc2buf(max_neigh_edges), stat=iret)
+       if (iret /= 0) then
+          call endrun(subname//': allocate elem%desc%loc2buf(max_neigh_edges) failed with stat: '//&
+                      to_str(iret))
+       end if
+
       do i=1,max_neigh_edges
          elem(j)%desc%loc2buf(i)=i
          elem(j)%desc%globalID(i)=-1
@@ -373,5 +428,435 @@ subroutine allocate_element_desc(elem)
       end do
    end do
  end subroutine allocate_element_desc
+  !___________________________________________________________________
+  subroutine allocate_element_dims(elem)
+
+    ! 
Allocate the SE element arrays using the pre-calculated SE dimensions + + use dimensions_mod, only: np, nc, npsq, nlev, nlevp, qsize_d, ntrac_d + + !Dummy arguments: + type(element_t), intent(inout) :: elem(:) + + !Local arguments: + integer :: num, i, iret + + character(len=*), parameter :: subname = 'allocate_element_dims (SE)' + + !--------------- + + num = size(elem) + + do i=1,num + + !Coordinate values of element points: + allocate(elem(i)%spherep(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%spherep(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Gnomonic coords of GLL points: + allocate(elem(i)%cartp(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%cartp(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Variable Hyperviscosity: + allocate(elem(i)%variable_hyperviscosity(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%variable_hyperviscosity(np,np) failed with stat: '//& + to_str(iret)) + end if + + !og, matrix V for tensor viscosity: + allocate(elem(i)%tensorVisc(np,np,2,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%tensorVisc(np,np,2,2) failed with stat: '//& + to_str(iret)) + end if + + !Allocate "state" variables: + !-------------------------- + + ! velocity + allocate(elem(i)%state%v(np,np,2,nlev,timelevels), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%v(np,np,2,nlev,timelevels) failed with stat: '//& + to_str(iret)) + end if + + ! temperature + allocate(elem(i)%state%T(np,np,nlev,timelevels), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%T(np,np,nlev,timelevels) failed with stat: '//& + to_str(iret)) + end if + + ! dry delta p on levels + allocate(elem(i)%state%dp3d(np,np,nlev,timelevels), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%dp3d(np,np,nlev,timelevels) failed with stat: '//& + to_str(iret)) + end if + + ! dry surface pressure + allocate(elem(i)%state%psdry(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%psdry(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! surface geopotential (prescribed) + allocate(elem(i)%state%phis(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%phis(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! Tracer mass + allocate(elem(i)%state%Qdp(np,np,nlev,qsize_d,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%state%Qdp(np,np,nlev,qsize_d,2) failed with stat: '//& + to_str(iret)) + end if + !-------------------------- + + !Allocate "derived" variables: + !---------------------------- + + ! velocity for SE tracer advection + allocate(elem(i)%derived%vn0(np,np,2,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%vn0(np,np,2,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! mean dp dissipation tendency, if nu_p>0 + allocate(elem(i)%derived%dpdiss_biharmonic(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dpdiss_biharmonic(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! mean dp used to compute psdiss_tens + allocate(elem(i)%derived%dpdiss_ave(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dpdiss_ave(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! 
geopotential
+         allocate(elem(i)%derived%phi(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%phi(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! vertical velocity
+         allocate(elem(i)%derived%omega(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%omega(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! relative vorticity
+         allocate(elem(i)%derived%zeta(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%zeta(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! divergence
+         allocate(elem(i)%derived%div(np,np,nlev,timelevels), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%div(np,np,nlev,timelevels) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! for dp_tracers at physics timestep
+         allocate(elem(i)%derived%dp(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%dp(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! divergence of dp
+         allocate(elem(i)%derived%divdp(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%divdp(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! DSSed divdp
+         allocate(elem(i)%derived%divdp_proj(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%divdp_proj(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! total tracer mass for diagnostics
+         allocate(elem(i)%derived%mass(max(qsize_d,ntrac_d)+9), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%mass(max(qsize_d,ntrac_d)+9) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! tracer forcing
+         allocate(elem(i)%derived%FQ(np,np,nlev,qsize_d), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%FQ(np,np,nlev,qsize_d) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! momentum forcing
+         allocate(elem(i)%derived%FM(np,np,2,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%FM(np,np,2,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! save full updated dp right after physics
+         allocate(elem(i)%derived%FDP(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%FDP(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! temperature forcing
+         allocate(elem(i)%derived%FT(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%FT(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! prescribed vertical tendency
+         allocate(elem(i)%derived%etadot_prescribed(np,np,nlevp), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%etadot_prescribed(np,np,nlevp) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! zonal component of prescribed meteorology winds
+         allocate(elem(i)%derived%u_met(np,np,nlev), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate elem%derived%u_met(np,np,nlev) failed with stat: '//&
+                        to_str(iret))
+         end if
+
+         ! 
rate of change of zonal component of prescribed meteorology winds + allocate(elem(i)%derived%dudt_met(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dudt_met(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! meridional component of prescribed meteorology winds + allocate(elem(i)%derived%v_met(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%v_met(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! rate of change of meridional component of prescribed meteorology winds + allocate(elem(i)%derived%dvdt_met(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dvdt_met(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! prescribed meteorology temperature + allocate(elem(i)%derived%T_met(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%T_met(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! rate of change of prescribed meteorology temperature + allocate(elem(i)%derived%dTdt_met(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dTdt_met(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! surface pressure of prescribed meteorology + allocate(elem(i)%derived%ps_met(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%ps_met(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! rate of change of surface pressure of prescribed meteorology + allocate(elem(i)%derived%dpsdt_met(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%dpsdt_met(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! nudging factor (prescribed) + allocate(elem(i)%derived%nudge_factor(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%nudge_factor(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! accumulated U tendency due to nudging towards prescribed met + allocate(elem(i)%derived%Utnd(npsq,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%Utnd(npsq,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! accumulated V tendency due to nudging towards prescribed met + allocate(elem(i)%derived%Vtnd(npsq,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%Vtnd(npsq,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! accumulated T tendency due to nudging towards prescribed met + allocate(elem(i)%derived%Ttnd(npsq,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%Ttnd(npsq,nlev) failed with stat: '//& + to_str(iret)) + end if + + ! pressure perturbation from condensate + allocate(elem(i)%derived%pecnd(np,np,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%derived%pecnd(np,np,nlev) failed with stat: '//& + to_str(iret)) + end if + !---------------------------- + + !Allocate "Metric terms": + !----------------------- + + ! metric tensor on velocity and pressure grid + allocate(elem(i)%met(np,np,2,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%met(np,np,2,2) failed with stat: '//& + to_str(iret)) + end if + + ! 
metric tensor on velocity and pressure grid + allocate(elem(i)%metinv(np,np,2,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%metinv(np,np,2,2) failed with stat: '//& + to_str(iret)) + end if + + ! g = SQRT(det(g_ij)) on velocity and pressure grid + allocate(elem(i)%metdet(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%metdet(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! 1/metdet on velocity pressure grid + allocate(elem(i)%rmetdet(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%rmetdet(np,np) failed with stat: '//& + to_str(iret)) + end if + + ! Map covariant field on cube to vector field on the sphere + allocate(elem(i)%D(np,np,2,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%D(np,np,2,2) failed with stat: '//& + to_str(iret)) + end if + + ! Map vector field on the sphere to covariant v on cube + allocate(elem(i)%Dinv(np,np,2,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%Dinv(np,np,2,2) failed with stat: '//& + to_str(iret)) + end if + !----------------------- + + !First Coordinate: + allocate(elem(i)%sub_elem_mass_flux(nc,nc,4,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%sub_elem_mass_flux(nc,nc,4,nlev) failed with stat: '//& + to_str(iret)) + end if + + !Spherical -> rectangular converter: + allocate(elem(i)%vec_sphere2cart(np,np,3,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%vec_sphere2cart(np,np,3,2) failed with stat: '//& + to_str(iret)) + end if + + !Mass matrix on v and p grid: + allocate(elem(i)%mp(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%mp(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Inverse mass matrix on v and p grid: + allocate(elem(i)%rmp(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%rmp(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Mass matrix on v and p grid: + allocate(elem(i)%spheremp(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%spheremp(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Inverse mass matrix on v and p grid: + allocate(elem(i)%rspheremp(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%rspheremp(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Global degree of freedom (P-grid): + allocate(elem(i)%gdofP(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%gdofP(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Coriolis term: + allocate(elem(i)%fcor(np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%fcor(np,np) failed with stat: '//& + to_str(iret)) + end if + + !Index terms: + !----------- + allocate(elem(i)%idxP%ia(npsq), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%idxP%ia(npsq) failed with stat: '//& + to_str(iret)) + end if + + allocate(elem(i)%idxP%ja(npsq), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%idxP%ja(npsq) failed with stat: '//& + to_str(iret)) + end if + !----------- + + end do + + end subroutine allocate_element_dims + end module element_mod diff --git a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 index c1b3c6fc..88232737 100644 --- a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 +++ b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 
@@ -8,7 +8,7 @@ ! interpolation points for the reconstruction (projection from one face to another ! ! when the element is on the cube edge) ! ! It also intialize the start values, see also fvm_analytic ! -!-----------------------------------------------------------------------------------! +!-----------------------------------------------------------------------------------! module fvm_control_volume_mod use shr_kind_mod, only: r8=>shr_kind_r8 use coordinate_systems_mod, only: spherical_polar_t @@ -16,35 +16,36 @@ module fvm_control_volume_mod use dimensions_mod, only: nc, nhe, nlev, ntrac_d, qsize_d,ne, np, nhr, ns, nhc use dimensions_mod, only: fv_nphys, nhe_phys, nhr_phys, ns_phys, nhc_phys,fv_nphys use dimensions_mod, only: irecons_tracer + use string_utils, only: to_str use cam_abortutils, only: endrun implicit none private - integer, parameter, private:: nh = nhr+(nhe-1) ! = 2 (nhr=2; nhe=1) - ! = 3 (nhr=2; nhe=2) + integer :: nh ! = 2 (nhr=2; nhe=1) + ! = 3 (nhr=2; nhe=2) type, public :: fvm_struct ! fvm tracer mixing ratio: (kg/kg) - real (kind=r8) :: c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d) - real (kind=r8) :: se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev) + real(kind=r8), allocatable :: c(:,:,:,:) + real(kind=r8), allocatable :: se_flux(:,:,:,:) - real (kind=r8) :: dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev) - real (kind=r8) :: dp_ref(nlev) - real (kind=r8) :: dp_ref_inverse(nlev) - real (kind=r8) :: psc(nc,nc) + real(kind=r8), allocatable :: dp_fvm(:,:,:) + real(kind=r8), allocatable :: dp_ref(:) + real(kind=r8), allocatable :: dp_ref_inverse(:) + real(kind=r8), allocatable :: psc(:,:) - real (kind=r8) :: inv_area_sphere(nc,nc) ! inverse area_sphere - real (kind=r8) :: inv_se_area_sphere(nc,nc) ! inverse area_sphere + real(kind=r8), allocatable :: inv_area_sphere(:,:) ! inverse area_sphere + real(kind=r8), allocatable :: inv_se_area_sphere(:,:) ! inverse area_sphere integer :: faceno !face number - ! number of south,....,swest and 0 for interior element - integer :: cubeboundary + ! number of south,....,swest and 0 for interior element + integer :: cubeboundary #ifdef waccm_debug - real (kind=r8) :: CSLAM_gamma(nc,nc,nlev,4) -#endif - real (kind=r8) :: displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4) - integer :: flux_vec (2,1-nhc:nc+nhc,1-nhc:nc+nhc,4) + real(kind=r8), allocatable :: CSLAM_gamma(:,:,:,:) +#endif + real(kind=r8), allocatable :: displ_max(:,:,:) + integer, allocatable :: flux_vec(:,:,:,:) ! ! ! cartesian location of vertices for flux sides @@ -61,44 +62,44 @@ module fvm_control_volume_mod ! x-coordinate of vertex 4: vtx_cart(4,1,i,j) = fvm%acartx(i ) ! y-coordinate of vertex 4: vtx_cart(4,2,i,j) = fvm%acarty(j+1) ! - real (kind=r8) :: vtx_cart (4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) + real(kind=r8), allocatable :: vtx_cart(:,:,:,:) ! ! flux_orient(1,i,j) = panel on which control volume (i,j) is located ! flux_orient(2,i,j) = cshift value for vertex permutation ! - real (kind=r8) :: flux_orient(2 ,1-nhc:nc+nhc,1-nhc:nc+nhc) + real(kind=r8), allocatable :: flux_orient(:,:,:) ! ! i,j: indicator function for non-existent cells (0 for corner halo and 1 elsewhere) ! - integer :: ifct (1-nhc:nc+nhc,1-nhc:nc+nhc) - integer :: rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) - ! - real (kind=r8) :: dalpha, dbeta ! central-angle for gnomonic coordinates - type (spherical_polar_t) :: center_cart(nc,nc) ! center of fvm cell in gnomonic coordinates - real (kind=r8) :: area_sphere(nc,nc) ! spherical area of fvm cell - real (kind=r8) :: spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc) ! 
centroids + integer, allocatable :: ifct(:,:) + integer, allocatable :: rot_matrix(:,:,:,:) + ! + real(kind=r8) :: dalpha, dbeta ! central-angle for gnomonic coordinates + type(spherical_polar_t), allocatable :: center_cart(:,:) ! center of fvm cell in gnomonic coordinates + real(kind=r8), allocatable :: area_sphere(:,:) ! spherical area of fvm cell + real(kind=r8), allocatable :: spherecentroid(:,:,:) ! centroids ! ! pre-computed metric terms (for efficiency) ! ! recons_metrics(1,:,:) = spherecentroid(1,:,:)**2 -spherecentroid(3,:,:) ! recons_metrics(2,:,:) = spherecentroid(2,:,:)**2 -spherecentroid(4,:,:) ! recons_metrics(3,:,:) = spherecentroid(1,:,:)*spherecentroid(2,:,:)-spherecentroid(5,:,:) - ! - real (kind=r8) :: recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + + real(kind=r8), allocatable :: recons_metrics(:,:,:) ! ! recons_metrics_integral(1,:,:) = 2.0_r8*spherecentroid(1,:,:)**2 -spherecentroid(3,:,:) ! recons_metrics_integral(2,:,:) = 2.0_r8*spherecentroid(2,:,:)**2 -spherecentroid(4,:,:) ! recons_metrics_integral(3,:,:) = 2.0_r8*spherecentroid(1,:,:)*spherecentroid(2,:,:)-spherecentroid(5,:,:) ! - real (kind=r8) :: recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe) + real(kind=r8), allocatable :: recons_metrics_integral(:,:,:) ! - integer :: jx_min(3), jx_max(3), jy_min(3), jy_max(3) !bounds for computation + integer :: jx_min(3), jx_max(3), jy_min(3), jy_max(3) !bounds for computation - ! provide fixed interpolation points with respect to the arrival grid for - ! reconstruction - integer :: ibase(1-nh:nc+nh,1:nhr,2) - real (kind=r8) :: halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) - real (kind=r8) :: centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe) !for finite-difference reconstruction + ! provide fixed interpolation points with respect to the arrival grid for + ! reconstruction + integer, allocatable :: ibase(:,:,:) + real(kind=r8), allocatable :: halo_interp_weight(:,:,:,:) + real(kind=r8), allocatable :: centroid_stretch(:,:,:) !for finite-difference reconstruction ! ! pre-compute weights for reconstruction at cell vertices ! @@ -115,12 +116,12 @@ module fvm_control_volume_mod ! recons(3,a,b) * (cartx - centroid(1,a,b))**2 + & ! recons(4,a,b) * (carty - centroid(2,a,b))**2 + & ! recons(5,a,b) * (cartx - centroid(1,a,b)) * (carty - centroid(2,a,b)) - ! - real (kind=r8) :: vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe) + ! + real(kind=r8), allocatable :: vertex_recons_weights(:,:,:,:) ! ! for mapping fvm2dyn ! - real (kind=r8) :: norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc) + real(kind=r8), allocatable :: norm_elem_coord(:,:,:) ! !****************************************** ! @@ -128,57 +129,60 @@ module fvm_control_volume_mod ! !****************************************** ! 
- real (kind=r8) , allocatable :: phis_physgrid(:,:) - real (kind=r8) , allocatable :: vtx_cart_physgrid(:,:,:,:) - real (kind=r8) , allocatable :: flux_orient_physgrid(:,:,:) - integer , allocatable :: ifct_physgrid(:,:) - integer , allocatable :: rot_matrix_physgrid(:,:,:,:) - real (kind=r8) , allocatable :: spherecentroid_physgrid(:,:,:) - real (kind=r8) , allocatable :: recons_metrics_physgrid(:,:,:) - real (kind=r8) , allocatable :: recons_metrics_integral_physgrid(:,:,:) + real(kind=r8) , allocatable :: phis_physgrid(:,:) + real(kind=r8) , allocatable :: vtx_cart_physgrid(:,:,:,:) + real(kind=r8) , allocatable :: flux_orient_physgrid(:,:,:) + integer , allocatable :: ifct_physgrid(:,:) + integer , allocatable :: rot_matrix_physgrid(:,:,:,:) + real(kind=r8) , allocatable :: spherecentroid_physgrid(:,:,:) + real(kind=r8) , allocatable :: recons_metrics_physgrid(:,:,:) + real(kind=r8) , allocatable :: recons_metrics_integral_physgrid(:,:,:) ! centroid_stretch_physgrid for finite-difference reconstruction - real (kind=r8) , allocatable :: centroid_stretch_physgrid (:,:,:) - real (kind=r8) :: dalpha_physgrid, dbeta_physgrid ! central-angle for gnomonic coordinates - type (spherical_polar_t) , allocatable :: center_cart_physgrid(:,:) ! center of fvm cell in gnomonic coordinates - real (kind=r8) , allocatable :: area_sphere_physgrid(:,:) ! spherical area of fvm cell - integer :: jx_min_physgrid(3), jx_max_physgrid(3) !bounds for computation - integer :: jy_min_physgrid(3), jy_max_physgrid(3) !bounds for computation - integer , allocatable :: ibase_physgrid(:,:,:) - real (kind=r8) , allocatable :: halo_interp_weight_physgrid(:,:,:,:) - real (kind=r8) , allocatable :: vertex_recons_weights_physgrid(:,:,:,:) - - real (kind=r8) , allocatable :: norm_elem_coord_physgrid(:,:,:) - real (kind=r8) , allocatable :: Dinv_physgrid(:,:,:,:) - - real (kind=r8) , allocatable :: fc(:,:,:,:) - real (kind=r8) , allocatable :: fc_phys(:,:,:,:) - real (kind=r8) , allocatable :: ft(:,:,:) - real (kind=r8) , allocatable :: fm(:,:,:,:) - real (kind=r8) , allocatable :: dp_phys(:,:,:) + real(kind=r8) , allocatable :: centroid_stretch_physgrid (:,:,:) + real(kind=r8) :: dalpha_physgrid, dbeta_physgrid ! central-angle for gnomonic coordinates + type(spherical_polar_t) , allocatable :: center_cart_physgrid(:,:) ! center of fvm cell in gnomonic coordinates + real(kind=r8) , allocatable :: area_sphere_physgrid(:,:) ! 
spherical area of fvm cell + integer :: jx_min_physgrid(3), jx_max_physgrid(3) !bounds for computation + integer :: jy_min_physgrid(3), jy_max_physgrid(3) !bounds for computation + integer , allocatable :: ibase_physgrid(:,:,:) + real(kind=r8) , allocatable :: halo_interp_weight_physgrid(:,:,:,:) + real(kind=r8) , allocatable :: vertex_recons_weights_physgrid(:,:,:,:) + + real(kind=r8) , allocatable :: norm_elem_coord_physgrid(:,:,:) + real(kind=r8) , allocatable :: Dinv_physgrid(:,:,:,:) + + real(kind=r8) , allocatable :: fc(:,:,:,:) + real(kind=r8) , allocatable :: fc_phys(:,:,:,:) + real(kind=r8) , allocatable :: ft(:,:,:) + real(kind=r8) , allocatable :: fm(:,:,:,:) + real(kind=r8) , allocatable :: dp_phys(:,:,:) end type fvm_struct public :: fvm_mesh, fvm_set_cubeboundary, allocate_physgrid_vars + public :: allocate_fvm_dims - - real (kind=r8),parameter, public :: bignum = 1.0E20_r8 + real(kind=r8),parameter, public :: bignum = 1.0E20_r8 +!============================================================================== contains +!============================================================================== + subroutine fvm_set_cubeboundary(elem, fvm) implicit none type (element_t) , intent(in) :: elem type (fvm_struct), intent(inout) :: fvm - + logical :: corner integer :: j, mynbr_cnt, mystart - integer :: nbrsface(8)! store the neighbours in north, south - + integer :: nbrsface(8)! store the neighbours in north, south + fvm%faceno=elem%FaceNum ! write the neighbors in the structure fvm%cubeboundary=0 corner=.FALSE. do j=1,8 - mynbr_cnt = elem%vertex%nbrs_ptr(j+1) - elem%vertex%nbrs_ptr(j) !length of neighbor location - mystart = elem%vertex%nbrs_ptr(j) + mynbr_cnt = elem%vertex%nbrs_ptr(j+1) - elem%vertex%nbrs_ptr(j) !length of neighbor location + mystart = elem%vertex%nbrs_ptr(j) !NOTE: assuming that we do not have multiple corner neighbors (so not a refined mesh) if (mynbr_cnt > 0 ) then nbrsface(j)=elem%vertex%nbrs_face(mystart) @@ -218,7 +222,7 @@ subroutine fvm_mesh(elem, fvm) real (kind=r8) :: tmp(np,np) ! ! initialize metric and related terms on panel - ! + ! 
call compute_halo_vars(& !input fvm%faceno,fvm%cubeboundary,nc,nhc,nhe, & !input fvm%jx_min,fvm%jx_max,fvm%jy_min,fvm%jy_max,&!output @@ -266,7 +270,9 @@ subroutine allocate_physgrid_vars(fvm,par) use dimensions_mod, only : nelemd type (fvm_struct), intent(inout) :: fvm(:) type (parallel_t), intent(in) :: par - integer :: ie + integer :: ie, iret + + character(len=*), parameter :: subname = 'allocate_physgrid_vars (SE)' nhc_phys = fv_nphys nhe_phys = 0 @@ -280,32 +286,340 @@ subroutine allocate_physgrid_vars(fvm,par) end if do ie=1,nelemd - allocate(fvm(ie)%phis_physgrid (fv_nphys,fv_nphys)) - allocate(fvm(ie)%vtx_cart_physgrid (4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) - allocate(fvm(ie)%flux_orient_physgrid (2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) - allocate(fvm(ie)%ifct_physgrid (1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) - allocate(fvm(ie)%rot_matrix_physgrid (2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) + allocate(fvm(ie)%phis_physgrid (fv_nphys,fv_nphys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%phis_physgrid(fv_nphys,fv_nphys) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%vtx_cart_physgrid (4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%vtx_cart_physgrid(4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%flux_orient_physgrid (2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%flux_orient_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%ifct_physgrid (1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%ifct_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%rot_matrix_physgrid (2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%rot_matrix_physgrid(2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& + ' failed with stat: '//to_str(iret)) + end if allocate(fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,& - 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) - allocate(fvm(ie)%recons_metrics_physgrid (3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) - allocate(fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) - allocate(fvm(ie)%centroid_stretch_physgrid (7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)) - allocate(fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys)) - allocate(fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys)) - allocate(fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)) - allocate(fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)) + 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + + 
allocate(fvm(ie)%recons_metrics_physgrid (3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%recons_metrics_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%centroid_stretch_physgrid (7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%centroid_stretch_physgrid(7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys) failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys) failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
          allocate(fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,&
-              1-nhe_phys:fv_nphys+nhe_phys))
-
+              1-nhe_phys:fv_nphys+nhe_phys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,'//&
+                 '1-nhe_phys:fv_nphys+nhe_phys) failed with stat: '//to_str(iret))
+         end if
+
-         allocate(fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys ))
-         allocate(fvm(ie)%Dinv_physgrid ( 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2))
-
-         allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)))
-         allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d)))
-         allocate(fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev))
-         allocate(fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev))
-         allocate(fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev))
+         allocate(fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+                 'fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//&
+                 ' failed with stat: '//to_str(iret))
+         end if
+
+         allocate(fvm(ie)%Dinv_physgrid ( 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2), stat=iret)
+         if (iret /= 0) then
+            call endrun(subname//': allocate '//&
+
'fvm(ie)%Dinv_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)) failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d))'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)'//& + ' failed with stat: '//to_str(iret)) + end if + end do end subroutine allocate_physgrid_vars + + !====================== + + subroutine allocate_fvm_dims(fvm) + + ! Allocate the SE FVM arrays using the pre-calculated SE dimensions + + use dimensions_mod, only: nelemd + + !Dummy arguments: + type(fvm_struct), intent(inout) :: fvm(:) + + !Local arguments + integer :: ie, iret + + character(len=*), parameter :: subname = 'allocate_fvm_dims (SE)' + + !--------------- + + !Set "nh" integer: + nh = nhr+(nhe-1) + + do ie=1,nelemd + + !fvm tracer mixing ratio: + allocate(fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%dp_ref(nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%dp_ref(nlev) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%dp_ref_inverse(nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%dp_ref_inverse(nlev) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%psc(nc,nc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%psc(nc,nc) failed with stat: '//& + to_str(iret)) + end if + + ! inverse area_sphere + allocate(fvm(ie)%inv_area_sphere(nc,nc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%inv_area_sphere(nc,nc) failed with stat: '//& + to_str(iret)) + end if + + ! 
inverse area_sphere + allocate(fvm(ie)%inv_se_area_sphere(nc,nc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%inv_se_area_sphere(nc,nc) failed with stat: '//& + to_str(iret)) + end if + +#ifdef waccm_debug + allocate(fvm(ie)%CSLAM_gamma(nc,nc,nlev,4), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%CSLAM_gamma(nc,nc,nlev,4) failed with stat: '//& + to_str(iret)) + end if +#endif + + allocate(fvm(ie)%displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%flux_vec(2,1-nhc:nc+nhc,1-nhc:nc+nhc,4), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%flux_vec(2,1-nhc:nc+nhc,1-nhc:nc+nhc,4) failed with stat: '//& + to_str(iret)) + end if + + ! cartesian location of vertices for flux sides + allocate(fvm(ie)%vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%flux_orient(2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%flux_orient(2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& + to_str(iret)) + end if + + ! indicator function for non-existent cells + allocate(fvm(ie)%ifct(1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%ifct(1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& + to_str(iret)) + end if + + ! center of fvm cell in gnomonic coordinates + allocate(fvm(ie)%center_cart(nc,nc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%center_cart(nc,nc) failed with stat: '//& + to_str(iret)) + end if + + ! spherical area of fvm cell + allocate(fvm(ie)%area_sphere(nc,nc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%area_sphere(nc,nc) failed with stat: '//& + to_str(iret)) + end if + + ! centroids + allocate(fvm(ie)%spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc)'//& + ' failed with stat: '//to_str(iret)) + end if + + ! pre-computed metric terms (for efficiency) + allocate(fvm(ie)%recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fvm(ie)%recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& + ' failed with stat: '//to_str(iret)) + end if + + ! 
provide fixed interpolation points with respect to the arrival grid for reconstruction + allocate(fvm(ie)%ibase(1-nh:nc+nh,1:nhr,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%ibase(1-nh:nc+nh,1:nhr,2) failed with stat: '//& + to_str(iret)) + end if + + allocate(fvm(ie)%halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) failed with stat: '//& + to_str(iret)) + end if + + ! for finite-difference reconstruction + allocate(fvm(ie)%centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& + ' failed with stat: '//to_str(iret)) + end if + + ! pre-compute weights for reconstruction at cell vertices + allocate(fvm(ie)%vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fvm(ie)%vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& + ' failed with stat: '//to_str(iret)) + end if + + ! for mapping fvm2dyn + allocate(fvm(ie)%norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fvm(ie)%norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc)'//& + ' failed with stat: '//to_str(iret)) + end if + + end do + + end subroutine allocate_fvm_dims + + !====================== + end module fvm_control_volume_mod diff --git a/src/dynamics/se/dycore/hybvcoord_mod.F90 b/src/dynamics/se/dycore/hybvcoord_mod.F90 index 641a255e..4a319fbe 100644 --- a/src/dynamics/se/dycore/hybvcoord_mod.F90 +++ b/src/dynamics/se/dycore/hybvcoord_mod.F90 @@ -13,16 +13,17 @@ module hybvcoord_mod ! midpoints p(k) = hyam(k)*ps0 + hybm(k)*ps !----------------------------------------------------------------------- type, public :: hvcoord_t - real(r8) ps0 ! base state surface-pressure for level definitions - real(r8) hyai(plevp) ! ps0 component of hybrid coordinate - interfaces - real(r8) hyam(plev) ! ps0 component of hybrid coordinate - midpoints - real(r8) hybi(plevp) ! ps component of hybrid coordinate - interfaces - real(r8) hybm(plev) ! ps component of hybrid coordinate - midpoints - real(r8) hybd(plev) ! difference in b (hybi) across layers - real(r8) prsfac ! log pressure extrapolation factor (time, space independent) - real(r8) etam(plev) ! eta-levels at midpoints - real(r8) etai(plevp) ! eta-levels at interfaces - integer nprlev ! number of pure pressure levels at top - integer pad + real(r8) :: ps0 ! base state surface-pressure for level definitions + real(r8), allocatable :: hyai(:) ! ps0 component of hybrid coordinate - interfaces + real(r8), allocatable :: hyam(:) ! ps0 component of hybrid coordinate - midpoints + real(r8), allocatable :: hybi(:) ! ps component of hybrid coordinate - interfaces + real(r8), allocatable :: hybm(:) ! ps component of hybrid coordinate - midpoints + real(r8), allocatable :: hybd(:) ! difference in b (hybi) across layers + real(r8) :: prsfac ! log pressure extrapolation factor (time, space independent) + real(r8), allocatable :: etam(:) ! eta-levels at midpoints + real(r8), allocatable :: etai(:) ! eta-levels at interfaces + integer :: nprlev ! 
number of pure pressure levels at top + integer :: pad end type hvcoord_t + end module hybvcoord_mod diff --git a/src/dynamics/se/dycore/parallel_mod.F90 b/src/dynamics/se/dycore/parallel_mod.F90 index 88b96388..d36daa86 100644 --- a/src/dynamics/se/dycore/parallel_mod.F90 +++ b/src/dynamics/se/dycore/parallel_mod.F90 @@ -2,7 +2,7 @@ module parallel_mod ! --------------------------- use shr_kind_mod, only: r8=>shr_kind_r8 ! --------------------------- - use dimensions_mod, only : nmpi_per_node, nlev, qsize_d, ntrac_d + use dimensions_mod, only : nmpi_per_node ! --------------------------- use mpi, only: MPI_STATUS_SIZE, MPI_MAX_ERROR_STRING, MPI_TAG_UB @@ -21,7 +21,7 @@ module parallel_mod integer, public, parameter :: HME_BNDRY_A2A = 3 integer, public, parameter :: HME_BNDRY_A2AO = 4 - integer, public, parameter :: nrepro_vars = MAX(10, nlev*qsize_d, nlev*ntrac_d) + integer, public, protected :: nrepro_vars integer, public :: MaxNumberFrames integer, public :: numframes @@ -43,7 +43,7 @@ module parallel_mod integer, public :: nPackPoints real(r8), public, allocatable :: global_shared_buf(:,:) - real(r8), public :: global_shared_sum(nrepro_vars) + real(r8), public, allocatable :: global_shared_sum(:) ! ================================================== ! Define type parallel_t for distributed memory info @@ -113,6 +113,8 @@ function initmpi(npes_homme) result(par) use spmd_utils, only: mpicom, iam, npes use mpi, only: MPI_COMM_NULL, MPI_MAX_PROCESSOR_NAME use mpi, only: MPI_CHARACTER, MPI_INTEGER, MPI_BAND + use dimensions_mod, only: nlev, qsize_d, ntrac_d + use string_utils, only: to_str integer, intent(in) :: npes_homme @@ -127,6 +129,9 @@ function initmpi(npes_homme) result(par) integer, allocatable :: tarray(:) integer :: namelen, i integer :: color + integer :: iret + + character(len=*), parameter :: subname = 'initmpi (SE)' !================================================ ! Basic MPI initialization @@ -143,6 +148,16 @@ function initmpi(npes_homme) result(par) nmpi_per_node = 2 PartitionForNodes = .TRUE. + ! Initialize number of SE dycore variables used in repro_sum: + nrepro_vars = MAX(10, nlev*qsize_d, nlev*ntrac_d) + + ! Allocate repro_sum variable: + allocate(global_shared_sum(nrepro_vars), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate global_shared_sum(nrepro_vars) failed with stat: '//& + to_str(iret)) + end if + ! 
The SE dycore needs to split from CAM communicator for npes > par%nprocs color = iam / npes_homme call mpi_comm_split(mpicom, color, iam, par%comm, ierr) @@ -150,7 +165,7 @@ function initmpi(npes_homme) result(par) call MPI_comm_size(par%comm, par%nprocs, ierr) call MPI_comm_rank(par%comm, par%rank, ierr) if ( par%nprocs /= npes_homme) then - call endrun('INITMPI: SE communicator count mismatch') + call endrun(subname//': SE communicator count mismatch') end if if(par%rank == par%root) then @@ -175,7 +190,12 @@ function initmpi(npes_homme) result(par) my_name(:) = '' call MPI_Get_Processor_Name(my_name, namelen, ierr) - allocate(the_names(par%nprocs)) + allocate(the_names(par%nprocs), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate the_names(par%nprocs) failed with stat: '//& + to_str(iret)) + end if + do i = 1, par%nprocs the_names(i)(:) = '' end do @@ -203,7 +223,7 @@ function initmpi(npes_homme) result(par) call MPI_Allreduce(nmpi_per_node,tmp,1,MPI_INTEGER,MPI_BAND,par%comm,ierr) if(tmp /= nmpi_per_node) then if (par%masterproc) then - write(iulog,*)'initmpi: disagrement accross nodes for nmpi_per_node' + write(iulog,*) subname//': disagrement accross nodes for nmpi_per_node' end if nmpi_per_node = 1 PartitionForNodes = .FALSE. @@ -212,7 +232,7 @@ function initmpi(npes_homme) result(par) end if if(PartitionForFrames .and. par%masterproc) then - write(iulog,*)'initmpi: FrameWeight: ', FrameWeight + write(iulog,*) subname//': FrameWeight: ', FrameWeight end if deallocate(the_names) diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index 450bbafb..ed015f66 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -27,7 +27,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use dimensions_mod, only: np, nlev, nelem, nelemd, nelemdmax use dimensions_mod, only: GlobalUniqueCols, fv_nphys,irecons_tracer use control_mod, only: topology, partmethod - use element_mod, only: element_t, allocate_element_desc + use element_mod, only: element_t, allocate_element_dims, allocate_element_desc use fvm_mod, only: fvm_init1 use mesh_mod, only: MeshUseMeshFile use time_mod, only: timelevel_init, timelevel_t @@ -55,7 +55,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use reduction_mod, only: red_sum, red_sum_int, initreductionbuffer use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc use fvm_analytic_mod, only: compute_basic_coordinate_vars - use fvm_control_volume_mod, only: fvm_struct, allocate_physgrid_vars + use fvm_control_volume_mod, only: fvm_struct, allocate_physgrid_vars, allocate_fvm_dims type(element_t), pointer :: elem(:) type(fvm_struct), pointer :: fvm(:) @@ -167,11 +167,13 @@ subroutine prim_init1(elem, fvm, par, Tl) if (nelemd > 0) then allocate(elem(nelemd)) + call allocate_element_dims(elem) call allocate_element_desc(elem) end if if (fv_nphys > 0) then allocate(fvm(nelemd)) + call allocate_fvm_dims(fvm) call allocate_physgrid_vars(fvm,par) else ! 
Even if fvm not needed, still desirable to allocate it as empty diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 index e09664fb..460048da 100644 --- a/src/dynamics/se/dycore/prim_state_mod.F90 +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -28,7 +28,9 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) use cam_control_mod, only: initial_run use time_mod, only: tstep use control_mod, only: rsplit, qsplit - use perf_mod, only: t_startf, t_stopf + use perf_mod, only: t_startf, t_stopf + use cam_abortutils, only: endrun + use string_utils, only: to_str type (element_t), intent(inout) :: elem(:) type (TimeLevel_t), target, intent(in) :: tl type (hybrid_t), intent(in) :: hybrid @@ -36,24 +38,85 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) type(fvm_struct), intent(inout) :: fvm(:) real (kind=r8), optional, intent(in) :: omega_cn(2,nets:nete) ! Local variables... - integer :: k,ie,m_cnst + integer :: k,ie,m_cnst,iret integer, parameter :: type=ORDERED - integer, parameter :: vmax=11+2*MAX(qsize_d,ntrac_d) + integer :: vmax - character(len=10) :: varname(vmax) + character(len=10), allocatable :: varname(:) - real (kind=r8), dimension(nets:nete,vmax) :: min_local,max_local - real (kind=r8), dimension(vmax) :: min_p,max_p,mass,mass_chg - real (kind=r8), dimension(np,np,nets:nete):: moist_ps - real (kind=r8), dimension(nc,nc,nets:nete):: moist_ps_fvm + real(kind=r8), allocatable :: min_local(:,:) + real(kind=r8), allocatable :: max_local(:,:) + real(kind=r8), allocatable :: min_p(:) + real(kind=r8), allocatable :: max_p(:) + real(kind=r8), allocatable :: mass(:) + real(kind=r8), allocatable :: mass_chg(:) + real(kind=r8), dimension(np,np,nets:nete):: moist_ps + real(kind=r8), dimension(nc,nc,nets:nete):: moist_ps_fvm - real (kind=r8) :: tmp_gll(np,np,vmax,nets:nete),tmp_mass(vmax)! 
- real (kind=r8) :: tmp_fvm(nc,nc,vmax,nets:nete) - real (kind=r8) :: tmp_q(np,np,nlev) + real(kind=r8), allocatable :: tmp_gll(:,:,:,:) + real(kind=r8), allocatable :: tmp_mass(:) + real(kind=r8), allocatable :: tmp_fvm(:,:,:,:) + real(kind=r8) :: tmp_q(np,np,nlev) integer :: n0, n0_qdp, q, nm, nm2 real(kind=r8) :: da_gll(np,np,nets:nete),da_fvm(nc,nc,nets:nete) + character(len=*), parameter :: subname = 'prim_printstate (SE)' + + !Allocate tracer-dimensioned variables: + !------------- + vmax = 11+2*max(qsize_d,ntrac_d) + allocate(varname(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate varname(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(min_local(nets:nete, vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate min_local(nets:nete,vmax) failed with stat: '//to_str(iret)) + end if + + allocate(max_local(nets:nete, vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate max_local(nets:nete,vmax) failed with stat: '//to_str(iret)) + end if + + allocate(min_p(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate min_p(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(max_p(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate max_p(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(mass(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mass(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(mass_chg(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mass_chg(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(tmp_gll(np,np,vmax,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate tmp_gll(np,np,vmax,nets:nete) failed with stat: '//to_str(iret)) + end if + + allocate(tmp_mass(vmax), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate tmp_mass(vmax) failed with stat: '//to_str(iret)) + end if + + allocate(tmp_fvm(nc,nc,vmax,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate tmp_fvm(nc,nc,vmax,nets:nete) failed with stat: '//to_str(iret)) + end if + !------------- + !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep if (hybrid%masterthread) then write(iulog,*) "nstep=",tl%nstep," time=",Time_at(tl%nstep)/(24*3600)," [day]" diff --git a/src/dynamics/se/dycore/vertremap_mod.F90 b/src/dynamics/se/dycore/vertremap_mod.F90 index 3b57fd89..d0c88432 100644 --- a/src/dynamics/se/dycore/vertremap_mod.F90 +++ b/src/dynamics/se/dycore/vertremap_mod.F90 @@ -17,7 +17,6 @@ module vertremap_mod use shr_kind_mod, only: r8=>shr_kind_r8 use dimensions_mod, only: np,nlev,qsize,nlevp,npsq,nc - use hybvcoord_mod, only: hvcoord_t use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use perf_mod, only: t_startf, t_stopf ! _EXTERNAL diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index c901ace0..45b3ed10 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -127,7 +127,7 @@ subroutine model_grid_init() ! decomposition based on the dynamics (SE) grid. 
use mpi, only: mpi_max - use vert_coord, only: vert_coord_init, pver + use vert_coord, only: vert_coord_init, pver, pverp use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 use physconst, only: thermodynamic_active_species_num @@ -145,7 +145,7 @@ subroutine model_grid_init() use control_mod, only: qsplit, rsplit use time_mod, only: tstep, nsplit use fvm_mod, only: fvm_init2, fvm_init3, fvm_pg_init - use dimensions_mod, only: irecons_tracer + use dimensions_mod, only: irecons_tracer, dimensions_mod_init use comp_gll_ctr_vol, only: gll_grid_write ! Local variables @@ -178,9 +178,58 @@ subroutine model_grid_init() ! Set vertical coordinate information not provided by namelist: call vert_coord_init(1, pver) + ! Initialize SE-dycore specific variables: + call dimensions_mod_init() + ! Initialize hybrid coordinate arrays call hycoef_init(fh_ini, psdry=.true.) + !Allocate SE dycore "hvcoord" structure: + !+++++++ + allocate(hvcoord%hyai(pverp), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%hyai(pverp) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%hyam(pver), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%hyam(pver) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%hybi(pverp), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%hybi(pverp) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%hybm(pver), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%hybm(pver) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%hybd(pver), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%hybd(pver) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%etam(pver), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%etam(pver) failed with stat: '//& + to_str(ierr)) + end if + + allocate(hvcoord%etai(pverp), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate hvcoord%etai(pverp) failed with stat: '//& + to_str(ierr)) + end if + !+++++++ + + !Set SE "hvcoord" values: hvcoord%hyam = hyam hvcoord%hyai = hyai hvcoord%hybm = hybm From 1f820f5708bc86afbf2b8819f15d6e69510cae41 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Fri, 9 Apr 2021 10:37:02 -0600 Subject: [PATCH 12/45] Add stat checks to all 'allocate' statements in SE dycore. 
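This patch applies one pattern throughout the SE dycore: every allocate now passes stat= and, on a
nonzero status, aborts through endrun() with a message naming the array that could not be allocated.
Below is a minimal standalone sketch of that pattern (illustrative only: the program and array names
are placeholders, and it uses a plain write/error stop in place of the endrun() and to_str() helpers
from cam_abortutils and string_utils that the dycore code actually calls):

    program alloc_check_demo
       implicit none
       real, allocatable :: work(:,:)
       integer :: iret
       character(len=*), parameter :: subname = 'alloc_check_demo'

       ! Capture the allocation status instead of letting a failed
       ! allocate abort the run without any context.
       allocate(work(100,50), stat=iret)
       if (iret /= 0) then
          ! Report which array failed and the status code returned.
          write(*,*) subname//': allocate work(100,50) failed with stat: ', iret
          error stop
       end if

       work = 0.0
       deallocate(work)
    end program alloc_check_demo

Without stat=, a failed allocation ends the run with a generic runtime error; with the check, the
log records exactly which array and dimensions were being requested when memory ran out.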
---
 src/dynamics/se/advect_tend.F90 | 24 +-
 src/dynamics/se/dp_coupling.F90 | 116 +++++++--
 src/dynamics/se/dp_mapping.F90 | 136 +++++++++--
 .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 57 ++++-
 src/dynamics/se/dycore/control_mod.F90 | 2 +-
 src/dynamics/se/dycore/cube_mod.F90 | 64 ++++-
 src/dynamics/se/dycore/derivative_mod.F90 | 60 ++++-
 src/dynamics/se/dycore/dof_mod.F90 | 35 ++-
 src/dynamics/se/dycore/edge_mod.F90 | 216 +++++++++++++---
 src/dynamics/se/dycore/fvm_mapping.F90 | 231 ++++++++++++++----
 src/dynamics/se/dycore/fvm_mod.F90 | 45 ++--
 src/dynamics/se/dycore/gridgraph_mod.F90 | 40 ++-
 src/dynamics/se/dycore/hybrid_mod.F90 | 79 ++++--
 src/dynamics/se/dycore/hybvcoord_mod.F90 | 3 -
 src/dynamics/se/dycore/interpolate_mod.F90 | 117 +++++++--
 src/dynamics/se/dycore/ll_mod.F90 | 16 +-
 src/dynamics/se/dycore/mesh_mod.F90 | 54 +++-
 src/dynamics/se/dycore/metagraph_mod.F90 | 115 +++++++--
 src/dynamics/se/dycore/prim_advance_mod.F90 | 21 +-
 src/dynamics/se/dycore/prim_advection_mod.F90 | 22 +-
 src/dynamics/se/dycore/prim_driver_mod.F90 | 23 +-
 src/dynamics/se/dycore/prim_init.F90 | 62 ++++-
 src/dynamics/se/dycore/quadrature_mod.F90 | 40 ++-
 src/dynamics/se/dycore/reduction_mod.F90 | 46 +++-
 src/dynamics/se/dycore/schedtype_mod.F90 | 2 +-
 src/dynamics/se/dycore/schedule_mod.F90 | 198 ++++++++++++---
 src/dynamics/se/dycore/spacecurve_mod.F90 | 29 ++-
 src/dynamics/se/dyn_comp.F90 | 207 +++++++++++++---
 src/dynamics/se/dyn_grid.F90 | 192 ++++++++++++---
 src/dynamics/se/native_mapping.F90 | 85 ++++++-
 src/dynamics/se/stepon.F90 | 56 ++++-
 31 files changed, 1983 insertions(+), 410 deletions(-)

diff --git a/src/dynamics/se/advect_tend.F90 b/src/dynamics/se/advect_tend.F90
index 0ac430c7..fe3c67bd 100644
--- a/src/dynamics/se/advect_tend.F90
+++ b/src/dynamics/se/advect_tend.F90
@@ -26,6 +26,8 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0)
     use time_manager,   only: get_step_size
 !    use constituents,   only: tottnam,pcnst
     use constituents,   only: pcnst
+    use cam_abortutils, only: endrun
+    use string_utils,   only: to_str
 
     ! SE dycore:
     use dimensions_mod, only: nc,np,nlev,ntrac
@@ -38,21 +40,34 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0)
     type(fvm_struct), intent(in) :: fvm(:)
     integer, intent(in) :: nets,nete,qn0,n0
     real(r8) :: dt,idt
-    integer :: i,j,ic,nx,ie
+    integer :: i,j,ic,nx,ie,iret
     logical :: init
     real(r8), allocatable, dimension(:,:) :: ftmp
 
+    character(len=*), parameter :: subname = 'compute_adv_tends_xyz'
+
     if (ntrac>0) then
       nx=nc
     else
       nx=np
     endif
-    allocate( ftmp(nx*nx,nlev) )
+
+    allocate( ftmp(nx*nx,nlev), stat=iret )
+    if (iret /= 0) then
+      call endrun(subname//': allocate ftmp(nx*nx,nlev) failed with stat: '//&
+                  to_str(iret))
+    end if
+
     init = .false.
     if ( .not. allocated( adv_tendxyz ) ) then
       init = .true.
-      allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) )
+      allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete), stat=iret )
+      if (iret /= 0) then
+        call endrun(subname//': allocate adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) failed with stat: '//&
+                    to_str(iret))
+      end if
+
       adv_tendxyz(:,:,:,:,:) = 0._r8
     endif
 
@@ -89,6 +104,11 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0)
       deallocate(adv_tendxyz)
     endif
     deallocate(ftmp)
+#else
+  if (.not. 
init) then
+    deallocate(adv_tendxyz)
+  end if
+  deallocate(ftmp)
 #endif
 
   end subroutine compute_adv_tends_xyz
diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90
index 089e5cef..72ec02d2 100644
--- a/src/dynamics/se/dp_coupling.F90
+++ b/src/dynamics/se/dp_coupling.F90
@@ -23,6 +23,7 @@ module dp_coupling
 use perf_mod,        only: t_startf, t_stopf, t_barrierf
 use cam_abortutils,  only: endrun
+use string_utils,    only: to_str
 
 !SE dycore:
 use parallel_mod,    only: par
@@ -96,6 +97,9 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out)
    real(r8), allocatable :: qgll(:,:,:,:)
    real(r8) :: inv_dp3d(np,np,nlev)
    integer :: tl_f, tl_qdp_np0, tl_qdp_np1
+
+   character(len=*), parameter :: subname = 'd_p_coupling'
+
    !----------------------------------------------------------------------------
 
    if (.not. local_dp_map) then
@@ -109,19 +113,63 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out)
    if (fv_nphys > 0) then
       nphys = fv_nphys
    else
-      allocate(qgll(np,np,nlev,pcnst))
+      allocate(qgll(np,np,nlev,pcnst), stat=ierr)
+      if (ierr /= 0) then
+         call endrun(subname//': allocate qgll(np,np,nlev,pcnst) failed with stat: '//&
+                     to_str(ierr))
+      end if
+
       nphys = np
    end if
 
    ! Allocate temporary arrays to hold data for physics decomposition
-   allocate(ps_tmp(nphys_pts,nelemd))
-   allocate(dp3d_tmp(nphys_pts,pver,nelemd))
-   allocate(dp3d_tmp_tmp(nphys_pts,pver))
-   allocate(phis_tmp(nphys_pts,nelemd))
-   allocate(T_tmp(nphys_pts,pver,nelemd))
-   allocate(uv_tmp(nphys_pts,2,pver,nelemd))
-   allocate(q_tmp(nphys_pts,pver,pcnst,nelemd))
-   allocate(omega_tmp(nphys_pts,pver,nelemd))
+   allocate(ps_tmp(nphys_pts,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate ps_tmp(nphys_pts,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(dp3d_tmp(nphys_pts,pver,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate dp3d_tmp(nphys_pts,pver,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(dp3d_tmp_tmp(nphys_pts,pver), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate dp3d_tmp_tmp(nphys_pts,pver) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(phis_tmp(nphys_pts,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate phis_tmp(nphys_pts,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(T_tmp(nphys_pts,pver,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate T_tmp(nphys_pts,pver,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(uv_tmp(nphys_pts,2,pver,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate uv_tmp(nphys_pts,2,pver,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(q_tmp(nphys_pts,pver,pcnst,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate q_tmp(nphys_pts,pver,pcnst,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
+
+   allocate(omega_tmp(nphys_pts,pver,nelemd), stat=ierr)
+   if (ierr /= 0) then
+      call endrun(subname//': allocate omega_tmp(nphys_pts,pver,nelemd) failed with stat: '//&
+                  to_str(ierr))
+   end if
 
    !Remove once a gravity wave parameterization is available -JN
 #if 0
@@ -216,7 +264,11 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out)
 
    ! q_prev is for saving the tracer fields for calculating tendencies
    if (.not. 
allocated(q_prev)) then - allocate(q_prev(pcols,pver,pcnst)) + allocate(q_prev(pcols,pver,pcnst), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate q_prev(pcols,pver,pcnst) failed with stat: '//& + to_str(ierr)) + end if end if q_prev = 0.0_r8 @@ -224,8 +276,18 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) !Remove once a gravity wave parameterization is available -JN #if 0 if (use_gw_front .or. use_gw_front_igw) then - allocate(frontgf_phys(pcols, pver, begchunk:endchunk)) - allocate(frontga_phys(pcols, pver, begchunk:endchunk)) + allocate(frontgf_phys(pcols, pver, begchunk:endchunk), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate frontgf_phys(pcols, pver, begchunk:endchunk)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(frontga_phys(pcols, pver, begchunk:endchunk), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate frontga_phys(pcols, pver, begchunk:endchunk)'//& + ' failed with stat: '//to_str(ierr)) + end if + end if #endif !$omp parallel do num_threads(max_num_threads) private (col_ind, icol, ie, blk_ind, ilyr, m) @@ -343,6 +405,9 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) integer :: num_trac integer :: nets, nete integer :: kptr, ii + integer :: ierr + + character(len=*), parameter :: subname='p_d_coupling' !---------------------------------------------------------------------------- if (.not. local_dp_map) then @@ -355,10 +420,29 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) nullify(elem) end if - allocate(T_tmp(nphys_pts,pver,nelemd)) - allocate(uv_tmp(nphys_pts,2,pver,nelemd)) - allocate(dq_tmp(nphys_pts,pver,pcnst,nelemd)) - allocate(dp_phys(nphys_pts,pver,nelemd)) + allocate(T_tmp(nphys_pts,pver,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate T_tmp(nphys_pts,pver,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(uv_tmp(nphys_pts,2,pver,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate uv_tmp(nphys_pts,2,pver,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(dq_tmp(nphys_pts,pver,pcnst,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dq_tmp(nphys_pts,pver,pcnst,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(dp_phys(nphys_pts,pver,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dp_phys(nphys_pts,pver,nelemd) failed with stat: '//& + to_str(ierr)) + end if T_tmp = 0.0_r8 uv_tmp = 0.0_r8 diff --git a/src/dynamics/se/dp_mapping.F90 b/src/dynamics/se/dp_mapping.F90 index b3ff8850..140ccb3f 100644 --- a/src/dynamics/se/dp_mapping.F90 +++ b/src/dynamics/se/dp_mapping.F90 @@ -2,6 +2,8 @@ module dp_mapping use shr_const_mod, only: pi => shr_const_pi + use cam_abortutils, only: endrun + use string_utils, only: to_str !SE dycore: use dimensions_mod, only: np, fv_nphys @@ -55,9 +57,14 @@ subroutine dp_init(elem,fvm) use dimensions_mod, only: nelemd, nc, irecons_tracer, npsq use element_mod, only: element_t + ! Dummy variables: type(element_t) , dimension(nelemd), intent(in) :: elem type (fvm_struct), dimension(nelemd), intent(in) :: fvm + ! 
Local variables: + integer :: iret + character(len=*), parameter :: subname = 'dp_init' + !Initialize total number of physics points per spectral element: nphys_pts = npsq @@ -67,15 +74,50 @@ subroutine dp_init(elem,fvm) num_weights_phys2fvm = (nc+fv_nphys)**2 num_weights_fvm2phys = (nc+fv_nphys)**2 - allocate(weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd)) - allocate(weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)) - allocate(weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)) + allocate(weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if + allocate(weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if - allocate(weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd)) - allocate(weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)) - allocate(weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)) - allocate(jall_fvm2phys(nelemd)) - allocate(jall_phys2fvm(nelemd)) + allocate(weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(jall_fvm2phys(nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate jall_fvm2phys(nelemd) failed with stat: '//to_str(iret)) + end if + + allocate(jall_phys2fvm(nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate jall_phys2fvm(nelemd) failed with stat: '//to_str(iret)) + end if call fvm2phys_init(elem,fvm,nc,fv_nphys,irecons_tracer,& weights_all_fvm2phys,weights_eul_index_all_fvm2phys,weights_lgr_index_all_fvm2phys,& @@ -90,7 +132,6 @@ subroutine dp_init(elem,fvm) end subroutine dp_init subroutine dp_reorder(before, after) - use cam_abortutils, only: endrun use cam_logfile, only: iulog use spmd_utils, only: masterproc use shr_sys_mod, only: shr_sys_flush @@ -134,11 +175,25 @@ subroutine dp_allocate(elem) integer,dimension(nelemd) :: lgid integer,dimension(:),allocatable :: displs,recvcount + character(len=*), parameter :: subname = 'dp_allocate' + ! 
begin - allocate(displs(npes)) - allocate(dp_gid(nelem)) - allocate(recvcount(npes)) + allocate(displs(npes), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate displs(npes) failed with stat: '//to_str(ierror)) + end if + + allocate(dp_gid(nelem), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate dp_gid(nelem) failed with stat: '//to_str(ierror)) + end if + + allocate(recvcount(npes), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate recvcount(npes) failed with stat: '//to_str(ierror)) + end if + call mpi_gather(nelemd, 1, mpi_integer, recvcount, 1, mpi_integer, & masterprocid, mpicom, ierror) lgid(:) = elem(:)%globalid @@ -151,7 +206,11 @@ subroutine dp_allocate(elem) call mpi_gatherv(lgid, nelemd, mpi_integer, dp_gid, recvcount, displs, & mpi_integer, masterprocid, mpicom, ierror) if (masterproc) then - allocate(dp_owner(nelem)) + allocate(dp_owner(nelem), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate dp_owner(nelem) failed with stat: '//to_str(ierror)) + end if + dp_owner(:) = -1 do i = 1,npes do j = displs(i)+1,displs(i)+recvcount(i) @@ -164,7 +223,11 @@ subroutine dp_allocate(elem) ! minimize global memory use call mpi_barrier(mpicom,ierror) if (.not.masterproc) then - allocate(dp_owner(nelem)) + allocate(dp_owner(nelem), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate dp_owner(nelem) failed with stat: '//to_str(ierror)) + end if + end if call mpi_bcast(dp_gid,nelem,mpi_integer,masterprocid,mpicom,ierror) call mpi_bcast(dp_owner,nelem,mpi_integer,masterprocid,mpicom,ierror) @@ -181,7 +244,6 @@ end subroutine dp_deallocate subroutine dp_write(elem, fvm, grid_format, filename_in) use mpi, only: mpi_integer, mpi_real8 - use cam_abortutils, only: endrun use netcdf, only: nf90_create, nf90_close, nf90_enddef use netcdf, only: nf90_def_dim, nf90_def_var, nf90_put_var use netcdf, only: nf90_double, nf90_int, nf90_put_att @@ -207,7 +269,7 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) ! Local variables integer :: i, ie, ierror, j, status, ivtx integer :: grid_corners_id, grid_rank_id, grid_size_id - character(len=256) :: errormsg + character(len=shr_kind_cl) :: errormsg character(len=shr_kind_cl) :: filename integer :: ncid integer :: grid_dims_id, grid_area_id, grid_center_lat_id @@ -225,6 +287,8 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) real(r8) :: x, y type (spherical_polar_t) :: sphere + character(len=*), parameter :: subname = 'dp_write' + ! begin !! Check to see if we are doing grid output @@ -259,11 +323,26 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) ! 
Allocate workspace and calculate PE displacement information if (IOroot) then - allocate(displs(npes)) - allocate(recvcount(npes)) + allocate(displs(npes), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate displs(npes) failed with stat: '//to_str(ierror)) + end if + + allocate(recvcount(npes), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate recvcount(npes) failed with stat: '//to_str(ierror)) + end if + else - allocate(displs(0)) - allocate(recvcount(0)) + allocate(displs(0), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate displs(0) failed with stat: '//to_str(ierror)) + end if + + allocate(recvcount(0), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate recvcount(0) failed with stat: '//to_str(ierror)) + end if end if gridsize = nelem * fv_nphys*fv_nphys if(masterproc) then @@ -279,11 +358,22 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) do i = 2, npes displs(i) = displs(i-1)+recvcount(i-1) end do - allocate(recvbuf(gridsize)) + allocate(recvbuf(gridsize), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate recvbuf(gridsize) failed with stat: '//to_str(ierror)) + end if + else - allocate(recvbuf(0)) + allocate(recvbuf(0), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate recvbuf(0) failed with stat: '//to_str(ierror)) + end if + + end if + allocate(gwork(4, gridsize), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate gwork(4, gridsize) failed with stat: '//to_str(ierror)) end if - allocate(gwork(4, gridsize)) if (IOroot) then ! Define the horizontal grid dimensions for SCRIP output diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index c16e2410..7e741ab1 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -4,6 +4,7 @@ module comp_gll_ctr_vol use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl use cam_abortutils, only: endrun + use string_utils, only: to_str use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush use global_norms_mod, only: wrap_repro_sum @@ -98,14 +99,14 @@ subroutine gll_grid_write(elem, grid_format, filename_in) use element_mod, only: element_t use dof_mod, only: UniquePoints use coordinate_systems_mod, only: cart2spherical - + ! Inputs type(element_t), intent(in) :: elem(:) character(len=*), intent(in) :: grid_format character(len=*), intent(in) :: filename_in - + real(r8), parameter :: rad2deg = 180._r8/pi - + ! Local variables !!XXgoldyXX: v debug only #ifdef USE_PIO3D @@ -272,7 +273,12 @@ subroutine gll_grid_write(elem, grid_format, filename_in) end if ! Work array to gather info before writing - allocate(gwork(np*np, nv_max, nelemd)) + allocate(gwork(np*np, nv_max, nelemd), stat=ierror) + if (ierror /= 0) then + call endrun(subname//': allocate gwork(np*np, nv_max, nelemd) failed with stat: '//& + to_str(ierror)) + end if + ! 
Write grid size status = pio_put_var(file, grid_dims_id, (/ gridsize /)) @@ -333,7 +339,11 @@ subroutine gll_grid_write(elem, grid_format, filename_in) end do !!XXgoldyXX: v debug only #ifdef USE_PIO3D -allocate(ldof(np*np*nelemd*nv_max)) +allocate(ldof(np*np*nelemd*nv_max), stat=ierror) +if (ierror /= 0) then + call endrun(subname//': allocate ldof(np*np*nelemd*nv_max) failed with stat: '//to_str(ierror)) +end if + ldof = 0 do ie = 1, nelemd do index = 1, elem(ie)%idxP%NumUniquePts @@ -351,7 +361,12 @@ subroutine gll_grid_write(elem, grid_format, filename_in) end do end do end do -allocate(iodesc) +allocate(iodesc, stat=ierror) +if (ierror /= 0) then + call endrun(subname//': allocate iodesc failed with stat: '//to_str(ierror)) +end if + + call cam_pio_newdecomp(iodesc, (/ nv_max, gridsize /), ldof, PIO_double) call pio_write_darray(file, grid_corner_lat_id, iodesc, gwork, status) #else @@ -434,13 +449,35 @@ subroutine InitControlVolumesData(par, elem, nelemd) integer, intent(in) :: nelemd integer :: ie + integer :: iret + + character(len=*), parameter :: subname='InitControlVolumesData (SE)' ! Cannot be done in a threaded region - allocate(cvlist(nelemd)) + allocate(cvlist(nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate vlist(nelemd) failed with stat: '//to_str(iret)) + end if + do ie = 1, nelemd - allocate(cvlist(ie)%vert(nv_max, np,np)) - allocate(cvlist(ie)%vert_latlon(nv_max,np,np)) - allocate(cvlist(ie)%face_no(nv_max,np,np)) + allocate(cvlist(ie)%vert(nv_max, np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate cvlist(ie)%vert(nv_max,np,np) failed with stat: '//& + to_str(iret)) + end if + + allocate(cvlist(ie)%vert_latlon(nv_max,np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate cvlist(ie)%vert_latlon(nv_max,np,np) failed with stat: '//& + to_str(iret)) + end if + + allocate(cvlist(ie)%face_no(nv_max,np,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate cvlist(ie)%face_no(nv_max,np,np) failed with stat: '//& + to_str(iret)) + end if + end do call initedgebuffer(par,edge1,elem,3,bndry_type=HME_BNDRY_P2P, nthreads=1) diff --git a/src/dynamics/se/dycore/control_mod.F90 b/src/dynamics/se/dycore/control_mod.F90 index d5fc4abe..fb7046d9 100644 --- a/src/dynamics/se/dycore/control_mod.F90 +++ b/src/dynamics/se/dycore/control_mod.F90 @@ -16,7 +16,7 @@ module control_mod integer, public :: rk_stage_user = 0 ! number of RK stages to use integer, public :: ftype = 2 ! Forcing Type integer, public :: ftype_conserve = 1 !conserve momentum (dp*u) - integer, public :: statediag_numtrac = 3 + integer, public :: statediag_numtrac = 3 integer, public :: qsplit = 1 ! ratio of dynamics tsteps to tracer tsteps integer, public :: rsplit =-1 ! 
for vertically lagrangian dynamics, apply remap diff --git a/src/dynamics/se/dycore/cube_mod.F90 b/src/dynamics/se/dycore/cube_mod.F90 index e467fc42..f7e1e019 100644 --- a/src/dynamics/se/dycore/cube_mod.F90 +++ b/src/dynamics/se/dycore/cube_mod.F90 @@ -7,6 +7,7 @@ module cube_mod use physconst, only: pi, rearth use control_mod, only: hypervis_scaling, cubed_sphere_map use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -877,11 +878,13 @@ subroutine rotation_init_atomic(elem, rot_type) integer :: ii,i,j,k integer :: ir,jr integer :: start, cnt + integer :: iret real (kind=r8) :: Dloc(2,2,np) real (kind=r8) :: Drem(2,2,np) real (kind=r8) :: x1,x2 + character(len=*), parameter :: subname = 'rotation_init_atomic (SE)' myface_no = elem%vertex%face_number @@ -911,7 +914,12 @@ subroutine rotation_init_atomic(elem, rot_type) ! ===================================================== if (nrot > 0) then - allocate(elem%desc%rot(nrot)) + allocate(elem%desc%rot(nrot), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate elem%desc%rot(nrot) failed with stat: '//& + to_str(iret)) + end if + elem%desc%use_rotation=1 irot=0 @@ -929,9 +937,20 @@ subroutine rotation_init_atomic(elem, rot_type) irot=irot+1 if (inbr <= 4) then - allocate(elem%desc%rot(irot)%R(2,2,np)) ! edge + allocate(elem%desc%rot(irot)%R(2,2,np), stat=iret) ! edge + if (iret /= 0) then + call endrun(subname//': allocate elem%desc%rot(irot)%R(2,2,np)'//& + ' failed with stat: '//to_str(iret)) + end if + + else - allocate(elem%desc%rot(irot)%R(2,2,1 )) ! corner + allocate(elem%desc%rot(irot)%R(2,2,1 ), stat=iret) ! corner + if (iret /= 0) then + call endrun(subname//': allocate elem%desc%rot(irot)%R(2,2,1)'//& + ' failed with stat: '//to_str(iret)) + end if + end if ! Initialize Dloc and Drem for no-rotation possibilities Dloc(1,1,:) = 1.0_r8 @@ -1448,10 +1467,16 @@ subroutine CubeTopology(GridEdge, GridVertex) integer :: offset, ierr, loc logical, allocatable :: nbrs_used(:,:,:,:) + character(len=*), parameter :: subname = 'CubeTopology (SE)' if (0==ne) call endrun('Error in CubeTopology: ne is zero') allocate(GridElem(ne,ne,nfaces),stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate GridElem(ne,ne,nfaces)'//& + ' failed with stat: '//to_str(ierr)) + end if + do k = 1, nfaces do j = 1, ne do i = 1, ne @@ -1464,7 +1489,12 @@ subroutine CubeTopology(GridEdge, GridVertex) call endrun('error in allocation of GridElem structure') end if - allocate(nbrs_used(ne,ne,nfaces,8)) + allocate(nbrs_used(ne,ne,nfaces,8), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate nbrs_used(ne,ne,nfaces,8)'//& + ' failed with stat: '//to_str(ierr)) + end if + nbrs_used = .false. 
@@ -1489,7 +1519,12 @@ subroutine CubeTopology(GridEdge, GridVertex) end do end do - allocate(Mesh(ne,ne)) + allocate(Mesh(ne,ne), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Mesh(ne,ne)'//& + ' failed with stat: '//to_str(ierr)) + end if + if(IsFactorable(ne)) then call GenspaceCurve(Mesh) else @@ -1499,9 +1534,22 @@ subroutine CubeTopology(GridEdge, GridVertex) call endrun('Fatal SFC error') end if - allocate(Mesh2(ne2,ne2)) - allocate(Mesh2_map(ne2,ne2,2)) - allocate(sfcij(0:ne2*ne2,2)) + allocate(Mesh2(ne2,ne2), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Mesh2(ne2,ne2)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(Mesh2_map(ne2,ne2,2), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Mesh2_map(ne2,ne2,2)'//& + ' failed with stat: '//to_str(ierr)) + end if + allocate(sfcij(0:ne2*ne2,2), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate sfcij(0:ne2*ne2,2)'//& + ' failed with stat: '//to_str(ierr)) + end if call GenspaceCurve(Mesh2) ! SFC partition for ne2 diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 index 682afeae..8f7299b1 100644 --- a/src/dynamics/se/dycore/derivative_mod.F90 +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -1,6 +1,7 @@ module derivative_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_abortutils, only: endrun + use string_utils, only: to_str use dimensions_mod, only : np, nc, npdg, nelemd, nlev use quadrature_mod, only : quadrature_t, gauss, gausslobatto,legendre, jacobi ! needed for spherical differential operators: @@ -901,12 +902,16 @@ function remap_phys2gll(pin,nphys) result(pout) real(kind=r8),save,pointer :: delta(:) ! length of i'th intersection real(kind=r8),save,pointer :: delta_a(:) ! length of arrival cells integer in_i,in_j,ia,ja,id,jd,count,i,j + integer :: iret logical :: found real(kind=r8) :: tol = 1.0e-13_r8 real(kind=r8) :: weight,x1,x2,dx real(kind=r8) :: gll_edges(np+1),phys_edges(nphys+1) type(quadrature_t) :: gll_pts + + character(len=*), parameter :: subname = 'remap_phys2gll (SE)' + if (nphys_init/=nphys) then ! setup (most be done on masterthread only) since all data is static ! MT: move barrier inside if loop - we dont want a barrier every regular call @@ -915,10 +920,30 @@ function remap_phys2gll(pin,nphys) result(pout) nphys_init=nphys ! find number of intersections nintersect = np+nphys-1 ! max number of possible intersections - allocate(acell(nintersect)) - allocate(dcell(nintersect)) - allocate(delta(nintersect)) - allocate(delta_a(np)) + allocate(acell(nintersect), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate acell(nintersect) failed with stat: '//& + to_str(iret)) + end if + + allocate(dcell(nintersect), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate dcell(nintersect) failed with stat: '//& + to_str(iret)) + end if + + allocate(delta(nintersect), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate delta(nintersect) failed with stat: '//& + to_str(iret)) + end if + + allocate(delta_a(np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate delta_a(np) failed with stat: '//& + to_str(iret)) + end if + ! 
compute phys grid cell edges on [-1,1] do i=1,nphys+1 @@ -2238,12 +2263,25 @@ subroutine allocate_subcell_integration_matrix_cslam(np, intervals) real (kind=r8) :: legrange_div(np) real (kind=r8) :: a,b,x,y, x_j, x_i real (kind=r8) :: r(1) + integer :: iret integer i,j,n,m + character(len=*), parameter :: subname = 'allocate_subcell_integration_matrix_cslam (SE)' + if (ALLOCATED(integration_matrix)) deallocate(integration_matrix) - allocate(integration_matrix(intervals,np)) + allocate(integration_matrix(intervals,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate integration_matrix(intervals,np)'//& + ' failed with stat: '//to_str(iret)) + end if + if (ALLOCATED(boundary_interp_matrix)) deallocate(boundary_interp_matrix) - allocate(boundary_interp_matrix(intervals,2,np)) + allocate(boundary_interp_matrix(intervals,2,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate boundary_interp_matrix(intervals,2,np)'//& + ' failed with stat: '//to_str(iret)) + end if + gll = gausslobatto(np) @@ -2343,10 +2381,18 @@ subroutine allocate_subcell_integration_matrix_physgrid(np, intervals) real (kind=r8) :: legrange_div(np) real (kind=r8) :: a,b,x,y, x_j, x_i real (kind=r8) :: r(1) + integer :: iret integer i,j,n,m + character(len=*), parameter :: subname = 'allocate_subcell_integration_matrix_physgrid (SE)' + if (ALLOCATED(integration_matrix_physgrid)) deallocate(integration_matrix_physgrid) - allocate(integration_matrix_physgrid(intervals,np)) + allocate(integration_matrix_physgrid(intervals,np), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate integration_matrix_physgrid(intervals,np)'//& + ' failed with stat: '//to_str(iret)) + end if + gll = gausslobatto(np) diff --git a/src/dynamics/se/dycore/dof_mod.F90 b/src/dynamics/se/dycore/dof_mod.F90 index c6c97741..24f64096 100644 --- a/src/dynamics/se/dycore/dof_mod.F90 +++ b/src/dynamics/se/dycore/dof_mod.F90 @@ -1,6 +1,8 @@ module dof_mod use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 use mpi, only: mpi_integer + use cam_abortutils, only: endrun + use string_utils, only: to_str use dimensions_mod, only: np, npsq, nelem, nelemd use quadrature_mod, only: quadrature_t use element_mod, only: element_t,index_t @@ -283,10 +285,27 @@ subroutine SetElemOffset(par,elem,GlobalUniqueColsP) integer :: ie, ig, nprocs, ierr logical, parameter :: Debug = .FALSE. + character(len=*), parameter :: subname = 'SetElemOffset (SE)' + nprocs = par%nprocs - allocate(numElemP(nelem)) - allocate(numElem2P(nelem)) - allocate(gOffset(nelem)) + allocate(numElemP(nelem), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate numElemP(nelem) failed with stat: '//& + to_str(ierr)) + end if + + allocate(numElem2P(nelem), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate numElem2P(nelem) failed with stat: '//& + to_str(ierr)) + end if + + allocate(gOffset(nelem), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate gOffset(nelem) failed with stat: '//& + to_str(ierr)) + end if + numElemP=0;numElem2P=0;gOffset=0 do ie = 1, nelemd @@ -318,10 +337,18 @@ subroutine CreateUniqueIndex(ig,gdof,idx) integer, allocatable :: ldof(:,:) integer :: i,j,ii,npts + integer :: ierr + + character(len=*), parameter :: subname = 'CreateUniqueIndex (SE)' npts = size(gdof,dim=1) - allocate(ldof(npts,npts)) + allocate(ldof(npts,npts), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate ldof(npts,npts) failed with stat: '//& + to_str(ierr)) + end if + ! ==================== ! 
Form the local DOF ! ==================== diff --git a/src/dynamics/se/dycore/edge_mod.F90 b/src/dynamics/se/dycore/edge_mod.F90 index c939ce5b..110b9a7a 100644 --- a/src/dynamics/se/dycore/edge_mod.F90 +++ b/src/dynamics/se/dycore/edge_mod.F90 @@ -7,6 +7,7 @@ module edge_mod use coordinate_systems_mod, only: cartesian3D_t use schedtype_mod, only: cycle_t, schedule_t, pgindex_t, schedule, HME_Ordinal,HME_Cardinal use cam_abortutils, only: endrun + use string_utils, only: to_str use cam_logfile, only: iulog use parallel_mod, only: parallel_t, & MAX_ACTIVE_MSG, HME_status_size, BNDRY_TAG_BASE, HME_BNDRY_A2A, HME_BNDRY_P2P, & @@ -187,7 +188,7 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen type (EdgeBuffer_t), target, intent(out) :: edge type (element_t), intent(in) :: elem(:) integer, intent(in) :: nlyr - integer, optional, intent(in) :: bndry_type + integer, optional, intent(in) :: bndry_type integer, optional, intent(in) :: nthreads integer, optional, intent(in) :: CardinalLength integer, optional, intent(in) :: OrdinalLength @@ -240,9 +241,9 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen integer :: errorcode,errorlen integer :: CardinalLen, OrdinalLen character(len=80) :: errorstring - character(len=80), parameter :: subname='initedgeBuffer' + character(len=*), parameter :: subname='initedgeBuffer (SE)' - if(present(bndry_type)) then + if(present(bndry_type)) then if ( MPI_VERSION >= 3 ) then edge%bndry_type = bndry_type else @@ -253,12 +254,12 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen endif ! Set the length of the cardinal and ordinal message lengths - if(present(CardinalLength)) then + if(present(CardinalLength)) then CardinalLen = CardinalLength else CardinalLen = np endif - if(present(OrdinalLength)) then + if(present(OrdinalLength)) then OrdinalLen = OrdinalLength else OrdinalLen = 1 @@ -283,15 +284,40 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen edge%id = initedgebuffer_callid edge%tag = BNDRY_TAG_BASE + MODULO(edge%id, MAX_ACTIVE_MSG) - allocate(edge%putmap(max_neigh_edges,nelemd)) - allocate(edge%getmap(max_neigh_edges,nelemd)) - allocate(edge%reverse(max_neigh_edges,nelemd)) + allocate(edge%putmap(max_neigh_edges,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%putmap(max_neigh_edges,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%getmap(max_neigh_edges,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%getmap(max_neigh_edges,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + + allocate(edge%reverse(max_neigh_edges,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%reverse(max_neigh_edges,nelemd) failed with stat: '//& + to_str(ierr)) + end if edge%putmap(:,:)=-1 edge%getmap(:,:)=-1 - allocate(putmap2(max_neigh_edges,nelemd)) - allocate(getmap2(max_neigh_edges,nelemd)) + allocate(putmap2(max_neigh_edges,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate putmap2(max_neigh_edges,nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(getmap2(max_neigh_edges,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate getmap2(max_neigh_edges,nelemd) failed with stat: '//& + to_str(ierr)) + end if + putmap2(:,:)=-1 getmap2(:,:)=-1 do ie=1,nelemd @@ -311,17 +337,77 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen 
edge%nIntra=nIntra if(nInter>0) then - allocate(edge%rcountsInter(nInter),edge%rdisplsInter(nInter)) - allocate(edge%scountsInter(nInter),edge%sdisplsInter(nInter)) + allocate(edge%rcountsInter(nInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rcountsInter(nInter) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%rdisplsInter(nInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rdisplsInter(nInter) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%scountsInter(nInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%scountsInter(nInter) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%sdisplsInter(nInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%sdisplsInter(nInter) failed with stat: '//& + to_str(ierr)) + end if + endif if(nIntra>0) then - allocate(edge%rcountsIntra(nIntra),edge%rdisplsIntra(nIntra)) - allocate(edge%scountsIntra(nIntra),edge%sdisplsIntra(nIntra)) + allocate(edge%rcountsIntra(nIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rcountsIntra(nIntra) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%rdisplsIntra(nIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rdisplsIntra(nIntra) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%scountsIntra(nIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%scountsIntra(nIntra) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%sdisplsIntra(nIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%sdisplsIntra(nIntra) failed with stat: '//& + to_str(ierr)) + end if + endif if (nSendCycles>0) then - allocate(edge%scountsFull(nSendCycles),edge%sdisplsFull(nSendCycles)) - allocate(edge%Srequest(nSendCycles)) + allocate(edge%scountsFull(nSendCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%scountsFull(nSendCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%sdisplsFull(nSendCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%sdisplsFull(nSendCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%Srequest(nSendCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%Srequest(nSendCycles) failed with stat: '//& + to_str(ierr)) + end if + edge%scountsFull(:) = 0 endif ! 
@@ -334,7 +420,7 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen ie = pSchedule%pIndx(j)%elemid len = CalcSegmentLength(pSchedule%pIndx(j),CardinalLen,OrdinalLen,nlyr) edge%putmap(il,ie) = 0 - if(nSendCycles>0) then + if(nSendCycles>0) then edge%sdisplsFull(icycle) = edge%putmap(il,ie) edge%scountsFull(icycle) = len endif @@ -378,12 +464,42 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen enddo if (nRecvCycles>0) then - allocate(edge%rcountsFull(nRecvCycles),edge%rdisplsFull(nRecvCycles)) - allocate(edge%getDisplsFull(nRecvCycles),edge%putDisplsFull(nRecvCycles)) + allocate(edge%rcountsFull(nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rcountsFull(nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%rdisplsFull(nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%rdisplsFull(nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%getDisplsFull(nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%getDisplsFull(nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%putDisplsFull(nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%putDisplsFull(nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if edge%rcountsFull(:) = 0 ! allocate the MPI Send/Recv request handles - allocate(edge%Rrequest(nRecvCycles)) - allocate(edge%status(HME_status_size,nRecvCycles)) + allocate(edge%Rrequest(nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%Rrequest(nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%status(HME_status_size,nRecvCycles), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%status(HME_status_size,nRecvCycles) failed with stat: '//& + to_str(ierr)) + end if endif ! @@ -479,8 +595,17 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen end if call gbarrier_init(edge%gbarrier, nlen) - allocate(edge%moveLength(nlen)) - allocate(edge%movePtr(nlen)) + allocate(edge%moveLength(nlen), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%moveLength(nlen) failed with stat: '//& + to_str(ierr)) + end if + + allocate(edge%movePtr(nlen), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%movePtr(nlen) failed with stat: '//& + to_str(ierr)) + end if if (nlen > 1) then ! the master thread performs no data movement because it is busy with the @@ -514,8 +639,16 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen edge%nlyr=nlyr edge%nbuf=nbuf - allocate(edge%receive(nbuf)) - allocate(edge%buf(nbuf)) + allocate(edge%receive(nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%receive(nbuf) failed with stat: '//& + to_str(ierr)) + end if + allocate(edge%buf(nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%buf(nbuf) failed with stat: '//& + to_str(ierr)) + end if 21 format('RANK: ',i2, A,8(i6)) @@ -564,6 +697,9 @@ subroutine initEdgeBuffer_i8(edge,nlyr) ! Local variables integer :: nbuf + integer :: ierr + + character(len=*), parameter :: subname = '' ! 
sanity check for threading if (omp_get_num_threads()>1) then @@ -573,10 +709,19 @@ subroutine initEdgeBuffer_i8(edge,nlyr) nbuf=4*(np+max_corner_elem)*nelemd edge%nlyr=nlyr edge%nbuf=nbuf - allocate(edge%buf(nlyr,nbuf)) + allocate(edge%buf(nlyr,nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%buf(nlyr,nbuf) failed with stat: '//& + to_str(ierr)) + end if edge%buf(:,:)=0 - allocate(edge%receive(nlyr,nbuf)) + allocate(edge%receive(nlyr,nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate edge%receive(nlyr,nbuf) failed with stat: '//& + to_str(ierr)) + end if + edge%receive(:,:)=0 end subroutine initEdgeBuffer_i8 @@ -2144,6 +2289,9 @@ subroutine initGhostBuffer3d(ghost,nlyr,np,nhc_in) ! Local variables integer :: nbuf,nhc,i + integer :: ierr + + character(len=*), parameter :: subname = 'initGhostBuffer3d' ! sanity check for threading if (omp_get_num_threads()>1) then @@ -2163,8 +2311,18 @@ subroutine initGhostBuffer3d(ghost,nlyr,np,nhc_in) ghost%np = np ghost%nbuf = nbuf ghost%elem_size = np*(nhc+1) - allocate(ghost%buf (np,(nhc+1),nlyr,nbuf)) - allocate(ghost%receive(np,(nhc+1),nlyr,nbuf)) + allocate(ghost%buf (np,(nhc+1),nlyr,nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate ghost%buf(np,(nhc+1),nlyr,nbuf) failed with stat: '//& + to_str(ierr)) + end if + + allocate(ghost%receive(np,(nhc+1),nlyr,nbuf), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate ghost%receive(np,(nhc+1),nlyr,nbuf) failed with stat: '//& + to_str(ierr)) + end if + ghost%buf=0 ghost%receive=0 diff --git a/src/dynamics/se/dycore/fvm_mapping.F90 b/src/dynamics/se/dycore/fvm_mapping.F90 index 9ff11819..a563ade9 100644 --- a/src/dynamics/se/dycore/fvm_mapping.F90 +++ b/src/dynamics/se/dycore/fvm_mapping.F90 @@ -1,14 +1,14 @@ ! ! pg3->GLL and GLL->pg3 mapping algorithm described in: ! -! Adam R. Herrington, Peter H. Lauritzen, Mark A. Taylor, Steve Goldhaber, Brian Eaton, Kevin A Reed and Paul A. Ullrich, 2018: -! Physics-dynamics coupling with element-based high-order Galerkin methods: quasi equal-area physics grid: +! Adam R. Herrington, Peter H. Lauritzen, Mark A. Taylor, Steve Goldhaber, Brian Eaton, Kevin A Reed and Paul A. Ullrich, 2018: +! Physics-dynamics coupling with element-based high-order Galerkin methods: quasi equal-area physics grid: ! Mon. Wea. Rev., DOI:MWR-D-18-0136.1 ! ! pg2->pg3 mapping algorithm described in: ! -! Adam R. Herrington, Peter H. Lauritzen, Kevin A Reed, Steve Goldhaber, and Brian Eaton, 2019: -! Exploring a lower resolution physics grid in CAM-SE-CSLAM. J. Adv. Model. Earth Syst. +! Adam R. Herrington, Peter H. Lauritzen, Kevin A Reed, Steve Goldhaber, and Brian Eaton, 2019: +! Exploring a lower resolution physics grid in CAM-SE-CSLAM. J. Adv. Model. Earth Syst. ! 
!#define PCoM !replace PPM with PCoM for mass variables for fvm2phys and phys2fvm !#define skip_high_order_fq_map !do mass and correlation preserving phys2fvm mapping but no high-order pre-mapping of fq @@ -18,7 +18,9 @@ module fvm_mapping use dimensions_mod, only: irecons_tracer use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct - use perf_mod, only: t_startf, t_stopf + use perf_mod, only: t_startf, t_stopf + use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -42,16 +44,16 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ use dimensions_mod, only: np, nc,nlev use dimensions_mod, only: fv_nphys, nhc_phys,ntrac,nhc,ksponge_end, nu_scale_top use hybrid_mod, only: hybrid_t - use cam_abortutils, only: endrun use physconst, only: thermodynamic_active_species_num, thermodynamic_active_species_idx type (element_t), intent(inout):: elem(:) type(fvm_struct), intent(inout):: fvm(:) - + type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) logical, intent(in) :: no_cslam integer, intent(in) :: nets, nete, tl_f, tl_qdp integer :: ie,i,j,k,m_cnst,nq + integer :: iret real (kind=r8), dimension(:,:,:,:,:) , allocatable :: fld_phys, fld_gll, fld_fvm real (kind=r8), allocatable, dimension(:,:,:,:,:) :: qgll real (kind=r8) :: element_ave @@ -61,9 +63,15 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ integer :: nflds logical, allocatable :: llimiter(:) - allocate(qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)) - - do ie=nets,nete + character(len=*), parameter :: subname = 'phys2dyn_forcings_fvm (SE)' + + allocate(qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + do ie=nets,nete do nq=1,thermodynamic_active_species_num qgll(:,:,:,nq,ie) = elem(ie)%state%Qdp(:,:,:,nq,tl_qdp)/elem(ie)%state%dp3d(:,:,:,tl_f) end do @@ -81,13 +89,27 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! call t_startf('p2d-pg2:copying') nflds = 4+ntrac - allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) - allocate(fld_gll(np,np,nlev,3,nets:nete)) - allocate(llimiter(nflds)) + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//& + ': allocate fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fld_gll(np,np,nlev,3,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_gll(np,np,nlev,3,nets:nete) failed with stat: '//to_str(iret)) + end if + + allocate(llimiter(nflds), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate llimiter(nflds) failed with stat: '//to_str(iret)) + end if + fld_phys = -9.99E99_r8!xxx necessary? llimiter = .false. - + do ie=nets,nete ! ! pack fields that need to be interpolated @@ -120,7 +142,7 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! map fq from phys to fvm ! call t_startf('p2d-pg2:phys2fvm') - + do ie=nets,nete do k=1,nlev call phys2fvm(ie,k,fvm(ie),& @@ -133,11 +155,20 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! 
overwrite SE Q with cslam Q ! nflds = thermodynamic_active_species_num - allocate(fld_gll(np,np,nlev,nflds,nets:nete)) - allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)) + allocate(fld_gll(np,np,nlev,nflds,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_gll(np,np,nlev,nflds,nets:nete) failed with stat: '//to_str(iret)) + end if + + allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + do ie=nets,nete ! - ! compute cslam updated Q value + ! compute cslam updated Q value do m_cnst=1,thermodynamic_active_species_num fld_fvm(1:nc,1:nc,:,m_cnst,ie) = fvm(ie)%c(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))+& fvm(ie)%fc(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))/fvm(ie)%dp_fvm(1:nc,1:nc,:) @@ -151,8 +182,8 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! fld_gll now holds q cslam value on gll grid ! ! convert fld_gll to increment (q_new-q_old) - ! - do ie=nets,nete + ! + do ie=nets,nete do m_cnst=1,thermodynamic_active_species_num elem(ie)%derived%fq(:,:,:,m_cnst) =& fld_gll(:,:,:,m_cnst,ie)-qgll(:,:,:,m_cnst,ie) @@ -173,9 +204,22 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! ! nflds is ft, fu, fv, + thermo species nflds = 3+thermodynamic_active_species_num - allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) - allocate(fld_gll(np,np,nlev,nflds,nets:nete)) - allocate(llimiter(nflds)) + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fld_gll(np,np,nlev,nflds,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_gll(np,np,nlev,nflds,nets:nete) failed with stat: '//to_str(iret)) + end if + + allocate(llimiter(nflds), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate llimiter(nflds) failed with stat: '//to_str(iret)) + end if + llimiter(1:nflds) = .false. do ie=nets,nete ! @@ -401,13 +445,13 @@ subroutine dyn2fvm_mass_vars(dp_gll,ps_gll,q_gll,& inv_darea_dp_fvm,q_gll(:,:,k,m_cnst)) end do end do - end subroutine dyn2fvm_mass_vars - + end subroutine dyn2fvm_mass_vars + ! ! this subroutine assumes that the fvm halo has already been filled ! (if nc/=fv_nphys) ! 
- + subroutine dyn2phys_all_vars(nets,nete,elem,fvm,& num_trac,ptop,tl,& dp3d_phys,ps_phys,q_phys,T_phys,omega_phys,phis_phys) @@ -431,33 +475,71 @@ subroutine dyn2phys_all_vars(nets,nete,elem,fvm,& real (kind=r8), dimension(fv_nphys,fv_nphys,num_trac) :: q_phys_tmp real (kind=r8), dimension(nc,nc) :: inv_darea_dp_fvm integer :: k,m_cnst,ie + integer :: iret + character(len=*), parameter :: subname = 'dyn2phys_all_vars' - !OMP BARRIER OMP MASTER needed if (nc.ne.fv_nphys) then save_max_overlap = 4 !max number of mass overlap areas between phys and fvm grids - allocate(save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete)) - allocate(save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete)) - allocate(save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete)) - allocate(save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete)) - allocate(save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete)) - allocate(save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete)) + allocate(save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//& + ': allocate save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete) failed with stat: '//& + to_str(iret)) + end if + + allocate(save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete) failed with stat: '//& + to_str(iret)) + end if + + allocate(save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete) failed with stat: '//& + to_str(iret)) + end if + save_num_overlap = 0 - allocate(save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete)) + allocate(save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//& + ': allocate save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + end if do ie=nets,nete tmp = 1.0_r8 inv_area = 1.0_r8/dyn2phys(tmp,elem(ie)%metdet(:,:)) phis_phys(:,ie) = RESHAPE(fvm(ie)%phis_physgrid,SHAPE(phis_phys(:,ie))) - ps_phys(:,ie) = ptop + ps_phys(:,ie) = ptop if (nc.ne.fv_nphys) then tmp = 1.0_r8 do k=1,nlev inv_darea_dp_fvm = dyn2fvm(elem(ie)%state%dp3d(:,:,k,tl),elem(ie)%metdet(:,:)) inv_darea_dp_fvm = 1.0_r8/inv_darea_dp_fvm - + T_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%state%T(:,:,k,tl),elem(ie)%metdet(:,:),inv_area),SHAPE(T_phys(:,k,ie))) Omega_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%derived%omega(:,:,k),elem(ie)%metdet(:,:),inv_area), & SHAPE(Omega_phys(:,k,ie))) @@ -612,6 +694,10 @@ subroutine 
setup_interpdata_for_gll_to_phys_vec_mapping(interpdata,interp_p) integer i,j,ioff,ngrid real (kind=r8) :: dx + integer :: iret + + character(len=*), parameter :: subname = 'setup_interpdata_for_gll_to_phys_vec_mapping (SE)' + ngrid = fv_nphys*fv_nphys interpdata%n_interp=ngrid ! @@ -619,9 +705,24 @@ subroutine setup_interpdata_for_gll_to_phys_vec_mapping(interpdata,interp_p) ! gp_quadrature = gausslobatto(np) call interpolate_create(gp_quadrature,interp_p) - allocate(interpdata%interp_xy(ngrid)) - allocate(interpdata%ilat(ngrid) ) - allocate(interpdata%ilon(ngrid) ) + allocate(interpdata%interp_xy(ngrid), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interpdata%interp_xy(ngrid) failed with stat: '//& + to_str(iret)) + end if + + allocate(interpdata%ilat(ngrid), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interpdata%ilat(ngrid) failed with stat: '//& + to_str(iret)) + end if + + allocate(interpdata%ilon(ngrid), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interpdata%ilon(ngrid) failed with stat: '//& + to_str(iret)) + end if + ! !WARNING: THIS CODE INTERFERES WITH LAT-LON OUTPUT ! OF REGULAR SE IF nc>0 @@ -918,7 +1019,7 @@ subroutine fvm2phys(ie,k,fvm,q_fvm,q_phys,num_trac) end do end do call get_q_overlap_save(ie,k,fvm,q_fvm,num_trac,q_phys) - save_dp_phys(:,:,k,ie) = save_dp_phys(:,:,k,ie)/fvm%area_sphere_physgrid + save_dp_phys(:,:,k,ie) = save_dp_phys(:,:,k,ie)/fvm%area_sphere_physgrid end subroutine fvm2phys @@ -932,6 +1033,7 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) integer :: h,jx,jy,jdx,jdy,m_cnst + integer :: iret real(kind=r8), dimension(fv_nphys,fv_nphys) :: phys_cdp_max, phys_cdp_min @@ -945,11 +1047,32 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) real(kind=r8), allocatable, dimension(:,:,:) :: dq_min_overlap,dq_max_overlap real(kind=r8), allocatable, dimension(:,:,:) :: dq_overlap real(kind=r8), allocatable, dimension(:,:,:) :: fq_phys_overlap - - allocate(dq_min_overlap (save_max_overlap,fv_nphys,fv_nphys)) - allocate(dq_max_overlap (save_max_overlap,fv_nphys,fv_nphys)) - allocate(dq_overlap (save_max_overlap,fv_nphys,fv_nphys)) - allocate(fq_phys_overlap (save_max_overlap,fv_nphys,fv_nphys)) + + character(len=*), parameter :: subname = 'phys2fvm (SE)' + + allocate(dq_min_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate dq_min_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(dq_max_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate dq_max_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(dq_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate dq_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fq_phys_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fq_phys_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& + ' failed with stat: '//to_str(iret)) + end if do m_cnst=1,num_trac fqdp_fvm(:,:,m_cnst) = 0.0_r8 @@ -958,13 +1081,13 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) fq_phys_overlap,1) mass_phys(1:fv_nphys,1:fv_nphys) = fq_phys(1:fv_nphys,1:fv_nphys,m_cnst)*& (save_dp_phys(1:fv_nphys,1:fv_nphys,k,ie)*fvm%area_sphere_physgrid) - + min_patch = 
MINVAL(fvm%c(0:nc+1,0:nc+1,k,m_cnst)) max_patch = MAXVAL(fvm%c(0:nc+1,0:nc+1,k,m_cnst)) do jy=1,fv_nphys do jx=1,fv_nphys num = save_num_overlap(jx,jy,k,ie) -#ifdef debug_coupling +#ifdef debug_coupling save_q_overlap(:,jx,jy,k,m_cnst,ie) = 0.0_r8 save_q_phys(jx,jy,k,m_cnst,ie) = 0.0_r8 tmp = save_q_phys(jx,jy,k,m_cnst,ie)+fq_phys(jx,jy,m_cnst) !updated physics grid mixing ratio @@ -973,8 +1096,8 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) #else tmp = save_q_phys(jx,jy,k,m_cnst,ie)+fq_phys(jx,jy,m_cnst) !updated physics grid mixing ratio phys_cdp_max(jx,jy)= MAX(MAX(MAXVAL(save_q_overlap(1:num,jx,jy,k,m_cnst,ie)),max_patch),tmp) - phys_cdp_min(jx,jy)= MIN(MIN(MINVAL(save_q_overlap(1:num,jx,jy,k,m_cnst,ie)),min_patch),tmp) -#endif + phys_cdp_min(jx,jy)= MIN(MIN(MINVAL(save_q_overlap(1:num,jx,jy,k,m_cnst,ie)),min_patch),tmp) +#endif ! ! add high-order fq change when it does not violate monotonicity ! @@ -1009,7 +1132,7 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) ! total mass change from physics on physics grid ! num = save_num_overlap(jx,jy,k,ie) - fq = mass_phys(jx,jy)/(fvm%area_sphere_physgrid(jx,jy)*save_dp_phys(jx,jy,k,ie)) + fq = mass_phys(jx,jy)/(fvm%area_sphere_physgrid(jx,jy)*save_dp_phys(jx,jy,k,ie)) if (fq<0.0_r8) then sum_dq_min = SUM(dq_min_overlap(1:num,jx,jy)*save_air_mass_overlap(1:num,jx,jy,k,ie)) if (sum_dq_min>1.0E-14_r8) then @@ -1021,7 +1144,7 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) end do end if end if - + if (fq>0.0_r8) then sum_dq_max = SUM(dq_max_overlap(1:num,jx,jy)*save_air_mass_overlap(1:num,jx,jy,k,ie)) if (sum_dq_max<-1.0E-14_r8) then @@ -1035,11 +1158,11 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) end if end do end do -#endif +#endif ! ! convert to mass per unit area ! - fqdp_fvm(:,:,m_cnst) = fqdp_fvm(:,:,m_cnst)*fvm%inv_area_sphere(:,:) + fqdp_fvm(:,:,m_cnst) = fqdp_fvm(:,:,m_cnst)*fvm%inv_area_sphere(:,:) end do deallocate(dq_min_overlap) deallocate(dq_max_overlap) @@ -1069,7 +1192,7 @@ subroutine get_dp_overlap_save(ie,k,fvm,recons) call get_fvm_recons(fvm,fvm%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,k),recons,1,llimiter) do h=1,jall_fvm2phys(ie) - jx = weights_lgr_index_all_fvm2phys(h,1,ie); jy = weights_lgr_index_all_fvm2phys(h,2,ie) + jx = weights_lgr_index_all_fvm2phys(h,1,ie); jy = weights_lgr_index_all_fvm2phys(h,2,ie) jdx = weights_eul_index_all_fvm2phys(h,1,ie); jdy = weights_eul_index_all_fvm2phys(h,2,ie) save_num_overlap(jx,jy,k,ie) = save_num_overlap(jx,jy,k,ie)+1!could be pre-computed idx = save_num_overlap(jx,jy,k,ie) diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index b8426b7c..e198f320 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -9,7 +9,7 @@ ! 7.Februar 2012: cslam_run and cslam_runair ! !-----------------------------------------------------------------------------! 
-module fvm_mod +module fvm_mod use shr_kind_mod, only: r8=>shr_kind_r8 use edge_mod, only: initghostbuffer, freeghostbuffer, ghostpack, ghostunpack use edgetype_mod, only: edgebuffer_t @@ -19,17 +19,17 @@ module fvm_mod use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use hybrid_mod, only: hybrid_t - + implicit none private save - + type (EdgeBuffer_t) :: edgeveloc type (EdgeBuffer_t), public :: ghostBufQnhc_s type (EdgeBuffer_t), public :: ghostBufQnhc_vh type (EdgeBuffer_t), public :: ghostBufQnhc_h type (EdgeBuffer_t), public :: ghostBufQ1_h - type (EdgeBuffer_t), public :: ghostBufQ1_vh + type (EdgeBuffer_t), public :: ghostBufQ1_vh ! type (EdgeBuffer_t), private :: ghostBufFlux_h type (EdgeBuffer_t), public :: ghostBufFlux_vh type (EdgeBuffer_t), public :: ghostBufQnhcJet_h @@ -172,12 +172,12 @@ subroutine PrintArray(i1,i2,array) sz = size(array,dim=1) - if (sz == 9) then + if (sz == 9) then do i=i2,i1,-1 write(6,9) array(-2,i),array(-1,i), array(0,i), & array( 1,i), array(2,i), array(3,i), & array( 4,i), array(5,i), array(6,i) - enddo + enddo endif 9 format('|',9(f10.1,'|')) @@ -192,6 +192,7 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, use fvm_reconstruction_mod, only: extend_panel_interpolate use cam_abortutils, only: endrun + use string_utils, only: to_str use dimensions_mod, only: fv_nphys,nhr,nhr_phys,nhc,nhc_phys,ns,ns_phys,nhe_phys,nc use perf_mod, only : t_startf, t_stopf ! _EXTERNAL @@ -205,9 +206,12 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, ! real (kind=r8) :: ftmp(1-nhcc:nphys+nhcc,1-nhcc:nphys+nhcc,numlev,num_flds,nets:nete) real (kind=r8), allocatable :: fld_tmp(:,:) - integer :: ie,k,itr,nht_phys,nh_phys + integer :: ie,k,itr,nht_phys,nh_phys + integer :: iret type (edgeBuffer_t) :: cellghostbuf - + + character(len=*), parameter :: subname = 'fill_halo_and_extend_panel (SE)' + if (lfill_halo) then ! !********************************************* @@ -241,7 +245,13 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, call endrun("fill_halo_and_extend_panel: ndepth>nhr_phys") nht_phys = nhe_phys+nhr_phys nh_phys = nhr_phys - allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)) + + allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + do ie=nets,nete do itr=1,num_flds do k=1,numlev @@ -261,7 +271,12 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, nhe_phys= 0 nht_phys= nhe_phys+nhr nh_phys = nhr - allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)) + allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)'//& + ' failed with stat: '//to_str(iret)) + end if + do ie=nets,nete do itr=1,num_flds do k=1,numlev @@ -467,13 +482,13 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) call compute_ghost_corner_orientation(hybrid,elem,nets,nete) ! run some tests: ! call test_ghost(hybrid,elem,nets,nete) - + do ie=nets,nete call fvm_set_cubeboundary(elem(ie),fvm(ie)) call fvm_mesh(elem(ie),fvm(ie)) fvm(ie)%inv_area_sphere = 1.0_r8/fvm(ie)%area_sphere ! - ! compute CSLAM areas consistent with SE area (at 1 degree they can be up to + ! 
compute CSLAM areas consistent with SE area (at 1 degree they can be up to ! 1E-6 different than the correct spherical areas used in CSLAM) ! call subcell_integration(one, np, nc, elem(ie)%metdet,fvm(ie)%inv_se_area_sphere) @@ -483,7 +498,7 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) fvm(ie)%fm(:,:,:,:) = 0.0_r8 fvm(ie)%ft(:,:,: ) = 0.0_r8 enddo - ! Need to allocate ghostBufQnhc after compute_ghost_corner_orientation because it + ! Need to allocate ghostBufQnhc after compute_ghost_corner_orientation because it ! changes the values for reverse call initghostbuffer(hybrid%par,ghostBufQnhc_s,elem,nlev*(ntrac+1),nhc,nc,nthreads=1) @@ -502,7 +517,7 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) else call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*(3+thermodynamic_active_species_num),nhc_phys,fv_nphys,nthreads=1) end if - + if (fvm_supercycling.ne.fvm_supercycling_jet) then ! ! buffers for running different fvm time-steps in the jet region @@ -513,7 +528,7 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) end if end subroutine fvm_init2 - + subroutine fvm_init3(elem,fvm,hybrid,nets,nete,irecons) use control_mod , only: neast, nwest, seast, swest use fvm_analytic_mod, only: compute_reconstruct_matrix diff --git a/src/dynamics/se/dycore/gridgraph_mod.F90 b/src/dynamics/se/dycore/gridgraph_mod.F90 index cbafebcb..3e097b00 100644 --- a/src/dynamics/se/dycore/gridgraph_mod.F90 +++ b/src/dynamics/se/dycore/gridgraph_mod.F90 @@ -74,9 +74,15 @@ module GridGraph_mod subroutine allocate_gridvertex_nbrs(vertex, dim) + use cam_abortutils, only: endrun + use string_utils, only: to_str + type (GridVertex_t), intent(inout) :: vertex integer, optional, intent(in) :: dim integer :: num + integer :: iret + + character(len=*), parameter :: subname = 'allocate_gridvertex_nbrs (SE)' if (present(dim)) then num = dim @@ -84,11 +90,26 @@ subroutine allocate_gridvertex_nbrs(vertex, dim) num = max_neigh_edges end if - allocate(vertex%nbrs(num)) - allocate(vertex%nbrs_face(num)) - allocate(vertex%nbrs_wgt(num)) - allocate(vertex%nbrs_wgt_ghost(num)) + allocate(vertex%nbrs(num), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate vertex%nbrs(num) failed with stat: '//to_str(iret)) + end if + + allocate(vertex%nbrs_face(num), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate vertex%nbrs_face(num) failed with stat: '//to_str(iret)) + end if + allocate(vertex%nbrs_wgt(num), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate vertex%nbrs_wgt(num) failed with stat: '//to_str(iret)) + end if + + allocate(vertex%nbrs_wgt_ghost(num), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate vertex%nbrs_wgt_ghost(num) failed with stat: '//& + to_str(iret)) + end if end subroutine allocate_gridvertex_nbrs !====================================================================== @@ -288,6 +309,9 @@ end subroutine PrintChecksum subroutine CreateSubGridGraph(Vertex, SVertex, local2global) + use cam_abortutils, only: endrun + use string_utils, only: to_str + implicit none type (GridVertex_t),intent(in) :: Vertex(:) @@ -296,13 +320,19 @@ subroutine CreateSubGridGraph(Vertex, SVertex, local2global) integer :: nelem,nelem_s,n,ncount,cnt,pos, orig_start integer :: inbr,i,ig,j,k, new_pos + integer :: iret integer,allocatable :: global2local(:) + character(len=*), parameter :: subname = 'CreateSubGridGraph (SE)' + nelem = SIZE(Vertex) nelem_s = SiZE(SVertex) - allocate(global2local(nelem)) + allocate(global2local(nelem), stat=iret) + if (iret /= 0) then + call 
endrun(subname//': allocate global2local(nelem) failed with stat: '//to_str(iret)) + end if global2local(:) = 0 do i=1,nelem_s diff --git a/src/dynamics/se/dycore/hybrid_mod.F90 b/src/dynamics/se/dycore/hybrid_mod.F90 index 19f1043a..941256b9 100644 --- a/src/dynamics/se/dycore/hybrid_mod.F90 +++ b/src/dynamics/se/dycore/hybrid_mod.F90 @@ -5,7 +5,7 @@ module hybrid_mod use parallel_mod , only : parallel_t, copy_par -use thread_mod , only : omp_set_num_threads, omp_get_thread_num +use thread_mod , only : omp_set_num_threads, omp_get_thread_num use thread_mod , only : horz_num_threads, vert_num_threads, tracer_num_threads use dimensions_mod, only : nlev, qsize, ntrac @@ -46,11 +46,11 @@ module hybrid_mod public :: threadOwnsTracer, threadOwnsVertlevel public :: config_thread_region - interface config_thread_region + interface config_thread_region module procedure config_thread_region_par module procedure config_thread_region_hybrid end interface - interface PrintHybrid + interface PrintHybrid module procedure PrintHybridnew end interface @@ -122,9 +122,9 @@ function config_thread_region_hybrid(old,region_name) result(new) new%par = old%par ! relies on parallel_mod copy constructor new%nthreads = old%nthreads * region_num_threads - if( region_num_threads .ne. 1 ) then + if( region_num_threads .ne. 1 ) then new%ithr = old%ithr * region_num_threads + ithr - else + else new%ithr = old%ithr endif new%masterthread = old%masterthread @@ -134,21 +134,34 @@ function config_thread_region_hybrid(old,region_name) result(new) end function config_thread_region_hybrid function config_thread_region_par(par,region_name) result(hybrid) + + use cam_abortutils, only: endrun + use string_utils, only: to_str + type (parallel_t) , intent(in) :: par character(len=*), intent(in) :: region_name type (hybrid_t) :: hybrid - ! local + ! local integer :: ithr integer :: ibeg_range, iend_range integer :: kbeg_range, kend_range integer :: qbeg_range, qend_range integer :: nthreads + integer :: iret + + character(len=*), parameter :: subname = 'config_thread_region_par (SE)' ithr = omp_get_thread_num() if ( TRIM(region_name) == 'serial') then region_num_threads = 1 - if ( .NOT. allocated(work_pool_horz) ) allocate(work_pool_horz(horz_num_threads,2)) + if ( .NOT. 
allocated(work_pool_horz) ) then + allocate(work_pool_horz(horz_num_threads,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate work_pool_horz(horz_num_threads,2)'//& + ' failed with stat: '//to_str(iret)) + end if + end if call set_thread_ranges_1D ( work_pool_horz, ibeg_range, iend_range, ithr ) hybrid%ibeg = 1; hybrid%iend = nelemd_save hybrid%kbeg = 1; hybrid%kend = nlev @@ -156,7 +169,7 @@ function config_thread_region_par(par,region_name) result(hybrid) endif if ( TRIM(region_name) == 'horizontal') then - region_num_threads = horz_num_threads + region_num_threads = horz_num_threads call set_thread_ranges_1D ( work_pool_horz, ibeg_range, iend_range, ithr ) hybrid%ibeg = ibeg_range; hybrid%iend = iend_range hybrid%kbeg = 1; hybrid%kend = nlev @@ -164,13 +177,13 @@ function config_thread_region_par(par,region_name) result(hybrid) endif if ( TRIM(region_name) == 'vertical') then - region_num_threads = vert_num_threads + region_num_threads = vert_num_threads call set_thread_ranges_1D ( work_pool_vert, kbeg_range, kend_range, ithr ) hybrid%ibeg = 1; hybrid%iend = nelemd_save hybrid%kbeg = kbeg_range; hybrid%kend = kend_range hybrid%qbeg = 1; hybrid%qend = qsize endif - + if ( TRIM(region_name) == 'tracer' ) then region_num_threads = tracer_num_threads call set_thread_ranges_1D ( work_pool_trac, qbeg_range, qend_range, ithr) @@ -186,7 +199,7 @@ function config_thread_region_par(par,region_name) result(hybrid) hybrid%kbeg = 1; hybrid%kend = nlev hybrid%qbeg = qbeg_range; hybrid%qend = qend_range endif - + if ( TRIM(region_name) == 'vertical_and_tracer' ) then region_num_threads = vert_num_threads*tracer_num_threads call set_thread_ranges_2D ( work_pool_vert, work_pool_trac, kbeg_range, kend_range, & @@ -207,16 +220,28 @@ end function config_thread_region_par subroutine init_loop_ranges(nelemd) + use cam_abortutils, only: endrun + use string_utils, only: to_str + integer, intent(in) :: nelemd integer :: ith, beg_index, end_index + integer :: iret + + character(len=*), parameter :: subname = 'init_loop_ranges (SE)' + - if ( init_ranges ) then nelemd_save=nelemd - if ( .NOT. allocated(work_pool_horz) ) allocate(work_pool_horz(horz_num_threads,2)) + if ( .NOT. allocated(work_pool_horz) ) then + allocate(work_pool_horz(horz_num_threads,2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate work_pool_horz(horz_num_threads,2)'//& + ' failed with stat: '//to_str(iret)) + end if + end if if(nelemd0 .and. 
ntracshr_kind_r8 - use cam_logfile, only: iulog - use dimensions_mod, only: plev => nlev, plevp => nlevp - use physconst, only: pstd implicit none private diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 index c6b1ce0b..0c2aad61 100644 --- a/src/dynamics/se/dycore/interpolate_mod.F90 +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -15,6 +15,7 @@ module interpolate_mod use mesh_mod, only: MeshUseMeshFile use control_mod, only: cubed_sphere_map use cam_logfile, only: iulog + use string_utils, only: to_str implicit none private @@ -205,17 +206,49 @@ subroutine interpolate_create(gquad,interp) integer k,j integer npts + integer iret real (kind=r8), dimension(:), allocatable :: gamma real (kind=r8), dimension(:), allocatable :: leg + character(len=*), parameter :: subname = 'interpolate_create (SE)' + npts = size(gquad%points) - allocate(interp%Imat(npts,npts)) - allocate(interp%rk(npts)) - allocate(interp%vtemp(npts)) - allocate(interp%glp(npts)) - allocate(gamma(npts)) - allocate(leg(npts)) + allocate(interp%Imat(npts,npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interp%Imat(npts,npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(interp%rk(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interp%rk(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(interp%vtemp(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interp%vtemp(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(interp%glp(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate interp%glp(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(gamma(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate gamma(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(leg(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate leg(npts) failed with stat: '//& + to_str(iret)) + end if gamma = quad_norm(gquad,npts) @@ -1120,12 +1153,33 @@ subroutine setup_latlon_interp(elem,interpdata,par) integer :: k integer, allocatable :: global_elem_gid(:,:),local_elem_gid(:,:), local_elem_num(:,:) + character(len=*), parameter :: subname = 'setup_latlon_interp (SE)' + ! these arrays often are too large for stack, so lets make sure ! 
they go on the heap: - allocate(local_elem_num(nlat,nlon)) - allocate(local_elem_gid(nlat,nlon)) - allocate(global_elem_gid(nlat,nlon)) - allocate(cart_vec(nlat,nlon)) + allocate(local_elem_num(nlat,nlon), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate local_elem_num(nlat,nlon) failed with stat: '//& + to_str(ierr)) + end if + + allocate(local_elem_gid(nlat,nlon), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate local_elem_gid(nlat,nlon) failed with stat: '//& + to_str(ierr)) + end if + + allocate(global_elem_gid(nlat,nlon), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate global_elem_gid(nlat,nlon) failed with stat: '//& + to_str(ierr)) + end if + + allocate(cart_vec(nlat,nlon), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate cart_vec(nlat,nlon) failed with stat: '//& + to_str(ierr)) + end if if (par%masterproc) then write(iulog,'(a,i4,a,i4,a)') 'Initializing ',nlat,' x ',nlon,' lat-lon interpolation grid: ' @@ -1149,9 +1203,24 @@ subroutine setup_latlon_interp(elem,interpdata,par) nullify(lon) endif - allocate(lat(nlat)) - allocate(gweight(nlat)) - allocate(lon(nlon)) + allocate(lat(nlat), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lat(nlat) failed with stat: '//& + to_str(ierr)) + end if + + allocate(gweight(nlat), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate gweight(nlat) failed with stat: '//& + to_str(ierr)) + end if + + allocate(lon(nlon), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lon(nlon) failed with stat: '//& + to_str(ierr)) + end if + call interp_init() gweight=0 do i=1,nlon @@ -1340,9 +1409,25 @@ subroutine setup_latlon_interp(elem,interpdata,par) if (associated(interpdata(ii)%ilon))then if(size(interpdata(ii)%ilon)>0)deallocate(interpdata(ii)%ilon) endif - allocate(interpdata(ii)%interp_xy( ngrid ) ) - allocate(interpdata(ii)%ilat( ngrid ) ) - allocate(interpdata(ii)%ilon( ngrid ) ) + + allocate(interpdata(ii)%interp_xy( ngrid ), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%interp_xy(ngrid) failed with stat: '//& + to_str(ierr)) + end if + + allocate(interpdata(ii)%ilat( ngrid ), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%ilat(ngrid) failed with stat: '//& + to_str(ierr)) + end if + + allocate(interpdata(ii)%ilon( ngrid ), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%ilon(ngrid) failed with stat: '//& + to_str(ierr)) + end if + interpdata(ii)%n_interp=0 ! 
reset counter enddo do j=1,nlat diff --git a/src/dynamics/se/dycore/ll_mod.F90 b/src/dynamics/se/dycore/ll_mod.F90 index cf445c86..d2700503 100644 --- a/src/dynamics/se/dycore/ll_mod.F90 +++ b/src/dynamics/se/dycore/ll_mod.F90 @@ -112,6 +112,10 @@ subroutine LLFindEdge(Edge,src,dest,id,found) end subroutine LLFindEdge subroutine LLAddEdge(EdgeList,src,dest,id) + + use cam_abortutils, only: endrun + use string_utils, only: to_str + type (root_t), intent(inout) :: EdgeList integer, intent(in) :: src integer, intent(in) :: dest @@ -121,6 +125,10 @@ subroutine LLAddEdge(EdgeList,src,dest,id) type(node_t), pointer :: new_node type(node_t), pointer :: parent + integer :: iret + + character(len=*), parameter :: subname = 'LLAddEdge (SE)' + temp_node => EdgeList%first parent => EdgeList%first @@ -128,7 +136,13 @@ subroutine LLAddEdge(EdgeList,src,dest,id) parent => temp_node temp_node => parent%next enddo - allocate(new_node) + + allocate(new_node, stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate new_node failed with stat: '//& + to_str(iret)) + end if + NumEdges = NumEdges + 1 new_node%src=src diff --git a/src/dynamics/se/dycore/mesh_mod.F90 b/src/dynamics/se/dycore/mesh_mod.F90 index c5e22868..c34f0cc4 100644 --- a/src/dynamics/se/dycore/mesh_mod.F90 +++ b/src/dynamics/se/dycore/mesh_mod.F90 @@ -4,6 +4,7 @@ module mesh_mod use physconst, only: PI use control_mod, only: MAX_FILE_LEN use cam_abortutils, only: endrun + use string_utils, only: to_str use netcdf, only: nf90_strerror, nf90_open, nf90_close use netcdf, only: NF90_NOWRITE, nf90_NoErr @@ -381,10 +382,17 @@ subroutine create_index_table(index_table, element_nodes) integer , intent(in) :: element_nodes(p_number_elements, 4) integer :: cnt, cnt_index, node integer :: k, ll + integer :: iret + + character(len=*), parameter :: subname = 'create_index_table (SE)' !Create an index table so that we can find neighbors on O(n) ! so for each node, we want to know which elements it is part of - allocate(index_table(p_number_nodes, max_elements_attached_to_node + 1)) + allocate(index_table(p_number_nodes, max_elements_attached_to_node + 1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate (index_table(p_number_nodes,max_elements_attached_to_node+1)'//& + ' failed with stat: '//to_str(iret)) + end if !the last column in the index table is a count of the number of elements index_table = 0 @@ -639,6 +647,9 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) real(kind=r8) :: x, y, h integer :: i, j, i2, j2, ne, ne2 integer :: sfc_index, face + integer :: iret + + character(len=*), parameter :: subname = 'initialize_space_filling_curve (SE)' if (SIZE(GridVertex) /= p_number_elements) then call endrun('initialize_space_filling_curve:Element count check failed & @@ -664,9 +675,20 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) ne2=2**ceiling( log(real(ne))/log(2._r8) ) if (ne2 > logical :: Verbose = .FALSE. logical :: Debug = .FALSE. + character(len=*), parameter :: subname = 'initMetaGraph (SE)' if(Debug) write(iulog,*)'initMetagraph: point #1' ! Number of grid vertices @@ -197,7 +227,7 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) tail_processor_number = GridEdge(i)%tail%processor_number head_processor_number = GridEdge(i)%head%processor_number if(tail_processor_number .eq. ThisProcessorNumber .or. & - head_processor_number .eq. ThisProcessorNumber ) then + head_processor_number .eq. 
ThisProcessorNumber ) then call LLInsertEdge(mEdgeList,tail_processor_number,head_processor_number,eNum) endif enddo @@ -205,12 +235,16 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) call LLGetEdgeCount(nedges) NULLIFY(MetaVertex%edges) - - allocate(MetaVertex%edges(nedges)) + + allocate(MetaVertex%edges(nedges), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%edges(nedges) failed with stat: '//& + to_str(iret)) + end if ! Initalize the Meta Vertices to zero... probably should be done ! in a separate routine - MetaVertex%nmembers=0 + MetaVertex%nmembers=0 MetaVertex%number=0 MetaVertex%nedges=0 if(Debug) write(iulog,*)'initMetagraph: point #2' @@ -235,7 +269,12 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) if(Debug) write(iulog,*)'initMetagraph: point #4 ' ! Allocate space for the members of the MetaVertices if(Debug) write(iulog,*)'initMetagraph: point #4.1 i,MetaVertex%nmembers',i,MetaVertex%nmembers - allocate(MetaVertex%members(MetaVertex%nmembers)) + + allocate(MetaVertex%members(MetaVertex%nmembers), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%members(MetaVertex%nmembers) failed with stat: '//& + to_str(iret)) + end if do j=1, MetaVertex%nmembers call allocate_gridvertex_nbrs(MetaVertex%members(j)) @@ -246,7 +285,7 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) ! Set the identity of the members of the MetaVertices ic=1 do j=1,nelem - if( GridVertex(j)%processor_number .eq. ThisProcessorNumber) then + if( GridVertex(j)%processor_number .eq. ThisProcessorNumber) then MetaVertex%members(ic) = GridVertex(j) ic=ic+1 endif @@ -273,7 +312,7 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) head_processor_number = GridEdge(i)%head%processor_number tail_processor_number = GridEdge(i)%tail%processor_number call LLFindEdge(mEdgeList,tail_processor_number,head_processor_number,j,found) - if(found) then + if(found) then ! Increment the number of grid edges contained in the grid edge ! and setup the pointers @@ -317,10 +356,30 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) do i=1,nedges ! 
Allocate space for the member edges and edge index - allocate(MetaVertex%edges(i)%members (MetaVertex%edges(i)%nmembers)) - allocate(MetaVertex%edges(i)%edgeptrP(MetaVertex%edges(i)%nmembers)) - allocate(MetaVertex%edges(i)%edgeptrS(MetaVertex%edges(i)%nmembers)) - allocate(MetaVertex%edges(i)%edgeptrP_ghost(MetaVertex%edges(i)%nmembers)) + allocate(MetaVertex%edges(i)%members (MetaVertex%edges(i)%nmembers), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%edges(i)%members(MetaVertex%edges(i)%nmembers)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(MetaVertex%edges(i)%edgeptrP(MetaVertex%edges(i)%nmembers), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%edges(i)%edgeptrP(MetaVertex%edges(i)%nmembers)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(MetaVertex%edges(i)%edgeptrS(MetaVertex%edges(i)%nmembers), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%edges(i)%edgeptrS(MetaVertex%edges(i)%nmembers)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(MetaVertex%edges(i)%edgeptrP_ghost(MetaVertex%edges(i)%nmembers), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate MetaVertex%edges(i)%edgeptrP_ghost(MetaVertex%edges(i)%nmembers)'//& + ' failed with stat: '//to_str(iret)) + end if + MetaVertex%edges(i)%edgeptrP(:)=0 MetaVertex%edges(i)%edgeptrS(:)=0 MetaVertex%edges(i)%edgeptrP_ghost(:)=0 @@ -328,13 +387,17 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) if(Debug) write(iulog,*)'initMetagraph: point #14' ! Insert the edges into the proper meta edges - allocate(icount(nelem_edge)) + allocate(icount(nelem_edge), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate icount(nelem_edge) failed with stat: '//to_str(iret)) + end if + icount=1 do i=1,nelem_edge head_processor_number = GridEdge(i)%head%processor_number tail_processor_number = GridEdge(i)%tail%processor_number call LLFindEdge(mEdgeList,tail_processor_number,head_processor_number,j,found) - if(found) then + if(found) then MetaVertex%edges(j)%members(icount(j)) = GridEdge(i) if(icount(j)+1 .le. 
MetaVertex%edges(j)%nmembers) then @@ -342,7 +405,7 @@ subroutine initMetaGraph(ThisProcessorNumber,MetaVertex,GridVertex,GridEdge) wgtP=Gridedge(i)%tail%nbrs_wgt(ii) MetaVertex%edges(j)%edgeptrP(icount(j)+1) = MetaVertex%edges(j)%edgeptrP(icount(j)) + wgtP - + wgtS = 1 MetaVertex%edges(j)%edgeptrS(icount(j)+1) = MetaVertex%edges(j)%edgeptrS(icount(j)) + wgtS diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index 2812905d..306e8486 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -3,6 +3,7 @@ module prim_advance_mod use edgetype_mod, only: EdgeBuffer_t use perf_mod, only: t_startf, t_stopf, t_adj_detailf !, t_barrierf _EXTERNAL use cam_abortutils, only: endrun + use string_utils, only: to_str use parallel_mod, only: parallel_t, HME_BNDRY_P2P!,HME_BNDRY_A2A use thread_mod , only: horz_num_threads, vert_num_threads, omp_set_nested @@ -26,12 +27,20 @@ subroutine prim_advance_init(par, elem) type (parallel_t) :: par type (element_t), target, intent(inout) :: elem(:) integer :: i + integer :: iret + + character(len=*), parameter :: subname = 'prim_advance_init (SE)' call initEdgeBuffer(par,edge3 ,elem,4*nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) call initEdgeBuffer(par,edgeSponge,elem,4*ksponge_end,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) call initEdgeBuffer(par,edgeOmega ,elem,nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) - if(.not. allocated(ur_weights)) allocate(ur_weights(qsplit)) + if(.not. allocated(ur_weights)) then + allocate(ur_weights(qsplit), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate ur_weights(qsplit) failed with stat: '//to_str(iret)) + end if + end if ur_weights(:)=0.0_r8 if(mod(qsplit,2).NE.0)then @@ -287,14 +296,22 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu ! local integer :: i,j,k,ie,q + integer :: iret real (kind=r8) :: v1,dt_local, dt_local_tracer,tmp real (kind=r8) :: dt_local_tracer_fvm real (kind=r8) :: ftmp(np,np,nlev,qsize,nets:nete) !diagnostics real (kind=r8) :: pdel(np,np,nlev) real (kind=r8), allocatable :: ftmp_fvm(:,:,:,:,:) !diagnostics + character(len=*), parameter :: subname = 'applyCAMforcing (SE)' - if (ntrac>0) allocate(ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)) + if (ntrac>0) then + allocate(ftmp_fvm(nc,nc,nlev,ntrac,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + end if if (ftype==0) then ! diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 index 41e15744..eb491ab3 100644 --- a/src/dynamics/se/dycore/prim_advection_mod.F90 +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -63,8 +63,9 @@ module prim_advection_mod subroutine Prim_Advec_Init1(par, elem) - use dimensions_mod, only : nlev, qsize, nelemd,ntrac - use parallel_mod, only : parallel_t, boundaryCommMethod + use dimensions_mod, only: nlev, qsize, nelemd,ntrac + use parallel_mod, only: parallel_t, boundaryCommMethod + use string_utils, only: to_str type(parallel_t) :: par type (element_t) :: elem(:) ! @@ -74,8 +75,10 @@ subroutine Prim_Advec_Init1(par, elem) ! threads. But in this case we want shared pointers. 
real(kind=r8), pointer :: buf_ptr(:) => null() real(kind=r8), pointer :: receive_ptr(:) => null() - integer :: advec_remap_num_threads + integer :: advec_remap_num_threads + integer :: iret + character(len=*), parameter :: subname = 'Prim_Advec_Init1 (SE)' ! ! Set the number of threads used in the subroutine Prim_Advec_tracers_remap() @@ -109,8 +112,15 @@ subroutine Prim_Advec_Init1(par, elem) ! this static array is shared by all threads, so dimension for all threads (nelemd), not nets:nete: - allocate (qmin(nlev,qsize,nelemd)) - allocate (qmax(nlev,qsize,nelemd)) + allocate(qmin(nlev,qsize,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate qmin(nlev,qsize,nelemd) failed with stat: '//to_str(iret)) + end if + + allocate(qmax(nlev,qsize,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate qmax(nlev,qsize,nelemd) failed with stat: '//to_str(iret)) + end if end subroutine Prim_Advec_Init1 @@ -133,7 +143,7 @@ end subroutine Prim_Advec_Init2 subroutine Prim_Advec_Tracers_fvm(elem,fvm,hvcoord,hybrid,& dt,tl,nets,nete,ghostbufQnhc,ghostBufQ1, ghostBufFlux,kmin,kmax) use fvm_consistent_se_cslam, only: run_consistent_se_cslam - use edgetype_mod, only: edgebuffer_t + use edgetype_mod, only: edgebuffer_t implicit none type (element_t), intent(inout) :: elem(:) type (fvm_struct), intent(inout) :: fvm(:) diff --git a/src/dynamics/se/dycore/prim_driver_mod.F90 b/src/dynamics/se/dycore/prim_driver_mod.F90 index 0e47b8a2..db14697f 100644 --- a/src/dynamics/se/dycore/prim_driver_mod.F90 +++ b/src/dynamics/se/dycore/prim_driver_mod.F90 @@ -9,7 +9,7 @@ module prim_driver_mod use derivative_mod, only: derivative_t use fvm_control_volume_mod, only: fvm_struct - use element_mod, only: element_t, timelevels, allocate_element_desc + use element_mod, only: element_t, timelevels use thread_mod , only: horz_num_threads, vert_num_threads, tracer_num_threads use thread_mod , only: omp_set_nested use perf_mod, only: t_startf, t_stopf @@ -59,7 +59,7 @@ subroutine prim_init2(elem, fvm, hybrid, nets, nete, tl, hvcoord) ! variables used to calculate CFL real (kind=r8) :: dtnu ! timestep*viscosity parameter real (kind=r8) :: dt_dyn_vis ! viscosity timestep used in dynamics - real (kind=r8) :: dt_dyn_del2_sponge, dt_remap + real (kind=r8) :: dt_dyn_del2_sponge, dt_remap real (kind=r8) :: dt_tracer_vis ! 
viscosity timestep used in tracers real (kind=r8) :: dp @@ -659,11 +659,12 @@ subroutine prim_set_dry_mass(elem, hvcoord,initial_global_ave_dry_ps,q) end subroutine prim_set_dry_mass subroutine get_global_ave_surface_pressure(elem, global_ave_ps_inic) - use element_mod , only : element_t - use dimensions_mod , only : np - use global_norms_mod , only : global_integral - use hybrid_mod , only : config_thread_region, get_loop_ranges, hybrid_t - use parallel_mod , only : par + use element_mod , only: element_t + use dimensions_mod , only: np + use global_norms_mod , only: global_integral + use hybrid_mod , only: config_thread_region, get_loop_ranges, hybrid_t + use parallel_mod , only: par + use string_utils , only: to_str type (element_t) , intent(in) :: elem(:) real (kind=r8), intent(out) :: global_ave_ps_inic @@ -672,12 +673,18 @@ subroutine get_global_ave_surface_pressure(elem, global_ave_ps_inic) real (kind=r8), allocatable :: tmp(:,:,:) type (hybrid_t) :: hybrid integer :: ie, nets, nete + integer :: iret + + character(len=*), parameter :: subname = 'get_global_ave_surface_pressure (SE)' !JMD $OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n) !JMD hybrid = config_thread_region(par,'horizontal') hybrid = config_thread_region(par,'serial') call get_loop_ranges(hybrid,ibeg=nets,iend=nete) - allocate(tmp(np,np,nets:nete)) + allocate(tmp(np,np,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate tmp(np,np,nets:nete) failed with stat: '//to_str(iret)) + end if do ie=nets,nete tmp(:,:,ie)=elem(ie)%state%psdry(:,:) diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index ed015f66..d3b6a980 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -1,6 +1,6 @@ module prim_init - use shr_kind_mod, only: r8=>shr_kind_r8 + use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cs use dimensions_mod, only: nc use reduction_mod, only: reductionbuffer_ordered_1d_t use quadrature_mod, only: quadrature_t, gausslobatto @@ -46,6 +46,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use schedule_mod, only: genEdgeSched use prim_advection_mod, only: prim_advec_init1 use cam_abortutils, only: endrun + use string_utils, only: to_str use parallel_mod, only: parallel_t, syncmp, global_shared_buf, nrepro_vars use spacecurve_mod, only: genspacepart use dof_mod, only: global_dof, CreateUniqueIndex, SetElemOffset @@ -75,12 +76,12 @@ subroutine prim_init1(elem, fvm, par, Tl) real(r8), allocatable :: aratio(:,:) real(r8) :: area(1), xtmp - character(len=80) :: rot_type ! cube edge rotation type + character(len=shr_kind_cs) :: rot_type ! cube edge rotation type integer :: i character(len=128) :: errmsg - character(len=*), parameter :: subname = 'PRIM_INIT1: ' + character(len=*), parameter :: subname = 'PRIM_INIT1 (SE): ' ! ==================================== ! 
Set cube edge rotation type for model @@ -108,8 +109,15 @@ subroutine prim_init1(elem, fvm, par, Tl) nelem_edge = CubeEdgeCount() end if - allocate(GridVertex(nelem)) - allocate(GridEdge(nelem_edge)) + allocate(GridVertex(nelem), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate GridVertex(nelem) failed with stat: '//to_str(ierr)) + end if + + allocate(GridEdge(nelem_edge), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate GridEdge(nelem_edge) failed with stat: '//to_str(ierr)) + end if do j = 1, nelem call allocate_gridvertex_nbrs(GridVertex(j)) @@ -145,8 +153,15 @@ subroutine prim_init1(elem, fvm, par, Tl) ! =========================================================== ! given partition, count number of local element descriptors ! =========================================================== - allocate(MetaVertex(1)) - allocate(Schedule(1)) + allocate(MetaVertex(1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate MetaVertex(1) failed with stat: '//to_str(ierr)) + end if + + allocate(Schedule(1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Schedule(1) failed with stat: '//to_str(ierr)) + end if nelem_edge = SIZE(GridEdge) @@ -166,19 +181,31 @@ subroutine prim_init1(elem, fvm, par, Tl) call mpi_allreduce(nelemd, nelemdmax, 1, MPI_INTEGER, MPI_MAX, par%comm, ierr) if (nelemd > 0) then - allocate(elem(nelemd)) + allocate(elem(nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate elem(nelemd) failed with stat: '//to_str(ierr)) + end if + call allocate_element_dims(elem) call allocate_element_desc(elem) end if if (fv_nphys > 0) then - allocate(fvm(nelemd)) + allocate(fvm(nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate fvm(nelemd) failed with stat: '//to_str(ierr)) + end if + call allocate_fvm_dims(fvm) call allocate_physgrid_vars(fvm,par) else ! Even if fvm not needed, still desirable to allocate it as empty ! so it can be passed as a (size zero) array rather than pointer. - allocate(fvm(0)) + allocate(fvm(0), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate fvm(0) failed with stat: '//to_str(ierr)) + end if + end if ! 
==================================================== @@ -187,7 +214,12 @@ subroutine prim_init1(elem, fvm, par, Tl) call genEdgeSched(par, elem, par%rank+1, Schedule(1), MetaVertex(1)) - allocate(global_shared_buf(nelemd, nrepro_vars)) + allocate(global_shared_buf(nelemd, nrepro_vars), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate global_shared_buf(nelemd, nrepro_vars)'//& + 'failed with stat: '//to_str(ierr)) + end if + global_shared_buf = 0.0_r8 call syncmp(par) @@ -253,7 +285,11 @@ subroutine prim_init1(elem, fvm, par, Tl) call shr_sys_flush(iulog) end if call mass_matrix(par, elem) - allocate(aratio(nelemd,1)) + allocate(aratio(nelemd,1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate aratio(nelemd,1) failed with stat: '//to_str(ierr)) + end if + if (topology == "cube") then area = 0 @@ -308,7 +344,7 @@ subroutine prim_init1(elem, fvm, par, Tl) elem(ie)%derived%FM=0.0_r8 elem(ie)%derived%FQ=0.0_r8 elem(ie)%derived%FT=0.0_r8 - elem(ie)%derived%FDP=0.0_r8 + elem(ie)%derived%FDP=0.0_r8 elem(ie)%derived%pecnd=0.0_r8 elem(ie)%derived%Omega=0 diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90 index ca6ba835..16497400 100644 --- a/src/dynamics/se/dycore/quadrature_mod.F90 +++ b/src/dynamics/se/dycore/quadrature_mod.F90 @@ -1,6 +1,8 @@ #undef _GAUSS_TABLE module quadrature_mod use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -44,8 +46,22 @@ function gauss(npts) result(gs) integer, intent(in) :: npts type (quadrature_t) :: gs - allocate(gs%points(npts)) - allocate(gs%weights(npts)) + ! Local variables: + integer :: iret + + character(len=*), parameter :: subname = 'gauss (SE)' + + allocate(gs%points(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate gs%points(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(gs%weights(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate gs%weights(npts) failed with stat: '//& + to_str(iret)) + end if gs%points=gauss_pts(npts) gs%weights=gauss_wts(npts,gs%points) @@ -276,8 +292,22 @@ function gausslobatto(npts) result(gll) integer, intent(in) :: npts type (quadrature_t) :: gll - allocate(gll%points(npts)) - allocate(gll%weights(npts)) + ! Local variables: + integer :: iret + + character(len=*), parameter :: subname = 'gausslobatto (SE)' + + allocate(gll%points(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate gll%points(npts) failed with stat: '//& + to_str(iret)) + end if + + allocate(gll%weights(npts), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate gll%weights(npts) failed with stat: '//& + to_str(iret)) + end if gll%points=gausslobatto_pts(npts) gll%weights=gausslobatto_wts(npts,gll%points) @@ -287,7 +317,7 @@ end function gausslobatto ! ============================================================== ! gausslobatto_pts: ! - ! Compute the Gauss-Lobatto Collocation points + ! Compute the Gauss-Lobatto Collocation points ! for Jacobi Polynomials ! ! 
============================================================== diff --git a/src/dynamics/se/dycore/reduction_mod.F90 b/src/dynamics/se/dycore/reduction_mod.F90 index b5cbdb13..5a777964 100644 --- a/src/dynamics/se/dycore/reduction_mod.F90 +++ b/src/dynamics/se/dycore/reduction_mod.F90 @@ -3,6 +3,7 @@ module reduction_mod use mpi, only: mpi_sum, mpi_min, mpi_max, mpi_real8, mpi_integer use mpi, only: mpi_success use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -184,8 +185,13 @@ subroutine InitReductionBuffer_int_1d(red,len) integer, intent(in) :: len type (ReductionBuffer_int_1d_t),intent(out) :: red + ! Local variables: + integer :: iret + + character(len=*), parameter :: subname = 'InitReductionBuffer_int_1d (SE)' + if (omp_get_num_threads()>1) then - call endrun("Error: attempt to allocate reduction buffer in threaded region") + call endrun(subname//": Error: attempt to allocate reduction buffer in threaded region") endif ! if buffer is already allocated and large enough, do nothing @@ -193,7 +199,13 @@ subroutine InitReductionBuffer_int_1d(red,len) !buffer is too small, or has not yet been allocated if (red%len>0) deallocate(red%buf) red%len = len - allocate(red%buf(len)) + + allocate(red%buf(len), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate red%buf(len) failed with stat: '//& + to_str(iret)) + end if + red%buf = 0 red%ctr = 0 endif @@ -205,14 +217,25 @@ subroutine InitReductionBuffer_r_1d(red,len) integer, intent(in) :: len type (ReductionBuffer_r_1d_t),intent(out) :: red + ! Local variables: + integer :: iret + + character(len=*), parameter :: subname = 'InitReductionBuffer_r_1d (SE)' + if (omp_get_num_threads()>1) then - call endrun("Error: attempt to allocate reduction buffer in threaded region") + call endrun(subname//": Error: attempt to allocate reduction buffer in threaded region") endif if (len > red%len) then if (red%len>0) deallocate(red%buf) red%len = len - allocate(red%buf(len)) + + allocate(red%buf(len), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate red%buf(len) failed with stat: '//& + to_str(iret)) + end if + red%buf = 0.0_R8 red%ctr = 0 endif @@ -224,14 +247,25 @@ subroutine InitReductionBuffer_ordered_1d(red,len,nthread) integer, intent(in) :: nthread type (ReductionBuffer_ordered_1d_t),intent(out) :: red + ! 
Local variables: + integer :: iret + + character(len=*), parameter :: subname = 'InitReductionBuffer_ordered_1d (SE)' + if (omp_get_num_threads()>1) then - call endrun("Error: attempt to allocate reduction buffer in threaded region") + call endrun(subname//": Error: attempt to allocate reduction buffer in threaded region") endif if (len > red%len) then if (red%len>0) deallocate(red%buf) red%len = len - allocate(red%buf(len,nthread+1)) + + allocate(red%buf(len,nthread+1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate red%buf(len,nthread+1) failed with stat: '//& + to_str(iret)) + end if + red%buf = 0.0_R8 red%ctr = 0 endif diff --git a/src/dynamics/se/dycore/schedtype_mod.F90 b/src/dynamics/se/dycore/schedtype_mod.F90 index a4efb146..115ee3fc 100644 --- a/src/dynamics/se/dycore/schedtype_mod.F90 +++ b/src/dynamics/se/dycore/schedtype_mod.F90 @@ -39,7 +39,7 @@ module schedtype_mod integer,pointer :: Local2Global(:) integer,pointer :: destFull(:) integer,pointer :: srcFull(:) - type (Cycle_t), pointer :: Cycle(:) + type (Cycle_t), pointer :: Cycle(:) type (Cycle_t), pointer :: SendCycle(:) type (Cycle_t), pointer :: RecvCycle(:) type (Cycle_t), pointer :: MoveCycle(:) diff --git a/src/dynamics/se/dycore/schedule_mod.F90 b/src/dynamics/se/dycore/schedule_mod.F90 index f952c821..fa1c6628 100644 --- a/src/dynamics/se/dycore/schedule_mod.F90 +++ b/src/dynamics/se/dycore/schedule_mod.F90 @@ -40,6 +40,8 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) use dimensions_mod, only: nelem, max_neigh_edges use gridgraph_mod, only: gridvertex_t, gridedge_t, assignment ( = ) use cam_abortutils, only: endrun + use string_utils, only: to_str + use shr_kind_mod, only: shr_kind_cs use parallel_mod, only: nComPoints, rrequest, srequest, status, npackpoints type(parallel_t), intent(inout) :: par @@ -63,9 +65,9 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) integer :: iSched logical, parameter :: VerbosePrint=.FALSE. logical, parameter :: Debug=.FALSE. - character(len=*), parameter :: subname = 'genEdgeSched' + character(len=*), parameter :: subname = 'genEdgeSched (SE)' integer :: errorcode,errorlen - character*(80) :: errorstring + character(len=shr_kind_cs) :: errorstring integer, allocatable :: intracommranks(:) integer :: numIntra, numInter, rank logical :: OnNode @@ -77,8 +79,8 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) integer :: icIntra, icInter integer, allocatable :: srcFull(:), destFull(:), srcweightFull(:), destweightFull(:) - integer, allocatable :: srcInter(:),destInter(:), srcweightInter(:),destweightInter(:) - integer, allocatable :: srcIntra(:),destIntra(:), srcweightIntra(:),destweightIntra(:) + integer, allocatable :: srcInter(:),destInter(:), srcweightInter(:),destweightInter(:) + integer, allocatable :: srcIntra(:),destIntra(:), srcweightIntra(:),destweightIntra(:) logical :: reorder integer :: sizeGroup, groupFull @@ -94,7 +96,12 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) ! It looks like this is only used in this routine... ! so no need to put it in the schedule data-structure ! 
===================================================== - allocate(Global2Local(nelem)) + allocate(Global2Local(nelem), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Global2Local(nelem) failed with stat: '//& + to_str(ierr)) + end if + if(Debug) write(iulog,*)'genEdgeSched: point #1' iSched = PartNumber @@ -120,15 +127,42 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) if(Debug) write(iulog,*)'genEdgeSched: point #5' ! Temporary array to calculate the Buffer Slot - allocate(tmpP(2,nedges+1)) - allocate(tmpS(2,nedges+1)) - allocate(tmpP_ghost(2,nedges+1)) - + allocate(tmpP(2,nedges+1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate tmpP(2,nedges+1) failed with stat: '//& + to_str(ierr)) + end if + + allocate(tmpS(2,nedges+1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate tmpS(2,nedges+1) failed with stat: '//& + to_str(ierr)) + end if + + allocate(tmpP_ghost(2,nedges+1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate tmpP_ghost(2,nedges+1) failed with stat: '//& + to_str(ierr)) + end if ! Allocate all the cycle structures - allocate(LSchedule%SendCycle(nedges)) - allocate(LSchedule%RecvCycle(nedges)) - allocate(LSchedule%MoveCycle(1)) + allocate(LSchedule%SendCycle(nedges), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%SendCycle(nedges) failed with stat: '//& + to_str(ierr)) + end if + + allocate(LSchedule%RecvCycle(nedges), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%RecvCycle(nedges) failed with stat: '//& + to_str(ierr)) + end if + + allocate(LSchedule%MoveCycle(1), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%MoveCycle(1) failed with stat: '//& + to_str(ierr)) + end if ! Initialize the schedules... LSchedule%MoveCycle(1)%ptrP = 0 @@ -139,9 +173,23 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) !================================================================== ! 
Allocate and initalized the index translation arrays Global2Local = -1 - allocate(LSchedule%Local2Global(nelemd0)) - allocate(LSchedule%pIndx(max_neigh_edges*nelemd0)) - allocate(LSchedule%gIndx(max_neigh_edges*nelemd0)) + allocate(LSchedule%Local2Global(nelemd0), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%Local2Global(nelemd0) failed with stat: '//& + to_str(ierr)) + end if + + allocate(LSchedule%pIndx(max_neigh_edges*nelemd0), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%pIndx(max_neigh_edges*nelemd0) failed with stat: '//& + to_str(ierr)) + end if + + allocate(LSchedule%gIndx(max_neigh_edges*nelemd0), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%gIndx(max_neigh_edges*nelemd0) failed with stat: '//& + to_str(ierr)) + end if LSchedule%pIndx(:)%elemId = -1 LSchedule%pIndx(:)%edgeId = -1 @@ -306,9 +354,23 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) nSend = nedges nRecv = nedges - allocate(Rrequest(nRecv)) - allocate(Srequest(nSend)) - allocate(status(MPI_STATUS_SIZE,nRecv)) + allocate(Rrequest(nRecv), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Rrequest(nRecv) failed with stat: '//& + to_str(ierr)) + end if + + allocate(Srequest(nSend), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate Srequest(nSend) failed with stat: '//& + to_str(ierr)) + end if + + allocate(status(MPI_STATUS_SIZE,nRecv), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate status(MPI_STATUS_SIZE,nRecv) failed with stat: '//& + to_str(ierr)) + end if !=============================================================== ! Number of communication points ... to be used later to @@ -321,11 +383,16 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) #if MPI_VERSION >= 3 ! 
Create a communicator that only contains the on-node MPI ranks call MPI_Comm_split_type(par%comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, par%intracomm, ierr) - + call MPI_Comm_size(par%intracomm, par%intracommsize, ierr) call MPI_Comm_rank(par%intracomm, par%intracommrank, ierr) - allocate(intracommranks(par%intracommsize)) + allocate(intracommranks(par%intracommsize), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate intracommranks(par%intracommsize) failed with stat: '//& + to_str(ierr)) + end if + call MPI_Allgather(par%rank,1,MPIinteger_t,intracommranks,1,MPIinteger_t,par%intracomm,ierr) numIntra=0 @@ -333,7 +400,7 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) rank = LSchedule%SendCycle(icycle)%dest - 1 onNode = isIntraComm(intracommranks,rank) LSchedule%SendCycle(icycle)%onNode = onNode - if(onNode) then + if(onNode) then numIntra=numIntra+1 endif enddo @@ -342,9 +409,9 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) onNode = isIntraComm(intracommranks,rank) LSchedule%RecvCycle(icycle)%onNode = onNode enddo - numInter = nsend-numIntra + numInter = nsend-numIntra + - deallocate(intracommranks) #else numIntra = 0 @@ -360,12 +427,68 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) LSchedule%nInter = numInter LSchedule%nIntra = numIntra - allocate(srcFull(nRecv), srcWeightFull(nRecv),destFull(nSend),destWeightFull(nSend)) - if(numInter>0) then - allocate(srcInter(numInter),srcWeightInter(numInter),destInter(numInter), destWeightInter(numInter)) + allocate(srcFull(nRecv), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcFull(nRecv) failed with stat: '//to_str(ierr)) + end if + + allocate(srcWeightFull(nRecv), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcWeightFull(nRecv) failed with stat: '//to_str(ierr)) + end if + + allocate(destFull(nSend), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destFull(nSend) failed with stat: '//to_str(ierr)) + end if + + allocate(destWeightFull(nSend), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destWeightFull(nSend) failed with stat: '//to_str(ierr)) + end if + + if(numInter>0) then + allocate(srcInter(numInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcInter(numInter) failed with stat: '//to_str(ierr)) + end if + + allocate(srcWeightInter(numInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcWeightInter(numInter) failed with stat: '//to_str(ierr)) + end if + + allocate(destInter(numInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destInter(numInter) failed with stat: '//to_str(ierr)) + end if + + allocate(destWeightInter(numInter), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destWeightInter(numInter) failed with stat: '//to_str(ierr)) + end if endif - if(numIntra>0) then - allocate(srcIntra(numIntra),srcWeightIntra(numIntra),destIntra(numIntra), destWeightIntra(numIntra)) + + if(numIntra>0) then + allocate(srcIntra(numIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcIntra(numIntra) failed with stat: '//to_str(ierr)) + end if + + allocate(srcWeightIntra(numIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate srcWeightIntra(numIntra) failed with stat: '//to_str(ierr)) + end if + + allocate(destIntra(numIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destIntra(numIntra) failed with stat: 
'//to_str(ierr)) + end if + + allocate(destWeightIntra(numIntra), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate destWeightIntra(numIntra) failed with stat: '//to_str(ierr)) + end if endif icIntra=0 @@ -375,13 +498,13 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) wgt = LSchedule%SendCycle(icycle)%lengthP destFull(icycle) = dest destWeightFull(icycle) = wgt - if(LSchedule%SendCycle(icycle)%onNode) then + if(LSchedule%SendCycle(icycle)%onNode) then icIntra=icIntra+1 - destIntra(icIntra) = dest + destIntra(icIntra) = dest destWeightIntra(icIntra) = wgt else icInter=icInter+1 - destInter(icInter) = dest + destInter(icInter) = dest destWeightInter(icInter) = wgt endif enddo @@ -390,7 +513,7 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) icInter=0 do icycle=1,nRecv src = LSchedule%RecvCycle(icycle)%source - 1 - wgt = LSchedule%RecvCycle(icycle)%lengthP + wgt = LSchedule%RecvCycle(icycle)%lengthP srcFull(icycle) = src srcWeightFUll(icycle) = wgt if(LSchedule%RecvCycle(icycle)%onNode) then @@ -404,7 +527,7 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) endif enddo - ! construct the FULL communication graph + ! construct the FULL communication graph reorder=.FALSE. call MPI_Dist_graph_create_adjacent(par%comm, nRecv,srcFull,srcWeightFull, & nSend,destFull,destWeightFull,MPI_INFO_NULL,reorder,par%commGraphFull,ierr) @@ -413,7 +536,14 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) call MPI_Error_String(errorcode,errorstring,errorlen,ierr) print *,subname,': Error after call to MPI_dist_graph_create_adjacent(FULL) ',errorstring endif - allocate(LSchedule%destFull(nSend),LSchedule%srcFull(nRecv)) + allocate(LSchedule%destFull(nSend), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%destFull(nSend) failed with stat: '//to_str(ierr)) + end if + allocate(LSchedule%srcFull(nRecv), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate LSchedule%srcFull(nRecv) failed with stat: '//to_str(ierr)) + end if LSchedule%destFull(:) = destFull(:) LSchedule%srcFull(:) = srcFull(:) ! construct the FULL communication -group- (for one-sided operations): diff --git a/src/dynamics/se/dycore/spacecurve_mod.F90 b/src/dynamics/se/dycore/spacecurve_mod.F90 index c7631121..6c5955a3 100644 --- a/src/dynamics/se/dycore/spacecurve_mod.F90 +++ b/src/dynamics/se/dycore/spacecurve_mod.F90 @@ -1,5 +1,7 @@ module spacecurve_mod - use cam_logfile, only: iulog + use cam_logfile, only: iulog + use cam_abortutils, only: endrun + use string_utils, only: to_str implicit none private @@ -902,14 +904,21 @@ function Factor(num) result(res) type (factor_t) :: res integer :: tmp,tmp2,tmp3,tmp5 integer :: i,n + integer :: iret logical :: found + character(len=*), parameter :: subname = 'Factor (SE)' + ! -------------------------------------- ! Allocate for max # of factors ! 
-------------------------------------- tmp = num tmp2 = log2(num) - allocate(res%factors(tmp2)) + allocate(res%factors(tmp2), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate res%factors(tmp2)'//& + ' failed with stat: '//to_str(iret)) + end if n=0 !----------------------- @@ -971,7 +980,6 @@ end function Factor !--------------------------------------------------------- function IsFactorable(n) - use cam_abortutils, only: endrun integer,intent(in) :: n type (factor_t) :: fact @@ -1016,6 +1024,9 @@ subroutine GenSpaceCurve(Mesh) integer :: level,dim integer :: gridsize + integer :: iret + + character(len=*), parameter :: subname = 'GenSpaceCurve (SE)' ! Setup the size of the grid to traverse @@ -1025,10 +1036,18 @@ subroutine GenSpaceCurve(Mesh) level = fact%numfact if(verbose) write(iulog,*)'GenSpacecurve: level is ',level - allocate(ordered(gridsize,gridsize)) + allocate(ordered(gridsize,gridsize), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate ordered(gridsize,gridsize)'//& + ' failed with stat: '//to_str(iret)) + end if ! Setup the working arrays for the traversal - allocate(pos(0:dim-1)) + allocate(pos(0:dim-1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate pos(0:dim-1)'//& + ' failed with stat: '//to_str(iret)) + end if ! The array ordered will contain the visitation order ordered(:,:) = 0 diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 2d298f15..de708a3e 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -36,6 +36,7 @@ module dyn_comp use cam_logfile, only: iulog use cam_abortutils, only: endrun use cam_map_utils, only: iMap +use string_utils, only: to_str use shr_sys_mod, only: shr_sys_flush use parallel_mod, only: par @@ -611,6 +612,7 @@ subroutine dyn_init(dyn_in, dyn_out) integer :: ixcldice, ixcldliq, ixrain, ixsnow, ixgraupel integer :: m_cnst, m + integer :: iret ! variables for initializing energy and axial angular momentum diagnostics character (len = 3), dimension(12) :: stage = (/"dED","dAF","dBD","dAD","dAR","dBF","dBH","dCH","dAH",'dBS','dAS','p2d'/) @@ -658,13 +660,28 @@ subroutine dyn_init(dyn_in, dyn_out) !---------------------------------------------------------------------------- ! Now allocate and set condenstate vars - allocate(cnst_name_gll(qsize)) ! constituent names for gll tracers - allocate(cnst_longname_gll(qsize)) ! long name of constituents for gll tracers + allocate(cnst_name_gll(qsize), stat=iret) ! constituent names for gll tracers + if (iret /= 0) then + call endrun(subname//': allocate cnst_name_gll(qsize) failed with stat: '//to_str(iret)) + end if + + allocate(cnst_longname_gll(qsize), stat=iret) ! 
long name of constituents for gll tracers + if (iret /= 0) then + call endrun(subname//': allocate cnst_longname_gll(qsize) failed with stat: '//to_str(iret)) + end if + + allocate(kord_tr(qsize), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kord_tr(qsize) failed with stat: '//to_str(iret)) + end if - allocate(kord_tr(qsize)) kord_tr(:) = vert_remap_tracer_alg if (ntrac>0) then - allocate(kord_tr_cslam(ntrac)) + allocate(kord_tr_cslam(ntrac), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate kord_tr_cslam(ntrac) failed with stat: '//to_str(iret)) + end if + kord_tr_cslam(:) = vert_remap_tracer_alg end if @@ -990,6 +1007,7 @@ subroutine dyn_run(dyn_state) integer :: nets, nete, ithr integer :: i, ie, j, k, m, nq, m_cnst integer :: n0_qdp, nsplit_local + integer :: iret logical :: ldiag real(r8) :: ftmp(npsq,nlev,3) @@ -999,6 +1017,9 @@ subroutine dyn_run(dyn_state) real(r8), allocatable, dimension(:,:,:) :: ps_before real(r8), allocatable, dimension(:,:,:) :: abs_ps_tend real (kind=r8) :: omega_cn(2,nelemd) !min and max of vertical Courant number + + character(len=*), parameter :: subname = 'dyn_run' + !---------------------------------------------------------------------------- #ifdef debug_coupling @@ -1014,8 +1035,15 @@ subroutine dyn_run(dyn_state) ! ldiag = hist_fld_active('ABS_dPSdt') ldiag = .false. if (ldiag) then - allocate(ps_before(np,np,nelemd)) - allocate(abs_ps_tend(np,np,nelemd)) + allocate(ps_before(np,np,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate ps_before(np,np,nelemd) failed with stat: '//to_str(iret)) + end if + + allocate(abs_ps_tend(np,np,nelemd), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate abs_ps_tend(np,np,nelemd) failed with stat: '//to_str(iret)) + end if end if @@ -1248,20 +1276,37 @@ subroutine read_inidat(dyn_in) nullify(elem) end if - allocate(qtmp(np,np,nlev,nelemd,pcnst)) + allocate(qtmp(np,np,nlev,nelemd,pcnst), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate qtmp(np,np,nlev,nelemd,pcnst) failed with stat: '//& + to_str(ierr)) + end if + qtmp = 0._r8 ! Set mask to indicate which columns are active nullify(ldof) call cam_grid_get_gcid(cam_grid_id(ini_grid_name), ldof) - allocate(pmask(npsq*nelemd)) + allocate(pmask(npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pmask(npsq*nelemd) failed with stat: '//to_str(ierr)) + end if + pmask(:) = (ldof /= 0) ! lat/lon needed in radians latvals_deg => cam_grid_get_latvals(cam_grid_id(ini_grid_name)) lonvals_deg => cam_grid_get_lonvals(cam_grid_id(ini_grid_name)) - allocate(latvals(np*np*nelemd)) - allocate(lonvals(np*np*nelemd)) + allocate(latvals(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate latvals(np*np*nelemd) failed with stat: '//to_str(ierr)) + end if + + allocate(lonvals(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lonvals(np*np*nelemd) failed with stat: '//to_str(ierr)) + end if + latvals(:) = latvals_deg(:)*deg2rad lonvals(:) = lonvals_deg(:)*deg2rad @@ -1279,7 +1324,11 @@ subroutine read_inidat(dyn_in) ! PHIS has already been set by set_phis. Get local copy for ! possible use in setting T and PS in the analytic IC code. 
- allocate(phis_tmp(npsq,nelemd)) + allocate(phis_tmp(npsq,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate phis_tmp(npsq,nelemd) failed with stat: '//to_str(ierr)) + end if + do ie = 1, nelemd k = 1 do j = 1, np @@ -1291,7 +1340,11 @@ subroutine read_inidat(dyn_in) end do inic_wet = .false. - allocate(glob_ind(npsq * nelemd)) + allocate(glob_ind(npsq * nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate glob_ind(npsq*nelemd) failed with stat: '//to_str(ierr)) + end if + j = 1 do ie = 1, nelemd do i = 1, npsq @@ -1302,9 +1355,18 @@ subroutine read_inidat(dyn_in) end do ! First, initialize all the variables, then assign - allocate(dbuf4(npsq, nlev, nelemd, (qsize + 4))) + allocate(dbuf4(npsq, nlev, nelemd, (qsize + 4)), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dbuf4(npsq,nlev,nelemd,(qsize+4)) failed with stat: '//& + to_str(ierr)) + end if + dbuf4 = 0.0_r8 - allocate(m_ind(qsize)) + allocate(m_ind(qsize), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate m_ind(qsize) failed with stat: '//to_str(ierr)) + end if + do m_cnst = 1, qsize m_ind(m_cnst) = m_cnst end do @@ -1361,8 +1423,15 @@ subroutine read_inidat(dyn_in) ! Read ICs from file. Assume all fields in the initial file are on the GLL grid. - allocate(dbuf2(npsq,nelemd)) - allocate(dbuf3(npsq,nlev,nelemd)) + allocate(dbuf2(npsq,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dbuf2(npsq,nelemd) failed with stat: '//to_str(ierr)) + end if + + allocate(dbuf3(npsq,nlev,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dbuf3(npsq,nlev,nelemd) failed with stat: '//to_str(ierr)) + end if ! Check that number of columns in IC file matches grid definition. call check_file_layout(fh_ini, elem, dyn_cols, 'ncdata', .true., dimname) @@ -1453,7 +1522,10 @@ subroutine read_inidat(dyn_in) end if call random_seed(size=rndm_seed_sz) - allocate(rndm_seed(rndm_seed_sz)) + allocate(rndm_seed(rndm_seed_sz), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate rndm_seed(rndm_seed_sz) failed with stat: '//to_str(ierr)) + end if do ie = 1, nelemd ! seed random number generator based on element ID @@ -1517,7 +1589,10 @@ subroutine read_inidat(dyn_in) end if end do - allocate(dbuf3(npsq,nlev,nelemd)) + allocate(dbuf3(npsq,nlev,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dbuf3(npsq,nlev,nelemd) failed with stat: '//to_str(ierr)) + end if do m_cnst = 1, pcnst @@ -1613,7 +1688,12 @@ subroutine read_inidat(dyn_in) write(iulog,*) 'Convert specific/wet mixing ratios to dry' end if - allocate(factor_array(np,np,nlev,nelemd)) + allocate(factor_array(np,np,nlev,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate factor_array(np,np,nlev,nelemd) failed with stat: '//& + to_str(ierr)) + end if + ! ! compute: factor_array = 1/(1-sum(q)) ! 
@@ -1906,11 +1986,21 @@ subroutine set_phis(dyn_in) nullify(elem) end if - allocate(phis_tmp(npsq,nelemd)) + allocate(phis_tmp(npsq,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate phis_tmp(npsq,nelemd) failed with stat: '//& + to_str(ierr)) + end if + phis_tmp = 0.0_r8 if (fv_nphys > 0) then - allocate(phis_phys_tmp(fv_nphys**2,nelemd)) + allocate(phis_phys_tmp(fv_nphys**2,nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate phis_phys_tmp(fv_nphys**2,nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + phis_phys_tmp = 0.0_r8 do ie=1,nelemd elem(ie)%sub_elem_mass_flux=0.0_r8 @@ -1923,7 +2013,12 @@ subroutine set_phis(dyn_in) ! Set mask to indicate which columns are active in GLL grid. nullify(ldof) call cam_grid_get_gcid(cam_grid_id('GLL'), ldof) - allocate(pmask(npsq*nelemd)) + allocate(pmask(npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pmask(npsq*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + pmask(:) = (ldof /= 0) deallocate(ldof) @@ -1979,12 +2074,27 @@ subroutine set_phis(dyn_in) ! lat/lon needed in radians latvals_deg => cam_grid_get_latvals(cam_grid_id('GLL')) lonvals_deg => cam_grid_get_lonvals(cam_grid_id('GLL')) - allocate(latvals(np*np*nelemd)) - allocate(lonvals(np*np*nelemd)) + allocate(latvals(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate latvals(np*np*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(lonvals(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lonvals(np*np*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + latvals(:) = latvals_deg(:)*deg2rad lonvals(:) = lonvals_deg(:)*deg2rad - allocate(glob_ind(npsq*nelemd)) + allocate(glob_ind(npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate glob_ind(npsq*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + j = 1 do ie = 1, nelemd do i = 1, npsq @@ -2000,8 +2110,18 @@ subroutine set_phis(dyn_in) if (fv_nphys > 0) then ! initialize PHIS on physgrid - allocate(latvals_phys(fv_nphys*fv_nphys*nelemd)) - allocate(lonvals_phys(fv_nphys*fv_nphys*nelemd)) + allocate(latvals_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate latvals_phys(fv_nphys*fv_nphys*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(lonvals_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lonvals_phys(fv_nphys*fv_nphys*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + indx = 1 do ie = 1, nelemd do j = 1, fv_nphys @@ -2013,9 +2133,18 @@ subroutine set_phis(dyn_in) end do end do - allocate(pmask_phys(fv_nphys*fv_nphys*nelemd)) + allocate(pmask_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pmask_phys(fv_nphys*fv_nphys*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + pmask_phys(:) = .true. 
- allocate(glob_ind(fv_nphys*fv_nphys*nelemd)) + allocate(glob_ind(fv_nphys*fv_nphys*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate glob_ind(fv_nphys*fv_nphys*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if j = 1 do ie = 1, nelemd @@ -2334,9 +2463,12 @@ subroutine map_phis_from_physgrid_to_gll(fvm,elem,phis_phys_tmp,phis_tmp,pmask) logical , intent(in) :: pmask(npsq*nelemd) type(hybrid_t) :: hybrid - integer :: nets, nete, ie,i,j,indx + integer :: nets, nete, ie,i,j,indx, iret real(r8), allocatable :: fld_phys(:,:,:,:,:),fld_gll(:,:,:,:,:) logical :: llimiter(1) + + character(len=*), parameter :: subname = 'map_phis_from_physgrid_to_gll' + !---------------------------------------------------------------------------- !!$OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,ie) @@ -2345,8 +2477,19 @@ subroutine map_phis_from_physgrid_to_gll(fvm,elem,phis_phys_tmp,phis_tmp,pmask) call get_loop_ranges(hybrid, ibeg=nets, iend=nete) - allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete)) - allocate(fld_gll(np,np,1,1,nets:nete)) + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate '//& + 'fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(fld_gll(np,np,1,1,nets:nete), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fld_gll(np,np,1,1,nets:nete)'//& + ' failed with stat: '//to_str(iret)) + end if + fld_phys = 0.0_r8 do ie = nets, nete fld_phys(1:fv_nphys,1:fv_nphys,1,1,ie) = RESHAPE(phis_phys_tmp(:,ie),(/fv_nphys,fv_nphys/)) diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 45b3ed10..2cfd3335 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -574,22 +574,23 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & real(r8), intent(out), optional :: lon_d_out(:) ! column degree longitudes ! local variables + integer :: ierr real(r8), pointer :: area_d(:) real(r8), pointer :: temp(:) character(len=shr_kind_cl) :: errormsg - character(len=*), parameter :: sub = 'get_horiz_grid_int' + character(len=*), parameter :: subname = 'get_horiz_grid_int' !---------------------------------------------------------------------------- ! 
check that nxy is set to correct size for global arrays if (fv_nphys > 0) then if (nxy < fv_nphys*fv_nphys*nelem_d) then - write(errormsg, *) sub//': arrays too small; Passed', & + write(errormsg, *) subname//': arrays too small; Passed', & nxy, ', needs to be at least', fv_nphys*fv_nphys*nelem_d call endrun(errormsg) end if else if (nxy < ngcols_d) then - write(errormsg,*) sub//': arrays not large enough; ', & + write(errormsg,*) subname//': arrays not large enough; ', & 'Passed', nxy, ', needs to be at least', ngcols_d call endrun(errormsg) end if @@ -597,14 +598,14 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & if ( present(area_d_out) ) then if (size(area_d_out) /= nxy) then - call endrun(sub//': bad area_d_out array size') + call endrun(subname//': bad area_d_out array size') end if area_d => area_d_out call create_global_area(area_d) else if ( present(wght_d_out) ) then if (size(wght_d_out) /= nxy) then - call endrun(sub//': bad wght_d_out array size') + call endrun(subname//': bad wght_d_out array size') end if area_d => wght_d_out call create_global_area(area_d) @@ -619,7 +620,7 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & if (present(clon_d_out)) then if (size(clon_d_out) /= nxy) then - call endrun(sub//': bad clon_d_out array size in dyn_grid') + call endrun(subname//': bad clon_d_out array size in dyn_grid') end if end if @@ -632,14 +633,22 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & if (present(clon_d_out)) then call create_global_coords(clat_d_out, clon_d_out, lat_d_out, lon_d_out) else - allocate(temp(nxy)) + allocate(temp(nxy), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate temp(nxy) failed with stat: '//to_str(ierr)) + end if + call create_global_coords(clat_d_out, temp, lat_d_out, lon_d_out) deallocate(temp) end if else if (present(clon_d_out)) then - allocate(temp(nxy)) + allocate(temp(nxy), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate temp(nxy) failed with stat: '//to_str(ierr)) + end if + call create_global_coords(temp, clon_d_out, lat_d_out, lon_d_out) deallocate(temp) @@ -688,8 +697,10 @@ subroutine dyn_grid_get_elem_coords(ie, rlon, rlat, cdex) real(r8),optional, intent(out) :: rlat(:) ! latitudes of the columns in the element integer, optional, intent(out) :: cdex(:) ! 
global column index - integer :: sb,eb, ii, i,j, icol, igcol + integer :: sb,eb, ii, i,j, icol, igcol, ierr real(r8), allocatable :: clat(:), clon(:) + + character(len=*), parameter :: subname = 'dyn_grid_get_elem_coords' !---------------------------------------------------------------------------- if (fv_nphys > 0) then @@ -699,7 +710,16 @@ subroutine dyn_grid_get_elem_coords(ie, rlon, rlat, cdex) sb = elem(ie)%idxp%UniquePtOffset eb = sb + elem(ie)%idxp%NumUniquePts-1 - allocate( clat(sb:eb), clon(sb:eb) ) + allocate(clat(sb:eb), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate clat(sb:eb) failed with stat: '//to_str(ierr)) + end if + + allocate(clon(sb:eb), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate clon(sb:eb) failed with stat: '//to_str(ierr)) + end if + call UniqueCoords( elem(ie)%idxP, elem(ie)%spherep, clat(sb:eb), clon(sb:eb) ) if (present(cdex)) cdex(:) = -1 @@ -742,7 +762,7 @@ subroutine get_hdim_name(fh_ini, ini_grid_hdim_name) integer :: ierr, pio_errtype integer :: ncol_did - character(len=*), parameter :: sub = 'get_hdim_name' + character(len=*), parameter :: subname = 'get_hdim_name' !---------------------------------------------------------------------------- ! Set PIO to return error flags. @@ -802,7 +822,7 @@ subroutine define_cam_grids() use dimensions_mod, only: nc ! Local variables - integer :: i, ii, j, k, ie, mapind + integer :: i, ii, j, k, ie, mapind, ierr character(len=8) :: latname, lonname, ncolname, areaname type(horiz_coord_t), pointer :: lat_coord @@ -825,6 +845,8 @@ subroutine define_cam_grids() real(r8), allocatable :: physgrid_coord(:) real(r8), pointer :: physgrid_area(:) integer(iMap), pointer :: physgrid_map(:) + + character(len=*), parameter :: subname = 'define_cam_grids' !---------------------------------------------------------------------------- !----------------------- @@ -841,10 +863,29 @@ subroutine define_cam_grids() end do end do - allocate(pelat_deg(np*np*nelemd)) - allocate(pelon_deg(np*np*nelemd)) - allocate(pearea(np*np*nelemd)) - allocate(pemap(np*np*nelemd)) + allocate(pelat_deg(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pelat_deg(np*np*nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(pelon_deg(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pelon_deg(np*np*nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(pearea(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pearea(np*np*nelemd) failed with stat: '//& + to_str(ierr)) + end if + + allocate(pemap(np*np*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate pemap(np*np*nelemd) failed with stat: '//& + to_str(ierr)) + end if pemap = 0_iMap ii = 1 @@ -882,7 +923,12 @@ subroutine define_cam_grids() 'longitude', 'degrees_east', 1, size(pelon_deg), pelon_deg, map=pemap) ! 
Map for GLL grid - allocate(grid_map(3,npsq*nelemd)) + allocate(grid_map(3,npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_map(3,npsq*nelemd) failed with stat: '//& + to_str(ierr)) + end if + grid_map = 0_iMap mapind = 1 do j = 1, nelemd @@ -944,9 +990,22 @@ subroutine define_cam_grids() ncols_fvm = nc * nc * nelemd ngcols_fvm = nc * nc * nelem_d - allocate(fvm_coord(ncols_fvm)) - allocate(fvm_map(ncols_fvm)) - allocate(fvm_area(ncols_fvm)) + allocate(fvm_coord(ncols_fvm), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate fvm_coord(ncols_fvm) failed with stat: '//& + to_str(ierr)) + end if + allocate(fvm_map(ncols_fvm), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate fvm_map(ncols_fvm) failed with stat: '//& + to_str(ierr)) + end if + + allocate(fvm_area(ncols_fvm), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate fvm_area(ncols_fvm) failed with stat: '//& + to_str(ierr)) + end if do ie = 1, nelemd k = 1 @@ -979,7 +1038,12 @@ subroutine define_cam_grids() map=fvm_map) ! Map for FVM grid - allocate(grid_map(3, ncols_fvm)) + allocate(grid_map(3, ncols_fvm), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_map(3, ncols_fvm) failed with stat: '//& + to_str(ierr)) + end if + grid_map = 0_iMap mapind = 1 do j = 1, nelemd @@ -1014,9 +1078,24 @@ subroutine define_cam_grids() ncols_physgrid = fv_nphys * fv_nphys * nelemd ngcols_physgrid = fv_nphys * fv_nphys * nelem_d - allocate(physgrid_coord(ncols_physgrid)) - allocate(physgrid_map(ncols_physgrid)) - allocate(physgrid_area(ncols_physgrid)) + + allocate(physgrid_coord(ncols_physgrid), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate physgrid_coord(ncols_physgrid) failed with stat: '//& + to_str(ierr)) + end if + + allocate(physgrid_map(ncols_physgrid), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate physgrid_map(ncols_physgrid) failed with stat: '//& + to_str(ierr)) + end if + + allocate(physgrid_area(ncols_physgrid), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate physgrid_area(ncols_physgrid) failed with stat: '//& + to_str(ierr)) + end if do ie = 1, nelemd k = 1 @@ -1049,7 +1128,12 @@ subroutine define_cam_grids() map=physgrid_map) ! Map for physics grid - allocate(grid_map(3, ncols_physgrid)) + allocate(grid_map(3, ncols_physgrid), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_map(3, ncols_physgrid) failed with stat: '//& + to_str(ierr)) + end if + grid_map = 0_iMap mapind = 1 do j = 1, nelemd @@ -1167,11 +1251,11 @@ subroutine create_global_area(area_d) integer :: ie, sb, eb, i, j, k integer :: ierr integer :: ibuf - character(len=*), parameter :: sub = 'create_global_area' + character(len=*), parameter :: subname = 'create_global_area' !---------------------------------------------------------------------------- if (masterproc) then - write(iulog, *) sub//': INFO: Non-scalable action: gathering global area in SE dycore.' + write(iulog, *) subname//': INFO: Non-scalable action: gathering global area in SE dycore.' end if if (fv_nphys > 0) then ! physics uses an FVM grid @@ -1179,8 +1263,16 @@ subroutine create_global_area(area_d) ! first gather all data onto masterproc, in mpi task order (via ! 
mpi_gatherv) then redorder into globalID order (via dp_reorder) ncol = fv_nphys*fv_nphys*nelem_d - allocate(rbuf(ncol)) - allocate(dp_area(fv_nphys*fv_nphys,nelem_d)) + allocate(rbuf(ncol), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate rbuf(ncol) failed with stat: '//to_str(ierr)) + end if + + allocate(dp_area(fv_nphys*fv_nphys,nelem_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dp_area(fv_nphys*fv_nphys,nelem_d)'//& + ' failed with stat: '//to_str(ierr)) + end if do ie = 1, nelemd k = 1 @@ -1202,7 +1294,7 @@ subroutine create_global_area(area_d) end do ! Check to make sure we counted correctly if (rdispls(npes) + recvcounts(npes) /= ncol) then - call endrun(sub//': bad rdispls array size') + call endrun(subname//': bad rdispls array size') end if end if @@ -1222,7 +1314,12 @@ subroutine create_global_area(area_d) else ! physics is on the GLL grid - allocate(rbuf(ngcols_d)) + allocate(rbuf(ngcols_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate rbuf(ngcols_d) failed with stat: '//& + to_str(ierr)) + end if + do ie = 1, nelemdmax if (ie <= nelemd) then rdispls(iam+1) = elem(ie)%idxp%UniquePtOffset - 1 @@ -1278,11 +1375,11 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) integer :: ierr integer :: ibuf integer :: ncol - character(len=*), parameter :: sub='create_global_coords' + character(len=*), parameter :: subname = 'create_global_coords' !---------------------------------------------------------------------------- if (masterproc) then - write(iulog, *) sub//': INFO: Non-scalable action: Creating global coords in SE dycore.' + write(iulog, *) subname//': INFO: Non-scalable action: Creating global coords in SE dycore.' end if clat(:) = -iam @@ -1300,9 +1397,24 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) ! mpi_gatherv) then redorder into globalID order (via dp_reorder) ncol = fv_nphys*fv_nphys*nelem_d - allocate(rbuf(ncol)) - allocate(dp_lon(fv_nphys*fv_nphys,nelem_d)) - allocate(dp_lat(fv_nphys*fv_nphys,nelem_d)) + allocate(rbuf(ncol), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate rbuf(ncol) failed with stat: '//& + to_str(ierr)) + end if + + allocate(dp_lon(fv_nphys*fv_nphys,nelem_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dp_lon(fv_nphys*fv_nphys,nelem_d)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(dp_lat(fv_nphys*fv_nphys,nelem_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dp_lat(fv_nphys*fv_nphys,nelem_d)'//& + ' failed with stat: '//to_str(ierr)) + end if + do ie = 1, nelemd k = 1 @@ -1326,7 +1438,7 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) end do ! Check to make sure we counted correctly if (rdispls(npes) + recvcounts(npes) /= ncol) then - call endrun(sub//': bad rdispls array size') + call endrun(subname//': bad rdispls array size') end if end if @@ -1365,7 +1477,11 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) else ! 
physics uses the GLL grid - allocate(rbuf(ngcols_d)) + allocate(rbuf(ngcols_d), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate rbuf(ngcols_d) failed with stat: '//& + to_str(ierr)) + end if do ie = 1, nelemdmax diff --git a/src/dynamics/se/native_mapping.F90 b/src/dynamics/se/native_mapping.F90 index dba52916..bb901791 100644 --- a/src/dynamics/se/native_mapping.F90 +++ b/src/dynamics/se/native_mapping.F90 @@ -86,6 +86,7 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are use shr_infnan_mod, only: isnan=>shr_infnan_isnan use cam_pio_utils, only: cam_pio_openfile, cam_pio_createfile + use string_utils, only: to_str use pio, only: pio_noerr, pio_openfile, pio_createfile, pio_closefile, & pio_get_var, pio_put_var, pio_write_darray,pio_int, pio_double, & @@ -153,6 +154,8 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are character(len=8) :: cdate, ctime integer :: olditype, oldnlat, oldnlon, itype + character(len=*), parameter :: subname = 'create_native_mapping_files' + !Remove once "official" CAMDEN fillvalue code has been developed -JN: real(r8) :: fillvalue = 9.87e36_r8 @@ -202,11 +205,35 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are ierr = pio_inq_dimid( ogfile, 'grid_size', dimid) ierr = pio_inq_dimlen( ogfile, dimid, npts) - allocate(lat(npts), lon(npts), grid_imask(npts), areab(npts)) + allocate(lat(npts), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lat(npts) failed with stat: '//to_str(ierr)) + end if + + allocate(lon(npts), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate lon(npts) failed with stat: '//to_str(ierr)) + end if + + allocate(grid_imask(npts), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate grid_imask(npts) failed with stat: '//& + to_str(ierr)) + end if + + allocate(areab(npts), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate areab(npts) failed with stat: '//to_str(ierr)) + end if ierr = pio_inq_dimid( ogfile, 'grid_rank', dimid) ierr = pio_inq_dimlen(ogfile, dimid, dg_rank) - allocate(dg_dims(dg_rank)) + allocate(dg_dims(dg_rank), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate dg_dims(dg_rank) failed with stat: '//& + to_str(ierr)) + end if + ierr = pio_inq_varid( ogfile, 'grid_dims', vid) ierr = pio_get_var( ogfile, vid, dg_dims) @@ -309,9 +336,24 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are ! allocate storage do ii=1,nelemd ngrid = interpdata(ii)%n_interp - allocate(interpdata(ii)%interp_xy( ngrid ) ) - allocate(interpdata(ii)%ilat( ngrid ) ) - allocate(interpdata(ii)%ilon( ngrid ) ) + allocate(interpdata(ii)%interp_xy( ngrid ), stat=ierr ) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%interp_xy(ngrid)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(interpdata(ii)%ilat( ngrid ), stat=ierr ) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%ilat(ngrid)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(interpdata(ii)%ilon( ngrid ), stat=ierr ) + if (ierr /= 0) then + call endrun(subname//': allocate interpdata(ii)%ilon(ngrid)'//& + ' failed with stat: '//to_str(ierr)) + end if + interpdata(ii)%n_interp=0 ! 
reset counter enddo @@ -338,10 +380,29 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are end do - allocate(h(int(countx))) - allocate(h1d(int(countx)*npsq*nelemd)) - allocate(row(int(countx)*npsq*nelemd)) - allocate(col(int(countx)*npsq*nelemd)) + allocate(h(int(countx)), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate h(int(countx)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(h1d(int(countx)*npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate h1d(int(countx)*npsq*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(row(int(countx)*npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate row(int(countx)*npsq*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if + + allocate(col(int(countx)*npsq*nelemd), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate col(int(countx)*npsq*nelemd)'//& + ' failed with stat: '//to_str(ierr)) + end if row = 0 col = 0 @@ -387,7 +448,11 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are call mpi_allreduce(cntperelem_in, cntperelem_out, nelem, MPI_INTEGER, MPI_MAX, par%comm, ierr) - allocate(ldof(ngrid)) + allocate(ldof(ngrid), stat=ierr) + if (ierr /= 0) then + call endrun(subname//': allocate ldof(ngrid) failed with stat: '//to_str(ierr)) + end if + ldof = 0 ii=1 do ie=1,nelemd diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 index 18768a0f..e108335a 100644 --- a/src/dynamics/se/stepon.F90 +++ b/src/dynamics/se/stepon.F90 @@ -181,6 +181,8 @@ subroutine diag_dynvar_ic(elem, fvm) !use physconst, only: get_sum_species, get_ps,thermodynamic_active_species_idx !use physconst, only: thermodynamic_active_species_idx_dycore,get_dp_ref use hycoef, only: hyai, hybi, ps0 + use cam_abortutils, only: endrun + use string_utils, only: to_str !SE dycore: use time_mod, only: TimeLevel_Qdp ! dynamics typestep @@ -200,6 +202,7 @@ subroutine diag_dynvar_ic(elem, fvm) ! 
Local variables
   integer :: ie, i, j, k, m, m_cnst, nq
   integer :: tl_f, tl_qdp
+  integer :: iret
   character(len=fieldname_len) :: tfname

   type(hybrid_t) :: hybrid
@@ -210,12 +213,19 @@ subroutine diag_dynvar_ic(elem, fvm)
   logical,  allocatable :: llimiter(:)
   real(r8)              :: qtmp(np,np,nlev), dp_ref(np,np,nlev), ps_ref(np,np)
   real(r8), allocatable :: factor_array(:,:,:)
+
+  character(len=*), parameter :: subname = 'diag_dynvar_ic'
   !----------------------------------------------------------------------------

   tl_f = timelevel%n0
   call TimeLevel_Qdp(TimeLevel, qsplit, tl_Qdp)

-  allocate(ftmp(npsq,nlev,2))
+  allocate(ftmp(npsq,nlev,2), stat=iret)
+  if (iret /= 0) then
+    call endrun(subname//': allocate ftmp(npsq,nlev,2) failed with stat: '//&
+                to_str(iret))
+  end if
+
   !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN:
#if 0
@@ -303,6 +313,11 @@ subroutine diag_dynvar_ic(elem, fvm)

   if (hist_fld_active('PS_gll')) then
-    allocate(fld_2d(np,np))
+    allocate(fld_2d(np,np), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate fld_2d(np, np) failed with stat: '//&
+                  to_str(iret))
+    end if
+
     do ie = 1, nelemd
       call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),&
            thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0)
@@ -326,6 +341,11 @@ subroutine diag_dynvar_ic(elem, fvm)
   !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN:
#if 0
-    allocate(fld_2d(np,np))
+    allocate(fld_2d(np,np), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate fld_2d(np, np) failed with stat: '//&
+                  to_str(iret))
+    end if
+
     do ie = 1, nelemd
       call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),&
            thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0)
@@ -337,7 +357,13 @@ subroutine diag_dynvar_ic(elem, fvm)
       call outfld('PS&IC', ftmp(:,1,1), npsq, ie)
     end do
     deallocate(fld_2d)
-    if (fv_nphys < 1) allocate(factor_array(np,np,nlev))
+    if (fv_nphys < 1) then
+      allocate(factor_array(np,np,nlev), stat=iret)
+      if (iret /= 0) then
+        call endrun(subname//': allocate factor_array(np,np,nlev) failed with stat: '//&
+                    to_str(iret))
+      end if
+    end if
#endif

   do ie = 1, nelemd
@@ -374,10 +400,28 @@ subroutine diag_dynvar_ic(elem, fvm)
     hybrid = config_thread_region(par,'serial')
     call get_loop_ranges(hybrid, ibeg=nets, iend=nete)

-    allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete))
-    allocate(fld_gll(np,np,nlev,ntrac,nets:nete))
-    allocate(llimiter(ntrac))
-    allocate(factor_array(nc,nc,nlev))
+    allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete)'//&
+                  ' failed with stat: '//to_str(iret))
+    end if
+
+    allocate(fld_gll(np,np,nlev,ntrac,nets:nete), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate fld_gll(np,np,nlev,ntrac,nets:nete)'//&
+                  ' failed with stat: '//to_str(iret))
+    end if
+
+    allocate(llimiter(ntrac), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate llimiter(ntrac) failed with stat: '//to_str(iret))
+    end if
+
+    allocate(factor_array(nc,nc,nlev), stat=iret)
+    if (iret /= 0) then
+      call endrun(subname//': allocate factor_array(nc,nc,nlev) failed with stat: '//to_str(iret))
+    end if
+
     llimiter = .true.
do ie = nets, nete call get_sum_species(1,nc,1,nc,1,nlev,ntrac,fvm(ie)%c(1:nc,1:nc,:,:),thermodynamic_active_species_idx,factor_array) From b813ba24b49838796373ef665e9a1f4c7e275eef Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 12 Apr 2021 10:53:21 -0600 Subject: [PATCH 13/45] Fix runtime namelist errors for SE dycore. --- cime_config/cam_config.py | 15 +- cime_config/namelist_definition_cam.xml | 346 ++++++++++-------------- src/dynamics/se/dyn_comp.F90 | 12 +- 3 files changed, 171 insertions(+), 202 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 26b1e157..f0b1c7cf 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -569,8 +569,9 @@ def __init__(self, case, case_log): case_nx = case.get_value("ATM_NX") # Number of x-dimension grid-points (longitudes) case_ny = case.get_value("ATM_NY") # Number of y-dimension grid-points (latitudes) comp_ocn = case.get_value("COMP_OCN") # CESM ocean component - exeroot = case.get_value("EXEROOT") # model executable path - nthrds = case.get_value("NTHRDS_ATM") # number of model OpenMP threads + exeroot = case.get_value("EXEROOT") # Model executable path + nthrds = case.get_value("NTHRDS_ATM") # Number of model OpenMP threads + start_date = case.get_value("RUN_STARTDATE") # Model simulation starte date # Save case variables needed for code auto-generation: self.__atm_root = case.get_value("COMP_ROOT_DIR_ATM") @@ -636,6 +637,16 @@ def __init__(self, case, case_log): 'vert_coord_nl', 'ref_pres_nl'] + #---------------------------------------------------- + # Set CAM start date (needed for namelist generation) + #---------------------------------------------------- + + # Remove dashes from CIME-provided start date: + start_date_cam = start_date.replace('-','') + + self.create_config("ic_ymd", "Start date of model run.", + start_date_cam, is_nml_attr=True) + #------------------------ # Set CAM physics columns #------------------------ diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index de33b4ef..e369513b 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -115,7 +115,7 @@ nudging_nl Full pathname of analyses data to use for nudging. - (e.g. '/$DIN_LOC_ROOT/atm/cam/nudging/') + (e.g. '$DIN_LOC_ROOT/atm/cam/nudging/') Default: none @@ -2158,8 +2158,8 @@ Default: UNKNOWN. - atm/cam/topo/fv_0.9x1.25_nc3000_Nsw006_Nrs002_Co008_Fi001_ZR_c160505.nc - atm/cam/topo/fv_1.9x2.5_nc3000_Nsw084_Nrs016_Co120_Fi001_ZR_061116.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_0.9x1.25_nc3000_Nsw006_Nrs002_Co008_Fi001_ZR_c160505.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_1.9x2.5_nc3000_Nsw084_Nrs016_Co120_Fi001_ZR_061116.nc @@ -2246,7 +2246,7 @@ from deep convection. - atm/waccm/gw/newmfspectra40_dc25.nc + $DIN_LOC_ROOT/atm/waccm/gw/newmfspectra40_dc25.nc @@ -2258,7 +2258,7 @@ from shallow convection. - atm/waccm/gw/mfspectra_shallow_c140530.nc + $DIN_LOC_ROOT/atm/waccm/gw/mfspectra_shallow_c140530.nc @@ -2490,7 +2490,7 @@ values. - atm/cam/ggas/ghg_hist_1765-2005_c091218.nc + $DIN_LOC_ROOT/atm/cam/ggas/ghg_hist_1765-2005_c091218.nc @@ -2679,7 +2679,7 @@ enabled via the argument "-prog_species GHG" to configure. 
- atm/cam/ggas/noaamisc.r8.nc + $DIN_LOC_ROOT/atm/cam/ggas/noaamisc.r8.nc @@ -2750,8 +2750,8 @@ Default: none - atm/cam/ggas/emissions-cmip6_CO2_anthro_surface_175001-201512_fv_0.9x1.25_c20181011.nc - atm/cam/ggas/emissions-cmip6_CO2_anthro_surface_175001-201512_fv_1.9x2.5_c20181011.nc + $DIN_LOC_ROOT/atm/cam/ggas/emissions-cmip6_CO2_anthro_surface_175001-201512_fv_0.9x1.25_c20181011.nc + $DIN_LOC_ROOT/atm/cam/ggas/emissions-cmip6_CO2_anthro_surface_175001-201512_fv_1.9x2.5_c20181011.nc @@ -6878,11 +6878,11 @@ Default: UNKNOWN. - atm/waccm/emis/meteor_smoke_kalashnikova.nc - atm/waccm/emis/meteor_smoke_kalashnikova.nc - atm/waccm/emis/meteor_smoke_kalashnikova.nc - atm/waccm/emis/meteor_smoke_kalashnikova.nc - atm/waccm/emis/early_earth_haze.nc + $DIN_LOC_ROOT/atm/waccm/emis/meteor_smoke_kalashnikova.nc + $DIN_LOC_ROOT/atm/waccm/emis/meteor_smoke_kalashnikova.nc + $DIN_LOC_ROOT/atm/waccm/emis/meteor_smoke_kalashnikova.nc + $DIN_LOC_ROOT/atm/waccm/emis/meteor_smoke_kalashnikova.nc + $DIN_LOC_ROOT/atm/waccm/emis/early_earth_haze.nc @@ -6895,10 +6895,10 @@ Default: UNKNOWN. - atm/waccm/emis/smoke_grf_frentzke.nc - atm/waccm/emis/smoke_grf_frentzke.nc - atm/waccm/emis/smoke_grf_frentzke.nc - atm/waccm/emis/smoke_grf_frentzke.nc + $DIN_LOC_ROOT/atm/waccm/emis/smoke_grf_frentzke.nc + $DIN_LOC_ROOT/atm/waccm/emis/smoke_grf_frentzke.nc + $DIN_LOC_ROOT/atm/waccm/emis/smoke_grf_frentzke.nc + $DIN_LOC_ROOT/atm/waccm/emis/smoke_grf_frentzke.nc @@ -6968,8 +6968,8 @@ Default: UNKNOWN. - atm/cam/physprops/mice_warren2008.nc - atm/cam/physprops/mice_warren2008.nc + $DIN_LOC_ROOT/atm/cam/physprops/mice_warren2008.nc + $DIN_LOC_ROOT/atm/cam/physprops/mice_warren2008.nc @@ -6985,8 +6985,8 @@ Default: UNKNOWN. - atm/cam/dst/soil_erosion_factor_1x1_c120907.nc - atm/cam/dst/soil_erosion_factor_1x1_c120907.nc + $DIN_LOC_ROOT/atm/cam/dst/soil_erosion_factor_1x1_c120907.nc + $DIN_LOC_ROOT/atm/cam/dst/soil_erosion_factor_1x1_c120907.nc @@ -7183,7 +7183,7 @@ high latitude electric potential model. - atm/waccm/efld/wei05sc_c080415.nc + $DIN_LOC_ROOT/atm/waccm/efld/wei05sc_c080415.nc @@ -7353,7 +7353,7 @@ Default: UNKNOWN. 
- atm/cam/rad/abs_ems_factors_fastvx.c030508.nc + $DIN_LOC_ROOT/atm/cam/rad/abs_ems_factors_fastvx.c030508.nc @@ -7615,7 +7615,7 @@ Default: none - atm/cam/physprops/water_refindex_rrtmg_c080910.nc + $DIN_LOC_ROOT/atm/cam/physprops/water_refindex_rrtmg_c080910.nc @@ -7628,14 +7628,14 @@ Default: none - atm/cam/chem/trop_mam/atmsrf_ne5np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne16np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne30np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne30pg3_180522.nc - atm/cam/chem/trop_mam/atmsrf_ne60np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne120np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne240np4_110920.nc - atm/cam/chem/trop_mam/atmsrf_ne0np4conus30x8_161116.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne5np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne16np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne30np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne30pg3_180522.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne60np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne120np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne240np4_110920.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/atmsrf_ne0np4conus30x8_161116.nc @@ -7686,7 +7686,7 @@ Default: none - atm/cam/physprops/iceoptics_c080917.nc + $DIN_LOC_ROOT/atm/cam/physprops/iceoptics_c080917.nc @@ -7698,7 +7698,7 @@ Default: none - atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc + $DIN_LOC_ROOT/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc @@ -7752,8 +7752,8 @@ Default: UNKNOWN. - atm/cam/scam/iop/ARM97_4scam.nc - atm/cam/scam/iop/ARM95_4scam.nc + $DIN_LOC_ROOT/atm/cam/scam/iop/ARM97_4scam.nc + $DIN_LOC_ROOT/atm/cam/scam/iop/ARM95_4scam.nc @@ -7995,12 +7995,12 @@ as a time series - atm/cam/solar/solar_ave_sc19-sc23.c090810.nc - atm/cam/solar/solar_ave_sc19-sc23.c090810.nc - atm/cam/solar/spectral_irradiance_Lean_1610-2009_ann_c100405.nc - atm/cam/solar/spectral_irradiance_Lean_1610-2009_ann_c100405.nc - atm/cam/solar/SolarForcing1995-2005avg_c160929.nc - atm/cam/solar/SolarForcing1995-2005avg_c160929.nc + $DIN_LOC_ROOT/atm/cam/solar/solar_ave_sc19-sc23.c090810.nc + $DIN_LOC_ROOT/atm/cam/solar/solar_ave_sc19-sc23.c090810.nc + $DIN_LOC_ROOT/atm/cam/solar/spectral_irradiance_Lean_1610-2009_ann_c100405.nc + $DIN_LOC_ROOT/atm/cam/solar/spectral_irradiance_Lean_1610-2009_ann_c100405.nc + $DIN_LOC_ROOT/atm/cam/solar/SolarForcing1995-2005avg_c160929.nc + $DIN_LOC_ROOT/atm/cam/solar/SolarForcing1995-2005avg_c160929.nc @@ -8022,10 +8022,10 @@ solar and geomagnetic activity( F10.7, F10.7a, Kp, Ap ). - atm/waccm/phot/wasolar_ave.nc - atm/waccm/solar/wasolar_c140408.nc - atm/waccm/solar/wasolar_c140408.nc - atm/cam/solar/SolarForcing1995-2005avg_c160929.nc + $DIN_LOC_ROOT/atm/waccm/phot/wasolar_ave.nc + $DIN_LOC_ROOT/atm/waccm/solar/wasolar_c140408.nc + $DIN_LOC_ROOT/atm/waccm/solar/wasolar_c140408.nc + $DIN_LOC_ROOT/atm/cam/solar/SolarForcing1995-2005avg_c160929.nc @@ -8037,7 +8037,7 @@ (solar wind velocity and density; IMF By and Bz components). - atm/waccm/solar/solar_wind_imf_OMNI_WACCMX_2000001-2017365_c180731.nc + $DIN_LOC_ROOT/atm/waccm/solar/solar_wind_imf_OMNI_WACCMX_2000001-2017365_c180731.nc @@ -8224,7 +8224,7 @@ UNSET_PATH - atm/cam/chem/trop_mozart/ub/clim_p_trop.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ub/clim_p_trop.nc @@ -8401,7 +8401,7 @@ Full pathname of boundary dataset for airplane emissions. 
- atm/cam/chem/trop_mozart/emis/emissions.aircraft.T42LR.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/emis/emissions.aircraft.T42LR.nc @@ -8413,7 +8413,7 @@ method of calculating dry deposition of chemical tracers. - atm/cam/chem/trop_mozart/dvel/clim_soilw.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/dvel/clim_soilw.nc @@ -8425,7 +8425,7 @@ in the 'table' method of calculating dry deposition of chemical tracers. - atm/cam/chem/trop_mozart/dvel/depvel_monthly.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/dvel/depvel_monthly.nc @@ -8437,8 +8437,8 @@ method of calculating dry deposition of chemical tracers. - atm/cam/chem/trop_mozart/dvel/regrid_vegetation.nc - atm/cam/chem/trop_mozart/dvel/regrid_vegetation_all_zero_aquaplanet_1deg_regularGrid_c20170421.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/dvel/regrid_vegetation.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/dvel/regrid_vegetation_all_zero_aquaplanet_1deg_regularGrid_c20170421.nc @@ -8450,7 +8450,7 @@ method of calculating dry deposition of chemical tracers. - atm/cam/chem/trop_mozart/dvel/season_wes.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/dvel/season_wes.nc @@ -8504,7 +8504,7 @@ for high solar fluxes from the Scherliess low latitude electric potential model. - atm/waccm/efld/coeff_hflux.dat + $DIN_LOC_ROOT/atm/waccm/efld/coeff_hflux.dat @@ -8516,7 +8516,7 @@ for low solar fluxes from the Scherliess low latitude electric potential model. - atm/waccm/efld/coeff_lflux.dat + $DIN_LOC_ROOT/atm/waccm/efld/coeff_lflux.dat @@ -8549,7 +8549,7 @@ photo reactions producing electrons. - atm/waccm/phot/electron_121129.dat + $DIN_LOC_ROOT/atm/waccm/phot/electron_121129.dat @@ -8611,7 +8611,7 @@ Full pathname of dataset for EUVAC solar EUV model (0.05-121nm). - atm/waccm/phot/EUVAC_reference_c170222.nc + $DIN_LOC_ROOT/atm/waccm/phot/EUVAC_reference_c170222.nc @@ -8740,7 +8740,7 @@ Full pathname of dataset for fixed lower boundary conditions. - atm/waccm/lb/LBC_1765-2100_1.9x2.5_CCMI_RCP60_za_RNOCStrend_c141002.nc + $DIN_LOC_ROOT/atm/waccm/lb/LBC_1765-2100_1.9x2.5_CCMI_RCP60_za_RNOCStrend_c141002.nc @@ -8850,7 +8850,7 @@ Full pathname of dataset for chemical tracers constrained in the stratosphere - atm/cam/chem/trop_mozart/ub/ubvals_b40.20th.track1_1996-2005_c110315.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ub/ubvals_b40.20th.track1_1996-2005_c110315.nc @@ -8898,7 +8898,7 @@ Full pathname of dataset for the neutral species absorption cross sections. - atm/waccm/phot/photon_c130710.dat + $DIN_LOC_ROOT/atm/waccm/phot/photon_c130710.dat @@ -8909,7 +8909,7 @@ Full pathname of dataset for fast-tuv photolysis cross sections - atm/cam/chem/trop_mozart/phot/tuv_xsect.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/phot/tuv_xsect.nc @@ -8920,7 +8920,7 @@ Full pathname of dataset of O2 cross sections for fast-tuv photolysis - atm/cam/chem/trop_mozart/phot/o2src.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/phot/o2src.nc @@ -8931,7 +8931,7 @@ Full pathname of dataset of O2 and 03 column densities above the model for look-up-table photolysis - atm/cam/chem/trop_mozart/phot/exo_coldens.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/phot/exo_coldens.nc @@ -8953,7 +8953,7 @@ {{ hilight }}aircraft_specifier{{ closehilight }}. - atm/cam/ggas + $DIN_LOC_ROOT/atm/cam/ggas @@ -9067,8 +9067,8 @@ Default: UNKNOWN. - atm/cam/chem/trop_mozart_aero/aero - atm/cam/chem/trop_mam/aero + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/aero @@ -9189,8 +9189,8 @@ Default: UNKNOWN. 
- atm/cam/chem/trop_mozart_aero/aero - atm/cam/chem/trop_mam/aero + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero + $DIN_LOC_ROOT/atm/cam/chem/trop_mam/aero @@ -9301,7 +9301,7 @@ Default: UNKNOWN. - atm/cam/chem/methane + $DIN_LOC_ROOT/atm/cam/chem/methane @@ -9411,8 +9411,8 @@ {{ hilight }}prescribed_ozone_filelist{{ closehilight }}. - atm/cam/ozone - atm/cam/ozone_strataero + $DIN_LOC_ROOT/atm/cam/ozone + $DIN_LOC_ROOT/atm/cam/ozone_strataero @@ -9625,10 +9625,10 @@ {{ hilight }}prescribed_strataero_filelist{{ closehilight }}. - atm/waccm/sulf - atm/cam/volc - atm/cam/ozone - atm/cam/ozone_strataero + $DIN_LOC_ROOT/atm/waccm/sulf + $DIN_LOC_ROOT/atm/cam/volc + $DIN_LOC_ROOT/atm/cam/ozone + $DIN_LOC_ROOT/atm/cam/ozone_strataero @@ -9747,7 +9747,7 @@ Full pathname of dataset for radiative source function used in look up table photloysis - atm/waccm/phot/RSF_GT200nm_v3.0_c080811.nc + $DIN_LOC_ROOT/atm/waccm/phot/RSF_GT200nm_v3.0_c080811.nc @@ -9759,7 +9759,7 @@ to calculate its upper boundary concentration. - atm/waccm/ub/snoe_eof.nc + $DIN_LOC_ROOT/atm/waccm/ub/snoe_eof.nc @@ -9770,10 +9770,10 @@ Full pathname of boundary dataset for soil erodibility factors. - atm/cam/dst/dst_source2x2tunedcam6-2x2-04062017.nc - atm/cam/dst/dst_source2x2_cam5.4_c150327.nc - atm/cam/dst/dst_source2x2tuned-cam4-06132012.nc - atm/cam/dst/dst_source1x1tuned-cam4-06202012.nc + $DIN_LOC_ROOT/atm/cam/dst/dst_source2x2tunedcam6-2x2-04062017.nc + $DIN_LOC_ROOT/atm/cam/dst/dst_source2x2_cam5.4_c150327.nc + $DIN_LOC_ROOT/atm/cam/dst/dst_source2x2tuned-cam4-06132012.nc + $DIN_LOC_ROOT/atm/cam/dst/dst_source1x1tuned-cam4-06202012.nc @@ -9809,7 +9809,7 @@ Full pathname of dataset containing tropopheric sulfate aerosols - atm/waccm/sulf/sulfate.ar5_camchem_c130304.nc + $DIN_LOC_ROOT/atm/waccm/sulf/sulfate.ar5_camchem_c130304.nc @@ -9913,7 +9913,7 @@ Full pathname of dataset for TGCM upper boundary - atm/waccm/ub/tgcm_ubc_1993_c100204.nc + $DIN_LOC_ROOT/atm/waccm/ub/tgcm_ubc_1993_c100204.nc @@ -9997,7 +9997,7 @@ Default: none. - atm/cam/chem/trop_mozart/ub/EESC_1850-2100_c090603.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ub/EESC_1850-2100_c090603.nc @@ -10046,7 +10046,7 @@ Default: none. - atm/cam/chem/trop_mozart/ub + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ub @@ -10143,10 +10143,10 @@ Default: UNKNOWN. - atm/cam/chem/trop_mozart_aero/oxid - atm/waccm/halons - atm/cam/chem/methane - atm/waccm/halons + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/oxid + $DIN_LOC_ROOT/atm/waccm/halons + $DIN_LOC_ROOT/atm/cam/chem/methane + $DIN_LOC_ROOT/atm/waccm/halons @@ -10377,7 +10377,7 @@ cross sections. - atm/waccm/phot/effxstex.txt + $DIN_LOC_ROOT/atm/waccm/phot/effxstex.txt @@ -10388,7 +10388,7 @@ Full pathname of cross section dataset for long wavelengh photolysis - atm/waccm/phot/temp_prs_GT200nm_JPL10_c140624.nc + $DIN_LOC_ROOT/atm/waccm/phot/temp_prs_GT200nm_JPL10_c140624.nc @@ -10399,7 +10399,7 @@ Full pathname of cross section dataset for short wavelengh photolysis - atm/waccm/phot/xs_short_jpl10_c140303.nc + $DIN_LOC_ROOT/atm/waccm/phot/xs_short_jpl10_c140303.nc @@ -10527,8 +10527,8 @@ Default: UNKNOWN. - atm/waccm/ub - atm/waccm/waccm_forcing + $DIN_LOC_ROOT/atm/waccm/ub + $DIN_LOC_ROOT/atm/waccm/waccm_forcing @@ -10623,8 +10623,8 @@ Default: UNKNOWN. - atm/waccm/phot/xh2o_c080826.nc - atm/waccm/phot/xh2o_c080826.nc + $DIN_LOC_ROOT/atm/waccm/phot/xh2o_c080826.nc + $DIN_LOC_ROOT/atm/waccm/phot/xh2o_c080826.nc @@ -10675,7 +10675,7 @@ Filepath for qbo forcing dataset. 
- atm/waccm/qbo/qbocyclic28months.nc + $DIN_LOC_ROOT/atm/waccm/qbo/qbocyclic28months.nc @@ -10708,7 +10708,7 @@ Default: None. - atm/waccm/geomag/igrf_ceofs_c160412.nc + $DIN_LOC_ROOT/atm/waccm/geomag/igrf_ceofs_c160412.nc @@ -10832,32 +10832,6 @@ - - integer - se - dyn_se_inparm - 1,2,3,4,5,6 - - Number of water tracers active in condensate-loading terms in dynamical core - 1: water vapor only - 2: water vapor and cloud liquid - 3: water vapor, cloud liquid and cloud ice - 4: water vapor, cloud liquid, cloud ice and rain - 5: water vapor, cloud liquid, cloud ice, rain and snow - 6: water vapor, cloud liquid, cloud ice, rain, snow and graupel - Default: 3 for CAM4, CAM5; 5 for CAM6; 1 for Held_Suarez, Adiabatic and Kessler - - - 3 - 3 - 3 - 5 - 1 - 1 - 1 - 1 - - integer se @@ -10982,8 +10956,8 @@ none - atm/cam/coords/ne0np4CONUS.ne30x8.g - atm/cam/coords/ne0np4EQFACE.ne5x4.g + $DIN_LOC_ROOT/atm/cam/coords/ne0np4CONUS.ne30x8.g + $DIN_LOC_ROOT/atm/cam/coords/ne0np4EQFACE.ne5x4.g @@ -11030,21 +11004,6 @@ 7 - - logical - se - dyn_se_inparm - - If TRUE nsplit is dynamically adjusted (increased by 2 if vertical - velocity Courant number is larger than 0.85) and reset when vertical - velocity Courant number is less than 0.38 - - - .false. - .true. - .true. - - integer se @@ -11288,86 +11247,81 @@ 3 - - integer - se - dyn_se_inparm - 1,2 - - CAM-SE vertical remap algorithm - 1: PPM vertical remap with mirroring at the boundaries - (solid wall bc's, high-order throughout) - PCoM in sponge - 2: PPM vertical remap without mirroring at the boundaries - (no bc's enforced, first-order at two cells bordering top and bottom - boundaries) - - - 1 - - - - logical + + char*32 se dyn_se_inparm - Set .true. to allow writing SE dynamics fields to the restart file using the - unstructured grid format. This allows the restart file to be used as an - initial file, but its use as a restart file will introduce roundoff size - differences into the simulation. - - - .false. - - + CAM-SE vertical remapping of temperature: - - - real - se - dyn_se_inparm - - Nudging factor for prescribed winds in SE dycore - Units: 1/sec + "thermal_energy_over_P": Map cp*T (thermal energy conserving) using a pressure coordinate. + "Tv_over_logP" : Map virtual temperature using a log pressure coordinate. + + Default: "thermal_energy_over_P" - 2.0e-5 + thermal_energy_over_P - - real + + char*32 se dyn_se_inparm - Nudging factor for prescribed temperature in SE dycore - Units: 1/sec + CAM-SE vertical remap algorithm for u,v,T, and water species: + + "PPM_bc_mirror": PPM vertical remap with mirroring at the boundaries (solid wall boundary conditions, high-order throughout) + "PPM_bc_PCoM" : PPM vertical remap without mirroring at the boundaries (no boundary conditions enforced, first-order at two cells bordering top and bottom boundaries) + "PPM_bc_linear_extrapolation": PPM with linear extrapolation in ghost cells (code from A. 
Bradley, DOE) + + The following options use the FV3 vertical remapping algorithm: + + "FV3_PPM": Monotone PPM + "FV3_CS" : Monotonic cubic spline with 2*delta_z oscillations removed + "FV3_CS_2dz_filter": Selectively monotonic cubic spline, where local extrema are retained, with 2*delta_z oscillations removed + "FV3_non_monotone_CS_2dz_filter": Non-monotonic (linear) cubic spline with 2*delta_z oscillations removed; + + Default: "FV3_CS" - 2.0e-5 + FV3_CS - - real + + char*32 se dyn_se_inparm - Nudging factor for prescribed surface pressure in SE dycore - Units: 1/sec + CAM-SE vertical remap algorithm for non-water tracers: + + "PPM_bc_mirror": PPM vertical remap with mirroring at the boundaries (solid wall boundary conditions, high-order throughout) + "PPM_bc_PCoM" : PPM vertical remap without mirroring at the boundaries (no boundary conditions enforced, first-order at two cells bordering top and bottom boundaries) + "PPM_bc_linear_extrapolation": PPM with linear extrapolation in ghost cells (code from A. Bradley, DOE) + + The following options use the FV3 vertical remapping algorithm: + + "FV3_PPM": Monotone PPM + "FV3_CS" : Monotonic cubic spline with 2*delta_z oscillations removed + "FV3_non_monotone_CS_2dz_filter": Non-monotonic (linear) cubic spline with 2*delta_z oscillations removed; + + Default: "PPM_bc_linear_extrapolation" - 0.0 + PPM_bc_linear_extrapolation - - integer + + logical se dyn_se_inparm - 0,1 - Switch to turn on/off time evolution of dynamics nudging + Set .true. to allow writing SE dynamics fields to the restart file using the + unstructured grid format. This allows the restart file to be used as an + initial file, but its use as a restart file will introduce roundoff size + differences into the simulation. - 0 + .false. diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index de708a3e..ed948fdc 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -240,7 +240,6 @@ subroutine dyn_readnl(NLFileName) se_hypervis_scaling = 0 se_max_hypervis_courant = 1.0e99_r8 se_mesh_file = '' - se_npes = npes se_write_restart_unstruct = .false. ! Read the namelist (dyn_se_inparm) @@ -308,9 +307,14 @@ subroutine dyn_readnl(NLFileName) call MPI_bcast(se_raytau0, 1, mpi_real8, masterprocid, mpicom, ierr) call MPI_bcast(se_molecular_diff, 1, mpi_real8, masterprocid, mpicom, ierr) - ! Check that se_npes is a positive integer: - if (se_npes <= 0) then - call endrun('dyn_readnl: ERROR: se_npes must be > 0') + ! If se_npes is set to zero, then make it match host model: + if (se_npes == 0) then + se_npes = npes + else + ! Check that se_npes is a positive integer: + if (se_npes < 0) then + call endrun('dyn_readnl: ERROR: se_npes must be >= 0') + end if end if ! Initialize the SE structure that holds the MPI decomposition information From 42ea4f9618f44ae4c9d37b6e37adcf870b4be12a Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 19 Apr 2021 14:31:21 -0600 Subject: [PATCH 14/45] Apply code fixes for SE dycore initialization. 
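
For reference, the se_npes handling above treats a namelist value of zero as "use every PE available to the host model", rejects negative values, and accepts any positive value as-is. The following is a minimal, self-contained sketch of that resolution pattern; npes is hard-coded and write/stop stand in for the model's logging and endrun, so it illustrates the logic rather than reproducing dyn_readnl.

   program se_npes_default_demo
      ! Illustrative sketch only: resolve a sentinel value of zero to the
      ! host model's PE count and reject negative values, mirroring the
      ! se_npes handling shown above.
      implicit none
      integer :: se_npes, npes

      npes    = 128   ! stand-in for the host model's PE count
      se_npes = 0     ! stand-in for the value read from the dyn_se_inparm namelist

      if (se_npes == 0) then
         se_npes = npes
      else if (se_npes < 0) then
         stop 'se_npes must be >= 0'
      end if

      write(*,*) 'SE dycore decomposition will use ', se_npes, ' tasks'
   end program se_npes_default_demo
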
--- cime_config/cam_config.py | 2 +- cime_config/namelist_definition_cam.xml | 22 +++- src/control/cam_comp.F90 | 9 +- src/control/cam_control_mod.F90 | 30 +++-- src/control/cam_initfiles.F90 | 24 +++- src/control/camsrfexch.F90 | 22 ++-- src/control/runtime_opts.F90 | 4 +- src/data/physconst.F90 | 117 +++++++++++++++--- src/dynamics/se/dycore/element_mod.F90 | 27 ++-- src/dynamics/se/dycore/global_norms_mod.F90 | 24 ++-- src/dynamics/se/dycore/prim_state_mod.F90 | 9 +- src/dynamics/se/dyn_comp.F90 | 8 +- src/dynamics/tests/inic_analytic.F90 | 50 +++++--- .../initial_conditions/ic_baro_dry_jw06.F90 | 19 ++- .../initial_conditions/ic_baroclinic.F90 | 39 +++++- .../initial_conditions/ic_held_suarez.F90 | 9 +- .../initial_conditions/ic_us_standard_atm.F90 | 22 +++- src/utils/cam_field_read.F90 | 4 +- src/utils/cam_pio_utils.F90 | 2 +- 19 files changed, 324 insertions(+), 119 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index f0b1c7cf..f34b6c4e 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -705,7 +705,7 @@ def __init__(self, case, case_log): self.add_cppdef("_MPI") self.add_cppdef("SPMD") - # Add OpenMP CCP definitions, if needed: + # Add OpenMP CPP definitions, if needed: if nthrds > 1: self.add_cppdef("_OPENMP") diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index e369513b..7ffb1c88 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10901,10 +10901,20 @@ Number of hyperviscosity subcycles per dynamics timestep. - 2 + 3 4 - 6 - 8 + + + + integer + se + dyn_se_inparm + + Number of hyperviscosity subcycles per dynamics timestep in sponge del2 diffusion. + + + 1 + 4 @@ -11067,8 +11077,10 @@ Second-order viscosity applied only near the model top [m^2/s]. - 1.25e5 - 1.0e6 + 5.0e5 + 1.0e6 + + 0.0 2.0e5 diff --git a/src/control/cam_comp.F90 b/src/control/cam_comp.F90 index 36c9ed53..8f4be12c 100644 --- a/src/control/cam_comp.F90 +++ b/src/control/cam_comp.F90 @@ -15,7 +15,7 @@ module cam_comp use shr_sys_mod, only: shr_sys_flush use spmd_utils, only: masterproc, mpicom - use cam_control_mod, only: cam_ctrl_init, cam_ctrl_set_orbit + use cam_control_mod, only: cam_ctrl_init, cam_ctrl_set_orbit, cam_ctrl_set_physics_type use cam_control_mod, only: caseid, ctitle use runtime_opts, only: read_namelist use time_manager, only: timemgr_init, get_step_size @@ -78,6 +78,7 @@ subroutine cam_init(caseid, ctitle, model_doi_url, & use cam_instance, only: inst_suffix ! use history_defaults, only: initialize_iop_history use stepon, only: stepon_init + use physconst, only: composition_init ! Arguments character(len=cl), intent(in) :: caseid ! case ID @@ -145,12 +146,18 @@ subroutine cam_init(caseid, ctitle, model_doi_url, & filein = "atm_in" // trim(inst_suffix) call read_namelist(filein, single_column, scmlat, scmlon) + ! Determine if physics is "simple", which needs to be known by some dycores: + call cam_ctrl_set_physics_type() + ! Open initial or restart file, and topo file if specified. call cam_initfiles_open() ! Initialize model grids and decompositions call model_grid_init() + ! Initialize composition-dependent constants: + call composition_init() + ! Initialize ghg surface values before default initial distributions ! 
are set in dyn_init !!XXgoldyXX: This needs to be converted to CCPP and the issue of diff --git a/src/control/cam_control_mod.F90 b/src/control/cam_control_mod.F90 index 49c26f16..91254667 100644 --- a/src/control/cam_control_mod.F90 +++ b/src/control/cam_control_mod.F90 @@ -118,23 +118,35 @@ end subroutine cam_ctrl_set_orbit !--------------------------------------------------------------------------- - subroutine cam_ctrl_set_physics_type(phys_package) - ! Dummy argument - character(len=*), intent(in) :: phys_package - ! Local variable + subroutine cam_ctrl_set_physics_type() + + use shr_kind_mod, only: SHR_KIND_CS + use cam_ccpp_cap, only: ccpp_physics_suite_list + + ! Local variables: + + ! suite_names: List of CCPP suites + character(len=SHR_KIND_CS), allocatable :: suite_names(:) + ! suite_name: CCPP suite we are running + character(len=SHR_KIND_CS) :: suite_name + character(len=*), parameter :: subname = 'cam_ctrl_set_physics_type' - adiabatic = trim(phys_package) == 'adiabatic' - ideal_phys = trim(phys_package) == 'held_suarez' - kessler_phys = trim(phys_package) == 'kessler' - tj2016_phys = trim(phys_package) == 'tj2016' + !Determine CCPP physics suite names: + call ccpp_physics_suite_list(suite_names) + suite_name = suite_names(1) + + adiabatic = trim(suite_name) == 'adiabatic' + ideal_phys = trim(suite_name) == 'held_suarez' + kessler_phys = trim(suite_name) == 'kessler_cam' + tj2016_phys = trim(suite_name) == 'tj2016' simple_phys = adiabatic .or. ideal_phys .or. kessler_phys .or. tj2016_phys moist_physics = .not. (adiabatic .or. ideal_phys) if ((.not. moist_physics) .and. aqua_planet) then - call endrun (subname//': FATAL: AQUA_PLANET not compatible with dry physics package, ('//trim(phys_package)//')') + call endrun (subname//': FATAL: AQUA_PLANET not compatible with dry physics package, ('//trim(suite_name)//')') end if if (masterproc) then diff --git a/src/control/cam_initfiles.F90 b/src/control/cam_initfiles.F90 index 28ed5311..a9292214 100644 --- a/src/control/cam_initfiles.F90 +++ b/src/control/cam_initfiles.F90 @@ -208,19 +208,30 @@ end subroutine cam_initfiles_readnl subroutine cam_initfiles_open() use ioFileMod, only: cam_get_file + use string_utils, only: to_str ! Open the initial conditions and topography files. ! ncdata_loc: filepath of initial file on local disk - character(len=256) :: ncdata_loc + character(len=cl) :: ncdata_loc ! bnd_topo_loc: filepath of topo file on local disk - character(len=256) :: bnd_topo_loc + character(len=cl) :: bnd_topo_loc + + integer :: iret + + character(len=*), parameter :: subname = 'cam_initfiles_open' + !----------------------------------------------------------------------- ! Open initial dataset if (initial_run) then call cam_get_file(ncdata, ncdata_loc) - allocate(fh_ini) + allocate(fh_ini, stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fh_ini failed with stat: '//& + to_str(iret)) + end if + call cam_pio_openfile(fh_ini, ncdata_loc, pio_nowrite) else fh_ini => fh_restart @@ -229,7 +240,12 @@ subroutine cam_initfiles_open() ! Open topography dataset if used. if (trim(bnd_topo) /= trim(unset_path_str)) then if ((trim(bnd_topo) /= 'bnd_topo') .and. 
(len_trim(bnd_topo) > 0)) then - allocate(fh_topo) + allocate(fh_topo, stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate fh_topo failed with stat: '//& + to_str(iret)) + end if + call cam_get_file(bnd_topo, bnd_topo_loc) call cam_pio_openfile(fh_topo, bnd_topo_loc, pio_nowrite) else diff --git a/src/control/camsrfexch.F90 b/src/control/camsrfexch.F90 index b6e2c56c..d1108f03 100644 --- a/src/control/camsrfexch.F90 +++ b/src/control/camsrfexch.F90 @@ -9,6 +9,7 @@ module camsrfexch use constituents, only: pcnst use shr_infnan_mod, only: posinf => shr_infnan_posinf, assignment(=) use cam_abortutils, only: endrun + use string_utils, only: to_str use cam_logfile, only: iulog use physics_grid, only: phys_grid_initialized use srf_field_check, only: active_Sl_ram1, active_Sl_fv, active_Sl_soilw @@ -60,20 +61,20 @@ subroutine hub2atm_alloc(cam_in) ! LOCAL VARIABLES: integer :: ierror ! Error code - character(len=*), parameter :: sub = 'hub2atm_alloc' + character(len=*), parameter :: subname = 'hub2atm_alloc' !----------------------------------------------------------------------- if ( .not. phys_grid_initialized ) then - call endrun(sub//": phys_grid_init not called yet") + call endrun(subname//": phys_grid_init not called yet") end if if (associated(cam_in)) then deallocate(cam_in) nullify(cam_in) end if - allocate (cam_in, stat=ierror) + allocate(cam_in, stat=ierror) if ( ierror /= 0 )then - write(iulog,*) sub//': Allocation error: ', ierror - call endrun(sub//': allocation error') + call endrun(subname//': allocate cam_in failed with stat: '//& + to_str(ierror)) end if cam_in%ncol = 0 @@ -92,20 +93,21 @@ subroutine atm2hub_alloc(cam_out) ! LOCAL VARIABLES: integer :: ierror ! Error code - character(len=*), parameter :: sub = 'atm2hub_alloc' + character(len=*), parameter :: subname = 'atm2hub_alloc' !----------------------------------------------------------------------- if (.not. phys_grid_initialized) then - call endrun(sub//": phys_grid_init not called yet") + call endrun(subname//": phys_grid_init not called yet") end if if (associated(cam_out)) then deallocate(cam_out) nullify(cam_out) end if - allocate (cam_out, stat=ierror) + allocate(cam_out, stat=ierror) if ( ierror /= 0 )then - write(iulog,*) sub//': Allocation error: ', ierror - call endrun(sub//': allocation error: cam_out') + call endrun(subname//': allocate cam_out failed with stat: '//& + to_str(ierror)) + end if end subroutine atm2hub_alloc diff --git a/src/control/runtime_opts.F90 b/src/control/runtime_opts.F90 index d29360e8..a7139e78 100644 --- a/src/control/runtime_opts.F90 +++ b/src/control/runtime_opts.F90 @@ -38,7 +38,7 @@ subroutine read_namelist(nlfilename, single_column, scmlat, scmlon) ! use phys_debug_util, only: phys_debug_readnl ! use cam_diagnostics, only: diag_readnl -! use inic_analytic_utils, only: analytic_ic_readnl + use inic_analytic_utils, only: analytic_ic_readnl ! use tracers, only: tracers_readnl ! use nudging, only: nudging_readnl @@ -82,7 +82,7 @@ subroutine read_namelist(nlfilename, single_column, scmlat, scmlon) ! call phys_debug_readnl(nlfilename) ! call diag_readnl(nlfilename) ! call check_energy_readnl(nlfilename) -! call analytic_ic_readnl(nlfilename) + call analytic_ic_readnl(nlfilename) ! call scam_readnl(nlfilename, single_column, scmlat, scmlon) ! 
call nudging_readnl(nlfilename) diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 2d44900b..4022228f 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -22,6 +22,7 @@ module physconst use vert_coord, only: pver, pverp use physics_grid, only: pcols => columns_on_task use cam_abortutils, only: endrun + use string_utils, only: to_str use constituents, only: pcnst implicit none @@ -451,31 +452,44 @@ subroutine physconst_init(pcols, pver, pverp) !------------------------------------------------------------------------ allocate(cpairv(pcols,pver), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate cpairv failed') + call endrun(subname//': allocate cpairv(pcols,pver) failed with stat: '//& + to_str(ierr)) end if + allocate(rairv(pcols,pver), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate rairv failed') + call endrun(subname//': allocate rairv(pcols,pver) failed with stat: '//& + to_str(ierr)) end if + allocate(cappav(pcols,pver), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate cappav failed') + call endrun(subname//': allocate cappav(pcols,pver) failed with stat: '//& + to_str(ierr)) end if + allocate(mbarv(pcols,pver), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate mbarv failed') + call endrun(subname//': allocate mbarv(pcols,pver) failed with stat: '//& + to_str(ierr)) end if + allocate(zvirv(pcols,pver), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate zvirv failed') + call endrun(subname//': allocate zvirv(pcols,pver) failed with stat: '//& + to_str(ierr)) end if + allocate(kmvis(pcols,pverp), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate kmvis failed') + call endrun(subname//': allocate kmvis(pcols,pverp) failed with stat: '//& + to_str(ierr)) end if + allocate(kmcnd(pcols,pverp), stat=ierr) if (ierr /= 0) then - call endrun(subname//': allocate kmcnd failed') + call endrun(subname//': allocate kmcnd(pcols,pverp) failed with stat: '//& + to_str(ierr)) end if !------------------------------------------------------------------------ @@ -528,6 +542,7 @@ subroutine composition_init() real(kind_phys) :: mw, dof1, dof2, dof3 integer :: icnst,ix,i + integer :: iret ! standard dry air (constant composition) o2_mwi = 1._kind_phys/32._kind_phys @@ -539,16 +554,55 @@ subroutine composition_init() ! 
init for variable composition dry air i = dry_air_species_num+water_species_in_air_num - allocate(thermodynamic_active_species_idx(i)) - allocate(thermodynamic_active_species_idx_dycore(i)) - allocate(thermodynamic_active_species_cp(0:i)) - allocate(thermodynamic_active_species_cv(0:i)) - allocate(thermodynamic_active_species_R(0:i)) + allocate(thermodynamic_active_species_idx(i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_idx(i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_idx_dycore(i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_idx_dycore(i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_cp(0:i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_cp(0:i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_cv(0:i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_cv(0:i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_R(0:i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_R(0:i)'//& + ' failed with stat: '//to_str(iret)) + end if i = dry_air_species_num - allocate(thermodynamic_active_species_mwi(i)) - allocate(thermodynamic_active_species_kv(i)) - allocate(thermodynamic_active_species_kc(i)) + allocate(thermodynamic_active_species_mwi(i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_mwi(i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_kv(i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_kv(i)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(thermodynamic_active_species_kc(i), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate thermodynamic_active_species_kc(i)'//& + ' failed with stat: '//to_str(iret)) + end if + thermodynamic_active_species_idx = -999 thermodynamic_active_species_idx_dycore = -999 thermodynamic_active_species_cp = 0.0_kind_phys @@ -1772,11 +1826,14 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ ! 
local vars integer :: i,j,k + integer :: iret real(r8), dimension(i0:i1,j0:j1,1:k1) :: pmid real(r8):: pint(i0:i1,j0:j1,1:k1+1) real(r8), allocatable :: R_dry(:,:,:) integer, dimension(thermodynamic_active_species_num):: idx_local + character(len=*), parameter :: subname = 'get_rho_dry' + if (present(active_species_idx_dycore)) then idx_local = active_species_idx_dycore else @@ -1789,7 +1846,12 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ if (present(pint_out)) pint_out=pint if (present(pint_out)) pmid_out=pmid if (present(rhoi_dry)) then - allocate(R_dry(i0:i1,j0:j1,1:k1+1)) + allocate(R_dry(i0:i1,j0:j1,1:k1+1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate R_dry(i0:i1,j0:j1,1:k1+1)'//& + ' failed with stat: '//to_str(iret)) + end if + if (tracer_mass) then call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) else @@ -1808,7 +1870,12 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ deallocate(R_dry) end if if (present(rho_dry)) then - allocate(R_dry(i0:i1,j0:j1,1:k1)) + allocate(R_dry(i0:i1,j0:j1,1:k1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate R_dry(i0:i1,j0:j1,1:k1)'//& + ' failed with stat: '//to_str(iret)) + end if + if (tracer_mass) then call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) else @@ -1897,13 +1964,25 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio ! real(r8), allocatable, dimension(:,:,:) :: cp_dry,R_dry + integer :: iret + character(len=*), parameter :: subname = 'get_kappa_dry' ! ! dry air not species dependent if (dry_air_species_num==0) then kappa_dry= rair/cpair else - allocate(R_dry(i0:i1,j0:j1,k0:k1)) - allocate(cp_dry(i0:i1,j0:j1,k0:k1)) + allocate(R_dry(i0:i1,j0:j1,k0:k1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate R_dry(i0:i1,j0:j1,k0:k1)'//& + ' failed with stat: '//to_str(iret)) + end if + + allocate(cp_dry(i0:i1,j0:j1,k0:k1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate cp_dry(i0:i1,j0:j1,k0:k1)'//& + ' failed with stat: '//to_str(iret)) + end if + if (present(fact)) then call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry,fact=fact) call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry,fact=fact) diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 index c72f417d..a9fc8588 100644 --- a/src/dynamics/se/dycore/element_mod.F90 +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -366,55 +366,55 @@ subroutine allocate_element_desc(elem) num = SIZE(elem) do j=1,num - allocate(elem(j)%desc%putmapP(max_neigh_edges)) + allocate(elem(j)%desc%putmapP(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%putmapP(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%getmapP(max_neigh_edges)) + allocate(elem(j)%desc%getmapP(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%getmapP(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges)) + allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%putmapP_ghost(max_neigh_edges) failed with stat: '//& to_str(iret)) end if 
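
The recurring change in these hunks is a single error-handling idiom: every allocate now passes stat= and a nonzero status aborts with a message naming the array. A standalone sketch of that idiom follows; write and stop stand in for the model's endrun and to_str helpers, which are not reproduced here.

   program checked_allocate_demo
      ! Illustrative sketch of the error-checked allocation idiom applied
      ! throughout this patch: request stat= on every allocate and abort
      ! with a message naming the array if the status is nonzero.
      implicit none
      real, allocatable :: work(:,:)
      integer :: iret
      character(len=*), parameter :: subname = 'checked_allocate_demo'

      allocate(work(100,50), stat=iret)
      if (iret /= 0) then
         write(*,*) subname//': allocate work(100,50) failed with stat: ', iret
         stop 1
      end if

      work = 0.0
      write(*,*) subname//': allocated ', size(work), ' elements'
   end program checked_allocate_demo
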
- allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges)) + allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%getmapP_ghost(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%putmapS(max_neigh_edges)) + allocate(elem(j)%desc%putmapS(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%putmapS(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%getmapS(max_neigh_edges)) + allocate(elem(j)%desc%getmapS(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%getmapS(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%reverse(max_neigh_edges)) + allocate(elem(j)%desc%reverse(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%reverse(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%globalID(max_neigh_edges)) + allocate(elem(j)%desc%globalID(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%globalID(max_neigh_edges) failed with stat: '//& to_str(iret)) end if - allocate(elem(j)%desc%loc2buf(max_neigh_edges)) + allocate(elem(j)%desc%loc2buf(max_neigh_edges), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%desc%loc2buf(max_neigh_edges) failed with stat: '//& to_str(iret)) @@ -555,20 +555,13 @@ subroutine allocate_element_dims(elem) end if ! vertical velocity - allocate(elem(i)%derived%phi(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%phi(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if - - ! relative vorticity allocate(elem(i)%derived%omega(np,np,nlev), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%derived%omega(np,np,nlev) failed with stat: '//& to_str(iret)) end if - ! divergence + ! 
relative vorticity allocate(elem(i)%derived%zeta(np,np,nlev), stat=iret) if (iret /= 0) then call endrun(subname//': allocate elem%derived%zeta(np,np,nlev) failed with stat: '//& diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 index 8f55f639..c5f4571c 100644 --- a/src/dynamics/se/dycore/global_norms_mod.F90 +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -40,7 +40,7 @@ subroutine global_integrals(elem, h,hybrid,npts,num_flds,nets,nete,I_sphere) type (hybrid_t) , intent(in) :: hybrid real (kind=r8) :: I_sphere(num_flds) - + real (kind=r8) :: I_priv real (kind=r8) :: I_shared common /gblintcom/I_shared @@ -211,7 +211,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& use dimensions_mod, only: nu_scale_top,nu_div_lev,nu_lev use quadrature_mod, only: gausslobatto, quadrature_t - + use reduction_mod, only: ParallelMin,ParallelMax use physconst, only: ra, rearth, pi use control_mod, only: nu, nu_div, nu_q, nu_p, nu_s, nu_top, fine_ne, rk_stage_user, max_hypervis_courant @@ -224,7 +224,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& use mesh_mod, only: MeshUseMeshFile use dimensions_mod, only: ksponge_end, kmvis_ref, kmcnd_ref,rho_ref use physconst, only: cpair - + type(element_t) , intent(inout) :: elem(:) integer , intent(in) :: nets,nete type (hybrid_t) , intent(in) :: hybrid @@ -235,7 +235,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& real (kind=r8), intent(in) :: dt_remap_actual,dt_tracer_fvm_actual,dt_tracer_se_actual,& dt_dyn_actual,dt_dyn_visco_actual,dt_dyn_del2_actual, & dt_tracer_visco_actual, dt_phys - + ! Element statisics real (kind=r8) :: max_min_dx,min_min_dx,min_max_dx,max_unif_dx ! used for normalizing scalar HV real (kind=r8) :: max_normDinv, min_normDinv ! used for CFL @@ -259,7 +259,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& real(kind=r8) :: h(np,np,nets:nete) - + ! 
Eigenvalues calculated by folks at UMich (Paul U & Jared W) select case (np) case (2) @@ -623,7 +623,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& umax = 400.0_r8 end if ugw = 342.0_r8 !max gravity wave speed - + dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*ra) dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*ra) dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*ra) @@ -642,8 +642,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& max_laplace = MAX(MAXVAL(nu_scale_top(:))*nu_top,MAXVAL(kmvis_ref(:)/rho_ref(:))) max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(cpair*rho_ref(:)))) dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) - - if (hybrid%masterthread) then + + if (hybrid%masterthread) then write(iulog,'(a,f10.2,a)') ' ' write(iulog,'(a,f10.2,a)') 'Estimates for maximum stable and actual time-steps for different aspects of algorithm:' write(iulog,'(a,f12.8,a)') '(assume max wind is ',umax,'m/s)' @@ -652,7 +652,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_dyn (time-stepping dycore ; u,v,T,dM) < ',& MIN(dt_max_adv,dt_max_gw),'s ',dt_dyn_actual,'s' if (dt_dyn_actual>MIN(dt_max_adv,dt_max_gw)) write(iulog,*) 'WARNING: dt_dyn theoretically unstable' - + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_dyn_vis (hyperviscosity) ; u,v,T,dM) < ',dt_max_hypervis,& 's ',dt_dyn_visco_actual,'s' if (dt_dyn_visco_actual>dt_max_hypervis) write(iulog,*) 'WARNING: dt_dyn_vis theoretically unstable' @@ -662,7 +662,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_vis (hyperviscosity tracers; q ) < ',dt_max_hypervis_tracer,'s',& dt_tracer_visco_actual,'s' if (dt_tracer_visco_actual>dt_max_hypervis_tracer) write(iulog,*) 'WARNING: dt_tracer_hypervis theoretically unstable' - + if (ntrac>0) then write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_fvm (time-stepping tracers ; q ) < ',dt_max_tracer_fvm,& 's ',dt_tracer_fvm_actual @@ -687,8 +687,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,*) 'tstep_type = ',tstep_type end if end subroutine print_cfl - - ! + + ! ! ============================ ! global_maximum: ! 
diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 index 460048da..4ff733f0 100644 --- a/src/dynamics/se/dycore/prim_state_mod.F90 +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -257,6 +257,7 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) end if end do + !JMD This is a Thread Safe Reduction do k = 1, nm2+2+statediag_numtrac if (k==1) call t_startf('parallelMin') @@ -359,14 +360,14 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) write(iulog,100) varname(k),min_p(k),max_p(k) end do end if - + 100 format (A12,4(E23.15)) 101 format (A12,A23,A23,A23,A23) #ifdef waccm_debug call prim_printstate_cslam_gamma(elem, tl,hybrid,nets,nete, fvm) #endif - call prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) + call prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) end subroutine prim_printstate @@ -511,7 +512,7 @@ subroutine prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) integer :: k,ie real (kind=r8), dimension(nets:nete,nlev) :: max_local - real (kind=r8), dimension(nets:nete,nlev) :: min_local + real (kind=r8), dimension(nets:nete,nlev) :: min_local real (kind=r8), dimension(nlev) :: max_p real (kind=r8), dimension(nlev) :: min_p integer :: n0, n0_qdp, q, nm, nm2 @@ -535,7 +536,7 @@ subroutine prim_printstate_U(elem, tl,hybrid,nets,nete, fvm) !JMD This is a Thread Safe Reduction do k = 1, nlev max_p(k) = Parallelmax(max_local(:,k),hybrid) - min_p(k) = Parallelmin(min_local(:,k),hybrid) + min_p(k) = Parallelmin(min_local(:,k),hybrid) end do if (hybrid%masterthread) then write(iulog,*) ' ' diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index ed948fdc..75779414 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -1422,7 +1422,6 @@ subroutine read_inidat(dyn_in) end do deallocate(dbuf4) - else ! Read ICs from file. Assume all fields in the initial file are on the GLL grid. @@ -1633,12 +1632,13 @@ subroutine read_inidat(dyn_in) end do end do ! pcnst -!Un-comment once constituents are enabled in CAMDEN -JN: -#endif ! Cleanup deallocate(dbuf3) +!Un-comment once constituents are enabled in CAMDEN -JN: +#endif + ! Put the error handling back the way it was call pio_seterrorhandling(fh_ini, pio_errtype) @@ -1801,6 +1801,7 @@ subroutine read_inidat(dyn_in) ! scale PS to achieve prescribed dry mass following FV dycore (dryairm.F90) #ifndef planet_mars +#if 0 if (runtype == 0) then initial_global_ave_dry_ps = 98288.0_r8 if (.not. associated(fh_topo)) then @@ -1813,6 +1814,7 @@ subroutine read_inidat(dyn_in) call prim_set_dry_mass(elem, hvcoord, initial_global_ave_dry_ps, qtmp) end if endif +#endif #endif ! store Q values: ! 
diff --git a/src/dynamics/tests/inic_analytic.F90 b/src/dynamics/tests/inic_analytic.F90 index bf19dff6..0d60e0bc 100644 --- a/src/dynamics/tests/inic_analytic.F90 +++ b/src/dynamics/tests/inic_analytic.F90 @@ -9,6 +9,7 @@ module inic_analytic use cam_logfile, only: iulog use shr_kind_mod, only: r8 => shr_kind_r8 use cam_abortutils, only: endrun + use string_utils, only: to_str use shr_sys_mod, only: shr_sys_flush use inic_analytic_utils, only: analytic_ic_active, analytic_ic_type @@ -77,10 +78,16 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & integer :: rndm_seed_sz integer :: i, k integer :: ncol, nlev + integer :: iret character(len=*), parameter :: subname = 'DYN_SET_INIC_COL' #ifdef ANALYTIC_IC - allocate(mask_use(size(latvals))) + allocate(mask_use(size(latvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then if (size(mask_use) /= size(mask)) then call endrun('cnst_init_default: input, mask, is wrong size') @@ -186,7 +193,11 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & pertlim,' to initial temperature field' end if call random_seed(size=rndm_seed_sz) - allocate(rndm_seed(rndm_seed_sz)) + allocate(rndm_seed(rndm_seed_sz), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate rndm_seed(rndm_seed_sz) failed with stat: '//& + to_str(iret)) + end if ncol = size(T, 1) nlev = size(T, 2) @@ -242,6 +253,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & integer :: i, bbeg, bend integer :: size1, size2, size3 integer :: nblks, blksize + integer :: iret logical :: verbose character(len=4) :: mname character(len=*), parameter :: subname = 'DYN_SET_INIC_CBLOCK' @@ -299,7 +311,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), V=V(:,:,i), mask=mask(bbeg:bend), verbose=verbose) end if - if (present(PS).and.present(PHIS_IN).and.present(T)) then + if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), PS=PS(:,i), PHIS_IN=PHIS_IN(:,i), T=T(:,:,i), & mask=mask(bbeg:bend), verbose=verbose) @@ -307,7 +319,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & if (present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), T=T(:,:,i), mask=mask(bbeg:bend), verbose=verbose) - end if + end if if (present(PHIS_OUT)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), mask=mask(bbeg:bend), verbose=verbose) @@ -334,9 +346,9 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,:,i), & - verbose=verbose) + verbose=verbose) else - if (present(T)) then + if (present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), T=T(:,:,i), verbose=verbose) end if @@ -383,7 +395,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & 
glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,i,:), & - mask=mask(bbeg:bend), verbose=verbose) + mask=mask(bbeg:bend), verbose=verbose) else if (present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & @@ -415,7 +427,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & glob_ind(bbeg:bend), PHIS_IN=PHIS_IN(:,i),PS=PS(:,i),T=T(:,i,:), & - verbose=verbose) + verbose=verbose) else if (present(T)) then call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & @@ -444,7 +456,12 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & call endrun(subname//': there must be a global index for every column') end if nblks = size2 - allocate(lat_use(size(lonvals))) + allocate(lat_use(size(lonvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate lat_use(size(lonvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then call endrun(subname//': mask not supported for lon/lat') else @@ -463,7 +480,7 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & end if if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & - PS=PS(:,i),T=T(:,i,:),PHIS_IN=PHIS_IN(:,i), verbose=verbose) + PS=PS(:,i),T=T(:,i,:),PHIS_IN=PHIS_IN(:,i), verbose=verbose) else if (present(T)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & @@ -492,7 +509,12 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & end if ! Case: lon,lev,lat nblks = size3 - allocate(lat_use(size(lonvals))) + allocate(lat_use(size(lonvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate lat_use(size(lonvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then call endrun(subname//': mask not supported for lon/lat') else @@ -511,15 +533,15 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & end if if (present(PS).and.present(PHIS_IN).and.present(T)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & - T=T(:,:,i),PS=PS(:,i), PHIS_IN=PHIS_IN(:,i), verbose=verbose) + T=T(:,:,i),PS=PS(:,i), PHIS_IN=PHIS_IN(:,i), verbose=verbose) else if (present(T)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & T=T(:,:,i), verbose=verbose) - end if + end if if (present(PS)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & - PS=PS(:,i), verbose=verbose) + PS=PS(:,i), verbose=verbose) end if if (present(PHIS_OUT)) then call dyn_set_inic_col(vcoord,lat_use, lonvals, glob_ind(bbeg:bend), & diff --git a/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 index a8f4a282..48183a0a 100644 --- a/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 +++ b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 @@ -3,11 +3,11 @@ module ic_baro_dry_jw06 ! ! Purpose: Set idealized initial conditions for the Jablonowski and ! Williamson baroclinic instability test. - ! References: - ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Instability Test Case for + ! References: + ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Instability Test Case for ! Atmospheric Model Dynamical Cores, Quart. J. Roy. Met. Soc., Vol. 
132, 2943-2975 - ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Wave Test Case for Dynamical - ! Cores of General Circulation Models: Model Intercomparisons, + ! Jablonowski, C., and D. L. Williamson (2006), A Baroclinic Wave Test Case for Dynamical + ! Cores of General Circulation Models: Model Intercomparisons, ! NCAR Technical Note NCAR/TN-469+STR, Boulder, CO, 89 pp. ! !----------------------------------------------------------------------- @@ -54,8 +54,9 @@ subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & !use constituents, only: cnst_name !use const_init, only: cnst_init_default + use string_utils, only: to_str !Remove once constituents are enabled -JN - use physics_types, only : ix_cld_liq, ix_rain + use physics_types, only: ix_cld_liq, ix_rain !----------------------------------------------------------------------- ! @@ -85,6 +86,7 @@ subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & integer :: ncol integer :: nlev integer :: ncnst + integer :: iret character(len=*), parameter :: subname = 'BC_DRY_JW06_SET_IC' real(r8) :: tmp real(r8) :: r(size(latvals)) @@ -97,7 +99,12 @@ subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & a_omega = rearth*omega exponent = rair*gamma/gravit - allocate(mask_use(size(latvals))) + allocate(mask_use(size(latvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then if (size(mask_use) /= size(mask)) then call endrun(subname//': input, mask, is wrong size') diff --git a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 index e8b9c4c9..4bc65860 100644 --- a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 +++ b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 @@ -82,6 +82,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & !use constituents, only: cnst_name !use const_init, only: cnst_init_default use inic_analytic_utils, only: analytic_ic_is_moist + use string_utils, only: to_str !----------------------------------------------------------------------- ! @@ -111,6 +112,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & integer :: ncol integer :: nlev integer :: ncnst + integer :: iret character(len=*), parameter :: subname = 'BC_WAV_SET_IC' real(r8) :: ztop,ptop real(r8) :: uk,vk,Tvk,qk,pk !mid-level state @@ -139,7 +141,12 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & call endrun(subname//' ERROR: vcoord value out of range') end if - allocate(mask_use(size(latvals))) + allocate(mask_use(size(latvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then if (size(mask_use) /= size(mask)) then call endrun(subname//': input, mask, is wrong size') @@ -225,14 +232,34 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & nlev = size(Q, 2) ! check whether first constituent in Q is water vapor. cnst1_is_moisture = m_cnst(1) == 1 - allocate(zlocal(size(Q, 1),nlev)) + allocate(zlocal(size(Q, 1),nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate zlocal(size(Q, 1),nlev) failed with stat: '//& + to_str(iret)) + end if + end if allocate(zk(nlev)) if ((lq.or.lt) .and. 
(vcoord == vc_dry_pressure)) then - allocate(pdry_half(nlev+1)) - allocate(pwet_half(nlev+1)) - allocate(zdry_half(nlev+1)) + allocate(pdry_half(nlev+1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate pdry_half(nlev+1) failed with stat: '//& + to_str(iret)) + end if + + allocate(pwet_half(nlev+1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate pwet_half(nlev+1) failed with stat: '//& + to_str(iret)) + end if + + allocate(zdry_half(nlev+1), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate zdry_half(nlev+1) failed with stat: '//& + to_str(iret)) + end if + end if do i=1,ncol if (mask_use(i)) then @@ -261,7 +288,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & zlocal(i,1:nlev) = zk(:) end if end if - + do k=1,nlev ! diff --git a/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 index 2171925b..d25fc5a0 100644 --- a/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 +++ b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 @@ -26,6 +26,7 @@ subroutine hs94_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & Q, m_cnst, mask, verbose) !use const_init, only: cnst_init_default !use constituents, only: cnst_name + use string_utils, only: to_str use physics_types, only: ix_cld_liq, ix_rain !Remove once constituents are enabled -JN !----------------------------------------------------------------------- @@ -54,9 +55,15 @@ subroutine hs94_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & integer :: ncol integer :: nlev integer :: ncnst + integer :: iret character(len=*), parameter :: subname = 'HS94_SET_IC' - allocate(mask_use(size(latvals))) + allocate(mask_use(size(latvals)), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then if (size(mask_use) /= size(mask)) then call endrun('cnst_init_default: input, mask, is wrong size') diff --git a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 index 760d3537..43198c04 100644 --- a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 +++ b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 @@ -19,6 +19,7 @@ module ic_us_standard_atmosphere use cam_logfile, only: iulog use cam_abortutils, only: endrun +use string_utils, only: to_str implicit none private @@ -60,13 +61,19 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & integer :: ncol integer :: nlev integer :: ncnst + integer :: iret character(len=*), parameter :: subname = 'us_std_atm_set_ic' real(r8) :: psurf(1) real(r8), allocatable :: pmid(:), zmid(:) !---------------------------------------------------------------------------- ncol = size(latvals, 1) - allocate(mask_use(ncol)) + allocate(mask_use(ncol), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate mask_use(ncol)) failed with stat: '//& + to_str(iret)) + end if + if (present(mask)) then if (size(mask_use) /= size(mask)) then call endrun(subname//': input, mask, is wrong size') @@ -112,7 +119,18 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & call endrun(subname//': PHIS must be specified to initiallize T') end if nlev = size(T, 2) - allocate(pmid(nlev), zmid(nlev)) + allocate(pmid(nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate pmid(nlev) failed with stat: '//& + to_str(iret)) + end if + + 
allocate(zmid(nlev), stat=iret) + if (iret /= 0) then + call endrun(subname//': allocate zmid(nlev) failed with stat: '//& + to_str(iret)) + end if + do i = 1, ncol if (mask_use(i)) then ! get surface pressure diff --git a/src/utils/cam_field_read.F90 b/src/utils/cam_field_read.F90 index a07084cf..af8dcdde 100644 --- a/src/utils/cam_field_read.F90 +++ b/src/utils/cam_field_read.F90 @@ -392,7 +392,7 @@ subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & ! ! infld_real8_2d: ! Netcdf I/O of 8-byte real field from netCDF file - ! Field on file is either 2D or 3D + ! Field on file must be 1D, 2D, or 3D ! Local array, is 2D ! @@ -548,7 +548,7 @@ subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & else if (ndims < target_ndims) then call safe_endrun(subname//': too few dimensions for '//trim(varname)) end if ! No else, things are okay - call print_input_field_info(dimlens, pdims, 2, 3, dim_bounds, 2, & + call print_input_field_info(dimlens, pdims, 1, 3, dim_bounds, 2, & varname, subname) ! ! Get array dimension id's and sizes diff --git a/src/utils/cam_pio_utils.F90 b/src/utils/cam_pio_utils.F90 index 48f2dcf7..a02b0b2d 100644 --- a/src/utils/cam_pio_utils.F90 +++ b/src/utils/cam_pio_utils.F90 @@ -729,7 +729,7 @@ subroutine find_iodesc(ldimlens, fdimlens, dtype, map, iodesc_p, found, perm) iodesc_p => curr end if if(masterproc .and. (debug_output > DEBUGOUT_INFO)) then - write(iulog,*) "FIND_IODESC: Using decomp, '", curr%tag, "'" + write(iulog,*) "FIND_IODESC: Using decomp, '"//trim(curr%tag)//"'" call shr_sys_flush(iulog) end if From 9810fdeada55b8195c0fffb89c58f5f781569609 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Tue, 20 Apr 2021 11:42:54 -0600 Subject: [PATCH 15/45] Remove temporary dyn_init debugging code. --- src/dynamics/se/dyn_comp.F90 | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 75779414..2a7191ac 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -1801,7 +1801,6 @@ subroutine read_inidat(dyn_in) ! scale PS to achieve prescribed dry mass following FV dycore (dryairm.F90) #ifndef planet_mars -#if 0 if (runtype == 0) then initial_global_ave_dry_ps = 98288.0_r8 if (.not. associated(fh_topo)) then @@ -1814,7 +1813,6 @@ subroutine read_inidat(dyn_in) call prim_set_dry_mass(elem, hvcoord, initial_global_ave_dry_ps, qtmp) end if endif -#endif #endif ! store Q values: ! From b52e5e911ad0fab9c60e43555fb72469d30a3bb1 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 26 Apr 2021 09:48:05 -0600 Subject: [PATCH 16/45] Add code modifications needed for SE dycore to run to completion. (Please note that this commit excludes a required Kessler physics and/or registry temperature tendency variable name change, which should be brought in separately. Until that change is implemented either registry.xml or kessler_update.meta must be modified manually.) --- src/dynamics/se/dp_coupling.F90 | 26 ++++++++++++++++---------- src/dynamics/se/dp_mapping.F90 | 3 --- src/dynamics/se/dyn_comp.F90 | 10 +++++----- src/dynamics/se/dyn_grid.F90 | 11 ++++++++++- src/physics/utils/phys_comp.F90 | 1 + 5 files changed, 32 insertions(+), 19 deletions(-) diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 72ec02d2..d1a22fcc 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -73,7 +73,7 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) ! LOCAL VARIABLES type(element_t), pointer :: elem(:) ! 
pointer to dyn_out element array integer :: ie ! indices over elements - integer :: icol, ilyr ! indices over chunks, columns, layers + integer :: icol, ilyr ! indices over columns, layers real(r8), allocatable :: ps_tmp(:,:) ! temp array to hold ps real(r8), allocatable :: dp3d_tmp(:,:,:) ! temp array to hold dp3d @@ -91,7 +91,7 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) !real (kind=r8), allocatable :: frontga_phys(:,:,:) integer :: ncols,ierr - integer :: col_ind, blk_ind(1), m, m_cnst + integer :: blk_ind(1), m, m_cnst integer :: nphys real(r8), allocatable :: qgll(:,:,:,:) @@ -234,6 +234,7 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) call UniquePoints(elem(ie)%idxP, elem(ie)%state%phis, phis_tmp(1:ncols,ie)) call UniquePoints(elem(ie)%idxP, nlev, pcnst, qgll,q_tmp(1:ncols,:,:,ie)) + end do call t_stopf('UniquePoints') @@ -290,9 +291,9 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) end if #endif - !$omp parallel do num_threads(max_num_threads) private (col_ind, icol, ie, blk_ind, ilyr, m) - do col_ind = 1, pcols - call get_dyn_col_p(col_ind, ie, blk_ind) + !$omp parallel do num_threads(max_num_threads) private (icol, ie, blk_ind, ilyr, m) + do icol = 1, pcols + call get_dyn_col_p(icol, ie, blk_ind) ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) do ilyr = 1, pver @@ -318,6 +319,11 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) end do end do + ! Re-set physics momentum tendencies to zero: + ! Is there a better solution here? -JN + phys_tend%dudt(:,:) = 0._kind_phys + phys_tend%dvdt(:,:) = 0._kind_phys + !Remove once a gravity wave parameterization is available -JN #if 0 if (use_gw_front .or. use_gw_front_igw) then @@ -391,9 +397,8 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) integer :: ic , ncols ! index type(element_t), pointer :: elem(:) ! pointer to dyn_in element array integer :: ie ! index for elements - integer :: col_ind ! index over columns integer :: blk_ind(1) ! element offset - integer :: icol, ilyr ! indices for chunk, column, layer + integer :: icol, ilyr ! indices for column, layer real(r8), allocatable :: dp_phys(:,:,:) ! temp array to hold dp on physics grid real(r8), allocatable :: T_tmp(:,:,:) ! temp array to hold T @@ -487,9 +492,9 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) #endif call t_startf('pd_copy') - !$omp parallel do num_threads(max_num_threads) private (col_ind, icol, ie, blk_ind, ilyr, m) - do col_ind = 1, pcols - call get_dyn_col_p(col_ind, ie, blk_ind) + !$omp parallel do num_threads(max_num_threads) private (icol, ie, blk_ind, ilyr, m) + do icol = 1, pcols + call get_dyn_col_p(icol, ie, blk_ind) ! test code -- does nothing unless cpp macro debug_coupling is defined. call test_mapping_overwrite_tendencies(phys_state, & @@ -903,6 +908,7 @@ subroutine thermodynamic_consistency(phys_state, phys_tend, ncols, pver) ! consistency (not taking into account dme adjust) ! 
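! get_cp is called with the .true. flag, which appears to request the
! inverse heat capacity, so inv_cp holds 1/cp for the local
! composition-dependent cp; the added line below rescales the physics
! temperature tendency by cpair*inv_cp, converting a tendency computed
! with the constant dry-air cpair into one consistent with the cp used
! by the dycore.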
call get_cp(1,ncols,1,pver,1,1,pcnst,phys_state%q(1:ncols,1:pver,:),.true.,inv_cp) + phys_tend%dtdt(1:ncols,1:pver) = phys_tend%dtdt(1:ncols,1:pver)*cpair*inv_cp end if end subroutine thermodynamic_consistency diff --git a/src/dynamics/se/dp_mapping.F90 b/src/dynamics/se/dp_mapping.F90 index 140ccb3f..7122cafe 100644 --- a/src/dynamics/se/dp_mapping.F90 +++ b/src/dynamics/se/dp_mapping.F90 @@ -65,9 +65,6 @@ subroutine dp_init(elem,fvm) integer :: iret character(len=*), parameter :: subname = 'dp_init' - !Initialize total number of physics points per spectral element: - nphys_pts = npsq - num_weights_phys2fvm = 0 num_weights_fvm2phys = 0 if (fv_nphys>0) then diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 2a7191ac..6ca84197 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -107,7 +107,6 @@ subroutine dyn_readnl(NLFileName) use shr_file_mod, only: shr_file_getunit, shr_file_freeunit use spmd_utils, only: masterproc, masterprocid, mpicom, npes use dyn_grid, only: se_write_grid_file, se_grid_filename, se_write_gll_corners - use dp_mapping, only: nphys_pts use native_mapping, only: native_mapping_readnl !SE dycore: @@ -383,13 +382,11 @@ subroutine dyn_readnl(NLFileName) molecular_diff = se_molecular_diff if (fv_nphys > 0) then - ! Use finite volume physics grid and CSLAM for tracer advection - nphys_pts = fv_nphys*fv_nphys + ! Use CSLAM for tracer advection qsize = thermodynamic_active_species_num ! number tracers advected by GLL ntrac = pcnst ! number tracers advected by CSLAM else - ! Use GLL grid for physics and tracer advection - nphys_pts = npsq + ! Use GLL for tracer advection qsize = pcnst ntrac = 0 end if @@ -1924,6 +1921,9 @@ subroutine read_inidat(dyn_in) call mark_as_initialized("inverse_exner_function_wrt_surface_pressure") call mark_as_initialized("lagrangian_tendency_of_air_pressure") + !This quantity should be removed once CCPP manages physics-scheme-initialized host variable:: + call mark_as_initialized("tendency_of_temperature") + end subroutine read_inidat diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 2cfd3335..b6afd3f3 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -133,7 +133,7 @@ subroutine model_grid_init() use physconst, only: thermodynamic_active_species_num use ref_pres, only: ref_pres_init use time_manager, only: get_nstep, get_step_size - use dp_mapping, only: dp_init, dp_write + use dp_mapping, only: dp_init, dp_write, nphys_pts use native_mapping, only: do_native_mapping, create_native_mapping_files use cam_grid_support, only: hclen=>max_hcoordname_len use physics_grid, only: phys_grid_init @@ -181,6 +181,15 @@ subroutine model_grid_init() ! Initialize SE-dycore specific variables: call dimensions_mod_init() + ! Initialize total number of physics points per spectral element: + if (fv_nphys > 0) then + ! Use finite volume physics grid + nphys_pts = fv_nphys*fv_nphys + else + ! Use GLL grid for physics + nphys_pts = npsq + end if + ! Initialize hybrid coordinate arrays call hycoef_init(fh_ini, psdry=.true.) diff --git a/src/physics/utils/phys_comp.F90 b/src/physics/utils/phys_comp.F90 index 74081406..099f2ae0 100644 --- a/src/physics/utils/phys_comp.F90 +++ b/src/physics/utils/phys_comp.F90 @@ -218,6 +218,7 @@ subroutine phys_run2(dtime_phys, phys_state, phys_tend, cam_in, cam_out) ! Threading vars col_start = 1 col_end = columns_on_task + ! 
Run CCPP suite do part_ind = 1, size(suite_parts, 1) call cam_ccpp_physics_run(suite_name, suite_parts(part_ind), & From a64e7c25c2f523427c2b5278546ad08b1c68c229 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Sat, 1 May 2021 09:07:27 -0600 Subject: [PATCH 17/45] Fix bug in physconst, and add extra mark_as_init calls. --- src/data/generate_registry_data.py | 6 ++++-- src/data/physconst.F90 | 7 ++++--- src/dynamics/se/dyn_comp.F90 | 6 +++--- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 7c867f3c..186b5bcf 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -231,7 +231,8 @@ def write_initial_value(self, outfile, indent, init_var, ddt_str): # end if if not init_val: if self.var_type.lower() == 'real': - init_val = 'nan' +# init_val = 'nan' + init_val = 'inf' #DEBUG -JN elif self.var_type.lower() == 'integer': init_val = 'HUGE(1)' elif self.var_type.lower() == 'character': @@ -1201,7 +1202,8 @@ def write_allocate_routine(self, outfile): args.append('{}_in'.format(reall_var)) outfile.write('subroutine {}({})'.format(subname, ', '.join(args)), 1) # Use statements - nanmods = 'nan => shr_infnan_nan, assignment(=)' +# nanmods = 'nan => shr_infnan_nan, assignment(=)' + nanmods = 'inf => shr_infnan_inf, assignment(=)' #DEBUG -JN outfile.write('use shr_infnan_mod, only: {}'.format(nanmods), 2) outfile.write('use cam_abortutils, only: endrun', 2) # Dummy arguments diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 4022228f..e4d7609a 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -237,6 +237,7 @@ module physconst ! Read namelist variables. subroutine physconst_readnl(nlfile) + use shr_kind_mod, only: r8=>shr_kind_r8 use shr_nl_mod, only: find_group_name => shr_nl_find_group_name use shr_flux_mod, only: shr_flux_adjust_constants ! use mpi, only: mpi_bcast !!XXgoldyXX: Why not? @@ -351,8 +352,8 @@ subroutine physconst_readnl(nlfile) Cpd_on_Cpv = cpair / cpwv ! Adjust constants in shr_flux_mod. 
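! (The dummy arguments of shr_flux_adjust_constants are declared with the
!  share kind shr_kind_r8, not CAM's kind_phys, so the actual arguments are
!  now cast with real(x, r8); the previous real(x, kind_phys) casts are only
!  correct when kind_phys happens to equal r8.)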
- call shr_flux_adjust_constants(zvir=real(zvir, kind_phys), & - cpvir=real(cpvir, kind_phys), gravit=real(gravit, kind_phys)) + call shr_flux_adjust_constants(zvir=real(zvir, r8), & + cpvir=real(cpvir, r8), gravit=real(gravit, r8)) end if ez = omega / sqrt(0.375_kind_phys) @@ -499,7 +500,7 @@ subroutine physconst_init(pcols, pver, pverp) rairv(:pcols,:pver) = rair cappav(:pcols,:pver) = rair/cpair mbarv(:pcols,:pver) = mwdry - zvirv(:pcols,:pver) = cpair + zvirv(:pcols,:pver) = zvir !!XXgoldyXX: v until we get constituents figured out in CCPP #if 0 diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 6ca84197..69392bab 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -1905,7 +1905,7 @@ subroutine read_inidat(dyn_in) call mark_as_initialized("geopotential_height_at_interface") call mark_as_initialized("dry_static_energy_content_of_atmosphere_layer") - !These quantities are calculated in d_p_coupling using the variables initialized here: + !These variables are calculated in d_p_coupling, but need to be marked here: call mark_as_initialized("air_pressure") call mark_as_initialized("natural_log_of_air_pressure") call mark_as_initialized("air_pressure_at_interface") @@ -1920,9 +1920,9 @@ subroutine read_inidat(dyn_in) call mark_as_initialized("reciprocal_of_pressure_thickness") call mark_as_initialized("inverse_exner_function_wrt_surface_pressure") call mark_as_initialized("lagrangian_tendency_of_air_pressure") - - !This quantity should be removed once CCPP manages physics-scheme-initialized host variable:: call mark_as_initialized("tendency_of_temperature") + call mark_as_initialized("tendency_of_eastward_wind") + call mark_as_initialized("tendency_of_northward_wind") end subroutine read_inidat From cf8c72662f6e1fe48115e689ffaa6f903a76e74d Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Sat, 1 May 2021 09:47:03 -0600 Subject: [PATCH 18/45] Remove mistakenly added debugging code in registry generator. --- src/data/generate_registry_data.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 186b5bcf..7c867f3c 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -231,8 +231,7 @@ def write_initial_value(self, outfile, indent, init_var, ddt_str): # end if if not init_val: if self.var_type.lower() == 'real': -# init_val = 'nan' - init_val = 'inf' #DEBUG -JN + init_val = 'nan' elif self.var_type.lower() == 'integer': init_val = 'HUGE(1)' elif self.var_type.lower() == 'character': @@ -1202,8 +1201,7 @@ def write_allocate_routine(self, outfile): args.append('{}_in'.format(reall_var)) outfile.write('subroutine {}({})'.format(subname, ', '.join(args)), 1) # Use statements -# nanmods = 'nan => shr_infnan_nan, assignment(=)' - nanmods = 'inf => shr_infnan_inf, assignment(=)' #DEBUG -JN + nanmods = 'nan => shr_infnan_nan, assignment(=)' outfile.write('use shr_infnan_mod, only: {}'.format(nanmods), 2) outfile.write('use cam_abortutils, only: endrun', 2) # Dummy arguments From a7e54d88c95cf9e1ec9fb78f79b5187e7ffe060d Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 20 May 2021 11:59:36 -0600 Subject: [PATCH 19/45] Fix 'allow_abrev' python version issue, and use 'csne' to set the se_ne namelist default value. 
--- cime_config/cam_config.py | 16 ++++++++++++---- cime_config/namelist_definition_cam.xml | 10 ++++++++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index f34b6c4e..2d974d28 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -799,7 +799,7 @@ def __init__(self, case, case_log): # Add number of elements along edge of cubed-sphere grid csne_desc = "Number of elements along one edge of a cubed sphere grid." - self.create_config("csne", csne_desc, csne_val) + self.create_config("csne", csne_desc, csne_val, is_nml_attr=True) # Add number of points on each cubed-sphere element edge csnp_desc = "Number of points on each edge of the elements in a cubed sphere grid." @@ -923,9 +923,17 @@ def parse_config_opts(cls, config_opts, test_mode=False): SystemExit: 2 """ cco_str = "CAM_CONFIG_OPTS" - parser = argparse.ArgumentParser(description=cco_str, - prog="ConfigCAM", allow_abbrev=False, - epilog="Allowed values of "+cco_str) + + #Don't allow abbreviations if using python 3.5 or greater: + if sys.version_info[0] > 2 and sys.version_info[1] > 4: + parser = argparse.ArgumentParser(description=cco_str, + prog="ConfigCAM", allow_abbrev=False, + epilog="Allowed values of "+cco_str) + else: + parser = argparse.ArgumentParser(description=cco_str, + prog="ConfigCAM", + epilog="Allowed values of "+cco_str) + parser.add_argument("--physics-suites", "-physics-suites", type=str, required=True, metavar='', diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 7ffb1c88..f5eda85b 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10977,10 +10977,16 @@ Number of elements along a cube edge. Must match value of grid. Set this to zero to use a refined mesh. - Default: UNKNOWN. + Default: 0 - 30 + 0 + 5 + 16 + 30 + 60 + 120 + 240 From 77521d2e6fff871a1c57e3880ea7d92c36ca6577 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Fri, 21 May 2021 21:40:44 -0600 Subject: [PATCH 20/45] Add bug fixes needed to run with PGI and NAG compilers. --- cime_config/namelist_definition_cam.xml | 40 +++++++++++++++++++++++++ src/data/registry.xml | 2 +- src/dynamics/se/dp_coupling.F90 | 9 +++--- src/dynamics/se/dyn_comp.F90 | 24 +++++++-------- 4 files changed, 58 insertions(+), 17 deletions(-) diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index f5eda85b..1f1f9429 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10917,6 +10917,46 @@ 4 + + integer + se + dyn_se_inparm + + Variable to specify the vertical index at which the + Rayleigh friction term is centered (the peak value). + Default: 2 + + + 2 + + + + real + se + dyn_se_inparm + + Rayleigh friction parameter to determine the width of the profile. If set + to 0 then a width is chosen by the algorithm (see rayleigh_friction.F90). + Default: 0.5. + + + 0.5 + 3 + + + + real + se + dyn_se_inparm + + Rayleigh friction parameter to determine the approximate value of the decay + time (days) at model top. If 0.0 then no Rayleigh friction is applied. + Default: 0. 
+ + + 0.0 + + integer se diff --git a/src/data/registry.xml b/src/data/registry.xml index 37d6dfed..e1dc7ae8 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -282,7 +282,7 @@ x_wind y_wind lagrangian_tendency_of_air_pressure - dry_static_energy_content_of_atmosphere_layer + dry_static_energy constituent_mixing_ratio diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index d1a22fcc..61b9f278 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -319,8 +319,9 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) end do end do - ! Re-set physics momentum tendencies to zero: + ! Re-set physics tendencies to zero: ! Is there a better solution here? -JN + phys_tend%dTdt(:,:) = 0._kind_phys phys_tend%dudt(:,:) = 0._kind_phys phys_tend%dvdt(:,:) = 0._kind_phys @@ -549,11 +550,11 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) !$omp parallel do num_threads(max_num_threads) private(ie,ncols) do ie = 1, nelemd ncols = elem(ie)%idxP%NumUniquePts - call putUniquePoints(elem(ie)%idxP, nlev, T_tmp(1:pcols,:,ie), & + call putUniquePoints(elem(ie)%idxP, nlev, T_tmp(1:ncols,:,ie), & elem(ie)%derived%fT(:,:,:)) - call putUniquePoints(elem(ie)%idxV, 2, nlev, uv_tmp(1:pcols,:,:,ie), & + call putUniquePoints(elem(ie)%idxV, 2, nlev, uv_tmp(1:ncols,:,:,ie), & elem(ie)%derived%fM(:,:,:,:)) - call putUniquePoints(elem(ie)%idxV, nlev, pcnst, dq_tmp(1:pcols,:,:,ie), & + call putUniquePoints(elem(ie)%idxV, nlev, pcnst, dq_tmp(1:ncols,:,:,ie), & elem(ie)%derived%fQ(:,:,:,:)) end do call t_stopf('putUniquePoints') diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 69392bab..f82013ed 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -1890,39 +1890,39 @@ subroutine read_inidat(dyn_in) !the initial values itself: call mark_as_initialized("surface_air_pressure") call mark_as_initialized("pressure_thickness") - call mark_as_initialized("eastward_wind") - call mark_as_initialized("northward_wind") - call mark_as_initialized("temperature") + call mark_as_initialized("x_wind") !eastward wind + call mark_as_initialized("y_wind") !northward wind + call mark_as_initialized("air_temperature") !These calls will need to be modified once constituents are enabled: call mark_as_initialized("water_vapor_specific_humidity") - call mark_as_initialized("cloud_liquid_water_mixing_ratio") + call mark_as_initialized("cloud_liquid_water_mixing_ratio_of_moist_air") call mark_as_initialized("rain_water_mixing_ratio") !These calls may be removed if geopotential_t is only allowed to run !in a CCPP physics suite: call mark_as_initialized("geopotential_height") call mark_as_initialized("geopotential_height_at_interface") - call mark_as_initialized("dry_static_energy_content_of_atmosphere_layer") + call mark_as_initialized("dry_static_energy") !These variables are calculated in d_p_coupling, but need to be marked here: call mark_as_initialized("air_pressure") - call mark_as_initialized("natural_log_of_air_pressure") + call mark_as_initialized("ln_of_air_pressure") call mark_as_initialized("air_pressure_at_interface") - call mark_as_initialized("natural_log_of_air_pressure_at_interface") + call mark_as_initialized("ln_of_air_pressure_at_interface") call mark_as_initialized("pressure_thickness_of_dry_air") call mark_as_initialized("surface_pressure_of_dry_air") call mark_as_initialized("air_pressure_of_dry_air") call mark_as_initialized("air_pressure_of_dry_air_at_interface") - call 
mark_as_initialized("natural_log_of_air_pressure_of_dry_air_at_interface") - call mark_as_initialized("natural_log_of_air_pressure_of_dry_air") + call mark_as_initialized("ln_of_air_pressure_of_dry_air_at_interface") + call mark_as_initialized("ln_of_air_pressure_of_dry_air") call mark_as_initialized("reciprocal_of_pressure_thickness_of_dry_air") call mark_as_initialized("reciprocal_of_pressure_thickness") call mark_as_initialized("inverse_exner_function_wrt_surface_pressure") call mark_as_initialized("lagrangian_tendency_of_air_pressure") - call mark_as_initialized("tendency_of_temperature") - call mark_as_initialized("tendency_of_eastward_wind") - call mark_as_initialized("tendency_of_northward_wind") + call mark_as_initialized("total_tendency_of_air_temperature") + call mark_as_initialized("total_tendency_of_x_wind") + call mark_as_initialized("total_tendency_of_y_wind") end subroutine read_inidat From 4420c65b5d370a0c2213643e50eae2c0ca11c9e3 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 2 Jun 2021 10:30:25 -0600 Subject: [PATCH 21/45] Remove FVM_TRACERS CPP def, and perform some code clean-up. --- cime_config/cam_config.py | 19 +++-- cime_config/namelist_definition_cam.xml | 3 + src/control/cam_comp.F90 | 8 -- src/dynamics/se/dycore/dimensions_mod.F90 | 92 +++++++++-------------- 4 files changed, 51 insertions(+), 71 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 2d974d28..f19f4fef 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -697,7 +697,8 @@ def __init__(self, case, case_log): # Horizontal grid self.create_config("hgrid", hgrid_desc, atm_grid, se_grid_re, is_nml_attr=True) - # Add SE namelist groups to nmlgen list: + + # Add SE namelist groups to nmlgen list self.__nml_groups.append("air_composition_nl") self.__nml_groups.append("dyn_se_inparm") @@ -709,10 +710,6 @@ def __init__(self, case, case_log): if nthrds > 1: self.add_cppdef("_OPENMP") - # Add CSLAM CPP definition, if needed: - if atm_grid.find("pg") != -1: - self.add_cppdef("FVM_TRACERS") - elif fv3_grid_re.match(atm_grid) is not None: # Dynamical core self.create_config("dyn", dyn_desc, "fv3", @@ -797,6 +794,13 @@ def __init__(self, case, case_log): csnp_re = re.search(r"np[0-9]+", atm_grid) csnp_val = int(csnp_re.group()[2:]) + # Extract number of CSLAM physics grid points, if available: + npg_re = re.search(r"pg[1-9]+", atm_grid) + if npg_re: + npg_val = int(npg_re.group()[2:]) + else: + npg_val = 0 #No CSLAM grid points + # Add number of elements along edge of cubed-sphere grid csne_desc = "Number of elements along one edge of a cubed sphere grid." self.create_config("csne", csne_desc, csne_val, is_nml_attr=True) @@ -805,6 +809,11 @@ def __init__(self, case, case_log): csnp_desc = "Number of points on each edge of the elements in a cubed sphere grid." self.create_config("csnp", csnp_desc, csnp_val) + # Add number of CSLAM physics grid points: + npg_desc = "Number of physics grid cells on each edge of" \ + " the elements in a cubed sphere grid." 
+ self.create_config("npg", npg_desc, npg_val, is_nml_attr=True) + # Add number of points (NP) CPP definition: self.add_cppdef("NP", csnp_val) diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 1f1f9429..3ca6d51d 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -11640,6 +11640,9 @@ 0 + 2 + 3 + 4 diff --git a/src/control/cam_comp.F90 b/src/control/cam_comp.F90 index 8f4be12c..933b2d6f 100644 --- a/src/control/cam_comp.F90 +++ b/src/control/cam_comp.F90 @@ -297,10 +297,6 @@ subroutine cam_run2(cam_out, cam_in) ! call ionosphere_run2( phys_state, dyn_in) ! call t_stopf ('ionosphere_run2') - if (is_first_step() .or. is_first_restart_step()) then - call t_startf('cam_run2_memusage') - call t_stopf('cam_run2_memusage') - end if end subroutine cam_run2 ! @@ -329,10 +325,6 @@ subroutine cam_run3(cam_out) call stepon_run3(dtime_phys, cam_out, phys_state, dyn_in, dyn_out) call t_stopf ('stepon_run3') - if (is_first_step() .or. is_first_restart_step()) then - call t_startf('cam_run3_memusage') - call t_stopf('cam_run3_memusage') - end if end subroutine cam_run3 ! diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 index 383af01f..51799649 100644 --- a/src/dynamics/se/dycore/dimensions_mod.F90 +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -121,7 +121,7 @@ subroutine dimensions_mod_init() use vert_coord, only: pver, pverp use constituents, only: pcnst - use cam_abortutils, only: endrun + use cam_abortutils, only: endrun, check_allocate use string_utils, only: to_str ! Local variables: @@ -131,14 +131,15 @@ subroutine dimensions_mod_init() character(len=*), parameter :: subname = 'dimensions_mod_init' ! Set tracer dimension variables: - -#ifdef FVM_TRACERS - qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) - ntrac_d = pcnst -#else - qsize_d = pcnst - ntrac_d = 0 ! No fvm tracers if CSLAM is off -#endif + if (fv_nphys > 0) then + ! Use CSLAM for tracer advection + qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) + ntrac_d = pcnst + else + ! Use GLL for tracer advection + qsize_d = pcnst + ntrac_d = 0 ! No fvm tracers if CSLAM is off + end if ! Set grid dimension variables: @@ -149,77 +150,52 @@ subroutine dimensions_mod_init() ! 
Allocate vertically-dimensioned variables: allocate(irecons_tracer_lev(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate irecons_tracer_lev(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'irecons_tracer_lev(pver)', & + file=__FILE__, line=__LINE__) allocate(nu_scale_top(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate nu_scale_top(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'nu_scale_top(pver)', & + file=__FILE__, line=__LINE__) allocate(nu_lev(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate nu_lev(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'nu_lev(pver)', & + file=__FILE__, line=__LINE__) allocate(otau(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate otau(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'otau(pver)', & + file=__FILE__, line=__LINE__) allocate(nu_div_lev(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate nu_div_lev(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'nu_div_lev(pver)', & + file=__FILE__, line=__LINE__) allocate(kmvis_ref(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kmvis_ref(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'kmvis_ref(pver)', & + file=__FILE__, line=__LINE__) allocate(kmcnd_ref(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kmcnd_ref(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'kmcnd_ref(pver)', & + file=__FILE__, line=__LINE__) allocate(rho_ref(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate rho_ref(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'rho_ref(pver)', & + file=__FILE__, line=__LINE__) allocate(km_sponge_factor(pver), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate km_sponge_factor(pver) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'km_sponge_factor(pver)', & + file=__FILE__, line=__LINE__) allocate(kmvisi_ref(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kmvisi_ref(pverp) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'kmvisi_ref(pverp)', & + file=__FILE__, line=__LINE__) allocate(kmcndi_ref(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kmcndi_ref(pverp) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'kmcndi_ref(pverp)', & + file=__FILE__, line=__LINE__) allocate(rhoi_ref(pverp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate rhoi_ref(pverp) failed with stat: '//& - to_str(iret)) - end if - + call check_allocate(iret, subname, 'rhoi_ref(pverp)', & + file=__FILE__, line=__LINE__) end subroutine dimensions_mod_init From db48d38d74b8da5d491df55944752431f2cfbc20 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 3 Jun 2021 15:26:20 -0600 Subject: [PATCH 22/45] Ensure the proper kind for physconst calls, and add extra check_allocate calls. 
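The check_allocate helper added throughout this patch is imported from
cam_abortutils, but its implementation is not part of the series, so the
interface here is inferred from the call sites. A minimal, self-contained
sketch of the pattern (abort with the caller name, variable name, file, and
line whenever an allocate returns a nonzero stat) could look like the
following; the module and program names are placeholders, the message format
is illustrative, and __FILE__/__LINE__ assume a preprocessed .F90 source as
in the files above.

    module alloc_check_sketch
       implicit none
       private
       public :: check_allocate
    contains
       subroutine check_allocate(iret, subname, varname, file, line)
          ! Abort with a descriptive message when an allocate reports failure.
          integer,          intent(in) :: iret
          character(len=*), intent(in) :: subname
          character(len=*), intent(in) :: varname
          character(len=*), intent(in) :: file
          integer,          intent(in) :: line
          if (iret /= 0) then
             write(*, '(a,i0,a,i0)') subname//': allocate of '//varname// &
                  ' failed at '//file//':', line, ' with stat = ', iret
             stop 1
          end if
       end subroutine check_allocate
    end module alloc_check_sketch

    program alloc_check_demo
       use alloc_check_sketch, only: check_allocate
       implicit none
       integer, parameter :: pver = 32
       real, allocatable  :: nu_lev(:)
       integer            :: iret

       ! Same calling pattern as the dimensions_mod_init hunks below.
       allocate(nu_lev(pver), stat=iret)
       call check_allocate(iret, 'alloc_check_demo', 'nu_lev(pver)', &
            file=__FILE__, line=__LINE__)
    end program alloc_check_demo

The real routine presumably routes its message through endrun so that all MPI
ranks abort consistently; stop 1 merely stands in for that here.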
--- src/data/physconst.F90 | 11 +- src/dynamics/se/advect_tend.F90 | 16 +- src/dynamics/se/dp_mapping.F90 | 119 ++++----- .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 18 +- .../se/dycore/coordinate_systems_mod.F90 | 12 +- src/dynamics/se/dycore/cube_mod.F90 | 20 +- src/dynamics/se/dycore/derivative_mod.F90 | 66 ++--- src/dynamics/se/dycore/dimensions_mod.F90 | 3 +- src/dynamics/se/dycore/global_norms_mod.F90 | 120 +++++---- src/dynamics/se/dycore/interpolate_mod.F90 | 10 +- src/dynamics/se/dycore/mesh_mod.F90 | 6 +- src/dynamics/se/dycore/prim_advance_mod.F90 | 172 +++++++----- src/dynamics/se/dycore/prim_advection_mod.F90 | 13 +- src/dynamics/se/dycore/prim_init.F90 | 2 +- src/dynamics/se/dycore/quadrature_mod.F90 | 4 +- src/dynamics/se/dyn_grid.F90 | 245 +++++++----------- src/dynamics/se/native_mapping.F90 | 79 +++--- src/dynamics/tests/inic_analytic.F90 | 27 +- .../initial_conditions/ic_baroclinic.F90 | 37 ++- 19 files changed, 447 insertions(+), 533 deletions(-) diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index e4d7609a..be87fccc 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -1553,7 +1553,7 @@ subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_speci ! ! dry air not species dependent ! - R_dry = rair + R_dry = real(rair, r8) else if (present(fact)) then factor = fact(:,:,:) @@ -1970,7 +1970,7 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, ! ! dry air not species dependent if (dry_air_species_num==0) then - kappa_dry= rair/cpair + kappa_dry= real(rair/cpair, r8) else allocate(R_dry(i0:i1,j0:j1,k0:k1), stat=iret) if (iret /= 0) then @@ -1985,13 +1985,14 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, end if if (present(fact)) then - call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry,fact=fact) + call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,real(tracer, kind_phys),active_species_idx,cp_dry,& + fact=real(fact, kind_phys)) call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry,fact=fact) else - call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry) + call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,real(tracer, kind_phys),active_species_idx,cp_dry) call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry) end if - kappa_dry = R_dry/cp_dry + kappa_dry = R_dry/real(cp_dry, r8) deallocate(R_dry,cp_dry) end if end subroutine get_kappa_dry diff --git a/src/dynamics/se/advect_tend.F90 b/src/dynamics/se/advect_tend.F90 index fe3c67bd..0f6c0681 100644 --- a/src/dynamics/se/advect_tend.F90 +++ b/src/dynamics/se/advect_tend.F90 @@ -26,8 +26,7 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0) use time_manager, only: get_step_size ! use constituents, only: tottnam,pcnst use constituents, only: pcnst - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: check_allocate ! SE dycore: use dimensions_mod, only: nc,np,nlev,ntrac @@ -53,20 +52,15 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0) endif allocate( ftmp(nx*nx,nlev), stat=iret ) - if (iret /= 0) then - call endrun(subname//': allocate ftmp(nx*nx,nlev) failed with stat: '//& - to_str(iret)) - end if - + call check_allocate(iret, subname, 'ftmp(nx*nx,nlev)', & + file=__FILE__, line=__LINE__) init = .false. if ( .not. allocated( adv_tendxyz ) ) then init = .true. 
allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) ) - if (iret /= 0) then - call endrun(subname//': allocate adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'adv_tendxyz(nx,nx,nlev,pcnst,nets:nete)', & + file=__FILE__, line=__LINE__) adv_tendxyz(:,:,:,:,:) = 0._r8 endif diff --git a/src/dynamics/se/dp_mapping.F90 b/src/dynamics/se/dp_mapping.F90 index 7122cafe..32de46fc 100644 --- a/src/dynamics/se/dp_mapping.F90 +++ b/src/dynamics/se/dp_mapping.F90 @@ -2,8 +2,7 @@ module dp_mapping use shr_const_mod, only: pi => shr_const_pi - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate !SE dycore: use dimensions_mod, only: np, fv_nphys @@ -72,49 +71,44 @@ subroutine dp_init(elem,fvm) num_weights_fvm2phys = (nc+fv_nphys)**2 allocate(weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_all_fvm2phys(num_weights_fvm2phys,irecons_tracer,nelemd)', & + file=__FILE__, line=__LINE__) + allocate(weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_eul_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)', & + file=__FILE__, line=__LINE__) allocate(weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_lgr_index_all_fvm2phys(num_weights_fvm2phys,2,nelemd)', & + file=__FILE__, line=__LINE__) allocate(weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_all_phys2fvm(num_weights_phys2fvm,irecons_tracer,nelemd)', & + file=__FILE__, line=__LINE__) allocate(weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_eul_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)', & + file=__FILE__, line=__LINE__) allocate(weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'weights_lgr_index_all_phys2fvm(num_weights_phys2fvm,2,nelemd)', & + file=__FILE__, line=__LINE__) allocate(jall_fvm2phys(nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate jall_fvm2phys(nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'jall_fvm2phys(nelemd)', & + file=__FILE__, line=__LINE__) allocate(jall_phys2fvm(nelemd), 
stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate jall_phys2fvm(nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'jall_phys2fvm(nelemd)', & + file=__FILE__, line=__LINE__) call fvm2phys_init(elem,fvm,nc,fv_nphys,irecons_tracer,& weights_all_fvm2phys,weights_eul_index_all_fvm2phys,weights_lgr_index_all_fvm2phys,& @@ -177,19 +171,16 @@ subroutine dp_allocate(elem) ! begin allocate(displs(npes), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate displs(npes) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'displs(npes)', & + file=__FILE__, line=__LINE__) allocate(dp_gid(nelem), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate dp_gid(nelem) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'dp_gid(nelem)', & + file=__FILE__, line=__LINE__) allocate(recvcount(npes), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate recvcount(npes) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'recvcount(npes)', & + file=__FILE__, line=__LINE__) call mpi_gather(nelemd, 1, mpi_integer, recvcount, 1, mpi_integer, & masterprocid, mpicom, ierror) @@ -204,9 +195,8 @@ subroutine dp_allocate(elem) mpi_integer, masterprocid, mpicom, ierror) if (masterproc) then allocate(dp_owner(nelem), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate dp_owner(nelem) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'dp_owner(nelem)', & + file=__FILE__, line=__LINE__) dp_owner(:) = -1 do i = 1,npes @@ -221,10 +211,8 @@ subroutine dp_allocate(elem) call mpi_barrier(mpicom,ierror) if (.not.masterproc) then allocate(dp_owner(nelem), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate dp_owner(nelem) failed with stat: '//to_str(ierror)) - end if - + call check_allocate(ierror, subname, 'dp_owner(nelem)', & + file=__FILE__, line=__LINE__) end if call mpi_bcast(dp_gid,nelem,mpi_integer,masterprocid,mpicom,ierror) call mpi_bcast(dp_owner,nelem,mpi_integer,masterprocid,mpicom,ierror) @@ -321,25 +309,21 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) ! 
Allocate workspace and calculate PE displacement information if (IOroot) then allocate(displs(npes), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate displs(npes) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'displs(npes)', & + file=__FILE__, line=__LINE__) allocate(recvcount(npes), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate recvcount(npes) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'recvcount(npes)', & + file=__FILE__, line=__LINE__) else allocate(displs(0), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate displs(0) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'displs(0)', & + file=__FILE__, line=__LINE__) allocate(recvcount(0), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate recvcount(0) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'recvcount(0)', & + file=__FILE__, line=__LINE__) end if gridsize = nelem * fv_nphys*fv_nphys if(masterproc) then @@ -356,21 +340,18 @@ subroutine dp_write(elem, fvm, grid_format, filename_in) displs(i) = displs(i-1)+recvcount(i-1) end do allocate(recvbuf(gridsize), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate recvbuf(gridsize) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'recvbuf(gridsize)', & + file=__FILE__, line=__LINE__) else allocate(recvbuf(0), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate recvbuf(0) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'recvbuf(0)', & + file=__FILE__, line=__LINE__) end if allocate(gwork(4, gridsize), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate gwork(4, gridsize) failed with stat: '//to_str(ierror)) - end if + call check_allocate(ierror, subname, 'gwork(4, gridsize)', & + file=__FILE__, line=__LINE__) if (IOroot) then ! Define the horizontal grid dimensions for SCRIP output diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index 7e741ab1..6b37a024 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -682,7 +682,7 @@ function make_unique(a, n) result(m) ! if (ABS(a(j)-a(i)).lt. 1e-6) a(j) = 9999 delta = abs(a(j)-a(i)) if (delta < 1.e-6_r8) a(j) = 9999.0_r8 - if (abs((2.0_r8*pi) - delta) < 1.0e-6_r8) a(j) = 9999.0_r8 + if (abs((2.0_r8*real(pi, r8)) - delta) < 1.0e-6_r8) a(j) = 9999.0_r8 end do end do m = 0 @@ -691,7 +691,7 @@ function make_unique(a, n) result(m) end do if (mod(m,2).ne.0) then do i=1,n - print *,'angle with centroid: ',i,a(i),mod(a(i),2*pi) + print *,'angle with centroid: ',i,a(i),mod(a(i),2*real(pi, r8)) end do call endrun("Error: Found an odd number or nodes for cv element. Should be even.") end if @@ -1385,7 +1385,7 @@ subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) ! MNL: dx and dy are no longer part of element_t ! but they are easily computed for the ! uniform case - dx = pi/(2.0d0*dble(ne)) + dx = real(pi, r8)/(2.0d0*dble(ne)) dy = dx ! 
intialize local element dual grid, local element areas @@ -1393,8 +1393,8 @@ subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) do ie=nets,nete call convert_gbl_index(elem(ie)%vertex%number,ie1,je1,face_no) - start%x=-pi/4 + ie1*dx - start%y=-pi/4 + je1*dy + start%x=r-real(pi, r8)/4 + ie1*dx + start%y=-1._r8*real(pi, r8)/4 + je1*dy endd%x =start%x + dx endd%y =start%y + dy cartp_nm1(0:np,0:np) = element_coordinates(start,endd,gllnm1) @@ -2260,7 +2260,7 @@ subroutine VerifVolumes(elem, hybrid,nets,nete) real(r8), pointer :: locvol(:,:) - dx = pi/(2.0d0*dble(ne)) + dx = real(pi, r8)/(2.0d0*dble(ne)) dy = dx if(.not. initialized) then @@ -2333,13 +2333,13 @@ subroutine VerifVolumes(elem, hybrid,nets,nete) if(hybrid%masterthread) then write(*,'(a,i2,a,2e23.15)') "cube face:",face," : SURFACE FV =",& - 6_r8*psum/(4_r8 * pi), & - 6_r8*psum/(4_r8 * pi)-1 + 6_r8*psum/(4_r8 * real(pi, r8)), & + 6_r8*psum/(4_r8 * real(pi, r8))-1 end if end do if(hybrid%masterthread) then - write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * pi) + write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * real(pi, r8)) end if end subroutine VerifVolumes diff --git a/src/dynamics/se/dycore/coordinate_systems_mod.F90 b/src/dynamics/se/dycore/coordinate_systems_mod.F90 index b5a845ac..f653d76f 100644 --- a/src/dynamics/se/dycore/coordinate_systems_mod.F90 +++ b/src/dynamics/se/dycore/coordinate_systems_mod.F90 @@ -1,7 +1,7 @@ module coordinate_systems_mod use shr_kind_mod, only: r8=>shr_kind_r8 - use physconst, only: pi use cam_abortutils, only: endrun + use physconst, only: pi ! WARNING: When using this class be sure that you know if the ! cubic coordinates are on the unit cube or the [-\pi/4,\pi/4] cube @@ -290,7 +290,7 @@ pure function cart_to_spherical(cart) result (sphere) if ( abs(abs(sphere%lat)-PI/2) >= DIST_THRESHOLD ) then sphere%lon=ATAN2(cart%y,cart%x) if (sphere%lon<0) then - sphere%lon=sphere%lon + 2*PI + sphere%lon=sphere%lon + 2*real(pi, r8) end if end if @@ -565,7 +565,7 @@ pure function sphere2cubedsphere (sphere, face_no) result(cart) lat = sphere%lat lon = sphere%lon - twopi = 2.0_r8 * pi + twopi = 2.0_r8 * real(pi, r8) pi2 = pi * 0.5_r8 pi3 = pi * 1.5_r8 pi4 = pi * 0.25_r8 @@ -573,14 +573,14 @@ pure function sphere2cubedsphere (sphere, face_no) result(cart) select case (face_no) case (1) xp = lon - if (pi < lon) xp = lon - twopi !if lon in [0,2\pi] + if (real(pi, r8) < lon) xp = lon - twopi !if lon in [0,2\pi] yp = atan(tan(lat)/cos(xp)) case (2) xp = lon - pi2 yp = atan(tan(lat)/cos(xp)) case (3) - xp = lon - pi - if (lon < 0) xp = lon + pi !if lon in [0,2\pi] + xp = lon - real(pi, r8) + if (lon < 0) xp = lon + real(pi, r8) !if lon in [0,2\pi] yp = atan(tan(lat)/cos(xp)) case (4) xp = lon - pi3 diff --git a/src/dynamics/se/dycore/cube_mod.F90 b/src/dynamics/se/dycore/cube_mod.F90 index f7e1e019..1c085d4c 100644 --- a/src/dynamics/se/dycore/cube_mod.F90 +++ b/src/dynamics/se/dycore/cube_mod.F90 @@ -16,10 +16,10 @@ module cube_mod integer,public, parameter :: nInnerElemEdge = 8 ! number of edges for an interior element integer,public, parameter :: nCornerElemEdge = 4 ! 
number of corner elements - real(kind=r8), public, parameter :: cube_xstart = -0.25_R8*PI - real(kind=r8), public, parameter :: cube_xend = 0.25_R8*PI - real(kind=r8), public, parameter :: cube_ystart = -0.25_R8*PI - real(kind=r8), public, parameter :: cube_yend = 0.25_R8*PI + real(kind=r8), public, parameter :: cube_xstart = -0.25_R8*real(pi, r8) + real(kind=r8), public, parameter :: cube_xend = 0.25_R8*real(pi, r8) + real(kind=r8), public, parameter :: cube_ystart = -0.25_R8*real(pi, r8) + real(kind=r8), public, parameter :: cube_yend = 0.25_R8*real(pi, r8) type, public :: face_t @@ -421,8 +421,8 @@ subroutine metric_atomic(elem,gll_points,alpha) DE(2,1)=sum(elem%D(i,j,2,:)*E(:,1)) DE(2,2)=sum(elem%D(i,j,2,:)*E(:,2)) - lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) - lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) + lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(real(rearth**2.0_r8, r8)) + lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(real(rearth**2.0_r8, r8)) !matrix (DE) * Lam^* * Lam , tensor HV when V is applied at each Laplace calculation ! DEL(1:2,1) = lamStar1*eig(1)*DE(1:2,1) @@ -454,8 +454,8 @@ subroutine metric_atomic(elem,gll_points,alpha) ! compute element length scales, based on SVDs, in km: - elem%dx_short = 1.0_r8/(max_svd*0.5_r8*dble(np-1)*ra*1000.0_r8) - elem%dx_long = 1.0_r8/(min_svd*0.5_r8*dble(np-1)*ra*1000.0_r8) + elem%dx_short = 1.0_r8/(max_svd*0.5_r8*dble(np-1)*real(ra, r8)*1000.0_r8) + elem%dx_long = 1.0_r8/(min_svd*0.5_r8*dble(np-1)*real(ra, r8)*1000.0_r8) ! optional noramlization: elem%D = elem%D * sqrt(alpha) @@ -834,13 +834,13 @@ subroutine coreolis_init_atomic(elem) integer :: i,j real (kind=r8) :: lat,lon,rangle - rangle = rotate_grid * PI / 180._r8 + rangle = rotate_grid * real(pi, r8) / 180._r8 do j=1,np do i=1,np if ( rotate_grid /= 0) then lat = elem%spherep(i,j)%lat lon = elem%spherep(i,j)%lon - elem%fcor(i,j)= 2*omega* & + elem%fcor(i,j)= 2*real(omega, r8)* & (-cos(lon)*cos(lat)*sin(rangle) + sin(lat)*cos(rangle)) else elem%fcor(i,j) = 2.0_r8*omega*SIN(elem%spherep(i,j)%lat) diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 index 8f7299b1..6c51bba8 100644 --- a/src/dynamics/se/dycore/derivative_mod.F90 +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -1060,8 +1060,8 @@ subroutine gradient_sphere(s,deriv,Dinv,ds) dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) end do - v1(l ,j ) = dsdx00*ra - v2(j ,l ) = dsdy00*ra + v1(l ,j ) = dsdx00*real(ra, r8) + v2(j ,l ) = dsdy00*real(ra, r8) end do end do ! convert covarient to latlon @@ -1121,9 +1121,9 @@ function curl_sphere_wk_testcov(s,deriv,elem) result(ds) !DIR$ UNROLL(NP) do j=1,np ! phi(n)_y sum over second index, 1st index fixed at m - dscontra(m,n,1)=dscontra(m,n,1)-(elem%mp(m,j)*s(m,j)*deriv%Dvv(n,j) )*ra + dscontra(m,n,1)=dscontra(m,n,1)-(elem%mp(m,j)*s(m,j)*deriv%Dvv(n,j) )*real(ra, r8) ! 
phi(m)_x sum over first index, second index fixed at n - dscontra(m,n,2)=dscontra(m,n,2)+(elem%mp(j,n)*s(j,n)*deriv%Dvv(m,j) )*ra + dscontra(m,n,2)=dscontra(m,n,2)+(elem%mp(j,n)*s(j,n)*deriv%Dvv(m,j) )*real(ra, r8) enddo enddo enddo @@ -1185,12 +1185,12 @@ function gradient_sphere_wk_testcov(s,deriv,elem) result(ds) dscontra(m,n,1)=dscontra(m,n,1)-(& (elem%mp(j,n)*elem%metinv(m,n,1,1)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& (elem%mp(m,j)*elem%metinv(m,n,2,1)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & - ) *ra + ) *real(ra, r8) dscontra(m,n,2)=dscontra(m,n,2)-(& (elem%mp(j,n)*elem%metinv(m,n,1,2)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& (elem%mp(m,j)*elem%metinv(m,n,2,2)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & - ) *ra + ) *real(ra, r8) enddo enddo enddo @@ -1238,9 +1238,9 @@ function gradient_sphere_wk_testcontra(s,deriv,elem) result(ds) !DIR$ UNROLL(NP) do j=1,np ! phi(m)_x sum over first index, second index fixed at n - dscov(m,n,1)=dscov(m,n,1)-(elem%mp(j,n)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) )*ra + dscov(m,n,1)=dscov(m,n,1)-(elem%mp(j,n)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) )*real(ra, r8) ! phi(n)_y sum over second index, 1st index fixed at m - dscov(m,n,2)=dscov(m,n,2)-(elem%mp(m,j)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) )*ra + dscov(m,n,2)=dscov(m,n,2)-(elem%mp(m,j)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) )*real(ra, r8) enddo enddo enddo @@ -1280,7 +1280,7 @@ function ugradv_sphere(u,v,deriv,elem) result(ugradv) do component=1,3 ! Dot u with the gradient of each component call gradient_sphere(dum_cart(:,:,component),deriv,elem%Dinv,temp) - dum_cart(:,:,component) = sum( u(:,:,:) * temp,3) + dum_cart(:,:,component) = sum( u(:,:,:) * temp,3) enddo ! cartesian -> latlon @@ -1328,8 +1328,8 @@ function curl_sphere(s,deriv,elem) result(ds) dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) end do - v2(l ,j ) = -dsdx00*ra - v1(j ,l ) = dsdy00*ra + v2(l ,j ) = -dsdx00*real(ra, r8) + v1(j ,l ) = dsdy00*real(ra, r8) end do end do ! 
convert contra -> latlon *and* divide by jacobian @@ -1389,7 +1389,7 @@ subroutine divergence_sphere_wk(v,deriv,elem,div) do j=1,np div(m,n)=div(m,n)-(elem%spheremp(j,n)*vtemp(j,n,1)*deriv%Dvv(m,j) & +elem%spheremp(m,j)*vtemp(m,j,2)*deriv%Dvv(n,j)) & - * ra + * real(ra, r8) enddo end do @@ -1428,22 +1428,22 @@ function element_boundary_integral(v,deriv,elem) result(result) result=0 j=1 do i=1,np - result(i,j)=result(i,j)-deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra + result(i,j)=result(i,j)-deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*real(ra, r8) enddo j=np do i=1,np - result(i,j)=result(i,j)+deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra + result(i,j)=result(i,j)+deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*real(ra, r8) enddo i=1 do j=1,np - result(i,j)=result(i,j)-deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra + result(i,j)=result(i,j)-deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*real(ra, r8) enddo i=np do j=1,np - result(i,j)=result(i,j)+deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra + result(i,j)=result(i,j)+deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*real(ra, r8) enddo end function element_boundary_integral @@ -1493,13 +1493,13 @@ function edge_flux_u_cg( v,p,pedges, deriv, elem, u_is_contra) result(result) j=1 pstar=p(i,j) if (ucontra(i,j,2)>0) pstar=pedges(i,0) - flux = -pstar*ucontra(i,j,2)*( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) + flux = -pstar*ucontra(i,j,2)*( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*real(ra, r8)) result(i,j)=result(i,j)+flux j=np pstar=p(i,j) if (ucontra(i,j,2)<0) pstar=pedges(i,np+1) - flux = pstar*ucontra(i,j,2)* ( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) + flux = pstar*ucontra(i,j,2)* ( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*real(ra, r8)) result(i,j)=result(i,j)+flux enddo @@ -1507,13 +1507,13 @@ function edge_flux_u_cg( v,p,pedges, deriv, elem, u_is_contra) result(result) i=1 pstar=p(i,j) if (ucontra(i,j,1)>0) pstar=pedges(0,j) - flux = -pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) + flux = -pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*real(ra, r8)) result(i,j)=result(i,j)+flux i=np pstar=p(i,j) if (ucontra(i,j,1)<0) pstar=pedges(np+1,j) - flux = pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) + flux = pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*real(ra, r8)) result(i,j)=result(i,j)+flux end do @@ -1568,7 +1568,7 @@ subroutine vorticity_sphere(v,deriv,elem,vort) do j=1,np do i=1,np - vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) end do end do @@ -1622,7 +1622,7 @@ function vorticity_sphere_diag(v,deriv,elem) result(vort) do j=1,np do i=1,np - vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) end do end do @@ -1676,7 +1676,7 @@ subroutine divergence_sphere(v,deriv,elem,div) do j=1,np do i=1,np - div(i,j)=(div(i,j)+vvtemp(i,j))*(elem%rmetdet(i,j)*ra) + div(i,j)=(div(i,j)+vvtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) end do end do @@ -1809,8 +1809,8 @@ subroutine vlaplace_sphere_wk_mol(v,deriv,elem,undamprrcart,mol_nu,laplace) do n=1,np do m=1,np ! 
add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(real(ra**2, r8)) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(real(ra**2, r8)) enddo enddo end if @@ -1856,8 +1856,8 @@ function vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) result if (undamprrcart) then ! add in correction so we dont damp rigid rotation - laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(ra**2) - laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(ra**2) + laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(real(ra**2, r8)) + laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(real(ra**2, r8)) end if end function vlaplace_sphere_wk_cartesian @@ -1907,8 +1907,8 @@ function vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) do n=1,np do m=1,np ! add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(real(ra**2, r8)) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(real(ra**2, r8)) enddo enddo end if @@ -2105,10 +2105,10 @@ subroutine subcell_div_fluxes(u, p, n, metdet,fluxes) flux_l(:,:) = MATMUL(boundary_interp_matrix(:,1,:),lr) flux_r(:,:) = MATMUL(boundary_interp_matrix(:,2,:),lr) - fluxes(:,:,1) = -flux_b(:,:)*ra - fluxes(:,:,2) = flux_r(:,:)*ra - fluxes(:,:,3) = flux_t(:,:)*ra - fluxes(:,:,4) = -flux_l(:,:)*ra + fluxes(:,:,1) = -flux_b(:,:)*real(ra, r8) + fluxes(:,:,2) = flux_r(:,:)*real(ra, r8) + fluxes(:,:,3) = flux_t(:,:)*real(ra, r8) + fluxes(:,:,4) = -flux_l(:,:)*real(ra, r8) end subroutine subcell_div_fluxes diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 index 51799649..bdba4287 100644 --- a/src/dynamics/se/dycore/dimensions_mod.F90 +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -121,8 +121,7 @@ subroutine dimensions_mod_init() use vert_coord, only: pver, pverp use constituents, only: pcnst - use cam_abortutils, only: endrun, check_allocate - use string_utils, only: to_str + use cam_abortutils, only: check_allocate ! 
Local variables: diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 index c5f4571c..ade5dbcf 100644 --- a/src/dynamics/se/dycore/global_norms_mod.F90 +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -3,6 +3,7 @@ module global_norms_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_logfile, only: iulog use edgetype_mod, only: EdgeBuffer_t + use physconst, only: pi implicit none private @@ -31,7 +32,6 @@ subroutine global_integrals(elem, h,hybrid,npts,num_flds,nets,nete,I_sphere) use hybrid_mod, only: hybrid_t use element_mod, only: element_t use dimensions_mod, only: np, nelemd - use physconst, only: pi use parallel_mod, only: global_shared_buf, global_shared_sum type(element_t) , intent(in) :: elem(:) @@ -74,13 +74,12 @@ subroutine global_integrals(elem, h,hybrid,npts,num_flds,nets,nete,I_sphere) !JMD print *,'global_integral: before wrap_repro_sum' call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) !JMD print *,'global_integral: after wrap_repro_sum' - I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*PI) + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*real(pi, r8)) end subroutine global_integrals subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere) use hybrid_mod, only: hybrid_t use dimensions_mod, only: nc, nelemd - use physconst, only: pi use parallel_mod, only: global_shared_buf, global_shared_sum integer, intent(in) :: npts,nets,nete,num_flds @@ -89,7 +88,7 @@ subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere real (kind=r8), intent(in) :: da(npts,npts,nets:nete) real (kind=r8) :: I_sphere(num_flds) - + real (kind=r8) :: I_priv real (kind=r8) :: I_shared common /gblintcom/I_shared @@ -121,7 +120,7 @@ subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere !JMD print *,'global_integral: before wrap_repro_sum' call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) !JMD print *,'global_integral: after wrap_repro_sum' - I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*PI) + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*real(pi, r8)) end subroutine global_integrals_general @@ -137,7 +136,6 @@ function global_integral(elem, h,hybrid,npts,nets,nete) result(I_sphere) use hybrid_mod, only: hybrid_t use element_mod, only: element_t use dimensions_mod, only: np, nelemd - use physconst, only: pi use parallel_mod, only: global_shared_buf, global_shared_sum type(element_t) , intent(in) :: elem(:) @@ -181,7 +179,7 @@ function global_integral(elem, h,hybrid,npts,nets,nete) result(I_sphere) !JMD print *,'global_integral: after wrap_repro_sum' I_tmp = global_shared_sum(1) !JMD print *,'global_integral: after global_shared_sum' - I_sphere = I_tmp(1)/(4.0_r8*PI) + I_sphere = I_tmp(1)/(4.0_r8*real(pi, r8)) end function global_integral @@ -287,7 +285,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& lambda_max = 0.0_r8 lambda_vis = 0.0_r8 end select - + if ((lambda_max.eq.0_r8).and.(hybrid%masterthread)) then print*, "lambda_max not calculated for NP = ",np print*, "Estimate of gravity wave timestep will be incorrect" @@ -296,11 +294,11 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& print*, "lambda_vis not calculated for NP = ",np print*, "Estimate of viscous CFLs will be incorrect" end if - + do ie=nets,nete elem(ie)%variable_hyperviscosity = 1.0_r8 end do - + gp=gausslobatto(np) min_gw = minval(gp%weights) ! 
@@ -313,8 +311,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& h(:,:,nets:nete)=1.0_r8 ! Calculate surface area by integrating 1.0_r8 over sphere and dividing by 4*PI (Should be 1) I_sphere = global_integral(elem, h(:,:,nets:nete),hybrid,np,nets,nete) - - min_normDinv = 1E99_r8 + + min_normDinv = 1E99_r8 max_normDinv = 0 min_max_dx = 1E99_r8 min_min_dx = 1E99_r8 @@ -326,33 +324,33 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& max_normDinv = max(max_normDinv,elem(ie)%normDinv) min_normDinv = min(min_normDinv,elem(ie)%normDinv) min_min_dx = min(min_min_dx,elem(ie)%dx_short) - max_min_dx = max(max_min_dx,elem(ie)%dx_short) + max_min_dx = max(max_min_dx,elem(ie)%dx_short) min_max_dx = min(min_max_dx,elem(ie)%dx_long) - + elem(ie)%area = sum(elem(ie)%spheremp(:,:)) min_area = min(min_area,elem(ie)%area) max_area = max(max_area,elem(ie)%area) max_ratio = max(max_ratio,elem(ie)%dx_long/elem(ie)%dx_short) global_shared_buf(ie,1) = elem(ie)%area - global_shared_buf(ie,2) = elem(ie)%dx_short + global_shared_buf(ie,2) = elem(ie)%dx_short enddo call wrap_repro_sum(nvars=2, comm=hybrid%par%comm) avg_area = global_shared_sum(1)/dble(nelem) avg_min_dx = global_shared_sum(2)/dble(nelem) - + min_area = ParallelMin(min_area,hybrid) max_area = ParallelMax(max_area,hybrid) - min_normDinv = ParallelMin(min_normDinv,hybrid) + min_normDinv = ParallelMin(min_normDinv,hybrid) max_normDinv = ParallelMax(max_normDinv,hybrid) - min_min_dx = ParallelMin(min_min_dx,hybrid) + min_min_dx = ParallelMin(min_min_dx,hybrid) max_min_dx = ParallelMax(max_min_dx,hybrid) - min_max_dx = ParallelMin(min_max_dx,hybrid) + min_max_dx = ParallelMin(min_max_dx,hybrid) max_ratio = ParallelMax(max_ratio,hybrid) ! Physical units for area min_area = min_area*rearth*rearth/1000000._r8 max_area = max_area*rearth*rearth/1000000._r8 - avg_area = avg_area*rearth*rearth/1000000._r8 + avg_area = avg_area*rearth*rearth/1000000._r8 if (hybrid%masterthread) then write(iulog,* )"" write(iulog,* )"Running Global Integral Diagnostic..." @@ -361,7 +359,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f9.3)') 'Element area: max/min',(max_area/min_area) if (.not.MeshUseMeshFile) then write(iulog,'(a,f6.3,f8.2)') "Average equatorial node spacing (deg, km) = ", & - dble(90)/dble(ne*(np-1)), PI*rearth/(2000.0_r8*dble(ne*(np-1))) + dble(90)/dble(ne*(np-1)), real(pi, r8)*real(rearth, r8)/(2000.0_r8*dble(ne*(np-1))) end if write(iulog,'(a,2f9.3)') 'norm of Dinv (min, max): ', min_normDinv, max_normDinv write(iulog,'(a,1f8.2)') 'Max Dinv-based element distortion: ', max_ratio @@ -369,8 +367,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,3f8.2)') "dx based on sqrt element area: ave,min,max = ", & sqrt(avg_area)/(np-1),sqrt(min_area)/(np-1),sqrt(max_area)/(np-1) end if - - + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! SCALAR, RESOLUTION-AWARE HYPERVISCOSITY ! this block of code initializes the variable_hyperviscsoity() array @@ -381,12 +379,12 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& ! Mike Levy !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if (hypervis_power /= 0) then - + min_hypervis = 1d99 max_hypervis = 0 avg_hypervis = 0 - - + + max_unif_dx = min_max_dx ! use this for average resolution, unless: ! 
viscosity in namelist specified for smallest element: if (fine_ne>0) then @@ -395,7 +393,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& if (np /= 4 ) call endrun('ERROR: setting fine_ne only supported with NP=4') max_unif_dx = (111.28_r8*30)/dble(fine_ne) ! in km endif - + ! ! note: if L = eigenvalue of metinv, then associated length scale (km) is ! dx = 1.0_r8/( sqrt(L)*0.5_r8*dble(np-1)*ra*1000.0_r8) @@ -413,17 +411,17 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& normDinv_hypervis = 0 do ie=nets,nete ! variable viscosity based on map from ulatlon -> ucontra - + ! dx_long elem(ie)%variable_hyperviscosity = sqrt((elem(ie)%dx_long/max_unif_dx) ** hypervis_power) elem(ie)%hv_courant = dtnu*(elem(ie)%variable_hyperviscosity(1,1)**2) * & - (lambda_vis**2) * ((ra*elem(ie)%normDinv)**4) - + (lambda_vis**2) * ((real(ra, r8)*elem(ie)%normDinv)**4) + ! Check to see if this is stable if (elem(ie)%hv_courant.gt.max_hypervis_courant) then stable_hv = sqrt( max_hypervis_courant / & - ( dtnu * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 ) ) - + ( dtnu * (lambda_vis)**2 * (real(ra, r8)*elem(ie)%normDinv)**4 ) ) + #if 0 ! Useful print statements for debugging the adjustments to hypervis print*, "Adjusting hypervis on elem ", elem(ie)%GlobalId @@ -434,15 +432,15 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& #endif ! make sure that: elem(ie)%hv_courant <= max_hypervis_courant elem(ie)%variable_hyperviscosity = stable_hv - elem(ie)%hv_courant = dtnu*(stable_hv**2) * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 + elem(ie)%hv_courant = dtnu*(stable_hv**2) * (lambda_vis)**2 * (real(ra, r8)*elem(ie)%normDinv)**4 end if normDinv_hypervis = max(normDinv_hypervis, elem(ie)%hv_courant/dtnu) - + min_hypervis = min(min_hypervis, elem(ie)%variable_hyperviscosity(1,1)) max_hypervis = max(max_hypervis, elem(ie)%variable_hyperviscosity(1,1)) global_shared_buf(ie,1) = elem(ie)%variable_hyperviscosity(1,1) end do - + min_hypervis = ParallelMin(min_hypervis, hybrid) max_hypervis = ParallelMax(max_hypervis, hybrid) call wrap_repro_sum(nvars=1, comm=hybrid%par%comm) @@ -462,7 +460,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& elem(ie)%variable_hyperviscosity(:,:) = zeta(:,:,ie)*elem(ie)%rspheremp(:,:) end do call FreeEdgeBuffer(edgebuf) - + ! replace hypervis w/ bilinear based on continuous corner values do ie=nets,nete noreast = elem(ie)%variable_hyperviscosity(np,np) @@ -490,9 +488,9 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& (lambda**(-hypervis_scaling/2) ) else ! constant coefficient formula: - normDinv_hypervis = (lambda_vis**2) * (ra*max_normDinv)**4 + normDinv_hypervis = (lambda_vis**2) * (real(ra, r8)*max_normDinv)**4 endif - + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! TENSOR, RESOLUTION-AWARE HYPERVISCOSITY ! The tensorVisc() array is computed in cube_mod.F90 @@ -501,7 +499,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& ! Oksana Guba !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
if (hypervis_scaling /= 0) then - + call initEdgeBuffer(hybrid%par,edgebuf,elem,1) do rowind=1,2 do colind=1,2 @@ -509,7 +507,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& zeta(:,:,ie) = elem(ie)%tensorVisc(:,:,rowind,colind)*elem(ie)%spheremp(:,:) call edgeVpack(edgebuf,zeta(1,1,ie),1,0,ie) end do - + call bndry_exchange(hybrid,edgebuf) do ie=nets,nete call edgeVunpack(edgebuf,zeta(1,1,ie),1,0,ie) @@ -518,9 +516,9 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& enddo !rowind enddo !colind call FreeEdgeBuffer(edgebuf) - + !IF BILINEAR MAP OF V NEEDED - + do rowind=1,2 do colind=1,2 ! replace hypervis w/ bilinear based on continuous corner values @@ -589,7 +587,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& if (tstep_type==1) then S_rk = 2.0_r8 rk_str = ' * RK2-SSP 3 stage (same as tracers)' - elseif (tstep_type==2) then + elseif (tstep_type==2) then S_rk = 2.0_r8 rk_str = ' * classic RK3' elseif (tstep_type==3) then @@ -599,7 +597,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& S_rk = 3.0_r8 rk_str = ' * Kinnmark&Gray RK3 5 stage (3rd order)' end if - if (hybrid%masterthread) then + if (hybrid%masterthread) then write(iulog,'(a,f12.8,a)') 'Model top is ',ptop,'Pa' write(iulog,'(a)') ' ' write(iulog,'(a)') 'Timestepping methods used in dynamical core:' @@ -624,14 +622,14 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& end if ugw = 342.0_r8 !max gravity wave speed - dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*ra) - dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*ra) - dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*ra) + dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*real(ra, r8)) + dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*real(ra, r8)) + dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*real(ra, r8)) if (ntrac>0) then if (large_Courant_incr) then - dt_max_tracer_fvm = dble(nhe)*(4.0_r8*pi*Rearth/dble(4.0_r8*ne*nc))/umax + dt_max_tracer_fvm = dble(nhe)*(4.0_r8*real(pi, r8)*real(Rearth, r8)/dble(4.0_r8*ne*nc))/umax else - dt_max_tracer_fvm = dble(nhe)*(2.0_r8*pi*Rearth/dble(4.0_r8*ne*nc))/umax + dt_max_tracer_fvm = dble(nhe)*(2.0_r8*real(pi, r8)*real(Rearth, r8)/dble(4.0_r8*ne*nc))/umax end if else dt_max_tracer_fvm = -1.0_r8 @@ -640,8 +638,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& dt_max_hypervis_tracer = s_hypervis/(nu_q*normDinv_hypervis) max_laplace = MAX(MAXVAL(nu_scale_top(:))*nu_top,MAXVAL(kmvis_ref(:)/rho_ref(:))) - max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(cpair*rho_ref(:)))) - dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) + max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(real(cpair, r8)*rho_ref(:)))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((real(ra, r8)*max_normDinv)**2)*lambda_vis) if (hybrid%masterthread) then write(iulog,'(a,f10.2,a)') ' ' @@ -671,8 +669,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f10.2)') '* dt_remap (vertical remap dt) ',dt_remap_actual do k=1,ksponge_end max_laplace = MAX(nu_scale_top(k)*nu_top,kmvis_ref(k)/rho_ref(k)) - max_laplace = MAX(max_laplace,kmcnd_ref(k)/(cpair*rho_ref(k))) - dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) + max_laplace = MAX(max_laplace,kmcnd_ref(k)/(real(cpair, r8)*rho_ref(k))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((real(ra, r8)*max_normDinv)**2)*lambda_vis) write(iulog,'(a,f10.2,a,f10.2,a)') '* dt (del2 sponge ; u,v,T,dM) < ',& dt_max_laplacian_top,'s',dt_dyn_del2_actual,'s' @@ 
-1066,7 +1064,7 @@ subroutine wrap_repro_sum (nvars, comm, nsize) !$OMP END MASTER !$OMP BARRIER - + end subroutine wrap_repro_sum subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu,factor,str) @@ -1080,7 +1078,7 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min real (kind=r8), intent(in) :: max_min_dx,min_min_dx,factor real (kind=r8), intent(inout) :: nu character(len=4), intent(in) :: str - + real(r8) :: uniform_res_hypervis_scaling,nu_fac real(kind=r8) :: nu_min, nu_max ! @@ -1094,7 +1092,7 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min ! - Boville, B. A., 1991: Sensitivity of simulated climate to ! model resolution. J. Climate, 4, 469-485. ! - ! - TAKAHASHI ET AL., 2006: GLOBAL SIMULATION OF MESOSCALE SPECTRUM + ! - TAKAHASHI ET AL., 2006: GLOBAL SIMULATION OF MESOSCALE SPECTRUM ! uniform_res_hypervis_scaling = 1.0_r8/log10(2.0_r8) ! @@ -1103,27 +1101,27 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min ! ! grid spacing in meters = max_min_dx*1000.0_r8 ! - nu_fac = (rearth/6.37122E6_r8)*1.0E15_r8/(110000.0_r8**uniform_res_hypervis_scaling) + nu_fac = (real(rearth, r8)/6.37122E6_r8)*1.0E15_r8/(110000.0_r8**uniform_res_hypervis_scaling) if (nu < 0) then if (ne <= 0) then - if (hypervis_scaling/=0) then + if (hypervis_scaling/=0) then nu_min = factor*nu_fac*(max_min_dx*1000.0_r8)**uniform_res_hypervis_scaling nu_max = factor*nu_fac*(min_min_dx*1000.0_r8)**uniform_res_hypervis_scaling - nu = factor*nu_min + nu = factor*nu_min if (hybrid%masterthread) then write(iulog,'(a,a)') "Automatically setting nu",TRIM(str) write(iulog,'(a,2e9.2,a,2f9.2)') "Value at min/max grid spacing: ",nu_min,nu_max,& " Max/min grid spacing (km) = ",max_min_dx,min_min_dx end if - nu = nu_min*(2.0_r8*rearth/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(rearth**4) + nu = nu_min*(2.0_r8*real(rearth, r8)/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(real(rearth**4, r8)) if (hybrid%masterthread) & write(iulog,'(a,a,a,e9.3)') "Nu_tensor",TRIM(str)," = ",nu else if (hypervis_power/=0) then call endrun('ERROR: Automatic scaling of scalar viscosity not implemented') end if else - nu = factor*nu_fac*((30.0_r8/ne)*110000.0_r8)**uniform_res_hypervis_scaling + nu = factor*nu_fac*((30.0_r8/ne)*110000.0_r8)**uniform_res_hypervis_scaling if (hybrid%masterthread) then write(iulog,'(a,a,a,e9.2)') "Automatically setting nu",TRIM(str)," =",nu end if diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 index 0c2aad61..6252843a 100644 --- a/src/dynamics/se/dycore/interpolate_mod.F90 +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -188,12 +188,12 @@ function get_interp_gweight() result(gw) end function get_interp_gweight function get_interp_lat() result(thislat) real(kind=r8) :: thislat(nlat) - thislat=lat*180.0_r8/PI + thislat=lat*180.0_r8/real(PI, r8) return end function get_interp_lat function get_interp_lon() result(thislon) real(kind=r8) :: thislon(nlon) - thislon=lon*180.0_r8/PI + thislon=lon*180.0_r8/real(PI, r8) return end function get_interp_lon @@ -1025,10 +1025,10 @@ subroutine cube_facepoint_ne(sphere, ne, cart, number) yp = cube%y ! MNL: for uniform grids (on cube face), analytic solution is fine - x1 = xp + 0.25_r8*PI - x2 = yp + 0.25_r8*PI + x1 = xp + 0.25_r8*real(PI, r8) + x2 = yp + 0.25_r8*real(PI, r8) - dx = (0.5_r8*PI)/ne + dx = (0.5_r8*real(PI, r8))/ne ie = INT(ABS(x1)/dx) je = INT(ABS(x2)/dx) ! 
if we are exactly on an element edge, we can put the point in diff --git a/src/dynamics/se/dycore/mesh_mod.F90 b/src/dynamics/se/dycore/mesh_mod.F90 index c34f0cc4..ac849cc3 100644 --- a/src/dynamics/se/dycore/mesh_mod.F90 +++ b/src/dynamics/se/dycore/mesh_mod.F90 @@ -669,7 +669,7 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) call endrun('initialize_space_filling_curve: Unreasonably small element found. less than .00001') end if - ne = CEILING(0.5_r8*PI/(h/2)); + ne = CEILING(0.5_r8*real(PI, r8)/(h/2)); ! find the smallest ne2 which is a power of 2 and ne2>ne ne2=2**ceiling( log(real(ne))/log(2._r8) ) @@ -717,8 +717,8 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) y = centroids(i,2) ! map this element to an (i2,j2) element ! [ -PI/4, PI/4 ] -> [ 0, ne2 ] - i2=nint( (0.5_r8 + 2.0_r8*x/PI)*ne2 + 0.5_r8 ) - j2=nint( (0.5_r8 + 2.0_r8*y/PI)*ne2 + 0.5_r8 ) + i2=nint( (0.5_r8 + 2.0_r8*x/real(PI, r8))*ne2 + 0.5_r8 ) + j2=nint( (0.5_r8 + 2.0_r8*y/real(PI, r8))*ne2 + 0.5_r8 ) if (face == 4 .or. face == 6 ) i2 = ne2-i2+1 if (face == 1 .or. face == 2 .or. face == 6) j2 = ne2-j2+1 if (i2<1 ) i2=1 diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index 306e8486..b3d099f6 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -56,6 +56,13 @@ subroutine prim_advance_init(par, elem) end subroutine prim_advance_init subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, nete) + use ccpp_kinds, only: kind_phys + use physconst, only: get_cp, thermodynamic_active_species_num + use physconst, only: get_kappa_dry, dry_air_species_num + use physconst, only: thermodynamic_active_species_idx_dycore + use physconst, only: cpair + + !SE dycore: use control_mod, only: tstep_type, qsplit use derivative_mod, only: derivative_t use dimensions_mod, only: np, nlev @@ -66,10 +73,7 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net use dimensions_mod, only: lcp_moist use fvm_control_volume_mod, only: fvm_struct use control_mod, only: raytau0 - use physconst, only: get_cp, thermodynamic_active_species_num - use physconst, only: get_kappa_dry, dry_air_species_num - use physconst, only: thermodynamic_active_species_idx_dycore - use physconst, only: cpair, rair + implicit none type (element_t), intent(inout), target :: elem(:) @@ -86,10 +90,12 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net real (kind=r8) :: dt_vis, eta_ave_w real (kind=r8) :: dp(np,np) integer :: ie,nm1,n0,np1,k,qn0,m_cnst, nq - real (kind=r8) :: inv_cp_full(np,np,nlev,nets:nete) real (kind=r8) :: qwater(np,np,nlev,thermodynamic_active_species_num,nets:nete) integer :: qidx(thermodynamic_active_species_num) real (kind=r8) :: kappa(np,np,nlev,nets:nete) + + real (kind=kind_phys) :: inv_cp_full(np,np,nlev,nets:nete) + call t_startf('prim_advance_exp') nm1 = tl%nm1 n0 = tl%n0 @@ -144,12 +150,13 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! 
if (lcp_moist) then do ie=nets,nete - call get_cp(1,np,1,np,1,nlev,thermodynamic_active_species_num,qwater(:,:,:,:,ie),& - .true.,inv_cp_full(:,:,:,ie),active_species_idx_dycore=qidx) + call get_cp(1,np,1,np,1,nlev,thermodynamic_active_species_num,& + real(qwater(:,:,:,:,ie), kind_phys), & + .true.,inv_cp_full(:,:,:,ie),active_species_idx_dycore=qidx) end do else do ie=nets,nete - inv_cp_full(:,:,:,ie) = 1.0_r8/cpair + inv_cp_full(:,:,:,ie) = 1.0_kind_phys/cpair end do end if do ie=nets,nete @@ -165,13 +172,19 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! not optimal for regular CFL ! u1 = u0 + dt/2 RHS(u0) call compute_and_apply_rhs(np1,n0,n0,dt/2,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w/3,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u2 = u1 + dt/2 RHS(u1) call compute_and_apply_rhs(np1,np1,np1,dt/2,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w/3,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u3 = u2 + dt/2 RHS(u2) call compute_and_apply_rhs(np1,np1,np1,dt/2,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w/3,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w/3,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! unew = u/3 +2*u3/3 = u + 1/3 (RHS(u) + RHS(u1) + RHS(u2)) do ie=nets,nete @@ -186,50 +199,72 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! classic RK3 CFL=sqrt(3) ! u1 = u0 + dt/3 RHS(u0) call compute_and_apply_rhs(np1,n0,n0,dt/3,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u2 = u0 + dt/2 RHS(u1) call compute_and_apply_rhs(np1,n0,np1,dt/2,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u3 = u0 + dt RHS(u2) call compute_and_apply_rhs(np1,n0,np1,dt,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) else if (tstep_type==3) then ! KG 4th order 4 stage: CFL=sqrt(8) ! low storage version of classic RK4 ! u1 = u0 + dt/4 RHS(u0) call compute_and_apply_rhs(np1,n0,n0,dt/4,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u2 = u0 + dt/3 RHS(u1) call compute_and_apply_rhs(np1,n0,np1,dt/3,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u3 = u0 + dt/2 RHS(u2) call compute_and_apply_rhs(np1,n0,np1,dt/2,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! u4 = u0 + dt RHS(u3) call compute_and_apply_rhs(np1,n0,np1,dt,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) else if (tstep_type==4) then ! ! Ullrich 3nd order 5 stage: CFL=sqrt( 4^2 -1) = 3.87 ! u1 = u0 + dt/5 RHS(u0) (save u1 in timelevel nm1) ! rhs: t=t call compute_and_apply_rhs(nm1,n0,n0,dt/5,elem,hvcoord,hybrid,& - deriv,nets,nete,eta_ave_w/4,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,eta_ave_w/4,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! ! 
u2 = u0 + dt/5 RHS(u1); rhs: t=t+dt/5 ! call compute_and_apply_rhs(np1,n0,nm1,dt/5,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! ! u3 = u0 + dt/3 RHS(u2); rhs: t=t+2*dt/5 ! call compute_and_apply_rhs(np1,n0,np1,dt/3,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! ! u4 = u0 + 2dt/3 RHS(u3); rhs: t=t+2*dt/5+dt/3 ! call compute_and_apply_rhs(np1,n0,np1,2*dt/3,elem,hvcoord,hybrid,& - deriv,nets,nete,0.0_r8,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,0.0_r8,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! compute (5*u1/4 - u0/4) in timelevel nm1: do ie=nets,nete elem(ie)%state%v(:,:,:,:,nm1)= (5*elem(ie)%state%v(:,:,:,:,nm1) & @@ -244,7 +279,9 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! phl: rhs: t=t+2*dt/5+dt/3+3*dt/4 -wrong RK times ... ! call compute_and_apply_rhs(np1,nm1,np1,3*dt/4,elem,hvcoord,hybrid,& - deriv,nets,nete,3*eta_ave_w/4,inv_cp_full,qwater,qidx,kappa) + deriv,nets,nete,3*eta_ave_w/4,& + real(inv_cp_full, r8),& + qwater,qidx,kappa) ! final method is the same as: ! u5 = u0 + dt/4 RHS(u0)) + 3dt/4 RHS(u4) else @@ -263,7 +300,7 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! forward-in-time, hypervis applied to dp3d call advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,np1,qn0,nets,nete,dt_vis,eta_ave_w,& - inv_cp_full,hvcoord) + real(inv_cp_full, r8),hvcoord) call t_stopf('advance_hypervis') ! @@ -556,14 +593,14 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, ! T1 = .0065*Tref*Cp/g ! = ~191 ! T0 = Tref-T1 ! = ~97 ! - T1 = lapse_rate*Tref*cpair/gravit + T1 = real(lapse_rate*Tref*cpair/gravit, r8) T0 = Tref-T1 do ie=nets,nete do k=1,nlev dp3d_ref(:,:,k,ie) = ((hvcoord%hyai(k+1)-hvcoord%hyai(k))*hvcoord%ps0 + & (hvcoord%hybi(k+1)-hvcoord%hybi(k))*ps_ref(:,:,ie)) tmp = hvcoord%hyam(k)*hvcoord%ps0+hvcoord%hybm(k)*ps_ref(:,:,ie) - tmp2 = (tmp/hvcoord%ps0)**cappa + tmp2 = (tmp/hvcoord%ps0)**real(cappa, r8) T_ref(:,:,k,ie) = (T0+T1*tmp2) end do end do @@ -1108,6 +1145,13 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& ! allows us to fuse these two loops for more cache reuse ! ! 
=================================== + use ccpp_kinds, only: kind_phys + use physconst, only: get_gz_given_dp_Tv_Rdry + use physconst, only: thermodynamic_active_species_num, get_virtual_temp, get_cp_dry + use physconst, only: thermodynamic_active_species_idx_dycore,get_R_dry + use physconst, only: dry_air_species_num,get_exner + + !SE dycore: use dimensions_mod, only: np, nc, nlev, ntrac, ksponge_end use hybrid_mod, only: hybrid_t use element_mod, only: element_t @@ -1117,11 +1161,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& use edgetype_mod, only: edgedescriptor_t use bndry_mod, only: bndry_exchange use hybvcoord_mod, only: hvcoord_t - use physconst, only: epsilo, get_gz_given_dp_Tv_Rdry - use physconst, only: thermodynamic_active_species_num, get_virtual_temp, get_cp_dry - use physconst, only: thermodynamic_active_species_idx_dycore,get_R_dry - use physconst, only: dry_air_species_num,get_exner - use time_mod, only : tevolve + use time_mod, only: tevolve implicit none integer, intent(in) :: np1,nm1,n0,nets,nete @@ -1156,7 +1196,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& real (kind=r8), dimension(np,np,nlev) :: vort ! vorticity real (kind=r8), dimension(np,np,nlev) :: p_dry ! pressure dry real (kind=r8), dimension(np,np,nlev) :: dp_dry ! delta pressure dry - real (kind=r8), dimension(np,np,nlev) :: R_dry, cp_dry! + real (kind=r8), dimension(np,np,nlev) :: R_dry real (kind=r8), dimension(np,np,nlev) :: p_full ! pressure real (kind=r8), dimension(np,np,nlev) :: dp_full real (kind=r8), dimension(np,np) :: exner @@ -1169,6 +1209,8 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& real (kind=r8), dimension(np,np,2) :: grad_exner real (kind=r8), dimension(np,np) :: theta_v + real (kind=kind_phys), dimension(np,np,nlev) :: cp_dry + type (EdgeDescriptor_t):: desc real (kind=r8) :: sum_water(np,np,nlev), density_inv(np,np) @@ -1190,7 +1232,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& call get_R_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& qwater(:,:,:,:,ie),qidx,R_dry) call get_cp_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& - qwater(:,:,:,:,ie),qidx,cp_dry) + real(qwater(:,:,:,:,ie),kind_phys),qidx,cp_dry) do k=1,nlev dp_dry(:,:,k) = elem(ie)%state%dp3d(:,:,k,n0) @@ -1318,8 +1360,8 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& theta_v(:,:)=T_v(:,:,k)/exner(:,:) call gradient_sphere(exner(:,:),deriv,elem(ie)%Dinv,grad_exner) - grad_exner(:,:,1) = cp_dry(:,:,k)*theta_v(:,:)*grad_exner(:,:,1) - grad_exner(:,:,2) = cp_dry(:,:,k)*theta_v(:,:)*grad_exner(:,:,2) + grad_exner(:,:,1) = real(cp_dry(:,:,k), r8)*theta_v(:,:)*grad_exner(:,:,1) + grad_exner(:,:,2) = real(cp_dry(:,:,k), r8)*theta_v(:,:)*grad_exner(:,:,2) else exner(:,:)=(p_full(:,:,k)/hvcoord%ps0)**kappa(:,:,k,ie) theta_v(:,:)=T_v(:,:,k)/exner(:,:) @@ -1330,8 +1372,8 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& grad_kappa_term(:,:,1)=-suml*grad_kappa_term(:,:,1) grad_kappa_term(:,:,2)=-suml*grad_kappa_term(:,:,2) - grad_exner(:,:,1) = cp_dry(:,:,k)*theta_v(:,:)*(grad_exner(:,:,1)+grad_kappa_term(:,:,1)) - grad_exner(:,:,2) = cp_dry(:,:,k)*theta_v(:,:)*(grad_exner(:,:,2)+grad_kappa_term(:,:,2)) + grad_exner(:,:,1) = real(cp_dry(:,:,k), r8)*theta_v(:,:)*(grad_exner(:,:,1)+grad_kappa_term(:,:,1)) + grad_exner(:,:,2) = real(cp_dry(:,:,k), r8)*theta_v(:,:)*(grad_exner(:,:,2)+grad_kappa_term(:,:,2)) end if do j=1,np @@ -1552,17 +1594,20 @@ 
subroutine distribute_flux_at_corners(cflux, corners, getmapP) end subroutine distribute_flux_at_corners subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suffix) - use dimensions_mod, only: npsq,nlev,np,lcp_moist,nc,ntrac,qsize - use physconst, only: gravit, cpair, rearth,omega - use element_mod, only: element_t + use ccpp_kinds, only: kind_phys + use physconst, only: gravit, cpair, rearth, omega + use physconst, only: get_dp, get_cp + use physconst, only: thermodynamic_active_species_idx_dycore + use hycoef, only: hyai, ps0 + use string_utils, only: strlist_get_ind !Un-comment once constituents and history outputs are enabled -JN: ! use cam_history, only: outfld, hist_fld_active ! use constituents, only: cnst_get_ind - use string_utils, only: strlist_get_ind - use hycoef, only: hyai, ps0 + + !SE dycore: + use element_mod, only: element_t + use dimensions_mod, only: npsq,nlev,np,lcp_moist,nc,ntrac,qsize use fvm_control_volume_mod, only: fvm_struct - use physconst, only: get_dp, get_cp - use physconst, only: thermodynamic_active_species_idx_dycore use dimensions_mod, only: cnst_name_gll !------------------------------Arguments-------------------------------- @@ -1590,7 +1635,8 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf real(kind=r8) :: mr(npsq) ! wind AAM real(kind=r8) :: mo(npsq) ! mass AAM real(kind=r8) :: mr_cnst, mo_cnst, cos_lat, mr_tmp, mo_tmp - real(kind=r8) :: cp(np,np,nlev) + + real(kind=kind_phys) :: cp(np,np,nlev) integer :: ie,i,j,k integer :: ixwv,ixcldice, ixcldliq, ixtt ! CLDICE, CLDLIQ and test tracer indices @@ -1632,8 +1678,8 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf ke = 0.0_r8 call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),2,thermodynamic_active_species_idx_dycore,& elem(ie)%state%dp3d(:,:,:,tl),pdel,ps=ps,ptop=hyai(1)*ps0) - call get_cp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),& - .false.,cp,dp_dry=elem(ie)%state%dp3d(:,:,:,tl),& + call get_cp(1,np,1,np,1,nlev,qsize,real(elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp), kind_phys),& + .false.,cp,dp_dry=real(elem(ie)%state%dp3d(:,:,:,tl), kind_phys),& active_species_idx_dycore=thermodynamic_active_species_idx_dycore) do k = 1, nlev do j=1,np @@ -1641,14 +1687,14 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf ! ! kinetic energy ! - ke_tmp = 0.5_r8*(elem(ie)%state%v(i,j,1,k,tl)**2+ elem(ie)%state%v(i,j,2,k,tl)**2)*pdel(i,j,k)/gravit + ke_tmp = 0.5_r8*(elem(ie)%state%v(i,j,1,k,tl)**2+ elem(ie)%state%v(i,j,2,k,tl)**2)*pdel(i,j,k)/real(gravit, r8) if (lcp_moist) then - se_tmp = cp(i,j,k)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit + se_tmp = real(cp(i,j,k), r8)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/real(gravit, r8) else ! ! using CAM physics definition of internal energy ! - se_tmp = cpair*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit + se_tmp = real(cpair, r8)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/real(gravit, r8) end if se (i+(j-1)*np) = se (i+(j-1)*np) + se_tmp ke (i+(j-1)*np) = ke (i+(j-1)*np) + ke_tmp @@ -1658,7 +1704,7 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf do j=1,np do i = 1, np - se(i+(j-1)*np) = se(i+(j-1)*np) + elem(ie)%state%phis(i,j)*ps(i,j)/gravit + se(i+(j-1)*np) = se(i+(j-1)*np) + elem(ie)%state%phis(i,j)*ps(i,j)/real(gravit, r8) end do end do ! 
@@ -1713,8 +1759,8 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf if ( hist_fld_active(name_out1).or.hist_fld_active(name_out2)) then call strlist_get_ind(cnst_name_gll, 'CLDLIQ', ixcldliq, abort=.false.) call strlist_get_ind(cnst_name_gll, 'CLDICE', ixcldice, abort=.false.) - mr_cnst = rearth**3/gravit - mo_cnst = omega*rearth**4/gravit + mr_cnst = real(rearth**3/gravit, r8) + mo_cnst = real(omega*rearth**4/gravit, r8) do ie=nets,nete mr = 0.0_r8 mo = 0.0_r8 @@ -1801,7 +1847,7 @@ subroutine util_function(f_in,nx,nz,name_out,ie) #if 0 if (hist_fld_active(name_out)) then f_out = 0.0_r8 - inv_g = 1.0_r8/gravit + inv_g = 1.0_r8/real(gravit, r8) do k = 1, nz do j = 1, nx do i = 1, nx @@ -1998,7 +2044,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) ! Calculate (dry) geopotential values !-------------------------------------- - dPhi (:,:,:) = 0.5_r8*(rair*elem(ie)%state%T (:,:,:,nt) & + dPhi (:,:,:) = 0.5_r8*(real(rair, r8)*elem(ie)%state%T (:,:,:,nt) & *elem(ie)%state%dp3d(:,:,:,nt) & /P_val(:,:,:) ) Phi_val (:,:,nlev) = elem(ie)%state%phis(:,:) + dPhi(:,:,nlev) @@ -2026,7 +2072,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) Phis_avg(:,:,ie) = E_phis/E_Awgt do kk=1,nlev Phi_avg(:,:,kk,ie) = E_phi(kk) /E_Awgt - RT_avg (:,:,kk,ie) = E_T (kk)*rair/E_Awgt + RT_avg (:,:,kk,ie) = E_T (kk)*real(rair, r8)/E_Awgt end do end do ! ie=nets,nete @@ -2098,7 +2144,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) if(.FALSE.) then ! DRY ADIABATIC laspe rate !------------------------------ - RT_lapse(:,:) = -cappa + RT_lapse(:,:) = -1._r8*real(cappa, r8) else ! ENVIRONMENTAL (empirical) laspe rate !-------------------------------------- @@ -2263,21 +2309,21 @@ subroutine solve_diffusion(dt,nx,nlev,i,j,nlay,pmid,pint,km,fld,boundary_conditi else if (boundary_condition==1) then value_level0 = 0.75_r8*fld(i,j,1) ! value above sponge k=1 - alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * value_level0)/(1._r8 + alp + alm) else ! ! set fld'=0 at model top ! 
k=1 - alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,1) + alp * current_guess(2) + alm * current_guess(1))/(1._r8 + alp + alm) end if do k = 2, nlay-1 - alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k )-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*gravit*gravit/(pmid(i,j,k-1)-pmid(i,j,k )))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k )-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(pmid(i,j,k-1)-pmid(i,j,k )))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * current_guess(k-1))/(1._r8 + alp + alm) end do next_iterate(nlay) = (fld(i,j,nlay) + alp * fld(i,j,nlay) + alm * current_guess(nlay-1))/(1._r8 + alp + alm) ! bottom BC diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 index eb491ab3..44ec3955 100644 --- a/src/dynamics/se/dycore/prim_advection_mod.F90 +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -18,7 +18,6 @@ module prim_advection_mod ! use shr_kind_mod, only: r8=>shr_kind_r8 use dimensions_mod, only: nlev, np, qsize, nc - use physconst, only: cpair use derivative_mod, only: derivative_t use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct @@ -962,7 +961,7 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) use dimensions_mod, only : lcp_moist, kord_tr,kord_tr_cslam use cam_logfile, only : iulog use physconst, only : pi,get_thermal_energy,get_dp,get_virtual_temp - use physconst , only : thermodynamic_active_species_idx_dycore + use physconst , only : thermodynamic_active_species_idx_dycore use thread_mod , only : omp_set_nested use control_mod, only: vert_remap_uvTq_alg type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) @@ -970,25 +969,25 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) type (element_t), intent(inout) :: elem(:) ! real (kind=r8) :: dpc_star(nc,nc,nlev) !Lagrangian levels on CSLAM grid - + type (hvcoord_t) :: hvcoord integer :: ie,i,j,k,np1,nets,nete,np1_qdp,q, m_cnst real (kind=r8), dimension(np,np,nlev) :: dp_moist,dp_star_moist, dp_dry,dp_star_dry real (kind=r8), dimension(np,np,nlev) :: internal_energy_star real (kind=r8), dimension(np,np,nlev,2):: ttmp - real(r8), parameter :: rad2deg = 180.0_r8/pi + real(r8), parameter :: rad2deg = 180.0_r8/real(pi, r8) integer :: region_num_threads,qbeg,qend,kord_uvT(1) - type (hybrid_t) :: hybridnew,hybridnew2 + type (hybrid_t) :: hybridnew,hybridnew2 real (kind=r8) :: ptop kord_uvT = vert_remap_uvTq_alg - + ptop = hvcoord%hyai(1)*hvcoord%ps0 do ie=nets,nete ! ! prepare for mapping of temperature ! - if (vert_remap_uvTq_alg>-20) then + if (vert_remap_uvTq_alg>-20) then if (lcp_moist) then ! ! 
compute internal energy on Lagrangian levels diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index d3b6a980..a65f5a06 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -297,7 +297,7 @@ subroutine prim_init1(elem, fvm, par, Tl) aratio(ie,1) = sum(elem(ie)%mp(:,:)*elem(ie)%metdet(:,:)) end do call repro_sum(aratio, area, nelemd, nelemd, 1, commid=par%comm) - area(1) = 4.0_r8*pi/area(1) ! ratio correction + area(1) = 4.0_r8*real(pi, r8)/area(1) ! ratio correction deallocate(aratio) if (par%masterproc) then write(iulog,'(2a,f20.17)') subname, "re-initializing cube elements: area correction=", area(1) diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90 index 16497400..9f1d42a0 100644 --- a/src/dynamics/se/dycore/quadrature_mod.F90 +++ b/src/dynamics/se/dycore/quadrature_mod.F90 @@ -161,7 +161,7 @@ function gauss_pts(np1) result(pts) ! Compute first half of the roots by "polynomial deflation". ! ============================================================ - dth = PI/(2*n+2) + dth = real(pi, r8)/(2*n+2) nh = (n+1)/2 @@ -396,7 +396,7 @@ function gausslobatto_pts(np1) result(pts) a = -(jac(n+1)*jacm1(n-1)-jacm1(n+1)*jac(n-1))/det b = -(jac(n )*jacm1(n+1)-jacm1(n )*jac(n+1))/det - dth = PI/(2*n+1) + dth = real(pi, r8)/(2*n+1) cd = COS(c2*dth) sd = SIN(c2*dth) cs = COS(dth) diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index b6afd3f3..27bcc804 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -38,8 +38,7 @@ module dyn_grid use cam_map_utils, only: iMap use cam_logfile, only: iulog -use cam_abortutils, only: endrun -use string_utils, only: to_str +use cam_abortutils, only: endrun, check_allocate !SE dycore: use dimensions_mod, only: globaluniquecols, nelem, nelemd, nelemdmax, & @@ -196,46 +195,33 @@ subroutine model_grid_init() !Allocate SE dycore "hvcoord" structure: !+++++++ allocate(hvcoord%hyai(pverp), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%hyai(pverp) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%hyai(pverp)', & + file=__FILE__, line=__LINE__) allocate(hvcoord%hyam(pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%hyam(pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%hyam(pver)', & + file=__FILE__, line=__LINE__) allocate(hvcoord%hybi(pverp), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%hybi(pverp) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%hybi(pverp)', & + file=__FILE__, line=__LINE__) allocate(hvcoord%hybm(pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%hybm(pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%hybm(pver)', & + file=__FILE__, line=__LINE__) allocate(hvcoord%hybd(pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%hybd(pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%hybd(pver)', & + file=__FILE__, line=__LINE__) allocate(hvcoord%etam(pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%etam(pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%etam(pver)', & + file=__FILE__, line=__LINE__) 
allocate(hvcoord%etai(pverp), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate hvcoord%etai(pverp) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'hvcoord%etai(pverp)', & + file=__FILE__, line=__LINE__) + !+++++++ !Set SE "hvcoord" values: @@ -337,21 +323,16 @@ subroutine model_grid_init() if (do_native_mapping) then allocate(areaA(ngcols_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate areaA(ngcols_d) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'areaA(ngcols_d)', & + file=__FILE__, line=__LINE__) allocate(clat(ngcols_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate clat(ngcols_d) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'clat(ngcols_d)', & + file=__FILE__, line=__LINE__) + allocate(clon(ngcols_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate clon(ngcols_d) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'clon(ngcols_d)', & + file=__FILE__, line=__LINE__) call get_horiz_grid_int(ngcols_d, clat_d_out=clat, clon_d_out=clon, area_d_out=areaA) @@ -375,10 +356,9 @@ subroutine model_grid_init() ! Allocate local_dyn_columns structure if not already allocated: if (.not.allocated(local_dyn_columns)) then allocate(local_dyn_columns(num_local_columns), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate local_dyn_columns(num_local_columns) '//& - 'failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'local_dyn_columns(num_local_columns)', & + file=__FILE__, line=__LINE__) + end if ! Set local_dyn_columns values: @@ -396,10 +376,8 @@ subroutine model_grid_init() gridname = 'physgrid_d' allocate(grid_attribute_names(2), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_attribute_names(2) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_attribute_names(2)', & + file=__FILE__, line=__LINE__) grid_attribute_names(1) = 'fv_nphys' grid_attribute_names(2) = 'ne' @@ -407,10 +385,8 @@ subroutine model_grid_init() gridname = 'GLL' allocate(grid_attribute_names(3), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_attribute_names(3) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_attribute_names(3)', & + file=__FILE__, line=__LINE__) ! For standard CAM-SE, we need to copy the area attribute. ! 
For physgrid, the physics grid will create area (GLL has area_d) @@ -442,7 +418,7 @@ end subroutine model_grid_init subroutine set_dyn_col_values() use physconst, only: pi - use cam_abortutils, only: endrun + use string_utils, only: to_str !SE dycore: use coordinate_systems_mod, only: spherical_polar_t @@ -489,11 +465,10 @@ subroutine set_dyn_col_values() elem(elem_ind)%GlobalId allocate(local_dyn_columns(lindex)%dyn_block_index(1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate local_dyn_columns('//& - to_str(lindex)//')%dyn_block_index(1)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, & + 'local_dyn_columns('//& + to_str(lindex)//')%dyn_block_index(1)', & + file=__FILE__, line=__LINE__) local_dyn_columns(lindex)%dyn_block_index(1) = col_ind + 1 end do @@ -525,11 +500,10 @@ subroutine set_dyn_col_values() elem(elem_ind)%GlobalId allocate(local_dyn_columns(lindex)%dyn_block_index(1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate local_dyn_columns('//& - to_str(lindex)//')%dyn_block_index(1)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, & + 'local_dyn_columns('//& + to_str(lindex)//')%dyn_block_index(1)', & + file=__FILE__, line=__LINE__) local_dyn_columns(lindex)%dyn_block_index(1) = col_ind end do @@ -643,9 +617,8 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & call create_global_coords(clat_d_out, clon_d_out, lat_d_out, lon_d_out) else allocate(temp(nxy), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate temp(nxy) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'temp(nxy)', & + file=__FILE__, line=__LINE__) call create_global_coords(clat_d_out, temp, lat_d_out, lon_d_out) deallocate(temp) @@ -654,9 +627,8 @@ subroutine get_horiz_grid_int(nxy, clat_d_out, clon_d_out, area_d_out, & else if (present(clon_d_out)) then allocate(temp(nxy), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate temp(nxy) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'temp(nxy)', & + file=__FILE__, line=__LINE__) call create_global_coords(temp, clon_d_out, lat_d_out, lon_d_out) deallocate(temp) @@ -720,14 +692,12 @@ subroutine dyn_grid_get_elem_coords(ie, rlon, rlat, cdex) eb = sb + elem(ie)%idxp%NumUniquePts-1 allocate(clat(sb:eb), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate clat(sb:eb) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'clat(sb:eb)', & + file=__FILE__, line=__LINE__) allocate(clon(sb:eb), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate clon(sb:eb) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'clon(sb:eb)', & + file=__FILE__, line=__LINE__) call UniqueCoords( elem(ie)%idxP, elem(ie)%spherep, clat(sb:eb), clon(sb:eb) ) @@ -873,28 +843,20 @@ subroutine define_cam_grids() end do allocate(pelat_deg(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pelat_deg(np*np*nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pelat_deg(np*np*nelemd)', & + file=__FILE__, line=__LINE__) allocate(pelon_deg(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pelon_deg(np*np*nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pelon_deg(np*np*nelemd)', & + file=__FILE__, line=__LINE__) 
allocate(pearea(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pearea(np*np*nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pearea(np*np*nelemd)', & + file=__FILE__, line=__LINE__) allocate(pemap(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pemap(np*np*nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pemap(np*np*nelemd)', & + file=__FILE__, line=__LINE__) pemap = 0_iMap ii = 1 @@ -933,10 +895,8 @@ subroutine define_cam_grids() ! Map for GLL grid allocate(grid_map(3,npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_map(3,npsq*nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_map(3,npsq*nelemd)', & + file=__FILE__, line=__LINE__) grid_map = 0_iMap mapind = 1 @@ -1000,21 +960,16 @@ subroutine define_cam_grids() ncols_fvm = nc * nc * nelemd ngcols_fvm = nc * nc * nelem_d allocate(fvm_coord(ncols_fvm), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate fvm_coord(ncols_fvm) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'fvm_coord(ncols_fvm)', & + file=__FILE__, line=__LINE__) + allocate(fvm_map(ncols_fvm), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate fvm_map(ncols_fvm) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'fvm_map(ncols_fvm)', & + file=__FILE__, line=__LINE__) allocate(fvm_area(ncols_fvm), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate fvm_area(ncols_fvm) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'fvm_area(ncols_fvm)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd k = 1 @@ -1048,10 +1003,8 @@ subroutine define_cam_grids() ! Map for FVM grid allocate(grid_map(3, ncols_fvm), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_map(3, ncols_fvm) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_map(3, ncols_fvm)', & + file=__FILE__, line=__LINE__) grid_map = 0_iMap mapind = 1 @@ -1089,22 +1042,16 @@ subroutine define_cam_grids() ngcols_physgrid = fv_nphys * fv_nphys * nelem_d allocate(physgrid_coord(ncols_physgrid), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate physgrid_coord(ncols_physgrid) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'physgrid_coord(ncols_physgrid)', & + file=__FILE__, line=__LINE__) allocate(physgrid_map(ncols_physgrid), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate physgrid_map(ncols_physgrid) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'physgrid_map(ncols_physgrid)', & + file=__FILE__, line=__LINE__) allocate(physgrid_area(ncols_physgrid), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate physgrid_area(ncols_physgrid) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'physgrid_area(ncols_physgrid)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd k = 1 @@ -1138,10 +1085,8 @@ subroutine define_cam_grids() ! 
Map for physics grid allocate(grid_map(3, ncols_physgrid), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_map(3, ncols_physgrid) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_map(3, ncols_physgrid)', & + file=__FILE__, line=__LINE__) grid_map = 0_iMap mapind = 1 @@ -1273,15 +1218,12 @@ subroutine create_global_area(area_d) ! mpi_gatherv) then redorder into globalID order (via dp_reorder) ncol = fv_nphys*fv_nphys*nelem_d allocate(rbuf(ncol), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rbuf(ncol) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rbuf(ncol)', & + file=__FILE__, line=__LINE__) allocate(dp_area(fv_nphys*fv_nphys,nelem_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp_area(fv_nphys*fv_nphys,nelem_d)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dp_area(fv_nphys*fv_nphys,nelem_d)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd k = 1 @@ -1324,10 +1266,8 @@ subroutine create_global_area(area_d) else ! physics is on the GLL grid allocate(rbuf(ngcols_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rbuf(ngcols_d) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rbuf(ngcols_d)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemdmax if (ie <= nelemd) then @@ -1407,23 +1347,16 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) ncol = fv_nphys*fv_nphys*nelem_d allocate(rbuf(ncol), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rbuf(ncol) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rbuf(ncol)', & + file=__FILE__, line=__LINE__) allocate(dp_lon(fv_nphys*fv_nphys,nelem_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp_lon(fv_nphys*fv_nphys,nelem_d)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dp_lon(fv_nphys*fv_nphys,nelem_d)', & + file=__FILE__, line=__LINE__) allocate(dp_lat(fv_nphys*fv_nphys,nelem_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp_lat(fv_nphys*fv_nphys,nelem_d)'//& - ' failed with stat: '//to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'dp_lat(fv_nphys*fv_nphys,nelem_d)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd k = 1 @@ -1487,10 +1420,8 @@ subroutine create_global_coords(clat, clon, lat_out, lon_out) else ! 
physics uses the GLL grid allocate(rbuf(ngcols_d), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rbuf(ngcols_d) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rbuf(ngcols_d)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemdmax diff --git a/src/dynamics/se/native_mapping.F90 b/src/dynamics/se/native_mapping.F90 index bb901791..86316f2c 100644 --- a/src/dynamics/se/native_mapping.F90 +++ b/src/dynamics/se/native_mapping.F90 @@ -86,7 +86,7 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are use shr_infnan_mod, only: isnan=>shr_infnan_isnan use cam_pio_utils, only: cam_pio_openfile, cam_pio_createfile - use string_utils, only: to_str + use cam_abortutils, only: check_allocate use pio, only: pio_noerr, pio_openfile, pio_createfile, pio_closefile, & pio_get_var, pio_put_var, pio_write_darray,pio_int, pio_double, & @@ -206,33 +206,26 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are ierr = pio_inq_dimid( ogfile, 'grid_size', dimid) ierr = pio_inq_dimlen( ogfile, dimid, npts) allocate(lat(npts), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate lat(npts) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'lat(npts)', & + file=__FILE__, line=__LINE__) allocate(lon(npts), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate lon(npts) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'lon(npts)', & + file=__FILE__, line=__LINE__) allocate(grid_imask(npts), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate grid_imask(npts) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'grid_mask(npts)', & + file=__FILE__, line=__LINE__) allocate(areab(npts), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate areab(npts) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'areab(npts)', & + file=__FILE__, line=__LINE__) ierr = pio_inq_dimid( ogfile, 'grid_rank', dimid) ierr = pio_inq_dimlen(ogfile, dimid, dg_rank) allocate(dg_dims(dg_rank), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dg_dims(dg_rank) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dg_dims(dg_rank)', & + file=__FILE__, line=__LINE__) ierr = pio_inq_varid( ogfile, 'grid_dims', vid) ierr = pio_get_var( ogfile, vid, dg_dims) @@ -337,22 +330,16 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are do ii=1,nelemd ngrid = interpdata(ii)%n_interp allocate(interpdata(ii)%interp_xy( ngrid ), stat=ierr ) - if (ierr /= 0) then - call endrun(subname//': allocate interpdata(ii)%interp_xy(ngrid)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'interpdata(ii)%interp_xy(ngrid)', & + file=__FILE__, line=__LINE__) allocate(interpdata(ii)%ilat( ngrid ), stat=ierr ) - if (ierr /= 0) then - call endrun(subname//': allocate interpdata(ii)%ilat(ngrid)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'interpdata(ii)%ilat(ngrid)', & + file=__FILE__, line=__LINE__) allocate(interpdata(ii)%ilon( ngrid ), stat=ierr ) - if (ierr /= 0) then - call endrun(subname//': allocate interpdata(ii)%ilon(ngrid)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'interpdata(ii)%ilon(ngrid)', & + file=__FILE__, line=__LINE__) interpdata(ii)%n_interp=0 ! 
reset counter enddo @@ -381,28 +368,20 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are allocate(h(int(countx)), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate h(int(countx)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'h(int(countx))', & + file=__FILE__, line=__LINE__) allocate(h1d(int(countx)*npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate h1d(int(countx)*npsq*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'h1d(int(countx)*npsq*nelemd)', & + file=__FILE__, line=__LINE__) allocate(row(int(countx)*npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate row(int(countx)*npsq*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'row(int(countx)*npsq*nelemd)', & + file=__FILE__, line=__LINE__) allocate(col(int(countx)*npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate col(int(countx)*npsq*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'col(int(countx)*npsq*nelemd)', & + file=__FILE__, line=__LINE__) row = 0 col = 0 @@ -449,9 +428,8 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are allocate(ldof(ngrid), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate ldof(ngrid) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'ldof(ngrid)', & + file=__FILE__, line=__LINE__) ldof = 0 ii=1 @@ -567,6 +545,9 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are deallocate(areaB) allocate(grid_imask(ncol)) + call check_allocate(ierr, subname, 'grid_imask(ncol)', & + file=__FILE__, line=__LINE__) + grid_imask=1 ierr = pio_put_var(ogfile, maska_id, grid_imask) diff --git a/src/dynamics/tests/inic_analytic.F90 b/src/dynamics/tests/inic_analytic.F90 index 0d60e0bc..8a65e2aa 100644 --- a/src/dynamics/tests/inic_analytic.F90 +++ b/src/dynamics/tests/inic_analytic.F90 @@ -8,8 +8,7 @@ module inic_analytic !----------------------------------------------------------------------- use cam_logfile, only: iulog use shr_kind_mod, only: r8 => shr_kind_r8 - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use shr_sys_mod, only: shr_sys_flush use inic_analytic_utils, only: analytic_ic_active, analytic_ic_type @@ -83,10 +82,8 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & #ifdef ANALYTIC_IC allocate(mask_use(size(latvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'mask_use(size(latvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then if (size(mask_use) /= size(mask)) then @@ -194,10 +191,8 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & end if call random_seed(size=rndm_seed_sz) allocate(rndm_seed(rndm_seed_sz), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate rndm_seed(rndm_seed_sz) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'rndm_seed(rndm_seed_sz)', & + file=__FILE__, line=__LINE__) ncol = size(T, 1) nlev = size(T, 2) @@ -457,10 +452,8 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & end if nblks = size2 
allocate(lat_use(size(lonvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate lat_use(size(lonvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'lat_use(size(lonvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then call endrun(subname//': mask not supported for lon/lat') @@ -510,10 +503,8 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & ! Case: lon,lev,lat nblks = size3 allocate(lat_use(size(lonvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate lat_use(size(lonvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'lat_use(size(lonvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then call endrun(subname//': mask not supported for lon/lat') diff --git a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 index 4bc65860..3061cd41 100644 --- a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 +++ b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 @@ -82,7 +82,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & !use constituents, only: cnst_name !use const_init, only: cnst_init_default use inic_analytic_utils, only: analytic_ic_is_moist - use string_utils, only: to_str + use cam_abortutils, only: check_allocate !----------------------------------------------------------------------- ! @@ -142,10 +142,8 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & end if allocate(mask_use(size(latvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'mask_use(size(latvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then if (size(mask_use) /= size(mask)) then @@ -233,32 +231,27 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & ! check whether first constituent in Q is water vapor. cnst1_is_moisture = m_cnst(1) == 1 allocate(zlocal(size(Q, 1),nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate zlocal(size(Q, 1),nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'zlocal(size(Q, 1),nlev)', & + file=__FILE__, line=__LINE__) end if - allocate(zk(nlev)) + allocate(zk(nlev), stat=iret) + call check_allocate(iret, subname, 'zk(nlev)', & + file=__FILE__, line=__LINE__) + if ((lq.or.lt) .and. 
(vcoord == vc_dry_pressure)) then allocate(pdry_half(nlev+1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate pdry_half(nlev+1) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'pdry_half(nlev+1)', & + file=__FILE__, line=__LINE__) allocate(pwet_half(nlev+1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate pwet_half(nlev+1) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'pwet_half(nlev+1)', & + file=__FILE__, line=__LINE__) allocate(zdry_half(nlev+1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate zdry_half(nlev+1) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'zdry_half(nlev+1)', & + file=__FILE__, line=__LINE__) end if do i=1,ncol From 11d7eb26276511d18b892351b1be02f5e7dc6168 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Tue, 15 Jun 2021 13:53:50 -0600 Subject: [PATCH 23/45] Add final SE-dycore check_allocate calls, move ps to phys_state, and fix namelist bug. --- cime_config/namelist_definition_cam.xml | 22 + src/data/registry.xml | 1 + src/dynamics/se/dp_coupling.F90 | 123 ++---- .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 45 +- src/dynamics/se/dycore/cube_mod.F90 | 60 +-- src/dynamics/se/dycore/derivative_mod.F90 | 48 +-- src/dynamics/se/dycore/dof_mod.F90 | 27 +- src/dynamics/se/dycore/edge_mod.F90 | 188 +++------ src/dynamics/se/dycore/element_mod.F90 | 392 ++++++------------ .../se/dycore/fvm_control_volume_mod.F90 | 322 ++++++-------- src/dynamics/se/dycore/fvm_mapping.F90 | 154 +++---- src/dynamics/se/dycore/fvm_mod.F90 | 17 +- src/dynamics/se/dycore/gridgraph_mod.F90 | 32 +- src/dynamics/se/dycore/hybrid_mod.F90 | 38 +- src/dynamics/se/dycore/interpolate_mod.F90 | 99 ++--- src/dynamics/se/dycore/ll_mod.F90 | 9 +- src/dynamics/se/dycore/mesh_mod.F90 | 46 +- src/dynamics/se/dycore/metagraph_mod.F90 | 73 ++-- src/dynamics/se/dycore/parallel_mod.F90 | 15 +- src/dynamics/se/dycore/prim_advance_mod.F90 | 14 +- src/dynamics/se/dycore/prim_advection_mod.F90 | 12 +- src/dynamics/se/dycore/prim_driver_mod.F90 | 7 +- src/dynamics/se/dycore/prim_init.F90 | 51 +-- src/dynamics/se/dycore/prim_state_mod.F90 | 52 +-- src/dynamics/se/dycore/quadrature_mod.F90 | 27 +- src/dynamics/se/dycore/reduction_mod.F90 | 21 +- src/dynamics/se/dycore/schedule_mod.F90 | 159 +++---- src/dynamics/se/dycore/spacecurve_mod.F90 | 21 +- src/dynamics/se/dyn_comp.F90 | 178 +++----- src/dynamics/se/stepon.F90 | 53 +-- .../initial_conditions/ic_baro_dry_jw06.F90 | 9 +- .../initial_conditions/ic_held_suarez.F90 | 9 +- .../initial_conditions/ic_us_standard_atm.F90 | 21 +- 33 files changed, 864 insertions(+), 1481 deletions(-) diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 3ca6d51d..3a87048c 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10957,6 +10957,28 @@ 0.0 + + real + se + dyn_se_inparm + + Used by SE dycore to apply sponge layer diffusion to u, v, and T for + stability of WACCM configurations. The diffusion is modeled on 3D molecular + diffusion and thermal conductivity by using actual molecular diffusion and + thermal conductivity coefficients multiplied by the value of + se_molecular_diff. + + If set <= 0.0 then the code is not activated. If set > 0.0 then + the molecular diffusion and thermal conductivity coefficients will be + multiplied by a factor of se_molecular_diff. + + Default: 0. 
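For illustration only (a hypothetical user setting, not part of this patch): since se_molecular_diff is a
namelist variable in the dyn_se_inparm group, a case that wants this sponge-layer diffusion active could
add a line such as

  se_molecular_diff = 100.0

to its user_nl_cam file. Here 100.0 is simply the non-zero value that appears in the value list below;
per the description above, any value greater than 0.0 activates the scheme and scales the molecular
diffusion and thermal conductivity coefficients by that factor.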
+ + + 0.0 + 100.0 + + integer se diff --git a/src/data/registry.xml b/src/data/registry.xml index e1dc7ae8..1e5bea45 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -277,6 +277,7 @@ .true. + surface_air_pressure geopotential_at_surface air_temperature x_wind diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 61b9f278..dacb335d 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -22,8 +22,7 @@ module dp_coupling use dp_mapping, only: nphys_pts use perf_mod, only: t_startf, t_stopf, t_barrierf -use cam_abortutils, only: endrun -use string_utils, only: to_str +use cam_abortutils, only: endrun, check_allocate !SE dycore: use parallel_mod, only: par @@ -52,7 +51,7 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) ! Note that all pressures and tracer mixing ratios coming from the dycore are based on ! dry air mass. - use physics_types, only: ps, pdel + use physics_types, only: pdel ! use gravity_waves_sources, only: gws_src_fnct use dyn_comp, only: frontgf_idx, frontga_idx ! use phys_control, only: use_gw_front, use_gw_front_igw @@ -114,70 +113,55 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) nphys = fv_nphys else allocate(qgll(np,np,nlev,pcnst), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate qgll(np,np,nlev,pcnst) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'qgll(np,np,nlev,pcnst)', & + file=__FILE__, line=__LINE__) nphys = np end if ! Allocate temporary arrays to hold data for physics decomposition allocate(ps_tmp(nphys_pts,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate ps_tmp(nphys_pts,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'ps_tmp(nphys_pts,nelemd)', & + file=__FILE__, line=__LINE__) allocate(dp3d_tmp(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp3d_tmp(nphys_pts,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dp3d_tmp(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) allocate(dp3d_tmp_tmp(nphys_pts,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp3d_tmp_tmp(nphys_pts,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dp3d_tmp_tmp(nphys_pts,pver)', & + file=__FILE__, line=__LINE__) allocate(phis_tmp(nphys_pts,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate phis_tmp(nphys_pts,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'phis_tmp(nphys_pts,nelemd)', & + file=__FILE__, line=__LINE__) allocate(T_tmp(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate T_tmp(nphys_pts,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'T_tmp(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) allocate(uv_tmp(nphys_pts,2,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate uv_tmp(nphys_pts,2,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'uv_tmp(nphys_pts,2,pver,nelemd)', & + file=__FILE__, line=__LINE__) allocate(q_tmp(nphys_pts,pver,pcnst,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate q_tmp(nphys_pts,pver,pcnst,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 
'q_tmp(nphys_pts,pver,pcnst,nelemd)', & + file=__FILE__, line=__LINE__) allocate(omega_tmp(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate q_tmp(nphys_pts,pver,pcnst,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'omega_tmp(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) !Remove once a gravity wave parameterization is available -JN #if 0 if (use_gw_front .or. use_gw_front_igw) then allocate(frontgf(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) call endrun("dp_coupling: Allocate of frontgf failed.") + call check_allocate(ierr, subname, 'frontgf(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) + allocate(frontga(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) call endrun("dp_coupling: Allocate of frontga failed.") + call check_allocate(ierr, subname, 'frontga(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) end if #endif @@ -266,10 +250,8 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) ! q_prev is for saving the tracer fields for calculating tendencies if (.not. allocated(q_prev)) then allocate(q_prev(pcols,pver,pcnst), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate q_prev(pcols,pver,pcnst) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'q_prev(pcols,pver,pcnst)', & + file=__FILE__, line=__LINE__) end if q_prev = 0.0_r8 @@ -278,23 +260,18 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) #if 0 if (use_gw_front .or. use_gw_front_igw) then allocate(frontgf_phys(pcols, pver, begchunk:endchunk), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate frontgf_phys(pcols, pver, begchunk:endchunk)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'frontgf_phys(pcols, pver, begchunk:endchunk)', & + file=__FILE__, line=__LINE__) allocate(frontga_phys(pcols, pver, begchunk:endchunk), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate frontga_phys(pcols, pver, begchunk:endchunk)'//& - ' failed with stat: '//to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'frontga_phys(pcols, pver, begchunk:endchunk)', & + file=__FILE__, line=__LINE__) end if #endif !$omp parallel do num_threads(max_num_threads) private (icol, ie, blk_ind, ilyr, m) do icol = 1, pcols call get_dyn_col_p(icol, ie, blk_ind) - ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) + phys_state%ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) do ilyr = 1, pver pdel(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) @@ -427,28 +404,20 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) end if allocate(T_tmp(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate T_tmp(nphys_pts,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'T_tmp(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) allocate(uv_tmp(nphys_pts,2,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate uv_tmp(nphys_pts,2,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'uv_tmp(nphys_pts,2,pver,nelemd)', & + file=__FILE__, line=__LINE__) allocate(dq_tmp(nphys_pts,pver,pcnst,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dq_tmp(nphys_pts,pver,pcnst,nelemd) failed with stat: '//& - to_str(ierr)) - end if + 
call check_allocate(ierr, subname, 'dq_tmp(nphys_pts,pver,pcnst,nelemd)', & + file=__FILE__, line=__LINE__) allocate(dp_phys(nphys_pts,pver,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dp_phys(nphys_pts,pver,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dp_phys(nphys_pts,pver,nelemd)', & + file=__FILE__, line=__LINE__) T_tmp = 0.0_r8 uv_tmp = 0.0_r8 @@ -652,7 +621,7 @@ subroutine derived_phys_dry(phys_state, phys_tend) use physics_types, only: psdry, pint, lnpint, pintdry, lnpintdry use physics_types, only: pdel, rpdel, pdeldry, rpdeldry use physics_types, only: pmid, lnpmid, pmiddry, lnpmiddry - use physics_types, only: exner, zi, zm, ps, lagrangian_vertical + use physics_types, only: exner, zi, zm, lagrangian_vertical use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv ! use phys_control, only: waccmx_is @@ -737,17 +706,17 @@ subroutine derived_phys_dry(phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i=1, pcols ! Set model-top values assuming zero moisture: - ps(i) = pintdry(i,1) - pint(i,1) = pintdry(i,1) + phys_state%ps(i) = pintdry(i,1) + pint(i,1) = pintdry(i,1) end do !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i=1, pcols ! Calculate wet (total) pressure variables for rest of column: - pint(i,k+1) = pint(i,k) + pdel(i,k) - pmid(i,k) = (pint(i,k+1) + pint(i,k))/2._kind_phys - ps(i) = ps(i) + pdel(i,k) + pint(i,k+1) = pint(i,k) + pdel(i,k) + pmid(i,k) = (pint(i,k+1) + pint(i,k))/2._kind_phys + phys_state%ps(i) = phys_state%ps(i) + pdel(i,k) end do ! Calculate (natural) logarithms: call shr_vmath_log(pint(1:pcols,k), lnpint(1:pcols,k), pcols) diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index 6b37a024..11d4d0ab 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -3,8 +3,7 @@ module comp_gll_ctr_vol use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush use global_norms_mod, only: wrap_repro_sum @@ -274,11 +273,8 @@ subroutine gll_grid_write(elem, grid_format, filename_in) ! Work array to gather info before writing allocate(gwork(np*np, nv_max, nelemd), stat=ierror) - if (ierror /= 0) then - call endrun(subname//': allocate gwork(np*np, nv_max, nelemd) failed with stat: '//& - to_str(ierror)) - end if - + call check_allocate(ierror, subname, 'gwork(np*np, nv_max, nelemd)', & + file=__FILE__, line=__LINE__) ! 
Write grid size status = pio_put_var(file, grid_dims_id, (/ gridsize /)) @@ -340,9 +336,8 @@ subroutine gll_grid_write(elem, grid_format, filename_in) !!XXgoldyXX: v debug only #ifdef USE_PIO3D allocate(ldof(np*np*nelemd*nv_max), stat=ierror) -if (ierror /= 0) then - call endrun(subname//': allocate ldof(np*np*nelemd*nv_max) failed with stat: '//to_str(ierror)) -end if +call check_allocate(ierror, subname, 'ldof(np*np*nelemd*nv_max)', & + file=__FILE__, line=__LINE__) ldof = 0 do ie = 1, nelemd @@ -362,10 +357,8 @@ subroutine gll_grid_write(elem, grid_format, filename_in) end do end do allocate(iodesc, stat=ierror) -if (ierror /= 0) then - call endrun(subname//': allocate iodesc failed with stat: '//to_str(ierror)) -end if - +call check_allocate(ierror, subname, 'iodesc', & + file=__FILE__, line=__LINE__) call cam_pio_newdecomp(iodesc, (/ nv_max, gridsize /), ldof, PIO_double) call pio_write_darray(file, grid_corner_lat_id, iodesc, gwork, status) @@ -455,29 +448,21 @@ subroutine InitControlVolumesData(par, elem, nelemd) ! Cannot be done in a threaded region allocate(cvlist(nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate vlist(nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'vlist(nelemd)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd allocate(cvlist(ie)%vert(nv_max, np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate cvlist(ie)%vert(nv_max,np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'cvlist(ie)%vert(nv_max,np,np)', & + file=__FILE__, line=__LINE__) allocate(cvlist(ie)%vert_latlon(nv_max,np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate cvlist(ie)%vert_latlon(nv_max,np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'cvlist(ie)%vert_latlon(nv_max,np,np)', & + file=__FILE__, line=__LINE__) allocate(cvlist(ie)%face_no(nv_max,np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate cvlist(ie)%face_no(nv_max,np,np) failed with stat: '//& - to_str(iret)) - end if - + call check_allocate(iret, subname, 'cvlist(ie)%face_no(nv_max,np,np)', & + file=__FILE__, line=__LINE__) end do call initedgebuffer(par,edge1,elem,3,bndry_type=HME_BNDRY_P2P, nthreads=1) diff --git a/src/dynamics/se/dycore/cube_mod.F90 b/src/dynamics/se/dycore/cube_mod.F90 index 1c085d4c..edad65cf 100644 --- a/src/dynamics/se/dycore/cube_mod.F90 +++ b/src/dynamics/se/dycore/cube_mod.F90 @@ -6,8 +6,7 @@ module cube_mod use physconst, only: pi, rearth use control_mod, only: hypervis_scaling, cubed_sphere_map - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -915,10 +914,8 @@ subroutine rotation_init_atomic(elem, rot_type) if (nrot > 0) then allocate(elem%desc%rot(nrot), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%rot(nrot) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%rot(nrot)', & + file=__FILE__, line=__LINE__) elem%desc%use_rotation=1 irot=0 @@ -938,19 +935,13 @@ subroutine rotation_init_atomic(elem, rot_type) if (inbr <= 4) then allocate(elem%desc%rot(irot)%R(2,2,np), stat=iret) ! 
edge - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%rot(irot)%R(2,2,np)'//& - ' failed with stat: '//to_str(iret)) - end if - + call check_allocate(iret, subname, 'elem%desc%rot(irot)%R(2,2,np)', & + file=__FILE__, line=__LINE__) else allocate(elem%desc%rot(irot)%R(2,2,1 ), stat=iret) ! corner - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%rot(irot)%R(2,2,1)'//& - ' failed with stat: '//to_str(iret)) - end if - + call check_allocate(iret, subname, 'elem%desc%rot(irot)%R(2,2,1)', & + file=__FILE__, line=__LINE__) end if ! Initialize Dloc and Drem for no-rotation possibilities Dloc(1,1,:) = 1.0_r8 @@ -1472,10 +1463,8 @@ subroutine CubeTopology(GridEdge, GridVertex) if (0==ne) call endrun('Error in CubeTopology: ne is zero') allocate(GridElem(ne,ne,nfaces),stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate GridElem(ne,ne,nfaces)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'GridElem(ne,ne,nfaces)', & + file=__FILE__, line=__LINE__) do k = 1, nfaces do j = 1, ne @@ -1490,10 +1479,8 @@ subroutine CubeTopology(GridEdge, GridVertex) end if allocate(nbrs_used(ne,ne,nfaces,8), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate nbrs_used(ne,ne,nfaces,8)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'nbrs_used(ne,ne,nfaces,8)', & + file=__FILE__, line=__LINE__) nbrs_used = .false. @@ -1520,10 +1507,8 @@ subroutine CubeTopology(GridEdge, GridVertex) end do allocate(Mesh(ne,ne), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Mesh(ne,ne)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Mesh(ne,ne)', & + file=__FILE__, line=__LINE__) if(IsFactorable(ne)) then call GenspaceCurve(Mesh) @@ -1535,21 +1520,16 @@ subroutine CubeTopology(GridEdge, GridVertex) end if allocate(Mesh2(ne2,ne2), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Mesh2(ne2,ne2)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Mesh2(ne2,ne2)', & + file=__FILE__, line=__LINE__) allocate(Mesh2_map(ne2,ne2,2), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Mesh2_map(ne2,ne2,2)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Mesh2_map(ne2,ne2,2)', & + file=__FILE__, line=__LINE__) + allocate(sfcij(0:ne2*ne2,2), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate sfcij(0:ne2*ne2,2)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'sfcij(0:ne2*ne2,2)', & + file=__FILE__, line=__LINE__) call GenspaceCurve(Mesh2) ! SFC partition for ne2 diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 index 6c51bba8..5e583415 100644 --- a/src/dynamics/se/dycore/derivative_mod.F90 +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -1,7 +1,6 @@ module derivative_mod use shr_kind_mod, only: r8=>shr_kind_r8 - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use dimensions_mod, only : np, nc, npdg, nelemd, nlev use quadrature_mod, only : quadrature_t, gauss, gausslobatto,legendre, jacobi ! needed for spherical differential operators: @@ -921,29 +920,20 @@ function remap_phys2gll(pin,nphys) result(pout) ! find number of intersections nintersect = np+nphys-1 ! 
max number of possible intersections allocate(acell(nintersect), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate acell(nintersect) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'acell(nintersect)', & + file=__FILE__, line=__LINE__) allocate(dcell(nintersect), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate dcell(nintersect) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'dcell(nintersect)', & + file=__FILE__, line=__LINE__) allocate(delta(nintersect), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate delta(nintersect) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'delta(nintersect)', & + file=__FILE__, line=__LINE__) allocate(delta_a(np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate delta_a(np) failed with stat: '//& - to_str(iret)) - end if - + call check_allocate(iret, subname, 'delta_a(np)', & + file=__FILE__, line=__LINE__) ! compute phys grid cell edges on [-1,1] do i=1,nphys+1 @@ -2270,18 +2260,13 @@ subroutine allocate_subcell_integration_matrix_cslam(np, intervals) if (ALLOCATED(integration_matrix)) deallocate(integration_matrix) allocate(integration_matrix(intervals,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate integration_matrix(intervals,np)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'integration_matrix(intervals,np)', & + file=__FILE__, line=__LINE__) if (ALLOCATED(boundary_interp_matrix)) deallocate(boundary_interp_matrix) allocate(boundary_interp_matrix(intervals,2,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate boundary_interp_matrix(intervals,2,np)'//& - ' failed with stat: '//to_str(iret)) - end if - + call check_allocate(iret, subname, 'boundary_interp_matrix(intervals,2,np)', & + file=__FILE__, line=__LINE__) gll = gausslobatto(np) @@ -2388,11 +2373,8 @@ subroutine allocate_subcell_integration_matrix_physgrid(np, intervals) if (ALLOCATED(integration_matrix_physgrid)) deallocate(integration_matrix_physgrid) allocate(integration_matrix_physgrid(intervals,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate integration_matrix_physgrid(intervals,np)'//& - ' failed with stat: '//to_str(iret)) - end if - + call check_allocate(iret, subname, 'integration_matrix_physgrid(intervals,np)', & + file=__FILE__, line=__LINE__) gll = gausslobatto(np) diff --git a/src/dynamics/se/dycore/dof_mod.F90 b/src/dynamics/se/dycore/dof_mod.F90 index 24f64096..5ba19601 100644 --- a/src/dynamics/se/dycore/dof_mod.F90 +++ b/src/dynamics/se/dycore/dof_mod.F90 @@ -1,8 +1,7 @@ module dof_mod use shr_kind_mod, only: r8=>shr_kind_r8, i8=>shr_kind_i8 use mpi, only: mpi_integer - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use dimensions_mod, only: np, npsq, nelem, nelemd use quadrature_mod, only: quadrature_t use element_mod, only: element_t,index_t @@ -289,22 +288,16 @@ subroutine SetElemOffset(par,elem,GlobalUniqueColsP) nprocs = par%nprocs allocate(numElemP(nelem), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate numElemP(nelem) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'numElemP(nelem)', & + file=__FILE__, line=__LINE__) allocate(numElem2P(nelem), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate numElem2P(nelem) failed with stat: '//& - 
to_str(ierr)) - end if + call check_allocate(ierr, subname, 'numElem2P(nelem)', & + file=__FILE__, line=__LINE__) allocate(gOffset(nelem), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate gOffset(nelem) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'gOffset(nelem)', & + file=__FILE__, line=__LINE__) numElemP=0;numElem2P=0;gOffset=0 @@ -344,10 +337,8 @@ subroutine CreateUniqueIndex(ig,gdof,idx) npts = size(gdof,dim=1) allocate(ldof(npts,npts), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate ldof(npts,npts) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'ldof(npts,npts)', & + file=__FILE__, line=__LINE__) ! ==================== ! Form the local DOF diff --git a/src/dynamics/se/dycore/edge_mod.F90 b/src/dynamics/se/dycore/edge_mod.F90 index 110b9a7a..ba5365fa 100644 --- a/src/dynamics/se/dycore/edge_mod.F90 +++ b/src/dynamics/se/dycore/edge_mod.F90 @@ -6,8 +6,7 @@ module edge_mod use thread_mod, only: max_num_threads, omp_get_num_threads, omp_get_thread_num use coordinate_systems_mod, only: cartesian3D_t use schedtype_mod, only: cycle_t, schedule_t, pgindex_t, schedule, HME_Ordinal,HME_Cardinal - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use cam_logfile, only: iulog use parallel_mod, only: parallel_t, & MAX_ACTIVE_MSG, HME_status_size, BNDRY_TAG_BASE, HME_BNDRY_A2A, HME_BNDRY_P2P, & @@ -285,38 +284,27 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen edge%tag = BNDRY_TAG_BASE + MODULO(edge%id, MAX_ACTIVE_MSG) allocate(edge%putmap(max_neigh_edges,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%putmap(max_neigh_edges,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%putmap(max_neigh_edges,nelemd)', & + file=__FILE__, line=__LINE__) allocate(edge%getmap(max_neigh_edges,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%getmap(max_neigh_edges,nelemd) failed with stat: '//& - to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'edge%getmap(max_neigh_edges,nelemd)', & + file=__FILE__, line=__LINE__) allocate(edge%reverse(max_neigh_edges,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%reverse(max_neigh_edges,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%reverse(max_neigh_edges,nelemd)', & + file=__FILE__, line=__LINE__) edge%putmap(:,:)=-1 edge%getmap(:,:)=-1 allocate(putmap2(max_neigh_edges,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate putmap2(max_neigh_edges,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'putmap2(max_neigh_edges,nelemd)', & + file=__FILE__, line=__LINE__) allocate(getmap2(max_neigh_edges,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate getmap2(max_neigh_edges,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'getmap2(max_neigh_edges,nelemd)', & + file=__FILE__, line=__LINE__) putmap2(:,:)=-1 getmap2(:,:)=-1 @@ -338,75 +326,52 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen if(nInter>0) then allocate(edge%rcountsInter(nInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rcountsInter(nInter) failed with stat: '//& - to_str(ierr)) - end if + call 
check_allocate(ierr, subname, 'edge%rcountsInter(nInter)', & + file=__FILE__, line=__LINE__) allocate(edge%rdisplsInter(nInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rdisplsInter(nInter) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%rdisplsInter(nInter)', & + file=__FILE__, line=__LINE__) allocate(edge%scountsInter(nInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%scountsInter(nInter) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%scountsInter(nInter)', & + file=__FILE__, line=__LINE__) allocate(edge%sdisplsInter(nInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%sdisplsInter(nInter) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%sdisplsInter(nInter)', & + file=__FILE__, line=__LINE__) endif if(nIntra>0) then allocate(edge%rcountsIntra(nIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rcountsIntra(nIntra) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%rcountsIntra(nIntra)', & + file=__FILE__, line=__LINE__) allocate(edge%rdisplsIntra(nIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rdisplsIntra(nIntra) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%rdisplsIntra(nIntra)', & + file=__FILE__, line=__LINE__) allocate(edge%scountsIntra(nIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%scountsIntra(nIntra) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%scountsIntra(nIntra)', & + file=__FILE__, line=__LINE__) allocate(edge%sdisplsIntra(nIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%sdisplsIntra(nIntra) failed with stat: '//& - to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'edge%sdisplsIntra(nIntra)', & + file=__FILE__, line=__LINE__) endif if (nSendCycles>0) then allocate(edge%scountsFull(nSendCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%scountsFull(nSendCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%scountsFull(nSendCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%sdisplsFull(nSendCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%sdisplsFull(nSendCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%sdisplsFull(nSendCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%Srequest(nSendCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%Srequest(nSendCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%Srequest(nSendCycles)', & + file=__FILE__, line=__LINE__) edge%scountsFull(:) = 0 endif @@ -465,41 +430,30 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen if (nRecvCycles>0) then allocate(edge%rcountsFull(nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rcountsFull(nRecvCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%rcountsFull(nRecvCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%rdisplsFull(nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%rdisplsFull(nRecvCycles) failed with 
stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%rdisplsFull(nRecvCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%getDisplsFull(nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%getDisplsFull(nRecvCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%getDisplsFull(nRecvCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%putDisplsFull(nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%putDisplsFull(nRecvCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%putDisplsFull(nRecvCycles)', & + file=__FILE__, line=__LINE__) + edge%rcountsFull(:) = 0 ! allocate the MPI Send/Recv request handles allocate(edge%Rrequest(nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%Rrequest(nRecvCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%Rrequest(nRecvCycles)', & + file=__FILE__, line=__LINE__) allocate(edge%status(HME_status_size,nRecvCycles), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%status(HME_status_size,nRecvCycles) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%status(HME_status_size,nRecvCycles)', & + file=__FILE__, line=__LINE__) endif ! @@ -596,16 +550,12 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen call gbarrier_init(edge%gbarrier, nlen) allocate(edge%moveLength(nlen), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%moveLength(nlen) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%moveLength(nlen)', & + file=__FILE__, line=__LINE__) allocate(edge%movePtr(nlen), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%movePtr(nlen) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%movePtr(nlen)', & + file=__FILE__, line=__LINE__) if (nlen > 1) then ! 
the master thread performs no data movement because it is busy with the @@ -640,15 +590,12 @@ subroutine initEdgeBuffer_r8(par,edge,elem,nlyr, bndry_type,nthreads,CardinalLen edge%nbuf=nbuf allocate(edge%receive(nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%receive(nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%receive(nbuf)', & + file=__FILE__, line=__LINE__) + allocate(edge%buf(nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%buf(nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%buf(nbuf)', & + file=__FILE__, line=__LINE__) 21 format('RANK: ',i2, A,8(i6)) @@ -710,17 +657,14 @@ subroutine initEdgeBuffer_i8(edge,nlyr) edge%nlyr=nlyr edge%nbuf=nbuf allocate(edge%buf(nlyr,nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%buf(nlyr,nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%buf(nlyr,nbuf)', & + file=__FILE__, line=__LINE__) + edge%buf(:,:)=0 allocate(edge%receive(nlyr,nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate edge%receive(nlyr,nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'edge%receive(nlyr,nbuf)', & + file=__FILE__, line=__LINE__) edge%receive(:,:)=0 @@ -2312,16 +2256,12 @@ subroutine initGhostBuffer3d(ghost,nlyr,np,nhc_in) ghost%nbuf = nbuf ghost%elem_size = np*(nhc+1) allocate(ghost%buf (np,(nhc+1),nlyr,nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate ghost%buf(np,(nhc+1),nlyr,nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'ghost%buf(np,(nhc+1),nlyr,nbuf)', & + file=__FILE__, line=__LINE__) allocate(ghost%receive(np,(nhc+1),nlyr,nbuf), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate ghost%receive(np,(nhc+1),nlyr,nbuf) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'ghost%receive(np,(nhc+1),nlyr,nbuf)', & + file=__FILE__, line=__LINE__) ghost%buf=0 ghost%receive=0 diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 index a9fc8588..e5d1fc55 100644 --- a/src/dynamics/se/dycore/element_mod.F90 +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -4,8 +4,7 @@ module element_mod use coordinate_systems_mod, only: spherical_polar_t, cartesian2D_t, cartesian3D_t, distance use edgetype_mod, only: edgedescriptor_t use gridgraph_mod, only: gridvertex_t - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -367,58 +366,40 @@ subroutine allocate_element_desc(elem) do j=1,num allocate(elem(j)%desc%putmapP(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%putmapP(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%putmapP(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%getmapP(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%getmapP(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%getmapP(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%putmapP_ghost(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate 
elem%desc%putmapP_ghost(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%putmapP_ghost(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%getmapP_ghost(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%getmapP_ghost(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%getmapP_ghost(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%putmapS(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%putmapS(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%putmapS(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%getmapS(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%getmapS(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%getmapS(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%reverse(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%reverse(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%reverse(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%globalID(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%globalID(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%globalID(max_neigh_edges)', & + file=__FILE__, line=__LINE__) allocate(elem(j)%desc%loc2buf(max_neigh_edges), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%desc%loc2buf(max_neigh_edges) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%desc%loc2buf(max_neigh_edges)', & + file=__FILE__, line=__LINE__) do i=1,max_neigh_edges elem(j)%desc%loc2buf(i)=i @@ -451,76 +432,57 @@ subroutine allocate_element_dims(elem) !Coordinate values of element points: allocate(elem(i)%spherep(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%spherep(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%spherep(np,np)', & + file=__FILE__, line=__LINE__) !Gnomonic coords of GLL points: allocate(elem(i)%cartp(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%cartp(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%cartp(np,np)', & + file=__FILE__, line=__LINE__) !Variable Hyperviscosity: allocate(elem(i)%variable_hyperviscosity(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%variable_hyperviscosity(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%variable_hyperviscosity(np,np)', & + file=__FILE__, line=__LINE__) !og, matrix V for tensor viscosity: allocate(elem(i)%tensorVisc(np,np,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%tensorVisc(np,np,2,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%tensorVisc(np,np,2,2)', & + file=__FILE__, line=__LINE__) !Allocate "state" variables: !-------------------------- ! 
velocity allocate(elem(i)%state%v(np,np,2,nlev,timelevels), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%v(np,np,2,nlev,timelevels) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%v(np,np,2,nlev,timelevels)', & + file=__FILE__, line=__LINE__) ! temperature allocate(elem(i)%state%T(np,np,nlev,timelevels), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%T(np,np,nlev,timelevels) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%T(np,np,nlev,timelevels)', & + file=__FILE__, line=__LINE__) ! dry delta p on levels allocate(elem(i)%state%dp3d(np,np,nlev,timelevels), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%dp3d(np,np,nlev,timelevels) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%dp3d(np,np,nlev,timelevels)', & + file=__FILE__, line=__LINE__) ! dry surface pressure allocate(elem(i)%state%psdry(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%psdry(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%psdry(np,np)', & + file=__FILE__, line=__LINE__) ! surface geopotential (prescribed) allocate(elem(i)%state%phis(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%phis(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%phis(np,np)', & + file=__FILE__, line=__LINE__) ! Tracer mass allocate(elem(i)%state%Qdp(np,np,nlev,qsize_d,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%state%Qdp(np,np,nlev,qsize_d,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%state%Qdp(np,np,nlev,qsize_d,2)', & + file=__FILE__, line=__LINE__) + !-------------------------- !Allocate "derived" variables: @@ -528,206 +490,149 @@ subroutine allocate_element_dims(elem) ! velocity for SE tracer advection allocate(elem(i)%derived%vn0(np,np,2,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%vn0(np,np,2,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%vn0(np,np,2,nlev)', & + file=__FILE__, line=__LINE__) ! mean dp dissipation tendency, if nu_p>0 allocate(elem(i)%derived%dpdiss_biharmonic(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dpdiss_biharmonic(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dpdiss_biharmonic(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! mean dp used to compute psdiss_tens allocate(elem(i)%derived%dpdiss_ave(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dpdiss_ave(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dpdiss_ave(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! geopotential allocate(elem(i)%derived%phi(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%phi(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%phi(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! 
vertical velocity allocate(elem(i)%derived%omega(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%omega(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%omega(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! relative vorticity allocate(elem(i)%derived%zeta(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%zeta(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%zeta(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! divergence allocate(elem(i)%derived%div(np,np,nlev,timelevels), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%div(np,np,nlev,timelevels) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%div(np,np,nlev,timelevels)', & + file=__FILE__, line=__LINE__) ! for dp_tracers at physics timestep allocate(elem(i)%derived%dp(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dp(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dp(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! divergence of dp allocate(elem(i)%derived%divdp(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%divdp(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%divdp(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! DSSed divdp allocate(elem(i)%derived%divdp_proj(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%divdp_proj(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%divdp_proj(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! total tracer mass for diagnostics allocate(elem(i)%derived%mass(max(qsize_d,ntrac_d)+9), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%mass(max(qsize_d,ntrac_d)+9) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%mass(max(qsize_d,ntrac_d)+9)', & + file=__FILE__, line=__LINE__) ! tracer forcing allocate(elem(i)%derived%FQ(np,np,nlev,qsize_d), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%FQ(np,np,nlev,qsize_d) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%FQ(np,np,nlev,qsize_d)', & + file=__FILE__, line=__LINE__) ! momentum forcing allocate(elem(i)%derived%FM(np,np,2,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%FM(np,np,2,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%FM(np,np,2,nlev)', & + file=__FILE__, line=__LINE__) ! save full updated dp right after physics allocate(elem(i)%derived%FDP(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%FDP(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%FDP(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! 
temperature forcing allocate(elem(i)%derived%FT(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%FT(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%FT(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! prescribed vertical tendency allocate(elem(i)%derived%etadot_prescribed(np,np,nlevp), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%etadot_prescribed(np,np,nlevp) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%etadot_prescribed(np,np,nlevp)', & + file=__FILE__, line=__LINE__) ! zonal component of prescribed meteorology winds allocate(elem(i)%derived%u_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%u_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%u_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! rate of change of zonal component of prescribed meteorology winds allocate(elem(i)%derived%dudt_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dudt_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dudt_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! meridional component of prescribed meteorology winds allocate(elem(i)%derived%v_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%v_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%v_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! rate of change of meridional component of prescribed meteorology winds allocate(elem(i)%derived%dvdt_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dvdt_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dvdt_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! prescribed meteorology temperature allocate(elem(i)%derived%T_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%T_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%T_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! rate of change of prescribed meteorology temperature allocate(elem(i)%derived%dTdt_met(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dTdt_met(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dTdt_met(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! surface pressure of prescribed meteorology allocate(elem(i)%derived%ps_met(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%ps_met(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%ps_met(np,np)', & + file=__FILE__, line=__LINE__) ! rate of change of surface pressure of prescribed meteorology allocate(elem(i)%derived%dpsdt_met(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%dpsdt_met(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%dpsdt_met(np,np)', & + file=__FILE__, line=__LINE__) ! 
nudging factor (prescribed) allocate(elem(i)%derived%nudge_factor(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%nudge_factor(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%nudge_factor(np,np,nlev)', & + file=__FILE__, line=__LINE__) ! accumulated U tendency due to nudging towards prescribed met allocate(elem(i)%derived%Utnd(npsq,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%Utnd(npsq,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%Utnd(npsq,nlev)', & + file=__FILE__, line=__LINE__) ! accumulated V tendency due to nudging towards prescribed met allocate(elem(i)%derived%Vtnd(npsq,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%Vtnd(npsq,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%Vtnd(npsq,nlev)', & + file=__FILE__, line=__LINE__) ! accumulated T tendency due to nudging towards prescribed met allocate(elem(i)%derived%Ttnd(npsq,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%Ttnd(npsq,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%Ttnd(npsq,nlev)', & + file=__FILE__, line=__LINE__) ! pressure perturbation from condensate allocate(elem(i)%derived%pecnd(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%derived%pecnd(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%derived%pecnd(np,np,nlev)', & + file=__FILE__, line=__LINE__) + !---------------------------- !Allocate "Metric terms": @@ -735,116 +640,87 @@ subroutine allocate_element_dims(elem) ! metric tensor on velocity and pressure grid allocate(elem(i)%met(np,np,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%met(np,np,2,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%met(np,np,2,2)', & + file=__FILE__, line=__LINE__) ! metric tensor on velocity and pressure grid allocate(elem(i)%metinv(np,np,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%metinv(np,np,2,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%metinv(np,np,2,2)', & + file=__FILE__, line=__LINE__) ! g = SQRT(det(g_ij)) on velocity and pressure grid allocate(elem(i)%metdet(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%metdet(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%metdet(np,np)', & + file=__FILE__, line=__LINE__) ! 1/metdet on velocity pressure grid allocate(elem(i)%rmetdet(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%rmetdet(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%rmetdet(np,np)', & + file=__FILE__, line=__LINE__) ! Map covariant field on cube to vector field on the sphere allocate(elem(i)%D(np,np,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%D(np,np,2,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%D(np,np,2,2)', & + file=__FILE__, line=__LINE__) ! 
Map vector field on the sphere to covariant v on cube allocate(elem(i)%Dinv(np,np,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%Dinv(np,np,2,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%Dinv(np,np,2,2)', & + file=__FILE__, line=__LINE__) + !----------------------- !First Coordinate: allocate(elem(i)%sub_elem_mass_flux(nc,nc,4,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%sub_elem_mass_flux(nc,nc,4,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%sub_elem_mass_flux(nc,nc,4,nlev)', & + file=__FILE__, line=__LINE__) !Spherical -> rectangular converter: allocate(elem(i)%vec_sphere2cart(np,np,3,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%vec_sphere2cart(np,np,3,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%vec_sphere2cart(np,np,3,2)', & + file=__FILE__, line=__LINE__) !Mass matrix on v and p grid: allocate(elem(i)%mp(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%mp(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%mp(np,np)', & + file=__FILE__, line=__LINE__) !Inverse mass matrix on v and p grid: allocate(elem(i)%rmp(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%rmp(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%rmp(np,np)', & + file=__FILE__, line=__LINE__) !Mass matrix on v and p grid: allocate(elem(i)%spheremp(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%spheremp(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%spheremp(np,np)', & + file=__FILE__, line=__LINE__) !Inverse mass matrix on v and p grid: allocate(elem(i)%rspheremp(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%rspheremp(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%rspheremp(np,np)', & + file=__FILE__, line=__LINE__) !Global degree of freedom (P-grid): allocate(elem(i)%gdofP(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%gdofP(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%gdofP(np,np)', & + file=__FILE__, line=__LINE__) !Coriolis term: allocate(elem(i)%fcor(np,np), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%fcor(np,np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%fcor(np,np)', & + file=__FILE__, line=__LINE__) !Index terms: !----------- + allocate(elem(i)%idxP%ia(npsq), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%idxP%ia(npsq) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%idxP%ia(npsq)', & + file=__FILE__, line=__LINE__) allocate(elem(i)%idxP%ja(npsq), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate elem%idxP%ja(npsq) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'elem%idxP%ja(npsq)', & + file=__FILE__, line=__LINE__) + !----------- end do diff --git a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 index 88232737..8a78061e 100644 --- a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 +++ 
b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 @@ -16,8 +16,7 @@ module fvm_control_volume_mod use dimensions_mod, only: nc, nhe, nlev, ntrac_d, qsize_d,ne, np, nhr, ns, nhc use dimensions_mod, only: fv_nphys, nhe_phys, nhr_phys, ns_phys, nhc_phys,fv_nphys use dimensions_mod, only: irecons_tracer - use string_utils, only: to_str - use cam_abortutils, only: endrun + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -287,142 +286,112 @@ subroutine allocate_physgrid_vars(fvm,par) do ie=1,nelemd allocate(fvm(ie)%phis_physgrid (fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%phis_physgrid(fv_nphys,fv_nphys) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%phis_physgrid(fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%vtx_cart_physgrid (4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%vtx_cart_physgrid(4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%vtx_cart_physgrid(4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%flux_orient_physgrid (2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%flux_orient_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%flux_orient_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%ifct_physgrid (1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%ifct_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%ifct_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%rot_matrix_physgrid (2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%rot_matrix_physgrid(2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%rot_matrix_physgrid(2,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,& 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%spherecentroid_physgrid(irecons_tracer-1,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%recons_metrics_physgrid (3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 
'fvm(ie)%recons_metrics_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%recons_metrics_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%recons_metrics_integral_physgrid(3,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%centroid_stretch_physgrid (7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%centroid_stretch_physgrid(7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%centroid_stretch_physgrid(7,1-nhe_phys:fv_nphys+nhe_phys,1-nhe_phys:fv_nphys+nhe_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%center_cart_physgrid(fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%area_sphere_physgrid(fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%ibase_physgrid(1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%halo_interp_weight_physgrid(1:ns_phys,1-nhr_phys:fv_nphys+nhr_phys,1:nhr_phys,2)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,& 1-nhe_phys:fv_nphys+nhe_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,'//& - '1-nhe_phys:fv_nphys+nhe_phys) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,'//& + '1-nhe_phys:fv_nphys+nhe_phys)',& + file=__FILE__, line=__LINE__) 
allocate(fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys )) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%vertex_recons_weights_physgrid(4,1:irecons_tracer-1,1-nhe_phys:fv_nphys+nhe_phys,'//& - '1-nhe_phys:fv_nphys+nhe_phys) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%norm_elem_coord_physgrid(2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%Dinv_physgrid ( 1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%Dinv_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%Dinv_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d))', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d))'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d))', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%fm(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%dp_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev)', & + file=__FILE__, line=__LINE__) end do end subroutine allocate_physgrid_vars @@ -452,169 +421,128 @@ subroutine allocate_fvm_dims(fvm) !fvm tracer mixing ratio: allocate(fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d) failed with stat: '//& - to_str(iret)) - end if + call 
check_allocate(iret, subname, & + 'fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%dp_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%dp_ref(nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%dp_ref(nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%dp_ref(nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%dp_ref_inverse(nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%dp_ref_inverse(nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%dp_ref_inverse(nlev)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%psc(nc,nc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%psc(nc,nc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%psc(nc,nc)', & + file=__FILE__, line=__LINE__) ! inverse area_sphere allocate(fvm(ie)%inv_area_sphere(nc,nc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%inv_area_sphere(nc,nc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%inv_area_sphere(nc,nc)', & + file=__FILE__, line=__LINE__) ! inverse area_sphere allocate(fvm(ie)%inv_se_area_sphere(nc,nc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%inv_se_area_sphere(nc,nc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%inv_se_area_sphere(nc,nc)', & + file=__FILE__, line=__LINE__) #ifdef waccm_debug allocate(fvm(ie)%CSLAM_gamma(nc,nc,nlev,4), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%CSLAM_gamma(nc,nc,nlev,4) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%CSLAM_gamma(nc,nc,nlev,4)', & + file=__FILE__, line=__LINE__) #endif allocate(fvm(ie)%displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%displ_max(1-nhc:nc+nhc,1-nhc:nc+nhc,4)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%flux_vec(2,1-nhc:nc+nhc,1-nhc:nc+nhc,4), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%flux_vec(2,1-nhc:nc+nhc,1-nhc:nc+nhc,4) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%flux_vec(2,1-nhc:nc+nhc,1-nhc:nc+nhc,4)', & + file=__FILE__, line=__LINE__) ! 
cartesian location of vertices for flux sides allocate(fvm(ie)%vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%vtx_cart(4,2,1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%flux_orient(2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%flux_orient(2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%flux_orient(2,1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) ! indicator function for non-existent cells allocate(fvm(ie)%ifct(1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%ifct(1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%ifct(1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%rot_matrix(2,2,1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) ! center of fvm cell in gnomonic coordinates allocate(fvm(ie)%center_cart(nc,nc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%center_cart(nc,nc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%center_cart(nc,nc)', & + file=__FILE__, line=__LINE__) ! spherical area of fvm cell allocate(fvm(ie)%area_sphere(nc,nc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%area_sphere(nc,nc) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%area_sphere(nc,nc)', & + file=__FILE__, line=__LINE__) ! centroids allocate(fvm(ie)%spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%spherecentroid(irecons_tracer-1,1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) ! pre-computed metric terms (for efficiency) allocate(fvm(ie)%recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%recons_metrics(3,1-nhe:nc+nhe,1-nhe:nc+nhe)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%recons_metrics_integral(3,1-nhe:nc+nhe,1-nhe:nc+nhe)', & + file=__FILE__, line=__LINE__) ! 
provide fixed interpolation points with respect to the arrival grid for reconstruction allocate(fvm(ie)%ibase(1-nh:nc+nh,1:nhr,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%ibase(1-nh:nc+nh,1:nhr,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fvm(ie)%ibase(1-nh:nc+nh,1:nhr,2)', & + file=__FILE__, line=__LINE__) allocate(fvm(ie)%halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%halo_interp_weight(1:ns,1-nh:nc+nh,1:nhr,2)', & + file=__FILE__, line=__LINE__) ! for finite-difference reconstruction allocate(fvm(ie)%centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%centroid_stretch(7,1-nhe:nc+nhe,1-nhe:nc+nhe)', & + file=__FILE__, line=__LINE__) ! pre-compute weights for reconstruction at cell vertices allocate(fvm(ie)%vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fvm(ie)%vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%vertex_recons_weights(4,1:irecons_tracer-1,1-nhe:nc+nhe,1-nhe:nc+nhe)', & + file=__FILE__, line=__LINE__) ! for mapping fvm2dyn allocate(fvm(ie)%norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fvm(ie)%norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fvm(ie)%norm_elem_coord(2,1-nhc:nc+nhc,1-nhc:nc+nhc)', & + file=__FILE__, line=__LINE__) end do diff --git a/src/dynamics/se/dycore/fvm_mapping.F90 b/src/dynamics/se/dycore/fvm_mapping.F90 index a563ade9..d5525aff 100644 --- a/src/dynamics/se/dycore/fvm_mapping.F90 +++ b/src/dynamics/se/dycore/fvm_mapping.F90 @@ -19,8 +19,7 @@ module fvm_mapping use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use perf_mod, only: t_startf, t_stopf - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -66,10 +65,9 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ character(len=*), parameter :: subname = 'phys2dyn_forcings_fvm (SE)' allocate(qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)', & + file=__FILE__, line=__LINE__) do ie=nets,nete do nq=1,thermodynamic_active_species_num @@ -90,21 +88,17 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ call t_startf('p2d-pg2:copying') nflds = 4+ntrac allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//& - ': allocate 
fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(fld_gll(np,np,nlev,3,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_gll(np,np,nlev,3,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_gll(np,np,nlev,3,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(llimiter(nflds), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate llimiter(nflds) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'llimiter(nflds)', & + file=__FILE__, line=__LINE__) fld_phys = -9.99E99_r8!xxx necessary? @@ -156,15 +150,12 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! nflds = thermodynamic_active_species_num allocate(fld_gll(np,np,nlev,nflds,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_gll(np,np,nlev,nflds,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_gll(np,np,nlev,nflds,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)', & + file=__FILE__, line=__LINE__) do ie=nets,nete ! @@ -205,20 +196,18 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! nflds is ft, fu, fv, + thermo species nflds = 3+thermodynamic_active_species_num allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(fld_gll(np,np,nlev,nflds,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_gll(np,np,nlev,nflds,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_gll(np,np,nlev,nflds,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(llimiter(nflds), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate llimiter(nflds) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'llimiter(nflds)', & + file=__FILE__, line=__LINE__) llimiter(1:nflds) = .false. 
do ie=nets,nete @@ -483,49 +472,40 @@ subroutine dyn2phys_all_vars(nets,nete,elem,fvm,& if (nc.ne.fv_nphys) then save_max_overlap = 4 !max number of mass overlap areas between phys and fvm grids allocate(save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//& - ': allocate save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_air_mass_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_q_overlap(save_max_overlap,fv_nphys,fv_nphys,nlev,num_trac,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_q_phys(fv_nphys,fv_nphys,nlev,num_trac,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_dp_phys(fv_nphys,fv_nphys,nlev,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_overlap_area(save_max_overlap,fv_nphys,fv_nphys,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_num_overlap(fv_nphys,fv_nphys,nlev,nets:nete)', & + file=__FILE__, line=__LINE__) save_num_overlap = 0 allocate(save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//& - ': allocate save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'save_overlap_idx(2,save_max_overlap,fv_nphys,fv_nphys,nets:nete)', & + file=__FILE__, line=__LINE__) end if @@ -706,22 +686,16 @@ subroutine setup_interpdata_for_gll_to_phys_vec_mapping(interpdata,interp_p) gp_quadrature = gausslobatto(np) call interpolate_create(gp_quadrature,interp_p) allocate(interpdata%interp_xy(ngrid), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate interpdata%interp_xy(ngrid) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'interpdata%interp_xy(ngrid)', & + file=__FILE__, line=__LINE__) allocate(interpdata%ilat(ngrid), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate interpdata%ilat(ngrid) 
failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'interpdata%ilat(ngrid)', & + file=__FILE__, line=__LINE__) allocate(interpdata%ilon(ngrid), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate interpdata%ilon(ngrid) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'interpdata%ilon(ngrid)', & + file=__FILE__, line=__LINE__) ! !WARNING: THIS CODE INTERFERES WITH LAT-LON OUTPUT @@ -1051,28 +1025,24 @@ subroutine phys2fvm(ie,k,fvm,fq_phys,fqdp_fvm,num_trac) character(len=*), parameter :: subname = 'phys2fvm (SE)' allocate(dq_min_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate dq_min_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'dq_min_overlap(save_max_overlap,fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(dq_max_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate dq_max_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'dq_max_overlap(save_max_overlap,fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(dq_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate dq_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'dq_overlap(save_max_overlap,fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) allocate(fq_phys_overlap (save_max_overlap,fv_nphys,fv_nphys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fq_phys_overlap(save_max_overlap,fv_nphys,fv_nphys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fq_phys_overlap(save_max_overlap,fv_nphys,fv_nphys)', & + file=__FILE__, line=__LINE__) do m_cnst=1,num_trac fqdp_fvm(:,:,m_cnst) = 0.0_r8 diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index e198f320..9c9aefb8 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -191,8 +191,7 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, use edge_mod, only: initghostbuffer, freeghostbuffer, ghostpack, ghostunpack use fvm_reconstruction_mod, only: extend_panel_interpolate - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use dimensions_mod, only: fv_nphys,nhr,nhr_phys,nhc,nhc_phys,ns,ns_phys,nhe_phys,nc use perf_mod, only : t_startf, t_stopf ! 
_EXTERNAL @@ -247,10 +246,9 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, nh_phys = nhr_phys allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)', & + file=__FILE__, line=__LINE__) do ie=nets,nete do itr=1,num_flds @@ -272,10 +270,9 @@ subroutine fill_halo_and_extend_panel(elem,fvm,fld,hybrid,nets,nete,nphys,nhcc, nht_phys= nhe_phys+nhr nh_phys = nhr allocate(fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_tmp(1-nht_phys:nphys+nht_phys,1-nht_phys:nphys+nht_phys)', & + file=__FILE__, line=__LINE__) do ie=nets,nete do itr=1,num_flds diff --git a/src/dynamics/se/dycore/gridgraph_mod.F90 b/src/dynamics/se/dycore/gridgraph_mod.F90 index 3e097b00..7d451956 100644 --- a/src/dynamics/se/dycore/gridgraph_mod.F90 +++ b/src/dynamics/se/dycore/gridgraph_mod.F90 @@ -74,8 +74,7 @@ module GridGraph_mod subroutine allocate_gridvertex_nbrs(vertex, dim) - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate type (GridVertex_t), intent(inout) :: vertex integer, optional, intent(in) :: dim @@ -91,25 +90,20 @@ subroutine allocate_gridvertex_nbrs(vertex, dim) end if allocate(vertex%nbrs(num), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate vertex%nbrs(num) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'vertex%nbrs(num)', & + file=__FILE__, line=__LINE__) allocate(vertex%nbrs_face(num), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate vertex%nbrs_face(num) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'vertex%nbrs_face(num)', & + file=__FILE__, line=__LINE__) allocate(vertex%nbrs_wgt(num), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate vertex%nbrs_wgt(num) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'vertex%nbrs_wgt(num)', & + file=__FILE__, line=__LINE__) allocate(vertex%nbrs_wgt_ghost(num), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate vertex%nbrs_wgt_ghost(num) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'vertex%nbrs_wgt_ghost(num)', & + file=__FILE__, line=__LINE__) end subroutine allocate_gridvertex_nbrs !====================================================================== @@ -309,8 +303,7 @@ end subroutine PrintChecksum subroutine CreateSubGridGraph(Vertex, SVertex, local2global) - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none @@ -330,9 +323,8 @@ subroutine CreateSubGridGraph(Vertex, SVertex, local2global) nelem_s = SiZE(SVertex) allocate(global2local(nelem), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate global2local(nelem) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'global2local(nelem)', & + file=__FILE__, line=__LINE__) global2local(:) = 0 do i=1,nelem_s diff --git 
a/src/dynamics/se/dycore/hybrid_mod.F90 b/src/dynamics/se/dycore/hybrid_mod.F90 index 941256b9..f167435a 100644 --- a/src/dynamics/se/dycore/hybrid_mod.F90 +++ b/src/dynamics/se/dycore/hybrid_mod.F90 @@ -135,8 +135,7 @@ end function config_thread_region_hybrid function config_thread_region_par(par,region_name) result(hybrid) - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate type (parallel_t) , intent(in) :: par character(len=*), intent(in) :: region_name @@ -157,10 +156,8 @@ function config_thread_region_par(par,region_name) result(hybrid) region_num_threads = 1 if ( .NOT. allocated(work_pool_horz) ) then allocate(work_pool_horz(horz_num_threads,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate work_pool_horz(horz_num_threads,2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'work_pool_horz(horz_num_threads,2)', & + file=__FILE__, line=__LINE__) end if call set_thread_ranges_1D ( work_pool_horz, ibeg_range, iend_range, ithr ) hybrid%ibeg = 1; hybrid%iend = nelemd_save @@ -220,8 +217,7 @@ end function config_thread_region_par subroutine init_loop_ranges(nelemd) - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate integer, intent(in) :: nelemd integer :: ith, beg_index, end_index @@ -234,10 +230,8 @@ subroutine init_loop_ranges(nelemd) nelemd_save=nelemd if ( .NOT. allocated(work_pool_horz) ) then allocate(work_pool_horz(horz_num_threads,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate work_pool_horz(horz_num_threads,2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'work_pool_horz(horz_num_threads,2)', & + file=__FILE__, line=__LINE__) end if if(nelemdshr_kind_r8 use physconst, only: PI use control_mod, only: MAX_FILE_LEN - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use netcdf, only: nf90_strerror, nf90_open, nf90_close use netcdf, only: NF90_NOWRITE, nf90_NoErr @@ -389,10 +388,9 @@ subroutine create_index_table(index_table, element_nodes) !Create an index table so that we can find neighbors on O(n) ! 
so for each node, we want to know which elements it is part of allocate(index_table(p_number_nodes, max_elements_attached_to_node + 1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate (index_table(p_number_nodes,max_elements_attached_to_node+1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'index_table(p_number_nodes,max_elements_attached_to_node+1)',& + file=__FILE__, line=__LINE__) !the last column in the index table is a count of the number of elements index_table = 0 @@ -676,19 +674,16 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) if (ne2 par%nprocs color = iam / npes_homme @@ -191,10 +188,8 @@ function initmpi(npes_homme) result(par) call MPI_Get_Processor_Name(my_name, namelen, ierr) allocate(the_names(par%nprocs), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate the_names(par%nprocs) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'the_names(par%nprocs)', & + file=__FILE__, line=__LINE__) do i = 1, par%nprocs the_names(i)(:) = '' diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index b3d099f6..1db6dc8c 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -2,8 +2,7 @@ module prim_advance_mod use shr_kind_mod, only: r8=>shr_kind_r8 use edgetype_mod, only: EdgeBuffer_t use perf_mod, only: t_startf, t_stopf, t_adj_detailf !, t_barrierf _EXTERNAL - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use parallel_mod, only: parallel_t, HME_BNDRY_P2P!,HME_BNDRY_A2A use thread_mod , only: horz_num_threads, vert_num_threads, omp_set_nested @@ -37,9 +36,8 @@ subroutine prim_advance_init(par, elem) if(.not. allocated(ur_weights)) then allocate(ur_weights(qsplit), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ur_weights(qsplit) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'ur_weights(qsplit)', & + file=__FILE__, line=__LINE__) end if ur_weights(:)=0.0_r8 @@ -344,10 +342,8 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu if (ntrac>0) then allocate(ftmp_fvm(nc,nc,nlev,ntrac,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)', & + file=__FILE__, line=__LINE__) end if if (ftype==0) then diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 index 44ec3955..c9a58afa 100644 --- a/src/dynamics/se/dycore/prim_advection_mod.F90 +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -64,7 +64,7 @@ module prim_advection_mod subroutine Prim_Advec_Init1(par, elem) use dimensions_mod, only: nlev, qsize, nelemd,ntrac use parallel_mod, only: parallel_t, boundaryCommMethod - use string_utils, only: to_str + use cam_abortutils, only: check_allocate type(parallel_t) :: par type (element_t) :: elem(:) ! @@ -112,14 +112,12 @@ subroutine Prim_Advec_Init1(par, elem) ! 
this static array is shared by all threads, so dimension for all threads (nelemd), not nets:nete: allocate(qmin(nlev,qsize,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate qmin(nlev,qsize,nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'qmin(nlev,qsize,nelemd)', & + file=__FILE__, line=__LINE__) allocate(qmax(nlev,qsize,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate qmax(nlev,qsize,nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'qmax(nlev,qsize,nelemd)', & + file=__FILE__, line=__LINE__) end subroutine Prim_Advec_Init1 diff --git a/src/dynamics/se/dycore/prim_driver_mod.F90 b/src/dynamics/se/dycore/prim_driver_mod.F90 index db14697f..e5431ac2 100644 --- a/src/dynamics/se/dycore/prim_driver_mod.F90 +++ b/src/dynamics/se/dycore/prim_driver_mod.F90 @@ -664,7 +664,7 @@ subroutine get_global_ave_surface_pressure(elem, global_ave_ps_inic) use global_norms_mod , only: global_integral use hybrid_mod , only: config_thread_region, get_loop_ranges, hybrid_t use parallel_mod , only: par - use string_utils , only: to_str + use cam_abortutils , only: check_allocate type (element_t) , intent(in) :: elem(:) real (kind=r8), intent(out) :: global_ave_ps_inic @@ -682,9 +682,8 @@ subroutine get_global_ave_surface_pressure(elem, global_ave_ps_inic) hybrid = config_thread_region(par,'serial') call get_loop_ranges(hybrid,ibeg=nets,iend=nete) allocate(tmp(np,np,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate tmp(np,np,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'tmp(np,np,nets:nete)', & + file=__FILE__, line=__LINE__) do ie=nets,nete tmp(:,:,ie)=elem(ie)%state%psdry(:,:) diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index a65f5a06..cceba34f 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -45,8 +45,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use schedtype_mod, only: schedule use schedule_mod, only: genEdgeSched use prim_advection_mod, only: prim_advec_init1 - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate use parallel_mod, only: parallel_t, syncmp, global_shared_buf, nrepro_vars use spacecurve_mod, only: genspacepart use dof_mod, only: global_dof, CreateUniqueIndex, SetElemOffset @@ -110,14 +109,12 @@ subroutine prim_init1(elem, fvm, par, Tl) end if allocate(GridVertex(nelem), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate GridVertex(nelem) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'GridVertex(nelem)', & + file=__FILE__, line=__LINE__) allocate(GridEdge(nelem_edge), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate GridEdge(nelem_edge) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'GridEdge(nelem_edge)', & + file=__FILE__, line=__LINE__) do j = 1, nelem call allocate_gridvertex_nbrs(GridVertex(j)) @@ -154,14 +151,12 @@ subroutine prim_init1(elem, fvm, par, Tl) ! given partition, count number of local element descriptors ! 
=========================================================== allocate(MetaVertex(1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate MetaVertex(1) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'MetaVertex(1)', & + file=__FILE__, line=__LINE__) allocate(Schedule(1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Schedule(1) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Schedule(1)', & + file=__FILE__, line=__LINE__) nelem_edge = SIZE(GridEdge) @@ -182,9 +177,8 @@ subroutine prim_init1(elem, fvm, par, Tl) if (nelemd > 0) then allocate(elem(nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate elem(nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'elem(nelemd)', & + file=__FILE__, line=__LINE__) call allocate_element_dims(elem) call allocate_element_desc(elem) @@ -192,9 +186,8 @@ subroutine prim_init1(elem, fvm, par, Tl) if (fv_nphys > 0) then allocate(fvm(nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate fvm(nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'fvm(nelemd)', & + file=__FILE__, line=__LINE__) call allocate_fvm_dims(fvm) call allocate_physgrid_vars(fvm,par) @@ -202,10 +195,8 @@ subroutine prim_init1(elem, fvm, par, Tl) ! Even if fvm not needed, still desirable to allocate it as empty ! so it can be passed as a (size zero) array rather than pointer. allocate(fvm(0), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate fvm(0) failed with stat: '//to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'fvm(0)', & + file=__FILE__, line=__LINE__) end if ! ==================================================== @@ -215,10 +206,8 @@ subroutine prim_init1(elem, fvm, par, Tl) call genEdgeSched(par, elem, par%rank+1, Schedule(1), MetaVertex(1)) allocate(global_shared_buf(nelemd, nrepro_vars), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate global_shared_buf(nelemd, nrepro_vars)'//& - 'failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'global_shared_buf(nelemd, nrepro_vars)', & + file=__FILE__, line=__LINE__) global_shared_buf = 0.0_r8 @@ -286,10 +275,8 @@ subroutine prim_init1(elem, fvm, par, Tl) end if call mass_matrix(par, elem) allocate(aratio(nelemd,1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate aratio(nelemd,1) failed with stat: '//to_str(ierr)) - end if - + call check_allocate(ierr, subname, 'aratio(nelemd,1)', & + file=__FILE__, line=__LINE__) if (topology == "cube") then area = 0 diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 index 4ff733f0..b01745fe 100644 --- a/src/dynamics/se/dycore/prim_state_mod.F90 +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -29,7 +29,7 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) use time_mod, only: tstep use control_mod, only: rsplit, qsplit use perf_mod, only: t_startf, t_stopf - use cam_abortutils, only: endrun + use cam_abortutils, only: check_allocate use string_utils, only: to_str type (element_t), intent(inout) :: elem(:) type (TimeLevel_t), target, intent(in) :: tl @@ -67,54 +67,44 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) !------------- vmax = 11+2*max(qsize_d,ntrac_d) allocate(varname(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate varname(vmax) failed with stat: '//to_str(iret)) 
- end if + call check_allocate(iret, subname, 'varname(vmax)', & + file=__FILE__, line=__LINE__) allocate(min_local(nets:nete, vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate min_local(nets:nete,vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'min_local(nets:nete,vmax)', & + file=__FILE__, line=__LINE__) allocate(max_local(nets:nete, vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate max_local(nets:nete,vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'max_local(nets:nete,vmax)', & + file=__FILE__, line=__LINE__) allocate(min_p(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate min_p(vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'min_p(vmax)', & + file=__FILE__, line=__LINE__) allocate(max_p(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate max_p(vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'max_p(vmax)', & + file=__FILE__, line=__LINE__) allocate(mass(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mass(vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'mass(vmax)', & + file=__FILE__, line=__LINE__) allocate(mass_chg(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mass_chg(vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'mass_chg(vmax)', & + file=__FILE__, line=__LINE__) allocate(tmp_gll(np,np,vmax,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate tmp_gll(np,np,vmax,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'tmp_gll(np,np,vmax,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(tmp_mass(vmax), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate tmp_mass(vmax) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'tmp_mass(vmax)', & + file=__FILE__, line=__LINE__) allocate(tmp_fvm(nc,nc,vmax,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate tmp_fvm(nc,nc,vmax,nets:nete) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'tmp_fvm(nc,nc,vmax,nets:nete)', & + file=__FILE__, line=__LINE__) !------------- !dynamics variables in n0 are at time = 'time': time=tl%nstep*tstep diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90 index 9f1d42a0..134d7918 100644 --- a/src/dynamics/se/dycore/quadrature_mod.F90 +++ b/src/dynamics/se/dycore/quadrature_mod.F90 @@ -1,8 +1,7 @@ #undef _GAUSS_TABLE module quadrature_mod use shr_kind_mod, only: r8=>shr_kind_r8 - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: check_allocate implicit none private @@ -52,16 +51,12 @@ function gauss(npts) result(gs) character(len=*), parameter :: subname = 'gauss (SE)' allocate(gs%points(npts), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate gs%points(npts) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'gs%points(npts)', & + file=__FILE__, line=__LINE__) allocate(gs%weights(npts), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate gs%weights(npts) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'gs%weights(npts)', & + file=__FILE__, line=__LINE__) 
gs%points=gauss_pts(npts) gs%weights=gauss_wts(npts,gs%points) @@ -298,16 +293,12 @@ function gausslobatto(npts) result(gll) character(len=*), parameter :: subname = 'gausslobatto (SE)' allocate(gll%points(npts), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate gll%points(npts) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'gll%points(npts)', & + file=__FILE__, line=__LINE__) allocate(gll%weights(npts), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate gll%weights(npts) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'gll%weights(npts)', & + file=__FILE__, line=__LINE__) gll%points=gausslobatto_pts(npts) gll%weights=gausslobatto_wts(npts,gll%points) diff --git a/src/dynamics/se/dycore/reduction_mod.F90 b/src/dynamics/se/dycore/reduction_mod.F90 index 5a777964..aa0043ba 100644 --- a/src/dynamics/se/dycore/reduction_mod.F90 +++ b/src/dynamics/se/dycore/reduction_mod.F90 @@ -2,8 +2,7 @@ module reduction_mod use shr_kind_mod, only: r8=>shr_kind_r8 use mpi, only: mpi_sum, mpi_min, mpi_max, mpi_real8, mpi_integer use mpi, only: mpi_success - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -201,10 +200,8 @@ subroutine InitReductionBuffer_int_1d(red,len) red%len = len allocate(red%buf(len), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate red%buf(len) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'red%buf(len)', & + file=__FILE__, line=__LINE__) red%buf = 0 red%ctr = 0 @@ -231,10 +228,8 @@ subroutine InitReductionBuffer_r_1d(red,len) red%len = len allocate(red%buf(len), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate red%buf(len) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'red%buf(len)', & + file=__FILE__, line=__LINE__) red%buf = 0.0_R8 red%ctr = 0 @@ -261,10 +256,8 @@ subroutine InitReductionBuffer_ordered_1d(red,len,nthread) red%len = len allocate(red%buf(len,nthread+1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate red%buf(len,nthread+1) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'red%buf(len,nthread+1)', & + file=__FILE__, line=__LINE__) red%buf = 0.0_R8 red%ctr = 0 diff --git a/src/dynamics/se/dycore/schedule_mod.F90 b/src/dynamics/se/dycore/schedule_mod.F90 index fa1c6628..5f846111 100644 --- a/src/dynamics/se/dycore/schedule_mod.F90 +++ b/src/dynamics/se/dycore/schedule_mod.F90 @@ -39,8 +39,7 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) use metagraph_mod, only: metavertex_t use dimensions_mod, only: nelem, max_neigh_edges use gridgraph_mod, only: gridvertex_t, gridedge_t, assignment ( = ) - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: check_allocate use shr_kind_mod, only: shr_kind_cs use parallel_mod, only: nComPoints, rrequest, srequest, status, npackpoints @@ -97,10 +96,8 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) ! so no need to put it in the schedule data-structure ! 
===================================================== allocate(Global2Local(nelem), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Global2Local(nelem) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Global2Local(nelem)', & + file=__FILE__, line=__LINE__) if(Debug) write(iulog,*)'genEdgeSched: point #1' iSched = PartNumber @@ -128,41 +125,29 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) ! Temporary array to calculate the Buffer Slot allocate(tmpP(2,nedges+1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate tmpP(2,nedges+1) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'tmpP(2,nedges+1)', & + file=__FILE__, line=__LINE__) allocate(tmpS(2,nedges+1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate tmpS(2,nedges+1) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'tmpS(2,nedges+1)', & + file=__FILE__, line=__LINE__) allocate(tmpP_ghost(2,nedges+1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate tmpP_ghost(2,nedges+1) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'tmpP_ghost(2,nedges+1)', & + file=__FILE__, line=__LINE__) ! Allocate all the cycle structures allocate(LSchedule%SendCycle(nedges), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%SendCycle(nedges) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%SendCycle(nedges)', & + file=__FILE__, line=__LINE__) allocate(LSchedule%RecvCycle(nedges), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%RecvCycle(nedges) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%RecvCycle(nedges)', & + file=__FILE__, line=__LINE__) allocate(LSchedule%MoveCycle(1), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%MoveCycle(1) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%MoveCycle(1)', & + file=__FILE__, line=__LINE__) ! Initialize the schedules... LSchedule%MoveCycle(1)%ptrP = 0 @@ -174,22 +159,16 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) ! 
Allocate and initalized the index translation arrays Global2Local = -1 allocate(LSchedule%Local2Global(nelemd0), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%Local2Global(nelemd0) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%Local2Global(nelemd0)', & + file=__FILE__, line=__LINE__) allocate(LSchedule%pIndx(max_neigh_edges*nelemd0), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%pIndx(max_neigh_edges*nelemd0) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%pIndx(max_neigh_edges*nelemd0)', & + file=__FILE__, line=__LINE__) allocate(LSchedule%gIndx(max_neigh_edges*nelemd0), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%gIndx(max_neigh_edges*nelemd0) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%gIndx(max_neigh_edges*nelemd0)', & + file=__FILE__, line=__LINE__) LSchedule%pIndx(:)%elemId = -1 LSchedule%pIndx(:)%edgeId = -1 @@ -355,22 +334,16 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) nSend = nedges nRecv = nedges allocate(Rrequest(nRecv), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Rrequest(nRecv) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Rrequest(nRecv)', & + file=__FILE__, line=__LINE__) allocate(Srequest(nSend), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate Srequest(nSend) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'Srequest(nSend)', & + file=__FILE__, line=__LINE__) allocate(status(MPI_STATUS_SIZE,nRecv), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate status(MPI_STATUS_SIZE,nRecv) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'status(MPI_STATUS_SIZE,nRecv)', & + file=__FILE__, line=__LINE__) !=============================================================== ! Number of communication points ... 
to be used later to @@ -388,10 +361,8 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) call MPI_Comm_rank(par%intracomm, par%intracommrank, ierr) allocate(intracommranks(par%intracommsize), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate intracommranks(par%intracommsize) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'intracommranks(par%intracommsize)', & + file=__FILE__, line=__LINE__) call MPI_Allgather(par%rank,1,MPIinteger_t,intracommranks,1,MPIinteger_t,par%intracomm,ierr) @@ -428,67 +399,55 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) LSchedule%nIntra = numIntra allocate(srcFull(nRecv), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcFull(nRecv) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcFull(nRecv)', & + file=__FILE__, line=__LINE__) allocate(srcWeightFull(nRecv), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcWeightFull(nRecv) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcWeightFull(nRecv)', & + file=__FILE__, line=__LINE__) allocate(destFull(nSend), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destFull(nSend) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destFull(nSend)', & + file=__FILE__, line=__LINE__) allocate(destWeightFull(nSend), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destWeightFull(nSend) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destWeightFull(nSend)', & + file=__FILE__, line=__LINE__) if(numInter>0) then allocate(srcInter(numInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcInter(numInter) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcInter(numInter)', & + file=__FILE__, line=__LINE__) allocate(srcWeightInter(numInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcWeightInter(numInter) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcWeightInter(numInter)', & + file=__FILE__, line=__LINE__) allocate(destInter(numInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destInter(numInter) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destInter(numInter)', & + file=__FILE__, line=__LINE__) allocate(destWeightInter(numInter), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destWeightInter(numInter) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destWeightInter(numInter)', & + file=__FILE__, line=__LINE__) endif if(numIntra>0) then allocate(srcIntra(numIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcIntra(numIntra) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcIntra(numIntra)', & + file=__FILE__, line=__LINE__) allocate(srcWeightIntra(numIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate srcWeightIntra(numIntra) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'srcWeightIntra(numIntra)', & + file=__FILE__, line=__LINE__) allocate(destIntra(numIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destIntra(numIntra) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destIntra(numIntra)', & + file=__FILE__, 
line=__LINE__) allocate(destWeightIntra(numIntra), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate destWeightIntra(numIntra) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'destWeightIntra(numIntra)', & + file=__FILE__, line=__LINE__) endif icIntra=0 @@ -537,13 +496,13 @@ subroutine genEdgeSched(par,elem, PartNumber,LSchedule,MetaVertex) print *,subname,': Error after call to MPI_dist_graph_create_adjacent(FULL) ',errorstring endif allocate(LSchedule%destFull(nSend), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%destFull(nSend) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%destFull(nSend)', & + file=__FILE__, line=__LINE__) + allocate(LSchedule%srcFull(nRecv), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate LSchedule%srcFull(nRecv) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'LSchedule%srcFull(nRecv)', & + file=__FILE__, line=__LINE__) + LSchedule%destFull(:) = destFull(:) LSchedule%srcFull(:) = srcFull(:) ! construct the FULL communication -group- (for one-sided operations): diff --git a/src/dynamics/se/dycore/spacecurve_mod.F90 b/src/dynamics/se/dycore/spacecurve_mod.F90 index 6c5955a3..3ebcdd36 100644 --- a/src/dynamics/se/dycore/spacecurve_mod.F90 +++ b/src/dynamics/se/dycore/spacecurve_mod.F90 @@ -1,7 +1,6 @@ module spacecurve_mod use cam_logfile, only: iulog - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate implicit none private @@ -915,10 +914,8 @@ function Factor(num) result(res) tmp = num tmp2 = log2(num) allocate(res%factors(tmp2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate res%factors(tmp2)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'res%factors(tmp2)', & + file=__FILE__, line=__LINE__) n=0 !----------------------- @@ -1037,17 +1034,13 @@ subroutine GenSpaceCurve(Mesh) if(verbose) write(iulog,*)'GenSpacecurve: level is ',level allocate(ordered(gridsize,gridsize), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ordered(gridsize,gridsize)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'ordered(gridsize,gridsize)', & + file=__FILE__, line=__LINE__) ! Setup the working arrays for the traversal allocate(pos(0:dim-1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate pos(0:dim-1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'pos(0:dim-1)', & + file=__FILE__, line=__LINE__) ! The array ordered will contain the visitation order ordered(:,:) = 0 diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index f82013ed..33f74e9e 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -9,6 +9,7 @@ module dyn_comp ! cnst_read_iv, qmin, cnst_type, tottnam, & ! 
cnst_is_a_water_species use constituents, only: pcnst +use vert_coord, only: pver use cam_control_mod, only: initial_run, simple_phys use cam_initfiles, only: initial_file_get_id, topo_file_get_id, pertlim !use phys_control, only: use_gw_front, use_gw_front_igw, waccmx_is @@ -34,9 +35,8 @@ module dyn_comp use shr_infnan_mod, only: shr_infnan_isnan use cam_logfile, only: iulog -use cam_abortutils, only: endrun +use cam_abortutils, only: endrun, check_allocate use cam_map_utils, only: iMap -use string_utils, only: to_str use shr_sys_mod, only: shr_sys_flush use parallel_mod, only: par @@ -432,7 +432,7 @@ subroutine dyn_readnl(NLFileName) write_restart_unstruct = se_write_restart_unstruct if (se_kmin_jet<0 ) kmin_jet = 1 - if (se_kmax_jet<0 ) kmax_jet = nlev + if (se_kmax_jet<0 ) kmax_jet = pver if (masterproc) then write(iulog, '(a,i0)') 'dyn_readnl: se_ftype = ',ftype @@ -662,26 +662,22 @@ subroutine dyn_init(dyn_in, dyn_out) ! Now allocate and set condenstate vars allocate(cnst_name_gll(qsize), stat=iret) ! constituent names for gll tracers - if (iret /= 0) then - call endrun(subname//': allocate cnst_name_gll(qsize) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'cnst_name_gll(qsize)', & + file=__FILE__, line=__LINE__) allocate(cnst_longname_gll(qsize), stat=iret) ! long name of constituents for gll tracers - if (iret /= 0) then - call endrun(subname//': allocate cnst_longname_gll(qsize) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'cnst_longname_gll(qsize)', & + file=__FILE__, line=__LINE__) allocate(kord_tr(qsize), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kord_tr(qsize) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'kord_tr(qsize)', & + file=__FILE__, line=__LINE__) kord_tr(:) = vert_remap_tracer_alg if (ntrac>0) then allocate(kord_tr_cslam(ntrac), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate kord_tr_cslam(ntrac) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'kord_tr_cslam(ntrac)', & + file=__FILE__, line=__LINE__) kord_tr_cslam(:) = vert_remap_tracer_alg end if @@ -1037,14 +1033,12 @@ subroutine dyn_run(dyn_state) ldiag = .false. 
if (ldiag) then allocate(ps_before(np,np,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ps_before(np,np,nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'ps_before(np,np,nelemd)', & + file=__FILE__, line=__LINE__) allocate(abs_ps_tend(np,np,nelemd), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate abs_ps_tend(np,np,nelemd) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'abs_ps_tend(np,np,nelemd)', & + file=__FILE__, line=__LINE__) end if @@ -1278,10 +1272,8 @@ subroutine read_inidat(dyn_in) end if allocate(qtmp(np,np,nlev,nelemd,pcnst), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate qtmp(np,np,nlev,nelemd,pcnst) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'qtmp(np,np,nlev,nelemd,pcnst)', & + file=__FILE__, line=__LINE__) qtmp = 0._r8 @@ -1289,9 +1281,8 @@ subroutine read_inidat(dyn_in) nullify(ldof) call cam_grid_get_gcid(cam_grid_id(ini_grid_name), ldof) allocate(pmask(npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pmask(npsq*nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pmask(npsq*nelemd)', & + file=__FILE__, line=__LINE__) pmask(:) = (ldof /= 0) @@ -1299,14 +1290,12 @@ subroutine read_inidat(dyn_in) latvals_deg => cam_grid_get_latvals(cam_grid_id(ini_grid_name)) lonvals_deg => cam_grid_get_lonvals(cam_grid_id(ini_grid_name)) allocate(latvals(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate latvals(np*np*nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'latvals(np*np*nelemd)', & + file=__FILE__, line=__LINE__) allocate(lonvals(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate lonvals(np*np*nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'lonvals(np*np*nelemd)', & + file=__FILE__, line=__LINE__) latvals(:) = latvals_deg(:)*deg2rad lonvals(:) = lonvals_deg(:)*deg2rad @@ -1326,9 +1315,8 @@ subroutine read_inidat(dyn_in) ! PHIS has already been set by set_phis. Get local copy for ! possible use in setting T and PS in the analytic IC code. allocate(phis_tmp(npsq,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate phis_tmp(npsq,nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'phis_tmp(npsq,nelemd)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd k = 1 @@ -1342,9 +1330,8 @@ subroutine read_inidat(dyn_in) inic_wet = .false. allocate(glob_ind(npsq * nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate glob_ind(npsq*nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'glob_ind(npsq*nelemd)', & + file=__FILE__, line=__LINE__) j = 1 do ie = 1, nelemd @@ -1357,16 +1344,13 @@ subroutine read_inidat(dyn_in) ! 
First, initialize all the variables, then assign allocate(dbuf4(npsq, nlev, nelemd, (qsize + 4)), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dbuf4(npsq,nlev,nelemd,(qsize+4)) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dbuf4(npsq,nlev,nelemd,(qsize+4))', & + file=__FILE__, line=__LINE__) dbuf4 = 0.0_r8 allocate(m_ind(qsize), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate m_ind(qsize) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'm_ind(qsize)', & + file=__FILE__, line=__LINE__) do m_cnst = 1, qsize m_ind(m_cnst) = m_cnst @@ -1424,14 +1408,12 @@ subroutine read_inidat(dyn_in) ! Read ICs from file. Assume all fields in the initial file are on the GLL grid. allocate(dbuf2(npsq,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dbuf2(npsq,nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dbuf2(npsq,nelemd)', & + file=__FILE__, line=__LINE__) allocate(dbuf3(npsq,nlev,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dbuf3(npsq,nlev,nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dbuf3(npsq,nlev,nelemd)', & + file=__FILE__, line=__LINE__) ! Check that number of columns in IC file matches grid definition. call check_file_layout(fh_ini, elem, dyn_cols, 'ncdata', .true., dimname) @@ -1523,9 +1505,8 @@ subroutine read_inidat(dyn_in) call random_seed(size=rndm_seed_sz) allocate(rndm_seed(rndm_seed_sz), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rndm_seed(rndm_seed_sz) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rndm_seed(rndm_seed_sz)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd ! seed random number generator based on element ID @@ -1590,9 +1571,8 @@ subroutine read_inidat(dyn_in) end do allocate(dbuf3(npsq,nlev,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate dbuf3(npsq,nlev,nelemd) failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'dbuf3(npsq,nlev,nelemd)', & + file=__FILE__, line=__LINE__) do m_cnst = 1, pcnst @@ -1690,10 +1670,8 @@ subroutine read_inidat(dyn_in) end if allocate(factor_array(np,np,nlev,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate factor_array(np,np,nlev,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'factor_array(np,np,nlev,nelemd)', & + file=__FILE__, line=__LINE__) ! ! 
compute: factor_array = 1/(1-sum(q)) @@ -1991,19 +1969,15 @@ subroutine set_phis(dyn_in) end if allocate(phis_tmp(npsq,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate phis_tmp(npsq,nelemd) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'phis_tmp(npsq,nelemd)', & + file=__FILE__, line=__LINE__) phis_tmp = 0.0_r8 if (fv_nphys > 0) then allocate(phis_phys_tmp(fv_nphys**2,nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate phis_phys_tmp(fv_nphys**2,nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'phis_phys_tmp(fv_nphys**2,nelemd)', & + file=__FILE__, line=__LINE__) phis_phys_tmp = 0.0_r8 do ie=1,nelemd @@ -2018,10 +1992,8 @@ subroutine set_phis(dyn_in) nullify(ldof) call cam_grid_get_gcid(cam_grid_id('GLL'), ldof) allocate(pmask(npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pmask(npsq*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pmask(npsq*nelemd)', & + file=__FILE__, line=__LINE__) pmask(:) = (ldof /= 0) deallocate(ldof) @@ -2079,25 +2051,19 @@ subroutine set_phis(dyn_in) latvals_deg => cam_grid_get_latvals(cam_grid_id('GLL')) lonvals_deg => cam_grid_get_lonvals(cam_grid_id('GLL')) allocate(latvals(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate latvals(np*np*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'latvals(np*np*nelemd)', & + file=__FILE__, line=__LINE__) allocate(lonvals(np*np*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate lonvals(np*np*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'lonvals(np*np*nelemd)', & + file=__FILE__, line=__LINE__) latvals(:) = latvals_deg(:)*deg2rad lonvals(:) = lonvals_deg(:)*deg2rad allocate(glob_ind(npsq*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate glob_ind(npsq*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'glob_ind(npsq*nelemd)', & + file=__FILE__, line=__LINE__) j = 1 do ie = 1, nelemd @@ -2115,16 +2081,12 @@ subroutine set_phis(dyn_in) ! initialize PHIS on physgrid allocate(latvals_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate latvals_phys(fv_nphys*fv_nphys*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'latvals_phys(fv_nphys*fv_nphys*nelemd)', & + file=__FILE__, line=__LINE__) allocate(lonvals_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate lonvals_phys(fv_nphys*fv_nphys*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'lonvals_phys(fv_nphys*fv_nphys*nelemd)', & + file=__FILE__, line=__LINE__) indx = 1 do ie = 1, nelemd @@ -2138,17 +2100,13 @@ subroutine set_phis(dyn_in) end do allocate(pmask_phys(fv_nphys*fv_nphys*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate pmask_phys(fv_nphys*fv_nphys*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'pmask_phys(fv_nphys*fv_nphys*nelemd)', & + file=__FILE__, line=__LINE__) pmask_phys(:) = .true. 
allocate(glob_ind(fv_nphys*fv_nphys*nelemd), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate glob_ind(fv_nphys*fv_nphys*nelemd)'//& - ' failed with stat: '//to_str(ierr)) - end if + call check_allocate(ierr, subname, 'glob_ind(fv_nphys*fv_nphys*nelemd)', & + file=__FILE__, line=__LINE__) j = 1 do ie = 1, nelemd @@ -2482,17 +2440,13 @@ subroutine map_phis_from_physgrid_to_gll(fvm,elem,phis_phys_tmp,phis_tmp,pmask) call get_loop_ranges(hybrid, ibeg=nets, iend=nete) allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate '//& - 'fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,1,1,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(fld_gll(np,np,1,1,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_gll(np,np,1,1,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_gll(np,np,1,1,nets:nete)', & + file=__FILE__, line=__LINE__) fld_phys = 0.0_r8 do ie = nets, nete diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 index e108335a..9ba0cc3a 100644 --- a/src/dynamics/se/stepon.F90 +++ b/src/dynamics/se/stepon.F90 @@ -181,8 +181,7 @@ subroutine diag_dynvar_ic(elem, fvm) !use physconst, only: get_sum_species, get_ps,thermodynamic_active_species_idx !use physconst, only: thermodynamic_active_species_idx_dycore,get_dp_ref use hycoef, only: hyai, hybi, ps0 - use cam_abortutils, only: endrun - use string_utils, only: to_str + use cam_abortutils, only: endrun, check_allocate !SE dycore: use time_mod, only: TimeLevel_Qdp ! 
dynamics typestep @@ -221,11 +220,8 @@ subroutine diag_dynvar_ic(elem, fvm) call TimeLevel_Qdp(TimeLevel, qsplit, tl_Qdp) allocate(ftmp(npsq,nlev,2), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate ftmp(npsq,nlev,2) failed with stat: '//& - to_str(iret)) - end if - + call check_allocate(iret, subname, 'ftmp(npsq,nlev,2)', & + file=__FILE__, line=__LINE__) !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: #if 0 @@ -313,10 +309,8 @@ subroutine diag_dynvar_ic(elem, fvm) if (hist_fld_active('PS_gll')) then allocate(fld_2d(np,np)) - if (iret /= 0) then - call endrun(subname//': allocate fld_2d(np, np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_2d(np, np)', & + file=__FILE__, line=__LINE__) do ie = 1, nelemd call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),& @@ -341,12 +335,9 @@ subroutine diag_dynvar_ic(elem, fvm) !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: #if 0 allocate(fld_2d(np,np)) - if (iret /= 0) then - call endrun(subname//': allocate fld_2d(np, np) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'fld_2d(np, np)', & + file=__FILE__, line=__LINE__) - do ie = 1, nelemd call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),& thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0) do j = 1, np @@ -359,10 +350,8 @@ subroutine diag_dynvar_ic(elem, fvm) deallocate(fld_2d) if (fv_nphys < 1) then allocate(factor_array(np,np,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate factor_array(np,np,nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'factor_array(np,np,nlev)', & + file=__FILE__, line=__LINE__) end if #endif @@ -401,26 +390,22 @@ subroutine diag_dynvar_ic(elem, fvm) call get_loop_ranges(hybrid, ibeg=nets, iend=nete) allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(fld_gll(np,np,nlev,ntrac,nets:nete), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate fld_gll(np,np,nlev,ntrac,nets:nete)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, & + 'fld_gll(np,np,nlev,ntrac,nets:nete)', & + file=__FILE__, line=__LINE__) allocate(llimiter(ntrac), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate llimiter(ntrac) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'llimiter(ntrac)', & + file=__FILE__, line=__LINE__) allocate(factor_array(nc,nc,nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate factor_array(nc,nc,nlev) failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'factor_array(nc,nc,nlev)', & + file=__FILE__, line=__LINE__) llimiter = .true. 
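[Editor's note] Every hunk in this change set collapses the repeated "if (ierr /= 0) ... call endrun(...)" blocks into a single call to check_allocate, imported from cam_abortutils alongside endrun. The helper itself is not shown in this patch series; the sketch below only illustrates the interface implied by the call sites (status code, subroutine name, a description of the allocated array, plus optional __FILE__/__LINE__ metadata) and is an assumption, not the actual cam_abortutils implementation.

   subroutine check_allocate(errcode, subname, fields, file, line)
      ! Abort the run (via the host module's endrun) when an allocate
      ! statement returns a non-zero stat= value; otherwise return silently.
      use string_utils, only: to_str

      integer,                    intent(in) :: errcode ! stat= value from allocate
      character(len=*),           intent(in) :: subname ! name of the calling routine
      character(len=*),           intent(in) :: fields  ! description of the allocated array(s)
      character(len=*), optional, intent(in) :: file    ! usually __FILE__
      integer,          optional, intent(in) :: line    ! usually __LINE__

      character(len=512) :: errmsg

      if (errcode /= 0) then
         errmsg = subname//': allocate of '//trim(fields)//              &
                  ' failed with stat: '//to_str(errcode)
         if (present(file) .and. present(line)) then
            errmsg = trim(errmsg)//', at '//trim(file)//':'//to_str(line)
         end if
         call endrun(trim(errmsg))
      end if
   end subroutine check_allocate

With a helper along these lines, a call such as check_allocate(iret, subname, 'fld_gll(np,np,nlev,ntrac,nets:nete)', file=__FILE__, line=__LINE__) keeps each allocation check to two lines at the call site while still reporting the failing routine, array, file, and line.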
do ie = nets, nete diff --git a/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 index 48183a0a..5a5fd4cc 100644 --- a/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 +++ b/src/dynamics/tests/initial_conditions/ic_baro_dry_jw06.F90 @@ -13,7 +13,7 @@ module ic_baro_dry_jw06 !----------------------------------------------------------------------- use cam_logfile, only: iulog use shr_kind_mod, only: r8 => shr_kind_r8 - use cam_abortutils, only: endrun + use cam_abortutils, only: endrun, check_allocate use spmd_utils, only: masterproc use shr_sys_mod, only: shr_sys_flush @@ -54,7 +54,6 @@ subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & !use constituents, only: cnst_name !use const_init, only: cnst_init_default - use string_utils, only: to_str !Remove once constituents are enabled -JN use physics_types, only: ix_cld_liq, ix_rain @@ -100,10 +99,8 @@ subroutine bc_dry_jw06_set_ic(vcoord, latvals, lonvals, U, V, T, PS, PHIS, & exponent = rair*gamma/gravit allocate(mask_use(size(latvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'mask_use(size(latvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then if (size(mask_use) /= size(mask)) then diff --git a/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 index d25fc5a0..e18920c9 100644 --- a/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 +++ b/src/dynamics/tests/initial_conditions/ic_held_suarez.F90 @@ -8,7 +8,7 @@ module ic_held_suarez !----------------------------------------------------------------------- use cam_logfile, only: iulog use shr_kind_mod, only: r8 => shr_kind_r8 - use cam_abortutils, only: endrun + use cam_abortutils, only: endrun, check_allocate use spmd_utils, only: masterproc use shr_sys_mod, only: shr_sys_flush @@ -26,7 +26,6 @@ subroutine hs94_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & Q, m_cnst, mask, verbose) !use const_init, only: cnst_init_default !use constituents, only: cnst_name - use string_utils, only: to_str use physics_types, only: ix_cld_liq, ix_rain !Remove once constituents are enabled -JN !----------------------------------------------------------------------- @@ -59,10 +58,8 @@ subroutine hs94_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & character(len=*), parameter :: subname = 'HS94_SET_IC' allocate(mask_use(size(latvals)), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mask_use(size(latvals)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'mask_use(size(latvals))', & + file=__FILE__, line=__LINE__) if (present(mask)) then if (size(mask_use) /= size(mask)) then diff --git a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 index 43198c04..bf627283 100644 --- a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 +++ b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 @@ -18,8 +18,7 @@ module ic_us_standard_atmosphere use std_atm_profile, only: std_atm_pres, std_atm_height, std_atm_temp use cam_logfile, only: iulog -use cam_abortutils, only: endrun -use string_utils, only: to_str +use cam_abortutils, only: endrun, check_allocate implicit none private @@ -69,10 +68,8 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, 
& ncol = size(latvals, 1) allocate(mask_use(ncol), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate mask_use(ncol)) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'mask_use(ncol)', & + file=__FILE__, line=__LINE__) if (present(mask)) then if (size(mask_use) /= size(mask)) then @@ -120,16 +117,12 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & end if nlev = size(T, 2) allocate(pmid(nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate pmid(nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'pmid(nlev)', & + file=__FILE__, line=__LINE__) allocate(zmid(nlev), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate zmid(nlev) failed with stat: '//& - to_str(iret)) - end if + call check_allocate(iret, subname, 'zmid(nlev)', & + file=__FILE__, line=__LINE__) do i = 1, ncol if (mask_use(i)) then From f1fa068fc62b9059d9542a8db3b609c225a32e31 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Tue, 15 Jun 2021 22:37:00 -0600 Subject: [PATCH 24/45] Fix issues caused by rebase and perform minor clean-up. --- cime_config/namelist_definition_cam.xml | 350 +++++++++++------------- src/dynamics/se/dyn_comp.F90 | 11 +- src/utils/cam_field_read.F90 | 42 ++- 3 files changed, 207 insertions(+), 196 deletions(-) diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 3a87048c..48d5a3bf 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -3551,135 +3551,135 @@ UNSET_PATH - atm/cam/inic/cam_vcoords_L26_c180105.nc - atm/cam/inic/cam_vcoords_L30_c180105.nc - atm/cam/inic/cam_vcoords_L32_c180105.nc - atm/cam/inic/fv/cami_0000-01-01_0.23x0.31_L26_c100513.nc - atm/cam/inic/fv/cami_0000-09-01_0.23x0.31_L26_c061106.nc - atm/cam/inic/fv/cami_1980-01-01_0.47x0.63_L26_c071226.nc - atm/cam/inic/fv/cami_0000-09-01_0.47x0.63_L26_c061106.nc - atm/cam/inic/fv/cami_0000-10-01_0.5x0.625_L26_c031204.nc - atm/cam/inic/fv/cami_1987-01-01_0.9x1.25_L26_c060703.nc - atm/cam/inic/fv/cami_0000-09-01_0.9x1.25_L26_c051205.nc - atm/cam/inic/fv/cami_0000-01-01_1.9x2.5_L26_c070408.nc - atm/cam/inic/fv/cami_0000-09-01_1.9x2.5_L26_c040809.nc - atm/cam/inic/fv/cami_0000-01-01_2.5x3.33_L26_c110309.nc - atm/cam/inic/fv/cami_0000-09-01_2.5x3.33_L26_c091007.nc - atm/cam/inic/fv/cami_0001-01-01_4x5_L26_c060608.nc - atm/cam/inic/fv/cami_0000-01-01_10x15_L26_c030918.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_0.23x0.31_L30_c110527.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_0.47x0.63_L30_c100929.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_0.9x1.25_L30_c100618.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_1.9x2.5_L30_c090306.nc - atm/cam/inic/fv/cami_0000-09-01_1.9x2.5_L30_c070109.nc - atm/cam/inic/fv/cami_0000-01-01_2.5x3.33_L30_c110309.nc - atm/cam/inic/fv/cami_0000-09-01_2.5x3.33_L30_c100831.nc - atm/cam/inic/fv/cami_0000-01-01_4x5_L30_c090108.nc - atm/cam/inic/fv/cami_0000-01-01_10x15_L30_c081013.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_0.9x1.25_L32_c141031.nc - atm/cam/inic/fv/cami-mam3_0000-01-01_1.9x2.5_L32_c150407.nc - atm/cam/inic/fv/cami-mam4_0000-01-01_10x15_L32_c170914.nc - atm/cam/inic/fv/cami_0000-01-01_0.47x0.63_L26_APE_c080227.nc - atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L26_c161020.nc - atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L26_c161020.nc - atm/cam/inic/fv/aqua_0000-01-01_10x15_L26_c161230.nc - atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L30_c161020.nc - 
atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L30_c161020.nc - atm/cam/inic/fv/aqua_0000-01-01_10x15_L30_c170103.nc - atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L32_c161020.nc - atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L32_c161020.nc - atm/cam/inic/fv/aqua_0000-01-01_10x15_L32_c170103.nc - atm/cam/inic/fv/cami-chem_1990-01-01_0.9x1.25_L30_c080724.nc - atm/cam/inic/fv/cami-chem_1990-01-01_1.9x2.5_L26_c080114.nc - atm/cam/inic/fv/cami-chem_1990-01-01_1.9x2.5_L30_c080215.nc - atm/cam/inic/fv/camchemi_0012-01-01_10x15_L26_c081104.nc - atm/cam/inic/fv/camchemi_0012-01-01_10x15_L30_c081104.nc - atm/cam/inic/fv/camchemi_0012-01-01_4x5_L26_c081104.nc - atm/cam/inic/fv/camchemi_0012-01-01_4x5_L30_c081104.nc - atm/cam/inic/fv/camchemi_0012-01-01_1.9x2.5_L26_c081104.nc - atm/cam/inic/fv/camchemi_0012-01-01_1.9x2.5_L30_c081104.nc - atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_10x15_L30_c121015.nc - atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_4x5_L30_c121015.nc - atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_1.9x2.5_L30_c121015.nc - atm/cam/chem/trop_mozart/ic/cami_0000-09-01_4x5_L26_c060217.nc - atm/cam/chem/trop_mozart/ic/cami_0000-09-01_10x15_L26_c060216.nc - atm/waccm/ic/cami_2000-02-01_0.9x1.25_L66_c040928.nc - atm/waccm/ic/cami_2000-07-01_1.9x2.5_L66_c040928.nc - atm/waccm/ic/FWT2000_f09_spinup01.cam.i.0001-01-02-00000_c160315.nc - atm/waccm/ic/f2000.waccm-mam3_1.9x2.5_L70.cam2.i.0017-01-01.c120410.nc - atm/waccm/ic/aqua.cam6.waccmsc_1.9x2.5_L70.2000-01-01.c170123.nc - atm/waccm/ic/aqua.waccm_tsmlt_1.9x2.5_L70_c170814.nc - atm/waccm/ic/f2000.waccm-mam3_4x5_L70.cam2.i.0017-01-01.c121113.nc - atm/waccm/ic/f2000.waccm-mam3_10x15_L70.cam2.i.0017-01-01.c141016.nc - atm/waccm/ic/b1850.waccm-mam3_1.9x2.5_L70.cam2.i.0156-01-01.c120523.nc - atm/waccm/ic/cami_2000-05-01_1.9x2.5_L103_c040928.nc - atm/waccm/ic/wa3_4x5_1950_spinup.cam2.i.1960-01-01-00000.nc - atm/waccm/ic/cami_2000-01-01_10x15_L66_c041121.nc - atm/waccm/ic/f40.2000.4deg.wcm.carma.sulf.004.cam2.i.0008-01-01-00000.nc - atm/waccm/ic/f40.2deg.wcm.carma.sulf.L66.cam2.i.2010-01-01.nc - atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_L81_c110906.nc - atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_4x5_L81_c160630.nc - atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_10x15_L81_c141027.nc - atm/waccm/ic/waccmx_aqua_4x5_L126_c170705.nc - atm/waccm/ic/fx2000_0.9x1.25_126lev_0002-01-01-00000_c181221.nc - atm/waccm/ic/wcmx-cam6-phys_1.9x2.5_130lev_2000_c181115.nc - atm/waccm/ic/wcmx-cam6-phys_0.9x1.25_130lev_2000_c190122.nc - atm/waccm/ic/FC6X2000_f05_spinup01.cam.i.0002-01-01-00000_c190711.nc - atm/waccm/ic/waccmx_mam4_aqua_4x5_L130_c180803.nc - atm/waccm/ic/waccmx_mam4_aqua_1.9x2.5_L130_c180803.nc - atm/cam/inic/gaus/T341clim01.cam2.i.0024-01-01-00000.nc - atm/cam/inic/gaus/cami_0000-01-01_256x512_L26_c030918.nc - atm/cam/inic/gaus/cami_0000-01-01_128x256_L26_c030918.nc - atm/cam/inic/gaus/cami_0000-09-01_128x256_L26_c040422.nc - atm/cam/inic/gaus/cami_0000-01-01_64x128_T42_L26_c031110.nc - atm/cam/inic/gaus/cami_0000-09-01_64x128_L26_c030918.nc - atm/cam/inic/gaus/cami_0000-01-01_64x128_L30_c090102.nc - atm/cam/inic/gaus/cami_0000-09-01_64x128_L30_c031210.nc - atm/cam/inic/gaus/cami_0000-01-01_64x128_L32_c170510.nc - atm/cam/inic/gaus/cami_0000-01-01_48x96_L26_c091218.nc - atm/cam/inic/gaus/cami_0000-09-01_48x96_L26_c040420.nc - atm/cam/inic/gaus/cami_0000-01-01_48x96_L30_c100426.nc - atm/cam/inic/gaus/cami_0000-09-01_32x64_L26_c030918.nc - atm/cam/inic/gaus/cami_0000-01-01_32x64_L30_c090107.nc - 
atm/cam/inic/gaus/cami_0000-01-01_8x16_L26_c030228.nc - atm/cam/inic/gaus/cami_0000-09-01_8x16_L26_c030918.nc - atm/cam/inic/gaus/cami_0000-01-01_8x16_L30_c090102.nc - atm/cam/inic/homme/cami-mam3_0000-01_ne5np4_L30.140707.nc - atm/cam/inic/se/ape_topo_cam4_ne16np4_L26_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne16np4_L30_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne16np4_L32_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne30np4_L26_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne30np4_L30_c171020.nc - atm/cam/inic/se/ape_topo_cam6_ne30np4_L32_c171023.nc - atm/cam/inic/se/ape_topo_cam4_ne60np4_L26_c171018.nc - atm/cam/inic/se/ape_topo_cam4_ne60np4_L30_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne60np4_L32_c171020.nc - atm/cam/inic/se/ape_topo_cam4_ne120np4_L26_c171018.nc - atm/cam/inic/se/ape_topo_cam4_ne120np4_L30_c171024.nc - atm/cam/inic/se/ape_topo_cam4_ne120np4_L32_c171023.nc - atm/cam/inic/homme/cami_1850-01-01_ne240np4_L26_c110314.nc - atm/cam/inic/homme/cami_0000-09-01_ne240np4_L26_c061106.nc - atm/cam/inic/homme/cami-mam3_0000-01-ne240np4_L30_c111004.nc - atm/cam/inic/se/ape_cam4_ne5np4_L26_c170517.nc - atm/cam/inic/se/ape_cam4_ne16np4_L26_c170417.nc - atm/cam/inic/se/ape_cam4_ne30np4_L26_c170417.nc - atm/cam/inic/se/ape_cam4_ne60np4_L26_c171023.nc - atm/cam/inic/se/ape_cam4_ne120np4_L26_c170419.nc - atm/cam/inic/se/ape_cam4_ne240np4_L26_c170613.nc - atm/cam/inic/se/ape_cam5_ne5np4_L30_c170517.nc - atm/cam/inic/se/ape_cam5_ne16np4_L30_c170417.nc - atm/cam/inic/se/ape_cam5_ne30np4_L30_c170417.nc - atm/cam/inic/se/ape_cam5_ne120np4_L30_c170419.nc - atm/cam/inic/se/ape_cam6_ne5np4_L32_c170517.nc - atm/cam/inic/se/ape_cam6_ne16np4_L32_c170509.nc - atm/cam/inic/se/ape_cam6_ne30np4_L32_c170509.nc - atm/cam/inic/se/ape_cam6_ne120np4_L32_c170908.nc - atm/cam/inic/se/ape_cam6_ne240np4_L32_c170908.nc - atm/cam/inic/se/f2000_conus_ne30x8_L32_c190712.nc - atm/waccm/ic/wa3_ne5np4_1950_spinup.cam2.i.1960-01-01-00000_c150810.nc - atm/waccm/ic/waccm5_1850_ne30np4_L70_0001-01-11-00000_c151217.nc - atm/waccm/ic/fw2000_ne30np4_L70_c181221.nc - atm/cam/inic/gaus/cami_0000-09-01_64x128_L30_c031210.nc + $DIN_LOC_ROOT/atm/cam/inic/cam_vcoords_L26_c180105.nc + $DIN_LOC_ROOT/atm/cam/inic/cam_vcoords_L30_c180105.nc + $DIN_LOC_ROOT/atm/cam/inic/cam_vcoords_L32_c180105.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_0.23x0.31_L26_c100513.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_0.23x0.31_L26_c061106.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_1980-01-01_0.47x0.63_L26_c071226.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_0.47x0.63_L26_c061106.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-10-01_0.5x0.625_L26_c031204.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_1987-01-01_0.9x1.25_L26_c060703.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_0.9x1.25_L26_c051205.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_1.9x2.5_L26_c070408.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_1.9x2.5_L26_c040809.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_2.5x3.33_L26_c110309.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_2.5x3.33_L26_c091007.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0001-01-01_4x5_L26_c060608.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_10x15_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_0.23x0.31_L30_c110527.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_0.47x0.63_L30_c100929.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_0.9x1.25_L30_c100618.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_1.9x2.5_L30_c090306.nc + 
$DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_1.9x2.5_L30_c070109.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_2.5x3.33_L30_c110309.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-09-01_2.5x3.33_L30_c100831.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_4x5_L30_c090108.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_10x15_L30_c081013.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_0.9x1.25_L32_c141031.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam3_0000-01-01_1.9x2.5_L32_c150407.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-mam4_0000-01-01_10x15_L32_c170914.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami_0000-01-01_0.47x0.63_L26_APE_c080227.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L26_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L26_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0000-01-01_10x15_L26_c161230.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L30_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L30_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0000-01-01_10x15_L30_c170103.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_0.9x1.25_L32_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0006-01-01_1.9x2.5_L32_c161020.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/aqua_0000-01-01_10x15_L32_c170103.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-chem_1990-01-01_0.9x1.25_L30_c080724.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-chem_1990-01-01_1.9x2.5_L26_c080114.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/cami-chem_1990-01-01_1.9x2.5_L30_c080215.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_10x15_L26_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_10x15_L30_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_4x5_L26_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_4x5_L30_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_1.9x2.5_L26_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/camchemi_0012-01-01_1.9x2.5_L30_c081104.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_10x15_L30_c121015.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_4x5_L30_c121015.nc + $DIN_LOC_ROOT/atm/cam/inic/fv/trop_strat_mam3_chem_2000-01-01_1.9x2.5_L30_c121015.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ic/cami_0000-09-01_4x5_L26_c060217.nc + $DIN_LOC_ROOT/atm/cam/chem/trop_mozart/ic/cami_0000-09-01_10x15_L26_c060216.nc + $DIN_LOC_ROOT/atm/waccm/ic/cami_2000-02-01_0.9x1.25_L66_c040928.nc + $DIN_LOC_ROOT/atm/waccm/ic/cami_2000-07-01_1.9x2.5_L66_c040928.nc + $DIN_LOC_ROOT/atm/waccm/ic/FWT2000_f09_spinup01.cam.i.0001-01-02-00000_c160315.nc + $DIN_LOC_ROOT/atm/waccm/ic/f2000.waccm-mam3_1.9x2.5_L70.cam2.i.0017-01-01.c120410.nc + $DIN_LOC_ROOT/atm/waccm/ic/aqua.cam6.waccmsc_1.9x2.5_L70.2000-01-01.c170123.nc + $DIN_LOC_ROOT/atm/waccm/ic/aqua.waccm_tsmlt_1.9x2.5_L70_c170814.nc + $DIN_LOC_ROOT/atm/waccm/ic/f2000.waccm-mam3_4x5_L70.cam2.i.0017-01-01.c121113.nc + $DIN_LOC_ROOT/atm/waccm/ic/f2000.waccm-mam3_10x15_L70.cam2.i.0017-01-01.c141016.nc + $DIN_LOC_ROOT/atm/waccm/ic/b1850.waccm-mam3_1.9x2.5_L70.cam2.i.0156-01-01.c120523.nc + $DIN_LOC_ROOT/atm/waccm/ic/cami_2000-05-01_1.9x2.5_L103_c040928.nc + $DIN_LOC_ROOT/atm/waccm/ic/wa3_4x5_1950_spinup.cam2.i.1960-01-01-00000.nc + $DIN_LOC_ROOT/atm/waccm/ic/cami_2000-01-01_10x15_L66_c041121.nc + $DIN_LOC_ROOT/atm/waccm/ic/f40.2000.4deg.wcm.carma.sulf.004.cam2.i.0008-01-01-00000.nc + $DIN_LOC_ROOT/atm/waccm/ic/f40.2deg.wcm.carma.sulf.L66.cam2.i.2010-01-01.nc + $DIN_LOC_ROOT/atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_L81_c110906.nc + 
$DIN_LOC_ROOT/atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_4x5_L81_c160630.nc + $DIN_LOC_ROOT/atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_10x15_L81_c141027.nc + $DIN_LOC_ROOT/atm/waccm/ic/waccmx_aqua_4x5_L126_c170705.nc + $DIN_LOC_ROOT/atm/waccm/ic/fx2000_0.9x1.25_126lev_0002-01-01-00000_c181221.nc + $DIN_LOC_ROOT/atm/waccm/ic/wcmx-cam6-phys_1.9x2.5_130lev_2000_c181115.nc + $DIN_LOC_ROOT/atm/waccm/ic/wcmx-cam6-phys_0.9x1.25_130lev_2000_c190122.nc + $DIN_LOC_ROOT/atm/waccm/ic/FC6X2000_f05_spinup01.cam.i.0002-01-01-00000_c190711.nc + $DIN_LOC_ROOT/atm/waccm/ic/waccmx_mam4_aqua_4x5_L130_c180803.nc + $DIN_LOC_ROOT/atm/waccm/ic/waccmx_mam4_aqua_1.9x2.5_L130_c180803.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/T341clim01.cam2.i.0024-01-01-00000.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_256x512_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_128x256_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_128x256_L26_c040422.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_64x128_T42_L26_c031110.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_64x128_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_64x128_L30_c090102.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_64x128_L30_c031210.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_64x128_L32_c170510.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_48x96_L26_c091218.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_48x96_L26_c040420.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_48x96_L30_c100426.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_32x64_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_32x64_L30_c090107.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_8x16_L26_c030228.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_8x16_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-01-01_8x16_L30_c090102.nc + $DIN_LOC_ROOT/atm/cam/inic/homme/cami-mam3_0000-01_ne5np4_L30.140707.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne16np4_L26_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne16np4_L30_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne16np4_L32_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne30np4_L26_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne30np4_L30_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam6_ne30np4_L32_c171023.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne60np4_L26_c171018.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne60np4_L30_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne60np4_L32_c171020.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne120np4_L26_c171018.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne120np4_L30_c171024.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_topo_cam4_ne120np4_L32_c171023.nc + $DIN_LOC_ROOT/atm/cam/inic/homme/cami_1850-01-01_ne240np4_L26_c110314.nc + $DIN_LOC_ROOT/atm/cam/inic/homme/cami_0000-09-01_ne240np4_L26_c061106.nc + $DIN_LOC_ROOT/atm/cam/inic/homme/cami-mam3_0000-01-ne240np4_L30_c111004.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne5np4_L26_c170517.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne16np4_L26_c170417.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne30np4_L26_c170417.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne60np4_L26_c171023.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne120np4_L26_c170419.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam4_ne240np4_L26_c170613.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam5_ne5np4_L30_c170517.nc + 
$DIN_LOC_ROOT/atm/cam/inic/se/ape_cam5_ne16np4_L30_c170417.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam5_ne30np4_L30_c170417.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam5_ne120np4_L30_c170419.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam6_ne5np4_L32_c170517.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam6_ne16np4_L32_c170509.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam6_ne30np4_L32_c170509.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam6_ne120np4_L32_c170908.nc + $DIN_LOC_ROOT/atm/cam/inic/se/ape_cam6_ne240np4_L32_c170908.nc + $DIN_LOC_ROOT/atm/cam/inic/se/f2000_conus_ne30x8_L32_c190712.nc + $DIN_LOC_ROOT/atm/waccm/ic/wa3_ne5np4_1950_spinup.cam2.i.1960-01-01-00000_c150810.nc + $DIN_LOC_ROOT/atm/waccm/ic/waccm5_1850_ne30np4_L70_0001-01-11-00000_c151217.nc + $DIN_LOC_ROOT/atm/waccm/ic/fw2000_ne30np4_L70_c181221.nc + $DIN_LOC_ROOT/atm/cam/inic/gaus/cami_0000-09-01_64x128_L30_c031210.nc @@ -8153,62 +8153,44 @@ UNSET_PATH UNSET_PATH - UNSET_PATH UNSET_PATH - atm/cam/topo/topo-from-cami_0000-01-01_256x512_L26_c030918.nc - atm/cam/topo/USGS-gtopo30_128x256_c050520.nc - atm/cam/topo/T42_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20180111.nc - atm/cam/topo/USGS-gtopo30_48x96_c050520.nc - atm/cam/topo/USGS-gtopo30_32x64_c050520.nc - atm/cam/topo/USGS-gtopo30_8x16_c050520.nc - atm/cam/topo/USGS_gtopo30_0.23x0.31_remap_c061107.nc - atm/cam/topo/USGS_gtopo30_0.47x0.63_remap_c061106.nc - atm/cam/topo/fv_0.47x0.63_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171023.nc - atm/cam/topo/topo-from-cami_0000-10-01_0.5x0.625_L26_c031204.nc - atm/cam/topo/fv_0.9x1.25_nc3000_Nsw042_Nrs008_Co060_Fi001_ZR_sgh30_24km_GRNL_c170103.nc - atm/cam/topo/fv_1.9x2.5_nc3000_Nsw084_Nrs016_Co120_Fi001_ZR_GRNL_c190405.nc - atm/cam/topo/USGS-gtopo30_2.5x3.33_remap_c100204.nc - atm/cam/topo/USGS-gtopo30_4x5_remap_c050520.nc - atm/cam/topo/fv_10x15_nc0540_Nsw042_Nrs008_Co060_Fi001_20171220.nc - atm/cam/topo/se/ne5np4_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw064_20170515.nc - atm/cam/topo/se/ne16np4_nc3000_Co120_Fi001_PF_nullRR_Nsw084_20171012.nc - atm/cam/topo/se/ne30np4_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171020.nc - atm/cam/topo/se/ne60np4_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171012.nc - atm/cam/topo/se/ne120np4_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171011.nc - atm/cam/topo/se/ne240np4_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171014.nc - atm/cam/topo/se/ne5pg2_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw060_20170706.nc - atm/cam/topo/se/ne30pg2_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc - atm/cam/topo/se/ne60pg2_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171014.nc - atm/cam/topo/se/ne120pg2_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171012.nc - atm/cam/topo/se/ne240pg2_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171014.nc - atm/cam/topo/se/ne5pg3_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw064_20170516.nc - atm/cam/topo/se/ne16pg3_nc3000_Co120_Fi001_PF_nullRR_Nsw084_20171012.nc - atm/cam/topo/se/ne30pg3_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc - atm/cam/topo/se/ne60pg3_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171012.nc - atm/cam/topo/se/ne120pg3_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc - atm/cam/topo/se/ne240pg3_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171015.nc - atm/cam/topo/se/ne5pg4_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw060_20170707.nc - atm/cam/topo/se/ne30pg4_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc - atm/cam/topo/se/ne60pg4_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171018.nc - atm/cam/topo/se/ne120pg4_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc - atm/cam/topo/se/ne30x8_conus_nc3000_Co060_Fi001_MulG_PF_nullRR_Nsw042_20190710.nc - - - - logical - topo - cam_initfiles_nl 
- - Setting use_topo_file=.false. allows the user to specify that PHIS, SGH, - SGH30, and LANDM_COSLAT are all zero without having to supply a topo file - full of zeros. - Default: TRUE - - - .true. - .false. - .false. - .false. + $DIN_LOC_ROOT/atm/cam/topo/topo-from-cami_0000-01-01_256x512_L26_c030918.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_128x256_c050520.nc + $DIN_LOC_ROOT/atm/cam/topo/T42_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20180111.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_48x96_c050520.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_32x64_c050520.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_8x16_c050520.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS_gtopo30_0.23x0.31_remap_c061107.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS_gtopo30_0.47x0.63_remap_c061106.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_0.47x0.63_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171023.nc + $DIN_LOC_ROOT/atm/cam/topo/topo-from-cami_0000-10-01_0.5x0.625_L26_c031204.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_0.9x1.25_nc3000_Nsw042_Nrs008_Co060_Fi001_ZR_sgh30_24km_GRNL_c170103.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_1.9x2.5_nc3000_Nsw084_Nrs016_Co120_Fi001_ZR_GRNL_c190405.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_2.5x3.33_remap_c100204.nc + $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_4x5_remap_c050520.nc + $DIN_LOC_ROOT/atm/cam/topo/fv_10x15_nc0540_Nsw042_Nrs008_Co060_Fi001_20171220.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne5np4_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw064_20170515.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne16np4_nc3000_Co120_Fi001_PF_nullRR_Nsw084_20171012.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne30np4_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171020.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne60np4_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171012.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne120np4_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171011.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne240np4_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne5pg2_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw060_20170706.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne30pg2_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne60pg2_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne120pg2_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171012.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne240pg2_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne5pg3_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw064_20170516.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne16pg3_nc3000_Co120_Fi001_PF_nullRR_Nsw084_20171012.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne30pg3_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne60pg3_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171012.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne120pg3_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne240pg3_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171015.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne5pg4_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw060_20170707.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne30pg4_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne60pg4_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171018.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne120pg4_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc + $DIN_LOC_ROOT/atm/cam/topo/se/ne30x8_conus_nc3000_Co060_Fi001_MulG_PF_nullRR_Nsw042_20190710.nc diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 33f74e9e..aa3972a9 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -27,7 +27,6 @@ module dyn_comp !use cam_history_support, only: 
max_fieldname_len use time_manager, only: get_step_size -!use ncdio_atm, only: infld use cam_field_read, only: cam_read_field use pio, only: file_desc_t, pio_seterrorhandling, PIO_BCAST_ERROR, & @@ -2337,8 +2336,6 @@ subroutine read_dyn_field_2d(fieldname, fh, dimname, buffer) !---------------------------------------------------------------------------- buffer = 0.0_r8 -! call infld(trim(fieldname), fh, dimname, 1, npsq, 1, nelemd, buffer, & -! found, gridname='GLL') !Remove if below works! -JN call cam_read_field(trim(fieldname), fh, buffer, found, & gridname=ini_grid_name, fillvalue=fillvalue) if(.not. found) then @@ -2369,10 +2366,8 @@ subroutine read_dyn_field_3d(fieldname, fh, dimname, buffer) !---------------------------------------------------------------------------- buffer = 0.0_r8 -! call infld(trim(fieldname), fh, dimname, 'lev', 1, npsq, 1, nlev, & -! 1, nelemd, buffer, found, gridname='GLL') !Remove if below works! -JN - call cam_read_field(trim(fieldname), fh, buffer, found, 'lev', (/1, nlev/), & - dim3_pos=2, gridname=ini_grid_name, fillvalue=fillvalue) + call cam_read_field(trim(fieldname), fh, buffer, found, 'lev', (/1, nlev/), & + dim3_pos=2, gridname=ini_grid_name, fillvalue=fillvalue) if(.not. found) then call endrun('READ_DYN_FIELD_3D: Could not find '//trim(fieldname)//' field on input datafile') end if @@ -2399,8 +2394,6 @@ subroutine read_phys_field_2d(fieldname, fh, dimname, buffer) logical :: found !---------------------------------------------------------------------------- -! call infld(trim(fieldname), fh, dimname, 1, fv_nphys**2, 1, nelemd, buffer, & -! found, gridname='physgrid_d') !Remove if below works! -JN call cam_read_field(trim(fieldname), fh, buffer, found, gridname='physgrid_d') if(.not. found) then call endrun('READ_PHYS_FIELD_2D: Could not find '//trim(fieldname)//' field on input datafile') diff --git a/src/utils/cam_field_read.F90 b/src/utils/cam_field_read.F90 index af8dcdde..828d4698 100644 --- a/src/utils/cam_field_read.F90 +++ b/src/utils/cam_field_read.F90 @@ -178,7 +178,7 @@ end function num_target_dims ! ROUTINE: infld_real8_1d ! subroutine infld_real8_1d(varname, ncid, field, readvar, gridname, & - timelevel, log_output) + timelevel, log_output, fillvalue) ! ! infld_real8_1d: ! Netcdf I/O of 8-byte real field from netCDF file @@ -204,6 +204,8 @@ subroutine infld_real8_1d(varname, ncid, field, readvar, gridname, & character(len=*), optional, intent(in) :: gridname integer, optional, intent(in) :: timelevel logical, optional, intent(in) :: log_output + ! fillvalue: Provides the fill value as specified in the input file + real(r8), optional, intent(out) :: fillvalue ! ! LOCAL VARIABLES: type(io_desc_t), pointer :: iodesc @@ -374,6 +376,16 @@ subroutine infld_real8_1d(varname, ncid, field, readvar, gridname, & call pio_read_darray(ncid, varid, iodesc, field, ierr) end if + ! Acquire fill value for variable if requested + if (present(fillvalue)) then + ierr = cam_pio_inq_var_fill(ncid, varid, fillvalue) + !End run if PIO error occurred: + if (ierr /= PIO_NOERR) then + write(errormsg, *) subname//': cam_pio_inq_var_fill failed with PIO error: ', ierr + call safe_endrun(errormsg) + end if + end if + if (masterproc .and. log_read_field) then write(iulog,*) subname//': read field '//trim(varname) end if @@ -388,7 +400,7 @@ end subroutine infld_real8_1d ! ROUTINE: infld_real8_2d ! 
subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & - timelevel, dim3name, dim3_bnds, log_output) + timelevel, dim3name, dim3_bnds, log_output, fillvalue) ! ! infld_real8_2d: ! Netcdf I/O of 8-byte real field from netCDF file @@ -417,6 +429,8 @@ subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & ! dim3_bnds: Bounds of vertical dimension, if field is 3D integer, optional, intent(in) :: dim3_bnds(2) logical, optional, intent(in) :: log_output + ! fillvalue: Provides the fill value as specified in the input file + real(r8), optional, intent(out) :: fillvalue ! ! LOCAL VARIABLES: type(io_desc_t), pointer :: iodesc @@ -630,6 +644,16 @@ subroutine infld_real8_2d(varname, ncid, field, readvar, gridname, & nullify(iodesc) ! Cached by cam_pio_utils end if + ! Acquire fill value for variable if requested + if (present(fillvalue)) then + ierr = cam_pio_inq_var_fill(ncid, varid, fillvalue) + !End run if PIO error occurred: + if (ierr /= PIO_NOERR) then + write(errormsg, *) subname//': cam_pio_inq_var_fill failed with PIO error: ', ierr + call safe_endrun(errormsg) + end if + end if + if (masterproc .and. log_read_field) then write(iulog,*) subname//': read field '//trim(varname) end if @@ -646,7 +670,7 @@ end subroutine infld_real8_2d ! ROUTINE: infld_real8_3d ! subroutine infld_real8_3d(varname, ncid, field, readvar, dim3name, & - dim3_bnds, dim3_pos, gridname, timelevel, log_output) + dim3_bnds, dim3_pos, gridname, timelevel, log_output, fillvalue) ! ! infld_real8_3d: ! Netcdf I/O of 8-byte real field from netCDF file @@ -676,6 +700,8 @@ subroutine infld_real8_3d(varname, ncid, field, readvar, dim3name, & character(len=*), optional, intent(in) :: gridname integer, optional, intent(in) :: timelevel logical, optional, intent(in) :: log_output + ! fillvalue: Provides the fill value as specified in the input file + real(r8), optional, intent(out) :: fillvalue ! ! LOCAL VARIABLES: type(io_desc_t), pointer :: iodesc @@ -879,6 +905,16 @@ subroutine infld_real8_3d(varname, ncid, field, readvar, dim3name, & call pio_read_darray(ncid, varid, iodesc, field, ierr) end if + ! Acquire fill value for variable if requested + if (present(fillvalue)) then + ierr = cam_pio_inq_var_fill(ncid, varid, fillvalue) + !End run if PIO error occurred: + if (ierr /= PIO_NOERR) then + write(errormsg, *) subname//': cam_pio_inq_var_fill failed with PIO error: ', ierr + call safe_endrun(errormsg) + end if + end if + if (masterproc .and. log_read_field) then write(iulog,*) subname//': read field '//trim(varname) end if From 6c16f1ff4d55c4e5a71c4a0f77216216d663e155 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 16 Jun 2021 05:34:43 -0600 Subject: [PATCH 25/45] Fix namelist bugs found using CSLAM. 
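[Editor's note on PATCH 24/45] The preceding hunks thread an optional fillvalue argument through infld_real8_1d/2d/3d in cam_field_read, so callers of the cam_read_field interface can recover the fill value recorded in the input file (queried via cam_pio_inq_var_fill) instead of hard-coding one. The sketch below is a hypothetical caller, not code from this patch series: the field name 'EXAMPLE_FIELD' and grid name 'GLL' are placeholders, and only the argument pattern mirrors the calls added to dyn_comp.F90 above.

   subroutine read_example_field(fh, buffer)
      ! Hypothetical caller: read a 2-D (column, element) field and also
      ! retrieve the file's fill value through the new optional argument.
      use shr_kind_mod,   only: r8 => shr_kind_r8
      use pio,            only: file_desc_t
      use cam_field_read, only: cam_read_field
      use cam_abortutils, only: endrun

      type(file_desc_t), intent(inout) :: fh           ! open input file
      real(r8),          intent(out)   :: buffer(:,:)  ! field on the source grid

      logical  :: found      ! .true. if the variable was found on the file
      real(r8) :: fillvalue  ! fill value reported for the variable

      buffer = 0.0_r8
      call cam_read_field('EXAMPLE_FIELD', fh, buffer, found, &
           gridname='GLL', fillvalue=fillvalue)
      if (.not. found) then
         call endrun('read_example_field: EXAMPLE_FIELD not found on input file')
      end if
      ! Entries equal to fillvalue were not provided by the file and can be
      ! masked out or re-initialized by the caller.
   end subroutine read_example_field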
--- cime_config/namelist_definition_cam.xml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 48d5a3bf..3ddada34 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -8152,8 +8152,10 @@ UNSET_PATH - UNSET_PATH UNSET_PATH + UNSET_PATH + UNSET_PATH + UNSET_PATH $DIN_LOC_ROOT/atm/cam/topo/topo-from-cami_0000-01-01_256x512_L26_c030918.nc $DIN_LOC_ROOT/atm/cam/topo/USGS-gtopo30_128x256_c050520.nc $DIN_LOC_ROOT/atm/cam/topo/T42_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20180111.nc @@ -11401,6 +11403,9 @@ 0 + 2 + 3 + 4 @@ -11644,9 +11649,6 @@ 0 - 2 - 3 - 4 From cc059772ebf04a775d8d02d0e3d22ecc3e3924cd Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 16 Jun 2021 09:56:04 -0600 Subject: [PATCH 26/45] Fix unit tests. --- cime_config/cam_autogen.py | 9 +++------ cime_config/cam_config.py | 8 +++++--- test/unit/cam_config_unit_tests.py | 4 +++- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cime_config/cam_autogen.py b/cime_config/cam_autogen.py index 48381ffa..6d721d8a 100644 --- a/cime_config/cam_autogen.py +++ b/cime_config/cam_autogen.py @@ -464,12 +464,9 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n #the host model files list for use by CCPP's capgen: host_files.append(reg_file.file_path) - # Convert preproc defs to string: - preproc_cache_str = ', '.join(preproc_defs) - if os.path.exists(genccpp_dir): do_gen_ccpp = force or build_cache.ccpp_mismatch(sdfs, scheme_files, - preproc_cache_str, + preproc_defs, kind_phys) else: os.makedirs(genccpp_dir) @@ -484,7 +481,7 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n _LOGGER.debug(" host files: %s", ", ".join(host_files)) _LOGGER.debug(" scheme files: %s", ', '.join(scheme_files)) _LOGGER.debug(" suite definition files: %s", ', '.join(sdfs)) - _LOGGER.debug(" preproc defs: %s", preproc_cache_str) + _LOGGER.debug(" preproc defs: %s", ', '.join(preproc_defs)) _LOGGER.debug(" output directory: '%s'", genccpp_dir) _LOGGER.debug(" kind_phys: '%s'", kind_phys) @@ -495,7 +492,7 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n host_name, kind_phys, force_overwrite, _LOGGER) # save build details in the build cache - build_cache.update_ccpp(sdfs, scheme_files, preproc_cache_str, kind_phys) + build_cache.update_ccpp(sdfs, scheme_files, preproc_defs, kind_phys) ##XXgoldyXX: v Temporary fix: Copy CCPP Framework source code into ##XXgoldyXX: v generated code directory request = DatatableReport("utility_files") diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index f19f4fef..e9c242ba 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -874,10 +874,10 @@ def __init__(self, case, case_log): phys_desc = """\n\ A semi-colon separated list of physics Suite Definition\n\ File (SDF) names. 
To specify the Kessler and Held-Suarez\n\ - suites as run time options, use '--physics-suites kessler;hs94'.""" + suites as run time options, use '--physics-suites kessler;rhs94'.""" self.create_config("physics_suites", phys_desc, - user_config_opts.physics_suites) + user_config_opts.physics_suites, is_nml_attr=True) #-------------------------------------------------------- # Print CAM configure settings and values to debug logger @@ -1233,7 +1233,9 @@ def __init__(self): "CASEROOT" : "/another/made-up/path", "CAM_CONFIG_OPTS" : "-dyn none --physics-suites adiabatic", "COMP_ROOT_DIR_ATM" : "/a/third/made-up/path", - "CAM_CPPDEFS" : "UNSET" + "CAM_CPPDEFS" : "UNSET", + "NTHRDS_ATM" : 1, + "RUN_STARTDATE" : "101" } def get_value(self, key): diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index b4c8f3a4..92a95003 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -65,7 +65,9 @@ def __init__(self): "CASEROOT" : "/another/made-up/path", "CAM_CONFIG_OPTS" : "-dyn none --physics-suites adiabatic", "COMP_ROOT_DIR_ATM" : "/a/third/made-up/path", - "CAM_CPPDEFS" : "UNSET" + "CAM_CPPDEFS" : "UNSET", + "NTHRDS_ATM" : 1, + "RUN_STARTDATE" : "101" } def get_value(self, key): From 60086d80ffbb43658b7c36f56cfdbaeb477ebb9a Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 16 Jun 2021 12:34:11 -0600 Subject: [PATCH 27/45] Fix issues with null dycore simulation. --- cime_config/buildlib | 5 ++--- cime_config/cam_autogen.py | 14 ++++++++---- src/physics/utils/physics_grid.F90 | 34 +++++++++++++----------------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index bfe6a23a..3923c0fd 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -94,7 +94,6 @@ def _build_cam(): os.path.join(atm_root, "src", "control"), os.path.join(atm_root, "src", "cpl", case.get_value("COMP_INTERFACE")), - os.path.join(atm_root, "src", "dynamics", dycore), os.path.join(atm_root, "src", "dynamics", "utils"), os.path.join(atm_root, "src", "physics", "utils"), os.path.join(atm_root, "src", "utils")] @@ -106,9 +105,9 @@ def _build_cam(): # Add main dynamics directory: dyn_dir = os.path.join(atm_root, "src", "dynamics", dycore) paths.append(dyn_dir) - # Add IC source directories if using a non-null dycore: + # Add IC source directories: + paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) #Required due to namelist call. 
if dycore != "none": - paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) paths.append(os.path.join(atm_root, "src", "dynamics", "tests", "initial_conditions")) # Add any necessary dycore sub-directories: diff --git a/cime_config/cam_autogen.py b/cime_config/cam_autogen.py index 6d721d8a..43b49f45 100644 --- a/cime_config/cam_autogen.py +++ b/cime_config/cam_autogen.py @@ -464,9 +464,15 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n #the host model files list for use by CCPP's capgen: host_files.append(reg_file.file_path) + # Convert preproc defs to string: + if preproc_defs: + preproc_cache_str = ', '.join(preproc_defs) + else: + preproc_cache_str = 'UNSET' + if os.path.exists(genccpp_dir): do_gen_ccpp = force or build_cache.ccpp_mismatch(sdfs, scheme_files, - preproc_defs, + preproc_cache_str, kind_phys) else: os.makedirs(genccpp_dir) @@ -481,18 +487,18 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n _LOGGER.debug(" host files: %s", ", ".join(host_files)) _LOGGER.debug(" scheme files: %s", ', '.join(scheme_files)) _LOGGER.debug(" suite definition files: %s", ', '.join(sdfs)) - _LOGGER.debug(" preproc defs: %s", ', '.join(preproc_defs)) + _LOGGER.debug(" preproc defs: %s", preproc_cache_str) _LOGGER.debug(" output directory: '%s'", genccpp_dir) _LOGGER.debug(" kind_phys: '%s'", kind_phys) # generate CCPP caps force_overwrite = False capgen(host_files, scheme_files, sdfs, cap_output_file, - preproc_defs, gen_hostcap, gen_docfiles, genccpp_dir, + preproc_cache_str, gen_hostcap, gen_docfiles, genccpp_dir, host_name, kind_phys, force_overwrite, _LOGGER) # save build details in the build cache - build_cache.update_ccpp(sdfs, scheme_files, preproc_defs, kind_phys) + build_cache.update_ccpp(sdfs, scheme_files, preproc_cache_str, kind_phys) ##XXgoldyXX: v Temporary fix: Copy CCPP Framework source code into ##XXgoldyXX: v generated code directory request = DatatableReport("utility_files") diff --git a/src/physics/utils/physics_grid.F90 b/src/physics/utils/physics_grid.F90 index a0af8c72..49936a2a 100644 --- a/src/physics/utils/physics_grid.F90 +++ b/src/physics/utils/physics_grid.F90 @@ -315,26 +315,22 @@ subroutine phys_grid_init(hdim1_d_in, hdim2_d_in, dycore_name_in, & dyn_attributes(index)) end do - if (.not. cam_grid_attr_exists('physgrid', 'area')) then - ! Physgird always needs an area attribute - if (unstructured) then - ! Physgrid always needs an area attribute. If we did not inherit one - ! from the dycore (i.e., physics and dynamics are on different - ! grids), create that attribute here (Note, a separate physics - ! grid is only supported for unstructured grids). - allocate(area_d(columns_on_task), stat=ierr) - call check_allocate(ierr, subname, 'area_d(columns_on_task)', & - file=__FILE__, line=__LINE__) + if ((.not. cam_grid_attr_exists('physgrid', 'area')) .and. & + unstructured) then + ! Physgrid always needs an area attribute. If we did not inherit one + ! from the dycore (i.e., physics and dynamics are on different + ! grids), create that attribute here (Note, a separate physics + ! grid is only supported for unstructured grids). 
+ allocate(area_d(columns_on_task), stat=ierr) + call check_allocate(ierr, subname, 'area_d(columns_on_task)', & + file=__FILE__, line=__LINE__) - do col_index = 1, columns_on_task - area_d(col_index) = phys_columns(col_index)%area - end do - call cam_grid_attribute_register('physgrid', 'area', & - 'physics column areas', 'ncol', area_d, map=grid_map(3,:)) - nullify(area_d) ! Belongs to attribute now - else - call endrun(subname//"No 'area' attribute from dycore") - end if + do col_index = 1, columns_on_task + area_d(col_index) = phys_columns(col_index)%area + end do + call cam_grid_attribute_register('physgrid', 'area', & + 'physics column areas', 'ncol', area_d, map=grid_map(3,:)) + nullify(area_d) ! Belongs to attribute now end if ! Cleanup pointers (they belong to the grid now) nullify(grid_map) From 62b95d4d144fe8fb24b7e1b679e4f88a0ed8cbbc Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 30 Jun 2021 09:41:46 -0600 Subject: [PATCH 28/45] Add modifications requested by reviewers. --- cime_config/buildlib | 26 +-- cime_config/buildnml | 83 +------- cime_config/cam_config.py | 112 +++++++++-- cime_config/namelist_definition_cam.xml | 98 ++++----- src/data/write_init_files.py | 7 +- src/dynamics/se/dyn_comp.F90 | 22 +-- test/unit/cam_config_unit_tests.py | 253 +++++++++++++++++++++++- 7 files changed, 431 insertions(+), 170 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index 3923c0fd..d8d5bd08 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -102,25 +102,29 @@ def _build_cam(): paths.append(path) # End if # End for - # Add main dynamics directory: - dyn_dir = os.path.join(atm_root, "src", "dynamics", dycore) - paths.append(dyn_dir) - # Add IC source directories: + + # Add dynamics source code directories: + for direc in config.get_value("dyn_src_dirs").split(","): + dyn_dir = os.path.join(atm_root, "src", "dynamics") + for subdir in direc.split("/"): + dyn_dir = os.path.join(dyn_dir, subdir) + #Add to list of filepaths if not already present: + if dyn_dir not in paths: + paths.append(dyn_dir) + + # Add analytical IC source code directories: paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) #Required due to namelist call. 
if dycore != "none": paths.append(os.path.join(atm_root, "src", "dynamics", "tests", "initial_conditions")) - # Add any necessary dycore sub-directories: - for root, direcs, _ in os.walk(dyn_dir): - for direc in direcs: - dyn_subdir = os.path.join(root, direc) - if dyn_subdir not in paths: - paths.append(dyn_subdir) - #If using the CMEPS/NUOPC coupler, then add additional path: + + # If using the CMEPS/NUOPC coupler, then add additional path: if case.get_value("COMP_INTERFACE") == "nuopc": paths.append(os.path.join(__CIMEROOT, "src", "drivers", "nuopc", "nuopc_cap_share")) # End if + + # Write Filepath text file with open(filepath_src, "w") as filepath: filepath.write("\n".join(paths)) filepath.write("\n") diff --git a/cime_config/buildnml b/cime_config/buildnml index d57bc4e2..42c673ba 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -74,92 +74,13 @@ def nml_attr_set(config): #is also a namelist attribute: if conf.is_nml_attr: #If so, then add to attribute dictionary: - if type(conf.value) is str: - #If value is a string, then add directly: - cam_nml_attr_dict[conf_name] = conf.value - else: - #If not, then convert to string before adding: - cam_nml_attr_dict[conf_name] = "{}".format(conf.value) - # End if + cam_nml_attr_dict[conf_name] = str(conf.value) # End if # End for # End if #Return namelist attribute dictionary: return cam_nml_attr_dict -################## - -def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): - - """ - Determines if a user has specified - which CCPP physics suite to use, - assuming there is more than one suite - listed in the 'physics_suites' CAM - configure option. - """ - - #Extract physics suite list: - phys_suites = config.get_value('physics_suites').split(';') - - if len(phys_suites) > 1: - #If more than one physics suite is listed, - #then check the "user_nl_cam" file to see if user - #specified a particular suite to use for this - #simulation: - with open(user_nl_file, 'r') as nl_file: - #Read lines in file: - nl_user_lines = nl_file.readlines() - - #Strip out all comment lines: - real_nl_lines = \ - [line for line in nl_user_lines if line[0] != "!"] - - #Search for "physics_suite" line: - phys_suite_lines = \ - [line for line in real_nl_lines if "physics_suite" in line] - - #If there is no "physics_suite" line, then throw an error: - if not phys_suite_lines: - emsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ - This is required if more than one suite is listed\n \ - in CAM_CONFIG_OPTS." - raise CamBuildnmlError(emsg) - - #If there is more than one "physics_suite" line, then also throw an error: - if len(phys_suite_lines) > 1: - emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ - Only one 'physics_suite' line is allowed." - raise CamBuildnmlError(emsg) - - #Extract string from list: - phys_suite_line = phys_suite_lines[0] - - #Search for equals (=) sign in "physics_suite" string: - eq_sign_idx = phys_suite_line.find("=") - - if eq_sign_idx > 0: - #Assume all text left of equals sign is the physics suite name: - phys_suite_val = phys_suite_line[(eq_sign_idx+1):].strip() - else: - #Syntax is bad, so raise an error: - emsg = "No equals (=) sign was found with the 'physics_suite' variable." 
- raise CamBuildnmlError(emsg) - - #Check that physics suite specified is actually in config list: - if phys_suite_val not in phys_suites: - emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ - listed in CAM_CONFIG_OPTS" - raise CamBuildnmlError(emsg) - - else: - #If only physics suite is listed, then - #just use that one: - phys_suite_val = phys_suites[0] - - #Add new namelist attribute to dictionary: - cam_nml_attr_dict["phys_suite"] = phys_suite_val - ################# #PRIMARY FUNCTION ################# @@ -348,7 +269,7 @@ def buildnml(case, caseroot, compname): #----------------------------------------------------------- # Find user-chosen CCPP physics suite, and set as an attribute: - ccpp_phys_set(config, cam_nml_dict, user_nl_file) + config.ccpp_phys_set(cam_nml_dict, user_nl_file) #-------------------------------- # Create CIME namelist input file: diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index e9c242ba..032bc0ba 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -672,6 +672,12 @@ def __init__(self, case, case_log): # Cam horizontal grid meta-data hgrid_desc = "Horizontal grid specifier." + # dynamics package source directories meta-data + dyn_dirs_desc = "Comma-seperated list of local directories containing" \ + " dynamics package source code.\n" \ + "These directories are assumed to be located under" \ + " src/dynamics, with a backslah ('/') indicating directory hierarchy." + # Create regex expressions to search for the different dynamics grids eul_grid_re = re.compile(r"T[0-9]+") # Eulerian dycore fv_grid_re = re.compile(r"[0-9][0-9.]*x[0-9][0-9.]*") # FV dycore @@ -698,9 +704,12 @@ def __init__(self, case, case_log): self.create_config("hgrid", hgrid_desc, atm_grid, se_grid_re, is_nml_attr=True) + # Source code directories + self.create_config("dyn_src_dirs", dyn_dirs_desc, "se,se/dycore") + # Add SE namelist groups to nmlgen list self.__nml_groups.append("air_composition_nl") - self.__nml_groups.append("dyn_se_inparm") + self.__nml_groups.append("dyn_se_nl") # Add required CPP definitons: self.add_cppdef("_MPI") @@ -754,6 +763,9 @@ def __init__(self, case, case_log): self.create_config("hgrid", hgrid_desc, atm_grid, None, is_nml_attr=True) + # Source code directories + self.create_config("dyn_src_dirs", dyn_dirs_desc, "none") + else: emsg = "ERROR: The specified CAM horizontal grid, '{}', " emsg += "does not match any known format." @@ -787,19 +799,23 @@ def __init__(self, case, case_log): #Set horizontal dimension variables: if dyn == "se": - # Extract cubed-sphere grid values from hgrid string: - csne_re = re.search(r"ne[0-9]+", atm_grid) - csne_val = int(csne_re.group()[2:]) - csnp_re = re.search(r"np[0-9]+", atm_grid) - csnp_val = int(csnp_re.group()[2:]) + # Determine location of "np" in atm_grid string: + np_idx = atm_grid.find("np") + + #Determine location of "pg" in atm_grid string: + pg_idx = atm_grid.find(".pg") - # Extract number of CSLAM physics grid points, if available: - npg_re = re.search(r"pg[1-9]+", atm_grid) - if npg_re: - npg_val = int(npg_re.group()[2:]) + # Extract cubed-sphere grid values from atm_grid/hgrid string: + # Note that the string always starts with "ne". 
+ + csne_val = int(atm_grid[2:np_idx]) + if pg_idx > -1: + csnp_val = int(atm_grid[np_idx+2:pg_idx]) + npg_val = int(atm_grid[pg_idx+3:]) else: - npg_val = 0 #No CSLAM grid points + csnp_val = int(atm_grid[np_idx+2:]) + npg_val = 0 # Add number of elements along edge of cubed-sphere grid csne_desc = "Number of elements along one edge of a cubed sphere grid." @@ -877,7 +893,7 @@ def __init__(self, case, case_log): suites as run time options, use '--physics-suites kessler;rhs94'.""" self.create_config("physics_suites", phys_desc, - user_config_opts.physics_suites, is_nml_attr=True) + user_config_opts.physics_suites) #-------------------------------------------------------- # Print CAM configure settings and values to debug logger @@ -1196,6 +1212,78 @@ def generate_cam_src(self, gen_fort_indent): #-------------------------------------------------------------- build_cache.write() + #++++++++++++++++++++++++ + + def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): + + """ + Determine if a user has specified which + CCPP physics suite to use in the namelist, + assuming there is more than one suite + listed in the 'physics_suites' CAM + configure option. + """ + + #Extract physics suite list: + phys_suites = config.get_value('physics_suites').split(';') + + if len(phys_suites) > 1: + #If more than one physics suite is listed, + #then check the "user_nl_cam" file to see if user + #specified a particular suite to use for this + #simulation: + with open(user_nl_file, 'r') as nl_file: + #Read lines in file: + nl_user_lines = nl_file.readlines() + + #Break out "physics_suite" lines: + phys_suite_lines = \ + [[x.strip() for x in line.split('=')] \ + for line in nl_user_lines if line[0] != "!" and 'physics_suite' in line] + + #If there is no "physics_suite" line, then throw an error: + if not phys_suite_lines: + emsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ + This is required if more than one suite is listed\n \ + in CAM_CONFIG_OPTS." + raise CamConfigValError(emsg) + + #If there is more than one "physics_suite" entry, then throw an error: + if len(phys_suite_lines) > 1: + emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ + Only one 'physics_suite' line is allowed." + raise CamConfigValError(emsg) + + #The split string list exists inside another, otherwise empty list, so extract + #from empty list: + phys_suite_list = phys_suite_lines[0] + + if len(phys_suite_list) == 1: + #If there is only one string entry, then it means the equals (=) sign was never found: + emsg = "No equals (=) sign was found with the 'physics_suite' variable." + raise CamConfigValError(emsg) + elif len(phys_suite_list) > 2: + #If there is more than two entries, it means there were two or more equals signs: + emsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." 
+ raise CamConfigValError(emsg) + + #Remove quotation marks around physics_suite entry, if any: + phys_suite_val = phys_suite_list[1].strip(''' "' ''') + + #Check that physics suite specified is actually in config list: + if phys_suite_val not in phys_suites: + emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ + listed in CAM_CONFIG_OPTS" + raise CamConfigValError(emsg) + + else: + #If only a single physics suite is listed, then just use that one: + phys_suite_val = phys_suites[0] + + #Add new namelist attribute to dictionary: + cam_nml_attr_dict["phys_suite"] = phys_suite_val + + ############################################################################### #IGNORE EVERYTHING BELOW HERE UNLESS RUNNING TESTS ON CAM_CONFIG! ############################################################################### diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 3ddada34..5e066df2 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -10819,7 +10819,7 @@ integer se - dyn_se_inparm + dyn_se_nl Set for refined exodus meshes (variable viscosity). Viscosity in namelist specified for regions with a resolution equivilant @@ -10827,14 +10827,14 @@ Default: -1 (not used) - 0 + -1 120 integer se - dyn_se_inparm + dyn_se_nl 0,1,2 CAM physics forcing option: @@ -10849,7 +10849,7 @@ real se - dyn_se_inparm + dyn_se_nl Scalar viscosity with variable coefficient. Use variable hyperviscosity based on element area limited by @@ -10862,7 +10862,7 @@ real se - dyn_se_inparm + dyn_se_nl Use tensor hyperviscosity. Citation: Guba, O., Taylor, M. A., Ullrich, P. A., Overfelt, J. R., and @@ -10880,7 +10880,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of hyperviscosity subcycles per dynamics timestep. @@ -10892,7 +10892,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of hyperviscosity subcycles per dynamics timestep in sponge del2 diffusion. @@ -10904,7 +10904,7 @@ integer se - dyn_se_inparm + dyn_se_nl Variable to specify the vertical index at which the Rayleigh friction term is centered (the peak value). @@ -10917,7 +10917,7 @@ real se - dyn_se_inparm + dyn_se_nl Rayleigh friction parameter to determine the width of the profile. If set to 0 then a width is chosen by the algorithm (see rayleigh_friction.F90). @@ -10931,7 +10931,7 @@ real se - dyn_se_inparm + dyn_se_nl Rayleigh friction parameter to determine the approximate value of the decay time (days) at model top. If 0.0 then no Rayleigh friction is applied. @@ -10944,7 +10944,7 @@ real se - dyn_se_inparm + dyn_se_nl Used by SE dycore to apply sponge layer diffusion to u, v, and T for stability of WACCM configurations. The diffusion is modeled on 3D molecular @@ -10966,7 +10966,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of hyperviscosity subcycles done in tracer advection code. @@ -10978,7 +10978,7 @@ integer se - dyn_se_inparm + dyn_se_nl 0,4,8 Limiter used for horizontal tracer advection: @@ -10993,7 +10993,7 @@ real se - dyn_se_inparm + dyn_se_nl Upper bound for Courant number, used to limit se_hypervis_power. Default: 1.0e99 (i.e., not used) unless se_refined_mesh=TRUE @@ -11006,7 +11006,7 @@ char*256 se - dyn_se_inparm + dyn_se_nl Filename of exodus file to read grid from (generated by CUBIT or SQuadGen). @@ -11019,7 +11019,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of elements along a cube edge. Must match value of grid. Set this to zero to use a refined mesh. 
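For reference, the ccpp_phys_set method moved into cam_config.py earlier in this commit boils down to one parsing rule: drop comment lines, keep lines mentioning physics_suite, split on '=', and strip whitespace and quotes from the value. The following is a simplified, standalone sketch of that idea only (it is not the CamConfig method itself; the function name, the plain ValueError, and the sample file contents are illustrative assumptions):

# Simplified standalone sketch of the user_nl_cam physics_suite parsing (illustration only).
def pick_physics_suite(nl_lines, config_suites):
    """Return the suite requested in user_nl_cam-style lines, or raise ValueError."""
    entries = [[part.strip() for part in line.split('=')]
               for line in nl_lines
               if line and line[0] != '!' and 'physics_suite' in line]
    if len(config_suites) == 1:
        return config_suites[0]            # only one suite configured; nothing to choose
    if not entries:
        raise ValueError("physics_suite must be set when multiple suites are configured")
    if len(entries) > 1:
        raise ValueError("only one physics_suite line is allowed")
    entry = entries[0]
    if len(entry) != 2:
        raise ValueError("physics_suite line must contain exactly one '=' sign")
    suite = entry[1].strip(''' "' ''')     # drop surrounding quotes, if any
    if suite not in config_suites:
        raise ValueError("physics_suite does not match any configured suite")
    return suite

# Hypothetical example usage:
lines = ['!Namelist test file', 'physics_suite = "adiabatic"']
print(pick_physics_suite(lines, ['adiabatic', 'kessler']))   # -> adiabatic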
@@ -11038,19 +11038,19 @@ integer se - dyn_se_inparm + dyn_se_nl Number of PEs to be used by SE dycore. - Default: 0 = Number of PEs used by CAM. + Default: -1 = Number of PEs used by CAM. - 0 + -1 integer se - dyn_se_inparm + dyn_se_nl Number of dynamics steps per physics timestep. @@ -11069,7 +11069,7 @@ integer se - dyn_se_inparm + dyn_se_nl 0,1,2 Scaling of temperature increment for different levels of @@ -11085,7 +11085,7 @@ real se - dyn_se_inparm + dyn_se_nl Hyperviscosity coefficient for u,v, T [m^4/s]. If < 0, se_nu is automatically set. @@ -11098,7 +11098,7 @@ real se - dyn_se_inparm + dyn_se_nl Hyperviscosity applied to divergence component of winds [m^4/s]. If < 0, uses se_nu_p. @@ -11111,7 +11111,7 @@ real se - dyn_se_inparm + dyn_se_nl Hyperviscosity coefficient applied to pressure-level thickness [m^4/s]. If < 0, se_nu_p is automatically set. @@ -11124,7 +11124,7 @@ real se - dyn_se_inparm + dyn_se_nl Second-order viscosity applied only near the model top [m^2/s]. @@ -11139,7 +11139,7 @@ logical se - dyn_se_inparm + dyn_se_nl Hyperscosity for T and dp is applied to (T-Tref) and (dp-dp_ref) where Xref are reference states where the effect of topography has been removed @@ -11157,7 +11157,7 @@ logical se - dyn_se_inparm + dyn_se_nl If TRUE the continous equations the dynamical core is based on will conserve a comprehensive moist total energy @@ -11173,7 +11173,7 @@ logical se - dyn_se_inparm + dyn_se_nl If TRUE the CSLAM algorithm will work for Courant number larger than 1 with a low-order increment for tracer mass more than one grid cell width away @@ -11185,7 +11185,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of SE rsplit time-steps CSLAM supercycles rsplit/se_fvm_supercycling must be an integer @@ -11197,7 +11197,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of SE rsplit time-steps CSLAM supercycles in the jet region (which is specified by kmin_jet and kmax_jet) @@ -11211,7 +11211,7 @@ integer se - dyn_se_inparm + dyn_se_nl Min level index where CSLAM runs with se_fvm_supercycling_jet (if se_fvm_supercycling_jet.ne.se_fvm_supercycling) or @@ -11225,7 +11225,7 @@ integer se - dyn_se_inparm + dyn_se_nl Max level index where CSLAM runs with se_fvm_supercycling_jet (if se_fvm_supercycling_jet.ne.se_fvm_supercycling) or @@ -11239,7 +11239,7 @@ integer se - dyn_se_inparm + dyn_se_nl Tracer advection is done every qsplit dynamics timesteps. @@ -11250,7 +11250,7 @@ logical se - dyn_se_inparm + dyn_se_nl TRUE specified use of a refined grid (mesh) for this run. @@ -11263,7 +11263,7 @@ integer se - dyn_se_inparm + dyn_se_nl Vertically lagrangian code vertically remaps every rsplit tracer timesteps. @@ -11276,7 +11276,7 @@ integer se - dyn_se_inparm + dyn_se_nl Frequency with which diagnostic output is written to log (output every statefreq dynamics timesteps). 
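The grid-name handling added to cam_config.py in this same commit replaces regex matching with plain string indexing of the atm_grid value. A standalone sketch of that decomposition, using 'ne30np4' and 'ne30np4.pg3' only as example grid names, is:

# Standalone sketch of the SE grid-name decomposition used in cam_config.py (illustration only).
def split_se_grid_name(atm_grid):
    """Split an SE grid name such as 'ne30np4' or 'ne30np4.pg3' into (csne, csnp, npg)."""
    np_idx = atm_grid.find("np")        # location of the 'np' token
    pg_idx = atm_grid.find(".pg")       # location of the optional '.pg' suffix
    csne_val = int(atm_grid[2:np_idx])  # the name always starts with 'ne'
    if pg_idx > -1:
        csnp_val = int(atm_grid[np_idx+2:pg_idx])
        npg_val = int(atm_grid[pg_idx+3:])
    else:
        csnp_val = int(atm_grid[np_idx+2:])
        npg_val = 0                     # no CSLAM physics grid points
    return csne_val, csnp_val, npg_val

print(split_se_grid_name("ne30np4"))      # -> (30, 4, 0)
print(split_se_grid_name("ne30np4.pg3"))  # -> (30, 4, 3)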
@@ -11288,7 +11288,7 @@ integer se - dyn_se_inparm + dyn_se_nl Time stepping method for SE dycore se_tstep_type=1 RK2 followed by qsplit-1 Leapfrog steps; second-order accurate in time (CESM1.2.0 setting) @@ -11303,7 +11303,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of tracers to include in logfile diagnostics for SE dycore @@ -11314,7 +11314,7 @@ char*32 se - dyn_se_inparm + dyn_se_nl CAM-SE vertical remapping of temperature: @@ -11330,7 +11330,7 @@ char*32 se - dyn_se_inparm + dyn_se_nl CAM-SE vertical remap algorithm for u,v,T, and water species: @@ -11354,7 +11354,7 @@ char*32 se - dyn_se_inparm + dyn_se_nl CAM-SE vertical remap algorithm for non-water tracers: @@ -11377,7 +11377,7 @@ logical se - dyn_se_inparm + dyn_se_nl Set .true. to allow writing SE dynamics fields to the restart file using the unstructured grid format. This allows the restart file to be used as an @@ -11393,7 +11393,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of equally-spaced horizontal physics points per spectral element. A number greater than zero will define [se_fv_nphys] equally @@ -11413,7 +11413,7 @@ char*16 se - dyn_se_inparm + dyn_se_nl no,SCRIP If 'SCRIP', write a NetCDF file with the grid in SCRIP format. @@ -11428,7 +11428,7 @@ char*256 se - dyn_se_inparm + dyn_se_nl Name of grid file to write if se_write_grid_file is set. Default: Set according to active grid @@ -11437,7 +11437,7 @@ logical se - dyn_se_inparm + dyn_se_nl Set to true to write the SEMapping.nc file. @@ -11459,7 +11459,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of threads to use for loops over elements. @@ -11470,7 +11470,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of threads to use when processing vertical loops. Normally equal to se_tracer_num_threads. @@ -11482,7 +11482,7 @@ integer se - dyn_se_inparm + dyn_se_nl Number of threads to use when processing loops over threads. Normally equal to se_vert_num_threads. diff --git a/src/data/write_init_files.py b/src/data/write_init_files.py index 6affb8a6..cde03f40 100644 --- a/src/data/write_init_files.py +++ b/src/data/write_init_files.py @@ -147,7 +147,7 @@ def write_init_files(files, outdir, indent, cap_datafile, logger, #Add error-message to logger, and return with non-zero retmsg: logger.error(emsg) - retmsg = "Required CCPPP physics variables missing from host model." + retmsg = "Required CCPP physics variables missing from host model." return retmsg # end if # end if @@ -272,9 +272,6 @@ def write_init_files(files, outdir, indent, cap_datafile, logger, #Write physics_check_data subroutine: write_phys_check_subroutine(outfile, fort_data, phys_check_fname_str) - #End module: - #outfile.write("\nend module {}".format(phys_input_fname_str), 0) - # end if #-------------------------------------- #Return retmsg: @@ -1800,7 +1797,7 @@ def write_phys_check_subroutine(outfile, fort_data, phys_check_fname_str): #Write dummy variable declarations: outfile.write("", 0) outfile.write("! 
Dummy arguments", 2) - outfile.write("character(len=SHR_KIND_CL), intent(in) :: file_name", 2) + outfile.write("character(len=SHR_KIND_CL), intent(in) :: file_name", 2) outfile.write("character(len=SHR_KIND_CS) :: suite_names(:) !Names of CCPP suites", 2) outfile.write("integer, intent(in) :: timestep", 2) outfile.write("real(kind_phys), intent(in) :: min_difference", 2) diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index aa3972a9..48387eda 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -182,7 +182,7 @@ subroutine dyn_readnl(NLFileName) integer :: se_rayk0 real(r8) :: se_molecular_diff - namelist /dyn_se_inparm/ & + namelist /dyn_se_nl/ & se_fine_ne, & ! For refined meshes se_ftype, & ! forcing type se_statediag_numtrac, & @@ -232,7 +232,7 @@ subroutine dyn_readnl(NLFileName) !-------------------------------------------------------------------------- - ! defaults for variables not set by build-namelist + ! defaults for variables if not set by the namelist se_fine_ne = -1 se_hypervis_power = 0 se_hypervis_scaling = 0 @@ -240,17 +240,17 @@ subroutine dyn_readnl(NLFileName) se_mesh_file = '' se_write_restart_unstruct = .false. - ! Read the namelist (dyn_se_inparm) + ! Read the namelist (dyn_se_nl) call MPI_barrier(mpicom, ierr) if (masterproc) then - write(iulog, *) "dyn_readnl: reading dyn_se_inparm namelist..." + write(iulog, *) "dyn_readnl: reading dyn_se_nl namelist..." unitn = shr_file_getunit() open( unitn, file=trim(NLFileName), status='old' ) - call find_group_name(unitn, 'dyn_se_inparm', status=ierr) + call find_group_name(unitn, 'dyn_se_nl', status=ierr) if (ierr == 0) then - read(unitn, dyn_se_inparm, iostat=ierr) + read(unitn, dyn_se_nl, iostat=ierr) if (ierr /= 0) then - call endrun('dyn_readnl: ERROR reading dyn_se_inparm namelist') + call endrun('dyn_readnl: ERROR reading dyn_se_nl namelist') end if end if close(unitn) @@ -305,13 +305,13 @@ subroutine dyn_readnl(NLFileName) call MPI_bcast(se_raytau0, 1, mpi_real8, masterprocid, mpicom, ierr) call MPI_bcast(se_molecular_diff, 1, mpi_real8, masterprocid, mpicom, ierr) - ! If se_npes is set to zero, then make it match host model: - if (se_npes == 0) then + ! If se_npes is set to negative one, then make it match host model: + if (se_npes == -1) then se_npes = npes else ! 
Check that se_npes is a positive integer: - if (se_npes < 0) then - call endrun('dyn_readnl: ERROR: se_npes must be >= 0') + if (se_npes <= 0) then + call endrun('dyn_readnl: ERROR: se_npes must either be > 0 or exactly -1') end if end if diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 92a95003..60685897 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -63,7 +63,7 @@ def __init__(self): "COMP_ATM" : "cam", "EXEROOT" : "/some/made-up/path", "CASEROOT" : "/another/made-up/path", - "CAM_CONFIG_OPTS" : "-dyn none --physics-suites adiabatic", + "CAM_CONFIG_OPTS" : "-dyn none --physics-suites adiabatic;kessler", "COMP_ROOT_DIR_ATM" : "/a/third/made-up/path", "CAM_CPPDEFS" : "UNSET", "NTHRDS_ATM" : 1, @@ -288,6 +288,257 @@ def test_config_gen_cam_src_ccpp_check(self): #Check that error message matches what's expected: self.assertEqual(ermsg, str(valerr.exception)) + #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check that "ccpp_phys_set" works as expected with one physics suite entry + #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_check_single_suite(self): + + """ + Check that "ccpp_phys_set" works as expected + when given a correctly formatted namelist file + and a single physics suite in the config object. + """ + + #Save "physics_suites" value: + cam_config_suites_orig = self.test_config_cam.get_value("physics_suites") + + + #Set "new" physics_suites value with one physics suite: + self.test_config_cam.set_value("physics_suites", "kessler") + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite = "adiabatic"\n') + + #Run ccpp_phys_set config method: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that dictonary entries are correct: + self.assertEqual(cam_nml_attr_dict["phys_suite"], "kessler") + + #Remove text file: + os.remove("test.txt") + + #Set physics_suites back to its original value: + self.test_config_cam.set_value("physics_suites", cam_config_suites_orig) + + + #++++++++++++++++++++++++++++++++++++++++++++ + #Check that "ccpp_phys_set" works as expected + #++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_check_multi_suite(self): + + """ + Check that "ccpp_phys_set" works as expected + when given a correctly formatted namelist file + and multiple physics suites in the config object. + """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite = "adiabatic"\n') + + #Run ccpp_phys_set config method: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that dictonary entries are correct: + self.assertEqual(cam_nml_attr_dict["phys_suite"], "adiabatic") + + #Remove text file: + os.remove("test.txt") + + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" missing "physics_suite" error-handling + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_missing_phys(self): + + """ + Check that "ccpp_phys_set" throws the proper + error if there is more than one CCPP suite and the + "physics_suite" namelist variable is missing. 
+ """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Set error message: + ermsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ + This is required if more than one suite is listed\n \ + in CAM_CONFIG_OPTS." + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + + #Expect "CamConfigValError": + with self.assertRaises(CamConfigValError) as valerr: + #Run ccpp_phys_set config method, which should fail + #due to missing "physics_suite" namelist variable: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) + + #Remove text file: + os.remove("test.txt") + + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" missing equals-sign error-handling + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_two_phys(self): + + """ + Check that "ccpp_phys_set" throws the proper + error if there is more than one CCPP suite and + more than one "physics_suite" namelist variable. + """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Set error message: + ermsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ + Only one 'physics_suite' line is allowed." + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite = "adiabatic"\n') + f.write('physics_suite = "kessler"\n') + + #Expect "CamConfigValError": + with self.assertRaises(CamConfigValError) as valerr: + #Run ccpp_phys_set config method, which should fail + #due to missing "physics_suite" namelist variable: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) + + #Remove text file: + os.remove("test.txt") + + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" missing equals-sign error-handling + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_missing_equals(self): + + """ + Check that "ccpp_phys_set" throws the proper + error if there is a missing equals (=) sign + after the "physics_suite" namelist variable. + """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Set error message: + ermsg = "No equals (=) sign was found with the 'physics_suite' variable." + + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite "adiabatic"\n') + + #Expect "CamConfigValError": + with self.assertRaises(CamConfigValError) as valerr: + #Run ccpp_phys_set config method, which should fail + #due to missing "physics_suite" namelist variable: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) + + #Remove text file: + os.remove("test.txt") + + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" multiple equals-signs error-handling + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_two_equals(self): + + """ + Check that "ccpp_phys_set" throws the proper + error if there is more than one equals (=) sign + after the "physics_suite" namelist variable. 
+ """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Set error message: + ermsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite == "adiabatic"\n') + + #Expect "CamConfigValError": + with self.assertRaises(CamConfigValError) as valerr: + #Run ccpp_phys_set config method, which should fail + #due to missing "physics_suite" namelist variable: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) + + #Remove text file: + os.remove("test.txt") + + #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" non-matching physics_suite error-handling + #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + def test_config_ccpp_phys_set_no_physics_suite_match(self): + + """ + Check that "ccpp_phys_set" throws the proper + error if the "physics_suite" namelist variable + value doesn't match any of the options listed + in "CAM_CONFIG_OPTS". + """ + + #Create namelist attribute dictionary: + cam_nml_attr_dict = dict() + + #Set error message: + ermsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ + listed in CAM_CONFIG_OPTS" + + #Create namelist file: + with open("test.txt", "w") as f: + f.write('!Namelist test file\n') + f.write('physics_suite = "cam6"\n') + + #Expect "CamConfigValError": + with self.assertRaises(CamConfigValError) as valerr: + #Run ccpp_phys_set config method, which should fail + #due to missing "physics_suite" namelist variable: + self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") + + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) + + #Remove text file: + os.remove("test.txt") + + ################################################# #Run unit tests if this script is called directly ################################################# From ae73cc64950a1d3c4455b28b24ec434473c5d6f2 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 30 Jun 2021 09:47:51 -0600 Subject: [PATCH 29/45] Fix unit test failures. --- cime_config/buildlib | 12 ++++----- test/unit/cam_config_unit_tests.py | 42 +++++++++++++++--------------- test/unit/write_init_unit_tests.py | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index d8d5bd08..41a0b68b 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -105,12 +105,12 @@ def _build_cam(): # Add dynamics source code directories: for direc in config.get_value("dyn_src_dirs").split(","): - dyn_dir = os.path.join(atm_root, "src", "dynamics") - for subdir in direc.split("/"): - dyn_dir = os.path.join(dyn_dir, subdir) - #Add to list of filepaths if not already present: - if dyn_dir not in paths: - paths.append(dyn_dir) + dyn_dir = os.path.join(atm_root, "src", "dynamics") + for subdir in direc.split("/"): + dyn_dir = os.path.join(dyn_dir, subdir) + #Add to list of filepaths if not already present: + if dyn_dir not in paths: + paths.append(dyn_dir) # Add analytical IC source code directories: paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) #Required due to namelist call. 
diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 60685897..f98b29fe 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -311,9 +311,9 @@ def test_config_ccpp_phys_set_check_single_suite(self): cam_nml_attr_dict = dict() #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite = "adiabatic"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite = "adiabatic"\n') #Run ccpp_phys_set config method: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") @@ -344,9 +344,9 @@ def test_config_ccpp_phys_set_check_multi_suite(self): cam_nml_attr_dict = dict() #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite = "adiabatic"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite = "adiabatic"\n') #Run ccpp_phys_set config method: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") @@ -378,8 +378,8 @@ def test_config_ccpp_phys_set_missing_phys(self): in CAM_CONFIG_OPTS." #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: @@ -413,10 +413,10 @@ def test_config_ccpp_phys_set_two_phys(self): Only one 'physics_suite' line is allowed." #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite = "adiabatic"\n') - f.write('physics_suite = "kessler"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite = "adiabatic"\n') + test_fil.write('physics_suite = "kessler"\n') #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: @@ -450,9 +450,9 @@ def test_config_ccpp_phys_set_missing_equals(self): #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite "adiabatic"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite "adiabatic"\n') #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: @@ -485,9 +485,9 @@ def test_config_ccpp_phys_set_two_equals(self): ermsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." 
#Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite == "adiabatic"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite == "adiabatic"\n') #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: @@ -522,9 +522,9 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): listed in CAM_CONFIG_OPTS" #Create namelist file: - with open("test.txt", "w") as f: - f.write('!Namelist test file\n') - f.write('physics_suite = "cam6"\n') + with open("test.txt", "w") as test_fil: + test_fil.write('!Namelist test file\n') + test_fil.write('physics_suite = "cam6"\n') #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: diff --git a/test/unit/write_init_unit_tests.py b/test/unit/write_init_unit_tests.py index 2cfd66ea..7312e37d 100644 --- a/test/unit/write_init_unit_tests.py +++ b/test/unit/write_init_unit_tests.py @@ -378,7 +378,7 @@ def test_missing_var_write_init(self): # Check return message: amsg = "Test failure: retmsg={}".format(retmsg) self.assertEqual(retmsg, - "Required CCPPP physics variables missing from host model.", + "Required CCPP physics variables missing from host model.", msg=amsg) # Make sure no output file was created: From 16cffc5e5f5abe6c591ae601c57518df6995514c Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 30 Jun 2021 10:56:35 -0600 Subject: [PATCH 30/45] Fix NAG debug error (Github issue #136). --- src/utils/cam_abortutils.F90 | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/utils/cam_abortutils.F90 b/src/utils/cam_abortutils.F90 index f3a2dd81..7fb89cfc 100644 --- a/src/utils/cam_abortutils.F90 +++ b/src/utils/cam_abortutils.F90 @@ -104,6 +104,7 @@ subroutine cam_register_close_file(file, log_shutdown_in) type(open_file_pointer), pointer :: of_prev character(len=msg_len) :: log_shutdown character(len=*), parameter :: subname = 'cam_register_close_file' + logical :: file_loop_var nullify(of_prev) ! Are we going to log shutdown events? @@ -114,7 +115,16 @@ subroutine cam_register_close_file(file, log_shutdown_in) end if ! Look to see if we have this file of_ptr => open_files_head - do while (associated(of_ptr) .and. associated(of_ptr%file_desc)) + + !Set while-loop control variable + file_loop_var = .false. + if (associated(of_ptr)) then + if(associated(of_ptr%file_desc)) then + file_loop_var = .true. + end if + end if + + do while (file_loop_var) if (file%fh == of_ptr%file_desc%fh) then ! Remove this file from the list if (associated(of_prev)) then @@ -139,6 +149,15 @@ subroutine cam_register_close_file(file, log_shutdown_in) of_prev => of_ptr of_ptr => of_ptr%next end if + !Check if loop needs to continue + if (.not.associated(of_ptr)) then + file_loop_var = .false. + else + if(.not.associated(of_ptr%file_desc)) then + file_loop_var = .false. + end if + end if + end do end subroutine cam_register_close_file From 117987715abd01c77ac73ef23253893573da9d2d Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 15 Jul 2021 20:14:43 -0600 Subject: [PATCH 31/45] Add additional code reviewer requests. 
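The cam_abortutils change in the NAG-fix commit above replaces the combined condition associated(of_ptr) .and. associated(of_ptr%file_desc) with an explicit file_loop_var flag because Fortran's .and. is not guaranteed to short-circuit: the second operand may be evaluated even when of_ptr is disassociated, which NAG's debug checks reject. The Python sketch below shows the list traversal that the flag emulates; in Python the 'and' operator does short-circuit, so a single condition is safe. The class and function names here are hypothetical stand-ins, not the Fortran types:

# Illustration only: Python 'and' short-circuits, so node.file_desc is never
# evaluated when node is None; the Fortran flag reproduces this behavior.
class OpenFile:
    def __init__(self, file_desc, next_node=None):
        self.file_desc = file_desc
        self.next = next_node

def find_open_file(head, wanted_fh):
    node = head
    while node is not None and node.file_desc is not None:
        if node.file_desc == wanted_fh:
            return node
        node = node.next
    return None

files = OpenFile(3, OpenFile(7))
print(find_open_file(files, 7).file_desc)   # -> 7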
--- cime_config/buildnml | 21 +++- cime_config/config_component.xml | 1 + cime_config/namelist_definition_cam.xml | 8 +- src/data/physconst.F90 | 151 +++++++++--------------- src/dynamics/se/dyn_comp.F90 | 2 +- src/utils/cam_map_utils.F90 | 2 +- 6 files changed, 84 insertions(+), 101 deletions(-) diff --git a/cime_config/buildnml b/cime_config/buildnml index 42c673ba..540c7ccb 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -280,9 +280,24 @@ def buildnml(case, caseroot, compname): # Convert namelist input file to list: namelist_infile_list = [namelist_infile] - #----------------------------- - # Initialize namelist defaults: - #----------------------------- + #------------------------------------------------- + # Initialize only "vert_coord_nl" namelist entries + #------------------------------------------------- + + # Initalize vert_coord_nl defaults: + nmlgen.init_defaults(namelist_infile_list, cam_nml_dict, + skip_groups=filter(lambda group: group !='vert_coord_nl', + config.nml_groups)) + + #-------------------------------------------- + # Set "nlev" namelist attribute to equal pver + #-------------------------------------------- + + cam_nml_dict["nlev"] = nmlgen.get_value("pver") + + #--------------------------------- + # Initialize all namelist defaults: + #--------------------------------- # Initalize namelist defaults in used namelist groups: nmlgen.init_defaults(namelist_infile_list, cam_nml_dict) diff --git a/cime_config/config_component.xml b/cime_config/config_component.xml index 9873d45b..9a5831f3 100644 --- a/cime_config/config_component.xml +++ b/cime_config/config_component.xml @@ -165,6 +165,7 @@ -phys held_suarez -phys kessler -chem terminator -analytic_ic --> --physics-suites kessler --analytic_ic + --physics-suites held_suarez_1994 --analytic_ic --dyn none --physics-suites adiabatic diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index 5e066df2..67c3907e 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -7214,9 +7214,15 @@ Number of model vertical levels. Default: 30 + + Please note that changing this variable + will likely change other namelist variables + via the "nlev" attribute. - 30 + 30 + 26 + 32 diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index be87fccc..bfd653d6 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -21,7 +21,7 @@ module physconst use shr_const_mod, only: shr_const_cpice use vert_coord, only: pver, pverp use physics_grid, only: pcols => columns_on_task - use cam_abortutils, only: endrun + use cam_abortutils, only: endrun, check_allocate use string_utils, only: to_str use constituents, only: pcnst @@ -383,7 +383,7 @@ subroutine physconst_readnl(nlfile) dry_air_species_num = 0 water_species_in_air_num = 0 do i = 1, num_names_max - if (.not. LEN(TRIM(dry_air_species(i)))==0) then + if ((LEN_TRIM(dry_air_species(i)) > 0) .and. (TRIM(dry_air_species(i)) /= 'N2')) then dry_air_species_num = dry_air_species_num + 1 end if if (.not. LEN(TRIM(water_species_in_air(i)))==0) then @@ -452,46 +452,32 @@ subroutine physconst_init(pcols, pver, pverp) ! 
Allocate constituent dependent properties !------------------------------------------------------------------------ allocate(cpairv(pcols,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate cpairv(pcols,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'cpairv(pcols,pver)', & + file=__FILE__, line=__LINE__) allocate(rairv(pcols,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate rairv(pcols,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'rairv(pcols,pver)', & + file=__FILE__, line=__LINE__) allocate(cappav(pcols,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate cappav(pcols,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'cappav(pcols,pver)', & + file=__FILE__, line=__LINE__) allocate(mbarv(pcols,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate mbarv(pcols,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'mbarv(pcols,pver)', & + file=__FILE__, line=__LINE__) allocate(zvirv(pcols,pver), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate zvirv(pcols,pver) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'zvirv(pcols,pver)', & + file=__FILE__, line=__LINE__) allocate(kmvis(pcols,pverp), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate kmvis(pcols,pverp) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'kmvis(pcols,pverp)', & + file=__FILE__, line=__LINE__) allocate(kmcnd(pcols,pverp), stat=ierr) - if (ierr /= 0) then - call endrun(subname//': allocate kmcnd(pcols,pverp) failed with stat: '//& - to_str(ierr)) - end if + call check_allocate(ierr, subname, 'kmcnd(pcols,pverp)', & + file=__FILE__, line=__LINE__) !------------------------------------------------------------------------ ! 
Initialize constituent dependent properties @@ -556,53 +542,37 @@ subroutine composition_init() i = dry_air_species_num+water_species_in_air_num allocate(thermodynamic_active_species_idx(i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_idx(i)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'thermodynamic_active_species_idx(i)', & + file=__FILE__, line=__LINE__) allocate(thermodynamic_active_species_idx_dycore(i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_idx_dycore(i)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'thermodynamic_active_species_idx_dycore(i)', & + file=__FILE__, line=__LINE__) allocate(thermodynamic_active_species_cp(0:i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_cp(0:i)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'thermodynamic_active_species_cp(0:i)', & + file=__FILE__, line=__LINE__) allocate(thermodynamic_active_species_cv(0:i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_cv(0:i)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'thermodynamic_active_species_cv(0:i)', & + file=__FILE__, line=__LINE__) allocate(thermodynamic_active_species_R(0:i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_R(0:i)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'thermodynamic_active_species_R(0:i)', & + file=__FILE__, line=__LINE__) i = dry_air_species_num - allocate(thermodynamic_active_species_mwi(i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_mwi(i)'//& - ' failed with stat: '//to_str(iret)) - end if + allocate(thermodynamic_active_species_mwi(0:i), stat=iret) + call check_allocate(iret, subname, 'thermodynamic_active_species_mwi(0:i)', & + file=__FILE__, line=__LINE__) - allocate(thermodynamic_active_species_kv(i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_kv(i)'//& - ' failed with stat: '//to_str(iret)) - end if + allocate(thermodynamic_active_species_kv(0:i), stat=iret) + call check_allocate(iret, subname, 'thermodynamic_active_species_kv(0:i)', & + file=__FILE__, line=__LINE__) - allocate(thermodynamic_active_species_kc(i), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate thermodynamic_active_species_kc(i)'//& - ' failed with stat: '//to_str(iret)) - end if + allocate(thermodynamic_active_species_kc(0:i), stat=iret) + call check_allocate(iret, subname, 'thermodynamic_active_species_kc(0:i)', & + file=__FILE__, line=__LINE__) thermodynamic_active_species_idx = -999 thermodynamic_active_species_idx_dycore = -999 @@ -627,18 +597,17 @@ subroutine composition_init() ! last major species in dry_air_species is derived from the others and constants associated with it ! are initialized here ! - if (TRIM(dry_air_species(dry_air_species_num))=='N2') then + if (TRIM(dry_air_species(dry_air_species_num+1))=='N2') then ! call cnst_get_ind('N' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. 
if (ix<1) then - write(iulog, *) subname//' dry air component not found: ', dry_air_species(dry_air_species_num) + write(iulog, *) subname//' dry air component not found: ', dry_air_species(dry_air_species_num+1) call endrun(subname // ':: dry air component not found') !Un-comment once constituents are enabled -JN: #if 0 else mw = 2.0_kind_phys*cnst_mw(ix) - icnst = dry_air_species_num - thermodynamic_active_species_idx(icnst) = 1!note - this is not used since this tracer value is derived + icnst = 0 ! index for the derived tracer N2 thermodynamic_active_species_cp (icnst) = 0.5_kind_phys*shr_const_rgas*(2._kind_phys+dof2)/mw !N2 thermodynamic_active_species_cv (icnst) = 0.5_kind_phys*shr_const_rgas*dof2/mw !N2 thermodynamic_active_species_R (icnst) = shr_const_rgas/mw @@ -763,7 +732,7 @@ subroutine composition_init() if (i>0) then if (masterproc) then write(iulog, *) "Dry air composition ",TRIM(dry_air_species(i)),& - icnst,thermodynamic_active_species_idx(icnst),& + icnst-1,& thermodynamic_active_species_mwi(icnst),& thermodynamic_active_species_cp(icnst),& thermodynamic_active_species_cv(icnst) @@ -1399,7 +1368,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp kmvis(i,j,k) = 0.0_r8 kmcnd(i,j,k) = 0.0_r8 residual = 1.0_r8 - do icnst=1,dry_air_species_num-1 + do icnst=1,dry_air_species_num ispecies = idx_local(icnst) mm = 0.5_r8*(tracer(i,j,k,ispecies)*factor(i,j,k)+tracer(i,j,k-1,ispecies)*factor(i,j,k-1)) kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* & @@ -1409,7 +1378,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp residual = residual - mm end do icnst=dry_air_species_num - ispecies = idx_local(icnst) + icnst=0 ! N2 kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* & thermodynamic_active_species_mwi(icnst)*residual kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* & @@ -1508,7 +1477,7 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec cp_dry = 0.0_kind_phys residual = 1.0_kind_phys - do nq=1,dry_air_species_num-1 + do nq=1,dry_air_species_num m_cnst = active_species_idx(nq) do k=k0,k1 do j=j0,j1 @@ -1520,7 +1489,7 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec end do end do end do - nq = dry_air_species_num + nq = 0 ! N2 do k=k0,k1 do j=j0,j1 do i = i0,i1 @@ -1563,7 +1532,7 @@ subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_speci R_dry = 0.0_r8 residual = 1.0_r8 - do nq=1,dry_air_species_num-1 + do nq=1,dry_air_species_num m_cnst = active_species_idx_dycore(nq) do k=k0,k1 do j=j0,j1 @@ -1576,9 +1545,9 @@ subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_speci end do end do ! - ! last dry air constituent derived from the others + ! N2 derived from the others ! 
- nq = dry_air_species_num + nq = 0 do k=k0,k1 do j=j0,j1 do i = i0,i1 @@ -1848,10 +1817,8 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ if (present(pint_out)) pmid_out=pmid if (present(rhoi_dry)) then allocate(R_dry(i0:i1,j0:j1,1:k1+1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate R_dry(i0:i1,j0:j1,1:k1+1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'R_dry(i0:i1,j0:j1,1:k1+1)', & + file=__FILE__, line=__LINE__) if (tracer_mass) then call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) @@ -1872,10 +1839,8 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ end if if (present(rho_dry)) then allocate(R_dry(i0:i1,j0:j1,1:k1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate R_dry(i0:i1,j0:j1,1:k1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'R_dry(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) if (tracer_mass) then call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) @@ -1924,7 +1889,7 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar mbarv = 0.0_r8 residual = 1.0_r8 - do nq=1,dry_air_species_num-1 + do nq=1,dry_air_species_num m_cnst = active_species_idx(nq) do k=k0,k1 do j=j0,j1 @@ -1936,7 +1901,7 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar end do end do end do - nq = dry_air_species_num + nq = 0 ! N2 do k=k0,k1 do j=j0,j1 do i = i0,i1 @@ -1973,16 +1938,12 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, kappa_dry= real(rair/cpair, r8) else allocate(R_dry(i0:i1,j0:j1,k0:k1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate R_dry(i0:i1,j0:j1,k0:k1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'R_dry(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) allocate(cp_dry(i0:i1,j0:j1,k0:k1), stat=iret) - if (iret /= 0) then - call endrun(subname//': allocate cp_dry(i0:i1,j0:j1,k0:k1)'//& - ' failed with stat: '//to_str(iret)) - end if + call check_allocate(iret, subname, 'cp_dry(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) if (present(fact)) then call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,real(tracer, kind_phys),active_species_idx,cp_dry,& diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 48387eda..ff4b7c8f 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -906,7 +906,7 @@ subroutine dyn_init(dyn_in, dyn_out) do m_cnst = 1, qsize call addfld ('F'//trim(cnst_name_gll(m_cnst))//'_gll', (/ 'lev' /), 'I', 'kg/kg/s', & - trim(cnst_longname(m_cnst))//' mixing ratio forcing term (q_new-q_old) on GLL grid', gridname='GLL') + trim(cnst_longname_gll(m_cnst))//' mixing ratio forcing term (q_new-q_old) on GLL grid', gridname='GLL') end do ! Energy diagnostics and axial angular momentum diagnostics diff --git a/src/utils/cam_map_utils.F90 b/src/utils/cam_map_utils.F90 index 37b8f095..ae4969b1 100644 --- a/src/utils/cam_map_utils.F90 +++ b/src/utils/cam_map_utils.F90 @@ -680,7 +680,7 @@ subroutine cam_filemap_get_filemap(this, fieldlens, filelens, filemap, & end if ! 
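! Illustrative sketch (not part of this patch): in the get_kappa_dry hunk
! above, kappa is the ratio of the composition-dependent dry-air gas constant
! and heat capacity (hence both R_dry and cp_dry work arrays are allocated);
! with no advected dry-air species it falls back to the constant rair/cpair.
! At a single point (hypothetical names):
pure function kappa_dry_point(r_dry, cp_dry) result(kappa)
   integer, parameter :: r8 = selected_real_kind(12)
   real(r8), intent(in) :: r_dry   ! dry-air gas constant [J/kg/K]
   real(r8), intent(in) :: cp_dry  ! dry-air heat capacity at constant pressure [J/kg/K]
   real(r8) :: kappa
   kappa = r_dry/cp_dry            ! roughly 287/1004 ~ 0.286 for standard dry air
end function kappa_dry_point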
- fileSize = product(filelens) + fileSize = product(int(filelens,kind=iMap)) srccnt = size(fieldlens) srclens(1:srccnt) = fieldlens(1:srccnt) if (srccnt < 7) then From 78052111e629c2282bad9d97d862c4faff3158b8 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 15 Jul 2021 21:04:27 -0600 Subject: [PATCH 32/45] Remove nlev from cam_config, as it is no longer necessary. --- cime_config/cam_config.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 032bc0ba..6f389a95 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -597,11 +597,9 @@ def __init__(self, case, case_log): # Level information for CAM is part of the atm grid name # and must be stripped out - case_nlev = '' match = re.match(r'(.+)z(\d+)', atm_grid) if match: atm_grid = match.groups()[0] - case_nlev = match.groups()[1] # End if # Save user options as list @@ -783,19 +781,9 @@ def __init__(self, case, case_log): raise CamConfigValError(emsg.format(user_dyn_opt, dyn)) # End if - #---------------------------------------- - # Set CAM grid variables (nlat,nlon,nlev) - #---------------------------------------- - - # Set number of vertical levels - if case_nlev: - nlev = case_nlev - else: - nlev = '30' # Default value - - # Add vertical levels to configure object - nlev_desc = "Number of vertical levels." - self.create_config("nlev", nlev_desc, nlev, None, is_nml_attr=True) + #--------------------------------------- + # Set CAM grid variables (nlat and nlon) + #--------------------------------------- #Set horizontal dimension variables: if dyn == "se": From 619707d6547560b91fe62e02ba992e378f19bb79 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 15 Jul 2021 21:10:56 -0600 Subject: [PATCH 33/45] Fix unit tests. --- test/unit/cam_config_unit_tests.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index f98b29fe..20867b7a 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -136,11 +136,11 @@ def test_config_set_value_check(self): #Set new value: newval = 200 - #Set nlev to "newval": - self.test_config_cam.set_value("nlev", newval) + #Set pcols to "newval": + self.test_config_cam.set_value("pcols", newval) #Get new value: - testval = self.test_config_cam.get_value("nlev") + testval = self.test_config_cam.get_value("pcols") #Check that testval matches self.assertEqual(testval, newval) @@ -255,12 +255,12 @@ def test_config_set_value_type_check(self): """ #Set error message: - ermsg = "ERROR: Value provided for variable, 'nlev', must be either an integer or a string. Currently it is type " + ermsg = "ERROR: Value provided for variable, 'pcols', must be either an integer or a string. Currently it is type " #Expect "CamConfigTypeError": with self.assertRaises(CamConfigTypeError) as typerr: #Run "set_value" method on made-up variable name: - self.test_config_cam.set_value("nlev", 5.0) + self.test_config_cam.set_value("pcols", 5.0) #Check that error message matches what's expected: self.assertEqual(ermsg, str(typerr.exception)) From 436245f77653f1f4b1d924cea32f046f0603375a Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Thu, 29 Jul 2021 10:29:40 -0600 Subject: [PATCH 34/45] Make buildnml and buildlib use python3 if called directly or as a subprocess (Github issue #140). 
--- cime_config/buildlib | 2 +- cime_config/buildnml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index 41a0b68b..0dd4a77b 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ create the cam library diff --git a/cime_config/buildnml b/cime_config/buildnml index 540c7ccb..728cc77d 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ CAM namelist creator From 7b96279f6c2e51b3dab95e270bd123c494a5f18f Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 16 Aug 2021 10:27:51 -0600 Subject: [PATCH 35/45] Create new 'dynconst' and 'dyn_thermo' modules to handle kind conversions. --- cime_config/buildnml | 3 +- cime_config/cam_config.py | 40 +- src/data/physconst.F90 | 385 +++--- .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 20 +- .../se/dycore/coordinate_systems_mod.F90 | 12 +- src/dynamics/se/dycore/cube_mod.F90 | 24 +- src/dynamics/se/dycore/derivative_mod.F90 | 90 +- src/dynamics/se/dycore/fvm_mod.F90 | 16 +- src/dynamics/se/dycore/global_norms_mod.F90 | 49 +- src/dynamics/se/dycore/interpolate_mod.F90 | 12 +- src/dynamics/se/dycore/mesh_mod.F90 | 8 +- src/dynamics/se/dycore/prim_advance_mod.F90 | 99 +- src/dynamics/se/dycore/prim_advection_mod.F90 | 21 +- src/dynamics/se/dycore/prim_init.F90 | 4 +- src/dynamics/se/dycore/quadrature_mod.F90 | 34 +- src/dynamics/se/dyn_comp.F90 | 7 +- src/dynamics/se/dyn_grid.F90 | 22 +- src/dynamics/se/test_fvm_mapping.F90 | 6 +- src/dynamics/utils/dyn_thermo.F90 | 1044 +++++++++++++++++ src/dynamics/utils/dynconst.F90 | 76 ++ src/physics/utils/phys_comp.F90 | 2 + test/unit/cam_config_unit_tests.py | 73 +- 22 files changed, 1583 insertions(+), 464 deletions(-) create mode 100644 src/dynamics/utils/dyn_thermo.F90 create mode 100644 src/dynamics/utils/dynconst.F90 diff --git a/cime_config/buildnml b/cime_config/buildnml index 728cc77d..10af10cd 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -286,7 +286,8 @@ def buildnml(case, caseroot, compname): # Initalize vert_coord_nl defaults: nmlgen.init_defaults(namelist_infile_list, cam_nml_dict, - skip_groups=filter(lambda group: group !='vert_coord_nl', + skip_default_for_groups=\ + filter(lambda group: group !='vert_coord_nl', config.nml_groups)) #-------------------------------------------- diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 6f389a95..83e73e01 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -883,6 +883,26 @@ def __init__(self, case, case_log): self.create_config("physics_suites", phys_desc, user_config_opts.physics_suites) + #------------------------------------------------------------------ + # Set Fortran kinds for real-type variables in dynamics and physics + #------------------------------------------------------------------ + + kind_valid_vals = ["REAL32","REAL64"] + + #dycore kind: + self.create_config("dyn_kind", + "Fortran kind used in dycore for type real.", + user_config_opts.dyn_kind, kind_valid_vals) + + #physics kind: + self.create_config("phys_kind", + "Fortran kind used in physics for type real.", + user_config_opts.phys_kind, kind_valid_vals) + + # Set phys->dyn kind conversion CPPdef if kinds are different: + if self.get_value("dyn_kind") != self.get_value("phys_kind"): + self.add_cppdef("DYN_PHYS_KIND_DIFF") + #-------------------------------------------------------- # Print CAM configure settings 
and values to debug logger #-------------------------------------------------------- @@ -959,6 +979,12 @@ def parse_config_opts(cls, config_opts, test_mode=False): action='store_true', required=False, help="""Flag to turn on Analytic Initial Conditions (ICs).""") + parser.add_argument("--dyn_kind", "-dyn_kind", + type=str, required=False, default="REAL64", + help="""Fortran kind used in dycore for type real.""") + parser.add_argument("--phys_kind", "-phys_kind", + type=str, required=False, default="REAL64", + help="""Fortran kind used in physics for type real.""") popts = [opt for opt in config_opts.split(" ") if opt] if test_mode: @@ -1231,15 +1257,15 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): #If there is no "physics_suite" line, then throw an error: if not phys_suite_lines: - emsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ - This is required if more than one suite is listed\n \ - in CAM_CONFIG_OPTS." + emsg = "No 'physics_suite' variable is present in user_nl_cam.\n" + emsg += "This is required if more than one suite is listed\n" + emsg += "in CAM_CONFIG_OPTS." raise CamConfigValError(emsg) #If there is more than one "physics_suite" entry, then throw an error: if len(phys_suite_lines) > 1: - emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ - Only one 'physics_suite' line is allowed." + emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n" + emsg += "Only one 'physics_suite' line is allowed." raise CamConfigValError(emsg) #The split string list exists inside another, otherwise empty list, so extract @@ -1260,8 +1286,8 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): #Check that physics suite specified is actually in config list: if phys_suite_val not in phys_suites: - emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ - listed in CAM_CONFIG_OPTS" + emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n" + emsg += "listed in CAM_CONFIG_OPTS" raise CamConfigValError(emsg) else: diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index bfd653d6..1d486113 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -1042,20 +1042,17 @@ end subroutine physconst_calc_kappav !**************************************************************************************************************** ! subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 - - integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac ! array bounds - real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracers; quantity specified by mixing_ratio arg - integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio - ! 2 => tracer is mass (q*dp) - integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array - real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness - real(r8), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness - real(r8), optional,intent(out) :: ps(i0:i1,j0:j1) ! surface pressure (if ps present then ptop - ! must be present) - real(r8), optional,intent(in) :: ptop ! pressure at model top + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac ! array bounds + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! 
tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_phys), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness + real(kind_phys), optional,intent(out) :: ps(i0:i1,j0:j1) ! surface pressure (if ps present then ptop + ! must be present) + real(kind_phys), optional,intent(in) :: ptop ! pressure at model top integer :: i,j,k,m_cnst,nq @@ -1108,23 +1105,19 @@ end subroutine get_dp subroutine get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, & dp_dry, ptop, pmid, pint, dp) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 - - integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! array bounds - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg - integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio - ! 2 => tracer is mass (q*dp) - integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array - real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness - real(r8), intent(in) :: ptop ! model top pressure - real(r8), intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure - real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,nlev+1) ! half-level pressure - real(r8), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) ! presure level thickness - - real(r8) :: dp_local(i0:i1,j0:j1,nlev) ! local pressure level thickness - real(r8) :: pint_local(i0:i1,j0:j1,nlev+1) ! local interface pressure + integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! array bounds + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness + real(kind_phys), intent(in) :: ptop ! model top pressure + real(kind_phys), intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure + real(kind_phys), optional, intent(out) :: pint(i0:i1,j0:j1,nlev+1) ! half-level pressure + real(kind_phys), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) ! presure level thickness + + real(kind_phys) :: dp_local(i0:i1,j0:j1,nlev) ! local pressure level thickness + real(kind_phys) :: pint_local(i0:i1,j0:j1,nlev+1) ! local interface pressure integer :: k call get_dp(i0,i1,j0,j1,1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp_local) @@ -1146,18 +1139,16 @@ end subroutine get_pmid_from_dpdry !************************************************************************************************************************* ! subroutine get_pmid_from_dp(i0,i1,j0,j1,k0,k1,dp,ptop,pmid,pint) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 + use physics_types, only: dycore_gz_log_calc - integer, intent(in) :: i0,i1,j0,j1,k0,k1 ! array bounds - real(r8), intent(in) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness - real(r8), intent(in) :: ptop ! 
pressure at model top - real(r8), intent(out) :: pmid(i0:i1,j0:j1,k0:k1) ! mid (full) level pressure - real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,k0:k1+1) ! pressure at interfaces (half levels) + integer, intent(in) :: i0,i1,j0,j1,k0,k1 ! array bounds + real(kind_phys), intent(in) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_phys), intent(in) :: ptop ! pressure at model top + real(kind_phys), intent(out) :: pmid(i0:i1,j0:j1,k0:k1) ! mid (full) level pressure + real(kind_phys), optional, intent(out) :: pint(i0:i1,j0:j1,k0:k1+1) ! pressure at interfaces (half levels) - real(r8) :: pint_local(i0:i1,j0:j1,k0:k1+1) + real(kind_phys) :: pint_local(i0:i1,j0:j1,k0:k1+1) integer :: k pint_local(:,:,k0) = ptop @@ -1171,7 +1162,7 @@ subroutine get_pmid_from_dp(i0,i1,j0,j1,k0,k1,dp,ptop,pmid,pint) end do else do k=k0,k1 - pmid(:,:,k) = 0.5_r8*(pint_local(:,:,k)+pint_local(:,:,k+1)) + pmid(:,:,k) = 0.5_kind_phys*(pint_local(:,:,k)+pint_local(:,:,k+1)) end do end if if (present(pint)) pint=pint_local @@ -1186,23 +1177,19 @@ end subroutine get_pmid_from_dp subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,& dp_dry,ptop,p00,inv_exner,exner,poverp0) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 - - integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! index bounds - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg - integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio - ! 2 => tracer is mass (q*dp) - integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array - real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness - real(r8), intent(in) :: ptop ! pressure at model top - real(r8), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa) - logical , intent(in) :: inv_exner ! logical for outputting inverse Exner or Exner pressure - real(r8), intent(out) :: exner(i0:i1,j0:j1,nlev) - real(r8), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev)! for efficiency when a routine needs this variable - - real(r8) :: pmid(i0:i1,j0:j1,nlev),kappa_dry(i0:i1,j0:j1,nlev) + integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! index bounds + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg + integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio + ! 2 => tracer is mass (q*dp) + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness + real(kind_phys), intent(in) :: ptop ! pressure at model top + real(kind_phys), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa) + logical , intent(in) :: inv_exner ! logical for outputting inverse Exner or Exner pressure + real(kind_phys), intent(out) :: exner(i0:i1,j0:j1,nlev) + real(kind_phys), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev) ! for efficiency when a routine needs this variable + + real(kind_phys) :: pmid(i0:i1,j0:j1,nlev),kappa_dry(i0:i1,j0:j1,nlev) ! ! compute mid level pressure ! 
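! Illustrative sketch (not part of this patch): get_exner, continued in the
! next hunk, evaluates the Exner function from the dry mid-level pressure and
! the composition-dependent kappa, with inv_exner selecting (p00/p)**kappa
! instead of (p/p00)**kappa.  Point-wise (hypothetical names):
pure function exner_point(pmid, p00, kappa, inverse) result(ex)
   integer, parameter :: r8 = selected_real_kind(12)
   real(r8), intent(in) :: pmid    ! mid-level pressure [Pa]
   real(r8), intent(in) :: p00     ! reference pressure, typically 1000 hPa [Pa]
   real(r8), intent(in) :: kappa   ! R_dry/cp_dry
   logical,  intent(in) :: inverse ! .true. -> return the inverse Exner function
   real(r8) :: ex
   if (inverse) then
      ex = (p00/pmid)**kappa   ! multiplying T by this gives potential temperature
   else
      ex = (pmid/p00)**kappa
   end if
end function exner_point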
@@ -1213,7 +1200,7 @@ subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_i if (mixing_ratio==1) then call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry) else - call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry,1.0_r8/dp_dry) + call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry,1.0_kind_phys/dp_dry) end if if (inv_exner) then exner(:,:,:) = (p00/pmid(:,:,:))**kappa_dry(:,:,:) @@ -1230,25 +1217,22 @@ end subroutine get_exner !**************************************************************************************************************** ! subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pmid) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 use physics_types, only: dycore_gz_log_calc - integer, intent(in) :: i0,i1,j0,j1,nlev ! array bounds - real(r8), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness - real(r8), intent(in) :: T_v (i0:i1,j0:j1,nlev) ! virtual temperature - real(r8), intent(in) :: R_dry(i0:i1,j0:j1,nlev) ! R dry - real(r8), intent(in) :: phis (i0:i1,j0:j1) ! surface geopotential - real(r8), intent(in) :: ptop ! model top presure - real(r8), intent(out) :: gz(i0:i1,j0:j1,nlev) ! geopotential - real(r8), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure + integer, intent(in) :: i0,i1,j0,j1,nlev ! array bounds + real(kind_phys), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness + real(kind_phys), intent(in) :: T_v (i0:i1,j0:j1,nlev) ! virtual temperature + real(kind_phys), intent(in) :: R_dry(i0:i1,j0:j1,nlev) ! R dry + real(kind_phys), intent(in) :: phis (i0:i1,j0:j1) ! surface geopotential + real(kind_phys), intent(in) :: ptop ! model top presure + real(kind_phys), intent(out) :: gz(i0:i1,j0:j1,nlev) ! geopotential + real(kind_phys), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! 
mid-level pressure - real(r8), dimension(i0:i1,j0:j1,nlev) :: pmid_local - real(r8), dimension(i0:i1,j0:j1,nlev+1) :: pint - real(r8), dimension(i0:i1,j0:j1) :: gzh, Rdry_tv - integer :: k + real(kind_phys), dimension(i0:i1,j0:j1,nlev) :: pmid_local + real(kind_phys), dimension(i0:i1,j0:j1,nlev+1) :: pint + real(kind_phys), dimension(i0:i1,j0:j1) :: gzh, Rdry_tv + integer :: k call get_pmid_from_dp(i0,i1,j0,j1,1,nlev,dp,ptop,pmid_local,pint) @@ -1259,13 +1243,13 @@ subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pm if (dycore_gz_log_calc) then do k=nlev,1,-1 Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k) - gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*(1.0_r8-pint(:,:,k)/pmid_local(:,:,k)) + gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*(1.0_kind_phys-pint(:,:,k)/pmid_local(:,:,k)) gzh(:,:) = gzh(:,:) + Rdry_tv(:,:)*(log(pint(:,:,k+1))-log(pint(:,:,k))) end do else do k=nlev,1,-1 Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k) - gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*0.5_r8*dp(:,:,k)/pmid_local(:,:,k) + gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*0.5_kind_phys*dp(:,:,k)/pmid_local(:,:,k) gzh(:,:) = gzh(:,:) + Rdry_tv(:,:)*dp(:,:,k)/pmid_local(:,:,k) end do end if @@ -1281,31 +1265,27 @@ end subroutine get_gz_given_dp_Tv_Rdry subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sponge_factor,kmvis,kmcnd, ntrac,& tracer, fact, active_species_idx_dycore, mbarv_in) - !Given that this routine is only used with the dycore structures, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 - ! args - integer, intent(in) :: i0,i1,j0,j1,k1,nlev - real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature - integer, intent(in) :: get_at_interfaces ! 1: compute kmvis and kmcnd at interfaces - ! 0: compute kmvis and kmcnd at mid-levels - real(r8), intent(in) :: sponge_factor(1:k1) ! multiply kmvis and kmcnd with sponge_factor (for sponge layer) - real(r8), intent(out) :: kmvis(i0:i1,j0:j1,1:k1+get_at_interfaces) - real(r8), intent(out) :: kmcnd(i0:i1,j0:j1,1:k1+get_at_interfaces) - integer , intent(in) :: ntrac - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracer array - integer, intent(in), optional :: active_species_idx_dycore(:) ! index of active species in tracer - real(r8), intent(in), optional :: fact(i0:i1,j0:j1,k1) ! if tracer is in units of mass or moist - ! fact converts to dry mixing ratio: tracer/fact - real(r8), intent(in), optional :: mbarv_in(i0:i1,j0:j1,1:k1) ! composition dependent atmosphere mean mass + integer, intent(in) :: i0,i1,j0,j1,k1,nlev + real(kind_phys), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature + integer, intent(in) :: get_at_interfaces ! 1: compute kmvis and kmcnd at interfaces + ! 0: compute kmvis and kmcnd at mid-levels + real(kind_phys), intent(in) :: sponge_factor(1:k1) ! multiply kmvis and kmcnd with sponge_factor (for sponge layer) + real(kind_phys), intent(out) :: kmvis(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(kind_phys), intent(out) :: kmcnd(i0:i1,j0:j1,1:k1+get_at_interfaces) + integer , intent(in) :: ntrac + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracer array + integer, intent(in), optional :: active_species_idx_dycore(:) ! index of active species in tracer + real(kind_phys), intent(in), optional :: fact(i0:i1,j0:j1,k1) ! if tracer is in units of mass or moist + ! fact converts to dry mixing ratio: tracer/fact + real(kind_phys), intent(in), optional :: mbarv_in(i0:i1,j0:j1,1:k1) ! composition dependent atmosphere mean mass ! ! local vars ! 
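! Illustrative sketch (not part of this patch): the get_gz_given_dp_Tv_Rdry
! hunk above integrates the hydrostatic relation d(gz) = -R_dry*T_v*dln(p)
! upward from the surface geopotential, layer by layer.  A single-column
! version of the non-logarithmic branch used there (hypothetical names;
! level 1 is the model top and level nlev the bottom, as in CAM):
subroutine column_geopotential(nlev, r_dry, t_v, dp, pmid, phis, gz)
   integer, parameter :: r8 = selected_real_kind(12)
   integer,  intent(in)  :: nlev
   real(r8), intent(in)  :: r_dry(nlev), t_v(nlev)  ! dry gas constant, virtual temperature
   real(r8), intent(in)  :: dp(nlev), pmid(nlev)    ! layer thickness and mid-level pressure [Pa]
   real(r8), intent(in)  :: phis                    ! surface geopotential [m2/s2]
   real(r8), intent(out) :: gz(nlev)                ! mid-level geopotential [m2/s2]
   real(r8) :: gzh
   integer  :: k
   gzh = phis
   do k = nlev, 1, -1                               ! start at the bottom layer
      gz(k) = gzh + r_dry(k)*t_v(k)*0.5_r8*dp(k)/pmid(k)
      gzh   = gzh + r_dry(k)*t_v(k)*dp(k)/pmid(k)
   end do
end subroutine column_geopotential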
integer :: i,j,k,icnst,ispecies - real(r8):: mbarvi,mm,residual ! Mean mass at mid level - real(r8):: cnst_vis, cnst_cnd, temp_local - real(r8), dimension(i0:i1,j0:j1,1:k1) :: factor,mbarv + real(kind_phys):: mbarvi,mm,residual ! Mean mass at mid level + real(kind_phys):: cnst_vis, cnst_cnd, temp_local + real(kind_phys), dimension(i0:i1,j0:j1,1:k1) :: factor,mbarv integer, dimension(thermodynamic_active_species_num):: idx_local !-------------------------------------------- ! Set constants needed for updates @@ -1313,13 +1293,13 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp if (dry_air_species_num==0) then - cnst_vis = (kv1*mmro2*o2_mwi + kv2*mmrn2*n2_mwi)*mbar*1.e-7_r8 - cnst_cnd = (kc1*mmro2*o2_mwi + kc2*mmrn2*n2_mwi)*mbar*1.e-5_r8 + cnst_vis = (kv1*mmro2*o2_mwi + kv2*mmrn2*n2_mwi)*mbar*1.e-7_kind_phys + cnst_cnd = (kc1*mmro2*o2_mwi + kc2*mmrn2*n2_mwi)*mbar*1.e-5_kind_phys if (get_at_interfaces==1) then do k=2,k1 do j=j0,j1 do i=i0,i1 - temp_local = 0.5_r8*(temp(i,j,k)+temp(i,j,k-1)) + temp_local = 0.5_kind_phys*(temp(i,j,k)+temp(i,j,k-1)) kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp_local**kv4 kmcnd(i,j,k) = sponge_factor(k)*cnst_cnd*temp_local**kc4 end do @@ -1328,8 +1308,8 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp ! ! extrapolate top level value ! - kmvis(i0:i1,j0:j1,1) = 1.5_r8*kmvis(i0:i1,j0:j1,2)-0.5_r8*kmvis(i0:i1,j0:j1,3) - kmcnd(i0:i1,j0:j1,1) = 1.5_r8*kmcnd(i0:i1,j0:j1,2)-0.5_r8*kmcnd(i0:i1,j0:j1,3) + kmvis(i0:i1,j0:j1,1) = 1.5_kind_phys*kmvis(i0:i1,j0:j1,2)-0.5_kind_phys*kmvis(i0:i1,j0:j1,3) + kmcnd(i0:i1,j0:j1,1) = 1.5_kind_phys*kmcnd(i0:i1,j0:j1,2)-0.5_kind_phys*kmcnd(i0:i1,j0:j1,3) else if (get_at_interfaces==0) then do k=1,k1 do j=j0,j1 @@ -1351,7 +1331,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp if (present(fact)) then factor = fact(:,:,:) else - factor = 1.0_r8 + factor = 1.0_kind_phys endif if (present(mbarv_in)) then mbarv = mbarv_in @@ -1365,12 +1345,12 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp do k=2,k1 do j=j0,j1 do i=i0,i1 - kmvis(i,j,k) = 0.0_r8 - kmcnd(i,j,k) = 0.0_r8 - residual = 1.0_r8 + kmvis(i,j,k) = 0.0_kind_phys + kmcnd(i,j,k) = 0.0_kind_phys + residual = 1.0_kind_phys do icnst=1,dry_air_species_num ispecies = idx_local(icnst) - mm = 0.5_r8*(tracer(i,j,k,ispecies)*factor(i,j,k)+tracer(i,j,k-1,ispecies)*factor(i,j,k-1)) + mm = 0.5_kind_phys*(tracer(i,j,k,ispecies)*factor(i,j,k)+tracer(i,j,k-1,ispecies)*factor(i,j,k-1)) kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* & thermodynamic_active_species_mwi(icnst)*mm kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* & @@ -1384,17 +1364,17 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* & thermodynamic_active_species_mwi(icnst)*residual - temp_local = .5_r8*(temp(i,j,k-1)+temp(i,j,k)) - mbarvi = 0.5_r8*(mbarv(i,j,k-1)+mbarv(i,j,k)) - kmvis(i,j,k) = kmvis(i,j,k)*mbarvi*temp_local**kv4*1.e-7_r8 - kmcnd(i,j,k) = kmcnd(i,j,k)*mbarvi*temp_local**kc4*1.e-5_r8 + temp_local = 0.5_kind_phys*(temp(i,j,k-1)+temp(i,j,k)) + mbarvi = 0.5_kind_phys*(mbarv(i,j,k-1)+mbarv(i,j,k)) + kmvis(i,j,k) = kmvis(i,j,k)*mbarvi*temp_local**kv4*1.e-7_kind_phys + kmcnd(i,j,k) = kmcnd(i,j,k)*mbarvi*temp_local**kc4*1.e-5_kind_phys enddo enddo end do do j=j0,j1 do i=i0,i1 - kmvis(i,j,1) = 1.5_r8*kmvis(i,j,2)-.5_r8*kmvis(i,j,3) - kmcnd(i,j,1) = 
1.5_r8*kmcnd(i,j,2)-.5_r8*kmcnd(i,j,3) + kmvis(i,j,1) = 1.5_kind_phys*kmvis(i,j,2)-0.5_kind_phys*kmvis(i,j,3) + kmcnd(i,j,1) = 1.5_kind_phys*kmcnd(i,j,2)-0.5_kind_phys*kmcnd(i,j,3) kmvis(i,j,k1+1) = kmvis(i,j,k1) kmcnd(i,j,k1+1) = kmcnd(i,j,k1) end do @@ -1414,18 +1394,14 @@ end subroutine get_molecular_diff_coef ! subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvis_ref,kmcnd_ref,rho_ref) - !Given that this routine is only used with the dycore structures, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 - ! args - integer, intent(in) :: k0,k1 !min/max vertical index - real(r8), intent(in) :: tref !reference temperature - real(r8), intent(in) :: press(k0:k1) !pressure - real(r8), intent(in) :: sponge_factor(k0:k1) !multiply kmvis and kmcnd with sponge_factor (for sponge layer) - real(r8), intent(out) :: kmvis_ref(k0:k1) !reference molecular diffusion coefficient - real(r8), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient - real(r8), intent(out) :: rho_ref(k0:k1) !reference density + integer, intent(in) :: k0,k1 !min/max vertical index + real(kind_phys), intent(in) :: tref !reference temperature + real(kind_phys), intent(in) :: press(k0:k1) !pressure + real(kind_phys), intent(in) :: sponge_factor(k0:k1) !multiply kmvis and kmcnd with sponge_factor (for sponge layer) + real(kind_phys), intent(out) :: kmvis_ref(k0:k1) !reference molecular diffusion coefficient + real(kind_phys), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient + real(kind_phys), intent(out) :: rho_ref(k0:k1) !reference density ! local vars integer :: k @@ -1439,17 +1415,17 @@ subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvi kmvis_ref(k) = sponge_factor(k)* & (kv1*mmro2*o2_mwi + & kv2*mmrn2*n2_mwi)*mbar* & - tref**kv4 * 1.e-7_r8 + tref**kv4 * 1.e-7_kind_phys kmcnd_ref(k) = sponge_factor(k)* & (kc1*mmro2*o2_mwi + & kc2*mmrn2*n2_mwi)*mbar* & - tref**kc4 * 1.e-5_r8 + tref**kc4 * 1.e-5_kind_phys end do end subroutine get_molecular_diff_coef_reference ! !**************************************************************************************************************** ! - ! Compute dry air heaet capacity under constant pressure + ! Compute dry air heat capacity under constant pressure ! !**************************************************************************************************************** ! @@ -1507,31 +1483,29 @@ end subroutine get_cp_dry !**************************************************************************************************************** ! 
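! Illustrative sketch (not part of this patch): the kmvis/kmcnd hunks above
! use one vertical discretization throughout: an interface value is the mean
! of the two adjacent mid-level values, the top interface is filled by linear
! extrapolation, and the bottom interface copies the interface just above.
! For one column (hypothetical names; requires nlev >= 3):
subroutine mid_to_interface(nlev, vmid, vint)
   integer, parameter :: r8 = selected_real_kind(12)
   integer,  intent(in)  :: nlev
   real(r8), intent(in)  :: vmid(nlev)    ! values at mid levels (top-to-bottom)
   real(r8), intent(out) :: vint(nlev+1)  ! values at interfaces
   integer :: k
   do k = 2, nlev
      vint(k) = 0.5_r8*(vmid(k-1) + vmid(k))
   end do
   vint(1)      = 1.5_r8*vint(2) - 0.5_r8*vint(3)  ! extrapolate to the model top
   vint(nlev+1) = vint(nlev)                       ! carry the lowest interface down
end subroutine mid_to_interface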
subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry,fact) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 - integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac !array boundas - real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)!tracer array - integer, intent(in) :: active_species_idx_dycore(:) !index of active species in tracer - real(r8), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R - real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac !array boundas + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) !tracer array + integer, intent(in) :: active_species_idx_dycore(:) !index of active species in tracer + real(kind_phys), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R + real(kind_phys), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio integer :: i,j,k,m_cnst,nq - real(r8):: factor(i0:i1,j0:j1,k0_trac:k1_trac), residual(i0:i1,j0:j1,k0:k1), mm + real(kind_phys):: factor(i0:i1,j0:j1,k0_trac:k1_trac), residual(i0:i1,j0:j1,k0:k1), mm if (dry_air_species_num==0) then ! ! dry air not species dependent ! - R_dry = real(rair, r8) + R_dry = rair else if (present(fact)) then factor = fact(:,:,:) else - factor = 1.0_r8 + factor = 1.0_kind_phys endif - R_dry = 0.0_r8 - residual = 1.0_r8 + R_dry = 0.0_kind_phys + residual = 1.0_kind_phys do nq=1,dry_air_species_num m_cnst = active_species_idx_dycore(nq) do k=k0,k1 @@ -1569,19 +1543,16 @@ end subroutine get_R_dry subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy, & active_species_idx_dycore) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 - - integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac - real(r8), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,ntrac)!tracer array (mass weighted) - real(r8), intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature - real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry presure level thickness - real(r8), optional, intent(out):: thermal_energy(i0:i1,j0:j1,k0:k1) !thermal energy in each column: sum cp*T*dp + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_phys), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,ntrac) !tracer array (mass weighted) + real(kind_phys), intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry presure level thickness + real(kind_phys), intent(out) :: thermal_energy(i0:i1,j0:j1,k0:k1) !thermal energy in each column: sum cp*T*dp ! ! array of indicies for index of thermodynamic active species in dycore tracer array ! (if different from physics index) ! - integer, optional, dimension(:) :: active_species_idx_dycore + integer, optional, dimension(:), intent(in) :: active_species_idx_dycore ! 
local vars integer :: nq, itrac @@ -1600,7 +1571,7 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th if (dry_air_species_num==0) then thermal_energy(:,:,:) = thermodynamic_active_species_cp(0)*dp_dry(:,:,:) else - call get_cp_dry(i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer_mass,idx_local,thermal_energy,fact=1.0_r8/dp_dry(:,:,:)) + call get_cp_dry(i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer_mass,idx_local,thermal_energy,fact=1.0_kind_phys/dp_dry(:,:,:)) thermal_energy(:,:,:) = thermal_energy(:,:,:)*dp_dry(:,:,:) end if ! @@ -1627,17 +1598,14 @@ end subroutine get_thermal_energy subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q, & active_species_idx_dycore) - !Given that this routine is only used by the dycore, - !the "r8" kind is used instead of "kind_phys": - use shr_kind_mod, only: r8=>shr_kind_r8 use cam_logfile, only: iulog ! args - integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac - real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) !tracer array - real(r8), intent(out) :: T_v(i0:i1,j0:j1,k0:k1) !virtual temperature - real(r8), optional, intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature - real(r8), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry pressure level thickness - real(r8), optional,intent(out) :: sum_q(i0:i1,j0:j1,k0:k1) !sum tracer + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) !tracer array + real(kind_phys), intent(out) :: T_v(i0:i1,j0:j1,k0:k1) !virtual temperature + real(kind_phys), optional, intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature + real(kind_phys), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry pressure level thickness + real(kind_phys), optional,intent(out) :: sum_q(i0:i1,j0:j1,k0:k1) !sum tracer ! ! array of indicies for index of thermodynamic active species in dycore tracer array ! (if different from physics index) @@ -1646,7 +1614,7 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q ! local vars integer :: itrac,nq - real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, factor, Rd + real(kind_phys), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, factor, Rd integer, dimension(thermodynamic_active_species_num) :: idx_local,idx if (present(active_species_idx_dycore)) then idx_local = active_species_idx_dycore @@ -1655,12 +1623,12 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q end if if (present(dp_dry)) then - factor = 1.0_r8/dp_dry + factor = 1.0_kind_phys/dp_dry else - factor = 1.0_r8 + factor = 1.0_kind_phys end if - sum_species = 1.0_r8 !all dry air species sum to 1 + sum_species = 1.0_kind_phys !all dry air species sum to 1 do nq=dry_air_species_num+1,thermodynamic_active_species_num itrac = idx_local(nq) sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:) @@ -1747,14 +1715,12 @@ end subroutine get_cp !************************************************************************************************************************* ! 
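! Illustrative sketch (not part of this patch): in get_virtual_temp above,
! tracers may arrive either as dry mixing ratios or as mass (q*dp); in the
! mass case "factor = 1/dp_dry" converts them back before the moist-species
! sum is formed (the advected dry-air species already sum to one).  At a
! single point (hypothetical names):
pure function moist_species_sum(nwet, q_or_mass, dp_dry, mass_weighted) result(sum_species)
   integer, parameter :: r8 = selected_real_kind(12)
   integer,  intent(in) :: nwet             ! number of water species
   real(r8), intent(in) :: q_or_mass(nwet)  ! wet tracers: mixing ratio or q*dp
   real(r8), intent(in) :: dp_dry           ! dry pressure-level thickness [Pa]
   logical,  intent(in) :: mass_weighted    ! .true. when tracers are q*dp
   real(r8) :: sum_species, factor
   integer  :: n
   factor = 1.0_r8
   if (mass_weighted) factor = 1.0_r8/dp_dry
   sum_species = 1.0_r8                     ! dry air contributes exactly one
   do n = 1, nwet
      sum_species = sum_species + q_or_mass(n)*factor
   end do
end function moist_species_sum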
subroutine get_dp_ref(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 - - integer, intent(in) :: i0,i1,j0,j1,k0,k1 - real(r8), intent(in) :: hyai(k0:k1+1),hybi(k0:k1+1),ps0 - real(r8), intent(in) :: phis(i0:i1,j0:j1) - real(r8), intent(out) :: dp_ref(i0:i1,j0:j1,k0:k1) - real(r8), intent(out) :: ps_ref(i0:i1,j0:j1) + + integer, intent(in) :: i0,i1,j0,j1,k0,k1 + real(kind_phys), intent(in) :: hyai(k0:k1+1),hybi(k0:k1+1),ps0 + real(kind_phys), intent(in) :: phis(i0:i1,j0:j1) + real(kind_phys), intent(out) :: dp_ref(i0:i1,j0:j1,k0:k1) + real(kind_phys), intent(out) :: ps_ref(i0:i1,j0:j1) integer :: k ! ! use static reference pressure (hydrostatic balance incl. effect of topography) @@ -1767,40 +1733,37 @@ end subroutine get_dp_ref ! !************************************************************************************************************************* ! - ! compute dry densisty from temperature (temp) and pressure (dp_dry and tracer) + ! compute dry density from temperature (temp) and pressure (dp_dry and tracer) ! !************************************************************************************************************************* ! subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_mass,& rho_dry, rhoi_dry,active_species_idx_dycore,pint_out,pmid_out) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 - ! args - integer, intent(in) :: i0,i1,j0,j1,k1,ntrac,nlev - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,ntrac) ! Tracer array - real(r8), intent(in) :: temp(i0:i1,j0:j1,1:nlev) ! Temperature - real(r8), intent(in) :: ptop - real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) - logical, intent(in) :: tracer_mass - real(r8), optional,intent(out) :: rho_dry(i0:i1,j0:j1,1:k1) - real(r8), optional,intent(out) :: rhoi_dry(i0:i1,j0:j1,1:k1+1) + integer, intent(in) :: i0,i1,j0,j1,k1,ntrac,nlev + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,ntrac) ! Tracer array + real(kind_phys), intent(in) :: temp(i0:i1,j0:j1,1:nlev) ! Temperature + real(kind_phys), intent(in) :: ptop + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) + logical, intent(in) :: tracer_mass + real(kind_phys), optional,intent(out) :: rho_dry(i0:i1,j0:j1,1:k1) + real(kind_phys), optional,intent(out) :: rhoi_dry(i0:i1,j0:j1,1:k1+1) ! ! array of indicies for index of thermodynamic active species in dycore tracer array ! (if different from physics index) ! - integer, optional, intent(in) :: active_species_idx_dycore(:) - real(r8),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1) - real(r8),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1) + integer, optional, intent(in) :: active_species_idx_dycore(:) + real(kind_phys),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1) + real(kind_phys),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1) ! 
local vars integer :: i,j,k integer :: iret - real(r8), dimension(i0:i1,j0:j1,1:k1) :: pmid - real(r8):: pint(i0:i1,j0:j1,1:k1+1) - real(r8), allocatable :: R_dry(:,:,:) - integer, dimension(thermodynamic_active_species_num):: idx_local + real(kind_phys), dimension(i0:i1,j0:j1,1:k1) :: pmid + real(kind_phys) :: pint(i0:i1,j0:j1,1:k1+1) + real(kind_phys), allocatable :: R_dry(:,:,:) + integer, dimension(thermodynamic_active_species_num) :: idx_local character(len=*), parameter :: subname = 'get_rho_dry' @@ -1821,19 +1784,19 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ file=__FILE__, line=__LINE__) if (tracer_mass) then - call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) + call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_kind_phys/dp_dry) else call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry) end if do k=2,k1+1 - rhoi_dry(i0:i1,j0:j1,k) = 0.5_r8*(temp(i0:i1,j0:j1,k)+temp(i0:i1,j0:j1,k-1))!could be more accurate! + rhoi_dry(i0:i1,j0:j1,k) = 0.5_kind_phys*(temp(i0:i1,j0:j1,k)+temp(i0:i1,j0:j1,k-1))!could be more accurate! rhoi_dry(i0:i1,j0:j1,k) = pint(i0:i1,j0:j1,k)/(rhoi_dry(i0:i1,j0:j1,k)*R_dry(i0:i1,j0:j1,k)) !ideal gas law for dry air end do ! ! extrapolate top level value ! k=1 - rhoi_dry(i0:i1,j0:j1,k) = 1.5_r8*(temp(i0:i1,j0:j1,1)-0.5_r8*temp(i0:i1,j0:j1,2)) + rhoi_dry(i0:i1,j0:j1,k) = 1.5_kind_phys*(temp(i0:i1,j0:j1,1)-0.5_kind_phys*temp(i0:i1,j0:j1,2)) rhoi_dry(i0:i1,j0:j1,k) = pint(i0:i1,j0:j1,1)/(rhoi_dry(i0:i1,j0:j1,k)*R_dry(i0:i1,j0:j1,k)) !ideal gas law for dry air deallocate(R_dry) end if @@ -1843,7 +1806,7 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ file=__FILE__, line=__LINE__) if (tracer_mass) then - call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry) + call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_kind_phys/dp_dry) else call get_R_dry(i0,i1,j0,j1,1,k1,1,nlev,ntrac,tracer,idx_local,R_dry) end if @@ -1864,17 +1827,15 @@ end subroutine get_rho_dry !************************************************************************************************************************* ! subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbarv,fact) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 - integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac, nlev - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array - integer, intent(in) :: active_species_idx(:) !index of active species in tracer - real(r8), intent(out) :: mbarv(i0:i1,j0:j1,k0:k1) !molecular weight of dry air - real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac, nlev + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array + integer, intent(in) :: active_species_idx(:) !index of active species in tracer + real(kind_phys), intent(out) :: mbarv(i0:i1,j0:j1,k0:k1) !molecular weight of dry air + real(kind_phys), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio integer :: i,j,k,m_cnst,nq - real(r8):: factor(i0:i1,j0:j1,k0:k1), residual(i0:i1,j0:j1,k0:k1), mm + real(kind_phys) :: factor(i0:i1,j0:j1,k0:k1), residual(i0:i1,j0:j1,k0:k1), mm ! ! dry air not species dependent ! 
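! Illustrative sketch (not part of this patch): get_mbarv (this hunk and the
! next) forms the dry-air mean molecular weight as the reciprocal of the
! mass-fraction-weighted sum of inverse molecular weights -- a harmonic
! rather than linear average -- with the derived species in slot 0 again
! absorbing the residual mass fraction.  At a single point (hypothetical names):
pure function mean_molecular_weight(q, mw_inv, nspec) result(mbar)
   integer, parameter :: r8 = selected_real_kind(12)
   integer,  intent(in) :: nspec           ! explicitly advected dry-air species
   real(r8), intent(in) :: q(nspec)        ! their dry mass fractions
   real(r8), intent(in) :: mw_inv(0:nspec) ! 1/M for each species; slot 0 = derived species
   real(r8) :: mbar
   real(r8) :: acc, residual
   integer  :: n
   acc      = 0.0_r8
   residual = 1.0_r8
   do n = 1, nspec
      acc      = acc + q(n)*mw_inv(n)
      residual = residual - q(n)
   end do
   mbar = 1.0_r8/(acc + residual*mw_inv(0))
end function mean_molecular_weight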
@@ -1884,11 +1845,11 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar if (present(fact)) then factor = fact(:,:,:) else - factor = 1.0_r8 + factor = 1.0_kind_phys endif - mbarv = 0.0_r8 - residual = 1.0_r8 + mbarv = 0.0_kind_phys + residual = 1.0_kind_phys do nq=1,dry_air_species_num m_cnst = active_species_idx(nq) do k=k0,k1 @@ -1909,7 +1870,7 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar end do end do end do - mbarv(i0:i1,j0:j1,k0:k1) = 1.0_r8/mbarv(i0:i1,j0:j1,k0:k1) + mbarv(i0:i1,j0:j1,k0:k1) = 1.0_kind_phys/mbarv(i0:i1,j0:j1,k0:k1) end if end subroutine get_mbarv ! @@ -1920,22 +1881,20 @@ end subroutine get_mbarv !************************************************************************************************************************* ! subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,fact) - !Only called by the dycore, so "r8" kind is used: - use shr_kind_mod, only: r8=>shr_kind_r8 integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,nlev - real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array - integer, intent(in) :: active_species_idx(:) !index of thermodynamic active tracers - real(r8), intent(out) :: kappa_dry(i0:i1,j0:j1,k0:k1) !kappa dry - real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array + integer, intent(in) :: active_species_idx(:) !index of thermodynamic active tracers + real(kind_phys), intent(out) :: kappa_dry(i0:i1,j0:j1,k0:k1) !kappa dry + real(kind_phys), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio ! - real(r8), allocatable, dimension(:,:,:) :: cp_dry,R_dry + real(kind_phys), allocatable, dimension(:,:,:) :: cp_dry,R_dry integer :: iret character(len=*), parameter :: subname = 'get_kappa_dry' ! ! dry air not species dependent if (dry_air_species_num==0) then - kappa_dry= real(rair/cpair, r8) + kappa_dry= rair/cpair else allocate(R_dry(i0:i1,j0:j1,k0:k1), stat=iret) call check_allocate(iret, subname, 'R_dry(i0:i1,j0:j1,k0:k1)', & @@ -1953,7 +1912,7 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,real(tracer, kind_phys),active_species_idx,cp_dry) call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry) end if - kappa_dry = R_dry/real(cp_dry, r8) + kappa_dry = R_dry/cp_dry deallocate(R_dry,cp_dry) end if end subroutine get_kappa_dry diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index 11d4d0ab..240d07a4 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -7,7 +7,7 @@ module comp_gll_ctr_vol use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush use global_norms_mod, only: wrap_repro_sum - use physconst, only: pi + use dynconst, only: pi use shr_infnan_mod, only: isnan=>shr_infnan_isnan use coordinate_systems_mod, only: cartesian3d_t, cartesian2d_t @@ -667,7 +667,7 @@ function make_unique(a, n) result(m) ! if (ABS(a(j)-a(i)).lt. 
1e-6) a(j) = 9999 delta = abs(a(j)-a(i)) if (delta < 1.e-6_r8) a(j) = 9999.0_r8 - if (abs((2.0_r8*real(pi, r8)) - delta) < 1.0e-6_r8) a(j) = 9999.0_r8 + if (abs((2.0_r8*pi) - delta) < 1.0e-6_r8) a(j) = 9999.0_r8 end do end do m = 0 @@ -676,7 +676,7 @@ function make_unique(a, n) result(m) end do if (mod(m,2).ne.0) then do i=1,n - print *,'angle with centroid: ',i,a(i),mod(a(i),2*real(pi, r8)) + print *,'angle with centroid: ',i,a(i),mod(a(i),2*pi) end do call endrun("Error: Found an odd number or nodes for cv element. Should be even.") end if @@ -1370,7 +1370,7 @@ subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) ! MNL: dx and dy are no longer part of element_t ! but they are easily computed for the ! uniform case - dx = real(pi, r8)/(2.0d0*dble(ne)) + dx = pi/(2.0d0*dble(ne)) dy = dx ! intialize local element dual grid, local element areas @@ -1378,8 +1378,8 @@ subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) do ie=nets,nete call convert_gbl_index(elem(ie)%vertex%number,ie1,je1,face_no) - start%x=r-real(pi, r8)/4 + ie1*dx - start%y=-1._r8*real(pi, r8)/4 + je1*dy + start%x=r-pi/4._r8 + ie1*dx + start%y=-1._r8*pi/4._r8 + je1*dy endd%x =start%x + dx endd%y =start%y + dy cartp_nm1(0:np,0:np) = element_coordinates(start,endd,gllnm1) @@ -2245,7 +2245,7 @@ subroutine VerifVolumes(elem, hybrid,nets,nete) real(r8), pointer :: locvol(:,:) - dx = real(pi, r8)/(2.0d0*dble(ne)) + dx = pi/(2.0_r8*real(ne, r8)) dy = dx if(.not. initialized) then @@ -2318,13 +2318,13 @@ subroutine VerifVolumes(elem, hybrid,nets,nete) if(hybrid%masterthread) then write(*,'(a,i2,a,2e23.15)') "cube face:",face," : SURFACE FV =",& - 6_r8*psum/(4_r8 * real(pi, r8)), & - 6_r8*psum/(4_r8 * real(pi, r8))-1 + 6_r8*psum/(4_r8 * pi), & + 6_r8*psum/(4_r8 * pi)-1 end if end do if(hybrid%masterthread) then - write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * real(pi, r8)) + write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * pi) end if end subroutine VerifVolumes diff --git a/src/dynamics/se/dycore/coordinate_systems_mod.F90 b/src/dynamics/se/dycore/coordinate_systems_mod.F90 index f653d76f..d621b063 100644 --- a/src/dynamics/se/dycore/coordinate_systems_mod.F90 +++ b/src/dynamics/se/dycore/coordinate_systems_mod.F90 @@ -1,7 +1,7 @@ module coordinate_systems_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_abortutils, only: endrun - use physconst, only: pi + use dynconst, only: pi ! WARNING: When using this class be sure that you know if the ! 
cubic coordinates are on the unit cube or the [-\pi/4,\pi/4] cube @@ -290,7 +290,7 @@ pure function cart_to_spherical(cart) result (sphere) if ( abs(abs(sphere%lat)-PI/2) >= DIST_THRESHOLD ) then sphere%lon=ATAN2(cart%y,cart%x) if (sphere%lon<0) then - sphere%lon=sphere%lon + 2*real(pi, r8) + sphere%lon=sphere%lon + 2._r8*pi end if end if @@ -565,7 +565,7 @@ pure function sphere2cubedsphere (sphere, face_no) result(cart) lat = sphere%lat lon = sphere%lon - twopi = 2.0_r8 * real(pi, r8) + twopi = 2.0_r8 * pi pi2 = pi * 0.5_r8 pi3 = pi * 1.5_r8 pi4 = pi * 0.25_r8 @@ -573,14 +573,14 @@ pure function sphere2cubedsphere (sphere, face_no) result(cart) select case (face_no) case (1) xp = lon - if (real(pi, r8) < lon) xp = lon - twopi !if lon in [0,2\pi] + if (pi < lon) xp = lon - twopi !if lon in [0,2\pi] yp = atan(tan(lat)/cos(xp)) case (2) xp = lon - pi2 yp = atan(tan(lat)/cos(xp)) case (3) - xp = lon - real(pi, r8) - if (lon < 0) xp = lon + real(pi, r8) !if lon in [0,2\pi] + xp = lon - pi + if (lon < 0) xp = lon + pi !if lon in [0,2\pi] yp = atan(tan(lat)/cos(xp)) case (4) xp = lon - pi3 diff --git a/src/dynamics/se/dycore/cube_mod.F90 b/src/dynamics/se/dycore/cube_mod.F90 index edad65cf..968f723c 100644 --- a/src/dynamics/se/dycore/cube_mod.F90 +++ b/src/dynamics/se/dycore/cube_mod.F90 @@ -4,7 +4,7 @@ module cube_mod projectpoint, cubedsphere2cart, spherical_to_cart, sphere_tri_area,dist_threshold, & change_coordinates - use physconst, only: pi, rearth + use dynconst, only: pi, rearth use control_mod, only: hypervis_scaling, cubed_sphere_map use cam_abortutils, only: endrun, check_allocate @@ -15,10 +15,10 @@ module cube_mod integer,public, parameter :: nInnerElemEdge = 8 ! number of edges for an interior element integer,public, parameter :: nCornerElemEdge = 4 ! number of corner elements - real(kind=r8), public, parameter :: cube_xstart = -0.25_R8*real(pi, r8) - real(kind=r8), public, parameter :: cube_xend = 0.25_R8*real(pi, r8) - real(kind=r8), public, parameter :: cube_ystart = -0.25_R8*real(pi, r8) - real(kind=r8), public, parameter :: cube_yend = 0.25_R8*real(pi, r8) + real(kind=r8), public, parameter :: cube_xstart = -0.25_R8*pi + real(kind=r8), public, parameter :: cube_xend = 0.25_R8*pi + real(kind=r8), public, parameter :: cube_ystart = -0.25_R8*pi + real(kind=r8), public, parameter :: cube_yend = 0.25_R8*pi type, public :: face_t @@ -221,7 +221,7 @@ end subroutine elem_jacobians subroutine metric_atomic(elem,gll_points,alpha) use element_mod, only: element_t use dimensions_mod, only: np - use physconst, only: ra + use dynconst, only: ra type (element_t), intent(inout) :: elem real(r8), intent(in) :: alpha @@ -420,8 +420,8 @@ subroutine metric_atomic(elem,gll_points,alpha) DE(2,1)=sum(elem%D(i,j,2,:)*E(:,1)) DE(2,2)=sum(elem%D(i,j,2,:)*E(:,2)) - lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(real(rearth**2.0_r8, r8)) - lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(real(rearth**2.0_r8, r8)) + lamStar1=1/(eig(1)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) + lamStar2=1/(eig(2)**(hypervis_scaling/4.0_r8)) *(rearth**2.0_r8) !matrix (DE) * Lam^* * Lam , tensor HV when V is applied at each Laplace calculation ! DEL(1:2,1) = lamStar1*eig(1)*DE(1:2,1) @@ -453,8 +453,8 @@ subroutine metric_atomic(elem,gll_points,alpha) ! 
compute element length scales, based on SVDs, in km: - elem%dx_short = 1.0_r8/(max_svd*0.5_r8*dble(np-1)*real(ra, r8)*1000.0_r8) - elem%dx_long = 1.0_r8/(min_svd*0.5_r8*dble(np-1)*real(ra, r8)*1000.0_r8) + elem%dx_short = 1.0_r8/(max_svd*0.5_r8*real(np-1, r8)*ra*1000.0_r8) + elem%dx_long = 1.0_r8/(min_svd*0.5_r8*real(np-1, r8)*ra*1000.0_r8) ! optional noramlization: elem%D = elem%D * sqrt(alpha) @@ -824,7 +824,7 @@ end subroutine dmap_elementlocal subroutine coreolis_init_atomic(elem) use element_mod, only: element_t use dimensions_mod, only: np - use physconst, only: omega + use dynconst, only: omega type (element_t) :: elem @@ -833,7 +833,7 @@ subroutine coreolis_init_atomic(elem) integer :: i,j real (kind=r8) :: lat,lon,rangle - rangle = rotate_grid * real(pi, r8) / 180._r8 + rangle = rotate_grid * pi / 180._r8 do j=1,np do i=1,np if ( rotate_grid /= 0) then diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 index 5e583415..433a62c5 100644 --- a/src/dynamics/se/dycore/derivative_mod.F90 +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -1,10 +1,10 @@ module derivative_mod - use shr_kind_mod, only: r8=>shr_kind_r8 - use cam_abortutils, only: endrun, check_allocate + use shr_kind_mod, only: r8=>shr_kind_r8 + use cam_abortutils, only: endrun, check_allocate use dimensions_mod, only : np, nc, npdg, nelemd, nlev use quadrature_mod, only : quadrature_t, gauss, gausslobatto,legendre, jacobi ! needed for spherical differential operators: - use physconst, only: ra + use dynconst, only: ra use element_mod, only : element_t use control_mod, only : hypervis_scaling, hypervis_power use perf_mod, only : t_startf, t_stopf @@ -1043,15 +1043,15 @@ subroutine gradient_sphere(s,deriv,Dinv,ds) do j=1,np do l=1,np - dsdx00=0.0d0 - dsdy00=0.0d0 + dsdx00=0.0_r8 + dsdy00=0.0_r8 !DIR$ UNROLL(NP) do i=1,np dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) end do - v1(l ,j ) = dsdx00*real(ra, r8) - v2(j ,l ) = dsdy00*real(ra, r8) + v1(l ,j ) = dsdx00*ra + v2(j ,l ) = dsdy00*ra end do end do ! convert covarient to latlon @@ -1111,9 +1111,9 @@ function curl_sphere_wk_testcov(s,deriv,elem) result(ds) !DIR$ UNROLL(NP) do j=1,np ! phi(n)_y sum over second index, 1st index fixed at m - dscontra(m,n,1)=dscontra(m,n,1)-(elem%mp(m,j)*s(m,j)*deriv%Dvv(n,j) )*real(ra, r8) + dscontra(m,n,1)=dscontra(m,n,1)-(elem%mp(m,j)*s(m,j)*deriv%Dvv(n,j) )*ra ! phi(m)_x sum over first index, second index fixed at n - dscontra(m,n,2)=dscontra(m,n,2)+(elem%mp(j,n)*s(j,n)*deriv%Dvv(m,j) )*real(ra, r8) + dscontra(m,n,2)=dscontra(m,n,2)+(elem%mp(j,n)*s(j,n)*deriv%Dvv(m,j) )*ra enddo enddo enddo @@ -1175,12 +1175,12 @@ function gradient_sphere_wk_testcov(s,deriv,elem) result(ds) dscontra(m,n,1)=dscontra(m,n,1)-(& (elem%mp(j,n)*elem%metinv(m,n,1,1)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& (elem%mp(m,j)*elem%metinv(m,n,2,1)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & - ) *real(ra, r8) + ) *ra dscontra(m,n,2)=dscontra(m,n,2)-(& (elem%mp(j,n)*elem%metinv(m,n,1,2)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) ) +& (elem%mp(m,j)*elem%metinv(m,n,2,2)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) ) & - ) *real(ra, r8) + ) *ra enddo enddo enddo @@ -1228,9 +1228,9 @@ function gradient_sphere_wk_testcontra(s,deriv,elem) result(ds) !DIR$ UNROLL(NP) do j=1,np ! 
phi(m)_x sum over first index, second index fixed at n - dscov(m,n,1)=dscov(m,n,1)-(elem%mp(j,n)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) )*real(ra, r8) + dscov(m,n,1)=dscov(m,n,1)-(elem%mp(j,n)*elem%metdet(m,n)*s(j,n)*deriv%Dvv(m,j) )*ra ! phi(n)_y sum over second index, 1st index fixed at m - dscov(m,n,2)=dscov(m,n,2)-(elem%mp(m,j)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) )*real(ra, r8) + dscov(m,n,2)=dscov(m,n,2)-(elem%mp(m,j)*elem%metdet(m,n)*s(m,j)*deriv%Dvv(n,j) )*ra enddo enddo enddo @@ -1311,15 +1311,15 @@ function curl_sphere(s,deriv,elem) result(ds) do j=1,np do l=1,np - dsdx00=0.0d0 - dsdy00=0.0d0 + dsdx00=0.0_r8 + dsdy00=0.0_r8 !DIR$ UNROLL(NP) do i=1,np dsdx00 = dsdx00 + deriv%Dvv(i,l )*s(i,j ) dsdy00 = dsdy00 + deriv%Dvv(i,l )*s(j ,i) end do - v2(l ,j ) = -dsdx00*real(ra, r8) - v1(j ,l ) = dsdy00*real(ra, r8) + v2(l ,j ) = -dsdx00*ra + v1(j ,l ) = dsdy00*ra end do end do ! convert contra -> latlon *and* divide by jacobian @@ -1379,7 +1379,7 @@ subroutine divergence_sphere_wk(v,deriv,elem,div) do j=1,np div(m,n)=div(m,n)-(elem%spheremp(j,n)*vtemp(j,n,1)*deriv%Dvv(m,j) & +elem%spheremp(m,j)*vtemp(m,j,2)*deriv%Dvv(n,j)) & - * real(ra, r8) + * ra enddo end do @@ -1418,22 +1418,22 @@ function element_boundary_integral(v,deriv,elem) result(result) result=0 j=1 do i=1,np - result(i,j)=result(i,j)-deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*real(ra, r8) + result(i,j)=result(i,j)-deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra enddo j=np do i=1,np - result(i,j)=result(i,j)+deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*real(ra, r8) + result(i,j)=result(i,j)+deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ucontra(i,j,2)*ra enddo i=1 do j=1,np - result(i,j)=result(i,j)-deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*real(ra, r8) + result(i,j)=result(i,j)-deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra enddo i=np do j=1,np - result(i,j)=result(i,j)+deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*real(ra, r8) + result(i,j)=result(i,j)+deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ucontra(i,j,1)*ra enddo end function element_boundary_integral @@ -1483,13 +1483,13 @@ function edge_flux_u_cg( v,p,pedges, deriv, elem, u_is_contra) result(result) j=1 pstar=p(i,j) if (ucontra(i,j,2)>0) pstar=pedges(i,0) - flux = -pstar*ucontra(i,j,2)*( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*real(ra, r8)) + flux = -pstar*ucontra(i,j,2)*( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) result(i,j)=result(i,j)+flux j=np pstar=p(i,j) if (ucontra(i,j,2)<0) pstar=pedges(i,np+1) - flux = pstar*ucontra(i,j,2)* ( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*real(ra, r8)) + flux = pstar*ucontra(i,j,2)* ( deriv%Mvv_twt(i,i)*elem%metdet(i,j)*ra) result(i,j)=result(i,j)+flux enddo @@ -1497,13 +1497,13 @@ function edge_flux_u_cg( v,p,pedges, deriv, elem, u_is_contra) result(result) i=1 pstar=p(i,j) if (ucontra(i,j,1)>0) pstar=pedges(0,j) - flux = -pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*real(ra, r8)) + flux = -pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) result(i,j)=result(i,j)+flux i=np pstar=p(i,j) if (ucontra(i,j,1)<0) pstar=pedges(np+1,j) - flux = pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*real(ra, r8)) + flux = pstar*ucontra(i,j,1)* ( deriv%Mvv_twt(j,j)*elem%metdet(i,j)*ra) result(i,j)=result(i,j)+flux end do @@ -1542,8 +1542,8 @@ subroutine vorticity_sphere(v,deriv,elem,vort) do j=1,np do l=1,np - dudy00=0.0d0 - dvdx00=0.0d0 + dudy00=0.0_r8 + dvdx00=0.0_r8 !DIR$ UNROLL(NP) do i=1,np @@ -1558,7 +1558,7 @@ subroutine vorticity_sphere(v,deriv,elem,vort) do j=1,np do i=1,np - 
vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) end do end do @@ -1598,8 +1598,8 @@ function vorticity_sphere_diag(v,deriv,elem) result(vort) do j=1,np do l=1,np - dudy00=0.0d0 - dvdx00=0.0d0 + dudy00=0.0_r8 + dvdx00=0.0_r8 !DIR$ UNROLL(NP) do i=1,np dvdx00 = dvdx00 + deriv%Dvv_diag(i,l)*vco(i,j ,2) @@ -1612,7 +1612,7 @@ function vorticity_sphere_diag(v,deriv,elem) result(vort) do j=1,np do i=1,np - vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) + vort(i,j)=(vort(i,j)-vtemp(i,j))*(elem%rmetdet(i,j)*ra) end do end do @@ -1652,8 +1652,8 @@ subroutine divergence_sphere(v,deriv,elem,div) ! compute d/dx and d/dy do j=1,np do l=1,np - dudx00=0.0d0 - dvdy00=0.0d0 + dudx00=0.0_r8 + dvdy00=0.0_r8 !DIR$ UNROLL(NP) do i=1,np dudx00 = dudx00 + deriv%Dvv(i,l )*gv(i,j ,1) @@ -1666,7 +1666,7 @@ subroutine divergence_sphere(v,deriv,elem,div) do j=1,np do i=1,np - div(i,j)=(div(i,j)+vvtemp(i,j))*(elem%rmetdet(i,j)*real(ra, r8)) + div(i,j)=(div(i,j)+vvtemp(i,j))*(elem%rmetdet(i,j)*ra) end do end do @@ -1799,8 +1799,8 @@ subroutine vlaplace_sphere_wk_mol(v,deriv,elem,undamprrcart,mol_nu,laplace) do n=1,np do m=1,np ! add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(real(ra**2, r8)) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(real(ra**2, r8)) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) enddo enddo end if @@ -1846,8 +1846,8 @@ function vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) result if (undamprrcart) then ! add in correction so we dont damp rigid rotation - laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(real(ra**2, r8)) - laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(real(ra**2, r8)) + laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(ra**2) + laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(ra**2) end if end function vlaplace_sphere_wk_cartesian @@ -1897,8 +1897,8 @@ function vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) do n=1,np do m=1,np ! add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(real(ra**2, r8)) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(real(ra**2, r8)) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) enddo enddo end if @@ -2095,10 +2095,10 @@ subroutine subcell_div_fluxes(u, p, n, metdet,fluxes) flux_l(:,:) = MATMUL(boundary_interp_matrix(:,1,:),lr) flux_r(:,:) = MATMUL(boundary_interp_matrix(:,2,:),lr) - fluxes(:,:,1) = -flux_b(:,:)*real(ra, r8) - fluxes(:,:,2) = flux_r(:,:)*real(ra, r8) - fluxes(:,:,3) = flux_t(:,:)*real(ra, r8) - fluxes(:,:,4) = -flux_l(:,:)*real(ra, r8) + fluxes(:,:,1) = -flux_b(:,:)*ra + fluxes(:,:,2) = flux_r(:,:)*ra + fluxes(:,:,3) = flux_t(:,:)*ra + fluxes(:,:,4) = -flux_l(:,:)*ra end subroutine subcell_div_fluxes diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index 9c9aefb8..b827e2e3 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -436,20 +436,20 @@ subroutine fvm_init1(par,elem) endif call endrun("stopping") end if - + if (nhe .ne. 
1) then if (par%masterproc) then write(iulog,*) "PARAMETER ERROR for fvm: Number of halo zone for the extended" write(iulog,*) "element nhe has to be 1, only this is available now! STOP!" endif call endrun("stopping") - end if + end if end subroutine fvm_init1 - - - - - + + + + + ! initialization that can be done in threaded regions subroutine fvm_init2(elem,fvm,hybrid,nets,nete) use fvm_control_volume_mod, only: fvm_mesh,fvm_set_cubeboundary @@ -461,7 +461,7 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) use hycoef, only: hyai, hybi, ps0 use derivative_mod, only: subcell_integration use physconst, only: thermodynamic_active_species_num - + type (fvm_struct) :: fvm(:) type (element_t) :: elem(:) type (hybrid_t) :: hybrid diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 index ade5dbcf..c170f8e8 100644 --- a/src/dynamics/se/dycore/global_norms_mod.F90 +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -3,7 +3,7 @@ module global_norms_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_logfile, only: iulog use edgetype_mod, only: EdgeBuffer_t - use physconst, only: pi + use dynconst, only: pi implicit none private @@ -74,7 +74,7 @@ subroutine global_integrals(elem, h,hybrid,npts,num_flds,nets,nete,I_sphere) !JMD print *,'global_integral: before wrap_repro_sum' call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) !JMD print *,'global_integral: after wrap_repro_sum' - I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*real(pi, r8)) + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*pi) end subroutine global_integrals subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere) @@ -120,7 +120,7 @@ subroutine global_integrals_general(h,hybrid,npts,da,num_flds,nets,nete,I_sphere !JMD print *,'global_integral: before wrap_repro_sum' call wrap_repro_sum(nvars=num_flds, comm=hybrid%par%comm) !JMD print *,'global_integral: after wrap_repro_sum' - I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*real(pi, r8)) + I_sphere(:) =global_shared_sum(1:num_flds) /(4.0_r8*pi) end subroutine global_integrals_general @@ -179,7 +179,7 @@ function global_integral(elem, h,hybrid,npts,nets,nete) result(I_sphere) !JMD print *,'global_integral: after wrap_repro_sum' I_tmp = global_shared_sum(1) !JMD print *,'global_integral: after global_shared_sum' - I_sphere = I_tmp(1)/(4.0_r8*real(pi, r8)) + I_sphere = I_tmp(1)/(4.0_r8*pi) end function global_integral @@ -211,7 +211,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& use quadrature_mod, only: gausslobatto, quadrature_t use reduction_mod, only: ParallelMin,ParallelMax - use physconst, only: ra, rearth, pi + use dynconst, only: ra, rearth, cpair use control_mod, only: nu, nu_div, nu_q, nu_p, nu_s, nu_top, fine_ne, rk_stage_user, max_hypervis_courant use control_mod, only: tstep_type, hypervis_power, hypervis_scaling use cam_abortutils, only: endrun @@ -221,7 +221,6 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& use time_mod, only: tstep use mesh_mod, only: MeshUseMeshFile use dimensions_mod, only: ksponge_end, kmvis_ref, kmcnd_ref,rho_ref - use physconst, only: cpair type(element_t) , intent(inout) :: elem(:) integer , intent(in) :: nets,nete @@ -359,7 +358,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f9.3)') 'Element area: max/min',(max_area/min_area) if (.not.MeshUseMeshFile) then write(iulog,'(a,f6.3,f8.2)') "Average equatorial node spacing (deg, km) = ", & - dble(90)/dble(ne*(np-1)), real(pi, r8)*real(rearth, 
r8)/(2000.0_r8*dble(ne*(np-1))) + real(90, r8)/real(ne*(np-1), r8), pi*rearth/(2000.0_r8*real(ne*(np-1), r8)) end if write(iulog,'(a,2f9.3)') 'norm of Dinv (min, max): ', min_normDinv, max_normDinv write(iulog,'(a,1f8.2)') 'Max Dinv-based element distortion: ', max_ratio @@ -415,12 +414,12 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& ! dx_long elem(ie)%variable_hyperviscosity = sqrt((elem(ie)%dx_long/max_unif_dx) ** hypervis_power) elem(ie)%hv_courant = dtnu*(elem(ie)%variable_hyperviscosity(1,1)**2) * & - (lambda_vis**2) * ((real(ra, r8)*elem(ie)%normDinv)**4) + (lambda_vis**2) * ((ra*elem(ie)%normDinv)**4) ! Check to see if this is stable if (elem(ie)%hv_courant.gt.max_hypervis_courant) then stable_hv = sqrt( max_hypervis_courant / & - ( dtnu * (lambda_vis)**2 * (real(ra, r8)*elem(ie)%normDinv)**4 ) ) + ( dtnu * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 ) ) #if 0 ! Useful print statements for debugging the adjustments to hypervis @@ -432,7 +431,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& #endif ! make sure that: elem(ie)%hv_courant <= max_hypervis_courant elem(ie)%variable_hyperviscosity = stable_hv - elem(ie)%hv_courant = dtnu*(stable_hv**2) * (lambda_vis)**2 * (real(ra, r8)*elem(ie)%normDinv)**4 + elem(ie)%hv_courant = dtnu*(stable_hv**2) * (lambda_vis)**2 * (ra*elem(ie)%normDinv)**4 end if normDinv_hypervis = max(normDinv_hypervis, elem(ie)%hv_courant/dtnu) @@ -445,9 +444,9 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& max_hypervis = ParallelMax(max_hypervis, hybrid) call wrap_repro_sum(nvars=1, comm=hybrid%par%comm) avg_hypervis = global_shared_sum(1)/dble(nelem) - + normDinv_hypervis = ParallelMax(normDinv_hypervis, hybrid) - + ! apply DSS (aka assembly procedure) to variable_hyperviscosity (makes continuous) call initEdgeBuffer(hybrid%par,edgebuf,elem,1) do ie=nets,nete @@ -488,7 +487,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& (lambda**(-hypervis_scaling/2) ) else ! constant coefficient formula: - normDinv_hypervis = (lambda_vis**2) * (real(ra, r8)*max_normDinv)**4 + normDinv_hypervis = (lambda_vis**2) * (ra*max_normDinv)**4 endif !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
@@ -622,14 +621,14 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& end if ugw = 342.0_r8 !max gravity wave speed - dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*real(ra, r8)) - dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*real(ra, r8)) - dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*real(ra, r8)) + dt_max_adv = S_rk/(umax*max_normDinv*lambda_max*ra) + dt_max_gw = S_rk/(ugw*max_normDinv*lambda_max*ra) + dt_max_tracer_se = S_rk_tracer*min_gw/(umax*max_normDinv*ra) if (ntrac>0) then if (large_Courant_incr) then - dt_max_tracer_fvm = dble(nhe)*(4.0_r8*real(pi, r8)*real(Rearth, r8)/dble(4.0_r8*ne*nc))/umax + dt_max_tracer_fvm = real(nhe, r8)*(4.0_r8*pi*real(Rearth, r8)/real(4.0_r8*ne*nc, r8))/umax else - dt_max_tracer_fvm = dble(nhe)*(2.0_r8*real(pi, r8)*real(Rearth, r8)/dble(4.0_r8*ne*nc))/umax + dt_max_tracer_fvm = real(nhe, r8)*(2.0_r8*pi*real(Rearth, r8)/real(4.0_r8*ne*nc, r8))/umax end if else dt_max_tracer_fvm = -1.0_r8 @@ -638,8 +637,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& dt_max_hypervis_tracer = s_hypervis/(nu_q*normDinv_hypervis) max_laplace = MAX(MAXVAL(nu_scale_top(:))*nu_top,MAXVAL(kmvis_ref(:)/rho_ref(:))) - max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(real(cpair, r8)*rho_ref(:)))) - dt_max_laplacian_top = 1.0_r8/(max_laplace*((real(ra, r8)*max_normDinv)**2)*lambda_vis) + max_laplace = MAX(max_laplace,MAXVAL(kmcnd_ref(:)/(cpair*rho_ref(:)))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) if (hybrid%masterthread) then write(iulog,'(a,f10.2,a)') ' ' @@ -669,8 +668,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f10.2)') '* dt_remap (vertical remap dt) ',dt_remap_actual do k=1,ksponge_end max_laplace = MAX(nu_scale_top(k)*nu_top,kmvis_ref(k)/rho_ref(k)) - max_laplace = MAX(max_laplace,kmcnd_ref(k)/(real(cpair, r8)*rho_ref(k))) - dt_max_laplacian_top = 1.0_r8/(max_laplace*((real(ra, r8)*max_normDinv)**2)*lambda_vis) + max_laplace = MAX(max_laplace,kmcnd_ref(k)/(cpair*rho_ref(k))) + dt_max_laplacian_top = 1.0_r8/(max_laplace*((ra*max_normDinv)**2)*lambda_vis) write(iulog,'(a,f10.2,a,f10.2,a)') '* dt (del2 sponge ; u,v,T,dM) < ',& dt_max_laplacian_top,'s',dt_dyn_del2_actual,'s' @@ -1068,7 +1067,7 @@ subroutine wrap_repro_sum (nvars, comm, nsize) end subroutine wrap_repro_sum subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu,factor,str) - use physconst, only: rearth + use dynconst, only: rearth use control_mod, only: hypervis_scaling,hypervis_power use hybrid_mod, only: hybrid_t use cam_abortutils, only: endrun @@ -1101,7 +1100,7 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min ! ! grid spacing in meters = max_min_dx*1000.0_r8 ! 
- nu_fac = (real(rearth, r8)/6.37122E6_r8)*1.0E15_r8/(110000.0_r8**uniform_res_hypervis_scaling) + nu_fac = (rearth/6.37122E6_r8)*1.0E15_r8/(110000.0_r8**uniform_res_hypervis_scaling) if (nu < 0) then if (ne <= 0) then @@ -1114,7 +1113,7 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min write(iulog,'(a,2e9.2,a,2f9.2)') "Value at min/max grid spacing: ",nu_min,nu_max,& " Max/min grid spacing (km) = ",max_min_dx,min_min_dx end if - nu = nu_min*(2.0_r8*real(rearth, r8)/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(real(rearth**4, r8)) + nu = nu_min*(2.0_r8*rearth/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(rearth**4) if (hybrid%masterthread) & write(iulog,'(a,a,a,e9.3)') "Nu_tensor",TRIM(str)," = ",nu else if (hypervis_power/=0) then diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 index 19da3ea2..10f69c8f 100644 --- a/src/dynamics/se/dycore/interpolate_mod.F90 +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -7,7 +7,7 @@ module interpolate_mod use coordinate_systems_mod, only: spherical_polar_t, cartesian2d_t, & cartesian3D_t, sphere2cubedsphere, spherical_to_cart, & cubedsphere2cart, distance, change_coordinates, projectpoint - use physconst, only: PI + use dynconst, only: PI use quadrature_mod, only: quadrature_t, gauss, gausslobatto use parallel_mod, only: syncmp, parallel_t use cam_abortutils, only: endrun, check_allocate @@ -187,12 +187,12 @@ function get_interp_gweight() result(gw) end function get_interp_gweight function get_interp_lat() result(thislat) real(kind=r8) :: thislat(nlat) - thislat=lat*180.0_r8/real(PI, r8) + thislat=lat*180.0_r8/PI return end function get_interp_lat function get_interp_lon() result(thislon) real(kind=r8) :: thislon(nlon) - thislon=lon*180.0_r8/real(PI, r8) + thislon=lon*180.0_r8/PI return end function get_interp_lon @@ -1012,10 +1012,10 @@ subroutine cube_facepoint_ne(sphere, ne, cart, number) yp = cube%y ! MNL: for uniform grids (on cube face), analytic solution is fine - x1 = xp + 0.25_r8*real(PI, r8) - x2 = yp + 0.25_r8*real(PI, r8) + x1 = xp + 0.25_r8*PI + x2 = yp + 0.25_r8*PI - dx = (0.5_r8*real(PI, r8))/ne + dx = (0.5_r8*PI)/ne ie = INT(ABS(x1)/dx) je = INT(ABS(x2)/dx) ! if we are exactly on an element edge, we can put the point in diff --git a/src/dynamics/se/dycore/mesh_mod.F90 b/src/dynamics/se/dycore/mesh_mod.F90 index 6147cdce..2db1ad8b 100644 --- a/src/dynamics/se/dycore/mesh_mod.F90 +++ b/src/dynamics/se/dycore/mesh_mod.F90 @@ -1,7 +1,7 @@ module mesh_mod use shr_kind_mod, only: r8=>shr_kind_r8 - use physconst, only: PI + use dynconst, only: PI use control_mod, only: MAX_FILE_LEN use cam_abortutils, only: endrun, check_allocate @@ -667,7 +667,7 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) call endrun('initialize_space_filling_curve: Unreasonably small element found. less than .00001') end if - ne = CEILING(0.5_r8*real(PI, r8)/(h/2)); + ne = CEILING(0.5_r8*PI/(h/2)); ! find the smallest ne2 which is a power of 2 and ne2>ne ne2=2**ceiling( log(real(ne))/log(2._r8) ) @@ -712,8 +712,8 @@ subroutine initialize_space_filling_curve(GridVertex, element_nodes) y = centroids(i,2) ! map this element to an (i2,j2) element ! [ -PI/4, PI/4 ] -> [ 0, ne2 ] - i2=nint( (0.5_r8 + 2.0_r8*x/real(PI, r8))*ne2 + 0.5_r8 ) - j2=nint( (0.5_r8 + 2.0_r8*y/real(PI, r8))*ne2 + 0.5_r8 ) + i2=nint( (0.5_r8 + 2.0_r8*x/PI)*ne2 + 0.5_r8 ) + j2=nint( (0.5_r8 + 2.0_r8*y/PI)*ne2 + 0.5_r8 ) if (face == 4 .or. face == 6 ) i2 = ne2-i2+1 if (face == 1 .or. 
face == 2 .or. face == 6) j2 = ne2-j2+1 if (i2<1 ) i2=1 diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index 1db6dc8c..4d75ec2e 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -54,11 +54,11 @@ subroutine prim_advance_init(par, elem) end subroutine prim_advance_init subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, nete) - use ccpp_kinds, only: kind_phys - use physconst, only: get_cp, thermodynamic_active_species_num - use physconst, only: get_kappa_dry, dry_air_species_num + use physconst, only: thermodynamic_active_species_num + use physconst, only: dry_air_species_num use physconst, only: thermodynamic_active_species_idx_dycore - use physconst, only: cpair + use dynconst, only: cpair + use dyn_thermo, only: get_cp, get_kappa_dry !SE dycore: use control_mod, only: tstep_type, qsplit @@ -92,7 +92,7 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net integer :: qidx(thermodynamic_active_species_num) real (kind=r8) :: kappa(np,np,nlev,nets:nete) - real (kind=kind_phys) :: inv_cp_full(np,np,nlev,nets:nete) + real (kind=r8) :: inv_cp_full(np,np,nlev,nets:nete) call t_startf('prim_advance_exp') nm1 = tl%nm1 @@ -149,12 +149,12 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net if (lcp_moist) then do ie=nets,nete call get_cp(1,np,1,np,1,nlev,thermodynamic_active_species_num,& - real(qwater(:,:,:,:,ie), kind_phys), & + qwater(:,:,:,:,ie), & .true.,inv_cp_full(:,:,:,ie),active_species_idx_dycore=qidx) end do else do ie=nets,nete - inv_cp_full(:,:,:,ie) = 1.0_kind_phys/cpair + inv_cp_full(:,:,:,ie) = 1.0_r8/cpair end do end if do ie=nets,nete @@ -323,7 +323,8 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu use element_mod, only: element_t use control_mod, only: ftype, ftype_conserve use fvm_control_volume_mod, only: fvm_struct - use physconst, only: get_dp, thermodynamic_active_species_idx_dycore + use physconst, only: thermodynamic_active_species_idx_dycore + use dyn_thermo, only: get_dp type (element_t) , intent(inout) :: elem(:) type(fvm_struct) , intent(inout) :: fvm(:) real (kind=r8), intent(in) :: dt_dribble, dt_phys @@ -500,7 +501,8 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, ! For correct scaling, dt2 should be the same 'dt2' used in the leapfrog advace ! ! - use physconst, only: gravit, cappa, cpair, tref, lapse_rate, get_dp_ref + use dynconst, only: gravit, cappa, cpair, tref, lapse_rate + use dyn_thermo, only: get_dp_ref use dimensions_mod, only: np, nlev, nc, ntrac, npsq, qsize use dimensions_mod, only: hypervis_dynamic_ref_state,ksponge_end use dimensions_mod, only: nu_scale_top,nu_lev,kmvis_ref,kmcnd_ref,rho_ref,km_sponge_factor @@ -517,8 +519,8 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, use viscosity_mod, only: biharmonic_wk_dp3d use hybvcoord_mod, only: hvcoord_t use fvm_control_volume_mod, only: fvm_struct - use physconst, only: thermodynamic_active_species_idx_dycore - use physconst, only: get_molecular_diff_coef,get_rho_dry + use physconst, only: thermodynamic_active_species_idx_dycore + use dyn_thermo, only: get_molecular_diff_coef,get_rho_dry !Un-comment once history output has been resolved in CAMDEN -JN: ! use cam_history, only: outfld, hist_fld_active @@ -589,14 +591,14 @@ subroutine advance_hypervis_dp(edge3,elem,fvm,hybrid,deriv,nt,qn0,nets,nete,dt2, ! 
T1 = .0065*Tref*Cp/g ! = ~191 ! T0 = Tref-T1 ! = ~97 ! - T1 = real(lapse_rate*Tref*cpair/gravit, r8) + T1 = lapse_rate*Tref*cpair/gravit T0 = Tref-T1 do ie=nets,nete do k=1,nlev dp3d_ref(:,:,k,ie) = ((hvcoord%hyai(k+1)-hvcoord%hyai(k))*hvcoord%ps0 + & (hvcoord%hybi(k+1)-hvcoord%hybi(k))*ps_ref(:,:,ie)) tmp = hvcoord%hyam(k)*hvcoord%ps0+hvcoord%hybm(k)*ps_ref(:,:,ie) - tmp2 = (tmp/hvcoord%ps0)**real(cappa, r8) + tmp2 = (tmp/hvcoord%ps0)**cappa T_ref(:,:,k,ie) = (T0+T1*tmp2) end do end do @@ -1141,11 +1143,12 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& ! allows us to fuse these two loops for more cache reuse ! ! =================================== - use ccpp_kinds, only: kind_phys - use physconst, only: get_gz_given_dp_Tv_Rdry - use physconst, only: thermodynamic_active_species_num, get_virtual_temp, get_cp_dry - use physconst, only: thermodynamic_active_species_idx_dycore,get_R_dry - use physconst, only: dry_air_species_num,get_exner + use physconst, only: thermodynamic_active_species_num + use physconst, only: thermodynamic_active_species_idx_dycore + use physconst, only: dry_air_species_num + use dyn_thermo, only: get_gz_given_dp_Tv_Rdry + use dyn_thermo, only: get_virtual_temp, get_cp_dry + use dyn_thermo, only: get_R_dry, get_exner !SE dycore: use dimensions_mod, only: np, nc, nlev, ntrac, ksponge_end @@ -1205,7 +1208,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& real (kind=r8), dimension(np,np,2) :: grad_exner real (kind=r8), dimension(np,np) :: theta_v - real (kind=kind_phys), dimension(np,np,nlev) :: cp_dry + real (kind=r8), dimension(np,np,nlev) :: cp_dry type (EdgeDescriptor_t):: desc @@ -1228,7 +1231,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& call get_R_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& qwater(:,:,:,:,ie),qidx,R_dry) call get_cp_dry(1,np,1,np,1,nlev,1,nlev,thermodynamic_active_species_num,& - real(qwater(:,:,:,:,ie),kind_phys),qidx,cp_dry) + qwater(:,:,:,:,ie),qidx,cp_dry) do k=1,nlev dp_dry(:,:,k) = elem(ie)%state%dp3d(:,:,k,n0) @@ -1351,7 +1354,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,& call gradient_sphere(Ephi(:,:),deriv,elem(ie)%Dinv,vtemp) density_inv(:,:) = R_dry(:,:,k)*T_v(:,:,k)/p_full(:,:,k) - if (dry_air_species_num==0) then + if (dry_air_species_num==0) then exner(:,:)=(p_full(:,:,k)/hvcoord%ps0)**kappa(:,:,k,ie) theta_v(:,:)=T_v(:,:,k)/exner(:,:) call gradient_sphere(exner(:,:),deriv,elem(ie)%Dinv,grad_exner) @@ -1590,9 +1593,8 @@ subroutine distribute_flux_at_corners(cflux, corners, getmapP) end subroutine distribute_flux_at_corners subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suffix) - use ccpp_kinds, only: kind_phys - use physconst, only: gravit, cpair, rearth, omega - use physconst, only: get_dp, get_cp + use dynconst, only: gravit, cpair, rearth, omega + use dyn_thermo, only: get_dp, get_cp use physconst, only: thermodynamic_active_species_idx_dycore use hycoef, only: hyai, ps0 use string_utils, only: strlist_get_ind @@ -1632,7 +1634,7 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf real(kind=r8) :: mo(npsq) ! mass AAM real(kind=r8) :: mr_cnst, mo_cnst, cos_lat, mr_tmp, mo_tmp - real(kind=kind_phys) :: cp(np,np,nlev) + real(kind=r8) :: cp(np,np,nlev) integer :: ie,i,j,k integer :: ixwv,ixcldice, ixcldliq, ixtt ! 
CLDICE, CLDLIQ and test tracer indices @@ -1674,8 +1676,8 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf ke = 0.0_r8 call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),2,thermodynamic_active_species_idx_dycore,& elem(ie)%state%dp3d(:,:,:,tl),pdel,ps=ps,ptop=hyai(1)*ps0) - call get_cp(1,np,1,np,1,nlev,qsize,real(elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp), kind_phys),& - .false.,cp,dp_dry=real(elem(ie)%state%dp3d(:,:,:,tl), kind_phys),& + call get_cp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),& + .false.,cp,dp_dry=elem(ie)%state%dp3d(:,:,:,tl),& active_species_idx_dycore=thermodynamic_active_species_idx_dycore) do k = 1, nlev do j=1,np @@ -1683,14 +1685,14 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf ! ! kinetic energy ! - ke_tmp = 0.5_r8*(elem(ie)%state%v(i,j,1,k,tl)**2+ elem(ie)%state%v(i,j,2,k,tl)**2)*pdel(i,j,k)/real(gravit, r8) + ke_tmp = 0.5_r8*(elem(ie)%state%v(i,j,1,k,tl)**2+ elem(ie)%state%v(i,j,2,k,tl)**2)*pdel(i,j,k)/gravit if (lcp_moist) then - se_tmp = real(cp(i,j,k), r8)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/real(gravit, r8) + se_tmp = cp(i,j,k)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit else ! ! using CAM physics definition of internal energy ! - se_tmp = real(cpair, r8)*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/real(gravit, r8) + se_tmp = cpair*elem(ie)%state%T(i,j,k,tl)*pdel(i,j,k)/gravit end if se (i+(j-1)*np) = se (i+(j-1)*np) + se_tmp ke (i+(j-1)*np) = ke (i+(j-1)*np) + ke_tmp @@ -1700,7 +1702,7 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf do j=1,np do i = 1, np - se(i+(j-1)*np) = se(i+(j-1)*np) + elem(ie)%state%phis(i,j)*ps(i,j)/real(gravit, r8) + se(i+(j-1)*np) = se(i+(j-1)*np) + elem(ie)%state%phis(i,j)*ps(i,j)/gravit end do end do ! @@ -1755,8 +1757,8 @@ subroutine calc_tot_energy_dynamics(elem,fvm,nets,nete,tl,tl_qdp,outfld_name_suf if ( hist_fld_active(name_out1).or.hist_fld_active(name_out2)) then call strlist_get_ind(cnst_name_gll, 'CLDLIQ', ixcldliq, abort=.false.) call strlist_get_ind(cnst_name_gll, 'CLDICE', ixcldice, abort=.false.) - mr_cnst = real(rearth**3/gravit, r8) - mo_cnst = real(omega*rearth**4/gravit, r8) + mr_cnst = rearth**3/gravit + mo_cnst = omega*rearth**4/gravit do ie=nets,nete mr = 0.0_r8 mo = 0.0_r8 @@ -1830,7 +1832,7 @@ end subroutine output_qdp_var_dynamics ! column integrate mass-variable and outfld ! subroutine util_function(f_in,nx,nz,name_out,ie) - use physconst, only: gravit + use dynconst, only: gravit !Un-comment once history outputs are enabled -JN: ! 
use cam_history, only: outfld, hist_fld_active integer, intent(in) :: nx,nz,ie @@ -1843,7 +1845,7 @@ subroutine util_function(f_in,nx,nz,name_out,ie) #if 0 if (hist_fld_active(name_out)) then f_out = 0.0_r8 - inv_g = 1.0_r8/real(gravit, r8) + inv_g = 1.0_r8/gravit do k = 1, nz do j = 1, nx do i = 1, nx @@ -1867,8 +1869,9 @@ subroutine compute_omega(hybrid,n0,qn0,elem,deriv,nets,nete,dt,hvcoord) use edge_mod, only : edgevpack, edgevunpack use bndry_mod, only : bndry_exchange use viscosity_mod, only: biharmonic_wk_omega - use physconst, only: thermodynamic_active_species_num, get_dp + use physconst, only: thermodynamic_active_species_num use physconst, only: thermodynamic_active_species_idx_dycore + use dyn_thermo, only: get_dp implicit none type (hybrid_t) , intent(in) :: hybrid type (element_t) , intent(inout), target :: elem(:) @@ -1983,8 +1986,8 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) ! Damping should then be applied to values relative to ! this reference. !======================================================================= - use hybvcoord_mod ,only: hvcoord_t - use physconst ,only: rair,cappa + use hybvcoord_mod , only: hvcoord_t + use dynconst, only: rair, cappa use element_mod, only: element_t use dimensions_mod, only: np,nlev use hybrid_mod, only: hybrid_t @@ -2040,7 +2043,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) ! Calculate (dry) geopotential values !-------------------------------------- - dPhi (:,:,:) = 0.5_r8*(real(rair, r8)*elem(ie)%state%T (:,:,:,nt) & + dPhi (:,:,:) = 0.5_r8*(rair*elem(ie)%state%T (:,:,:,nt) & *elem(ie)%state%dp3d(:,:,:,nt) & /P_val(:,:,:) ) Phi_val (:,:,nlev) = elem(ie)%state%phis(:,:) + dPhi(:,:,nlev) @@ -2068,7 +2071,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) Phis_avg(:,:,ie) = E_phis/E_Awgt do kk=1,nlev Phi_avg(:,:,kk,ie) = E_phi(kk) /E_Awgt - RT_avg (:,:,kk,ie) = E_T (kk)*real(rair, r8)/E_Awgt + RT_avg (:,:,kk,ie) = E_T (kk)*rair/E_Awgt end do end do ! ie=nets,nete @@ -2140,7 +2143,7 @@ subroutine calc_dp3d_reference(elem,edge3,hybrid,nets,nete,nt,hvcoord,dp3d_ref) if(.FALSE.) then ! DRY ADIABATIC laspe rate !------------------------------ - RT_lapse(:,:) = -1._r8*real(cappa, r8) + RT_lapse(:,:) = -1._r8*cappa else ! ENVIRONMENTAL (empirical) laspe rate !-------------------------------------- @@ -2283,7 +2286,7 @@ end subroutine rayleigh_friction subroutine solve_diffusion(dt,nx,nlev,i,j,nlay,pmid,pint,km,fld,boundary_condition,dfld) - use physconst, only: gravit + use dynconst, only: gravit real(kind=r8), intent(in) :: dt integer , intent(in) :: nlay, nlev,nx, i, j real(kind=r8), intent(in) :: pmid(nx,nx,nlay),pint(nx,nx,nlay+1),km(nx,nx,nlay+1) @@ -2305,21 +2308,21 @@ subroutine solve_diffusion(dt,nx,nlev,i,j,nlay,pmid,pint,km,fld,boundary_conditi else if (boundary_condition==1) then value_level0 = 0.75_r8*fld(i,j,1) ! value above sponge k=1 - alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * value_level0)/(1._r8 + alp + alm) else ! ! set fld'=0 at model top ! 
k=1 - alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k)-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(0.5_r8*(pmid(i,j,1)-pmid(i,j,2))))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,1) + alp * current_guess(2) + alm * current_guess(1))/(1._r8 + alp + alm) end if do k = 2, nlay-1 - alp = dt*(km(i,j,k+1)*real(gravit*gravit, r8)/(pmid(i,j,k )-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) - alm = dt*(km(i,j,k )*real(gravit*gravit, r8)/(pmid(i,j,k-1)-pmid(i,j,k )))/(pint(i,j,k)-pint(i,j,k+1)) + alp = dt*(km(i,j,k+1)*gravit*gravit/(pmid(i,j,k )-pmid(i,j,k+1)))/(pint(i,j,k)-pint(i,j,k+1)) + alm = dt*(km(i,j,k )*gravit*gravit/(pmid(i,j,k-1)-pmid(i,j,k )))/(pint(i,j,k)-pint(i,j,k+1)) next_iterate(k) = (fld(i,j,k) + alp * current_guess(k+1) + alm * current_guess(k-1))/(1._r8 + alp + alm) end do next_iterate(nlay) = (fld(i,j,nlay) + alp * fld(i,j,nlay) + alm * current_guess(nlay-1))/(1._r8 + alp + alm) ! bottom BC diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 index c9a58afa..567ae3ac 100644 --- a/src/dynamics/se/dycore/prim_advection_mod.F90 +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -958,7 +958,8 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) use dimensions_mod , only : ntrac use dimensions_mod, only : lcp_moist, kord_tr,kord_tr_cslam use cam_logfile, only : iulog - use physconst, only : pi,get_thermal_energy,get_dp,get_virtual_temp + use dynconst , only : pi + use dyn_thermo , only : get_thermal_energy, get_dp, get_virtual_temp use physconst , only : thermodynamic_active_species_idx_dycore use thread_mod , only : omp_set_nested use control_mod, only: vert_remap_uvTq_alg @@ -973,7 +974,7 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) real (kind=r8), dimension(np,np,nlev) :: dp_moist,dp_star_moist, dp_dry,dp_star_dry real (kind=r8), dimension(np,np,nlev) :: internal_energy_star real (kind=r8), dimension(np,np,nlev,2):: ttmp - real(r8), parameter :: rad2deg = 180.0_r8/real(pi, r8) + real(r8), parameter :: rad2deg = 180.0_r8/pi integer :: region_num_threads,qbeg,qend,kord_uvT(1) type (hybrid_t) :: hybridnew,hybridnew2 real (kind=r8) :: ptop @@ -1001,7 +1002,7 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) ! call get_virtual_temp(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), & internal_energy_star,dp_dry=elem(ie)%state%dp3d(:,:,:,np1), & - active_species_idx_dycore=thermodynamic_active_species_idx_dycore) + active_species_idx_dycore=thermodynamic_active_species_idx_dycore) internal_energy_star = internal_energy_star*elem(ie)%state%t(:,:,:,np1) end if ! @@ -1049,23 +1050,23 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete) ! call get_dp(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp),2,& thermodynamic_active_species_idx_dycore,dp_dry,dp_moist(:,:,:)) - + ! ! Remapping of temperature ! - if (vert_remap_uvTq_alg>-20) then + if (vert_remap_uvTq_alg>-20) then ! ! remap internal energy and back out temperature - ! + ! if (lcp_moist) then call remap1(internal_energy_star,np,1,1,1,dp_star_dry,dp_dry,ptop,1,.true.,kord_uvT) ! ! compute sum c^(l)_p*m^(l)*dp on arrival (Eulerian) grid - ! + ! 
ttmp(:,:,:,1) = 1.0_r8
call get_thermal_energy(1,np,1,np,1,nlev,qsize,elem(ie)%state%qdp(:,:,:,1:qsize,np1_qdp), &
ttmp(:,:,:,1),dp_dry,ttmp(:,:,:,2), &
- active_species_idx_dycore=thermodynamic_active_species_idx_dycore)
+ active_species_idx_dycore=thermodynamic_active_species_idx_dycore)
elem(ie)%state%t(:,:,:,np1)=internal_energy_star/ttmp(:,:,:,2)
else
internal_energy_star(:,:,:)=elem(ie)%state%t(:,:,:,np1)*dp_star_moist
@@ -1091,13 +1092,13 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete)
call remap1(elem(ie)%state%v(:,:,1,:,np1),np,1,1,1,dp_star_moist,dp_moist,ptop,-1,.false.,kord_uvT)
call remap1(elem(ie)%state%v(:,:,2,:,np1),np,1,1,1,dp_star_moist,dp_moist,ptop,-1,.false.,kord_uvT)
enddo
-
+
if (ntrac>0) then
!
! vertical remapping of CSLAM tracers
!
do ie=nets,nete
- dpc_star=fvm(ie)%dp_fvm(1:nc,1:nc,:)
+ dpc_star=fvm(ie)%dp_fvm(1:nc,1:nc,:)
do k=1,nlev
do j=1,nc
do i=1,nc
diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90
index cceba34f..b0e8e425 100644
--- a/src/dynamics/se/dycore/prim_init.F90
+++ b/src/dynamics/se/dycore/prim_init.F90
@@ -50,7 +50,7 @@ subroutine prim_init1(elem, fvm, par, Tl)
use spacecurve_mod, only: genspacepart
use dof_mod, only: global_dof, CreateUniqueIndex, SetElemOffset
use params_mod, only: SFCURVE
- use physconst, only: pi
+ use dynconst, only: pi
use reduction_mod, only: red_min, red_max, red_max_int, red_flops
use reduction_mod, only: red_sum, red_sum_int, initreductionbuffer
use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc
@@ -284,7 +284,7 @@ subroutine prim_init1(elem, fvm, par, Tl)
aratio(ie,1) = sum(elem(ie)%mp(:,:)*elem(ie)%metdet(:,:))
end do
call repro_sum(aratio, area, nelemd, nelemd, 1, commid=par%comm)
- area(1) = 4.0_r8*real(pi, r8)/area(1) ! ratio correction
+ area(1) = 4.0_r8*pi/area(1) ! ratio correction
deallocate(aratio)
if (par%masterproc) then
write(iulog,'(2a,f20.17)') subname, "re-initializing cube elements: area correction=", area(1)
diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90
index 134d7918..47954f33 100644
--- a/src/dynamics/se/dycore/quadrature_mod.F90
+++ b/src/dynamics/se/dycore/quadrature_mod.F90
@@ -104,7 +104,7 @@ end function gauss_wts
! ==============================================================
function gauss_pts(np1) result(pts)
- use physconst, only: pi
+ use dynconst, only: pi
integer, intent(in) :: np1 ! Number of velocity grid points
real (kind=r8) :: pts(np1)
@@ -156,7 +156,7 @@ function gauss_pts(np1) result(pts)
! Compute first half of the roots by "polynomial deflation".
! ============================================================
- dth = real(pi, r8)/(2*n+2)
+ dth = pi/real(2*n+2, r8)
nh = (n+1)/2
@@ -314,7 +314,7 @@ end function gausslobatto
! ==============================================================
function gausslobatto_pts(np1) result(pts)
- use physconst, only: pi
+ use dynconst, only: pi
integer, intent(in) :: np1 ! Number of velocity grid points
real (kind=r8) :: pts(np1)
@@ -327,10 +327,10 @@ function gausslobatto_pts(np1) result(pts)
real (kind=r8) :: jacm1(0:np1)
real (kind=r8) :: djac(0:np1)
- integer prec ! number of mantissa bits
+ integer prec ! number of mantissa bits
real (kind=r8) eps ! machine epsilon
- real (kind=r8), parameter :: convthresh = 10 ! convergence threshold relative
- ! to machine epsilon
+ real (kind=r8), parameter :: convthresh = 10 ! convergence threshold relative
+ ! to machine epsilon
integer, parameter :: kstop = 30 !
max iterations for polynomial deflation real (kind=r8) :: a,b,det @@ -357,7 +357,7 @@ function gausslobatto_pts(np1) result(pts) ! ========================================================= ! compute machine precision and set the convergence - ! threshold thresh to 10 times that level + ! threshold thresh to 10 times that level ! ========================================================= prec = PRECISION(c10) @@ -376,7 +376,7 @@ function gausslobatto_pts(np1) result(pts) ! ============================================================ ! ============================================================ - ! compute the parameters in the polynomial whose + ! compute the parameters in the polynomial whose ! roots are desired... ! ============================================================ @@ -387,7 +387,7 @@ function gausslobatto_pts(np1) result(pts) a = -(jac(n+1)*jacm1(n-1)-jacm1(n+1)*jac(n-1))/det b = -(jac(n )*jacm1(n+1)-jacm1(n )*jac(n+1))/det - dth = real(pi, r8)/(2*n+1) + dth = pi/real(2*n+1, r8) cd = COS(c2*dth) sd = SIN(c2*dth) cs = COS(dth) @@ -396,7 +396,7 @@ function gausslobatto_pts(np1) result(pts) nh = (n+1)/2 do j=1,nh-1 - x=cs ! first guess at root + x=cs ! first guess at root k=0 delx=c1 do while(k thresh) @@ -422,16 +422,16 @@ function gausslobatto_pts(np1) result(pts) ! cs = cs(theta) => cs(theta+2*dth) ! ===================================================== - cstmp=cs*cd-ss*sd - ss=cs*sd+ss*cd - cs=cstmp + cstmp=cs*cd-ss*sd + ss=cs*sd+ss*cd + cs=cstmp end do ! ================================================ ! compute the second half of the roots by symmetry ! ================================================ - do j=1,nh + do j=1,nh xjac(n-j) = -xjac(j) end do @@ -439,7 +439,7 @@ function gausslobatto_pts(np1) result(pts) ! ==================================================== ! Reverse the sign of everything so that indexing - ! increases with position + ! increases with position ! ==================================================== do j=0,n @@ -449,8 +449,8 @@ function gausslobatto_pts(np1) result(pts) end function gausslobatto_pts ! ================================================ - ! Gauss Lobatto Legendre Weights - ! ================================================ + ! Gauss Lobatto Legendre Weights + ! ================================================ function gausslobatto_wts(np1, glpts) result(wts) diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index ff4b7c8f..33561033 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -3,7 +3,7 @@ module dyn_comp ! CAM interfaces to the SE Dynamical Core use shr_kind_mod, only: r8=>shr_kind_r8, shr_kind_cl -use physconst, only: pi +use dynconst, only: pi use spmd_utils, only: iam, masterproc !use constituents, only: pcnst, cnst_get_ind, cnst_name, cnst_longname, & ! 
cnst_read_iv, qmin, cnst_type, tottnam, & @@ -576,8 +576,9 @@ subroutine dyn_init(dyn_in, dyn_out) use dyn_grid, only: elem, fvm use cam_pio_utils, only: clean_iodesc_list use physconst, only: thermodynamic_active_species_num, thermodynamic_active_species_idx - use physconst, only: thermodynamic_active_species_idx_dycore, rair, cpair - use physconst, only: get_molecular_diff_coef_reference + use physconst, only: thermodynamic_active_species_idx_dycore + use dynconst, only: cpair + use dyn_thermo, only: get_molecular_diff_coef_reference !use cam_history, only: addfld, add_default, horiz_only, register_vector_field !use gravity_waves_sources, only: gws_init diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 27bcc804..cd7b270a 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -32,7 +32,7 @@ module dyn_grid use spmd_utils, only: masterproc, iam, mpicom, mstrid=>masterprocid, & npes use constituents, only: pcnst -use physconst, only: pi +use dynconst, only: pi use cam_initfiles, only: initial_file_get_id use physics_column_type, only: physics_column_t, kind_pcol use cam_map_utils, only: iMap @@ -130,6 +130,7 @@ subroutine model_grid_init() use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 use physconst, only: thermodynamic_active_species_num + use dynconst, only: dynconst_init use ref_pres, only: ref_pres_init use time_manager, only: get_nstep, get_step_size use dp_mapping, only: dp_init, dp_write, nphys_pts @@ -192,6 +193,13 @@ subroutine model_grid_init() ! Initialize hybrid coordinate arrays call hycoef_init(fh_ini, psdry=.true.) + ! Initialize physical and mathematical constants used by dynamics: + + ! Please note that this call must be done after 'physconst_readnl' + ! but before 'prim_init1' in order to avoid dependency issues + ! while still using the correct values for the constants: + call dynconst_init() + !Allocate SE dycore "hvcoord" structure: !+++++++ allocate(hvcoord%hyai(pverp), stat=ierr) @@ -443,14 +451,14 @@ subroutine set_dyn_col_values() ii = MOD(col_ind, fv_nphys) + 1 jj = (col_ind / fv_nphys) + 1 coord = fvm(elem_ind)%center_cart_physgrid(ii, jj) - local_dyn_columns(lindex)%lat_rad = coord%lat + local_dyn_columns(lindex)%lat_rad = real(coord%lat, kind_pcol) dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg local_dyn_columns(lindex)%lat_deg = dcoord - local_dyn_columns(lindex)%lon_rad = coord%lon + local_dyn_columns(lindex)%lon_rad = real(coord%lon, kind_pcol) dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg local_dyn_columns(lindex)%lon_deg = dcoord local_dyn_columns(lindex)%area = & - fvm(elem_ind)%area_sphere_physgrid(ii,jj) + real(fvm(elem_ind)%area_sphere_physgrid(ii,jj), kind_pcol) local_dyn_columns(lindex)%weight = & local_dyn_columns(lindex)%area ! 
File decomposition
@@ -479,15 +487,15 @@ subroutine set_dyn_col_values()
jj = elem(elem_ind)%idxP%ja(col_ind)
dcoord = elem(elem_ind)%spherep(ii,jj)%lat
- local_dyn_columns(lindex)%lat_rad = dcoord
+ local_dyn_columns(lindex)%lat_rad = real(dcoord, kind_pcol)
dcoord = local_dyn_columns(lindex)%lat_rad * radtodeg
local_dyn_columns(lindex)%lat_deg = dcoord
dcoord = elem(elem_ind)%spherep(ii,jj)%lon
- local_dyn_columns(lindex)%lon_rad = dcoord
+ local_dyn_columns(lindex)%lon_rad = real(dcoord, kind_pcol)
dcoord = local_dyn_columns(lindex)%lon_rad * radtodeg
local_dyn_columns(lindex)%lon_deg = dcoord
local_dyn_columns(lindex)%area = &
- 1.0_r8 / elem(elem_ind)%rspheremp(ii,jj)
+ real(1.0_kind_pcol / elem(elem_ind)%rspheremp(ii,jj), kind_pcol)
local_dyn_columns(lindex)%weight = local_dyn_columns(lindex)%area
! File decomposition
gindex = elem(elem_ind)%idxP%UniquePtoffset + col_ind - 1
diff --git a/src/dynamics/se/test_fvm_mapping.F90 b/src/dynamics/se/test_fvm_mapping.F90
index fc2bfb34..dc1f3bf7 100644
--- a/src/dynamics/se/test_fvm_mapping.F90
+++ b/src/dynamics/se/test_fvm_mapping.F90
@@ -1,7 +1,7 @@
module test_fvm_mapping
use shr_kind_mod, only: r8=>shr_kind_r8
! use cam_history, only: outfld
- use physconst, only: pi
+ use dynconst, only: pi
!SE dycore:
use fvm_control_volume_mod, only: fvm_struct
@@ -598,7 +598,6 @@ end function test_wind
SUBROUTINE regrot(pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kcall)
- use physconst, only: pi
!
!----------------------------------------------------------------------
!
@@ -693,7 +692,6 @@ SUBROUTINE regrot(pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kcall)
END SUBROUTINE regrot
SUBROUTINE turnwi(puarg,pvarg,pures,pvres,pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kcall)
- use physconst, only: pi
!
!-----------------------------------------------------------------------
!
@@ -808,7 +806,7 @@ SUBROUTINE turnwi(puarg,pvarg,pures,pvres,pxreg,pyreg,pxrot,pyrot,pxcen,pycen,kc
END SUBROUTINE turnwi
SUBROUTINE Rossby_Haurwitz (lon, lat,u_wind, v_wind)
- use physconst, only: rearth
+ use dynconst, only: rearth
!-----------------------------------------------------------------------
! input parameters
!-----------------------------------------------------------------------
diff --git a/src/dynamics/utils/dyn_thermo.F90 b/src/dynamics/utils/dyn_thermo.F90
new file mode 100644
index 00000000..dbdced82
--- /dev/null
+++ b/src/dynamics/utils/dyn_thermo.F90
@@ -0,0 +1,1044 @@
+module dyn_thermo
+
+  !Create interfaces for physics-based
+  !thermodynamic routines that ensure
+  !the real kind matches the kind used
+  !in the dycore.
+
+  use shr_kind_mod, only: kind_dyn=>shr_kind_r8
+  use ccpp_kinds, only: kind_phys
+  use cam_abortutils, only: check_allocate
+
+  implicit none
+  public
+
+  !Subroutines contained in this module are:
+  !
+  ! get_cp
+  ! get_cp_dry
+  ! get_kappa_dry
+  ! get_dp
+  ! get_dp_ref
+  ! get_molecular_diff_coef
+  ! get_molecular_diff_coef_reference
+  ! get_rho_dry
+  ! get_gz_given_dp_Tv_Rdry
+  ! get_virtual_temp
+  ! get_R_dry
+  ! get_exner
+  ! get_thermal_energy
+
+!==============================================================================
+CONTAINS
+!==============================================================================
+
+  !
+  !*************************************************************************************************************************
+  !
+  ! Compute generalized heat capacity at constant pressure
+  !
+  !*************************************************************************************************************************
+  !
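+  ! The wrappers below all follow the same pattern: when the dycore real kind
+  ! (kind_dyn) differs from the physics kind (kind_phys), the inputs are copied
+  ! into kind_phys temporaries, the physconst routine is called, and the outputs
+  ! are copied back to kind_dyn.  A usage sketch for get_cp (argument values are
+  ! illustrative only; they follow the call made from calc_tot_energy_dynamics):
+  !
+  !   call get_cp(1,np,1,np,1,nlev,qsize,                              &
+  !               elem(ie)%state%Qdp(:,:,:,1:qsize,tl_qdp),            &
+  !               .false., cp,                                         &
+  !               dp_dry=elem(ie)%state%dp3d(:,:,:,tl),                &
+  !               active_species_idx_dycore=thermodynamic_active_species_idx_dycore)
+  !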
+  subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species_idx_dycore)
+
+    use physconst, only: get_cp_phys=>get_cp
+
+    !Subroutine (dummy) arguments:
+
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) !Tracer array
+    real(kind_dyn), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1)
+    logical , intent(in) :: inv_cp !output inverse cp instead of cp
+    real(kind_dyn), intent(out) :: cp(i0:i1,j0:j1,k0:k1)
+    !
+    ! array of indices for thermodynamic active species in dycore
+    ! tracer array
+    ! (if different from physics index)
+    !
+    integer, optional, intent(in) :: active_species_idx_dycore(:)
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp, &
+                     dp_dry=dp_dry, &
+                     active_species_idx_dycore=active_species_idx_dycore)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,ntrac)
+    real(kind_phys) :: cp_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys), allocatable :: dp_dry_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_cp (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+
+    if (present(dp_dry)) then
+      allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret)
+      call check_allocate(iret, subname, &
+                          'dp_dry_phys(i0:i1,j0:j1,k0:k1)', &
+                          file=__FILE__, line=__LINE__)
+
+      !Set optional local variable:
+      dp_dry_phys = real(dp_dry, kind_phys)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,inv_cp,cp_phys, &
+                     dp_dry=dp_dry_phys, &
+                     active_species_idx_dycore=active_species_idx_dycore)
+
+    !Set output variables back to dynamics kind:
+    cp = real(cp_phys, kind_dyn)
+
+    !Deallocate variables:
+    if (allocated(dp_dry_phys)) then
+      deallocate(dp_dry_phys)
+    end if
+
+#endif
+
+  end subroutine get_cp
+  !
+  !****************************************************************************************************************
+  !
+  ! Compute dry air heat capacity at constant pressure
+  !
+  !****************************************************************************************************************
+  !
+  subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,cp_dry,fact)
+
+    use physconst, only: get_cp_dry_phys=>get_cp_dry
+
+    !Subroutine (dummy) arguments:
+
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)
+    integer, intent(in) :: active_species_idx(:) !index of thermodynamic active species
+    real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac)
+    real(kind_dyn), intent(out) :: cp_dry(i0:i1,j0:j1,k0:k1) !dry air heat capacity at constant pressure
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,&
+                         cp_dry, fact=fact)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)
+    real(kind_phys) :: cp_dry_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys), allocatable :: fact_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_cp_dry (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+
+    if (present(fact)) then
+      allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret)
+      call check_allocate(iret, subname, &
+                          'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', &
+                          file=__FILE__, line=__LINE__)
+
+      !Set optional local variable:
+      fact_phys = real(fact, kind_phys)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys,active_species_idx,&
+                         cp_dry_phys, fact=fact_phys)
+
+    !Set output variables back to dynamics kind:
+    cp_dry = real(cp_dry_phys, kind_dyn)
+
+    !Deallocate variables:
+    if (allocated(fact_phys)) then
+      deallocate(fact_phys)
+    end if
+
+#endif
+
+  end subroutine get_cp_dry
+  !
+  !*************************************************************************************************************************
+  !
+  ! compute generalized kappa =Rdry/cpdry
+  !
+  !*************************************************************************************************************************
+  !
+  subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,fact)
+
+    use physconst, only: get_kappa_dry_phys=>get_kappa_dry
+
+    !Subroutine (dummy) arguments:
+
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,nlev
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) !tracer array
+    integer, intent(in) :: active_species_idx(:) !index of thermodynamic active tracers
+    real(kind_dyn), intent(out) :: kappa_dry(i0:i1,j0:j1,k0:k1) !kappa dry
+    real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,&
+                            fact=fact)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac)
+    real(kind_phys) :: kappa_dry_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys), allocatable :: fact_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_kappa_dry (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+
+    if (present(fact)) then
+      allocate(fact_phys(i0:i1,j0:j1,nlev), stat=iret)
+      call check_allocate(iret, subname, &
+                          'fact_phys(i0:i1,j0:j1,nlev)', &
+                          file=__FILE__, line=__LINE__)
+
+      !Set optional local variable:
+      fact_phys = real(fact, kind_phys)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer_phys,active_species_idx,&
+                            kappa_dry_phys, fact=fact_phys)
+
+    !Set output variables back to dynamics kind:
+    kappa_dry = real(kappa_dry_phys, kind_dyn)
+
+    !Deallocate variables:
+    if (allocated(fact_phys)) then
+      deallocate(fact_phys)
+    end if
+
+#endif
+
+  end subroutine get_kappa_dry
+  !
+  !****************************************************************************************************************
+  !
+  ! Compute pressure level thickness from dry pressure and thermodynamic active
+  ! species mixing ratios
+  !
+  ! Tracer can either be in units of dry mixing ratio (mixing_ratio=1) or
+  ! "mass" (=m*dp_dry) (mixing_ratio=2)
+  !
+  !****************************************************************************************************************
+  !
+  subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop)
+
+    use physconst, only: get_dp_phys=>get_dp
+
+    !Subroutine (dummy) arguments:
+
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac ! array bounds
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) !tracers; quantity specified by mixing_ratio arg
+    integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
+                                        ! 2 => tracer is mass (q*dp)
+    integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
+    real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
+    real(kind_dyn), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness
+    real(kind_dyn), optional,intent(out) :: ps(:,:) ! surface pressure (if ps present then ptop
+                                                    ! must be present)
+    real(kind_dyn), optional,intent(in) :: ptop ! pressure at model top
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac)
+    real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys) :: dp_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys), allocatable :: ps_phys(:,:)
+    real(kind_phys), allocatable :: ptop_phys
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_dp (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+    dp_dry_phys = real(dp_dry, kind_phys)
+
+    if (present(ptop)) then
+      allocate(ptop_phys, stat=iret)
+      call check_allocate(iret, subname, 'ptop_phys', &
+                          file=__FILE__, line=__LINE__)
+
+      !Set optional local variable:
+      ptop_phys = real(ptop, kind_phys)
+    end if
+
+    if (present(ps)) then
+      allocate(ps_phys(i0:i1,j0:j1), stat=iret)
+      call check_allocate(iret, subname, &
+                          'ps_phys(i0:i1,j0:j1)', &
+                          file=__FILE__, line=__LINE__)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,mixing_ratio,&
+                     active_species_idx,dp_dry_phys,dp_phys,ps_phys,ptop_phys)
+
+    !Set output variables back to dynamics kind:
+    dp = real(dp_phys, kind_dyn)
+
+    if (present(ps)) then
+      ps = real(ps_phys, kind_dyn)
+      deallocate(ps_phys)
+    end if
+
+    !Deallocate variables:
+    if (allocated(ptop_phys)) then
+      deallocate(ptop_phys)
+    end if
+
+#endif
+
+  end subroutine get_dp
+  !
+  !*************************************************************************************************************************
+  !
+  ! compute reference pressure levels
+  !
+  !*************************************************************************************************************************
+  !
+ subroutine get_dp_ref(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) + + use physconst, only: get_dp_ref_phys=>get_dp_ref + + !Subroutine (dummy) arguments: + + integer, intent(in) :: i0,i1,j0,j1,k0,k1 + real(kind_dyn), intent(in) :: hyai(k0:k1+1),hybi(k0:k1+1),ps0 + real(kind_dyn), intent(in) :: phis(i0:i1,j0:j1) + real(kind_dyn), intent(out) :: dp_ref(i0:i1,j0:j1,k0:k1) + real(kind_dyn), intent(out) :: ps_ref(i0:i1,j0:j1) + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_dp_ref_phys(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) + +#else + + !Declare local variables: + real(kind_phys) :: hyai_phys(k0:k1+1) + real(kind_phys) :: hybi_phys(k0:k1+1) + real(kind_phys) :: ps0_phys + real(kind_phys) :: phis_phys(i0:i1,j0:j1) + real(kind_phys) :: dp_ref_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys) :: ps_ref_phys(i0:i1,j0:j1) + + !Set local variables: + hyai_phys = real(hyai, kind_phys) + hybi_phys = real(hybi, kind_phys) + ps0_phys = real(ps0, kind_phys) + phis_phys = real(phis, kind_phys) + + !Call physics routine using local vriables with matching kinds: + call get_dp_ref_phys(hyai_phys, hybi_phys, ps0_phys, i0,i1,j0,j1,k0,& + k1, phis_phys, dp_ref_phys, ps_ref_phys) + + !Set output variables back to dynamics kind: + dp_ref = real(dp_ref_phys, kind_dyn) + ps_ref = real(ps_ref_phys, kind_dyn) + +#endif + + end subroutine get_dp_ref + ! + !************************************************************************************************************************* + ! + ! compute 3D molecular diffusion and thermal conductivity + ! + !************************************************************************************************************************* + ! + subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sponge_factor,kmvis,kmcnd, ntrac,& + tracer, fact, active_species_idx_dycore, mbarv_in) + + use physconst, only: get_molecular_diff_coef_phys=>get_molecular_diff_coef + + !Subroutine (dummy) arguments: + + integer, intent(in) :: i0,i1,j0,j1,k1,nlev + real(kind_dyn), intent(in) :: temp(i0:i1,j0:j1,nlev) !temperature + integer, intent(in) :: get_at_interfaces ! 1:compute kmvis and kmcnd at interfaces + ! 0: compute kmvis and kmcnd at mid-levels + real(kind_dyn), intent(in) :: sponge_factor(1:k1) ! multiply kmvis and kmcnd with sponge_factor (for sponge layer) + real(kind_dyn), intent(out) :: kmvis(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(kind_dyn), intent(out) :: kmcnd(i0:i1,j0:j1,1:k1+get_at_interfaces) + integer, intent(in) :: ntrac + real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracer array + integer, intent(in), optional :: active_species_idx_dycore(:) ! index of active species in tracer + real(kind_dyn), intent(in), optional :: fact(i0:i1,j0:j1,k1) ! if tracer is in units of mass or moist + ! fact converts to dry mixing ratio: tracer/fact + real(kind_dyn), intent(in), optional :: mbarv_in(i0:i1,j0:j1,1:k1) ! 
composition dependent atmosphere mean mass + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces, & + sponge_factor,kmvis,kmcnd,ntrac, & + tracer, & + fact=fact, & + active_species_idx_dycore=active_species_idx_dycore, & + mbarv_in=mbarv_in) + +#else + + !Declare local variables: + real(kind_phys) :: temp_phys(i0:i1,j0:j1,nlev) + real(kind_phys) :: sponge_factor_phys(1:k1) + real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac) + real(kind_phys) :: kmvis_phys(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(kind_phys) :: kmcnd_phys(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(kind_phys), allocatable :: fact_phys(:,:,:) + real(kind_phys), allocatable :: mbarv_in_phys(:,:,:) + + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_molecular_diff_coef (dyn)' + + !Set local variables: + temp_phys = real(temp, kind_phys) + tracer_phys = real(tracer, kind_phys) + sponge_factor_phys = real(sponge_factor, kind_phys) + + if (present(fact)) then + allocate(fact_phys(i0:i1,j0:j1,k1), stat=iret) + call check_allocate(iret, subname, & + 'fact_phys(i0:i1,j0:j1,k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + fact_phys = real(fact, kind_phys) + end if + if (present(mbarv_in)) then + allocate(mbarv_in_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'mbarv_in_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + mbarv_in_phys = real(mbarv_in, kind_phys) + end if + + !Call physics routine using local vriables with matching kinds: + call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp_phys,get_at_interfaces, & + sponge_factor_phys,kmvis_phys,kmcnd_phys,ntrac, & + tracer_phys, & + fact=fact_phys, & + active_species_idx_dycore=active_species_idx_dycore,& + mbarv_in=mbarv_in_phys) + + !Set output variables back to dynamics kind: + kmvis = real(kmvis_phys, kind_dyn) + kmcnd = real(kmcnd_phys, kind_dyn) + + !Deallocate variables: + if (allocated(fact_phys)) then + deallocate(fact_phys) + end if + + if (allocated(mbarv_in_phys)) then + deallocate(mbarv_in_phys) + end if + +#endif + + end subroutine get_molecular_diff_coef + ! + !************************************************************************************************************************* + ! + ! compute reference vertical profile of density, molecular diffusion and + ! thermal conductivity + ! + !************************************************************************************************************************* + ! 
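+  !
+  ! [General Fortran note, not specific to this patch] Wrappers such as
+  ! get_molecular_diff_coef above pass local allocatables (fact_phys,
+  ! mbarv_in_phys, ...) to optional dummies without checking present() first.
+  ! This relies on the Fortran 2008 rule that an unallocated allocatable actual
+  ! argument associated with an ordinary optional dummy is treated as not
+  ! present, e.g. (hypothetical "demo" routine):
+  !
+  !   subroutine demo(opt)
+  !     real(kind_phys), optional, intent(in) :: opt(:)
+  !     if (present(opt)) then
+  !        ! .false. here whenever an unallocated allocatable was passed
+  !     end if
+  !   end subroutine demo
+  !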
+  subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvis_ref,kmcnd_ref,rho_ref)
+
+    use physconst, only: get_molecular_diff_coef_reference_phys=>get_molecular_diff_coef_reference
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: k0,k1 !min/max vertical index
+    real(kind_dyn), intent(in) :: tref !reference temperature
+    real(kind_dyn), intent(in) :: press(k0:k1) !pressure
+    real(kind_dyn), intent(in) :: sponge_factor(k0:k1) !multiply kmvis and kmcnd with sponge_factor (for sponge layer)
+    real(kind_dyn), intent(out) :: kmvis_ref(k0:k1) !reference molecular diffusion coefficient
+    real(kind_dyn), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient
+    real(kind_dyn), intent(out) :: rho_ref(k0:k1) !reference density
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_molecular_diff_coef_reference_phys(k0,k1,tref,press,&
+         sponge_factor,&
+         kmvis_ref,kmcnd_ref,rho_ref)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tref_phys
+    real(kind_phys) :: press_phys(k0:k1)
+    real(kind_phys) :: sponge_factor_phys(k0:k1)
+    real(kind_phys) :: kmvis_ref_phys(k0:k1)
+    real(kind_phys) :: kmcnd_ref_phys(k0:k1)
+    real(kind_phys) :: rho_ref_phys(k0:k1)
+
+    !Set local variables:
+    tref_phys = real(tref, kind_phys)
+    press_phys = real(press, kind_phys)
+    sponge_factor_phys = real(sponge_factor, kind_phys)
+
+    !Call physics routine using local variables with matching kinds:
+    call get_molecular_diff_coef_reference_phys(k0,k1,tref_phys,press_phys,&
+         sponge_factor_phys,&
+         kmvis_ref_phys,kmcnd_ref_phys,&
+         rho_ref_phys)
+
+    !Set output variables back to dynamics kind:
+    kmvis_ref = real(kmvis_ref_phys, kind_dyn)
+    kmcnd_ref = real(kmcnd_ref_phys, kind_dyn)
+    rho_ref = real(rho_ref_phys, kind_dyn)
+
+#endif
+
+  end subroutine get_molecular_diff_coef_reference
+  !
+  !*************************************************************************************************************************
+  !
+  ! compute dry density from temperature (temp) and pressure (dp_dry and
+  ! tracer)
+  !
+  !*************************************************************************************************************************
+  !
+  subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_mass,&
+       rho_dry, rhoi_dry,active_species_idx_dycore,pint_out,pmid_out)
+
+    use physconst, only: get_rho_dry_phys=>get_rho_dry
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,k1,ntrac,nlev
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,nlev,ntrac) !Tracer array
+    real(kind_dyn), intent(in) :: temp(i0:i1,j0:j1,1:nlev) !Temperature
+    real(kind_dyn), intent(in) :: ptop
+    real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,nlev)
+    logical, intent(in) :: tracer_mass
+    real(kind_dyn), optional,intent(out) :: rho_dry(i0:i1,j0:j1,1:k1)
+    real(kind_dyn), optional,intent(out) :: rhoi_dry(i0:i1,j0:j1,1:k1+1)
+    !
+    ! array of indices for index of thermodynamic active species in dycore
+    ! tracer array
+    ! (if different from physics index)
+    !
+ integer, optional, intent(in) :: active_species_idx_dycore(:) + real(kind_phys),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1) + real(kind_phys),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1) + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop, & + dp_dry,tracer_mass, & + rho_dry=rho_dry, & + rhoi_dry=rhoi_dry, & + active_species_idx_dycore=active_species_idx_dycore, & + pint_out=pint_out, & + pmid_out=pmid_out) + +#else + + !Declare local variables: + real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,ntrac) + real(kind_phys) :: temp_phys(i0:i1,j0:j1,1:nlev) + real(kind_phys) :: ptop_phys + real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,nlev) + real(kind_phys), allocatable :: rho_dry_phys(:,:,:) + real(kind_phys), allocatable :: rhoi_dry_phys(:,:,:) + real(kind_phys), allocatable :: pint_out_phys(:,:,:) + real(kind_phys), allocatable :: pmid_out_phys(:,:,:) + + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_rho_dry (dyn)' + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + temp_phys = real(temp, kind_phys) + ptop_phys = real(ptop, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + + if (present(rho_dry)) then + allocate(rho_dry_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'rho_dry_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + end if + if (present(rhoi_dry)) then + allocate(rhoi_dry_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'rhoi_dry_phys(i0:i1,j0:j1,1:k1+1)', & + file=__FILE__, line=__LINE__) + + end if + if (present(pint_out)) then + allocate(pint_out_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'pint_out_phys(i0:i1,j0:j1,1:k1+1)', & + file=__FILE__, line=__LINE__) + end if + if (present(pmid_out)) then + allocate(pmid_out_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'pmid_out_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: + call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer_phys,temp_phys, & + ptop_phys, dp_dry_phys,tracer_mass, & + rho_dry=rho_dry_phys, & + rhoi_dry=rhoi_dry_phys, & + active_species_idx_dycore=active_species_idx_dycore, & + pint_out=pint_out_phys, & + pmid_out=pmid_out_phys) + + !Set output variables back to dynamics kind: + if (present(rho_dry)) then + rho_dry = real(rho_dry_phys, kind_dyn) + deallocate(rho_dry_phys) + end if + if (present(rhoi_dry)) then + rhoi_dry = real(rhoi_dry_phys, kind_dyn) + deallocate(rhoi_dry_phys) + end if + if (present(pint_out)) then + pint_out = real(pint_out_phys, kind_dyn) + deallocate(pint_out_phys) + end if + if (present(pmid_out)) then + pmid_out = real(pmid_out_phys, kind_dyn) + deallocate(pmid_out_phys) + end if + +#endif + + end subroutine get_rho_dry + ! + !**************************************************************************************************************** + ! + ! Compute geopotential from pressure level thickness and virtual temperature + ! + !**************************************************************************************************************** + ! 
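+  !
+  ! [Hedged reference note] The discretization itself lives in the physconst
+  ! routine this wrapper defers to; conceptually it builds interface pressures
+  ! from ptop and dp and integrates the hydrostatic relation upward from the
+  ! surface:
+  !
+  !   pint(1) = ptop,  pint(k+1) = pint(k) + dp(k)
+  !   d(gz)   = -R_dry * T_v * d(ln p)
+  !   gz accumulated upward from the surface geopotential phis
+  !
+  ! The exact layer-mean form used in physconst may differ from this sketch.
+  !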
+  subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pmid)
+
+    use physconst, only: get_gz_given_dp_Tv_Rdry_phys=>get_gz_given_dp_Tv_Rdry
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,nlev ! array bounds
+    real(kind_dyn), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness
+    real(kind_dyn), intent(in) :: T_v (i0:i1,j0:j1,nlev) ! virtual temperature
+    real(kind_dyn), intent(in) :: R_dry(i0:i1,j0:j1,nlev) ! R dry
+    real(kind_dyn), intent(in) :: phis (i0:i1,j0:j1) ! surface geopotential
+    real(kind_dyn), intent(in) :: ptop ! model top pressure
+    real(kind_dyn), intent(out) :: gz(i0:i1,j0:j1,nlev) ! geopotential
+    real(kind_dyn), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,&
+         pmid=pmid)
+
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: dp_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys) :: T_v_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys) :: R_dry_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys) :: phis_phys(i0:i1,j0:j1)
+    real(kind_phys) :: ptop_phys
+    real(kind_phys) :: gz_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys), allocatable :: pmid_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_gz_given_dp_Tv_Rdry (dyn)'
+
+    !Set local variables:
+    dp_phys = real(dp, kind_phys)
+    T_v_phys = real(T_v, kind_phys)
+    R_dry_phys = real(R_dry, kind_phys)
+    phis_phys = real(phis, kind_phys)
+    ptop_phys = real(ptop, kind_phys)
+
+    if (present(pmid)) then
+       !Allocate variable if optional argument is present:
+       allocate(pmid_phys(i0:i1,j0:j1,nlev), stat=iret)
+       call check_allocate(iret, subname, 'pmid_phys(i0:i1,j0:j1,nlev)', &
+            file=__FILE__, line=__LINE__)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    if (present(pmid)) then
+       call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp_phys,T_v_phys, &
+            R_dry_phys,phis_phys,ptop_phys,gz_phys, &
+            pmid=pmid_phys)
+    else
+       call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp_phys,T_v_phys, &
+            R_dry_phys,phis_phys,ptop_phys,gz_phys)
+    end if
+
+    !Set output variables back to dynamics kind:
+    gz = real(gz_phys, kind_dyn)
+
+    if (present(pmid)) then
+       pmid = real(pmid_phys, kind_dyn)
+       deallocate(pmid_phys)
+    end if
+
+#endif
+
+  end subroutine get_gz_given_dp_Tv_Rdry
+  !
+  !****************************************************************************************************************
+  !
+  ! Compute virtual temperature T_v
+  !
+  ! tracer is in units of dry mixing ratio unless optional argument dp_dry is
+  ! present in which case tracer is
+  ! in units of "mass" (=m*dp)
+  !
+  ! If temperature is not supplied then just return factor that T needs to be
+  ! multiplied by to get T_v
+  !
+  !****************************************************************************************************************
+  !
+  subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q, &
+       active_species_idx_dycore)
+
+    use physconst, only: get_virtual_temp_phys=>get_virtual_temp
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntrac) ! tracer array
+    real(kind_dyn), intent(out) :: T_v(i0:i1,j0:j1,k0:k1) !
virtual temperature + real(kind_dyn), optional, intent(in) :: temp(i0:i1,j0:j1,k0:k1) ! temperature + real(kind_dyn), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_dyn), optional,intent(out) :: sum_q(i0:i1,j0:j1,k0:k1) ! sum tracer + ! + ! array of indicies for index of thermodynamic active species in dycore + ! tracer array + ! (if different from physics index) + ! + integer, optional, intent(in) :: active_species_idx_dycore(:) + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v, & + temp=temp,dp_dry=dp_dry,sum_q=sum_q, & + active_species_idx_dycore=active_species_idx_dycore) +#else + + !Declare local variables: + real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,ntrac) + real(kind_phys) :: T_v_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys), allocatable :: temp_phys(:,:,:) + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) + real(kind_phys), allocatable :: sum_q_phys(:,:,:) + + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_virtual_temp (dyn)' + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + + if (present(temp)) then + !Allocate variable if optional argument is present: + allocate(temp_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'temp_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + temp_phys = real(temp, kind_phys) + end if + + if (present(dp_dry)) then + !Allocate variable if optional argument is present: + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + dp_dry_phys = real(dp_dry, kind_phys) + end if + + if (present(sum_q)) then + !Allocate variable if optional argument is present: + allocate(sum_q_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'sum_q_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: + call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,T_v_phys, & + temp=temp_phys,dp_dry=dp_dry_phys,sum_q=sum_q_phys, & + active_species_idx_dycore=active_species_idx_dycore) + + !Set output variables back to dynamics kind: + T_v = real(T_v_phys, kind_dyn) + + if (present(sum_q)) then + sum_q = real(sum_q_phys, kind_dyn) + deallocate(sum_q_phys) + end if + + !Deallocate variables: + if (allocated(temp_phys)) then + deallocate(temp_phys) + end if + + if (allocated(dp_dry_phys)) then + deallocate(dp_dry_phys) + end if + +#endif + + end subroutine get_virtual_temp + ! + !**************************************************************************************************************** + ! + ! Compute generalized dry air gas constant R + ! + !**************************************************************************************************************** + ! 
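+  !
+  ! [Hedged reference note] "Generalized" here means the gas constant of the
+  ! dry mixture with its local (possibly composition-dependent) make-up; for
+  ! standard dry-air composition it reduces to the usual constant rair, and it
+  ! ties into get_kappa_dry/get_exner through kappa = R_dry/cp_dry.  As a
+  ! rough worked number for standard composition (approximate values only):
+  !
+  !   kappa = R_dry/cp_dry ~ 287.04 / 1004.64 ~ 0.2857
+  !
+  ! The multi-species weighting actually applied is defined in physconst and
+  ! is not restated by this wrapper.
+  !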
+  subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry,fact)
+
+    use physconst, only: get_R_dry_phys=>get_R_dry
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac !array bounds
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) !tracer array
+    integer, intent(in) :: active_species_idx_dycore(:) !index of active species in tracer
+    real(kind_dyn), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R
+    real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry, &
+         fact=fact)
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)
+    real(kind_phys) :: R_dry_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys), allocatable :: fact_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_R_dry (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+
+    if (present(fact)) then
+       !Allocate variable if optional argument is present:
+       allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret)
+       call check_allocate(iret, subname, 'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', &
+            file=__FILE__, line=__LINE__)
+
+       !Set optional local variable:
+       fact_phys = real(fact, kind_phys)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys, &
+         active_species_idx_dycore,R_dry_phys,fact=fact_phys)
+
+    !Set output variables back to dynamics kind:
+    R_dry = real(R_dry_phys, kind_dyn)
+
+    !Deallocate variables:
+    if (allocated(fact_phys)) then
+       deallocate(fact_phys)
+    end if
+
+#endif
+
+  end subroutine get_R_dry
+  !
+  !****************************************************************************************************************
+  !
+  ! Compute Exner pressure
+  !
+  !****************************************************************************************************************
+  !
+  subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,&
+       dp_dry,ptop,p00,inv_exner,exner,poverp0)
+
+    use physconst, only: get_exner_phys=>get_exner
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! index bounds
+    real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg
+    integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio
+                                        ! 2 => tracer is mass (q*dp)
+    integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
+    real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+    real(kind_dyn), intent(in) :: ptop ! pressure at model top
+    real(kind_dyn), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa)
+    logical , intent(in) :: inv_exner ! logical for outputting inverse Exner or Exner pressure
+    real(kind_dyn), intent(out) :: exner(i0:i1,j0:j1,nlev)
+    real(kind_dyn), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev) ! for efficiency when a routine needs this variable
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_exner_phys(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,&
+         dp_dry,ptop,p00,inv_exner,exner,poverp0=poverp0)
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac)
+    real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys) :: ptop_phys
+    real(kind_phys) :: p00_phys
+    real(kind_phys) :: exner_phys(i0:i1,j0:j1,nlev)
+    real(kind_phys), allocatable :: poverp0_phys(:,:,:)
+
+    !check_allocate variables:
+    integer :: iret !allocate status integer
+    character(len=*), parameter :: subname = 'get_exner (dyn)'
+
+    !Set local variables:
+    tracer_phys = real(tracer, kind_phys)
+    dp_dry_phys = real(dp_dry, kind_phys)
+    ptop_phys = real(ptop, kind_phys)
+    p00_phys = real(p00, kind_phys)
+
+    if (present(poverp0)) then
+       !Allocate variable if optional argument is present:
+       allocate(poverp0_phys(i0:i1,j0:j1,nlev), stat=iret)
+       call check_allocate(iret, subname, 'poverp0_phys(i0:i1,j0:j1,nlev)', &
+            file=__FILE__, line=__LINE__)
+    end if
+
+    !Call physics routine using local variables with matching kinds:
+    call get_exner_phys(i0,i1,j0,j1,nlev,ntrac,tracer_phys,mixing_ratio,active_species_idx,&
+         dp_dry_phys,ptop_phys,p00_phys,inv_exner,exner_phys, &
+         poverp0=poverp0_phys)
+
+    !Set optional output variables back to dynamics kind:
+    if (present(poverp0)) then
+       poverp0 = real(poverp0_phys, kind_dyn)
+       deallocate(poverp0_phys)
+    end if
+
+    !Set output variables back to dynamics kind:
+    exner = real(exner_phys, kind_dyn)
+
+#endif
+
+  end subroutine get_exner
+  !
+  !****************************************************************************************************************
+  !
+  ! Compute thermal energy = cp*T*dp, where dp is pressure level thickness,
+  ! cp is generalized cp and T temperature
+  !
+  ! Note: tracer is in units of m*dp_dry ("mass")
+  !
+  !****************************************************************************************************************
+  !
+  subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy, &
+       active_species_idx_dycore)
+
+    use physconst, only: get_thermal_energy_phys=>get_thermal_energy
+
+    !Subroutine (dummy) arguments:
+    integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
+    real(kind_dyn), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,ntrac) !tracer array (mass weighted)
+    real(kind_dyn), intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature
+    real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry pressure level thickness
+    real(kind_dyn), intent(out) :: thermal_energy(i0:i1,j0:j1,k0:k1) !thermal energy in each column: sum cp*T*dp
+    !
+    ! array of indices for index of thermodynamic active species in dycore
+    ! tracer array
+    ! (if different from physics index)
+    !
+    integer, optional, dimension(:), intent(in) :: active_species_idx_dycore
+
+#ifndef DYN_PHYS_KIND_DIFF
+
+    !The dynamics and physics kind is the same, so just call the physics
+    !routine directly:
+    call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy,&
+         active_species_idx_dycore=active_species_idx_dycore)
+#else
+
+    !Declare local variables:
+    real(kind_phys) :: tracer_mass_phys(i0:i1,j0:j1,k0:k1,ntrac)
+    real(kind_phys) :: temp_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1)
+    real(kind_phys) :: thermal_energy_phys(i0:i1,j0:j1,k0:k1)
+
+    !Set local variables:
+    tracer_mass_phys = real(tracer_mass, kind_phys)
+    temp_phys = real(temp, kind_phys)
+    dp_dry_phys = real(dp_dry, kind_phys)
+
+    !Call physics routine using local variables with matching kinds:
+    call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,temp_phys,&
+         dp_dry_phys,thermal_energy_phys,&
+         active_species_idx_dycore=active_species_idx_dycore)
+
+    !Set output variables back to dynamics kind:
+    thermal_energy = real(thermal_energy_phys, kind_dyn)
+#endif
+
+  end subroutine get_thermal_energy
+
+end module dyn_thermo
diff --git a/src/dynamics/utils/dynconst.F90 b/src/dynamics/utils/dynconst.F90
new file mode 100644
index 00000000..f15bc593
--- /dev/null
+++ b/src/dynamics/utils/dynconst.F90
@@ -0,0 +1,76 @@
+module dynconst
+
+  !Physical constants with the same precision (kind) as the dycore.
+
+  use shr_kind_mod, only: kind_dyn=>shr_kind_r8
+  use physconst, only: phys_pi=>pi
+
+  implicit none
+  public
+
+  !Physical constants:
+
+  !Please note that pi must be a parameter, as it is used in
+  !parameter statements in the dynamics itself. This should be ok,
+  !though, as Pi is an actual mathematical constant that should
+  !never change:
+
+  !circle's circumference/diameter [unitless]
+  real(kind_dyn), parameter :: pi = real(phys_pi, kind_dyn)
+
+  ! radius of earth [m]
+  real(kind_dyn), protected :: rearth
+  ! reciprocal of earth's radius [1/m]
+  real(kind_dyn), protected :: ra
+  ! earth's rotation rate [rad/sec]
+  real(kind_dyn), protected :: omega
+  ! gravitational acceleration [m/s**2]
+  real(kind_dyn), protected :: gravit
+  ! specific heat of dry air [J/K/kg]
+  real(kind_dyn), protected :: cpair
+  ! Dry air gas constant [J/K/kg]
+  real(kind_dyn), protected :: rair
+  ! reference temperature [K]
+  real(kind_dyn), protected :: tref
+  ! reference lapse rate [K/m]
+  real(kind_dyn), protected :: lapse_rate
+  ! R/Cp
+  real(kind_dyn), protected :: cappa
+
+!==============================================================================
+CONTAINS
+!==============================================================================
+
+  subroutine dynconst_init
+
+    !Subroutine to initialize physical
+    !and mathematical constants used
+    !by the dynamics with the same
+    !real kind used by the dynamics itself.
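+    !
+    !Usage sketch (illustrative only; the call site is not defined in this
+    !file): the routine takes no arguments and simply mirrors the physics-kind
+    !constants at the dycore kind, so it is expected to run once during model
+    !initialization, after the physics constants are set and before any dycore
+    !code reads the protected variables above, e.g.
+    !
+    !   call physconst_init(...)   !physics-kind constants first
+    !   call dynconst_init()       !then copy them to kind_dyn
+    !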
+ + use physconst, only: phys_rearth=>rearth + use physconst, only: phys_ra=>ra + use physconst, only: phys_omega=>omega + use physconst, only: phys_cpair=>cpair + use physconst, only: phys_gravit=>gravit + use physconst, only: phys_tref=>tref + use physconst, only: phys_lapse_rate=>lapse_rate + use physconst, only: phys_cappa=>cappa + use physconst, only: phys_rair=>rair + + !Set constants used by the dynamics: + + rearth = real(phys_rearth, kind_dyn) + ra = real(phys_ra, kind_dyn) + omega = real(phys_omega, kind_dyn) + cpair = real(phys_cpair, kind_dyn) + rair = real(phys_rair, kind_dyn) + gravit = real(phys_gravit, kind_dyn) + tref = real(phys_tref, kind_dyn) + lapse_rate = real(phys_lapse_rate, kind_dyn) + cappa = real(phys_cappa, kind_dyn) + + end subroutine dynconst_init + +end module dynconst diff --git a/src/physics/utils/phys_comp.F90 b/src/physics/utils/phys_comp.F90 index 099f2ae0..77bf1fa0 100644 --- a/src/physics/utils/phys_comp.F90 +++ b/src/physics/utils/phys_comp.F90 @@ -109,6 +109,7 @@ subroutine phys_init(phys_state, phys_tend, cam_out) use physics_grid, only: columns_on_task use vert_coord, only: pver, pverp use physconst, only: physconst_init + use dynconst, only: dynconst_init use physics_types, only: allocate_physics_types_fields use constituents, only: pcnst use cam_ccpp_cap, only: cam_ccpp_physics_initialize @@ -127,6 +128,7 @@ subroutine phys_init(phys_state, phys_tend, cam_out) errflg = 0 call physconst_init(columns_on_task, pver, pverp) + call allocate_physics_types_fields(columns_on_task, pver, pverp, & pcnst, set_init_val_in=.true., reallocate_in=.false.) call ccpp_physics_suite_list(suite_names) diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 20867b7a..e8009832 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -25,7 +25,7 @@ #Python unit-testing library: import unittest -#Adddirectory to python path: +#Add directory to python path: CURRDIR = os.path.abspath(os.path.dirname(__file__)) CAM_ROOT_DIR = os.path.join(CURRDIR, os.pardir, os.pardir) CAM_CONF_DIR = os.path.abspath(os.path.join(CAM_ROOT_DIR, "cime_config")) @@ -190,15 +190,15 @@ def test_config_get_value_list_check(self): """Check that "get_value" throws the proper error when non-existent variable is requested""" #Set error message: - ermsg = "ERROR: Invalid configuration name, 'fake variable'" + ermsg = "ERROR: Invalid configuration name, 'fake variable'" #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: #Run "get_value" method on made-up variable name: self.test_config_cam.get_value("fake variable") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #+++++++++++++++++++++++++++++++++++++++++++++++++++++ #Check "set_value" non-created variable error-handling @@ -216,9 +216,8 @@ def test_config_set_value_list_check(self): #Run "set_value" method on made-up variable name: self.test_config_cam.set_value("fake variable", 200) - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) - + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #Check "print_config" non-created variable error-handling @@ -239,9 +238,8 @@ def test_config_print_config_list_check(self): #Run "print_config" method on 
made-up variable name: self.test_config_cam.print_config("fake variable", print_log) - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) - + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #++++++++++++++++++++++++++++++++++++++++++++ #Check "set_value" input value error-handling @@ -255,15 +253,15 @@ def test_config_set_value_type_check(self): """ #Set error message: - ermsg = "ERROR: Value provided for variable, 'pcols', must be either an integer or a string. Currently it is type " + ermsg = "ERROR: Value provided for variable, 'pcols', must be either an integer or a string. Currently it is type " #Expect "CamConfigTypeError": with self.assertRaises(CamConfigTypeError) as typerr: #Run "set_value" method on made-up variable name: self.test_config_cam.set_value("pcols", 5.0) - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(typerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(typerr.exception)) #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #Check "generate_cam_src" missing "ccpp_framework" error-handling @@ -285,8 +283,8 @@ def test_config_gen_cam_src_ccpp_check(self): #due to the case paths being "fake": self.test_config_cam.generate_cam_src(0) - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #Check that "ccpp_phys_set" works as expected with one physics suite entry @@ -373,9 +371,9 @@ def test_config_ccpp_phys_set_missing_phys(self): cam_nml_attr_dict = dict() #Set error message: - ermsg = "No 'physics_suite' variable is present in user_nl_cam.\n \ - This is required if more than one suite is listed\n \ - in CAM_CONFIG_OPTS." + ermsg = "No 'physics_suite' variable is present in user_nl_cam.\n" + ermsg += "This is required if more than one suite is listed\n" + ermsg += "in CAM_CONFIG_OPTS." #Create namelist file: with open("test.txt", "w") as test_fil: @@ -387,8 +385,8 @@ def test_config_ccpp_phys_set_missing_phys(self): #due to missing "physics_suite" namelist variable: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #Remove text file: os.remove("test.txt") @@ -409,8 +407,8 @@ def test_config_ccpp_phys_set_two_phys(self): cam_nml_attr_dict = dict() #Set error message: - ermsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n \ - Only one 'physics_suite' line is allowed." + ermsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n" + ermsg += "Only one 'physics_suite' line is allowed." 
#Create namelist file: with open("test.txt", "w") as test_fil: @@ -421,11 +419,11 @@ def test_config_ccpp_phys_set_two_phys(self): #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: #Run ccpp_phys_set config method, which should fail - #due to missing "physics_suite" namelist variable: + #due to multiple "physics_suite" namelist variable: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #Remove text file: os.remove("test.txt") @@ -457,11 +455,11 @@ def test_config_ccpp_phys_set_missing_equals(self): #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: #Run ccpp_phys_set config method, which should fail - #due to missing "physics_suite" namelist variable: + #due to a missing equals sign in the namelist entry: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #Remove text file: os.remove("test.txt") @@ -492,11 +490,12 @@ def test_config_ccpp_phys_set_two_equals(self): #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: #Run ccpp_phys_set config method, which should fail - #due to missing "physics_suite" namelist variable: + #due to an incorrect number of equal signs in the + #namelist entry: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #Remove text file: os.remove("test.txt") @@ -518,8 +517,8 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): cam_nml_attr_dict = dict() #Set error message: - ermsg = "physics_suite specified in user_nl_cam doesn't match any suites\n \ - listed in CAM_CONFIG_OPTS" + ermsg = "physics_suite specified in user_nl_cam doesn't match any suites\n" + ermsg += "listed in CAM_CONFIG_OPTS" #Create namelist file: with open("test.txt", "w") as test_fil: @@ -529,11 +528,13 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): #Expect "CamConfigValError": with self.assertRaises(CamConfigValError) as valerr: #Run ccpp_phys_set config method, which should fail - #due to missing "physics_suite" namelist variable: + #due to a mis-match between the "physics_suite" namelist + #variable and the physics suite options listed in the + #physics_suites config variable: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") - #Check that error message matches what's expected: - self.assertEqual(ermsg, str(valerr.exception)) + #Check that error message matches what's expected: + self.assertEqual(ermsg, str(valerr.exception)) #Remove text file: os.remove("test.txt") From c265f4347e2b40ebdf94ec5e90a90fbc4c3634ab Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Tue, 17 Aug 2021 11:22:48 -0600 Subject: [PATCH 36/45] Fix parse_config_opts doctests. 
--- cime_config/cam_config.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index f61d33a4..bfb14b18 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -970,14 +970,14 @@ def parse_config_opts(cls, config_opts, test_mode=False): >>> ConfigCAM.parse_config_opts("--dyn se", test_mode=True) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): SystemExit: 2 - >>> vlist(ConfigCAM.parse_config_opts("--phys kessler")) - [('dyn', ''), ('physics_suites', 'kessler')] - >>> vlist(ConfigCAM.parse_config_opts("--phys kessler --dyn se")) - [('dyn', 'se'), ('physics_suites', 'kessler')] - >>> ConfigCAM.parse_config_opts("--physics-suites kessler --dyn se --analytic_ic") - [('analystic_ic', True), ('dyn', 'se'), ('physics_suites', 'kessler')] - >>> vlist(ConfigCAM.parse_config_opts("--phys kessler;musica")) - [('dyn', ''), ('physics_suites', 'kessler;musica')] + >>> vlist(ConfigCAM.parse_config_opts("--physics-suites kessler")) + [('analytic_ic', False), ('dyn', ''), ('dyn_kind', 'REAL64'), ('phys_kind', 'REAL64'), ('physics_suites', 'kessler')] + >>> vlist(ConfigCAM.parse_config_opts("--physics-suites kessler --dyn se")) + [('analytic_ic', False), ('dyn', 'se'), ('dyn_kind', 'REAL64'), ('phys_kind', 'REAL64'), ('physics_suites', 'kessler')] + >>> vlist(ConfigCAM.parse_config_opts("--physics-suites kessler --dyn se --analytic_ic")) + [('analytic_ic', True), ('dyn', 'se'), ('dyn_kind', 'REAL64'), ('phys_kind', 'REAL64'), ('physics_suites', 'kessler')] + >>> vlist(ConfigCAM.parse_config_opts("--physics-suites kessler;musica")) + [('analytic_ic', False), ('dyn', ''), ('dyn_kind', 'REAL64'), ('phys_kind', 'REAL64'), ('physics_suites', 'kessler;musica')] >>> ConfigCAM.parse_config_opts("--phys kessler musica", test_mode=True) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): SystemExit: 2 From 2c5891fa066fa6121176b7f689513955e57c798b Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 18 Aug 2021 09:26:05 -0600 Subject: [PATCH 37/45] Replace CPP ifdefs with runtime options DDT object. --- src/dynamics/se/dp_coupling.F90 | 235 ++++++++++++++++---------------- src/dynamics/se/dyn_comp.F90 | 25 ++-- src/dynamics/se/stepon.F90 | 56 ++++---- 3 files changed, 159 insertions(+), 157 deletions(-) diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index dacb335d..a791df6a 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -14,6 +14,7 @@ module dp_coupling use dyn_grid, only: TimeLevel, edgebuf use dyn_comp, only: dyn_export_t, dyn_import_t +use runtime_obj, only: runtime_options use physics_types, only: physics_state, physics_tend use physics_types, only: ix_qv, ix_cld_liq, ix_rain !Remove once constituents are enabled use physics_grid, only: pcols => columns_on_task, get_dyn_col_p @@ -45,7 +46,7 @@ module dp_coupling CONTAINS !========================================================================================= -subroutine d_p_coupling(phys_state, phys_tend, dyn_out) +subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! Convert the dynamics output state into the physics input state. ! Note that all pressures and tracer mixing ratios coming from the dycore are based on @@ -54,7 +55,6 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) use physics_types, only: pdel ! use gravity_waves_sources, only: gws_src_fnct use dyn_comp, only: frontgf_idx, frontga_idx -! 
use phys_control, only: use_gw_front, use_gw_front_igw use hycoef, only: hyai, ps0 use test_fvm_mapping, only: test_mapping_overwrite_dyn_state, test_mapping_output_phys_state @@ -64,9 +64,10 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) use control_mod, only: qsplit ! arguments - type(dyn_export_t), intent(inout) :: dyn_out ! dynamics export - type(physics_state), intent(inout) :: phys_state - type(physics_tend ), intent(inout) :: phys_tend + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(dyn_export_t), intent(inout) :: dyn_out ! dynamics export + type(physics_state), intent(inout) :: phys_state + type(physics_tend ), intent(inout) :: phys_tend ! LOCAL VARIABLES @@ -84,10 +85,10 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) real(r8), allocatable :: omega_tmp(:,:,:) ! temp array to hold omega ! Frontogenesis - !real (kind=r8), allocatable :: frontgf(:,:,:) ! temp arrays to hold frontogenesis - !real (kind=r8), allocatable :: frontga(:,:,:) ! function (frontgf) and angle (frontga) - !real (kind=r8), allocatable :: frontgf_phys(:,:,:) - !real (kind=r8), allocatable :: frontga_phys(:,:,:) + real (kind=r8), allocatable :: frontgf(:,:,:) ! temp arrays to hold frontogenesis + real (kind=r8), allocatable :: frontga(:,:,:) ! function (frontgf) and angle (frontga) + real (kind=r8), allocatable :: frontgf_phys(:,:) + real (kind=r8), allocatable :: frontga_phys(:,:) integer :: ncols,ierr integer :: blk_ind(1), m, m_cnst @@ -152,9 +153,9 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) call check_allocate(ierr, subname, 'omega_tmp(nphys_pts,pver,nelemd)', & file=__FILE__, line=__LINE__) -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. use_gw_front_igw) then + if (cam_runtime_opts%gw_front() .or. & + cam_runtime_opts%gw_front_igw()) then + allocate(frontgf(nphys_pts,pver,nelemd), stat=ierr) call check_allocate(ierr, subname, 'frontgf(nphys_pts,pver,nelemd)', & file=__FILE__, line=__LINE__) @@ -163,15 +164,17 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) call check_allocate(ierr, subname, 'frontga(nphys_pts,pver,nelemd)', & file=__FILE__, line=__LINE__) end if -#endif if (iam < par%nprocs) then -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. use_gw_front_igw) then - call gws_src_fnct(elem, tl_f, tl_qdp_np0, frontgf, frontga, nphys) + + ! Gravity Waves + if (cam_runtime_opts%gw_front() .or. & + cam_runtime_opts%gw_front_igw()) then + + !Un-comment once gravity wave parameterization is available -JN: + !call gws_src_fnct(elem, tl_f, tl_qdp_np0, frontgf, frontga, nphys) + end if -#endif if (fv_nphys > 0) then call test_mapping_overwrite_dyn_state(elem,dyn_out%fvm) @@ -233,13 +236,13 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) phis_tmp(:,:) = 0._r8 q_tmp(:,:,:,:) = 0._r8 -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. use_gw_front_igw) then + if (cam_runtime_opts%gw_front() .or. & + cam_runtime_opts%gw_front_igw()) then + frontgf(:,:,:) = 0._r8 frontga(:,:,:) = 0._r8 + end if -#endif endif ! iam < par%nprocs @@ -256,18 +259,15 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) q_prev = 0.0_r8 call t_startf('dpcopy') -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. 
use_gw_front_igw) then - allocate(frontgf_phys(pcols, pver, begchunk:endchunk), stat=ierr) - call check_allocate(ierr, subname, 'frontgf_phys(pcols, pver, begchunk:endchunk)', & + if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then + allocate(frontgf_phys(pcols, pver), stat=ierr) + call check_allocate(ierr, subname, 'frontgf_phys(pcols, pver)', & file=__FILE__, line=__LINE__) - allocate(frontga_phys(pcols, pver, begchunk:endchunk), stat=ierr) - call check_allocate(ierr, subname, 'frontga_phys(pcols, pver, begchunk:endchunk)', & + allocate(frontga_phys(pcols, pver), stat=ierr) + call check_allocate(ierr, subname, 'frontga_phys(pcols, pver)', & file=__FILE__, line=__LINE__) end if -#endif !$omp parallel do num_threads(max_num_threads) private (icol, ie, blk_ind, ilyr, m) do icol = 1, pcols call get_dyn_col_p(icol, ie, blk_ind) @@ -280,13 +280,10 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) phys_state%v(icol, ilyr) = real(uv_tmp(blk_ind(1), 2, ilyr, ie), kind_phys) phys_state%omega(icol, ilyr) = real(omega_tmp(blk_ind(1), ilyr, ie), kind_phys) -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. use_gw_front_igw) then - frontgf_phys(icol, ilyr, lchnk) = frontgf(blk_ind(1), ilyr, ie) - frontga_phys(icol, ilyr, lchnk) = frontga(blk_ind(1), ilyr, ie) + if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then + frontgf_phys(icol, ilyr) = frontgf(blk_ind(1), ilyr, ie) + frontga_phys(icol, ilyr) = frontga(blk_ind(1), ilyr, ie) end if -#endif end do do m = 1, pcnst @@ -302,26 +299,24 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) phys_tend%dudt(:,:) = 0._kind_phys phys_tend%dvdt(:,:) = 0._kind_phys -!Remove once a gravity wave parameterization is available -JN -#if 0 - if (use_gw_front .or. use_gw_front_igw) then + if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, pbuf_chnk, pbuf_frontgf, pbuf_frontga) - do lchnk = begchunk, endchunk - ncols = get_ncols_p(lchnk) - pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) - call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) - call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) - do icol = 1, ncols - do ilyr = 1, pver - pbuf_frontgf(icol, ilyr) = frontgf_phys(icol, ilyr, lchnk) - pbuf_frontga(icol, ilyr) = frontga_phys(icol, ilyr, lchnk) - end do - end do - end do +!Un-comment once pbuf replacement variables are available -JN: +! do lchnk = begchunk, endchunk +! ncols = get_ncols_p(lchnk) +! pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) +! call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) +! call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) +! do icol = 1, ncols +! do ilyr = 1, pver +! pbuf_frontgf(icol, ilyr) = frontgf_phys(icol, ilyr, lchnk) +! pbuf_frontga(icol, ilyr) = frontga_phys(icol, ilyr, lchnk) +! end do +! end do +! end do deallocate(frontgf_phys) deallocate(frontga_phys) end if -#endif call t_stopf('dpcopy') @@ -344,14 +339,14 @@ subroutine d_p_coupling(phys_state, phys_tend, dyn_out) ! ps, pdel, and q in phys_state are all dry at this point. After return from derived_phys_dry ! ps and pdel include water vapor only, and the 'wet' constituents have been converted to wet mmr. 
call t_startf('derived_phys') - call derived_phys_dry(phys_state, phys_tend) + call derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) call t_stopf('derived_phys') end subroutine d_p_coupling !========================================================================================= -subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) +subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, tl_qdp) use physics_types, only: pdel, pdeldry @@ -365,11 +360,12 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) use fvm_mapping, only: phys2dyn_forcings_fvm ! arguments - type(physics_state), intent(inout) :: phys_state - type(physics_tend), intent(inout) :: phys_tend - integer, intent(in) :: tl_qdp, tl_f - type(dyn_import_t), intent(inout) :: dyn_in - type(hybrid_t) :: hybrid + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(physics_state), intent(inout) :: phys_state + type(physics_tend), intent(inout) :: phys_tend + integer, intent(in) :: tl_qdp, tl_f + type(dyn_import_t), intent(inout) :: dyn_in + type(hybrid_t) :: hybrid ! LOCAL VARIABLES integer :: ic , ncols ! index @@ -609,7 +605,7 @@ end subroutine p_d_coupling !========================================================================================= -subroutine derived_phys_dry(phys_state, phys_tend) +subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! The ps, pdel, and q components of phys_state are all dry on input. ! On output the psdry and pdeldry components are initialized; ps and pdel are @@ -624,7 +620,6 @@ subroutine derived_phys_dry(phys_state, phys_tend) use physics_types, only: exner, zi, zm, lagrangian_vertical use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv -! use phys_control, only: waccmx_is use geopotential_t, only: geopotential_t_run ! use check_energy, only: check_energy_timestep_init use hycoef, only: hyai, ps0 @@ -634,8 +629,9 @@ subroutine derived_phys_dry(phys_state, phys_tend) use dyn_comp, only: ixo, ixo2, ixh, ixh2 ! arguments - type(physics_state), intent(inout) :: phys_state - type(physics_tend ), intent(inout) :: phys_tend + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(physics_state), intent(inout) :: phys_state + type(physics_tend ), intent(inout) :: phys_tend ! local variables real(r8) :: zvirv(pcols,pver) ! Local zvir array pointer @@ -758,80 +754,77 @@ subroutine derived_phys_dry(phys_state, phys_tend) end do #endif -!Remove once WACCMX is enabled in CAMDEN: -#if 0 - !------------------------------------------------------------ - ! Ensure O2 + O + H (N2) mmr greater than one. - ! Check for unusually large H2 values and set to lower value. - !------------------------------------------------------------ - if ( waccmx_is('ionosphere') .or. waccmx_is('neutral') ) then - - do i=1,ncol - do k=1,pver + !------------------------------------------------------------ + ! Ensure O2 + O + H (N2) mmr greater than one. + ! Check for unusually large H2 values and set to lower value. + !------------------------------------------------------------ + if (cam_runtime_opts%waccmx_option() == 'ionosphere' .or. 
& + cam_runtime_opts%waccmx_option() == 'neutral') then - if (phys_state(lchnk)%q(i,k,ixo) < mmrMin) phys_state(lchnk)%q(i,k,ixo) = mmrMin - if (phys_state(lchnk)%q(i,k,ixo2) < mmrMin) phys_state(lchnk)%q(i,k,ixo2) = mmrMin + do i=1,pcols + do k=1,pver - mmrSum_O_O2_H = phys_state(lchnk)%q(i,k,ixo)+phys_state(lchnk)%q(i,k,ixo2)+phys_state(lchnk)%q(i,k,ixh) + if (phys_state%q(i,k,ixo) < mmrMin) phys_state%q(i,k,ixo) = mmrMin + if (phys_state%q(i,k,ixo2) < mmrMin) phys_state%q(i,k,ixo2) = mmrMin - if ((1._r8-mmrMin-mmrSum_O_O2_H) < 0._r8) then + mmrSum_O_O2_H = phys_state%q(i,k,ixo)+phys_state%q(i,k,ixo2)+phys_state%q(i,k,ixh) - phys_state(lchnk)%q(i,k,ixo) = phys_state(lchnk)%q(i,k,ixo) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H + if ((1._r8-mmrMin-mmrSum_O_O2_H) < 0._r8) then - phys_state(lchnk)%q(i,k,ixo2) = phys_state(lchnk)%q(i,k,ixo2) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H + phys_state%q(i,k,ixo) = phys_state%q(i,k,ixo) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H - phys_state(lchnk)%q(i,k,ixh) = phys_state(lchnk)%q(i,k,ixh) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H + phys_state%q(i,k,ixo2) = phys_state%q(i,k,ixo2) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H - endif + phys_state%q(i,k,ixh) = phys_state%q(i,k,ixh) * (1._r8 - N2mmrMin) / mmrSum_O_O2_H - if(phys_state(lchnk)%q(i,k,ixh2) .gt. 6.e-5_r8) then - phys_state(lchnk)%q(i,k,ixh2) = 6.e-5_r8 - endif + endif - end do - end do - endif + if(phys_state%q(i,k,ixh2) .gt. 6.e-5_r8) then + phys_state%q(i,k,ixh2) = 6.e-5_r8 + endif - !----------------------------------------------------------------------------- - ! Call physconst_update to compute cpairv, rairv, mbarv, and cappav as - ! constituent dependent variables. - ! Compute molecular viscosity(kmvis) and conductivity(kmcnd). - ! Fill local zvirv variable; calculated for WACCM-X. - !----------------------------------------------------------------------------- - if ( waccmx_is('ionosphere') .or. waccmx_is('neutral') ) then - call physconst_update(phys_state(lchnk)%q, phys_state(lchnk)%t, lchnk, ncol) - zvirv(:,:) = shr_const_rwv / rairv(:,:,lchnk) -1._r8 - else - zvirv(:,:) = zvir - endif -!Remove once WACCMX is enabled in CAMDEN: -#else - zvirv(:,:) = zvir -#endif + end do + end do + endif + + !----------------------------------------------------------------------------- + ! Call physconst_update to compute cpairv, rairv, mbarv, and cappav as + ! constituent dependent variables. + ! Compute molecular viscosity(kmvis) and conductivity(kmcnd). + ! Fill local zvirv variable; calculated for WACCM-X. + !----------------------------------------------------------------------------- + if (cam_runtime_opts%waccmx_option() == 'ionosphere' .or. & + cam_runtime_opts%waccmx_option() == 'neutral') then + + call physconst_update(phys_state%q, phys_state%t, pcols) + zvirv(:,:) = shr_const_rwv / rairv(:,:) -1._r8 + else + zvirv(:,:) = zvir + endif - !NOTE: Should geopotential be done in CCPP physics suite? -JN: + !NOTE: Should geopotential be done in CCPP physics suite? -JN: - ! Compute initial geopotential heights - based on full pressure - !call geopotential_t (phys_state%lnpint, phys_state%lnpmid , phys_state%pint , & - ! phys_state%pmid , phys_state%pdel , phys_state%rpdel , & - ! phys_state%t , phys_state%q(:,:,ix_qv), rairv, gravit, zvirv , & - ! phys_state%zi , phys_state%zm , ncol ) + ! Compute initial geopotential heights - based on full pressure + !call geopotential_t (phys_state%lnpint, phys_state%lnpmid , phys_state%pint , & + ! phys_state%pmid , phys_state%pdel , phys_state%rpdel , & + ! 
phys_state%t , phys_state%q(:,:,ix_qv), rairv, gravit, zvirv , & + ! phys_state%zi , phys_state%zm , ncol ) - call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & - pverp, 1, lnpint, pint, pmid, pdel, & - rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & - rairv, gravit, zvirv, zi, zm, pcols, & - errflg, errmsg) + call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & + pverp, 1, lnpint, pint, pmid, pdel, & + rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & + rairv, gravit, zvirv, zi, zm, pcols, & + errflg, errmsg) - !NOTE: Should dry static energy be done in CCPP physics suite? -JN: + !NOTE: Should dry static energy be done in CCPP physics suite? -JN: - ! Compute initial dry static energy, include surface geopotential - do k = 1, pver - do i = 1, pcols - phys_state%s(i,k) = cpair*phys_state%t(i,k) & - + gravit*zm(i,k) + phys_state%phis(i) - end do + ! Compute initial dry static energy, include surface geopotential + do k = 1, pver + do i = 1, pcols + phys_state%s(i,k) = cpair*phys_state%t(i,k) & + + gravit*zm(i,k) + phys_state%phis(i) end do + end do !Remove once constituents (and QNEG) are enabled in CAMDEN: #if 0 diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 33561033..b1b70384 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -12,7 +12,6 @@ module dyn_comp use vert_coord, only: pver use cam_control_mod, only: initial_run, simple_phys use cam_initfiles, only: initial_file_get_id, topo_file_get_id, pertlim -!use phys_control, only: use_gw_front, use_gw_front_igw, waccmx_is use dyn_grid, only: ini_grid_name, timelevel, hvcoord, edgebuf use cam_grid_support, only: cam_grid_id, cam_grid_get_gcid, & @@ -572,7 +571,8 @@ end subroutine dyn_readnl !========================================================================================= -subroutine dyn_init(dyn_in, dyn_out) +subroutine dyn_init(cam_runtime_opts, dyn_in, dyn_out) + use runtime_obj, only: runtime_options use dyn_grid, only: elem, fvm use cam_pio_utils, only: clean_iodesc_list use physconst, only: thermodynamic_active_species_num, thermodynamic_active_species_idx @@ -601,8 +601,9 @@ subroutine dyn_init(dyn_in, dyn_out) use control_mod, only: vert_remap_uvTq_alg, vert_remap_tracer_alg ! Dummy arguments: - type(dyn_import_t), intent(out) :: dyn_in - type(dyn_export_t), intent(out) :: dyn_out + type(runtime_options), intent(in) :: cam_runtime_opts + type(dyn_import_t), intent(out) :: dyn_in + type(dyn_export_t), intent(out) :: dyn_out ! Local variables integer :: ithr, nets, nete, ie, k, kmol_end @@ -873,7 +874,8 @@ subroutine dyn_init(dyn_in, dyn_out) call prim_init2(elem, fvm, hybrid, nets, nete, TimeLevel, hvcoord) !$OMP END PARALLEL -! if (use_gw_front .or. use_gw_front_igw) call gws_init(elem) +!Uncomment once gravity waves are enabled -JN: +! if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) call gws_init(elem) end if ! iam < par%nprocs !Remove/replace after CAMDEN history output is enabled -JN: @@ -966,12 +968,13 @@ subroutine dyn_init(dyn_in, dyn_out) end if ! constituent indices for waccm-x -! if ( waccmx_is('ionosphere') .or. waccmx_is('neutral') ) then -! call cnst_get_ind('O', ixo) -! call cnst_get_ind('O2', ixo2) -! call cnst_get_ind('H', ixh) -! call cnst_get_ind('H2', ixh2) -! end if +! if ( cam_runtime_opts%waccmx_option() == 'ionosphere' .or. & +! cam_runtime_opts%waccmx_option() == 'neutral' ) then +! call cnst_get_ind('O', ixo) +! call cnst_get_ind('O2', ixo2) +! call cnst_get_ind('H', ixh) +! 
call cnst_get_ind('H2', ixh2) +! end if call test_mapping_addfld diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 index 9ba0cc3a..7d5466f5 100644 --- a/src/dynamics/se/stepon.F90 +++ b/src/dynamics/se/stepon.F90 @@ -5,6 +5,7 @@ module stepon use physics_types, only: physics_state, physics_tend use spmd_utils, only: iam, mpicom use perf_mod, only: t_startf, t_stopf, t_barrierf +use runtime_obj, only: runtime_options !SE dycore: use parallel_mod, only: par @@ -24,17 +25,18 @@ module stepon contains !========================================================================================= -subroutine stepon_init(dyn_in, dyn_out) +subroutine stepon_init(cam_runtime_opts, dyn_in, dyn_out) ! Dummy arguments - type(dyn_import_t), intent(in) :: dyn_in ! Dynamics import container - type(dyn_export_t), intent(in) :: dyn_out ! Dynamics export container + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(dyn_import_t), intent(in) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(in) :: dyn_out ! Dynamics export container end subroutine stepon_init !========================================================================================= -subroutine stepon_run1(dtime_out, phys_state, phys_tend, dyn_in, dyn_out) +subroutine stepon_run1(dtime_out, cam_runtime_opts, phys_state, phys_tend, dyn_in, dyn_out) use time_manager, only: get_step_size use cam_abortutils, only: endrun @@ -44,11 +46,12 @@ subroutine stepon_run1(dtime_out, phys_state, phys_tend, dyn_in, dyn_out) use time_mod, only: tstep ! dynamics timestep ! Dummy arguments - real(r8), intent(out) :: dtime_out ! Time-step (s) - type(physics_state), intent(inout) :: phys_state ! Physics state object - type(physics_tend), intent(inout) :: phys_tend ! Physics tendency object - type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container - type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + real(r8), intent(out) :: dtime_out ! Time-step (s) + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(physics_tend), intent(inout) :: phys_tend ! Physics tendency object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container !---------------------------------------------------------------------------- !Extract model time step in seconds from ESMF time manager: @@ -64,14 +67,14 @@ subroutine stepon_run1(dtime_out, phys_state, phys_tend, dyn_in, dyn_out) call t_barrierf('sync_d_p_coupling', mpicom) call t_startf('d_p_coupling') ! Move data into phys_state structure. - call d_p_coupling(phys_state, phys_tend, dyn_out) + call d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) call t_stopf('d_p_coupling') end subroutine stepon_run1 !========================================================================================= -subroutine stepon_run2(phys_state, phys_tend, dyn_in, dyn_out) +subroutine stepon_run2(cam_runtime_opts, phys_state, phys_tend, dyn_in, dyn_out) !SE/CAM interface: use dp_coupling, only: p_d_coupling @@ -83,10 +86,11 @@ subroutine stepon_run2(phys_state, phys_tend, dyn_in, dyn_out) use prim_advance_mod, only: calc_tot_energy_dynamics ! Dummy arguments - type(physics_state), intent(inout) :: phys_state ! Physics state object - type(physics_tend), intent(inout) :: phys_tend ! 
Physics tendency object - type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container - type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(physics_tend), intent(inout) :: phys_tend ! Physics tendency object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container ! Local variables integer :: tl_f, tl_fQdp @@ -100,7 +104,7 @@ subroutine stepon_run2(phys_state, phys_tend, dyn_in, dyn_out) call t_barrierf('sync_p_d_coupling', mpicom) call t_startf('p_d_coupling') ! copy from phys structures -> dynamics structures - call p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_fQdp) + call p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, tl_fQdp) call t_stopf('p_d_coupling') if (iam < par%nprocs) then @@ -111,7 +115,7 @@ end subroutine stepon_run2 !========================================================================================= -subroutine stepon_run3(dtime, cam_out, phys_state, dyn_in, dyn_out) +subroutine stepon_run3(dtime, cam_runtime_opts, cam_out, phys_state, dyn_in, dyn_out) use camsrfexch, only: cam_out_t @@ -125,11 +129,12 @@ subroutine stepon_run3(dtime, cam_out, phys_state, dyn_in, dyn_out) use control_mod, only: qsplit ! Dummy arguments - real(r8), intent(in) :: dtime ! Time-step - type(cam_out_t), intent(inout) :: cam_out ! Output from CAM to surface - type(physics_state), intent(inout) :: phys_state ! Physics state object - type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container - type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + real(r8), intent(in) :: dtime ! Time-step + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(cam_out_t), intent(inout) :: cam_out ! Output from CAM to surface + type(physics_state), intent(inout) :: phys_state ! Physics state object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container ! Local variables integer :: tl_f, tl_fQdp @@ -161,11 +166,12 @@ end subroutine stepon_run3 !========================================================================================= -subroutine stepon_final(dyn_in, dyn_out) +subroutine stepon_final(cam_runtime_opts, dyn_in, dyn_out) ! Dummy arguments - type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container - type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container + type(runtime_options), intent(in) :: cam_runtime_opts ! Runtime settings object + type(dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container + type(dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container end subroutine stepon_final From 1e56860be060e2289573ddff6b4f6ec20590cb22 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 25 Aug 2021 20:36:18 -0600 Subject: [PATCH 38/45] Address review comments and suggestions. 
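
A minimal usage sketch of the new list-based ConfigList configure class
introduced in this commit (used below for "dyn_src_dirs"), assuming
ConfigList and CamConfigValError can be imported from
cime_config/cam_config.py; the option name and values in the sketch are
illustrative only, not part of the patch:

    # Sketch: exercise the new ConfigList class (assumes cam_config.py is
    # importable, e.g. by running from within the cime_config directory).
    from cam_config import ConfigList, CamConfigValError

    # A string-typed list option, mirroring the new "dyn_src_dirs" usage:
    dyn_dirs = ConfigList("dyn_src_dirs",
                          "Dynamics package source directories.",
                          ["se", "se/dycore"],
                          valid_type="str")
    print(dyn_dirs.value)   # -> ['se', 'se/dycore']

    # List entries that do not match valid_type raise CamConfigValError:
    try:
        ConfigList("bad_list", "Illustrative mixed-type list.",
                   [1, "two"], valid_type="int")
    except CamConfigValError as cerr:
        print(cerr)
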
--- cime_config/buildlib | 12 +- cime_config/buildnml | 2 +- cime_config/cam_autogen.py | 4 +- cime_config/cam_config.py | 236 ++++++++++++++++-- cime_config/namelist_definition_cam.xml | 59 +++-- src/data/generate_registry_data.py | 128 +++++++++- src/data/physconst.F90 | 169 +++++++++++-- src/data/registry.xml | 15 +- src/data/registry_v1_0.xsd | 30 ++- src/dynamics/se/advect_tend.F90 | 2 +- src/dynamics/se/dp_coupling.F90 | 81 +++--- .../se/dycore/comp_ctr_vol_around_gll_pts.F90 | 8 +- src/dynamics/se/dycore/derivative_mod.F90 | 35 ++- src/dynamics/se/dycore/dimensions_mod.F90 | 59 +++-- src/dynamics/se/dycore/edge_mod.F90 | 2 +- src/dynamics/se/dycore/element_mod.F90 | 202 +++------------ src/dynamics/se/dycore/fvm_analytic_mod.F90 | 4 +- .../se/dycore/fvm_control_volume_mod.F90 | 14 +- src/dynamics/se/dycore/fvm_mapping.F90 | 6 +- src/dynamics/se/dycore/fvm_mod.F90 | 35 ++- src/dynamics/se/dycore/global_norms_mod.F90 | 12 +- src/dynamics/se/dycore/interpolate_mod.F90 | 4 +- src/dynamics/se/dycore/parallel_mod.F90 | 4 +- src/dynamics/se/dycore/prim_state_mod.F90 | 4 +- src/dynamics/se/dyn_comp.F90 | 35 ++- src/dynamics/se/dyn_grid.F90 | 46 ++-- src/dynamics/se/native_mapping.F90 | 7 +- src/dynamics/se/stepon.F90 | 71 +++--- src/dynamics/tests/inic_analytic.F90 | 14 +- src/dynamics/tests/inic_analytic_utils.F90 | 5 +- .../initial_conditions/ic_baroclinic.F90 | 79 +++--- .../initial_conditions/ic_us_standard_atm.F90 | 118 ++++++--- src/dynamics/utils/dyn_thermo.F90 | 127 +++++++++- src/physics/utils/phys_comp.F90 | 4 + src/utils/cam_pio_utils.F90 | 2 +- test/unit/cam_config_unit_tests.py | 2 +- .../sample_files/physics_types_complete.F90 | 26 ++ test/unit/sample_files/physics_types_ddt2.F90 | 8 + .../sample_files/physics_types_ddt_array.F90 | 8 + .../sample_files/physics_types_ddt_eul.F90 | 8 + .../sample_files/physics_types_ddt_fv.F90 | 8 + .../sample_files/physics_types_ddt_se.F90 | 8 + .../sample_files/physics_types_parameter.F90 | 8 + .../sample_files/physics_types_simple.F90 | 8 + test/unit/sample_files/reg_good_complete.xml | 4 +- 45 files changed, 1153 insertions(+), 570 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index 0dd4a77b..84bee0a9 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -104,19 +104,17 @@ def _build_cam(): # End for # Add dynamics source code directories: - for direc in config.get_value("dyn_src_dirs").split(","): - dyn_dir = os.path.join(atm_root, "src", "dynamics") - for subdir in direc.split("/"): - dyn_dir = os.path.join(dyn_dir, subdir) + for direc in config.get_value("dyn_src_dirs"): + dyn_dir = os.path.join(atm_root, "src", "dynamics", direc) + if dyn_dir not in paths: #Add to list of filepaths if not already present: - if dyn_dir not in paths: - paths.append(dyn_dir) + paths.append(dyn_dir) # Add analytical IC source code directories: paths.append(os.path.join(atm_root, "src", "dynamics", "tests")) #Required due to namelist call. 
if dycore != "none": paths.append(os.path.join(atm_root, "src", "dynamics", "tests", - "initial_conditions")) + "initial_conditions")) # If using the CMEPS/NUOPC coupler, then add additional path: if case.get_value("COMP_INTERFACE") == "nuopc": diff --git a/cime_config/buildnml b/cime_config/buildnml index 10af10cd..10471f93 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -288,7 +288,7 @@ def buildnml(case, caseroot, compname): nmlgen.init_defaults(namelist_infile_list, cam_nml_dict, skip_default_for_groups=\ filter(lambda group: group !='vert_coord_nl', - config.nml_groups)) + config.nml_groups)) #-------------------------------------------- # Set "nlev" namelist attribute to equal pver diff --git a/cime_config/cam_autogen.py b/cime_config/cam_autogen.py index ca19e3f2..01a5ee69 100644 --- a/cime_config/cam_autogen.py +++ b/cime_config/cam_autogen.py @@ -469,9 +469,9 @@ def generate_physics_suites(ccpp_scripts_path, build_cache, preproc_defs, host_n # Convert preproc defs to string: if preproc_defs: - preproc_cache_str = ', '.join(preproc_defs) + preproc_cache_str = ', '.join(preproc_defs) else: - preproc_cache_str = 'UNSET' + preproc_cache_str = 'UNSET' if os.path.exists(genccpp_dir): do_gen_ccpp = force or build_cache.ccpp_mismatch(sdfs, scheme_files, diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index bfb14b18..fbc424b9 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -478,7 +478,7 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): #++++++++++++++++++++++++ # Create properties needed to return given value and valid values - # without underscores + # without underscores @property def value(self): """Return the value of this config object""" @@ -541,6 +541,143 @@ def set_value(self, val): # If ok, then set object's value to one provided self.__value = val +############################################################################### + +class ConfigList(ConfigGen): + + """ + Configuration class used to store list-based + CAM configuration options. + + Inputs to initalize class are: + name -> Name of new CAM configure option + desc -> Text description of CAM configure option + list_vals -> List values for CAM configure option + valid_type (optional) -> Specify valid type for CAM configure option list values. + Currently accepts "int" for integer and "str" for string. + + Doctests: + + 1. Check that ConfigList works properly with no valid_type: + + >>> ConfigList("test", "test object description", [1,2,3]).value + [1, 2, 3] + + 2. Check that ConfigList works with a correct valid type provided: + >>> ConfigList("test", "test object description", ["x", "y", "z"], valid_type="str").value + ['x', 'y', 'z'] + + 3. Check that ConfigList With a non-string passed to "valid_type" fails with the correct error: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type=5).value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: valid_type entry for variable 'test' must be a string, not type ''. + + 4. Check that ConfigList with a non-recognized "valid_type" option fails with the correct error: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="foo").value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: 'foo' is not a recognized option for 'valid_type'. Please use either 'int' or 'str'. + + 5. 
Check that ConfigList with list entries that don't match the valid_type entry fails with the correct error: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="str").value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: List entry, '1', provided for variable, 'test', is not a string, but instead is type ''. + """ + + def __init__(self, name, desc, val, valid_type=None): + + # Add generic attributes + ConfigGen.__init__(self, name, desc, is_nml_attr=None) + + # Check if valid_type is not None + if valid_type is not None: + # If not None, make sure valid_type is a string: + if not isinstance(valid_type, str): + emsg = "ERROR: valid_type entry for variable '{}' must be a string, " + emsg += " not type '{}'." + raise CamConfigTypeError(emsg.format(name, type(valid_type))) + # End if + # End if + + # If ok, then add valid_type to object + self.__valid_type = valid_type + + # Next, check that provided list entry types are "valid" based on the + # valid type provided: + if self.__valid_type is not None: + self.__check_value(val) + + # If everything is ok, then add provided value to object + self.__value = val + + #++++++++++++++++++++++++ + + # Create properties needed to return given value and valid values + # without underscores + @property + def value(self): + """Return the value of this config object""" + return self.__value + + @property + def valid_type(self): + """Return the valid type of this config object""" + return self.__valid_type + + #++++++++++++++++++++++++ + + def __check_value(self, val): + + """ + Check if the entries in the provided + list (val) are of the correct type as + specified by the "valid_type" entry. + """ + + # Extract valid type (valid_type) from object + valid_type = self.valid_type + + if valid_type == "str": + #All list entries should be strings: + for list_entry in val: + if not isinstance(list_entry, str): + emsg = "ERROR: List entry, '{}', provided for variable, '{}'" + emsg += ", is not a string, but instead is type '{}'." + raise CamConfigValError(emsg.format(list_entry, self.name, + type(list_entry))) + + elif valid_type == "int": + #All list entries should be integers: + for list_entry in val: + if not isinstance(list_entry, int): + emsg = "ERROR: List entry, '{}', provided for variable, '{}'" + emsg += ", is not an integer, but instead is type '{}'." + raise CamConfigValError(emsg.format(list_entry, self.name, + type(list_entry))) + else: + #Invalid option given for "valid_type", so raise error: + emsg = "ERROR: '{}' is not a recognized option for 'valid_type'." + emsg += " Please use either 'int' or 'str'." + raise CamConfigValError(emsg.format(valid_type)) + + #++++++++++++++++++++++++ + + def set_value(self, val): + + """ + Set configure object's value to the one provided. + """ + + # First, check that the provided value is valid + if self.__valid_type is not None: + self.__check_value(val) + + # If ok, then set object's value to one provided + self.__value = val + + ############################################################################### # MAIN CAM CONFIGURE OBJECT ############################################################################### @@ -568,17 +705,22 @@ class ConfigCAM: >>> FCONFIG.get_value("test_str") 'test_val' + With a given list value: + >>> FCONFIG.create_config("test_list", "test object description", [1, 2]) + >>> FCONFIG.get_value("test_list") + [1, 2] + 2. 
Check that the same configure object can't be created twice: >>> FCONFIG.create_config("test_int", "test object description", 5) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): CamConfigValError: ERROR: The CAM config variable, 'test_int', already exists! Any new config variable must be given a different name - 3. Check that a configure object's given value must be either a string or integer: + 3. Check that a configure object's given value must be either a string, integer or list: - >>> FCONFIG.create_config("test_list", "test_object_description", [5]) #doctest: +IGNORE_EXCEPTION_DETAIL + >>> FCONFIG.create_config("test_dict", "test_object_description", {"x": "y"}) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): - CamConfigTypeError: ERROR: The input value for new CAM config variable, 'test_list', must be either an integer or a string, not + CamConfigTypeError: ERROR: The input value for new CAM config variable, 'test_dict', must be either an integer or a string, not """ @@ -607,7 +749,7 @@ def __init__(self, case, case_log): self.__atm_name = case.get_value("COMP_ATM") # Save CPP definitions as a list: - self.__cppdefs = case.get_value("CAM_CPPDEFS").split() + self.__cppdefs = [x for x in case.get_value("CAM_CPPDEFS").split() if x] # If only "UNSET" is present in the list, then convert to # empty list: @@ -698,10 +840,10 @@ def __init__(self, case, case_log): hgrid_desc = "Horizontal grid specifier." # dynamics package source directories meta-data - dyn_dirs_desc = "Comma-seperated list of local directories containing" \ + dyn_dirs_desc = "Comma-separated list of local directories containing" \ " dynamics package source code.\n" \ "These directories are assumed to be located under" \ - " src/dynamics, with a backslah ('/') indicating directory hierarchy." + " src/dynamics, with a slash ('/') indicating directory hierarchy." # Create regex expressions to search for the different dynamics grids eul_grid_re = re.compile(r"T[0-9]+") # Eulerian dycore @@ -730,7 +872,8 @@ def __init__(self, case, case_log): se_grid_re, is_nml_attr=True) # Source code directories - self.create_config("dyn_src_dirs", dyn_dirs_desc, "se,se/dycore") + self.create_config("dyn_src_dirs", dyn_dirs_desc, ["se",os.path.join("se","dycore")], + valid_list_type="str") # Add SE namelist groups to nmlgen list self.__nml_groups.append("air_composition_nl") @@ -789,7 +932,8 @@ def __init__(self, case, case_log): None, is_nml_attr=True) # Source code directories - self.create_config("dyn_src_dirs", dyn_dirs_desc, "none") + self.create_config("dyn_src_dirs", dyn_dirs_desc, ["none"], + valid_list_type="str") else: emsg = "ERROR: The specified CAM horizontal grid, '{}', " @@ -849,6 +993,11 @@ def __init__(self, case, case_log): self.add_cppdef("NP", csnp_val) else: + # Additional dyn value checks are not required, + # as the "dyn_valid_vals" list in the "create_config" call + # prevents non-supported dycores from being used, and all + # dycores are lat/lon-based. 
+ # Add number of latitudes in grid to configure object nlat_desc = ["Number of unique latitude points in rectangular lat/lon grid.", "Set to 1 (one) for unstructured grids."] @@ -928,7 +1077,7 @@ def __init__(self, case, case_log): # Set phys->dyn kind conversion CPPdef if kinds are different: if self.get_value("dyn_kind") != self.get_value("phys_kind"): - self.add_cppdef("DYN_PHYS_KIND_DIFF") + self.add_cppdef("DYN_PHYS_KIND_DIFF") #-------------------------------------------------------- # Print CAM configure settings and values to debug logger @@ -1024,8 +1173,8 @@ def parse_config_opts(cls, config_opts, test_mode=False): # end if return pargs - def create_config(self, name, desc, val, - valid_vals=None, is_nml_attr=False): + def create_config(self, name, desc, val, valid_vals=None, + valid_list_type=None, is_nml_attr=False): """ Create new CAM "configure" object, and add it @@ -1043,10 +1192,14 @@ def create_config(self, name, desc, val, conf_obj = ConfigString(name, desc, val, valid_vals, is_nml_attr=is_nml_attr) + elif isinstance(val, list): + # If list, then call list configure object + conf_obj = ConfigList(name, desc, val, + valid_type=valid_list_type) else: - # If neither an integer or a string, then throw an error + # If not an integer, string, or a list, then throw an error emsg = ("ERROR: The input value for new CAM config variable, '{}', " - "must be either an integer or a string, not {}") + "must be an integer, string, or list, not {}") raise CamConfigTypeError(emsg.format(name, type(val))) # Next, check that object name isn't already in the config list @@ -1134,13 +1287,47 @@ def add_cppdef(self, cppname, value=None): """ Add a CPP definition value to be used during the - building of the model. + building of the model. An error is thrown if + the CPP macro has already been defined. + + Check that add_cppdef works properly: + >>> FCONFIG.add_cppdef("TEST"); FCONFIG.cpp_defs + ['-DTEST_CPPDEF', '-DNEW_TEST=5', '-DTEST'] + + Check that add_cppdef works properly with provided value: + >>> FCONFIG.add_cppdef("COOL_VAR", 100); FCONFIG.cpp_defs + ['-DTEST_CPPDEF', '-DNEW_TEST=5', '-DTEST', '-DCOOL_VAR=100'] + + Check that a duplicate cppdef creates an error: + >>> FCONFIG.add_cppdef("TEST_CPPDEF") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: CPP definition 'TEST_CPPDEF' has already been set + + Check that a duplicate cppdef creates an error even if an equals sign + is present in the stored copy but not the passed variable: + >>> FCONFIG.add_cppdef("NEW_TEST") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: CPP definition 'NEW_TEST' has already been set """ + #Create string to check if CPP definition is already present: + check_str = r"-D"+cppname.upper() + + #Check if CPP definition name already exists in CPP string list. 
+ #This is done because a CPP definition should only be set once, + #in order to avoid variable overwriting or other un-expected + #compiler behaviors: + if any([re.match(check_str+r"($|=)", cppdef.strip()) for cppdef in self.__cppdefs]): + #If match is found, then raise an error: + emsg = "ERROR: CPP definition '{}' has already been set" + raise CamConfigValError(emsg.format(cppname.upper())) + # Check if input value is a logical: if value is None: # Create CPP flag string with no equals sign: - cpp_str = "-D{}".format(cppname.upper()) + cpp_str = check_str else: # Create CPP definition flag string: cpp_str = "-D{}={}".format(cppname.upper(), value) @@ -1255,7 +1442,7 @@ def generate_cam_src(self, gen_fort_indent): #++++++++++++++++++++++++ - def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): + def ccpp_phys_set(self, cam_nml_attr_dict, user_nl_file): """ Determine if a user has specified which @@ -1266,7 +1453,7 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): """ #Extract physics suite list: - phys_suites = config.get_value('physics_suites').split(';') + phys_suites = self.get_value('physics_suites').split(';') if len(phys_suites) > 1: #If more than one physics suite is listed, @@ -1280,12 +1467,12 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): #Break out "physics_suite" lines: phys_suite_lines = \ [[x.strip() for x in line.split('=')] \ - for line in nl_user_lines if line[0] != "!" and 'physics_suite' in line] + for line in nl_user_lines if line.lstrip()[0] != "!" and 'physics_suite' in line] #If there is no "physics_suite" line, then throw an error: if not phys_suite_lines: emsg = "No 'physics_suite' variable is present in user_nl_cam.\n" - emsg += "This is required if more than one suite is listed\n" + emsg += "This is required if more than one suite is listed\n" emsg += "in CAM_CONFIG_OPTS." raise CamConfigValError(emsg) @@ -1303,7 +1490,8 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): #If there is only one string entry, then it means the equals (=) sign was never found: emsg = "No equals (=) sign was found with the 'physics_suite' variable." raise CamConfigValError(emsg) - elif len(phys_suite_list) > 2: + + if len(phys_suite_list) > 2: #If there is more than two entries, it means there were two or more equals signs: emsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." 
raise CamConfigValError(emsg) @@ -1313,9 +1501,9 @@ def ccpp_phys_set(config, cam_nml_attr_dict, user_nl_file): #Check that physics suite specified is actually in config list: if phys_suite_val not in phys_suites: - emsg = "physics_suite specified in user_nl_cam doesn't match any suites\n" + emsg = "physics_suite specified in user_nl_cam, '{}', doesn't match any suites\n" emsg += "listed in CAM_CONFIG_OPTS" - raise CamConfigValError(emsg) + raise CamConfigValError(emsg.format(phys_suite_val)) else: #If only a single physics suite is listed, then just use that one: @@ -1362,7 +1550,7 @@ def __init__(self): "CASEROOT" : "/another/made-up/path", "CAM_CONFIG_OPTS" : "-dyn none --physics-suites adiabatic", "COMP_ROOT_DIR_ATM" : "/a/third/made-up/path", - "CAM_CPPDEFS" : "UNSET", + "CAM_CPPDEFS" : "-DTEST_CPPDEF -DNEW_TEST=5", "NTHRDS_ATM" : 1, "RUN_STARTDATE" : "101" } diff --git a/cime_config/namelist_definition_cam.xml b/cime_config/namelist_definition_cam.xml index bc43afc1..c32cb8ff 100644 --- a/cime_config/namelist_definition_cam.xml +++ b/cime_config/namelist_definition_cam.xml @@ -7222,6 +7222,7 @@ 30 26 + 30 32 @@ -10781,7 +10782,7 @@ - char*6(20) + char*80(20) physconst air_composition_nl @@ -10790,33 +10791,61 @@ dry air are constant. If set then the list of major species is assumed to have 'N2' listed last. This information is currently used only for computing the variable properties of air in WACCM-X configurations. - Default: ['O', 'O2', 'H', 'N2'] if WACCM-X, otherwise None. + Default if WACCM-X: + + ['O_mixing_ratio_wrt_dry_air', 'O2_mixing_ratio_wrt_dry_air', + 'H_mixing_ratio_wrt_dry_air', 'N2_mixing_ratio_wrt_dry_air'] + + Otherwise default is None. "" - O, O2, H, N2 + + O_mixing_ratio_wrt_dry_air, O2_mixing_ratio_wrt_dry_air, H_mixing_ratio_wrt_dry_air, N2_mixing_ratio_wrt_dry_air + - char*6(20) + char*80(20) physconst air_composition_nl List of water species that are included in "moist" air. This is currently used only by the SE dycore to generalize the computation of the moist air mass and thermodynamic properties. - Default: - ['Q','CLDLIQ','RAINQM'] if CAM4, CAM5, or Kessler physics is used. - ['Q','CLDLIQ','CLDICE','RAINQM','SNOWQM'] if CAM6 physics is used. - ['Q'] for all other physics choices. 
- - - Q - Q, CLDLIQ, RAINQM - Q, CLDLIQ, RAINQM - Q, CLDLIQ, RAINQM - Q, CLDLIQ, CLDICE, RAINQM, SNOWQM + Default if CAM4, CAM5, or Kessler physics is used: + + ['specific_humidity', + 'cloud_liquid_water_mixing_ratio_wrt_dry_air', + 'rain_mixing_ratio_wrt_dry_air'] + + Default if CAM6 physics is used: + + ['specific_humidity', + 'cloud_liquid_water_mixing_ratio_wrt_dry_air', + 'cloud_ice_mixing_ratio_wrt_dry_air', + 'rain_mixing_ratio_wrt_dry_air', + 'snow_mixing_ratio_wrt_dry_air'] + + Otherwise default is: ['specific_humidity'] + + + + specific_humidity + + + specific_humidity, cloud_liquid_water_mixing_ratio_wrt_dry_air, rain_mixing_ratio_wrt_dry_air + + + specific_humidity, cloud_liquid_water_mixing_ratio_wrt_dry_air, rain_mixing_ratio_wrt_dry_air + + + specific_humidity, cloud_liquid_water_mixing_ratio_wrt_dry_air, rain_mixing_ratio_wrt_dry_air + + + specific_humidity, cloud_liquid_water_mixing_ratio_wrt_dry_air, cloud_ice_mixing_ratio_wrt_dry_air, rain_mixing_ratio_wrt_dry_air, rain_mixing_ratio_wrt_dry_air + diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 3f97eb8e..7f2ef15c 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -140,7 +140,8 @@ class VarBase: def __init__(self, elem_node, local_name, dimensions, known_types, type_default, units_default="", kind_default='', protected=False, index_name='', local_index_name='', - local_index_name_str='', alloc_default='none'): + local_index_name_str='', alloc_default='none', + tstep_init_default=False): self.__local_name = local_name self.__dimensions = dimensions self.__units = elem_node.get('units', default=units_default) @@ -157,6 +158,8 @@ def __init__(self, elem_node, local_name, dimensions, known_types, self.__local_index_name = local_index_name self.__local_index_name_str = local_index_name_str self.__allocatable = elem_node.get('allocatable', default=alloc_default) + self.__tstep_init = elem_node.get("phys_timestep_init_zero", + default=tstep_init_default) if self.__allocatable == "none": self.__allocatable = "" # end if @@ -188,6 +191,11 @@ def __init__(self, elem_node, local_name, dimensions, known_types, # end if # pylint: enable=bad-continuation + if self.__tstep_init == "true": + self.__tstep_init = True + elif self.__tstep_init == "false": + self.__tstep_init = False + def write_metadata(self, outfile): """Write out this variable as CCPP metadata""" outfile.write('[ {} ]\n'.format(self.local_name)) @@ -207,7 +215,8 @@ def write_metadata(self, outfile): outfile.write(' {} = {}\n'.format('dimensions', self.dimension_string)) - def write_initial_value(self, outfile, indent, init_var, ddt_str): + def write_initial_value(self, outfile, indent, init_var, ddt_str, + tstep_init=False): """Write the code for the initial value of this variable and/or one of its array elements.""" #Check if variable has associated array index @@ -240,11 +249,18 @@ def write_initial_value(self, outfile, indent, init_var, ddt_str): init_val = '' # end if # end if - if init_val: + #Time-step initialization, which is always zero: + if tstep_init: + if self.kind: + outfile.write("{} = 0._{}".format(var_name, self.kind), indent) + else: + #Assume variable is an integer: + outfile.write("{} = 0".format(var_name), indent) + # end if + elif init_val: outfile.write("if ({}) then".format(init_var), indent) outfile.write("{} = {}".format(var_name, init_val), indent+1) outfile.write("end if", indent) - # end if # end if @property @@ -340,13 +356,19 @@ def is_ddt(self): """Return 
True iff this variable is a derived type""" return self.__type.ddt + @property + def tstep_init(self): + """Return True if variable will be set to zero every physics timestep.""" + return self.__tstep_init + ############################################################################### class ArrayElement(VarBase): ############################################################################### """Documented array element of a registry Variable""" def __init__(self, elem_node, parent_name, dimensions, known_types, - parent_type, parent_kind, parent_units, parent_alloc, vdict): + parent_type, parent_kind, parent_units, parent_alloc, + parent_tstep_init, vdict): """Initialize the Arary Element information by identifying its metadata properties """ @@ -401,7 +423,8 @@ def __init__(self, elem_node, parent_name, dimensions, known_types, index_name=index_name, local_index_name=local_index_name, local_index_name_str=local_index_name_str, - alloc_default=parent_alloc) + alloc_default=parent_alloc, + tstep_init_default=parent_tstep_init) @property def index_string(self): @@ -438,7 +461,7 @@ class Variable(VarBase): __VAR_ATTRIBUTES = ["access", "allocatable", "dycore", "extends", "kind", "local_name", "name", "standard_name", - "type", "units", "version"] + "type", "units", "version", "phys_timestep_init_zero"] def __init__(self, var_node, known_types, vdict, logger): # pylint: disable=too-many-locals @@ -526,7 +549,7 @@ def __init__(self, var_node, known_types, vdict, logger): my_dimensions, known_types, ttype, self.kind, self.units, allocatable, - vdict)) + self.tstep_init, vdict)) # end if (all other processing done above) # end for @@ -642,7 +665,6 @@ def write_allocate_routine(self, outfile, indent, is a string to use to write initialization test code. is a string to use to write reallocate test code. is a prefix string (e.g., state%). - is a TypeRegistry. """ # Be careful about dimensions, scalars have none, not '()' if self.dimensions: @@ -700,6 +722,61 @@ def write_allocate_routine(self, outfile, indent, # end for # end if + def write_tstep_init_routine(self, outfile, indent, + ddt_str, init_val=False): + """ + Write the code to iniitialize this variable to zero at the + start of each physics timestep. + + is a prefix string (e.g., state%). + is an optional variable that forces the writing + of the variable initiliazation code even if not + directly specified in the registry itself. + """ + + # Be careful about dimensions, scalars have none, not '()' + if self.dimensions: + dimension_string = self.dimension_string + else: + dimension_string = '' + # end if + my_ddt = self.is_ddt + if my_ddt: # This is a DDT object, allocate entries + + subi = indent + sub_ddt_str = '{}{}%'.format(ddt_str, self.local_name) + if dimension_string: + emsg = "Arrays of DDT objects not implemented" + raise ParseInternalError(emsg) + # end if + for var in my_ddt.variable_list(): + var.write_tstep_init_routine(outfile, subi, sub_ddt_str, + init_val=self.tstep_init) + else: + + # Do nothing if a parameter + if self.allocatable == "parameter": + return + + # Check if variable should be initialized: + if init_val or self.tstep_init: + # Set variables needed for writing source code + if self.long_name: + comment = ' ! ' + self.local_name + ": " + self.long_name + else: + comment = (' ! 
' + self.local_name + ": " + + convert_to_long_name(self.standard_name)) + + # Write source code + outfile.write("", 0) + outfile.write(comment, indent) + + # Initialize the variable: + self.write_initial_value(outfile, indent, "", ddt_str, + tstep_init=True) + + # end if + @classmethod def constant_dimension(cls, dim): """Return dimension value if is a constant dimension, else None""" @@ -1180,17 +1257,25 @@ def write_source(self, outdir, indent, logger): outfile.write('!! public interfaces', 0) outfile.write('public :: {}'.format(self.allocate_routine_name()), 1) + outfile.write('public :: {}'.format(self.tstep_init_routine_name()), + 1) # end of module header outfile.end_module_header() outfile.write("", 0) # Write data management subroutines self.write_allocate_routine(outfile) + self.write_tstep_init_routine(outfile) + # end with def allocate_routine_name(self): """Return the name of the allocate routine for this module""" return 'allocate_{}_fields'.format(self.name) + def tstep_init_routine_name(self): + """Return the name of the physics timestep init routine for this module""" + return "{}_tstep_init".format(self.name) + def write_allocate_routine(self, outfile): """Write a subroutine to allocate all the data in this module""" subname = self.allocate_routine_name() @@ -1241,6 +1326,24 @@ def write_allocate_routine(self, outfile): # end for outfile.write('end subroutine {}'.format(subname), 1) + def write_tstep_init_routine(self, outfile): + """ + Write a subroutine to initialize registered variables + to zero at the beginning of each physics timestep. + """ + subname = self.tstep_init_routine_name() + outfile.write('', 0) + outfile.write('subroutine {}()'.format(subname), 1) + outfile.write('', 0) + outfile.write('!! Local variables', 2) + subn_str = 'character(len=*), parameter :: subname = "{}"' + outfile.write(subn_str.format(subname), 2) + for var in self.__var_dict.variable_list(): + var.write_tstep_init_routine(outfile, 2, '') + # end for + outfile.write('', 0) + outfile.write('end subroutine {}'.format(subname), 1) + @property def name(self): """Return this File's name""" @@ -1291,7 +1394,7 @@ def parse_command_line(args, description): help="Dycore (EUL, FV, FV3, MPAS, SE, none)") parser.add_argument("--config", type=str, required=True, metavar='CONFIG (required)', - help=("Comma-separated onfig items " + help=("Comma-separated config items " "(e.g., gravity_waves=True)")) parser.add_argument("--output-dir", type=str, default=None, help="Directory where output files will be written") @@ -1562,9 +1665,10 @@ def main(): else: loglevel = logging.INFO # end if + retcode, files = gen_registry(args.registry_file, args.dycore.lower(), - args.config, outdir, args.source_mods, - args.source_root, args.indent, + args.config, outdir, args.indent, + args.source_mods, args.source_root, loglevel=loglevel) return retcode, files diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 1d486113..4349b1f1 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -3,6 +3,7 @@ module physconst ! Physical constants. Use csm_share values whenever available. use ccpp_kinds, only: kind_phys + use shr_kind_mod, only: r8 => shr_kind_r8 use shr_const_mod, only: shr_const_g, shr_const_stebol use shr_const_mod, only: shr_const_tkfrz, shr_const_mwdair @@ -43,10 +44,12 @@ module physconst public :: get_dp ! pressure level thickness from dry dp and dry mixing ratios public :: get_pmid_from_dp ! full level pressure from dp (approximation depends on dycore) + public :: get_ps ! 
surface pressure public :: get_thermal_energy ! thermal energy quantity = dp*cp*T public :: get_virtual_temp ! virtual temperature public :: get_cp ! (generalized) heat capacity public :: get_cp_dry ! (generalized) heat capacity for dry air + public :: get_sum_species ! sum of thermodynamically active species: dp_dry*sum_species=dp public :: get_gz_given_dp_Tv_Rdry ! geopotential (with dp,dry R and Tv as input) public :: get_R_dry ! (generalized) dry air gas constant public :: get_kappa_dry ! (generalized) dry kappa = R_dry/cp_dry @@ -70,7 +73,7 @@ module physconst real(kind_phys), public, parameter :: cday = real(shr_const_cday, kind_phys) ! specific heat of fresh h2o (J/K/kg) real(kind_phys), public, parameter :: cpliq = real(shr_const_cpfw, kind_phys) - ! specific heat of ice (J/K/kg) + ! specific heat of ice (J/K/kg) real(kind_phys), public, parameter :: cpice = real(shr_const_cpice, kind_phys) ! Von Karman constant real(kind_phys), public, parameter :: karman = real(shr_const_karman, kind_phys) @@ -177,8 +180,8 @@ module physconst ! NOTE: These routines may be replaced once constituents are enabled in the CCPP-framework ! integer, parameter :: num_names_max = 30 - character(len=6) :: dry_air_species(num_names_max) - character(len=6) :: water_species_in_air(num_names_max) + character(len=80) :: dry_air_species(num_names_max) + character(len=80) :: water_species_in_air(num_names_max) integer, protected, public :: dry_air_species_num integer, protected, public :: water_species_in_air_num @@ -254,6 +257,17 @@ subroutine physconst_readnl(nlfile) logical :: newg, newsday, newmwh2o, newcpwv logical :: newmwdry, newcpair, newrearth, newtmelt, newomega + ! Kind-converstion variables, to ensure that MPI broadcast + ! works as expected: + real(r8) :: gravit_r8 + real(r8) :: sday_r8 + real(r8) :: mwh2o_r8 + real(r8) :: cpwv_r8 + real(r8) :: mwdry_r8 + real(r8) :: cpair_r8 + real(r8) :: rearth_r8 + real(r8) :: tmelt_r8 + real(r8) :: omega_r8 ! Physical constants needing to be reset (ie. for aqua planet experiments) namelist /physconst_nl/ gravit, sday, mwh2o, cpwv, mwdry, cpair, & @@ -275,17 +289,55 @@ subroutine physconst_readnl(nlfile) close(unitn) end if + ! Copy namelist variables into "r8" temporary variables + ! for broadcasting: + gravit_r8 = real(gravit, r8) + sday_r8 = real(sday, r8) + mwh2o_r8 = real(mwh2o, r8) + cpwv_r8 = real(cpwv, r8) + mwdry_r8 = real(mwdry, r8) + cpair_r8 = real(cpair, r8) + rearth_r8 = real(rearth, r8) + tmelt_r8 = real(tmelt, r8) + omega_r8 = real(omega, r8) + ! Broadcast namelist variables if (npes > 1) then - call mpi_bcast(gravit, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(sday, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(mwh2o, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(cpwv, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(mwdry, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(cpair, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(rearth, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(tmelt, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(omega, 1, mpi_real8, masterprocid, mpicom, ierr) + + ! Copy namelist variables into "r8" temporary variables + ! for broadcasting: + gravit_r8 = real(gravit, r8) + sday_r8 = real(sday, r8) + mwh2o_r8 = real(mwh2o, r8) + cpwv_r8 = real(cpwv, r8) + mwdry_r8 = real(mwdry, r8) + cpair_r8 = real(cpair, r8) + rearth_r8 = real(rearth, r8) + tmelt_r8 = real(tmelt, r8) + omega_r8 = real(omega, r8) + + ! 
Broadcast to other PEs: + call mpi_bcast(gravit_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(sday_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(mwh2o_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(cpwv_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(mwdry_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(cpair_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(rearth_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(tmelt_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(omega_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + + ! Convert broadcasted variables back to "kind_phys": + gravit = real(gravit_r8, kind_phys) + sday = real(sday_r8, kind_phys) + mwh2o = real(mwh2o_r8, kind_phys) + cpwv = real(cpwv_r8, kind_phys) + mwdry = real(mwdry_r8, kind_phys) + cpair = real(cpair_r8, kind_phys) + rearth = real(rearth_r8, kind_phys) + tmelt = real(tmelt_r8, kind_phys) + omega = real(omega_r8, kind_phys) + end if newg = gravit /= real(shr_const_g, kind_phys) @@ -383,7 +435,8 @@ subroutine physconst_readnl(nlfile) dry_air_species_num = 0 water_species_in_air_num = 0 do i = 1, num_names_max - if ((LEN_TRIM(dry_air_species(i)) > 0) .and. (TRIM(dry_air_species(i)) /= 'N2')) then + if ((LEN_TRIM(dry_air_species(i)) > 0) .and. & + (TRIM(dry_air_species(i)) /= 'mass_mixing_ratio_N2')) then dry_air_species_num = dry_air_species_num + 1 end if if (.not. LEN(TRIM(water_species_in_air(i)))==0) then @@ -597,7 +650,7 @@ subroutine composition_init() ! last major species in dry_air_species is derived from the others and constants associated with it ! are initialized here ! - if (TRIM(dry_air_species(dry_air_species_num+1))=='N2') then + if (TRIM(dry_air_species(dry_air_species_num+1))=='N2_mixing_ratio_wrt_dry_air') then ! call cnst_get_ind('N' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -646,7 +699,7 @@ subroutine composition_init() ! ! O ! - case('O') + case('O_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('O' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -669,7 +722,7 @@ subroutine composition_init() ! ! O2 ! - case('O2') + case('O2_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('O2' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -692,7 +745,7 @@ subroutine composition_init() ! ! H ! - case('H') + case('H_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('H' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -751,7 +804,7 @@ subroutine composition_init() ! ! Q ! - case('Q') + case('specific_humidity') ! call cnst_get_ind('Q' ,ix, abort=.false.) ix = ix_qv !This should be removed once constituents are enabled -JN. if (ix<1) then @@ -769,7 +822,7 @@ subroutine composition_init() ! ! CLDLIQ ! - case('CLDLIQ') + case('cloud_liquid_water_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('CLDLIQ' ,ix, abort=.false.) ix = ix_cld_liq !This should be removed once constituents are enabled -JN. if (ix<1) then @@ -784,7 +837,7 @@ subroutine composition_init() ! ! CLDICE ! - case('CLDICE') + case('cloud_ice_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('CLDICE' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -799,7 +852,7 @@ subroutine composition_init() ! ! RAINQM ! 
- case('RAINQM') + case('rain_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('RAINQM' ,ix, abort=.false.) ix = ix_rain !This should be removed once constituents are enabled -JN. if (ix<1) then @@ -814,7 +867,7 @@ subroutine composition_init() ! ! SNOWQM ! - case('SNOWQM') + case('snow_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('SNOWQM' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -829,7 +882,7 @@ subroutine composition_init() ! ! GRAUQM ! - case('GRAUQM') + case('graupel_mixing_ratio_wrt_dry_air') ! call cnst_get_ind('GRAUQM' ,ix, abort=.false.) ix = -1 !Model should die if it gets here, until constituents are enabled -JN. if (ix<1) then @@ -1425,6 +1478,44 @@ end subroutine get_molecular_diff_coef_reference ! !**************************************************************************************************************** ! + ! get pressure from dry pressure and thermodynamic active species (e.g., forms of water: water vapor, cldliq, etc.) + ! + !**************************************************************************************************************** + ! + subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop) + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_phys), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,1:ntrac) ! Tracer array + real(kind_phys), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_phys), intent(out) :: ps(i0:i1,j0:j1) ! surface pressure + real(kind_phys), intent(in) :: ptop + integer, intent(in) :: active_species_idx(:) + + integer :: i,j,k,m_cnst,nq + real(kind_phys) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + + dp = dp_dry + do nq=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = active_species_idx(nq) + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + dp(i,j,k) = dp(i,j,k) + tracer_mass(i,j,k,m_cnst) + end do + end do + end do + end do + ps = ptop + do k=k0,k1 + do j=j0,j1 + do i = i0,i1 + ps(i,j) = ps(i,j)+dp(i,j,k) + end do + end do + end do + end subroutine get_ps + ! + !**************************************************************************************************************** + ! ! Compute dry air heat capacity under constant pressure ! !**************************************************************************************************************** @@ -1916,5 +2007,37 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, deallocate(R_dry,cp_dry) end if end subroutine get_kappa_dry + ! + !**************************************************************************************************************** + ! + ! Compute sum of thermodynamically active species + ! + ! tracer is in units of dry mixing ratio unless optional argument dp_dry is present in which case tracer is + ! in units of "mass" (=m*dp) + ! + !**************************************************************************************************************** + ! + subroutine get_sum_species(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum_species,dp_dry) + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_phys), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracer array + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic active tracers + real(kind_phys), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness is present + ! then tracer is in units of mass + real(kind_phys), intent(out) :: sum_species(i0:i1,j0:j1,k0:k1) ! 
sum species + + real(kind_phys) :: factor(i0:i1,j0:j1,k0:k1) + integer :: nq,itrac + + if (present(dp_dry)) then + factor = 1.0_r8/dp_dry(:,:,:) + else + factor = 1.0_r8 + endif + sum_species = 1.0_r8 !all dry air species sum to 1 + do nq=dry_air_species_num+1,thermodynamic_active_species_num + itrac = active_species_idx(nq) + sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:) + end do + end subroutine get_sum_species end module physconst diff --git a/src/data/registry.xml b/src/data/registry.xml index 1e5bea45..403072fb 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -278,6 +278,7 @@ surface_air_pressure + surface_pressure_of_dry_air geopotential_at_surface air_temperature x_wind @@ -285,6 +286,18 @@ lagrangian_tendency_of_air_pressure dry_static_energy constituent_mixing_ratio + pressure_thickness + pressure_thickness_of_dry_air + reciprocal_of_pressure_thickness + reciprocal_of_pressure_thickness_of_dry_air + air_pressure + air_pressure_of_dry_air + ln_of_air_pressure + ln_of_air_pressure_of_dry_air + air_pressure_at_interface + air_pressure_of_dry_air_at_interface + ln_of_air_pressure_at_interface + ln_of_air_pressure_of_dry_air_at_interface total_tendency_of_air_temperature @@ -298,7 +311,7 @@ + units="None" type="physics_tend" phys_timestep_init_zero="true"> Total tendency from physics suite diff --git a/src/data/registry_v1_0.xsd b/src/data/registry_v1_0.xsd index 7efb8c37..f89d6087 100644 --- a/src/data/registry_v1_0.xsd +++ b/src/data/registry_v1_0.xsd @@ -94,17 +94,17 @@ - - - - - - - - - - - + + + + + + + + + + + @@ -139,6 +139,8 @@ + @@ -155,6 +157,8 @@ + @@ -172,6 +176,8 @@ + @@ -193,6 +199,8 @@ + diff --git a/src/dynamics/se/advect_tend.F90 b/src/dynamics/se/advect_tend.F90 index 0f6c0681..bec262a8 100644 --- a/src/dynamics/se/advect_tend.F90 +++ b/src/dynamics/se/advect_tend.F90 @@ -58,7 +58,7 @@ subroutine compute_adv_tends_xyz(elem,fvm,nets,nete,qn0,n0) init = .false. if ( .not. allocated( adv_tendxyz ) ) then init = .true. - allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete) ) + allocate( adv_tendxyz(nx,nx,nlev,pcnst,nets:nete), stat=iret ) call check_allocate(iret, subname, 'adv_tendxyz(nx,nx,nlev,pcnst,nets:nete)', & file=__FILE__, line=__LINE__) diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index a791df6a..916bedd3 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -52,7 +52,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! Note that all pressures and tracer mixing ratios coming from the dycore are based on ! dry air mass. - use physics_types, only: pdel ! use gravity_waves_sources, only: gws_src_fnct use dyn_comp, only: frontgf_idx, frontga_idx use hycoef, only: hyai, ps0 @@ -274,7 +273,7 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) phys_state%ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) do ilyr = 1, pver - pdel(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%pdel(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) phys_state%t(icol, ilyr) = real(T_tmp(blk_ind(1), ilyr, ie), kind_phys) phys_state%u(icol, ilyr) = real(uv_tmp(blk_ind(1), 1, ilyr, ie), kind_phys) phys_state%v(icol, ilyr) = real(uv_tmp(blk_ind(1), 2, ilyr, ie), kind_phys) @@ -293,12 +292,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) end do end do - ! Re-set physics tendencies to zero: - ! 
Is there a better solution here? -JN - phys_tend%dTdt(:,:) = 0._kind_phys - phys_tend%dudt(:,:) = 0._kind_phys - phys_tend%dvdt(:,:) = 0._kind_phys - if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, pbuf_chnk, pbuf_frontgf, pbuf_frontga) !Un-comment once pbuf replacement variables are available -JN: @@ -322,7 +315,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! Save the tracer fields input to physics package for calculating tendencies ! The mixing ratios are all dry at this point. -! q_prev(1:ncols,1:pver,:) = phys_state(lchnk)%q(1:ncols,1:pver,1:pcnst) q_prev(1:pcols,1:pver,:) = real(phys_state%q(1:pcols,1:pver,1:3), r8) call test_mapping_output_phys_state(phys_state,dyn_out%fvm) @@ -348,8 +340,6 @@ end subroutine d_p_coupling subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, tl_qdp) - use physics_types, only: pdel, pdeldry - ! Convert the physics output state into the dynamics input state. use test_fvm_mapping, only: test_mapping_overwrite_tendencies use test_fvm_mapping, only: test_mapping_output_mapped_tendencies @@ -448,7 +438,7 @@ subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, t do ilyr = 1, pver do icol=1, pcols !Apply adjustment only to water vapor: - factor = pdel(icol,ilyr)/pdeldry(icol,ilyr) + factor = phys_state%pdel(icol,ilyr)/phys_state%pdeldry(icol,ilyr) phys_state%q(icol,ilyr,ix_qv) = factor*phys_state%q(icol,ilyr,ix_qv) phys_state%q(icol,ilyr,ix_cld_liq) = factor*phys_state%q(icol,ilyr,ix_cld_liq) phys_state%q(icol,ilyr,ix_rain) = factor*phys_state%q(icol,ilyr,ix_rain) @@ -468,7 +458,7 @@ subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, t dyn_in%fvm) do ilyr = 1, pver - dp_phys(blk_ind(1),ilyr,ie) = real(pdeldry(icol,ilyr), r8) + dp_phys(blk_ind(1),ilyr,ie) = real(phys_state%pdeldry(icol,ilyr), r8) T_tmp(blk_ind(1),ilyr,ie) = real(phys_tend%dtdt(icol,ilyr), r8) uv_tmp(blk_ind(1),1,ilyr,ie) = real(phys_tend%dudt(icol,ilyr), r8) uv_tmp(blk_ind(1),2,ilyr,ie) = real(phys_tend%dvdt(icol,ilyr), r8) @@ -614,9 +604,6 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! Finally compute energy and water column integrals of the physics input state. ! use constituents, only: qmin - use physics_types, only: psdry, pint, lnpint, pintdry, lnpintdry - use physics_types, only: pdel, rpdel, pdeldry, rpdeldry - use physics_types, only: pmid, lnpmid, pmiddry, lnpmiddry use physics_types, only: exner, zi, zm, lagrangian_vertical use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv @@ -658,31 +645,31 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i = 1, pcols ! Set model-top values: - psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(pdel(i,:)) - pintdry(i,1) = real(hyai(1)*ps0, kind_phys) + phys_state%psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(phys_state%pdel(i,:)) + phys_state%pintdry(i,1) = real(hyai(1)*ps0, kind_phys) end do ! Calculate (natural) logarithm: - call shr_vmath_log(pintdry(1:pcols,1), & - lnpintdry(1:pcols,1), pcols) + call shr_vmath_log(phys_state%pintdry(1:pcols,1), & + phys_state%lnpintdry(1:pcols,1), pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols ! 
Calculate dry pressure variables for rest of column: - pintdry(i,k+1) = pintdry(i,k) + pdel(i,k) - pdeldry(i,k) = pdel(i,k) - rpdeldry(i,k) = 1._kind_phys/pdeldry(i,k) - pmiddry(i,k) = 0.5_kind_phys*(pintdry(i,k+1) + & - pintdry(i,k)) + phys_state%pintdry(i,k+1) = phys_state%pintdry(i,k) + phys_state%pdel(i,k) + phys_state%pdeldry(i,k) = phys_state%pdel(i,k) + phys_state%rpdeldry(i,k) = 1._kind_phys/phys_state%pdeldry(i,k) + phys_state%pmiddry(i,k) = 0.5_kind_phys*(phys_state%pintdry(i,k+1) + & + phys_state%pintdry(i,k)) end do ! Calculate (natural) logarithms: - call shr_vmath_log(pintdry(1:pcols,k+1),& - lnpintdry(1:pcols,k+1), pcols) + call shr_vmath_log(phys_state%pintdry(1:pcols,k+1),& + phys_state%lnpintdry(1:pcols,k+1), pcols) - call shr_vmath_log(pmiddry(1:pcols,k), & - lnpmiddry(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pmiddry(1:pcols,k), & + phys_state%lnpmiddry(1:pcols,k), pcols) end do ! wet pressure variables (should be removed from physics!) @@ -693,7 +680,7 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! to be consistent with total energy formula in physic's check_energy module only ! include water vapor in in moist dp factor_array(i,k) = 1._kind_phys+phys_state%q(i,k,ix_qv) - pdel(i,k) = pdeldry(i,k)*factor_array(i,k) + phys_state%pdel(i,k) = phys_state%pdeldry(i,k)*factor_array(i,k) end do end do @@ -702,29 +689,29 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i=1, pcols ! Set model-top values assuming zero moisture: - phys_state%ps(i) = pintdry(i,1) - pint(i,1) = pintdry(i,1) + phys_state%ps(i) = phys_state%pintdry(i,1) + phys_state%pint(i,1) = phys_state%pintdry(i,1) end do !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i=1, pcols ! Calculate wet (total) pressure variables for rest of column: - pint(i,k+1) = pint(i,k) + pdel(i,k) - pmid(i,k) = (pint(i,k+1) + pint(i,k))/2._kind_phys - phys_state%ps(i) = phys_state%ps(i) + pdel(i,k) + phys_state%pint(i,k+1) = phys_state%pint(i,k) + phys_state%pdel(i,k) + phys_state%pmid(i,k) = (phys_state%pint(i,k+1) + phys_state%pint(i,k))/2._kind_phys + phys_state%ps(i) = phys_state%ps(i) + phys_state%pdel(i,k) end do ! Calculate (natural) logarithms: - call shr_vmath_log(pint(1:pcols,k), lnpint(1:pcols,k), pcols) - call shr_vmath_log(pmid(1:pcols,k), lnpmid(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pint(1:pcols,k), phys_state%lnpint(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pmid(1:pcols,k), phys_state%lnpmid(1:pcols,k), pcols) end do - call shr_vmath_log(pint(1:pcols,pverp),lnpint(1:pcols,pverp),pcols) + call shr_vmath_log(phys_state%pint(1:pcols,pverp),phys_state%lnpint(1:pcols,pverp),pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols - rpdel(i,k) = 1._kind_phys/pdel(i,k) - exner(i,k) = (pint(i,pver+1)/pmid(i,k))**cappa + phys_state%rpdel(i,k) = 1._kind_phys/phys_state%pdel(i,k) + exner(i,k) = (phys_state%pint(i,pver+1)/phys_state%pmid(i,k))**cappa end do end do @@ -802,17 +789,11 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) zvirv(:,:) = zvir endif - !NOTE: Should geopotential be done in CCPP physics suite? -JN: - - ! Compute initial geopotential heights - based on full pressure - !call geopotential_t (phys_state%lnpint, phys_state%lnpmid , phys_state%pint , & - ! phys_state%pmid , phys_state%pdel , phys_state%rpdel , & - ! 
phys_state%t , phys_state%q(:,:,ix_qv), rairv, gravit, zvirv , & - ! phys_state%zi , phys_state%zm , ncol ) - + !Call geopotential_t CCPP scheme: call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & - pverp, 1, lnpint, pint, pmid, pdel, & - rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & + pverp, 1, phys_state%lnpint, phys_state%pint, & + phys_state%pmid, phys_state%pdel, & + phys_state%rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & rairv, gravit, zvirv, zi, zm, pcols, & errflg, errmsg) diff --git a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 index 240d07a4..aa091332 100644 --- a/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 +++ b/src/dynamics/se/dycore/comp_ctr_vol_around_gll_pts.F90 @@ -1370,7 +1370,7 @@ subroutine InitControlVolumes_gll(elem, hybrid,nets,nete) ! MNL: dx and dy are no longer part of element_t ! but they are easily computed for the ! uniform case - dx = pi/(2.0d0*dble(ne)) + dx = pi/(2.0_r8*real(ne, r8)) dy = dx ! intialize local element dual grid, local element areas @@ -2318,13 +2318,13 @@ subroutine VerifVolumes(elem, hybrid,nets,nete) if(hybrid%masterthread) then write(*,'(a,i2,a,2e23.15)') "cube face:",face," : SURFACE FV =",& - 6_r8*psum/(4_r8 * pi), & - 6_r8*psum/(4_r8 * pi)-1 + 6._r8*psum/(4._r8 * pi), & + 6._r8*psum/(4._r8 * pi)-1 end if end do if(hybrid%masterthread) then - write(iulog, *) "SURFACE FV (total)= ", ptot/(4_r8 * pi) + write(iulog, *) "SURFACE FV (total)= ", ptot/(4._r8 * pi) end if end subroutine VerifVolumes diff --git a/src/dynamics/se/dycore/derivative_mod.F90 b/src/dynamics/se/dycore/derivative_mod.F90 index 433a62c5..2cc0b73d 100644 --- a/src/dynamics/se/dycore/derivative_mod.F90 +++ b/src/dynamics/se/dycore/derivative_mod.F90 @@ -1,13 +1,13 @@ module derivative_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_abortutils, only: endrun, check_allocate - use dimensions_mod, only : np, nc, npdg, nelemd, nlev - use quadrature_mod, only : quadrature_t, gauss, gausslobatto,legendre, jacobi + use dimensions_mod, only: np, nc, npdg, nelemd, nlev + use quadrature_mod, only: quadrature_t, gauss, gausslobatto,legendre, jacobi ! needed for spherical differential operators: use dynconst, only: ra - use element_mod, only : element_t - use control_mod, only : hypervis_scaling, hypervis_power - use perf_mod, only : t_startf, t_stopf + use element_mod, only: element_t + use control_mod, only: hypervis_scaling, hypervis_power + use perf_mod, only: t_startf, t_stopf implicit none private @@ -937,7 +937,7 @@ function remap_phys2gll(pin,nphys) result(pout) ! compute phys grid cell edges on [-1,1] do i=1,nphys+1 - dx = 2d0/nphys + dx = 2.0_r8/nphys phys_edges(i)=-1 + (i-1)*dx enddo @@ -1783,9 +1783,13 @@ subroutine vlaplace_sphere_wk_mol(v,deriv,elem,undamprrcart,mol_nu,laplace) real(kind=r8), intent(out) :: laplace(np,np,2) real(kind=r8) :: vor(np,np),div(np,np) + real(kind=r8) :: ra_sq integer :: n,m + !Inverse of Earth radius squared: + ra_sq = ra**2.0_r8 + call divergence_sphere(v,deriv,elem,div) call vorticity_sphere(v,deriv,elem,vor) @@ -1799,8 +1803,8 @@ subroutine vlaplace_sphere_wk_mol(v,deriv,elem,undamprrcart,mol_nu,laplace) do n=1,np do m=1,np ! 
add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*ra_sq + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*ra_sq enddo enddo end if @@ -1826,7 +1830,10 @@ function vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) result integer component real(kind=r8) :: dum_cart(np,np,3) real(kind=r8) :: dum_cart2(np,np) + real(kind=r8) :: ra_sq + !Inverse of Earth radius squared: + ra_sq = ra**2.0_r8 ! latlon -> cartesian do component=1,3 @@ -1846,8 +1853,8 @@ function vlaplace_sphere_wk_cartesian(v,deriv,elem,var_coef,undamprrcart) result if (undamprrcart) then ! add in correction so we dont damp rigid rotation - laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*(ra**2) - laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*(ra**2) + laplace(:,:,1)=laplace(:,:,1) + 2*elem%spheremp(:,:)*v(:,:,1)*ra_sq + laplace(:,:,2)=laplace(:,:,2) + 2*elem%spheremp(:,:)*v(:,:,2)*ra_sq end if end function vlaplace_sphere_wk_cartesian @@ -1878,6 +1885,10 @@ function vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) integer i,j,l,m,n real(kind=r8) :: vor(np,np),div(np,np) real(kind=r8) :: v1,v2,div1,div2,vor1,vor2,phi_x,phi_y + real(kind=r8) :: ra_sq + + !Inverse of Earth radius squared: + ra_sq = ra**2.0_r8 call divergence_sphere(v,deriv,elem,div) call vorticity_sphere(v,deriv,elem,vor) @@ -1897,8 +1908,8 @@ function vlaplace_sphere_wk_contra(v,deriv,elem,var_coef,undamprrcart,nu_ratio) do n=1,np do m=1,np ! add in correction so we dont damp rigid rotation - laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*(ra**2) - laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*(ra**2) + laplace(m,n,1)=laplace(m,n,1) + 2*elem%spheremp(m,n)*v(m,n,1)*ra_sq + laplace(m,n,2)=laplace(m,n,2) + 2*elem%spheremp(m,n)*v(m,n,2)*ra_sq enddo enddo end if diff --git a/src/dynamics/se/dycore/dimensions_mod.F90 b/src/dynamics/se/dycore/dimensions_mod.F90 index bdba4287..120cf38a 100644 --- a/src/dynamics/se/dycore/dimensions_mod.F90 +++ b/src/dynamics/se/dycore/dimensions_mod.F90 @@ -32,9 +32,7 @@ module dimensions_mod integer , public :: fv_nphys !physics-grid resolution - the "MAX" is so that the code compiles with NC=0 integer, public, protected :: qsize_d !SE tracer dimension size - integer, public, protected :: ntrac_d !FVM tracer dimension size - - integer, public :: ntrac = 0 !ntrac is set in dyn_comp + integer, public, protected :: ntrac = 0 !FVM tracer dimension size integer, public :: qsize = 0 !qsize is set in dyn_comp ! ! hyperviscosity is applied on approximate pressure levels @@ -85,7 +83,7 @@ module dimensions_mod integer, public :: npdg = 0 ! dg degree for hybrid cg/dg element 0=disabled - integer, public, protected :: npsq + integer, public, parameter :: npsq = np*np integer, public, protected :: nlev integer, public, protected :: nlevp @@ -133,67 +131,66 @@ subroutine dimensions_mod_init() if (fv_nphys > 0) then ! Use CSLAM for tracer advection qsize_d = 10 ! SE tracers (currently SE supports 10 condensate loading tracers) - ntrac_d = pcnst + ntrac = pcnst else ! Use GLL for tracer advection qsize_d = pcnst - ntrac_d = 0 ! No fvm tracers if CSLAM is off + ntrac = 0 ! No fvm tracers if CSLAM is off end if ! Set grid dimension variables: - npsq = np*np nlev = pver nlevp = pverp ! 
Allocate vertically-dimensioned variables: - allocate(irecons_tracer_lev(pver), stat=iret) - call check_allocate(iret, subname, 'irecons_tracer_lev(pver)', & + allocate(irecons_tracer_lev(nlev), stat=iret) + call check_allocate(iret, subname, 'irecons_tracer_lev(nlev)', & file=__FILE__, line=__LINE__) - allocate(nu_scale_top(pver), stat=iret) - call check_allocate(iret, subname, 'nu_scale_top(pver)', & + allocate(nu_scale_top(nlev), stat=iret) + call check_allocate(iret, subname, 'nu_scale_top(nlev)', & file=__FILE__, line=__LINE__) - allocate(nu_lev(pver), stat=iret) - call check_allocate(iret, subname, 'nu_lev(pver)', & + allocate(nu_lev(nlev), stat=iret) + call check_allocate(iret, subname, 'nu_lev(nlev)', & file=__FILE__, line=__LINE__) - allocate(otau(pver), stat=iret) - call check_allocate(iret, subname, 'otau(pver)', & + allocate(otau(nlev), stat=iret) + call check_allocate(iret, subname, 'otau(nlev)', & file=__FILE__, line=__LINE__) - allocate(nu_div_lev(pver), stat=iret) - call check_allocate(iret, subname, 'nu_div_lev(pver)', & + allocate(nu_div_lev(nlev), stat=iret) + call check_allocate(iret, subname, 'nu_div_lev(nlev)', & file=__FILE__, line=__LINE__) - allocate(kmvis_ref(pver), stat=iret) - call check_allocate(iret, subname, 'kmvis_ref(pver)', & + allocate(kmvis_ref(nlev), stat=iret) + call check_allocate(iret, subname, 'kmvis_ref(nlev)', & file=__FILE__, line=__LINE__) - allocate(kmcnd_ref(pver), stat=iret) - call check_allocate(iret, subname, 'kmcnd_ref(pver)', & + allocate(kmcnd_ref(nlev), stat=iret) + call check_allocate(iret, subname, 'kmcnd_ref(nlev)', & file=__FILE__, line=__LINE__) - allocate(rho_ref(pver), stat=iret) - call check_allocate(iret, subname, 'rho_ref(pver)', & + allocate(rho_ref(nlev), stat=iret) + call check_allocate(iret, subname, 'rho_ref(nlev)', & file=__FILE__, line=__LINE__) - allocate(km_sponge_factor(pver), stat=iret) - call check_allocate(iret, subname, 'km_sponge_factor(pver)', & + allocate(km_sponge_factor(nlev), stat=iret) + call check_allocate(iret, subname, 'km_sponge_factor(nlev)', & file=__FILE__, line=__LINE__) - allocate(kmvisi_ref(pverp), stat=iret) - call check_allocate(iret, subname, 'kmvisi_ref(pverp)', & + allocate(kmvisi_ref(nlevp), stat=iret) + call check_allocate(iret, subname, 'kmvisi_ref(nlevp)', & file=__FILE__, line=__LINE__) - allocate(kmcndi_ref(pverp), stat=iret) - call check_allocate(iret, subname, 'kmcndi_ref(pverp)', & + allocate(kmcndi_ref(nlevp), stat=iret) + call check_allocate(iret, subname, 'kmcndi_ref(nlevp)', & file=__FILE__, line=__LINE__) - allocate(rhoi_ref(pverp), stat=iret) - call check_allocate(iret, subname, 'rhoi_ref(pverp)', & + allocate(rhoi_ref(nlevp), stat=iret) + call check_allocate(iret, subname, 'rhoi_ref(nlevp)', & file=__FILE__, line=__LINE__) end subroutine dimensions_mod_init diff --git a/src/dynamics/se/dycore/edge_mod.F90 b/src/dynamics/se/dycore/edge_mod.F90 index ba5365fa..42ab912f 100644 --- a/src/dynamics/se/dycore/edge_mod.F90 +++ b/src/dynamics/se/dycore/edge_mod.F90 @@ -646,7 +646,7 @@ subroutine initEdgeBuffer_i8(edge,nlyr) integer :: nbuf integer :: ierr - character(len=*), parameter :: subname = '' + character(len=*), parameter :: subname = 'initEdgeBuffer_i8 (SE)' ! 
sanity check for threading if (omp_get_num_threads()>1) then diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 index e5d1fc55..2fa3b91a 100644 --- a/src/dynamics/se/dycore/element_mod.F90 +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -4,6 +4,7 @@ module element_mod use coordinate_systems_mod, only: spherical_polar_t, cartesian2D_t, cartesian3D_t, distance use edgetype_mod, only: edgedescriptor_t use gridgraph_mod, only: gridvertex_t + use dimensions_mod, only: np, npsq use cam_abortutils, only: endrun, check_allocate implicit none @@ -21,12 +22,13 @@ module element_mod ! vertically-lagrangian code advects dp3d instead of ps ! tracers Q, Qdp always use 2 level time scheme - real(kind=r8), allocatable :: v(:,:,:,:,:) ! velocity - real(kind=r8), allocatable :: T(:,:,:,:) ! temperature - real(kind=r8), allocatable :: dp3d(:,:,:,:) ! dry delta p on levels - real(kind=r8), allocatable :: psdry(:,:) ! dry surface pressure - real(kind=r8), allocatable :: phis(:,:) ! surface geopotential (prescribed) - real(kind=r8), allocatable :: Qdp(:,:,:,:,:) ! Tracer mass + real(kind=r8), allocatable :: v(:,:,:,:,:) ! velocity + real(kind=r8), allocatable :: T(:,:,:,:) ! temperature + real(kind=r8), allocatable :: dp3d(:,:,:,:) ! dry delta p on levels + real(kind=r8), allocatable :: Qdp(:,:,:,:,:) ! Tracer mass + + real(kind=r8) :: psdry(np,np) ! dry surface pressure + real(kind=r8) :: phis(np,np) ! surface geopotential (prescribed) end type elem_state_t @@ -65,8 +67,6 @@ module element_mod real(kind=r8), allocatable :: dvdt_met(:,:,:) ! rate of change of meridional component of prescribed meteorology winds real(kind=r8), allocatable :: T_met(:,:,:) ! prescribed meteorology temperature real(kind=r8), allocatable :: dTdt_met(:,:,:) ! rate of change of prescribed meteorology temperature - real(kind=r8), allocatable :: ps_met(:,:) ! surface pressure of prescribed meteorology - real(kind=r8), allocatable :: dpsdt_met(:,:) ! rate of change of surface pressure of prescribed meteorology real(kind=r8), allocatable :: nudge_factor(:,:,:) ! nudging factor (prescribed) real(kind=r8), allocatable :: Utnd(:,:) ! accumulated U tendency due to nudging towards prescribed met real(kind=r8), allocatable :: Vtnd(:,:) ! accumulated V tendency due to nudging towards prescribed met @@ -74,6 +74,10 @@ module element_mod real(kind=r8), allocatable :: pecnd(:,:,:) ! pressure perturbation from condensate + real(kind=r8) :: ps_met(np,np) ! surface pressure of prescribed meteorology + real(kind=r8) :: dpsdt_met(np,np) ! rate of change of surface pressure of prescribed meteorology + + end type derived_state_t !___________________________________________________________________ @@ -94,7 +98,8 @@ module element_mod ! ============= DATA-STRUCTURES COMMON TO ALL SOLVERS ================ type, public :: index_t - integer, allocatable :: ia(:),ja(:) + integer :: ia(npsq) + integer :: ja(npsq) integer :: is,ie integer :: NumUniquePts integer :: UniquePtOffset @@ -106,25 +111,25 @@ module element_mod integer :: GlobalId ! Coordinate values of element points - type(spherical_polar_t), allocatable :: spherep(:,:) ! Spherical coords of GLL points + type(spherical_polar_t) :: spherep(np,np) ! Spherical coords of GLL points ! Equ-angular gnomonic projection coordinates - type(cartesian2D_t), allocatable :: cartp(:,:) ! gnomonic coords of GLL points - type(cartesian2D_t) :: corners(4) ! gnomonic coords of element corners - real(kind=r8) :: u2qmap(4,2) ! 
bilinear map from ref element to quad in cubedsphere coordinates + type(cartesian2D_t) :: cartp(np,np) ! gnomonic coords of GLL points + type(cartesian2D_t) :: corners(4) ! gnomonic coords of element corners + real(kind=r8) :: u2qmap(4,2) ! bilinear map from ref element to quad in cubedsphere coordinates ! SHOULD BE REMOVED ! 3D cartesian coordinates type(cartesian3D_t) :: corners3D(4) ! Element diagnostics - real(kind=r8) :: area ! Area of element - real(kind=r8) :: normDinv ! some type of norm of Dinv used for CFL - real(kind=r8) :: dx_short ! short length scale in km - real(kind=r8) :: dx_long ! long length scale in km + real(kind=r8) :: area ! Area of element + real(kind=r8) :: normDinv ! some type of norm of Dinv used for CFL + real(kind=r8) :: dx_short ! short length scale in km + real(kind=r8) :: dx_long ! long length scale in km - real(kind=r8), allocatable :: variable_hyperviscosity(:,:) ! hyperviscosity based on above - real(kind=r8) :: hv_courant ! hyperviscosity courant number - real(kind=r8), allocatable :: tensorVisc(:,:,:,:) !og, matrix V for tensor viscosity + real(kind=r8) :: variable_hyperviscosity(np,np) ! hyperviscosity based on above + real(kind=r8) :: hv_courant ! hyperviscosity courant number + real(kind=r8) :: tensorVisc(np,np,2,2) !og, matrix V for tensor viscosity ! Edge connectivity information ! integer :: node_numbers(4) @@ -137,12 +142,12 @@ module element_mod type(derived_state_t) :: derived ! Metric terms - real(kind=r8), allocatable :: met(:,:,:,:) ! metric tensor on velocity and pressure grid - real(kind=r8), allocatable :: metinv(:,:,:,:) ! metric tensor on velocity and pressure grid - real(kind=r8), allocatable :: metdet(:,:) ! g = SQRT(det(g_ij)) on velocity and pressure grid - real(kind=r8), allocatable :: rmetdet(:,:) ! 1/metdet on velocity pressure grid - real(kind=r8), allocatable :: D(:,:,:,:) ! Map covariant field on cube to vector field on the sphere - real(kind=r8), allocatable :: Dinv(:,:,:,:) ! Map vector field on the sphere to covariant v on cube + real(kind=r8) :: met(np,np,2,2) ! metric tensor on velocity and pressure grid + real(kind=r8) :: metinv(np,np,2,2) ! metric tensor on velocity and pressure grid + real(kind=r8) :: metdet(np,np) ! g = SQRT(det(g_ij)) on velocity and pressure grid + real(kind=r8) :: rmetdet(np,np) ! 1/metdet on velocity pressure grid + real(kind=r8) :: D(np,np,2,2) ! Map covariant field on cube to vector field on the sphere + real(kind=r8) :: Dinv(np,np,2,2) ! Map vector field on the sphere to covariant v on cube ! Mass flux across the sides of each sub-element. @@ -183,21 +188,21 @@ module element_mod ! Convert vector fields from spherical to rectangular components ! The transpose of this operation is its pseudoinverse. - real(kind=r8), allocatable :: vec_sphere2cart(:,:,:,:) + real(kind=r8) :: vec_sphere2cart(np,np,3,2) ! Mass matrix terms for an element on a cube face - real(kind=r8), allocatable :: mp(:,:) ! mass matrix on v and p grid - real(kind=r8), allocatable :: rmp(:,:) ! inverse mass matrix on v and p grid + real(kind=r8) :: mp(np,np) ! mass matrix on v and p grid + real(kind=r8) :: rmp(np,np) ! inverse mass matrix on v and p grid ! Mass matrix terms for an element on the sphere ! This mass matrix is used when solving the equations in weak form ! with the natural (surface area of the sphere) inner product - real(kind=r8), allocatable :: spheremp(:,:) ! mass matrix on v and p grid - real(kind=r8), allocatable :: rspheremp(:,:) ! inverse mass matrix on v and p grid + real(kind=r8) :: spheremp(np,np) ! 
mass matrix on v and p grid + real(kind=r8) :: rspheremp(np,np) ! inverse mass matrix on v and p grid - integer(i8), allocatable :: gdofP(:,:) ! global degree of freedom (P-grid) + integer(i8) :: gdofP(np,np) ! global degree of freedom (P-grid) - real(kind=r8), allocatable :: fcor(:,:) ! Coriolis term + real(kind=r8) :: fcor(np,np) ! Coriolis term type(index_t) :: idxP type(index_t), pointer :: idxV @@ -225,8 +230,6 @@ module element_mod subroutine PrintElem(arr) - use dimensions_mod, only: np - real(kind=r8) :: arr(:,:) integer :: i,j @@ -414,7 +417,7 @@ subroutine allocate_element_dims(elem) ! Allocate the SE element arrays using the pre-calculated SE dimensions - use dimensions_mod, only: np, nc, npsq, nlev, nlevp, qsize_d, ntrac_d + use dimensions_mod, only: nc, nlev, nlevp, qsize_d, ntrac !Dummy arguments: type(element_t), intent(inout) :: elem(:) @@ -430,26 +433,6 @@ subroutine allocate_element_dims(elem) do i=1,num - !Coordinate values of element points: - allocate(elem(i)%spherep(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%spherep(np,np)', & - file=__FILE__, line=__LINE__) - - !Gnomonic coords of GLL points: - allocate(elem(i)%cartp(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%cartp(np,np)', & - file=__FILE__, line=__LINE__) - - !Variable Hyperviscosity: - allocate(elem(i)%variable_hyperviscosity(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%variable_hyperviscosity(np,np)', & - file=__FILE__, line=__LINE__) - - !og, matrix V for tensor viscosity: - allocate(elem(i)%tensorVisc(np,np,2,2), stat=iret) - call check_allocate(iret, subname, 'elem%tensorVisc(np,np,2,2)', & - file=__FILE__, line=__LINE__) - !Allocate "state" variables: !-------------------------- @@ -468,16 +451,6 @@ subroutine allocate_element_dims(elem) call check_allocate(iret, subname, 'elem%state%dp3d(np,np,nlev,timelevels)', & file=__FILE__, line=__LINE__) - ! dry surface pressure - allocate(elem(i)%state%psdry(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%state%psdry(np,np)', & - file=__FILE__, line=__LINE__) - - ! surface geopotential (prescribed) - allocate(elem(i)%state%phis(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%state%phis(np,np)', & - file=__FILE__, line=__LINE__) - ! Tracer mass allocate(elem(i)%state%Qdp(np,np,nlev,qsize_d,2), stat=iret) call check_allocate(iret, subname, 'elem%state%Qdp(np,np,nlev,qsize_d,2)', & @@ -539,8 +512,8 @@ subroutine allocate_element_dims(elem) file=__FILE__, line=__LINE__) ! total tracer mass for diagnostics - allocate(elem(i)%derived%mass(max(qsize_d,ntrac_d)+9), stat=iret) - call check_allocate(iret, subname, 'elem%derived%mass(max(qsize_d,ntrac_d)+9)', & + allocate(elem(i)%derived%mass(max(qsize_d,ntrac)+9), stat=iret) + call check_allocate(iret, subname, 'elem%derived%mass(max(qsize_d,ntrac)+9)', & file=__FILE__, line=__LINE__) ! tracer forcing @@ -598,16 +571,6 @@ subroutine allocate_element_dims(elem) call check_allocate(iret, subname, 'elem%derived%dTdt_met(np,np,nlev)', & file=__FILE__, line=__LINE__) - ! surface pressure of prescribed meteorology - allocate(elem(i)%derived%ps_met(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%derived%ps_met(np,np)', & - file=__FILE__, line=__LINE__) - - ! rate of change of surface pressure of prescribed meteorology - allocate(elem(i)%derived%dpsdt_met(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%derived%dpsdt_met(np,np)', & - file=__FILE__, line=__LINE__) - ! 
nudging factor (prescribed) allocate(elem(i)%derived%nudge_factor(np,np,nlev), stat=iret) call check_allocate(iret, subname, 'elem%derived%nudge_factor(np,np,nlev)', & @@ -635,94 +598,11 @@ subroutine allocate_element_dims(elem) !---------------------------- - !Allocate "Metric terms": - !----------------------- - - ! metric tensor on velocity and pressure grid - allocate(elem(i)%met(np,np,2,2), stat=iret) - call check_allocate(iret, subname, 'elem%met(np,np,2,2)', & - file=__FILE__, line=__LINE__) - - ! metric tensor on velocity and pressure grid - allocate(elem(i)%metinv(np,np,2,2), stat=iret) - call check_allocate(iret, subname, 'elem%metinv(np,np,2,2)', & - file=__FILE__, line=__LINE__) - - ! g = SQRT(det(g_ij)) on velocity and pressure grid - allocate(elem(i)%metdet(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%metdet(np,np)', & - file=__FILE__, line=__LINE__) - - ! 1/metdet on velocity pressure grid - allocate(elem(i)%rmetdet(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%rmetdet(np,np)', & - file=__FILE__, line=__LINE__) - - ! Map covariant field on cube to vector field on the sphere - allocate(elem(i)%D(np,np,2,2), stat=iret) - call check_allocate(iret, subname, 'elem%D(np,np,2,2)', & - file=__FILE__, line=__LINE__) - - ! Map vector field on the sphere to covariant v on cube - allocate(elem(i)%Dinv(np,np,2,2), stat=iret) - call check_allocate(iret, subname, 'elem%Dinv(np,np,2,2)', & - file=__FILE__, line=__LINE__) - - !----------------------- - !First Coordinate: allocate(elem(i)%sub_elem_mass_flux(nc,nc,4,nlev), stat=iret) call check_allocate(iret, subname, 'elem%sub_elem_mass_flux(nc,nc,4,nlev)', & file=__FILE__, line=__LINE__) - !Spherical -> rectangular converter: - allocate(elem(i)%vec_sphere2cart(np,np,3,2), stat=iret) - call check_allocate(iret, subname, 'elem%vec_sphere2cart(np,np,3,2)', & - file=__FILE__, line=__LINE__) - - !Mass matrix on v and p grid: - allocate(elem(i)%mp(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%mp(np,np)', & - file=__FILE__, line=__LINE__) - - !Inverse mass matrix on v and p grid: - allocate(elem(i)%rmp(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%rmp(np,np)', & - file=__FILE__, line=__LINE__) - - !Mass matrix on v and p grid: - allocate(elem(i)%spheremp(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%spheremp(np,np)', & - file=__FILE__, line=__LINE__) - - !Inverse mass matrix on v and p grid: - allocate(elem(i)%rspheremp(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%rspheremp(np,np)', & - file=__FILE__, line=__LINE__) - - !Global degree of freedom (P-grid): - allocate(elem(i)%gdofP(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%gdofP(np,np)', & - file=__FILE__, line=__LINE__) - - !Coriolis term: - allocate(elem(i)%fcor(np,np), stat=iret) - call check_allocate(iret, subname, 'elem%fcor(np,np)', & - file=__FILE__, line=__LINE__) - - !Index terms: - !----------- - - allocate(elem(i)%idxP%ia(npsq), stat=iret) - call check_allocate(iret, subname, 'elem%idxP%ia(npsq)', & - file=__FILE__, line=__LINE__) - - allocate(elem(i)%idxP%ja(npsq), stat=iret) - call check_allocate(iret, subname, 'elem%idxP%ja(npsq)', & - file=__FILE__, line=__LINE__) - - !----------- - end do end subroutine allocate_element_dims diff --git a/src/dynamics/se/dycore/fvm_analytic_mod.F90 b/src/dynamics/se/dycore/fvm_analytic_mod.F90 index 2fc43829..6c3d7390 100644 --- a/src/dynamics/se/dycore/fvm_analytic_mod.F90 +++ b/src/dynamics/se/dycore/fvm_analytic_mod.F90 @@ -738,7 
+738,7 @@ subroutine init_flux_orient(flux_orient,ifct,nc,nhc,cubeboundary,faceno) ! ! halo of flux_orient will be filled through boundary exchange ! - flux_orient (1,1:nc,1:nc) = dble(faceno) + flux_orient (1,1:nc,1:nc) = real(faceno, r8) flux_orient (2,:,:) = 0.0_r8 ifct(:,:) = 1 if (cubeboundary>0) then @@ -1204,7 +1204,7 @@ subroutine get_equispace_weights(dx, x, w,ns) do j=1,ns do k=1,ns if (k.ne.j) then - w(j)=w(j)*(x-dble(k-1)*dx)/(dble(j-1)*dx-dble(k-1)*dx) + w(j)=w(j)*(x-real(k-1, r8)*dx)/(real(j-1, r8)*dx-real(k-1, r8)*dx) end if end do end do diff --git a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 index 8a78061e..f205c047 100644 --- a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 +++ b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 @@ -13,7 +13,7 @@ module fvm_control_volume_mod use shr_kind_mod, only: r8=>shr_kind_r8 use coordinate_systems_mod, only: spherical_polar_t use element_mod, only: element_t - use dimensions_mod, only: nc, nhe, nlev, ntrac_d, qsize_d,ne, np, nhr, ns, nhc + use dimensions_mod, only: nc, nhe, nlev, ntrac, qsize_d,ne, np, nhr, ns, nhc use dimensions_mod, only: fv_nphys, nhe_phys, nhr_phys, ns_phys, nhc_phys,fv_nphys use dimensions_mod, only: irecons_tracer use cam_abortutils, only: endrun, check_allocate @@ -368,14 +368,14 @@ subroutine allocate_physgrid_vars(fvm,par) 'fvm(ie)%Dinv_physgrid(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,2,2)', & file=__FILE__, line=__LINE__) - allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d)), stat=iret) + allocate(fvm(ie)%fc(nc,nc,nlev,max(ntrac,qsize_d)), stat=iret) call check_allocate(iret, subname, & - 'fvm(ie)%fc(nc,nc,nlev,max(ntrac_d,qsize_d))', & + 'fvm(ie)%fc(nc,nc,nlev,max(ntrac,qsize_d))', & file=__FILE__, line=__LINE__) - allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d)), stat=iret) + allocate(fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac,qsize_d)), stat=iret) call check_allocate(iret, subname, & - 'fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac_d,qsize_d))', & + 'fvm(ie)%fc_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,max(ntrac,qsize_d))', & file=__FILE__, line=__LINE__) allocate(fvm(ie)%ft(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev), stat=iret) @@ -420,9 +420,9 @@ subroutine allocate_fvm_dims(fvm) do ie=1,nelemd !fvm tracer mixing ratio: - allocate(fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d), stat=iret) + allocate(fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac), stat=iret) call check_allocate(iret, subname, & - 'fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac_d)', & + 'fvm(ie)%c(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,ntrac)', & file=__FILE__, line=__LINE__) allocate(fvm(ie)%se_flux(1-nhe:nc+nhe,1-nhe:nc+nhe,4,nlev), stat=iret) diff --git a/src/dynamics/se/dycore/fvm_mapping.F90 b/src/dynamics/se/dycore/fvm_mapping.F90 index d5525aff..cb5c6bd7 100644 --- a/src/dynamics/se/dycore/fvm_mapping.F90 +++ b/src/dynamics/se/dycore/fvm_mapping.F90 @@ -702,7 +702,7 @@ subroutine setup_interpdata_for_gll_to_phys_vec_mapping(interpdata,interp_p) ! OF REGULAR SE IF nc>0 ! 
ioff=1 - dx = 2.0_r8/dble(fv_nphys) + dx = 2.0_r8/real(fv_nphys, r8) do j=1,fv_nphys do i=1,fv_nphys interpdata%interp_xy(ioff)%x = -1_r8+(i-0.5_r8)*dx @@ -782,9 +782,9 @@ subroutine tensor_lagrange_interp(cubeboundary,np,nc,nhc,num_lev,nflds,psi,inter gll_points(3) = sqrt(1.0_r8/5.0_r8) gll_points(4) = 1.0_r8 - dx = 2_r8/dble(nc) + dx = 2_r8/real(nc, r8) do k=1-nc,2*nc - nc_points(k) = -1.0_r8+dx*0.5_r8+dble(k-1)*dx + nc_points(k) = -1.0_r8+dx*0.5_r8+real(k-1, r8)*dx end do ! ! find fvm point surrounding gll points for simple limiter diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index b827e2e3..042226ed 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -302,7 +302,7 @@ subroutine fvm_init1(par,elem) use control_mod, only: rsplit use dimensions_mod, only: qsize, qsize_d use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet - use dimensions_mod, only: nc,nhe, nhc, nlev,ntrac, ntrac_d,ns, nhr + use dimensions_mod, only: nc,nhe, nhc, nlev,ntrac, ntrac,ns, nhr use dimensions_mod, only: large_Courant_incr use dimensions_mod, only: kmin_jet,kmax_jet @@ -330,8 +330,6 @@ subroutine fvm_init1(par,elem) ! if (kmin_jet>kmax_jet) & call endrun("PARAMETER ERROR for fvm: kmin_jet must be < kmax_jet") - if (ntrac>ntrac_d) & - call endrun("PARAMETER ERROR for fvm: ntrac > ntrac_d") if (qsize>0.and.mod(rsplit,fvm_supercycling).ne.0) then if (par%masterproc) then @@ -358,8 +356,8 @@ subroutine fvm_init1(par,elem) end if call endrun("PARAMETER ERROR for fvm: large_courant_incr requires fvm_supercycling=fvm_supercycling_jet") endif - - if (par%masterproc) then + + if (par%masterproc) then write(iulog,*) " " write(iulog,*) "Done Tracer transport scheme information " write(iulog,*) " " @@ -367,16 +365,16 @@ subroutine fvm_init1(par,elem) if (par%masterproc) write(iulog,*) "fvm resolution is nc*nc in each element: nc = ",nc - if (par%masterproc) write(iulog,*)'ntrac,ntrac_d=',ntrac,ntrac_d - if (par%masterproc) write(iulog,*)'qsize,qsize_d=',qsize,qsize_d - + if (par%masterproc) write(iulog,*)'ntrac=',ntrac + if (par%masterproc) write(iulog,*)'qsize,qsize_d=',qsize,qsize_d + if (nc.ne.3) then - if (par%masterproc) then + if (par%masterproc) then write(iulog,*) "Only nc==3 is supported for CSLAM" endif call endrun("PARAMETER ERRROR for fvm: only nc=3 supported for CSLAM") end if - + if (par%masterproc) then write(iulog,*) " " if (ns==1) then @@ -454,7 +452,7 @@ end subroutine fvm_init1 subroutine fvm_init2(elem,fvm,hybrid,nets,nete) use fvm_control_volume_mod, only: fvm_mesh,fvm_set_cubeboundary use bndry_mod, only: compute_ghost_corner_orientation - use dimensions_mod, only: nlev, nc, nhc, nhe, ntrac, ntrac_d, np + use dimensions_mod, only: nlev, nc, nhc, nhe, ntrac, np use dimensions_mod, only: nhc_phys, fv_nphys use dimensions_mod, only: fvm_supercycling, fvm_supercycling_jet use dimensions_mod, only: kmin_jet,kmax_jet @@ -530,7 +528,7 @@ subroutine fvm_init3(elem,fvm,hybrid,nets,nete,irecons) use control_mod , only: neast, nwest, seast, swest use fvm_analytic_mod, only: compute_reconstruct_matrix use dimensions_mod , only: fv_nphys - use dimensions_mod, only: nlev, nc, nhe, nlev, ntrac, ntrac_d,nhc + use dimensions_mod, only: nlev, nc, nhe, nlev, ntrac, nhc use coordinate_systems_mod, only: cartesian2D_t,cartesian3D_t use coordinate_systems_mod, only: cubedsphere2cart, cart2cubedsphere implicit none @@ -721,9 +719,9 @@ subroutine fvm_init3(elem,fvm,hybrid,nets,nete,irecons) ! convert to element normalized coordinates ! 
fvm(ie)%norm_elem_coord(1,i,j) =(tmpgnom%x-elem(ie)%corners(1)%x)/& - (0.5_r8*dble(nc)*fvm(ie)%dalpha)-1.0_r8 + (0.5_r8*real(nc, r8)*fvm(ie)%dalpha)-1.0_r8 fvm(ie)%norm_elem_coord(2,i,j) =(tmpgnom%y-elem(ie)%corners(1)%y)/& - (0.5_r8*dble(nc)*fvm(ie)%dalpha)-1.0_r8 + (0.5_r8*real(nc, r8)*fvm(ie)%dalpha)-1.0_r8 else fvm(ie)%norm_elem_coord(1,i,j) = 1D9 fvm(ie)%norm_elem_coord(2,i,j) = 1D9 @@ -740,7 +738,6 @@ subroutine fvm_pg_init(elem, fvm, hybrid, nets, nete,irecons) use control_mod, only : neast, nwest, seast, swest use coordinate_systems_mod, only : cubedsphere2cart, cart2cubedsphere use dimensions_mod, only: fv_nphys, nhe_phys,nhc_phys - use dimensions_mod, only: ntrac_d use cube_mod ,only: dmap use control_mod ,only: cubed_sphere_map use fvm_analytic_mod, only: compute_reconstruct_matrix @@ -929,12 +926,12 @@ subroutine fvm_pg_init(elem, fvm, hybrid, nets, nete,irecons) ! convert to element normalized coordinates ! fvm(ie)%norm_elem_coord_physgrid(1,i,j) =(tmpgnom%x-elem(ie)%corners(1)%x)/& - (0.5_r8*dble(fv_nphys)*fvm(ie)%dalpha_physgrid)-1.0_r8 + (0.5_r8*real(fv_nphys, r8)*fvm(ie)%dalpha_physgrid)-1.0_r8 fvm(ie)%norm_elem_coord_physgrid(2,i,j) =(tmpgnom%y-elem(ie)%corners(1)%y)/& - (0.5_r8*dble(fv_nphys)*fvm(ie)%dalpha_physgrid)-1.0_r8 + (0.5_r8*real(fv_nphys, r8)*fvm(ie)%dalpha_physgrid)-1.0_r8 else - fvm(ie)%norm_elem_coord_physgrid(1,i,j) = 1D9 - fvm(ie)%norm_elem_coord_physgrid(2,i,j) = 1D9 + fvm(ie)%norm_elem_coord_physgrid(1,i,j) = 1E9_r8 + fvm(ie)%norm_elem_coord_physgrid(2,i,j) = 1E9_r8 end if end do end do diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 index c170f8e8..21d46b9b 100644 --- a/src/dynamics/se/dycore/global_norms_mod.F90 +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -335,8 +335,8 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& global_shared_buf(ie,2) = elem(ie)%dx_short enddo call wrap_repro_sum(nvars=2, comm=hybrid%par%comm) - avg_area = global_shared_sum(1)/dble(nelem) - avg_min_dx = global_shared_sum(2)/dble(nelem) + avg_area = global_shared_sum(1)/real(nelem, r8) + avg_min_dx = global_shared_sum(2)/real(nelem, r8) min_area = ParallelMin(min_area,hybrid) max_area = ParallelMax(max_area,hybrid) @@ -390,12 +390,12 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& ! viscosity in namelist specified for regions with a resolution ! equivilant to a uniform grid with ne=fine_ne if (np /= 4 ) call endrun('ERROR: setting fine_ne only supported with NP=4') - max_unif_dx = (111.28_r8*30)/dble(fine_ne) ! in km + max_unif_dx = (111.28_r8*30)/real(fine_ne, r8) ! in km endif ! ! note: if L = eigenvalue of metinv, then associated length scale (km) is - ! dx = 1.0_r8/( sqrt(L)*0.5_r8*dble(np-1)*ra*1000.0_r8) + ! dx = 1.0_r8/( sqrt(L)*0.5_r8*real(np-1, r8)*ra*1000.0_r8) ! ! for viscosity *tensor*, we take at each point: ! 
nu1 = nu*(dx1/max_unif_dx)**3.2 dx1 associated with eigenvalue 1 @@ -443,7 +443,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& min_hypervis = ParallelMin(min_hypervis, hybrid) max_hypervis = ParallelMax(max_hypervis, hybrid) call wrap_repro_sum(nvars=1, comm=hybrid%par%comm) - avg_hypervis = global_shared_sum(1)/dble(nelem) + avg_hypervis = global_shared_sum(1)/real(nelem, r8) normDinv_hypervis = ParallelMax(normDinv_hypervis, hybrid) @@ -1113,7 +1113,7 @@ subroutine automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min write(iulog,'(a,2e9.2,a,2f9.2)') "Value at min/max grid spacing: ",nu_min,nu_max,& " Max/min grid spacing (km) = ",max_min_dx,min_min_dx end if - nu = nu_min*(2.0_r8*rearth/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(rearth**4) + nu = nu_min*(2.0_r8*rearth/(3.0_r8*max_min_dx*1000.0_r8))**hypervis_scaling/(rearth**4._r8) if (hybrid%masterthread) & write(iulog,'(a,a,a,e9.3)') "Nu_tensor",TRIM(str)," = ",nu else if (hypervis_power/=0) then diff --git a/src/dynamics/se/dycore/interpolate_mod.F90 b/src/dynamics/se/dycore/interpolate_mod.F90 index 10f69c8f..a1d2e163 100644 --- a/src/dynamics/se/dycore/interpolate_mod.F90 +++ b/src/dynamics/se/dycore/interpolate_mod.F90 @@ -1041,13 +1041,13 @@ subroutine cube_facepoint_ne(sphere, ne, cart, number) ! The only time we can skip this statement is if ie=1, but then ! the statement has no effect, so lets never skip it: ! if (x1 > dx ) then - x1 = x1 - dble(ie-1)*dx + x1 = x1 - real(ie-1, r8)*dx ! endif x1 = 2.0_r8*(x1/dx)-1.0_r8 ! if (x2 > dx ) then ! removed MT 1/2009, see above - x2 = x2 - dble(je-1)*dx + x2 = x2 - real(je-1, r8)*dx ! endif x2 = 2.0_r8*(x2/dx)-1.0_r8 diff --git a/src/dynamics/se/dycore/parallel_mod.F90 b/src/dynamics/se/dycore/parallel_mod.F90 index 34a1b42e..21474652 100644 --- a/src/dynamics/se/dycore/parallel_mod.F90 +++ b/src/dynamics/se/dycore/parallel_mod.F90 @@ -113,7 +113,7 @@ function initmpi(npes_homme) result(par) use spmd_utils, only: mpicom, iam, npes use mpi, only: MPI_COMM_NULL, MPI_MAX_PROCESSOR_NAME use mpi, only: MPI_CHARACTER, MPI_INTEGER, MPI_BAND - use dimensions_mod, only: nlev, qsize_d, ntrac_d + use dimensions_mod, only: nlev, qsize_d, ntrac integer, intent(in) :: npes_homme @@ -148,7 +148,7 @@ function initmpi(npes_homme) result(par) PartitionForNodes = .TRUE. ! Initialize number of SE dycore variables used in repro_sum: - nrepro_vars = MAX(10, nlev*qsize_d, nlev*ntrac_d) + nrepro_vars = MAX(10, nlev*qsize_d, nlev*ntrac) ! 
Allocate repro_sum variable: allocate(global_shared_sum(nrepro_vars), stat=iret) diff --git a/src/dynamics/se/dycore/prim_state_mod.F90 b/src/dynamics/se/dycore/prim_state_mod.F90 index b01745fe..966aa2f9 100644 --- a/src/dynamics/se/dycore/prim_state_mod.F90 +++ b/src/dynamics/se/dycore/prim_state_mod.F90 @@ -1,7 +1,7 @@ module prim_state_mod use shr_kind_mod, only: r8=>shr_kind_r8 use cam_logfile, only: iulog - use dimensions_mod, only: nlev, np, nc, qsize_d, ntrac_d + use dimensions_mod, only: nlev, np, nc, qsize_d, ntrac use parallel_mod, only: ordered use hybrid_mod, only: hybrid_t use time_mod, only: timelevel_t, TimeLevel_Qdp, time_at @@ -65,7 +65,7 @@ subroutine prim_printstate(elem, tl,hybrid,nets,nete, fvm, omega_cn) !Allocate tracer-dimensioned variables: !------------- - vmax = 11+2*max(qsize_d,ntrac_d) + vmax = 11+2*max(qsize_d,ntrac) allocate(varname(vmax), stat=iret) call check_allocate(iret, subname, 'varname(vmax)', & file=__FILE__, line=__LINE__) diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index b1b70384..ebf5ed43 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -9,7 +9,6 @@ module dyn_comp ! cnst_read_iv, qmin, cnst_type, tottnam, & ! cnst_is_a_water_species use constituents, only: pcnst -use vert_coord, only: pver use cam_control_mod, only: initial_run, simple_phys use cam_initfiles, only: initial_file_get_id, topo_file_get_id, pertlim use dyn_grid, only: ini_grid_name, timelevel, hvcoord, edgebuf @@ -102,10 +101,10 @@ subroutine dyn_readnl(NLFileName) use mpi, only: mpi_real8, mpi_integer, mpi_character, mpi_logical use physconst, only: thermodynamic_active_species_num use shr_nl_mod, only: find_group_name => shr_nl_find_group_name - use shr_file_mod, only: shr_file_getunit, shr_file_freeunit use spmd_utils, only: masterproc, masterprocid, mpicom, npes use dyn_grid, only: se_write_grid_file, se_grid_filename, se_write_gll_corners use native_mapping, only: native_mapping_readnl + use vert_coord, only: pver !SE dycore: use namelist_mod, only: homme_set_defaults, homme_postprocess_namelist @@ -243,8 +242,7 @@ subroutine dyn_readnl(NLFileName) call MPI_barrier(mpicom, ierr) if (masterproc) then write(iulog, *) "dyn_readnl: reading dyn_se_nl namelist..." - unitn = shr_file_getunit() - open( unitn, file=trim(NLFileName), status='old' ) + open( newunit=unitn, file=trim(NLFileName), status='old' ) call find_group_name(unitn, 'dyn_se_nl', status=ierr) if (ierr == 0) then read(unitn, dyn_se_nl, iostat=ierr) @@ -253,7 +251,6 @@ subroutine dyn_readnl(NLFileName) end if end if close(unitn) - call shr_file_freeunit(unitn) end if ! Broadcast namelist values to all PEs @@ -307,11 +304,12 @@ subroutine dyn_readnl(NLFileName) ! If se_npes is set to negative one, then make it match host model: if (se_npes == -1) then se_npes = npes - else - ! Check that se_npes is a positive integer: - if (se_npes <= 0) then - call endrun('dyn_readnl: ERROR: se_npes must either be > 0 or exactly -1') - end if + else if (se_npes <= 0) then + ! se_npes is not a positive integer: + call endrun('dyn_readnl: ERROR: se_npes must either be > 0 or exactly -1') + else if (se_npes > npes) then + ! se_npes is too large: + call endrun('dyn_readnl: ERROR: se_npes must be <= number of atmosphere pes (npes)') end if ! Initialize the SE structure that holds the MPI decomposition information @@ -382,11 +380,9 @@ subroutine dyn_readnl(NLFileName) if (fv_nphys > 0) then ! Use CSLAM for tracer advection qsize = thermodynamic_active_species_num ! 
number tracers advected by GLL - ntrac = pcnst ! number tracers advected by CSLAM else ! Use GLL for tracer advection qsize = pcnst - ntrac = 0 end if if (rsplit < 1) then @@ -597,7 +593,6 @@ subroutine dyn_init(cam_runtime_opts, dyn_in, dyn_out) use time_mod, only: time_at use control_mod, only: runtype, raytau0, raykrange, rayk0, molecular_diff, nu_top use test_fvm_mapping, only: test_mapping_addfld - !use phys_control, only: phys_getopts use control_mod, only: vert_remap_uvTq_alg, vert_remap_tracer_alg ! Dummy arguments: @@ -968,13 +963,13 @@ subroutine dyn_init(cam_runtime_opts, dyn_in, dyn_out) end if ! constituent indices for waccm-x -! if ( cam_runtime_opts%waccmx_option() == 'ionosphere' .or. & -! cam_runtime_opts%waccmx_option() == 'neutral' ) then -! call cnst_get_ind('O', ixo) -! call cnst_get_ind('O2', ixo2) -! call cnst_get_ind('H', ixh) -! call cnst_get_ind('H2', ixh2) -! end if + if ( cam_runtime_opts%waccmx_option() == 'ionosphere' .or. & + cam_runtime_opts%waccmx_option() == 'neutral' ) then + call cnst_get_ind('O', ixo) + call cnst_get_ind('O2', ixo2) + call cnst_get_ind('H', ixh) + call cnst_get_ind('H2', ixh2) + end if call test_mapping_addfld diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index cd7b270a..e3aad02e 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -42,7 +42,7 @@ module dyn_grid !SE dycore: use dimensions_mod, only: globaluniquecols, nelem, nelemd, nelemdmax, & - ne, np, npsq, fv_nphys, nlev, nc, ntrac + ne, np, npsq, fv_nphys, nlev, nlevp, nc, ntrac use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use hybvcoord_mod, only: hvcoord_t @@ -97,13 +97,6 @@ module dyn_grid ! Name of horizontal grid dimension in initial file. character(len=6) :: ini_grid_hdim_name = ' ' -type block_global_data - integer :: UniquePtOffset ! global index of first column in element - integer :: NumUniqueP ! number of unique columns in element - integer :: LocalID ! local index of element in a task - integer :: Owner ! task id of element owner -end type block_global_data - type(physics_column_t), allocatable :: local_dyn_columns(:) ! number of global dynamics columns. Set by SE dycore init. @@ -126,17 +119,17 @@ subroutine model_grid_init() ! decomposition based on the dynamics (SE) grid. use mpi, only: mpi_max - use vert_coord, only: vert_coord_init, pver, pverp + use vert_coord, only: vert_coord_init, pver use hycoef, only: hycoef_init, hypi, hypm, nprlev, & hyam, hybm, hyai, hybi, ps0 use physconst, only: thermodynamic_active_species_num use dynconst, only: dynconst_init use ref_pres, only: ref_pres_init use time_manager, only: get_nstep, get_step_size - use dp_mapping, only: dp_init, dp_write, nphys_pts - use native_mapping, only: do_native_mapping, create_native_mapping_files use cam_grid_support, only: hclen=>max_hcoordname_len use physics_grid, only: phys_grid_init + use dp_mapping, only: dp_init, dp_write, nphys_pts + use native_mapping, only: do_native_mapping, create_native_mapping_files !SE dycore: use parallel_mod, only: par @@ -178,7 +171,8 @@ subroutine model_grid_init() ! Set vertical coordinate information not provided by namelist: call vert_coord_init(1, pver) - ! Initialize SE-dycore specific variables: + ! Initialize SE-dycore specific variables, + ! note that this must be done before "nlev" can be used: call dimensions_mod_init() ! 
Initialize total number of physics points per spectral element: @@ -202,32 +196,32 @@ subroutine model_grid_init() !Allocate SE dycore "hvcoord" structure: !+++++++ - allocate(hvcoord%hyai(pverp), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%hyai(pverp)', & + allocate(hvcoord%hyai(nlevp), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%hyai(nlevp)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%hyam(pver), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%hyam(pver)', & + allocate(hvcoord%hyam(nlev), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%hyam(nlev)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%hybi(pverp), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%hybi(pverp)', & + allocate(hvcoord%hybi(nlevp), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%hybi(nlevp)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%hybm(pver), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%hybm(pver)', & + allocate(hvcoord%hybm(nlev), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%hybm(nlev)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%hybd(pver), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%hybd(pver)', & + allocate(hvcoord%hybd(nlev), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%hybd(nlev)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%etam(pver), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%etam(pver)', & + allocate(hvcoord%etam(nlev), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%etam(nlev)', & file=__FILE__, line=__LINE__) - allocate(hvcoord%etai(pverp), stat=ierr) - call check_allocate(ierr, subname, 'hvcoord%etai(pverp)', & + allocate(hvcoord%etai(nlevp), stat=ierr) + call check_allocate(ierr, subname, 'hvcoord%etai(nlevp)', & file=__FILE__, line=__LINE__) !+++++++ diff --git a/src/dynamics/se/native_mapping.F90 b/src/dynamics/se/native_mapping.F90 index 86316f2c..63b9aa7c 100644 --- a/src/dynamics/se/native_mapping.F90 +++ b/src/dynamics/se/native_mapping.F90 @@ -28,7 +28,6 @@ module native_mapping subroutine native_mapping_readnl(NLFileName) use shr_nl_mod, only: find_group_name => shr_nl_find_group_name - use shr_file_mod, only: shr_file_getunit, shr_file_freeunit character(len=*), intent(in) :: NLFileName @@ -49,8 +48,7 @@ subroutine native_mapping_readnl(NLFileName) if(masterproc) then exist=.true. write(iulog,*) sub//': Check for native_mapping_nl namelist in ',trim(nlfilename) - unitn = shr_file_getunit() - open( unitn, file=trim(nlfilename), status='old' ) + open( newunit=unitn, file=trim(nlfilename), status='old' ) call find_group_name(unitn, 'native_mapping_nl', status=ierr) if(ierr/=0) then @@ -65,7 +63,6 @@ subroutine native_mapping_readnl(NLFileName) if(len_trim(native_mapping_outgrids(1))==0) exist=.false. 
end if close(unitn) - call shr_file_freeunit(unitn) end if call mpi_bcast(exist, 1, mpi_logical, mstrid, mpicom, ierr) @@ -544,7 +541,7 @@ subroutine create_native_mapping_files(par, elem, maptype, ncol, clat, clon, are ierr = pio_put_var(ogfile, areaB_id, areaB) deallocate(areaB) - allocate(grid_imask(ncol)) + allocate(grid_imask(ncol), stat=ierr) call check_allocate(ierr, subname, 'grid_imask(ncol)', & file=__FILE__, line=__LINE__) diff --git a/src/dynamics/se/stepon.F90 b/src/dynamics/se/stepon.F90 index 7d5466f5..d18e08f2 100644 --- a/src/dynamics/se/stepon.F90 +++ b/src/dynamics/se/stepon.F90 @@ -61,6 +61,9 @@ subroutine stepon_run1(dtime_out, cam_runtime_opts, phys_state, phys_tend, dyn_i if (iam < par%nprocs) then if (tstep <= 0) call endrun('stepon_run1: bad tstep') if (dtime_out <= 0) call endrun('stepon_run1: bad dtime') + + ! write diagnostic fields on gll grid and initial file + call diag_dynvar_ic(dyn_out%elem, dyn_out%fvm) end if ! Synchronize all PEs and then transfer dynamics variables to physics: @@ -177,15 +180,13 @@ end subroutine stepon_final !========================================================================================= -!Remove once "outfld" is enabled in CAMDEN -JN: -#if 0 - subroutine diag_dynvar_ic(elem, fvm) !use constituents, only: cnst_type, cnst_name - use cam_history, only: write_inithist, outfld, hist_fld_active, fieldname_len + !use cam_history, only: write_inithist, outfld, hist_fld_active, fieldname_len use dyn_grid, only: TimeLevel - !use physconst, only: get_sum_species, get_ps,thermodynamic_active_species_idx - !use physconst, only: thermodynamic_active_species_idx_dycore,get_dp_ref + use physconst, only: thermodynamic_active_species_idx + use physconst, only: thermodynamic_active_species_idx_dycore + use dyn_thermo, only: get_sum_species, get_ps, get_dp_ref use hycoef, only: hyai, hybi, ps0 use cam_abortutils, only: endrun, check_allocate @@ -195,7 +196,7 @@ subroutine diag_dynvar_ic(elem, fvm) use hybrid_mod, only: config_thread_region, get_loop_ranges use hybrid_mod, only: hybrid_t use dimensions_mod, only: np, npsq, nc, nhc, fv_nphys, qsize, ntrac, nlev - !use dimensions_mod, only: cnst_name_gll + use dimensions_mod, only: cnst_name_gll use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct use fvm_mapping, only: fvm2dyn @@ -208,7 +209,7 @@ subroutine diag_dynvar_ic(elem, fvm) integer :: ie, i, j, k, m, m_cnst, nq integer :: tl_f, tl_qdp integer :: iret - character(len=fieldname_len) :: tfname + !character(len=fieldname_len) :: tfname !Uncomment once 'fieldname_len' or an equivalent is available -JN type(hybrid_t) :: hybrid integer :: nets, nete @@ -266,6 +267,9 @@ subroutine diag_dynvar_ic(elem, fvm) !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: #endif +!Remove once "outfld" is enabled in CAMDEN -JN: +#if 0 + if (hist_fld_active('U_gll') .or. hist_fld_active('V_gll')) then do ie = 1, nelemd do j = 1, np @@ -314,7 +318,7 @@ subroutine diag_dynvar_ic(elem, fvm) end if if (hist_fld_active('PS_gll')) then - allocate(fld_2d(np,np)) + allocate(fld_2d(np,np), stat=iret) call check_allocate(iret, subname, 'fld_2d(np, np)', & file=__FILE__, line=__LINE__) @@ -337,13 +341,16 @@ subroutine diag_dynvar_ic(elem, fvm) end do end if - if (write_inithist()) then -!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: -#if 0 - allocate(fld_2d(np,np)) +!Remove once history output is available in CAMDEN -JN: +#endif + + !if (write_inithist()) then !Un-comment once history output is available -JN + if (.false.) 
then !Remove once history output is available -JN + allocate(fld_2d(np,np), stat=iret) call check_allocate(iret, subname, 'fld_2d(np, np)', & file=__FILE__, line=__LINE__) + do ie = 1, nelemd call get_ps(1,np,1,np,1,nlev,qsize,elem(ie)%state%Qdp(:,:,:,:,tl_Qdp),& thermodynamic_active_species_idx_dycore,elem(ie)%state%dp3d(:,:,:,tl_f),fld_2d,hyai(1)*ps0) do j = 1, np @@ -351,20 +358,21 @@ subroutine diag_dynvar_ic(elem, fvm) ftmp(i+(j-1)*np,1,1) = fld_2d(i,j) end do end do - call outfld('PS&IC', ftmp(:,1,1), npsq, ie) +! call outfld('PS&IC', ftmp(:,1,1), npsq, ie) end do deallocate(fld_2d) - if (fv_nphys < 1) then - allocate(factor_array(np,np,nlev), stat=iret) - call check_allocate(iret, subname, 'factor_array(np,np,nlev)', & - file=__FILE__, line=__LINE__) - end if -#endif + if (fv_nphys < 1) then + allocate(factor_array(np,np,nlev), stat=iret) + call check_allocate(iret, subname, 'factor_array(np,np,nlev)', & + file=__FILE__, line=__LINE__) + end if - do ie = 1, nelemd - call outfld('T&IC', RESHAPE(elem(ie)%state%T(:,:,:,tl_f), (/npsq,nlev/)), npsq, ie) - call outfld('U&IC', RESHAPE(elem(ie)%state%v(:,:,1,:,tl_f), (/npsq,nlev/)), npsq, ie) - call outfld('V&IC', RESHAPE(elem(ie)%state%v(:,:,2,:,tl_f), (/npsq,nlev/)), npsq, ie) + do ie = 1, nelemd + + !Un-comment once history output is available -JN: + !call outfld('T&IC', RESHAPE(elem(ie)%state%T(:,:,:,tl_f), (/npsq,nlev/)), npsq, ie) + !call outfld('U&IC', RESHAPE(elem(ie)%state%v(:,:,1,:,tl_f), (/npsq,nlev/)), npsq, ie) + !call outfld('V&IC', RESHAPE(elem(ie)%state%v(:,:,2,:,tl_f), (/npsq,nlev/)), npsq, ie) !REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: #if 0 @@ -384,11 +392,10 @@ subroutine diag_dynvar_ic(elem, fvm) end if end do end if +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: #endif end do -!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: -#if 0 if (fv_nphys > 0) then !JMD $OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(hybrid,nets,nete,n) !JMD hybrid = config_thread_region(par,'horizontal') @@ -414,6 +421,10 @@ subroutine diag_dynvar_ic(elem, fvm) file=__FILE__, line=__LINE__) llimiter = .true. + +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#if 0 + do ie = nets, nete call get_sum_species(1,nc,1,nc,1,nlev,ntrac,fvm(ie)%c(1:nc,1:nc,:,:),thermodynamic_active_species_idx,factor_array) factor_array(:,:,:) = 1.0_r8/factor_array(:,:,:) @@ -435,22 +446,20 @@ subroutine diag_dynvar_ic(elem, fvm) end do end do +!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: +#endif + deallocate(fld_fvm) deallocate(fld_gll) deallocate(llimiter) end if deallocate(factor_array) -!REMOVE ONCE TRACERS/CHEMISTRY IS ENABLED -JN: -#endif end if ! if (write_inithist) deallocate(ftmp) end subroutine diag_dynvar_ic -!Remove once "outfld" is enabled in CAMDEN -JN: -#endif - !========================================================================================= end module stepon diff --git a/src/dynamics/tests/inic_analytic.F90 b/src/dynamics/tests/inic_analytic.F90 index 8a65e2aa..23bfef3f 100644 --- a/src/dynamics/tests/inic_analytic.F90 +++ b/src/dynamics/tests/inic_analytic.F90 @@ -17,6 +17,7 @@ module inic_analytic public :: analytic_ic_active ! forwarded from init_analytic_utils public :: analytic_ic_set_ic ! 
Set analytic initial conditions + public :: dyn_set_inic_col interface analytic_ic_set_ic module procedure dyn_set_inic_cblock @@ -37,7 +38,7 @@ module inic_analytic CONTAINS !============================================================================== - subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & + subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, zint, U, V, T, & PS, PHIS_IN, PHIS_OUT, Q, m_cnst, mask, verbose) use cam_initfiles, only: pertlim #ifdef ANALYTIC_IC @@ -58,6 +59,7 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) integer, intent(in) :: glob_ind(:) ! global column index + real(r8), optional, intent(in) :: zint(:,:) ! height at layer interfaces real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity real(r8), optional, intent(inout) :: T(:,:) ! temperature @@ -166,7 +168,7 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) case('moist_baroclinic_wave_dcmip2016', 'dry_baroclinic_wave_dcmip2016') - call bc_wav_set_ic(vcoord, latvals, lonvals, U=U, V=V, T=T, PS=PS, & + call bc_wav_set_ic(vcoord, latvals, lonvals, zint=zint, U=U, V=V, T=T, PS=PS, & PHIS=PHIS_OUT, Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) case('dry_baroclinic_wave_jw2006') @@ -174,8 +176,8 @@ subroutine dyn_set_inic_col(vcoord, latvals, lonvals, glob_ind, U, V, T, & PHIS=PHIS_OUT, Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) case('us_standard_atmosphere') - call us_std_atm_set_ic(latvals, lonvals, U=U, V=V, T=T, PS=PS, PHIS=PHIS_IN, & - Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) + call us_std_atm_set_ic(latvals, lonvals, zint=zint, U=U, V=V, T=T, PS=PS, PHIS_IN=PHIS_IN, & + PHIS_OUT=PHIS_OUT, Q=Q, m_cnst=m_cnst, mask=mask_use, verbose=verbose_use) case default call endrun(subname//': Unknown analytic_ic_type, "'//trim(analytic_ic_type)//'"') @@ -352,8 +354,8 @@ subroutine dyn_set_inic_cblock(vcoord,latvals, lonvals, glob_ind, U, V, T, & glob_ind(bbeg:bend), PS=PS(:,i), verbose=verbose) end if if (present(PHIS_OUT)) then - call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & - glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) + call dyn_set_inic_col(vcoord,latvals(bbeg:bend), lonvals(bbeg:bend), & + glob_ind(bbeg:bend), PHIS_OUT=PHIS_OUT(:,i), verbose=verbose) end if end if if (present(Q)) then diff --git a/src/dynamics/tests/inic_analytic_utils.F90 b/src/dynamics/tests/inic_analytic_utils.F90 index f40069cf..c574eced 100644 --- a/src/dynamics/tests/inic_analytic_utils.F90 +++ b/src/dynamics/tests/inic_analytic_utils.F90 @@ -42,7 +42,6 @@ subroutine analytic_ic_readnl(nlfile) use mpi, only: MPI_CHARACTER, MPI_LOGICAL use shr_nl_mod, only: find_group_name => shr_nl_find_group_name - use shr_file_mod, only: shr_file_getunit, shr_file_freeunit use spmd_utils, only: masterproc, masterprocid, mpicom use shr_string_mod, only: shr_string_toLower @@ -61,8 +60,7 @@ subroutine analytic_ic_readnl(nlfile) namelist /analytic_ic_nl/ analytic_ic_type if (masterproc) then - unitn = shr_file_getunit() - open(unitn, file=trim(nlfile), status='old') + open(newunit=unitn, file=trim(nlfile), status='old') call find_group_name(unitn, 'analytic_ic_nl', status=ierr) if (ierr == 0) then nl_not_found = .false. 
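For reference, the open(newunit=...) usage in the hunk above is the standard Fortran 2008 replacement for explicit unit bookkeeping: the runtime selects an unused unit number, so no shr_file_getunit/shr_file_freeunit calls are needed. A minimal, self-contained sketch of the same open/read-namelist/close flow follows; the file name 'example.nml', group name 'example_nl', and variable 'found' are placeholders for illustration only and are not part of this patch:

    program newunit_demo
       implicit none
       integer :: unitn, ierr
       logical :: found
       namelist /example_nl/ found

       found = .false.
       ! newunit= asks the runtime for an unused unit number, so there is
       ! no need to reserve or release units by hand.
       open(newunit=unitn, file='example.nml', status='old', iostat=ierr)
       if (ierr == 0) then
          read(unitn, nml=example_nl, iostat=ierr)
          close(unitn)
       end if
    end program newunit_demo
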
@@ -77,7 +75,6 @@ subroutine analytic_ic_readnl(nlfile) nl_not_found = .true. end if close(unitn) - call shr_file_freeunit(unitn) analytic_ic_type = shr_string_toLower(analytic_ic_type) end if diff --git a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 index 3061cd41..8ddc5684 100644 --- a/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 +++ b/src/dynamics/tests/initial_conditions/ic_baroclinic.F90 @@ -76,8 +76,8 @@ module ic_baroclinic contains - subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & - Q, Z, m_cnst, mask, verbose) + subroutine bc_wav_set_ic(vcoord,latvals, lonvals, zint, U, V, T, PS, PHIS, & + Q, m_cnst, mask, verbose) use dyn_tests_utils, only: vc_moist_pressure, vc_dry_pressure, vc_height !use constituents, only: cnst_name !use const_init, only: cnst_init_default @@ -95,16 +95,17 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) ! z_k for vccord 1) + real(r8), optional, intent(in) :: zint(:,:) ! interface height (ncol,ilev), ordered top to bottom real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity real(r8), optional, intent(inout) :: T(:,:) ! temperature real(r8), optional, intent(inout) :: PS(:) ! surface pressure real(r8), optional, intent(out) :: PHIS(:) ! surface geopotential real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) - real(r8), optional, intent(inout) :: Z(:,:) ! height (ncol, lev) integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) logical, optional, intent(in) :: mask(:) ! only init where .true. logical, optional, intent(in) :: verbose ! for internal use + ! Local variables logical, allocatable :: mask_use(:) logical :: verbose_use @@ -121,7 +122,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & logical :: lU, lV, lT, lQ, l3d_vars logical :: cnst1_is_moisture real(r8), allocatable :: pdry_half(:), pwet_half(:),zdry_half(:),zk(:) - real(r8), allocatable :: zlocal(:,:)! height of full level p for test tracer initialization + real(r8), allocatable :: zmid(:,:) ! layer midpoint heights for test tracer initialization if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then ! @@ -132,11 +133,16 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & call endrun(subname//' ERROR: For iterate_z_given_pressure to work ptop must be less than 100hPa') end if ztop = iterate_z_given_pressure(ptop,.false.,ptop,0.0_r8,-1000._r8) !Find height of top pressure surface + else if (vcoord == vc_height) then - ! - ! height-based vertical coordinate - ! - call endrun(subname//' ERROR: z-based vertical coordinate not coded yet') + ! + ! height-based vertical coordinate + ! + if (present(zint)) then + ztop = zint(1,1) + else + call endrun(subname//' ERROR: z-based vertical coordinate requires using optional arg zint') + end if else call endrun(subname//' ERROR: vcoord value out of range') end if @@ -174,7 +180,7 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & !******************************* ! if (present(PS)) then - if (vcoord == vc_moist_pressure) then + if (vcoord == vc_moist_pressure .or. 
vcoord == vc_height) then where(mask_use) PS = psurf_moist end where @@ -230,8 +236,8 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & nlev = size(Q, 2) ! check whether first constituent in Q is water vapor. cnst1_is_moisture = m_cnst(1) == 1 - allocate(zlocal(size(Q, 1),nlev), stat=iret) - call check_allocate(iret, subname, 'zlocal(size(Q, 1),nlev)', & + allocate(zmid(size(Q, 1),nlev), stat=iret) + call check_allocate(iret, subname, 'zmid(size(Q, 1),nlev)', & file=__FILE__, line=__LINE__) end if @@ -267,22 +273,21 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & psurface = psurf_moist-wvp end if - do k=1,nlev - ! compute pressure levels - pk = hyam(k)*ps0 + hybm(k)*psurface - ! find height of pressure surface - zk(k) = iterate_z_given_pressure(pk,(vcoord == vc_dry_pressure),ptop,latvals(i),ztop) - end do + if (vcoord == vc_moist_pressure .or. vcoord == vc_dry_pressure) then + do k=1,nlev + ! compute pressure levels + pk = hyam(k)*ps0 + hybm(k)*psurface + ! find height of pressure surface + zk(k) = iterate_z_given_pressure(pk,(vcoord == vc_dry_pressure),ptop,latvals(i),ztop) + end do + else if (vcoord == vc_height) then + zk = 0.5_r8*(zint(i,1:nlev) + zint(i,2:nlev+1)) + end if if (lq) then - if (present(Z)) then - zlocal(i,1:nlev) = Z(i,1:nlev) - else - zlocal(i,1:nlev) = zk(:) - end if + zmid(i,:) = zk(:) end if - do k=1,nlev ! ! wind components @@ -293,7 +298,8 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & ! ! temperature and moisture for moist vertical coordinates ! - if ((lq.or.lt).and.(vcoord == vc_moist_pressure)) then + if ( (lq .or. lt) .and. & + (vcoord==vc_moist_pressure .or. vcoord==vc_height) ) then if (analytic_ic_is_moist()) then pk = moist_pressure_given_z(zk(k),latvals(i)) qk = qv_given_moist_pressure(pk,latvals(i)) @@ -359,27 +365,22 @@ subroutine bc_wav_set_ic(vcoord,latvals, lonvals, U, V, T, PS, PHIS, & #if 0 if (lq) then ncnst = size(m_cnst, 1) - if ((vcoord == vc_moist_pressure) .or. (vcoord == vc_dry_pressure)) then - do m = 1, ncnst + do m = 1, ncnst - ! water vapor already done above - if (m_cnst(m) == 1) cycle + ! water vapor already done above + if (m_cnst(m) == 1) cycle - call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m),& - mask=mask_use, verbose=verbose_use, notfound=.false.,& - z=zlocal) + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m),& + mask=mask_use, verbose=verbose_use, notfound=.false.,& + z=zmid) - end do - - end if ! vcoord + end do end if ! lq #else if (lq) then - if ((vcoord == vc_moist_pressure) .or. 
(vcoord == vc_dry_pressure)) then - !Initialize cloud liquid and rain until constituent routines are enabled: - Q(:,:,ix_cld_liq) = 0.0_r8 - Q(:,:,ix_rain) = 0.0_r8 - end if + !Initialize cloud liquid and rain until constituent routines are enabled: + Q(:,:,ix_cld_liq) = 0.0_r8 + Q(:,:,ix_rain) = 0.0_r8 end if #endif diff --git a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 index bf627283..0f91f131 100644 --- a/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 +++ b/src/dynamics/tests/initial_conditions/ic_us_standard_atm.F90 @@ -30,8 +30,8 @@ module ic_us_standard_atmosphere CONTAINS !========================================================================================= -subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & - Q, m_cnst, mask, verbose) +subroutine us_std_atm_set_ic(latvals, lonvals, zint, U, V, T, PS, PHIS_IN, & + PHIS_OUT, Q, m_cnst, mask, verbose) !---------------------------------------------------------------------------- ! @@ -43,11 +43,13 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & ! Arguments real(r8), intent(in) :: latvals(:) ! lat in degrees (ncol) real(r8), intent(in) :: lonvals(:) ! lon in degrees (ncol) + real(r8), optional, intent(in) :: zint(:,:) ! height at layer interfaces real(r8), optional, intent(inout) :: U(:,:) ! zonal velocity real(r8), optional, intent(inout) :: V(:,:) ! meridional velocity real(r8), optional, intent(inout) :: T(:,:) ! temperature real(r8), optional, intent(inout) :: PS(:) ! surface pressure - real(r8), optional, intent(in) :: PHIS(:) ! surface geopotential + real(r8), optional, intent(in) :: PHIS_IN(:) ! surface geopotential + real(r8), optional, intent(out) :: PHIS_OUT(:)! surface geopotential real(r8), optional, intent(inout) :: Q(:,:,:) ! tracer (ncol, lev, m) integer, optional, intent(in) :: m_cnst(:) ! tracer indices (reqd. if Q) logical, optional, intent(in) :: mask(:) ! Only init where .true. @@ -58,14 +60,19 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & logical :: verbose_use integer :: i, k, m integer :: ncol - integer :: nlev + integer :: nlev, nlevp integer :: ncnst integer :: iret character(len=*), parameter :: subname = 'us_std_atm_set_ic' real(r8) :: psurf(1) - real(r8), allocatable :: pmid(:), zmid(:) + real(r8), allocatable :: pmid(:), zmid(:), zmid2d(:,:) !---------------------------------------------------------------------------- + ! check input consistency + if (present(zint) .and. present(PHIS_IN)) then + call endrun(subname//': Only one of the args zint and PHIS_IN can be present') + end if + ncol = size(latvals, 1) allocate(mask_use(ncol), stat=iret) call check_allocate(iret, subname, 'mask_use(ncol)', & @@ -86,6 +93,13 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & verbose_use = .true. end if + if (present(PHIS_OUT)) then + PHIS_OUT = 0.0_r8 + if (masterproc .and. 
verbose_use) then + write(iulog,*) ' PHIS initialized by '//subname + end if + end if + nlev = -1 if (present(U)) then nlev = size(U, 2) @@ -112,10 +126,6 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & end if if (present(T)) then - if (.not.present(PHIS)) then - call endrun(subname//': PHIS must be specified to initiallize T') - end if - nlev = size(T, 2) allocate(pmid(nlev), stat=iret) call check_allocate(iret, subname, 'pmid(nlev)', & file=__FILE__, line=__LINE__) @@ -124,20 +134,37 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & call check_allocate(iret, subname, 'zmid(nlev)', & file=__FILE__, line=__LINE__) - do i = 1, ncol - if (mask_use(i)) then - ! get surface pressure - call std_atm_pres(PHIS(i:i)/gravit, psurf) - ! get pressure levels - do k = 1, nlev - pmid(k) = hyam(k)*ps0 + hybm(k)*psurf(1) - end do - ! get height of pressure level - call std_atm_height(pmid, zmid) - ! given height get temperature - call std_atm_temp(zmid, T(i,:)) - end if - end do + if (present(PHIS_IN)) then + + do i = 1, ncol + if (mask_use(i)) then + ! get surface pressure + call std_atm_pres(PHIS_IN(i:i)/gravit, psurf) + ! get pressure levels + do k = 1, nlev + pmid(k) = hyam(k)*ps0 + hybm(k)*psurf(1) + end do + ! get height of pressure level + call std_atm_height(pmid, zmid) + ! given height get temperature + call std_atm_temp(zmid, T(i,:)) + end if + end do + + else if (present(zint)) then + + do i = 1, ncol + if (mask_use(i)) then + zmid = 0.5_r8*(zint(i,1:nlev) + zint(i,2:nlev+1)) + ! given height get temperature + call std_atm_temp(zmid, T(i,:)) + end if + end do + + else + call endrun(subname//': either PHIS or zint must be specified to initiallize T') + end if + deallocate(pmid, zmid) if(masterproc .and. verbose_use) then @@ -146,15 +173,29 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & end if if (present(PS)) then - if (.not.present(PHIS)) then - call endrun(subname//': PHIS must be specified to initiallize PS') + + if (present(PHIS_IN)) then + + do i = 1, ncol + if (mask_use(i)) then + call std_atm_pres(PHIS_IN(i:i)/gravit, PS(i:i)) + end if + end do + + else if (present(zint)) then + + nlevp = size(zint, 2) + + do i = 1, ncol + if (mask_use(i)) then + call std_atm_pres(zint(i:i,nlevp), PS(i:i)) + end if + end do + + else + call endrun(subname//': either PHIS or zint must be specified to initiallize PS') end if - do i = 1, ncol - if (mask_use(i)) then - call std_atm_pres(PHIS(i:i)/gravit, PS(i:i)) - end if - end do if(masterproc .and. verbose_use) then write(iulog,*) ' PS initialized by "',subname,'"' end if @@ -162,6 +203,11 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & if (present(Q)) then nlev = size(Q, 2) + if (present(zint)) then + allocate(zmid2d(ncol,nlev)) + zmid2d = 0.5_r8*(zint(:,1:nlev) + zint(:,2:nlev+1)) + end if + ncnst = size(m_cnst, 1) do m = 1, ncnst if (m_cnst(m) == 1) then @@ -177,8 +223,13 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & write(iulog,*) ' ', trim(cnst_name(m_cnst(m))), ' initialized by '//subname end if else - call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& - mask=mask_use, verbose=verbose_use, notfound=.false.) + if (present(zint)) then + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& + mask=mask_use, verbose=verbose_use, notfound=.false., z=zmid2d) + else + call cnst_init_default(m_cnst(m), latvals, lonvals, Q(:,:,m_cnst(m)),& + mask=mask_use, verbose=verbose_use, notfound=.false.) 
+ end if #else else !Initialize cloud liquid and rain until constituent routines are enabled: @@ -186,6 +237,9 @@ subroutine us_std_atm_set_ic(latvals, lonvals, U, V, T, PS, PHIS, & #endif end if end do + + if (allocated(zmid2d)) deallocate(zmid2d) + end if deallocate(mask_use) diff --git a/src/dynamics/utils/dyn_thermo.F90 b/src/dynamics/utils/dyn_thermo.F90 index dbdced82..989da040 100644 --- a/src/dynamics/utils/dyn_thermo.F90 +++ b/src/dynamics/utils/dyn_thermo.F90 @@ -17,8 +17,10 @@ module dyn_thermo ! get_cp ! get_cp_dry ! get_kappa_dry + ! get_ps ! get_dp ! get_dp_ref + ! get_sum_species ! get_molecular_diff_coef ! get_molecular_diff_coef_reference ! get_rho_dry @@ -238,6 +240,54 @@ end subroutine get_kappa_dry ! !**************************************************************************************************************** ! + ! get pressure from dry pressure and thermodynamic active species (e.g., forms of water: water vapor, cldliq, etc.) + ! + !**************************************************************************************************************** + ! + subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop) + + use physconst, only: get_ps_phys=>get_ps + + !Subroutine (dummy) arguments: + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_dyn), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,1:ntrac) ! Tracer array + real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_dyn), intent(out) :: ps(i0:i1,j0:j1) ! surface pressure + real(kind_dyn), intent(in) :: ptop + integer, intent(in) :: active_species_idx(:) + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop) + +#else + + !Declare local variables: + real(kind_phys) :: tracer_mass_phys(i0:i1,j0:j1,k0:k1,1:ntrac) ! Tracer array + real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness + real(kind_phys) :: ps_phys(i0:i1,j0:j1) ! surface pressure + real(kind_phys) :: ptop_phys + + !Set local variables: + tracer_mass_phys = real(tracer_mass, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + ptop_phys = real(ptop, kind_phys) + + !Call physics routine using local vriables with matching kinds: + call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,active_species_idx,dp_dry_phys,ps_phys,ptop_phys) + + !Set output variables back to dynamics kind: + ps = real(ps_phys, kind_dyn) + +#endif + + end subroutine get_ps + ! + !**************************************************************************************************************** + ! ! Compute pressure level thickness from dry pressure and thermodynamic active ! species mixing ratios ! 
@@ -278,14 +328,14 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx real(kind_phys), allocatable :: ps(:,:) real(kind_phys), allocatable :: ptop - !Set local variables: - tracer_phys = real(tracer, kind_phys) - dp_dry_phys = real(dp_dry, kind_phys) - !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_dp (dyn)' + !Set local variables: + tracer_phys = real(tracer, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + if (present(ptop)) then allocate(ptop_phys, stat=iret) call check_allocate(iret, subname, 'ptop', & @@ -376,6 +426,75 @@ subroutine get_dp_ref(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) end subroutine get_dp_ref ! + !**************************************************************************************************************** + ! + ! Compute sum of thermodynamically active species + ! + ! tracer is in units of dry mixing ratio unless optional argument dp_dry is present in which case tracer is + ! in units of "mass" (=m*dp) + ! + !**************************************************************************************************************** + ! + subroutine get_sum_species(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum_species,dp_dry) + + use physconst, only: get_sum_species_phys=>get_sum_species + + !Subroutine (dummy) arguments: + + integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac + real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracer array + integer, intent(in) :: active_species_idx(:) ! index for thermodynamic active tracers + real(kind_dyn), optional, intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness is present + ! then tracer is in units of mass + real(kind_dyn), intent(out) :: sum_species(i0:i1,j0:j1,k0:k1) ! sum species + +#ifndef DYN_PHYS_KIND_DIFF + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum_species, & + dp_dry=dp_dry) + +#else + + !Declare local variables: + real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracer array + real(kind_phys) :: sum_species_phys(i0:i1,j0:j1,k0:k1) ! sum species + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) ! dry pressure level thickness is present + + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_sum_species (dyn)' + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + + if (present(dp_dry)) then + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + dp_dry_phys = real(dp_dry, kind_phys) + end if + + !Call physics routine using local vriables with matching kinds: + call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,active_species_idx,sum_species_phys, & + dp_dry=dp_dry_phys) + + !Set output variables back to dynamics kind: + sum_species = real(sum_species_phys, kind_dyn) + + !Deallocate variables: + if (allocated(dp_dry_phys)) then + deallocate(dp_dry_phys) + end if + +#endif + + end subroutine get_sum_species + ! !************************************************************************************************************************* ! ! 
compute 3D molecular diffusion and thermal conductivity diff --git a/src/physics/utils/phys_comp.F90 b/src/physics/utils/phys_comp.F90 index 25b3261a..ec6ef65b 100644 --- a/src/physics/utils/phys_comp.F90 +++ b/src/physics/utils/phys_comp.F90 @@ -179,6 +179,7 @@ subroutine phys_run2(dtime_phys, cam_runtime_opts, phys_state, phys_tend, & use cam_abortutils, only: endrun use runtime_obj, only: runtime_options use physics_types, only: physics_state, physics_tend + use physics_types, only: physics_types_tstep_init use physics_grid, only: columns_on_task use camsrfexch, only: cam_in_t, cam_out_t use cam_ccpp_cap, only: cam_ccpp_physics_timestep_initial @@ -225,6 +226,9 @@ subroutine phys_run2(dtime_phys, cam_runtime_opts, phys_state, phys_tend, & call physics_read_data(ncdata, suite_names, data_frame, & read_initialized_variables=use_init_variables) + ! Initialize host model variables that must be done each time step: + call physics_types_tstep_init() + ! Initialize the physics time step call cam_ccpp_physics_timestep_initial(phys_suite_name, dtime_phys, & errmsg, errflg) diff --git a/src/utils/cam_pio_utils.F90 b/src/utils/cam_pio_utils.F90 index 70ca373c..73f67501 100644 --- a/src/utils/cam_pio_utils.F90 +++ b/src/utils/cam_pio_utils.F90 @@ -728,7 +728,7 @@ subroutine find_iodesc(ldimlens, fdimlens, dtype, map, iodesc_p, found, perm) iodesc_p => curr end if if(masterproc .and. (debug_output > DEBUGOUT_INFO)) then - write(iulog,*) "FIND_IODESC: Using decomp, '"//trim(curr%tag)//"'" + write(iulog,*) "FIND_IODESC: Using decomp, '",trim(curr%tag),"'" call shr_sys_flush(iulog) end if diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 623644ac..5c98a054 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -518,7 +518,7 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): cam_nml_attr_dict = dict() #Set error message: - ermsg = "physics_suite specified in user_nl_cam doesn't match any suites\n" + ermsg = "physics_suite specified in user_nl_cam, 'cam6', doesn't match any suites\n" ermsg += "listed in CAM_CONFIG_OPTS" #Create namelist file: diff --git a/test/unit/sample_files/physics_types_complete.F90 b/test/unit/sample_files/physics_types_complete.F90 index b4ec3209..07bfe6b4 100644 --- a/test/unit/sample_files/physics_types_complete.F90 +++ b/test/unit/sample_files/physics_types_complete.F90 @@ -69,6 +69,7 @@ module physics_types_complete !! public interfaces public :: allocate_physics_types_complete_fields + public :: physics_types_complete_tstep_init CONTAINS @@ -184,4 +185,29 @@ subroutine allocate_physics_types_complete_fields(horizontal_dimension, end if end subroutine allocate_physics_types_complete_fields + subroutine physics_types_complete_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_complete_tstep_init" + + ! standard_var: Standard non ddt variable + standard_var = 0 + + ! latitude: Latitude + phys_state%latitude = 0._kind_phys + + ! longitude: Longitude + phys_state%longitude = 0._kind_phys + + ! q: Constituent mixing ratio + phys_state%q = 0._kind_phys + + ! ncol: Number of horizontal columns + phys_state%ncol = 0 + + ! 
pver: Number of vertical layers + phys_state%pver = 0 + + end subroutine physics_types_complete_tstep_init + end module physics_types_complete diff --git a/test/unit/sample_files/physics_types_ddt2.F90 b/test/unit/sample_files/physics_types_ddt2.F90 index aa4970e0..ac7080a3 100644 --- a/test/unit/sample_files/physics_types_ddt2.F90 +++ b/test/unit/sample_files/physics_types_ddt2.F90 @@ -59,6 +59,7 @@ module physics_types_ddt2 !! public interfaces public :: allocate_physics_types_ddt2_fields + public :: physics_types_ddt2_tstep_init CONTAINS @@ -145,4 +146,11 @@ subroutine allocate_physics_types_ddt2_fields(horizontal_dimension, vertical_lay end if end subroutine allocate_physics_types_ddt2_fields + subroutine physics_types_ddt2_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_ddt2_tstep_init" + + end subroutine physics_types_ddt2_tstep_init + end module physics_types_ddt2 diff --git a/test/unit/sample_files/physics_types_ddt_array.F90 b/test/unit/sample_files/physics_types_ddt_array.F90 index d4f577e9..2de800b7 100644 --- a/test/unit/sample_files/physics_types_ddt_array.F90 +++ b/test/unit/sample_files/physics_types_ddt_array.F90 @@ -47,6 +47,7 @@ module physics_types_ddt_array !! public interfaces public :: allocate_physics_types_ddt_array_fields + public :: physics_types_ddt_array_tstep_init CONTAINS @@ -132,4 +133,11 @@ subroutine allocate_physics_types_ddt_array_fields(horizontal_dimension, end if end subroutine allocate_physics_types_ddt_array_fields + subroutine physics_types_ddt_array_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_ddt_array_tstep_init" + + end subroutine physics_types_ddt_array_tstep_init + end module physics_types_ddt_array diff --git a/test/unit/sample_files/physics_types_ddt_eul.F90 b/test/unit/sample_files/physics_types_ddt_eul.F90 index 4ebf5605..e7ee7e71 100644 --- a/test/unit/sample_files/physics_types_ddt_eul.F90 +++ b/test/unit/sample_files/physics_types_ddt_eul.F90 @@ -41,6 +41,7 @@ module physics_types_ddt !! public interfaces public :: allocate_physics_types_ddt_fields + public :: physics_types_ddt_tstep_init CONTAINS @@ -99,4 +100,11 @@ subroutine allocate_physics_types_ddt_fields(horizontal_dimension, set_init_val_ end if end subroutine allocate_physics_types_ddt_fields + subroutine physics_types_ddt_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_ddt_tstep_init" + + end subroutine physics_types_ddt_tstep_init + end module physics_types_ddt diff --git a/test/unit/sample_files/physics_types_ddt_fv.F90 b/test/unit/sample_files/physics_types_ddt_fv.F90 index c90878f6..2446d024 100644 --- a/test/unit/sample_files/physics_types_ddt_fv.F90 +++ b/test/unit/sample_files/physics_types_ddt_fv.F90 @@ -41,6 +41,7 @@ module physics_types_ddt !! public interfaces public :: allocate_physics_types_ddt_fields + public :: physics_types_ddt_tstep_init CONTAINS @@ -99,4 +100,11 @@ subroutine allocate_physics_types_ddt_fields(horizontal_dimension, set_init_val_ end if end subroutine allocate_physics_types_ddt_fields + subroutine physics_types_ddt_tstep_init() + + !! 
Local variables + character(len=*), parameter :: subname = "physics_types_ddt_tstep_init" + + end subroutine physics_types_ddt_tstep_init + end module physics_types_ddt diff --git a/test/unit/sample_files/physics_types_ddt_se.F90 b/test/unit/sample_files/physics_types_ddt_se.F90 index 84868b2f..894f280f 100644 --- a/test/unit/sample_files/physics_types_ddt_se.F90 +++ b/test/unit/sample_files/physics_types_ddt_se.F90 @@ -41,6 +41,7 @@ module physics_types_ddt !! public interfaces public :: allocate_physics_types_ddt_fields + public :: physics_types_ddt_tstep_init CONTAINS @@ -99,4 +100,11 @@ subroutine allocate_physics_types_ddt_fields(horizontal_dimension, set_init_val_ end if end subroutine allocate_physics_types_ddt_fields + subroutine physics_types_ddt_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_ddt_tstep_init" + + end subroutine physics_types_ddt_tstep_init + end module physics_types_ddt diff --git a/test/unit/sample_files/physics_types_parameter.F90 b/test/unit/sample_files/physics_types_parameter.F90 index d603e168..bac3e7b5 100644 --- a/test/unit/sample_files/physics_types_parameter.F90 +++ b/test/unit/sample_files/physics_types_parameter.F90 @@ -36,6 +36,7 @@ module physics_types_parameter !! public interfaces public :: allocate_physics_types_parameter_fields + public :: physics_types_parameter_tstep_init CONTAINS @@ -94,4 +95,11 @@ subroutine allocate_physics_types_parameter_fields(horizontal_dimension, set_ini end if end subroutine allocate_physics_types_parameter_fields + subroutine physics_types_parameter_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_parameter_tstep_init" + + end subroutine physics_types_parameter_tstep_init + end module physics_types_parameter diff --git a/test/unit/sample_files/physics_types_simple.F90 b/test/unit/sample_files/physics_types_simple.F90 index 566a8e2d..20c642c9 100644 --- a/test/unit/sample_files/physics_types_simple.F90 +++ b/test/unit/sample_files/physics_types_simple.F90 @@ -34,6 +34,7 @@ module physics_types_simple !! public interfaces public :: allocate_physics_types_simple_fields + public :: physics_types_simple_tstep_init CONTAINS @@ -92,4 +93,11 @@ subroutine allocate_physics_types_simple_fields(horizontal_dimension, set_init_v end if end subroutine allocate_physics_types_simple_fields + subroutine physics_types_simple_tstep_init() + + !! Local variables + character(len=*), parameter :: subname = "physics_types_simple_tstep_init" + + end subroutine physics_types_simple_tstep_init + end module physics_types_simple diff --git a/test/unit/sample_files/reg_good_complete.xml b/test/unit/sample_files/reg_good_complete.xml index b42750f2..b3a80a5d 100644 --- a/test/unit/sample_files/reg_good_complete.xml +++ b/test/unit/sample_files/reg_good_complete.xml @@ -54,7 +54,7 @@ + units="K" type="real" phys_timestep_init_zero="true"> stand_var + units="None" type="physics_state" phys_timestep_init_zero="true"> Physics state variables updated by dynamical core From 0f9daf1ef2897bc314a66f58469c20184c9e3f0c Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Wed, 25 Aug 2021 20:58:43 -0600 Subject: [PATCH 39/45] Fix pylint errors. 
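This commit only touches pylint cleanup in the build and unit-test scripts: dictionary literals ({}) replace dict() calls, the unused 'din_loc_root' local is dropped from buildnml, and the unit tests now pass an explicit encoding to open(). A rough before/after sketch of the pattern being applied (illustrative only, not code taken verbatim from this patch):

    # Before: constructs flagged by pylint
    cam_nml_attr_dict = dict()
    with open("test.txt", "w") as test_fil:
        test_fil.write('!Namelist test file\n')

    # After: dict literal and an explicit file encoding
    cam_nml_attr_dict = {}
    with open("test.txt", "w", encoding='UTF-8') as test_fil:
        test_fil.write('!Namelist test file\n')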
--- cime_config/buildnml | 5 ++--- test/unit/cam_config_unit_tests.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/cime_config/buildnml b/cime_config/buildnml index 10471f93..83797d4d 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -61,8 +61,8 @@ def nml_attr_set(config): set namelist defaults. """ - #Create new namelist attribute dictionary: - cam_nml_attr_dict = dict() + #Create new (empty) namelist attribute dictionary: + cam_nml_attr_dict = {} if config: #Loop over all CAM configuration settings in config dictionary: @@ -98,7 +98,6 @@ def buildnml(case, caseroot, compname): # End if srcroot = case.get_value("SRCROOT") rundir = case.get_value("RUNDIR") - din_loc_root = case.get_value("DIN_LOC_ROOT") atm_ncpl = case.get_value("ATM_NCPL") cam_namelist_opts = case.get_value("CAM_NAMELIST_OPTS") cam_nml_use_case = case.get_value("CAM_NML_USE_CASE") diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 5c98a054..0f4e82cc 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -306,11 +306,11 @@ def test_config_ccpp_phys_set_check_single_suite(self): #Set "new" physics_suites value with one physics suite: self.test_config_cam.set_value("physics_suites", "kessler") - #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + #Create (empty) namelist attribute dictionary: + cam_nml_attr_dict = {} #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite = "adiabatic"\n') @@ -343,7 +343,7 @@ def test_config_ccpp_phys_set_check_multi_suite(self): cam_nml_attr_dict = dict() #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite = "adiabatic"\n') @@ -377,7 +377,7 @@ def test_config_ccpp_phys_set_missing_phys(self): ermsg += "in CAM_CONFIG_OPTS." #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') #Expect "CamConfigValError": @@ -412,7 +412,7 @@ def test_config_ccpp_phys_set_two_phys(self): ermsg += "Only one 'physics_suite' line is allowed." #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite = "adiabatic"\n') test_fil.write('physics_suite = "kessler"\n') @@ -449,7 +449,7 @@ def test_config_ccpp_phys_set_missing_equals(self): #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite "adiabatic"\n') @@ -484,7 +484,7 @@ def test_config_ccpp_phys_set_two_equals(self): ermsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." 
#Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite == "adiabatic"\n') @@ -522,7 +522,7 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): ermsg += "listed in CAM_CONFIG_OPTS" #Create namelist file: - with open("test.txt", "w") as test_fil: + with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') test_fil.write('physics_suite = "cam6"\n') From 4354b9d2122ac4193366ea10822e427ea17c0b31 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 20 Sep 2021 20:45:10 -0600 Subject: [PATCH 40/45] Address second round of review comments and suggestions. --- cime_config/cam_config.py | 532 ++++-- src/data/generate_registry_data.py | 41 +- src/data/physconst.F90 | 86 +- src/data/registry.xml | 47 +- src/data/registry_v1_0.xsd | 4 - src/dynamics/se/dp_coupling.F90 | 121 +- src/dynamics/se/dycore/fvm_mod.F90 | 10 +- src/dynamics/se/dycore/quadrature_mod.F90 | 2 +- src/dynamics/se/dyn_comp.F90 | 5 +- src/dynamics/se/gravity_waves_sources.F90 | 234 +++ src/dynamics/utils/dyn_thermo.F90 | 1487 ++++++++++------- src/dynamics/utils/dynconst.F90 | 27 +- test/.pylintrc | 2 +- test/unit/cam_config_unit_tests.py | 20 +- .../sample_files/physics_types_complete.F90 | 2 +- 15 files changed, 1718 insertions(+), 902 deletions(-) create mode 100644 src/dynamics/se/gravity_waves_sources.F90 diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index fbc424b9..db67e214 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -27,7 +27,7 @@ from cam_autogen import generate_init_routines -# Determine regular rexpression type (for later usage in Config_string) +# Determine regular rexpression type (for later usage in check_string_val) REGEX_TYPE = type(re.compile(r" ")) ############################################################################### @@ -52,6 +52,155 @@ def __init_(self, message): super(CamConfigTypeError, self).__init__(message) # pylint: enable=useless-super-delegation +############################################################################### +# Valid value-checking functions +############################################################################### + +def _check_integer_val(name, val, valid_vals=None): + + """ + Checks if a provided integer value is "valid" + as defined by the provided "valid_vals" entry + for the given config variable (name). + + If value is not valid, then an error message is returned, + otherwise the function returns None. + + Possible valid_val types are: + + list -> If a list, then just check that provided value is in the list. + + tuple -> If a tuple, then there must be only two values, which define + a possible range of values, e.g. (min, max). If only one value + is provided, then only a minimum (or maximum) value will be + enforced, depending on if the tuple is (x,) or (,x). 
+ """ + + # Make sure that provided value is an integer: + if not isinstance(val, int): + emsg = "ERROR: Value being checked in 'check_integer_val'" + emsg += " must be an integer type, not '{}'" + raise CamConfigTypeError(emsg.format(type(val))) + # End if + + # Only check the given value if valid_vals is not "None" + if valid_vals is not None: + # Check if valid values is a tuple + if isinstance(valid_vals, tuple): + # Check that length of valid values tuple is 2 + if len(valid_vals) != 2: + emsg = ("ERROR: Valid values tuple for variable, " + "'{}', must have two elements, not '{}' elements") + raise CamConfigValError(emsg.format(name, + len(valid_vals))) + # End if + if valid_vals[0] is None: + # If first valid value is "None", then just check that + # given value is less than second valid value, and + # that second value is an integer + if valid_vals[1] is None: + emsg = "ERROR: Valid values tuple for variable, '{}', " + emsg += "must contain at least one integer" + raise CamConfigValError(emsg.format(name)) + # End if + if val > valid_vals[1]: + emsg = "ERROR: Value, '{}', provided for variable, " + emsg += "'{}', is greater than max valid value, '{}'" + return emsg.format(val, name, valid_vals[1]) + # End if + elif valid_vals[1] is None: + # Check if second value is "None". + # If so, then just check that given value is greater + # than first valid value + if val < valid_vals[0]: + emsg = "ERROR: Value, '{}', provided for variable, " + emsg += "'{}', is less than minimum valid value, '{}'" + return emsg.format(val, name, valid_vals[0]) + # End if + else: + # If both valid values are integers, then check that + # given value is between both valid values + if (val < valid_vals[0]) or (val > valid_vals[1]): + emsg = "ERROR: Value, '{}', provided for variable, " + emsg += "'{}', is outside valid value range, '{}'" + return emsg.format(val, name, valid_vals) + # End if + # End if + elif isinstance(valid_vals, list): + # If valid_vals is a list, then just check that the given value + # matches one of the valid values in the list + if not val in valid_vals: + emsg = "ERROR: Value, '{}', provided for variable, '{}', " + emsg += "does not match any of the valid values: '{}'" + return emsg.format(val, name, valid_vals) + # End if + else: + # valid_vals is neither a list nor a tuple, so throw an error: + emsg = "ERROR: Valid values for integers must be provided as " + emsg += "either a tuple or a list, not '{}'." + raise CamConfigTypeError(emsg.format(type(valid_vals))) + + # End if + # End if + + # Return nothing if value is valid + return None + +############################################################################### + +def _check_string_val(name, val, valid_vals=None): + + """ + Checks if a provided string value is "valid" + as defined by the provided "valid_vals" entry. + + Possible valid_val types are: + + list -> If a list, then just check that provided value is in the list. + + regex -> If a compiled regular expression, then check that the provided + value is matched by the regular expression.
+ """ + + # Make sure that provided value is a string: + if not isinstance(val, str): + emsg = "ERROR: Value being checked in 'check_string_val'" + emsg += " must be a string type, not '{}'" + raise CamConfigTypeError(emsg.format(type(val))) + # End if + + # Only check the given value if valid_vals is not "None" + if valid_vals is not None: + + # If a list, then check that the given value + # matches one of the valid values in the list + if isinstance(valid_vals, list): + if not val in valid_vals: + emsg = "ERROR: Value, '{}', provided for variable, '{}', " + emsg += "does not match any of the valid values: '{}'" + return emsg.format(val, name, valid_vals) + # End if + elif isinstance(valid_vals, REGEX_TYPE): + # If a regular expression object, then check that + # value is matched by the expression + if valid_vals.match(val) is None: + emsg = "ERROR: Value, '{}', provided for variable, '{}', " + emsg += "does not match the valid regular expression." + return emsg.format(val, name) + # End if + # End if + else: + # valid_vals is neither a list nor a regex, so throw an error: + emsg = "ERROR: Valid values for strings must be provided as " + emsg += "either a regular expression or a list, not '{}'" + return emsg.format(type(valid_vals)) + + # End if + # End if + + # Return nothing if value is valid + return None + ############################################################################### # CAM configure option classes ############################################################################### @@ -105,7 +254,7 @@ class ConfigGen: def __init__(self, name, desc, is_nml_attr=False): - # Check that "name" is a string + # Check that "name" is a string if not isinstance(name, str): emsg = "ERROR: Configuration variable name '{}' must be a string, not {}" raise CamConfigTypeError(emsg.format(name, type(name))) @@ -246,7 +395,7 @@ class ConfigInteger(ConfigGen): def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): # Add generic attributes - ConfigGen.__init__(self, name, desc, is_nml_attr=is_nml_attr) + super(ConfigInteger, self).__init__(name, desc, is_nml_attr=is_nml_attr) # Check that "valid_vals" is either "None", a list, or a tuple if valid_vals is not None: @@ -254,6 +403,7 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): emsg = ("ERROR: The valid values for variable, '{}', " "must either be None, a list, or a tuple, not {}") raise CamConfigTypeError(emsg.format(name, type(valid_vals))) + # End if # If list or tuple, check that all entries are either # "None" or integers @@ -263,6 +413,9 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): "either None or an integer. Currently it is {}") raise CamConfigTypeError(emsg.format(name, type(valid_val))) + # End if + # End for + # End if # If ok, then add valid_vals to object self.__valid_vals = valid_vals @@ -306,66 +459,14 @@ def __check_value(self, val): included in that list.
""" - # Extract valid values (valid_vals) from object - valid_vals = self.valid_vals + # Check if integer value is valid + bad_val_msg = _check_integer_val(self.name, val, + valid_vals=self.valid_vals) + + # Raise an error if a bad value is found: + if bad_val_msg: + raise CamConfigValError(bad_val_msg) - # Only check the given value if valid_vals is not "None" - if valid_vals is not None: - # Check if valid values is a tuple - if isinstance(valid_vals, tuple): - # Check that length of valid values tuple is 2 - if len(valid_vals) != 2: - emsg = ("Error: Valid values tuple for variable, " - "'{}', must have two elements, not '{}' elements") - raise CamConfigValError(emsg.format(self.name, - len(valid_vals))) - # End if - if valid_vals[0] is None: - # If first valid value is "None", then just check that - # given value is less than second valid value, and - # that second value is an integer - if valid_vals[1] is None: - emsg = "Error: Valid values tuple for variable, '{}', " - emsg += "must contain at least one integer" - raise CamConfigValError(emsg.format(self.name)) - # End if - if val > valid_vals[1]: - emsg = "Error: Value, '{}', provided for variable, " - emsg += "'{}', is greater than max valid value, '{}'" - raise CamConfigValError(emsg.format(val, self.name, - valid_vals[1])) - # End if - elif valid_vals[1] is None: - # Check if second value is "None". - # If so, then just check that given value is greater - # than first valid value - if val < valid_vals[0]: - emsg = "Error: Value, '{}', provided for variable, " - emsg += "'{}', is less than minimum valid value, '{}'" - raise CamConfigValError(emsg.format(val, self.name, - valid_vals[0])) - # End if - else: - # If both valid values are integers, then check that - # given value is between both valid values - if (val < valid_vals[0]) or (val > valid_vals[1]): - emsg = "Error: Value, '{}', provided for variable, " - emsg += "'{}', is outside valid value range, '{}'" - raise CamConfigValError(emsg.format(val, self.name, - valid_vals)) - # End if - # End if - else: - # If valid_vals is a list, then just check that the given value - # matches one of the valid values in the list - if not val in valid_vals: - emsg = "ERROR: Value, '{}', provided for variable, '{}', " - emsg += "does not match any of the valid values: '{}'" - raise CamConfigValError(emsg.format(val, self.name, - valid_vals)) - # End if - # End if - # End if #++++++++++++++++++++++++ def set_value(self, val): @@ -393,7 +494,7 @@ class ConfigString(ConfigGen): name -> Name of new CAM configure option desc -> Text description of CAM configure option val -> Integer value for CAM configure option - valid_vals (optional) -> List of valid CAM configure option values (default is None) + valid_vals (optional) -> List or regex of valid CAM configure option values (default is None) is_nml_attr (optional) -> Logical that determines if option is also a namelist attribute (defaut is False) Doctests: @@ -445,7 +546,7 @@ class ConfigString(ConfigGen): def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): # Add generic attributes - ConfigGen.__init__(self, name, desc, is_nml_attr=is_nml_attr) + super(ConfigString, self).__init__(name, desc, is_nml_attr=is_nml_attr) # Check if Valid_vals is not None if valid_vals is not None: @@ -465,6 +566,7 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): # End if # End if # End if + # If ok, then add valid_vals to object self.__valid_vals = valid_vals @@ -508,24 +610,13 @@ def __check_value(self, val): 
expression. """ - # Extract valid values (valid_vals) from object - valid_vals = self.valid_vals + # Check if string value is valid + bad_val_msg = _check_string_val(self.name, val, + valid_vals=self.valid_vals) - # If a list, then check that the given value - # matches one of the valid values in the list - if isinstance(valid_vals, list): - if not val in valid_vals: - emsg = ("ERROR: Value, '{}', provided for variable, '{}', " - "does not match any of the valid values: '{}'") - raise CamConfigValError(emsg.format(val, self.name, valid_vals)) - - elif valid_vals is not None: - # If a regular expression object, then check that - # value is matched by the expression - if valid_vals.match(val) is None: - emsg = ("ERROR: Value, '{}', provided for variable, '{}', " - "does not match the valid regular expression") - raise CamConfigValError(emsg.format(val, self.name)) + # Raise an error if a bad value is found: + if bad_val_msg: + raise CamConfigValError(bad_val_msg) #++++++++++++++++++++++++ @@ -555,6 +646,8 @@ class ConfigList(ConfigGen): list_vals -> List values for CAM configure option valid_type (optional) -> Specify valid type for CAM configure option list values. Currently accepts "int" for integer and "str" for string. + valid_vals (optional) -> Valid CAM configure option values (default is None), + valid_type must be included in order to use valid_vals. Doctests: @@ -567,6 +660,7 @@ class ConfigList(ConfigGen): >>> ConfigList("test", "test object description", ["x", "y", "z"], valid_type="str").value ['x', 'y', 'z'] + 3. Check that ConfigList With a non-string passed to "valid_type" fails with the correct error: >>> ConfigList("test", "test object description", [1, 2, 3], valid_type=5).value #doctest: +ELLIPSIS Traceback (most recent call last): @@ -580,16 +674,53 @@ class ConfigList(ConfigGen): CamConfigValError: ERROR: 'foo' is not a recognized option for 'valid_type'. Please use either 'int' or 'str'. 5. Check that ConfigList with list entries that don't match the valid_type entry fails with the correct error: - >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="str").value #doctest: +ELLIPSIS + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="str").value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: The following list entries, provided for variable, 'test', are not strings, but instead are: + '1': type='' + '2': type='' + '3': type='' + + + 6. Check that ConfigList with "valid_vals" but no "valid_type" fails with the correct error: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_vals=[1,2,3,4,5]).value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: valid values can only be used if valid_type is 'int' or 'str', not 'None'. + + 7. Check that ConfigList with a "valid_vals" type that doesn't match "valid_type='int'" fails with the correct error: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="int", valid_vals={'a':1}).value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: the valid values provided must be either in the form of a list + or a tuple in order to be used with integer elements, not ''. + + 8. 
Check that ConfigList with a "valid_vals" type that doesn't match "valid_type='str'" fails with the correct error: + >>> ConfigList("test", "test object description", ["a", "b", "c"], valid_type="str", valid_vals={'a':1}).value #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: the valid values provided must be either in the form of a list + or a regular expression in order to be used with string elements, not ''. + + 9. Check that ConfigList with a list that matches the "valid_vals" entry works as expected: + >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="int", valid_vals=(0,5)).value + [1, 2, 3] + + 10. Check that ConfigList with a list that does not match the "valid_vals" entry fails with the correct error: + >>> ConfigList("test", "test object description", ["1", "b", "c"], valid_type="str", valid_vals=["1","2","3"]).value #doctest: +ELLIPSIS Traceback (most recent call last): ... - CamConfigValError: ERROR: List entry, '1', provided for variable, 'test', is not a string, but instead is type ''. + CamConfigValError: The following errors were found for a list-type config variable: + ERROR: Value, 'b', provided for variable, 'test', does not match any of the valid values: '['1', '2', '3']' + + ERROR: Value, 'c', provided for variable, 'test', does not match any of the valid values: '['1', '2', '3']' """ - def __init__(self, name, desc, val, valid_type=None): + def __init__(self, name, desc, val, valid_type=None, valid_vals=None): # Add generic attributes - ConfigGen.__init__(self, name, desc, is_nml_attr=None) + super(ConfigList, self).__init__(name, desc, is_nml_attr=False) # Check if valid_type is not None if valid_type is not None: @@ -601,13 +732,43 @@ def __init__(self, name, desc, val, valid_type=None): # End if # End if - # If ok, then add valid_type to object + # If ok, then add valid_type and valid_vals to object self.__valid_type = valid_type + self.__valid_vals = valid_vals - # Next, check that provided list entry types are "valid" based on the - # valid type provided: + #Check that the valid values option can be used with the valid type: + if self.__valid_vals is not None: + # If only integers are allowed, then make sure valid vals is + # either a list or a tuple: + if valid_type == "int": + if not isinstance(valid_vals, list) and not isinstance(valid_vals, tuple): + emsg = "ERROR: the valid values provided must be either in the form of a list" + emsg += "\nor a tuple in order to be used with integer elements, not '{}'." + raise CamConfigTypeError(emsg.format(type(valid_vals))) + # End if + + # If only strings are allowed, then make sure valid vals is + # either a list or a regular expression: + elif valid_type == "str": + if not isinstance(valid_vals, list) and not isinstance(valid_vals, REGEX_TYPE): + emsg = "ERROR: the valid values provided must be either in the form of a list" + emsg += "\nor a regular expression in order to be used with string elements, not '{}'." + raise CamConfigTypeError(emsg.format(type(valid_vals))) + # End if + else: + # Currently valid values can only be used with strings or integers, + # so throw an error: + emsg = "ERROR: valid values can only be used if valid_type is 'int' or 'str', not '{}'."
+ raise CamConfigValError(emsg.format(valid_type)) + + # Next, check that provided list entry types and values are "valid" + # based on the valid type and valid values provided: if self.__valid_type is not None: - self.__check_value(val) + self.__check_type(val) + + #If valid values are provided, then check them as well: + if self.__valid_vals is not None: + self.__check_values(val) # If everything is ok, then add provided value to object self.__value = val @@ -626,9 +787,14 @@ def valid_type(self): """Return the valid type of this config object""" return self.__valid_type + @property + def valid_vals(self): + """Return the valid values of this config object""" + return self.__valid_vals + #++++++++++++++++++++++++ - def __check_value(self, val): + def __check_type(self, val): """ Check if the entries in the provided @@ -639,32 +805,95 @@ def __check_value(self, val): # Extract valid type (valid_type) from object valid_type = self.valid_type + # Create empty dictionary to store errors: + bad_val_types = {} + if valid_type == "str": #All list entries should be strings: for list_entry in val: if not isinstance(list_entry, str): - emsg = "ERROR: List entry, '{}', provided for variable, '{}'" - emsg += ", is not a string, but instead is type '{}'." - raise CamConfigValError(emsg.format(list_entry, self.name, - type(list_entry))) + bad_val_types[str(list_entry)] = str(type(list_entry)) + + #If bad values dictionary is non-empty, then raise error: + if bad_val_types: + emsg = "ERROR: The following list entries, provided for variable," + emsg += " '{}', are not strings, but instead are:\n".format(self.name) + for key_str, type_str in bad_val_types.items(): + emsg += "'{}': type='{}'\n".format(key_str, type_str) + raise CamConfigValError(emsg) + # End if elif valid_type == "int": #All list entries should be integers: for list_entry in val: if not isinstance(list_entry, int): - emsg = "ERROR: List entry, '{}', provided for variable, '{}'" - emsg += ", is not an integer, but instead is type '{}'." - raise CamConfigValError(emsg.format(list_entry, self.name, - type(list_entry))) + bad_val_types[str(list_entry)] = str(type(list_entry)) + + #If bad values dictionary is non-empty, then raise error: + if bad_val_types: + emsg = "ERROR: The following list entries, provided for variable," + emsg += " '{}', are not integers, but instead are:\n".format(self.name) + for key_str, type_str in bad_val_types.items(): + emsg += "'{}': type='{}'\n".format(key_str, type_str) + raise CamConfigValError(emsg) + # End if + else: #Invalid option given for "valid_type", so raise error: emsg = "ERROR: '{}' is not a recognized option for 'valid_type'." emsg += " Please use either 'int' or 'str'." raise CamConfigValError(emsg.format(valid_type)) + # End if + #++++++++++++++++++++++++ - def set_value(self, val): + def __check_values(self, list_vals): + + """ + Check if the entries in the provided + list (val) are valid as specified by + specified by the "valid_vals" entry. 
+ """ + + # Create empty list: + bad_val_msgs = [] + + # Check if valid type is string or integer + if self.valid_type == "int": + for val in list_vals: + #Check if integer value in list is valid + bad_val_msg = _check_integer_val(self.name, val, + valid_vals=self.valid_vals) + + # If return value is not None, then add + # to bad value list + if bad_val_msg: + bad_val_msgs.append(bad_val_msg) + # End if + + elif self.valid_type == "str": + for val in list_vals: + # Check if string value in list is valid + bad_val_msg = _check_string_val(self.name, val, + valid_vals=self.valid_vals) + + # If return value is not None, then add + # to bad value list + if bad_val_msg: + bad_val_msgs.append(bad_val_msg) + # End if + # End if + + # If bad values are present, then raise an error + if bad_val_msgs: + emsg = "The following errors were found for a list-type config variable:\n" + emsg += "\n\n".join(bad_val_msgs) + raise CamConfigValError(emsg) + + #++++++++++++++++++++++++ + + def set_value(self, list_vals): """ Set configure object's value to the one provided. @@ -672,10 +901,10 @@ def set_value(self, val): # First, check that the provided value is valid if self.__valid_type is not None: - self.__check_value(val) + self.__check_type(list_vals) # If ok, then set object's value to one provided - self.__value = val + self.__value = list_vals ############################################################################### @@ -840,10 +1069,10 @@ def __init__(self, case, case_log): hgrid_desc = "Horizontal grid specifier." # dynamics package source directories meta-data - dyn_dirs_desc = "Comma-separated list of local directories containing" \ - " dynamics package source code.\n" \ - "These directories are assumed to be located under" \ - " src/dynamics, with a slash ('/') indicating directory hierarchy." + dyn_dirs_desc = ["Comma-separated list of local directories containing", + "dynamics package source code.", + "These directories are assumed to be located under", + "src/dynamics, with a slash ('/') indicating directory hierarchy."] # Create regex expressions to search for the different dynamics grids eul_grid_re = re.compile(r"T[0-9]+") # Eulerian dycore @@ -981,12 +1210,12 @@ def __init__(self, case, case_log): self.create_config("csne", csne_desc, csne_val, is_nml_attr=True) # Add number of points on each cubed-sphere element edge - csnp_desc = "Number of points on each edge of the elements in a cubed sphere grid." + csnp_desc = "Number of points on each edge of each element in a cubed sphere grid." self.create_config("csnp", csnp_desc, csnp_val) # Add number of CSLAM physics grid points: - npg_desc = "Number of physics grid cells on each edge of" \ - " the elements in a cubed sphere grid." + npg_desc = "Number of finite volume grid cells on each edge of" \ + " each element in a cubed sphere grid." self.create_config("npg", npg_desc, npg_val, is_nml_attr=True) # Add number of points (NP) CPP definition: @@ -1026,10 +1255,9 @@ def __init__(self, case, case_log): else: analy_ic_val = 0 #Don't use Analytic ICs - analy_ic_desc = "\n\ - Switch to turn on analytic initial conditions for the dynamics state:\n\ - 0 => no,\n\ - 1 => yes." 
+ analy_ic_desc = ["Switch to turn on analytic initial conditions for the dynamics state:", + "0 => no", + "1 => yes."] self.create_config("analytic_ic", analy_ic_desc, analy_ic_val, [0, 1], is_nml_attr=True) @@ -1075,10 +1303,6 @@ def __init__(self, case, case_log): "Fortran kind used in physics for type real.", user_config_opts.phys_kind, kind_valid_vals) - # Set phys->dyn kind conversion CPPdef if kinds are different: - if self.get_value("dyn_kind") != self.get_value("phys_kind"): - self.add_cppdef("DYN_PHYS_KIND_DIFF") - #-------------------------------------------------------- # Print CAM configure settings and values to debug logger #-------------------------------------------------------- @@ -1134,13 +1358,13 @@ def parse_config_opts(cls, config_opts, test_mode=False): cco_str = "CAM_CONFIG_OPTS" #Don't allow abbreviations if using python 3.5 or greater: - if sys.version_info[0] > 2 and sys.version_info[1] > 4: + if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 5): parser = argparse.ArgumentParser(description=cco_str, - prog="ConfigCAM", allow_abbrev=False, + prog="ConfigCAM", epilog="Allowed values of "+cco_str) else: parser = argparse.ArgumentParser(description=cco_str, - prog="ConfigCAM", + prog="ConfigCAM", allow_abbrev=False, epilog="Allowed values of "+cco_str) @@ -1195,7 +1419,8 @@ def create_config(self, name, desc, val, valid_vals=None, elif isinstance(val, list): # If list, then call list configure object conf_obj = ConfigList(name, desc, val, - valid_type=valid_list_type) + valid_type=valid_list_type, + valid_vals=valid_vals) else: # If not an integer, string, or a list, then throw an error emsg = ("ERROR: The input value for new CAM config variable, '{}', " @@ -1253,7 +1478,7 @@ def print_all(self, case_log): if self.__cppdefs: case_log.debug("\nCAM CPP Defs: {}".format(" ".join(self.__cppdefs))) - # Print additional separator (to help seperate this output from + # Print additional separator (to help separate this output from # additional CIME output) case_log.debug("-----------------------------") @@ -1313,7 +1538,7 @@ def add_cppdef(self, cppname, value=None): """ #Create string to check if CPP definition is already present: - check_str = r"-D"+cppname.upper() + check_str = r"-D"+cppname #Check if CPP definition name already exists in CPP string list. #This is done because a CPP definition should only be set once, @@ -1330,7 +1555,7 @@ def add_cppdef(self, cppname, value=None): cpp_str = check_str else: # Create CPP definition flag string: - cpp_str = "-D{}={}".format(cppname.upper(), value) + cpp_str = "{}={}".format(check_str, value) # Add string to CPP definition list: self.__cppdefs.append(cpp_str) @@ -1445,36 +1670,52 @@ def generate_cam_src(self, gen_fort_indent): def ccpp_phys_set(self, cam_nml_attr_dict, user_nl_file): """ - Determine if a user has specified which - CCPP physics suite to use in the namelist, - assuming there is more than one suite - listed in the 'physics_suites' CAM - configure option. + Find the physics suite to run. + + If more than one physics suite is available, + then make sure the user has specified a physics + suite from the list of available suites. + + If exactly one physics suite is available, + then make sure that either the user did not + specify a suite or that they did specify a + suite and that it matches the available suite. 
+ """ #Extract physics suite list: phys_suites = self.get_value('physics_suites').split(';') - if len(phys_suites) > 1: - #If more than one physics suite is listed, - #then check the "user_nl_cam" file to see if user - #specified a particular suite to use for this - #simulation: - with open(user_nl_file, 'r') as nl_file: - #Read lines in file: - nl_user_lines = nl_file.readlines() - - #Break out "physics_suite" lines: - phys_suite_lines = \ - [[x.strip() for x in line.split('=')] \ - for line in nl_user_lines if line.lstrip()[0] != "!" and 'physics_suite' in line] - - #If there is no "physics_suite" line, then throw an error: - if not phys_suite_lines: + #Check the "user_nl_cam" file to see if user + #specified a particular suite to use for this + #simulation: + with open(user_nl_file, 'r') as nl_file: + #Read lines in file: + nl_user_lines = nl_file.readlines() + + #Break out "physics_suite" lines: + phys_suite_lines = [] + for line in nl_user_lines: + #Must check if line.lstrip is non-empty first, + #Otherwise blank spaces in user_nl_cam will + #cause problems: + if line.lstrip(): + if line.lstrip()[0] != '!' and 'physics_suite' in line: + phys_suite_lines.append([x.strip() for x in line.split('=')]) + + if not phys_suite_lines: + #If there is no "physics_suite" line, + #then check if there is only one physics suite option: + if len(phys_suites) == 1: + #If so, then just use the only possible suite option: + phys_suite_val = phys_suites[0] + else: + #If more than one option, then raise an error: emsg = "No 'physics_suite' variable is present in user_nl_cam.\n" emsg += "This is required if more than one suite is listed\n" emsg += "in CAM_CONFIG_OPTS." raise CamConfigValError(emsg) + else: #If there is more than one "physics_suite" entry, then throw an error: if len(phys_suite_lines) > 1: @@ -1505,9 +1746,6 @@ def ccpp_phys_set(self, cam_nml_attr_dict, user_nl_file): emsg += "listed in CAM_CONFIG_OPTS" raise CamConfigValError(emsg.format(phys_suite_val)) - else: - #If only a single physics suite is listed, then just use that one: - phys_suite_val = phys_suites[0] #Add new namelist attribute to dictionary: cam_nml_attr_dict["phys_suite"] = phys_suite_val diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 7f2ef15c..77d8a0ca 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -191,11 +191,6 @@ def __init__(self, elem_node, local_name, dimensions, known_types, # end if # pylint: enable=bad-continuation - if self.__tstep_init == "true": - self.__tstep_init = True - elif self.__tstep_init == "false": - self.__tstep_init = False - def write_metadata(self, outfile): """Write out this variable as CCPP metadata""" outfile.write('[ {} ]\n'.format(self.local_name)) @@ -245,17 +240,41 @@ def write_initial_value(self, outfile, indent, init_var, ddt_str, init_val = 'HUGE(1)' elif self.var_type.lower() == 'character': init_val = '""' + elif self.var_type.lower() == 'complex': + init_val = '(nan, nan)' else: init_val = '' # end if # end if - #Time-step initialization, which is always zero: + #Time-step initialization, which is always zero for numerical quantities, + #empty strings for characters, and "false" for logical quantities: if tstep_init: - if self.kind: - outfile.write("{} = 0._{}".format(var_name, self.kind), indent) + if self.var_type.lower() == 'real': + if self.kind: + outfile.write('{} = 0._{}'.format(var_name, self.kind), indent) + else: + outfile.write('{} = 0.0'.format(var_name), indent) + elif 
self.var_type.lower() == 'integer': + if self.kind: + outfile.write('{} = 0_{}'.format(var_name, self.kind), indent) + else: + outfile.write('{} = 0'.format(var_name), indent) + elif self.var_type.lower() == 'character': + if self.kind: + outfile.write('{} = {}_""'.format(var_name, self.kind), indent) + else: + outfile.write('{} = ""'.format(var_name), indent) + elif self.var_type.lower() == 'complex': + if self.kind: + outfile.write('{} = (0._{}, 0._{})'.format(var_name, self.kind, self.kind), indent) + else: + outfile.write('{} = (0.0, 0.0)'.format(var_name), indent) + elif self.var_type.lower() == 'logical': + outfile.write('{} = .false.'.format(var_name), indent) else: - #Assume variable is an integer: - outfile.write("{} = 0".format(var_name), indent) + emsg = 'Variable "{}" is of type "{}", which is not a supported type\n' + emsg += 'for use with "phys_timestep_init_zero".' + raise TypeError(emsg.format(var_name, self.var_type)) # end if elif init_val: outfile.write("if ({}) then".format(init_var), indent) @@ -741,7 +760,7 @@ def write_tstep_init_routine(self, outfile, indent, dimension_string = '' # end if my_ddt = self.is_ddt - if my_ddt: # This is a DDT object, allocate entries + if my_ddt: # This is a DDT object, initalize individual entries subi = indent sub_ddt_str = '{}{}%'.format(ddt_str, self.local_name) diff --git a/src/data/physconst.F90 b/src/data/physconst.F90 index 4349b1f1..eceb3908 100644 --- a/src/data/physconst.F90 +++ b/src/data/physconst.F90 @@ -259,15 +259,15 @@ subroutine physconst_readnl(nlfile) ! Kind-converstion variables, to ensure that MPI broadcast ! works as expected: - real(r8) :: gravit_r8 - real(r8) :: sday_r8 - real(r8) :: mwh2o_r8 - real(r8) :: cpwv_r8 - real(r8) :: mwdry_r8 - real(r8) :: cpair_r8 - real(r8) :: rearth_r8 - real(r8) :: tmelt_r8 - real(r8) :: omega_r8 + real(r8) :: gravit_bcast + real(r8) :: sday_bcast + real(r8) :: mwh2o_bcast + real(r8) :: cpwv_bcast + real(r8) :: mwdry_bcast + real(r8) :: cpair_bcast + real(r8) :: rearth_bcast + real(r8) :: tmelt_bcast + real(r8) :: omega_bcast ! Physical constants needing to be reset (ie. for aqua planet experiments) namelist /physconst_nl/ gravit, sday, mwh2o, cpwv, mwdry, cpair, & @@ -289,54 +289,42 @@ subroutine physconst_readnl(nlfile) close(unitn) end if - ! Copy namelist variables into "r8" temporary variables - ! for broadcasting: - gravit_r8 = real(gravit, r8) - sday_r8 = real(sday, r8) - mwh2o_r8 = real(mwh2o, r8) - cpwv_r8 = real(cpwv, r8) - mwdry_r8 = real(mwdry, r8) - cpair_r8 = real(cpair, r8) - rearth_r8 = real(rearth, r8) - tmelt_r8 = real(tmelt, r8) - omega_r8 = real(omega, r8) - ! Broadcast namelist variables if (npes > 1) then - ! Copy namelist variables into "r8" temporary variables + ! Copy namelist variables into "bcast" temporary variables ! for broadcasting: - gravit_r8 = real(gravit, r8) - sday_r8 = real(sday, r8) - mwh2o_r8 = real(mwh2o, r8) - cpwv_r8 = real(cpwv, r8) - mwdry_r8 = real(mwdry, r8) - cpair_r8 = real(cpair, r8) - rearth_r8 = real(rearth, r8) - tmelt_r8 = real(tmelt, r8) - omega_r8 = real(omega, r8) + gravit_bcast = real(gravit, r8) + sday_bcast = real(sday, r8) + mwh2o_bcast = real(mwh2o, r8) + cpwv_bcast = real(cpwv, r8) + mwdry_bcast = real(mwdry, r8) + cpair_bcast = real(cpair, r8) + rearth_bcast = real(rearth, r8) + tmelt_bcast = real(tmelt, r8) + omega_bcast = real(omega, r8) ! 
Broadcast to other PEs: - call mpi_bcast(gravit_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(sday_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(mwh2o_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(cpwv_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(mwdry_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(cpair_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(rearth_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(tmelt_r8, 1, mpi_real8, masterprocid, mpicom, ierr) - call mpi_bcast(omega_r8, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(gravit_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(sday_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(mwh2o_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(cpwv_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(mwdry_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(cpair_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(rearth_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(tmelt_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) + call mpi_bcast(omega_bcast, 1, mpi_real8, masterprocid, mpicom, ierr) ! Convert broadcasted variables back to "kind_phys": - gravit = real(gravit_r8, kind_phys) - sday = real(sday_r8, kind_phys) - mwh2o = real(mwh2o_r8, kind_phys) - cpwv = real(cpwv_r8, kind_phys) - mwdry = real(mwdry_r8, kind_phys) - cpair = real(cpair_r8, kind_phys) - rearth = real(rearth_r8, kind_phys) - tmelt = real(tmelt_r8, kind_phys) - omega = real(omega_r8, kind_phys) + gravit = real(gravit_bcast, kind_phys) + sday = real(sday_bcast, kind_phys) + mwh2o = real(mwh2o_bcast, kind_phys) + cpwv = real(cpwv_bcast, kind_phys) + mwdry = real(mwdry_bcast, kind_phys) + cpair = real(cpair_bcast, kind_phys) + rearth = real(rearth_bcast, kind_phys) + tmelt = real(tmelt_bcast, kind_phys) + omega = real(omega_bcast, kind_phys) end if diff --git a/src/data/registry.xml b/src/data/registry.xml index 403072fb..71401f3a 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -67,7 +67,7 @@ horizontal_dimension vertical_layer_dimension v state_v - horizontal_dimension vertical_layer_dimension @@ -120,6 +120,20 @@ horizontal_dimension vertical_layer_dimension rpdeldry state_rpdeldry + + horizontal_dimension vertical_layer_dimension + frontgf pbuf_frontgf + + + horizontal_dimension vertical_layer_dimension + frontga pbuf_frontga + .true. 
- surface_air_pressure - surface_pressure_of_dry_air - geopotential_at_surface - air_temperature - x_wind - y_wind - lagrangian_tendency_of_air_pressure - dry_static_energy - constituent_mixing_ratio - pressure_thickness - pressure_thickness_of_dry_air - reciprocal_of_pressure_thickness - reciprocal_of_pressure_thickness_of_dry_air - air_pressure - air_pressure_of_dry_air - ln_of_air_pressure - ln_of_air_pressure_of_dry_air - air_pressure_at_interface - air_pressure_of_dry_air_at_interface - ln_of_air_pressure_at_interface - ln_of_air_pressure_of_dry_air_at_interface + surface_pressure_of_dry_air + geopotential_at_surface + air_temperature + x_wind + y_wind + lagrangian_tendency_of_air_pressure + constituent_mixing_ratio + pressure_thickness_of_dry_air + frontogenesis_function + frontogenesis_angle total_tendency_of_air_temperature diff --git a/src/data/registry_v1_0.xsd b/src/data/registry_v1_0.xsd index f89d6087..283ac61c 100644 --- a/src/data/registry_v1_0.xsd +++ b/src/data/registry_v1_0.xsd @@ -176,8 +176,6 @@ - @@ -199,8 +197,6 @@ - diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 916bedd3..983cb491 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -52,7 +52,7 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! Note that all pressures and tracer mixing ratios coming from the dycore are based on ! dry air mass. -! use gravity_waves_sources, only: gws_src_fnct + use gravity_waves_sources, only: gws_src_fnct use dyn_comp, only: frontgf_idx, frontga_idx use hycoef, only: hyai, ps0 use test_fvm_mapping, only: test_mapping_overwrite_dyn_state, test_mapping_output_phys_state @@ -86,8 +86,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! Frontogenesis real (kind=r8), allocatable :: frontgf(:,:,:) ! temp arrays to hold frontogenesis real (kind=r8), allocatable :: frontga(:,:,:) ! function (frontgf) and angle (frontga) - real (kind=r8), allocatable :: frontgf_phys(:,:) - real (kind=r8), allocatable :: frontga_phys(:,:) integer :: ncols,ierr integer :: blk_ind(1), m, m_cnst @@ -170,8 +168,8 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) if (cam_runtime_opts%gw_front() .or. & cam_runtime_opts%gw_front_igw()) then - !Un-comment once gravity wave parameterization is available -JN: - !call gws_src_fnct(elem, tl_f, tl_qdp_np0, frontgf, frontga, nphys) + ! Calculate frontogenesis function and angle + call gws_src_fnct(elem, tl_f, tl_qdp_np0, frontgf, frontga, nphys) end if @@ -258,30 +256,21 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) q_prev = 0.0_r8 call t_startf('dpcopy') - if (cam_runtime_opts%gw_front() .or. 
cam_runtime_opts%gw_front_igw()) then - allocate(frontgf_phys(pcols, pver), stat=ierr) - call check_allocate(ierr, subname, 'frontgf_phys(pcols, pver)', & - file=__FILE__, line=__LINE__) - - allocate(frontga_phys(pcols, pver), stat=ierr) - call check_allocate(ierr, subname, 'frontga_phys(pcols, pver)', & - file=__FILE__, line=__LINE__) - end if !$omp parallel do num_threads(max_num_threads) private (icol, ie, blk_ind, ilyr, m) do icol = 1, pcols call get_dyn_col_p(icol, ie, blk_ind) - phys_state%ps(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) - phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) + phys_state%psdry(icol) = real(ps_tmp(blk_ind(1), ie), kind_phys) + phys_state%phis(icol) = real(phis_tmp(blk_ind(1), ie), kind_phys) do ilyr = 1, pver - phys_state%pdel(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) - phys_state%t(icol, ilyr) = real(T_tmp(blk_ind(1), ilyr, ie), kind_phys) - phys_state%u(icol, ilyr) = real(uv_tmp(blk_ind(1), 1, ilyr, ie), kind_phys) - phys_state%v(icol, ilyr) = real(uv_tmp(blk_ind(1), 2, ilyr, ie), kind_phys) - phys_state%omega(icol, ilyr) = real(omega_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%pdeldry(icol, ilyr) = real(dp3d_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%t(icol, ilyr) = real(T_tmp(blk_ind(1), ilyr, ie), kind_phys) + phys_state%u(icol, ilyr) = real(uv_tmp(blk_ind(1), 1, ilyr, ie), kind_phys) + phys_state%v(icol, ilyr) = real(uv_tmp(blk_ind(1), 2, ilyr, ie), kind_phys) + phys_state%omega(icol, ilyr) = real(omega_tmp(blk_ind(1), ilyr, ie), kind_phys) if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then - frontgf_phys(icol, ilyr) = frontgf(blk_ind(1), ilyr, ie) - frontga_phys(icol, ilyr) = frontga(blk_ind(1), ilyr, ie) + phys_state%frontgf(icol, ilyr) = real(frontgf(blk_ind(1), ilyr, ie), kind_phys) + phys_state%frontga(icol, ilyr) = real(frontga(blk_ind(1), ilyr, ie), kind_phys) end if end do @@ -292,25 +281,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) end do end do - if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) then - !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, pbuf_chnk, pbuf_frontgf, pbuf_frontga) -!Un-comment once pbuf replacement variables are available -JN: -! do lchnk = begchunk, endchunk -! ncols = get_ncols_p(lchnk) -! pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) -! call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) -! call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) -! do icol = 1, ncols -! do ilyr = 1, pver -! pbuf_frontgf(icol, ilyr) = frontgf_phys(icol, ilyr, lchnk) -! pbuf_frontga(icol, ilyr) = frontga_phys(icol, ilyr, lchnk) -! end do -! end do -! end do - deallocate(frontgf_phys) - deallocate(frontga_phys) - end if - call t_stopf('dpcopy') ! Save the tracer fields input to physics package for calculating tendencies @@ -328,8 +298,8 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) deallocate(q_tmp) deallocate(omega_tmp) - ! ps, pdel, and q in phys_state are all dry at this point. After return from derived_phys_dry - ! ps and pdel include water vapor only, and the 'wet' constituents have been converted to wet mmr. + ! Constituent mixing rations in phys_state are all dry at this point. + ! After return from derived_phys_dry the 'wet' constituents have been converted to wet mmr. 
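+  ! Illustrative relationship (assuming, as in derived_phys_dry below, that
+  ! water vapor is the only constituent included in the moist pressure thickness):
+  !    pdel  = pdeldry * (1 + qv_dry)
+  !    q_wet = q_dry * pdeldry/pdel = q_dry / (1 + qv_dry)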
call t_startf('derived_phys') call derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) call t_stopf('derived_phys') @@ -340,6 +310,8 @@ end subroutine d_p_coupling subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, tl_qdp) + use physics_types, only: pdel + ! Convert the physics output state into the dynamics input state. use test_fvm_mapping, only: test_mapping_overwrite_tendencies use test_fvm_mapping, only: test_mapping_output_mapped_tendencies @@ -438,7 +410,7 @@ subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, t do ilyr = 1, pver do icol=1, pcols !Apply adjustment only to water vapor: - factor = phys_state%pdel(icol,ilyr)/phys_state%pdeldry(icol,ilyr) + factor = pdel(icol,ilyr)/phys_state%pdeldry(icol,ilyr) phys_state%q(icol,ilyr,ix_qv) = factor*phys_state%q(icol,ilyr,ix_qv) phys_state%q(icol,ilyr,ix_cld_liq) = factor*phys_state%q(icol,ilyr,ix_cld_liq) phys_state%q(icol,ilyr,ix_rain) = factor*phys_state%q(icol,ilyr,ix_rain) @@ -604,7 +576,9 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! Finally compute energy and water column integrals of the physics input state. ! use constituents, only: qmin - use physics_types, only: exner, zi, zm, lagrangian_vertical + use physics_types, only: pintdry, lnpintdry, rpdeldry, pmiddry + use physics_types, only: lnpmiddry, pdel, ps, pint, pmid, lnpint, lnpmid + use physics_types, only: rpdel, exner, zi, zm, lagrangian_vertical, dse use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv use geopotential_t, only: geopotential_t_run @@ -645,31 +619,30 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i = 1, pcols ! Set model-top values: - phys_state%psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(phys_state%pdel(i,:)) - phys_state%pintdry(i,1) = real(hyai(1)*ps0, kind_phys) + phys_state%psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(phys_state%pdeldry(i,:)) + pintdry(i,1) = real(hyai(1)*ps0, kind_phys) end do ! Calculate (natural) logarithm: - call shr_vmath_log(phys_state%pintdry(1:pcols,1), & - phys_state%lnpintdry(1:pcols,1), pcols) + call shr_vmath_log(pintdry(1:pcols,1), & + lnpintdry(1:pcols,1), pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols ! Calculate dry pressure variables for rest of column: - phys_state%pintdry(i,k+1) = phys_state%pintdry(i,k) + phys_state%pdel(i,k) - phys_state%pdeldry(i,k) = phys_state%pdel(i,k) - phys_state%rpdeldry(i,k) = 1._kind_phys/phys_state%pdeldry(i,k) - phys_state%pmiddry(i,k) = 0.5_kind_phys*(phys_state%pintdry(i,k+1) + & - phys_state%pintdry(i,k)) + pintdry(i,k+1) = pintdry(i,k) + phys_state%pdeldry(i,k) + rpdeldry(i,k) = 1._kind_phys/phys_state%pdeldry(i,k) + pmiddry(i,k) = 0.5_kind_phys*(pintdry(i,k+1) + & + pintdry(i,k)) end do ! Calculate (natural) logarithms: - call shr_vmath_log(phys_state%pintdry(1:pcols,k+1),& - phys_state%lnpintdry(1:pcols,k+1), pcols) + call shr_vmath_log(pintdry(1:pcols,k+1),& + lnpintdry(1:pcols,k+1), pcols) - call shr_vmath_log(phys_state%pmiddry(1:pcols,k), & - phys_state%lnpmiddry(1:pcols,k), pcols) + call shr_vmath_log(pmiddry(1:pcols,k), & + lnpmiddry(1:pcols,k), pcols) end do ! wet pressure variables (should be removed from physics!) @@ -680,7 +653,7 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! to be consistent with total energy formula in physic's check_energy module only ! 
include water vapor in in moist dp factor_array(i,k) = 1._kind_phys+phys_state%q(i,k,ix_qv) - phys_state%pdel(i,k) = phys_state%pdeldry(i,k)*factor_array(i,k) + pdel(i,k) = phys_state%pdeldry(i,k)*factor_array(i,k) end do end do @@ -689,29 +662,29 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i=1, pcols ! Set model-top values assuming zero moisture: - phys_state%ps(i) = phys_state%pintdry(i,1) - phys_state%pint(i,1) = phys_state%pintdry(i,1) + ps(i) = pintdry(i,1) + pint(i,1) = pintdry(i,1) end do !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i=1, pcols ! Calculate wet (total) pressure variables for rest of column: - phys_state%pint(i,k+1) = phys_state%pint(i,k) + phys_state%pdel(i,k) - phys_state%pmid(i,k) = (phys_state%pint(i,k+1) + phys_state%pint(i,k))/2._kind_phys - phys_state%ps(i) = phys_state%ps(i) + phys_state%pdel(i,k) + pint(i,k+1) = pint(i,k) + pdel(i,k) + pmid(i,k) = (pint(i,k+1) + pint(i,k))/2._kind_phys + ps(i) = ps(i) + pdel(i,k) end do ! Calculate (natural) logarithms: - call shr_vmath_log(phys_state%pint(1:pcols,k), phys_state%lnpint(1:pcols,k), pcols) - call shr_vmath_log(phys_state%pmid(1:pcols,k), phys_state%lnpmid(1:pcols,k), pcols) + call shr_vmath_log(pint(1:pcols,k), lnpint(1:pcols,k), pcols) + call shr_vmath_log(pmid(1:pcols,k), lnpmid(1:pcols,k), pcols) end do - call shr_vmath_log(phys_state%pint(1:pcols,pverp),phys_state%lnpint(1:pcols,pverp),pcols) + call shr_vmath_log(pint(1:pcols,pverp),lnpint(1:pcols,pverp),pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols - phys_state%rpdel(i,k) = 1._kind_phys/phys_state%pdel(i,k) - exner(i,k) = (phys_state%pint(i,pver+1)/phys_state%pmid(i,k))**cappa + rpdel(i,k) = 1._kind_phys/pdel(i,k) + exner(i,k) = (pint(i,pver+1)/pmid(i,k))**cappa end do end do @@ -791,9 +764,9 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !Call geopotential_t CCPP scheme: call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & - pverp, 1, phys_state%lnpint, phys_state%pint, & - phys_state%pmid, phys_state%pdel, & - phys_state%rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & + pverp, 1, lnpint, pint, & + pmid, pdel, & + rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & rairv, gravit, zvirv, zi, zm, pcols, & errflg, errmsg) @@ -802,7 +775,7 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! 
Compute initial dry static energy, include surface geopotential do k = 1, pver do i = 1, pcols - phys_state%s(i,k) = cpair*phys_state%t(i,k) & + dse(i,k) = cpair*phys_state%t(i,k) & + gravit*zm(i,k) + phys_state%phis(i) end do end do diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index 042226ed..fb28f633 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -930,8 +930,8 @@ subroutine fvm_pg_init(elem, fvm, hybrid, nets, nete,irecons) fvm(ie)%norm_elem_coord_physgrid(2,i,j) =(tmpgnom%y-elem(ie)%corners(1)%y)/& (0.5_r8*real(fv_nphys, r8)*fvm(ie)%dalpha_physgrid)-1.0_r8 else - fvm(ie)%norm_elem_coord_physgrid(1,i,j) = 1E9_r8 - fvm(ie)%norm_elem_coord_physgrid(2,i,j) = 1E9_r8 + fvm(ie)%norm_elem_coord_physgrid(1,i,j) = 1.E9_r8 + fvm(ie)%norm_elem_coord_physgrid(2,i,j) = 1.E9_r8 end if end do end do @@ -945,15 +945,15 @@ subroutine fvm_pg_init(elem, fvm, hybrid, nets, nete,irecons) x1 = fvm(ie)%norm_elem_coord_physgrid(1,i,j) x2 = fvm(ie)%norm_elem_coord_physgrid(2,i,j) call Dmap(D(i,j,:,:),x1,x2,elem(ie)%corners3D,cubed_sphere_map,elem(ie)%corners,elem(ie)%u2qmap,elem(ie)%facenum) - detD = D(i,j,1,1)*D(i,j,2,2) - D(i,j,1,2)*D(i,j,2,1) - + detD = D(i,j,1,1)*D(i,j,2,2) - D(i,j,1,2)*D(i,j,2,1) + fvm(ie)%Dinv_physgrid(i,j,1,1) = D(i,j,2,2)/detD fvm(ie)%Dinv_physgrid(i,j,1,2) = -D(i,j,1,2)/detD fvm(ie)%Dinv_physgrid(i,j,2,1) = -D(i,j,2,1)/detD fvm(ie)%Dinv_physgrid(i,j,2,2) = D(i,j,1,1)/detD end do end do - end do + end do end if end subroutine fvm_pg_init diff --git a/src/dynamics/se/dycore/quadrature_mod.F90 b/src/dynamics/se/dycore/quadrature_mod.F90 index 47954f33..e9ad38c7 100644 --- a/src/dynamics/se/dycore/quadrature_mod.F90 +++ b/src/dynamics/se/dycore/quadrature_mod.F90 @@ -156,7 +156,7 @@ function gauss_pts(np1) result(pts) ! Compute first half of the roots by "polynomial deflation". ! ============================================================ - dth = pi/(2._r8*real(n+2, r8)) + dth = pi/(2._r8*real(n+1, r8)) nh = (n+1)/2 diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index ebf5ed43..893fda4e 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -576,7 +576,7 @@ subroutine dyn_init(cam_runtime_opts, dyn_in, dyn_out) use dynconst, only: cpair use dyn_thermo, only: get_molecular_diff_coef_reference !use cam_history, only: addfld, add_default, horiz_only, register_vector_field - !use gravity_waves_sources, only: gws_init + use gravity_waves_sources, only: gws_init use physics_types, only: ix_qv, ix_cld_liq !Use until constituents are fully-enabled -JN @@ -869,8 +869,7 @@ subroutine dyn_init(cam_runtime_opts, dyn_in, dyn_out) call prim_init2(elem, fvm, hybrid, nets, nete, TimeLevel, hvcoord) !$OMP END PARALLEL -!Uncomment once gravity waves are enabled -JN: -! if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) call gws_init(elem) + if (cam_runtime_opts%gw_front() .or. cam_runtime_opts%gw_front_igw()) call gws_init(elem) end if ! 
iam < par%nprocs !Remove/replace after CAMDEN history output is enabled -JN: diff --git a/src/dynamics/se/gravity_waves_sources.F90 b/src/dynamics/se/gravity_waves_sources.F90 new file mode 100644 index 00000000..2d148c35 --- /dev/null +++ b/src/dynamics/se/gravity_waves_sources.F90 @@ -0,0 +1,234 @@ +module gravity_waves_sources + use shr_kind_mod, only: r8 => shr_kind_r8 + + !SE dycore: + use derivative_mod, only: derivative_t + use dimensions_mod, only: np,nlev + use edgetype_mod, only: EdgeBuffer_t + use element_mod, only: element_t + use hybrid_mod, only: hybrid_t + + implicit none + private + save + + !! gravity_waves_sources created by S Santos, 10 Aug 2011 + !! + !! gws_src_fnct starts parallel environment and computes frontogenesis + !! for use by WACCM (via dp_coupling) + + public :: gws_src_fnct + public :: gws_init + private :: compute_frontogenesis + + type (EdgeBuffer_t) :: edge3 + type (derivative_t) :: deriv + real(r8) :: psurf_ref + +!---------------------------------------------------------------------- +CONTAINS +!---------------------------------------------------------------------- + + subroutine gws_init(elem) + + !SE dycore: + use parallel_mod, only: par + use edge_mod, only: initEdgeBuffer + use hycoef, only: hypi + use dimensions_mod, only: nlevp + use thread_mod, only: horz_num_threads + + ! Elem will be needed for future updates to edge code + type(element_t), pointer :: elem(:) + + ! Set up variables similar to dyn_comp and prim_driver_mod initializations + call initEdgeBuffer(par, edge3, elem, 3*nlev,nthreads=1) + + psurf_ref = hypi(nlevp) + + end subroutine gws_init + + subroutine gws_src_fnct(elem, tl, tlq, frontgf, frontga,nphys) + + use vert_coord, only: pver + use cam_abortutils, only: check_allocate + + !SE dycore: + use derivative_mod, only: derivinit + use dimensions_mod, only: npsq, nelemd + use dof_mod, only: UniquePoints + use hybrid_mod, only: config_thread_region, get_loop_ranges + use parallel_mod, only: par + use thread_mod, only: horz_num_threads + use dimensions_mod, only: fv_nphys + + type (element_t), intent(inout), dimension(:) :: elem + integer, intent(in) :: tl, nphys, tlq + real (kind=r8), intent(out) :: frontgf(nphys*nphys,pver,nelemd) + real (kind=r8), intent(out) :: frontga(nphys*nphys,pver,nelemd) + + ! Local variables + type (hybrid_t) :: hybrid + integer :: nets, nete, ithr, ncols, ie, iret + real(kind=r8), allocatable :: frontgf_thr(:,:,:,:) + real(kind=r8), allocatable :: frontga_thr(:,:,:,:) + + character(len=*), parameter :: subname = 'gws_src_fnct' + + ! This does not need to be a thread private data-structure + call derivinit(deriv) + !!$OMP PARALLEL NUM_THREADS(horz_num_threads), DEFAULT(SHARED), PRIVATE(nets,nete,hybrid,ie,ncols,frontgf_thr,frontga_thr) +! 
hybrid = config_thread_region(par,'horizontal') + hybrid = config_thread_region(par,'serial') + call get_loop_ranges(hybrid,ibeg=nets,iend=nete) + + allocate(frontgf_thr(nphys,nphys,nlev,nets:nete), stat=iret) + call check_allocate(iret, subname, & + 'frontgf_thr(nphys,nphys,nlev,nets:nete)', & + file=__FILE__, line=__LINE__) + + allocate(frontga_thr(nphys,nphys,nlev,nets:nete), stat=iret) + call check_allocate(iret, subname, & + 'frontga_thr(nphys,nphys,nlev,nets:nete)', & + file=__FILE__, line=__LINE__) + + call compute_frontogenesis(frontgf_thr,frontga_thr,tl,tlq,elem,deriv,hybrid,nets,nete,nphys) + if (fv_nphys>0) then + do ie=nets,nete + frontgf(:,:,ie) = RESHAPE(frontgf_thr(:,:,:,ie),(/nphys*nphys,nlev/)) + frontga(:,:,ie) = RESHAPE(frontga_thr(:,:,:,ie),(/nphys*nphys,nlev/)) + end do + else + do ie=nets,nete + ncols = elem(ie)%idxP%NumUniquePts + call UniquePoints(elem(ie)%idxP, nlev, frontgf_thr(:,:,:,ie), frontgf(1:ncols,:,ie)) + call UniquePoints(elem(ie)%idxP, nlev, frontga_thr(:,:,:,ie), frontga(1:ncols,:,ie)) + end do + end if + deallocate(frontga_thr) + deallocate(frontgf_thr) + !!$OMP END PARALLEL + + end subroutine gws_src_fnct + + subroutine compute_frontogenesis(frontgf,frontga,tl,tlq,elem,ederiv,hybrid,nets,nete,nphys) + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! compute frontogenesis function F + ! F = -gradth dot C + ! with: + ! theta = potential temperature + ! gradth = grad(theta) + ! C = ( gradth dot grad ) U + ! + ! Original by Mark Taylor, July 2011 + ! Change by Santos, 10 Aug 2011: + ! Integrated into gravity_waves_sources module, several arguments made global + ! to prevent repeated allocation/initialization + ! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + use physconst, only: cappa, dry_air_species_num,thermodynamic_active_species_num + use physconst, only: thermodynamic_active_species_idx_dycore + use dyn_grid, only: hvcoord + + !SE dycore: + use derivative_mod, only: gradient_sphere, ugradv_sphere + use edge_mod, only: edgevpack, edgevunpack + use bndry_mod, only: bndry_exchange + use dimensions_mod, only: fv_nphys,ntrac + use fvm_mapping, only: dyn2phys_vector,dyn2phys + + type(hybrid_t), intent(in) :: hybrid + type(element_t), intent(inout), target :: elem(:) + type(derivative_t), intent(in) :: ederiv + integer, intent(in) :: nets,nete,nphys + integer, intent(in) :: tl,tlq + real(r8), intent(out) :: frontgf(nphys,nphys,nlev,nets:nete) + real(r8), intent(out) :: frontga(nphys,nphys,nlev,nets:nete) + + ! local + real(r8) :: area_inv(fv_nphys,fv_nphys), tmp(np,np) + real(r8) :: uv_tmp(fv_nphys*fv_nphys,2,nlev) + real(r8) :: frontgf_gll(np,np,nlev,nets:nete) + real(r8) :: frontga_gll(np,np,nlev,nets:nete) + integer :: k,kptr,i,j,ie,component,h,nq,m_cnst + real(r8) :: gradth(np,np,2,nlev,nets:nete) ! grad(theta) + real(r8) :: p(np,np) ! pressure at mid points + real(r8) :: pint(np,np) ! pressure at interface points + real(r8) :: theta(np,np) ! potential temperature at mid points + real(r8) :: C(np,np,2), sum_water(np,np) + + do ie=nets,nete + ! pressure at model top + pint(:,:) = hvcoord%hyai(1) + do k=1,nlev + ! moist pressure at mid points + sum_water(:,:) = 1.0_r8 + do nq=dry_air_species_num+1,thermodynamic_active_species_num + m_cnst = thermodynamic_active_species_idx_dycore(nq) + ! + ! make sure Q is updated + ! 
+ sum_water(:,:) = sum_water(:,:) + elem(ie)%state%Qdp(:,:,k,m_cnst,tlq)/elem(ie)%state%dp3d(:,:,k,tl) + end do + p(:,:) = pint(:,:) + 0.5_r8*sum_water(:,:)*elem(ie)%state%dp3d(:,:,k,tl) + ! moist pressure at interface for next iteration + pint(:,:) = pint(:,:)+elem(ie)%state%dp3d(:,:,k,tl) + ! + theta(:,:) = elem(ie)%state%T(:,:,k,tl)*(psurf_ref / p(:,:))**cappa + ! gradth(:,:,:,k,ie) = gradient_sphere(theta,ederiv,elem(ie)%Dinv) + call gradient_sphere(theta,ederiv,elem(ie)%Dinv,gradth(:,:,:,k,ie)) + ! compute C = (grad(theta) dot grad ) u + C(:,:,:) = ugradv_sphere(gradth(:,:,:,k,ie), elem(ie)%state%v(:,:,:,k,tl),ederiv,elem(ie)) + ! gradth dot C + frontgf_gll(:,:,k,ie) = -( C(:,:,1)*gradth(:,:,1,k,ie) + C(:,:,2)*gradth(:,:,2,k,ie) ) + ! apply mass matrix + gradth(:,:,1,k,ie)=gradth(:,:,1,k,ie)*elem(ie)%spheremp(:,:) + gradth(:,:,2,k,ie)=gradth(:,:,2,k,ie)*elem(ie)%spheremp(:,:) + frontgf_gll(:,:,k,ie)=frontgf_gll(:,:,k,ie)*elem(ie)%spheremp(:,:) + enddo + ! pack + call edgeVpack(edge3, frontgf_gll(:,:,:,ie),nlev,0,ie) + call edgeVpack(edge3, gradth(:,:,:,:,ie),2*nlev,nlev,ie) + enddo + call bndry_exchange(hybrid,edge3,location='compute_frontogenesis') + do ie=nets,nete + call edgeVunpack(edge3, frontgf_gll(:,:,:,ie),nlev,0,ie) + call edgeVunpack(edge3, gradth(:,:,:,:,ie),2*nlev,nlev,ie) + ! apply inverse mass matrix, + do k=1,nlev + gradth(:,:,1,k,ie)=gradth(:,:,1,k,ie)*elem(ie)%rspheremp(:,:) + gradth(:,:,2,k,ie)=gradth(:,:,2,k,ie)*elem(ie)%rspheremp(:,:) + frontgf_gll(:,:,k,ie)=frontgf_gll(:,:,k,ie)*elem(ie)%rspheremp(:,:) + end do + if (fv_nphys>0) then + uv_tmp(:,:,:) = dyn2phys_vector(gradth(:,:,:,:,ie),elem(ie)) + do k=1,nlev + h=0 + do j=1,fv_nphys + do i=1,fv_nphys + h=h+1 + frontga(i,j,k,ie) = atan2 ( uv_tmp(h,2,k) , uv_tmp(h,1,k) + 1.e-10_r8 ) + end do + end do + end do + ! + ! compute inverse physgrid area for mapping of scaler + ! + tmp = 1.0_r8 + area_inv = dyn2phys(tmp,elem(ie)%metdet) + area_inv = 1.0_r8/area_inv + do k=1,nlev + frontgf(:,:,k,ie) = dyn2phys(frontgf_gll(:,:,k,ie),elem(ie)%metdet,area_inv) + end do + else + do k=1,nlev + frontgf(:,:,k,ie)=frontgf_gll(:,:,k,ie) + ! Frontogenesis angle + frontga(:,:,k,ie) = atan2 ( gradth(:,:,2,k,ie) , gradth(:,:,1,k,ie) + 1.e-10_r8 ) + end do + end if + enddo + end subroutine compute_frontogenesis + + +end module gravity_waves_sources diff --git a/src/dynamics/utils/dyn_thermo.F90 b/src/dynamics/utils/dyn_thermo.F90 index 989da040..1fde6612 100644 --- a/src/dynamics/utils/dyn_thermo.F90 +++ b/src/dynamics/utils/dyn_thermo.F90 @@ -10,25 +10,25 @@ module dyn_thermo use cam_abortutils, only: check_allocate implicit none - public - - !Subroutines contained in this module are: - ! - ! get_cp - ! get_cp_dry - ! get_kappa_dry - ! get_ps - ! get_dp - ! get_dp_ref - ! get_sum_species - ! get_molecular_diff_coef - ! get_molecular_diff_coef_reference - ! get_rho_dry - ! get_gz_given_dp_Tv_Rdry - ! get_virtual_temp - ! get_R_dry - ! get_exner - ! 
get_thermal_energy + private + + !Public subroutines contained in this module are: + + public :: get_cp + public :: get_cp_dry + public :: get_kappa_dry + public :: get_ps + public :: get_dp + public :: get_dp_ref + public :: get_sum_species + public :: get_molecular_diff_coef + public :: get_molecular_diff_coef_reference + public :: get_rho_dry + public :: get_gz_given_dp_Tv_Rdry + public :: get_virtual_temp + public :: get_R_dry + public :: get_exner + public :: get_thermal_energy !============================================================================== CONTAINS @@ -59,52 +59,70 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species ! integer, optional, intent(in) :: active_species_idx_dycore(:) -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp, & - dp_dry=dp_dry, & - active_species_idx_dycore=active_species_idx_dycore) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,ntrac) - real(kind_phys) :: cp_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: cp_phys(:,:,:) real(kind_phys), allocatable :: dp_dry_phys(:,:,:) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_cp (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(dp_dry)) then - allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp, & + dp_dry=dp_dry, & + active_species_idx_dycore=active_species_idx_dycore) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0:k1,ntrac), stat=iret) call check_allocate(iret, subname, & - 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + 'tracer_phys(i0:i1,j0:j1,k0:k1,ntrac)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - dp_dry_phys = real(dp_dry, kind_phys) - end if + allocate(cp_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'cp_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) - !Call physics routine using local vriables with matching kinds: - call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,inv_cp,cp_phys, & - dp_dry=dp_dry_phys, & - active_species_idx_dycore=active_species_idx_dycore) - !Set output variables back to dynamics kind: - cp = real(cp_phys, kind_dyn) + !Set local input variables: + tracer_phys = real(tracer, kind_phys) - !Deallocate variables: - if (allocated(dp_dry_phys)) then - deallocate(dp_dry_phys) - end if + !Allocate and set optional variables: + if (present(dp_dry)) then + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + dp_dry_phys = real(dp_dry, kind_phys) + end if + + !Call physics routine using local vriables with matching kinds: + call get_cp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,inv_cp,cp_phys, & + dp_dry=dp_dry_phys, & + active_species_idx_dycore=active_species_idx_dycore) + + !Set output variables back to dynamics kind: + cp = real(cp_phys, kind_dyn) -#endif + !Deallocate variables: + deallocate(tracer_phys) + deallocate(cp_phys) + + if (allocated(dp_dry_phys)) then + 
deallocate(dp_dry_phys) + end if + + + end if !kind check end subroutine get_cp ! @@ -126,50 +144,67 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) real(kind_dyn), intent(out) :: cp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,& - cp_dry, fact=fact) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) - real(kind_phys) :: cp_dry_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: cp_dry_phys(:,:,:) real(kind_phys), allocatable :: fact_phys(:,:,:) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_cp_dry (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,& + cp_dry, fact=fact) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)', & + file=__FILE__, line=__LINE__) + - if (present(fact)) then - allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret) + allocate(cp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) call check_allocate(iret, subname, & - 'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', & + 'cp_dry_phys(i0:i1,j0:j1,k0:k1)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - fact_phys = real(fact, kind_phys) - end if - !Call physics routine using local vriables with matching kinds: - call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys,active_species_idx,& - cp_dry_phys, fact=fact_phys) + !Set local variables: + tracer_phys = real(tracer, kind_phys) - !Set output variables back to dynamics kind: - cp_dry = real(cp_dry_phys, kind_dyn) + if (present(fact)) then + allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret) + call check_allocate(iret, subname, & + 'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', & + file=__FILE__, line=__LINE__) - !Deallocate variables: - if (allocated(fact_phys)) then - deallocate(fact_phys) - end if + !Set optional local variable: + fact_phys = real(fact, kind_phys) + end if -#endif + !Call physics routine using local vriables with matching kinds: + call get_cp_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys,active_species_idx,& + cp_dry_phys, fact=fact_phys) + + !Set output variables back to dynamics kind: + cp_dry = real(cp_dry_phys, kind_dyn) + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(cp_dry_phys) + + if (allocated(fact_phys)) then + deallocate(fact_phys) + end if + + end if !kind check end subroutine get_cp_dry ! 
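
The same run-time kind check and copy-in/copy-out conversion recurs in every wrapper
in this module. A minimal sketch of the pattern, with purely illustrative names
(get_example and get_example_phys are not actual CAM routines), assuming kind_dyn,
kind_phys, and check_allocate are available as above:

   subroutine get_example(n, field, res)
      integer,         intent(in)  :: n
      real(kind_dyn),  intent(in)  :: field(n)
      real(kind_dyn),  intent(out) :: res(n)

      !Local kind_phys copies of the dynamics-kind arguments:
      real(kind_phys), allocatable :: field_phys(:)
      real(kind_phys), allocatable :: res_phys(:)

      !check_allocate variables:
      integer :: iret !allocate status integer
      character(len=*), parameter :: subname = 'get_example (dyn)'

      !Check if kinds are different:
      if (kind_phys == kind_dyn) then

         !The dynamics and physics kind is the same, so just call the
         !physics routine directly:
         call get_example_phys(n, field, res)

      else

         !Allocate kind_phys copies of the inputs and outputs:
         allocate(field_phys(n), stat=iret)
         call check_allocate(iret, subname, 'field_phys(n)', &
                             file=__FILE__, line=__LINE__)

         allocate(res_phys(n), stat=iret)
         call check_allocate(iret, subname, 'res_phys(n)', &
                             file=__FILE__, line=__LINE__)

         !Set local input variables:
         field_phys = real(field, kind_phys)

         !Call physics routine using local variables with matching kinds:
         call get_example_phys(n, field_phys, res_phys)

         !Set output variables back to dynamics kind:
         res = real(res_phys, kind_dyn)

         !Deallocate variables:
         deallocate(field_phys)
         deallocate(res_phys)

      end if !kind check

   end subroutine get_example

When the kinds match, the temporary copies (and their allocations) are skipped
entirely, which is why the temporaries are allocatable arrays rather than the
automatic arrays used in the previous CPP-based version.
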
@@ -191,50 +226,66 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx, real(kind_dyn), intent(out) :: kappa_dry(i0:i1,j0:j1,k0:k1) !kappa dry real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,& - fact=fact) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac) - real(kind_phys) :: kappa_dry_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: kappa_dry_phys(:,:,:) real(kind_phys), allocatable :: fact_phys(:,:,:) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_kappa_dry (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then + + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,kappa_dry,& + fact=fact) - if (present(fact)) then - allocate(fact_phys(i0:i1,j0:j1,nlev), stat=iret) + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,nlev,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,nlev,1:ntrac)', & + file=__FILE__, line=__LINE__) + + allocate(kappa_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) call check_allocate(iret, subname, & - 'fact_phys(i0:i1,j0:j1,nlev)', & + 'kappa_dry_phys(i0:i1,j0:j1,k0:k1)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - fact_phys = real(fact, kind_phys) - end if + !Set local variables: + tracer_phys = real(tracer, kind_phys) - !Call physics routine using local vriables with matching kinds: - call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer_phys,active_species_idx,& - kappa_dry_phys, fact=fact_phys) + if (present(fact)) then + allocate(fact_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'fact_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) - !Set output variables back to dynamics kind: - kappa_dry = real(kaapa_dry_phys, kind_dyn) + !Set optional local variable: + fact_phys = real(fact, kind_phys) + end if - !Deallocate variables: - if (allocated(fact_phys)) then - deallocate(fact_phys) - end if + !Call physics routine using local vriables with matching kinds: + call get_kappa_dry_phys(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer_phys,active_species_idx,& + kappa_dry_phys, fact=fact_phys) -#endif + !Set output variables back to dynamics kind: + kappa_dry = real(kappa_dry_phys, kind_dyn) + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(kappa_dry_phys) + + if (allocated(fact_phys)) then + deallocate(fact_phys) + end if + + end if !kind check end subroutine get_kappa_dry ! @@ -257,32 +308,59 @@ subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry, real(kind_dyn), intent(in) :: ptop integer, intent(in) :: active_species_idx(:) -#ifndef DYN_PHYS_KIND_DIFF + !Declare local variables: + real(kind_phys), allocatable :: tracer_mass_phys(:,:,:,:) ! Tracer array + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) ! dry pressure level thickness + real(kind_phys), allocatable :: ps_phys(:,:) ! 
surface pressure - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop) + real(kind_phys) :: ptop_phys -#else + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_ps (dyn)' - !Declare local variables: - real(kind_phys) :: tracer_mass_phys(i0:i1,j0:j1,k0:k1,1:ntrac) ! Tracer array - real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness - real(kind_phys) :: ps_phys(i0:i1,j0:j1) ! surface pressure - real(kind_phys) :: ptop_phys + !Check if kinds are different: + if (kind_phys == kind_dyn) then - !Set local variables: - tracer_mass_phys = real(tracer_mass, kind_phys) - dp_dry_phys = real(dp_dry, kind_phys) - ptop_phys = real(ptop, kind_phys) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop) - !Call physics routine using local vriables with matching kinds: - call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,active_species_idx,dp_dry_phys,ps_phys,ptop_phys) + else - !Set output variables back to dynamics kind: - ps = real(ps_phys, kind_dyn) + !Allocate local variables: + allocate(tracer_mass_phys(i0:i1,j0:j1,k0:k1,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_mass_phys(i0:i1,j0:j1,k0:k1,1:ntrac)', & + file=__FILE__, line=__LINE__) -#endif + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(ps_phys(i0:i1,j0:j1), stat=iret) + call check_allocate(iret, subname, & + 'ps_phys(i0:i1,j0:j1)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + tracer_mass_phys = real(tracer_mass, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + ptop_phys = real(ptop, kind_phys) + + !Call physics routine using local vriables with matching kinds: + call get_ps_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,active_species_idx,dp_dry_phys,ps_phys,ptop_phys) + + !Set output variables back to dynamics kind: + ps = real(ps_phys, kind_dyn) + + !Deallocate variables: + deallocate(tracer_mass_phys) + deallocate(dp_dry_phys) + deallocate(ps_phys) + + end if !kind check end subroutine get_ps ! @@ -313,64 +391,85 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx ! must be present) real(kind_dyn), optional,intent(in) :: ptop ! 
pressure at model top -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac) - real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys) :: dp_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys), allocatable :: ps(:,:) - real(kind_phys), allocatable :: ptop + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) + real(kind_phys), allocatable :: dp_phys(:,:,:) + real(kind_phys), allocatable :: ps_phys(:,:) + real(kind_phys), allocatable :: ptop_phys !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_dp (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) - dp_dry_phys = real(dp_dry, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx,dp_dry,dp,ps,ptop) - if (present(ptop)) then - allocate(ptop_phys, stat=iret) - call check_allocate(iret, subname, 'ptop', & + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - ptop_phys = real(ptop, kind_phys) - end if + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) - if (present(ps)) then - allocate(ps(i0:i1,j0:j1), stat=iret) + allocate(dp_phys(i0:i1,j0:j1,k0:k1), stat=iret) call check_allocate(iret, subname, & - 'ps(i0:i1,j0:j1)', & + 'dp_phys(i0:i1,j0:j1,k0:k1)', & file=__FILE__, line=__LINE__) - end if - !Call physics routine using local vriables with matching kinds: - call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,mixing_ratio,& - active_species_idx,dp_dry_phys,dp_phys,ps_phys,ptop_phys) + !Set local variables: + tracer_phys = real(tracer, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + if (present(ptop)) then + allocate(ptop_phys, stat=iret) + call check_allocate(iret, subname, 'ptop_phys', & + file=__FILE__, line=__LINE__) - !Set output variables back to dynamics kind: - dp = real(dp_phys, kind_dyn) + !Set optional local variable: + ptop_phys = real(ptop, kind_phys) + end if - if (present(ps)) then - ps = real(ps_phys, kind_dyn) - deallocate(ps_phys) - end if + if (present(ps)) then + allocate(ps_phys(i0:i1,j0:j1), stat=iret) + call check_allocate(iret, subname, & + 'ps_phys(i0:i1,j0:j1)', & + file=__FILE__, line=__LINE__) + end if - !Deallocate variables: - if (allocated(ptop_phys)) then - deallocate(ptop_phys) - end if + !Call physics routine using local vriables with matching kinds: + call get_dp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,mixing_ratio,& + active_species_idx,dp_dry_phys,dp_phys,ps_phys,ptop_phys) -#endif + + !Set output variables back to dynamics kind: + dp = real(dp_phys, kind_dyn) + + if (present(ps)) then + ps = real(ps_phys, kind_dyn) + deallocate(ps_phys) + end if + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(dp_dry_phys) + deallocate(dp_phys) + + if (allocated(ptop_phys)) then + deallocate(ptop_phys) + 
end if + + end if !kind check end subroutine get_dp ! @@ -392,37 +491,76 @@ subroutine get_dp_ref(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) real(kind_dyn), intent(out) :: dp_ref(i0:i1,j0:j1,k0:k1) real(kind_dyn), intent(out) :: ps_ref(i0:i1,j0:j1) -#ifndef DYN_PHYS_KIND_DIFF + !Declare local variables: + real(kind_phys), allocatable :: hyai_phys(:) + real(kind_phys), allocatable :: hybi_phys(:) + real(kind_phys), allocatable :: phis_phys(:,:) + real(kind_phys), allocatable :: dp_ref_phys(:,:,:) + real(kind_phys), allocatable :: ps_ref_phys(:,:) - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_dp_ref_phys(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) + real(kind_phys) :: ps0_phys -#else + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_dp_ref (dyn)' - !Declare local variables: - real(kind_phys) :: hyai_phys(k0:k1+1) - real(kind_phys) :: hybi_phys(k0:k1+1) - real(kind_phys) :: ps0_phys - real(kind_phys) :: phis_phys(i0:i1,j0:j1) - real(kind_phys) :: dp_ref_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys) :: ps_ref_phys(i0:i1,j0:j1) + !Check if kinds are different: + if (kind_phys == kind_dyn) then + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_dp_ref_phys(hyai, hybi, ps0, i0,i1,j0,j1,k0,k1,phis,dp_ref,ps_ref) + + else + + !Allocate local variables: + allocate(hyai_phys(k0:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'hyai_phys(k0:k1+1)', & + file=__FILE__, line=__LINE__) + + allocate(hybi_phys(k0:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'hybi_phys(k0:k1+1)', & + file=__FILE__, line=__LINE__) + + allocate(phis_phys(i0:i1,j0:j1), stat=iret) + call check_allocate(iret, subname, & + 'phis_phys(i0:i1,j0:j1)', & + file=__FILE__, line=__LINE__) + + allocate(dp_ref_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_ref_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) - !Set local variables: - hyai_phys = real(hyai, kind_phys) - hybi_phys = real(hybi, kind_phys) - ps0_phys = real(ps0, kind_phys) - phis_phys = real(phis, kind_phys) + allocate(ps_ref_phys(i0:i1,j0:j1), stat=iret) + call check_allocate(iret, subname, & + 'ps_ref_phys(i0:i1,j0:j1)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + hyai_phys = real(hyai, kind_phys) + hybi_phys = real(hybi, kind_phys) + ps0_phys = real(ps0, kind_phys) + phis_phys = real(phis, kind_phys) - !Call physics routine using local vriables with matching kinds: - call get_dp_ref_phys(hyai_phys, hybi_phys, ps0_phys, i0,i1,j0,j1,k0,& - k1, phis_phys, dp_ref_phys, ps_ref_phys) + !Call physics routine using local vriables with matching kinds: + call get_dp_ref_phys(hyai_phys, hybi_phys, ps0_phys, i0,i1,j0,j1,k0,& + k1, phis_phys, dp_ref_phys, ps_ref_phys) - !Set output variables back to dynamics kind: - dp_ref = real(dp_ref_phys, kind_dyn) - ps_ref = real(ps_ref_phys, kind_dyn) + !Set output variables back to dynamics kind: + dp_ref = real(dp_ref_phys, kind_dyn) + ps_ref = real(ps_ref_phys, kind_dyn) -#endif + !Deallocate variables: + deallocate(hyai_phys) + deallocate(hybi_phys) + deallocate(phis_phys) + deallocate(dp_ref_phys) + deallocate(ps_ref_phys) + + end if !kind check end subroutine get_dp_ref ! @@ -448,50 +586,65 @@ subroutine get_sum_species(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum ! 
then tracer is in units of mass real(kind_dyn), intent(out) :: sum_species(i0:i1,j0:j1,k0:k1) ! sum species -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum_species, & - dp_dry=dp_dry) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracer array - real(kind_phys) :: sum_species_phys(i0:i1,j0:j1,k0:k1) ! sum species + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) ! tracer array + real(kind_phys), allocatable :: sum_species_phys(:,:,:) ! sum species real(kind_phys), allocatable :: dp_dry_phys(:,:,:) ! dry pressure level thickness is present !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_sum_species (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(dp_dry)) then - allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum_species, & + dp_dry=dp_dry) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac), stat=iret) call check_allocate(iret, subname, & - 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + 'tracer_phys(i0:i1,j0:j1,k0:k1,1:ntrac)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - dp_dry_phys = real(dp_dry, kind_phys) - end if + allocate(sum_species_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'sum_species_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) - !Call physics routine using local vriables with matching kinds: - call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,active_species_idx,sum_species_phys, & - dp_dry=dp_dry_phys) + !Set local variables: + tracer_phys = real(tracer, kind_phys) - !Set output variables back to dynamics kind: - sum_species = real(sum_species_phys, kind_dyn) + if (present(dp_dry)) then + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) - !Deallocate variables: - if (allocated(dp_dry_phys)) then - deallocate(dp_dry_phys) - end if + !Set optional local variable: + dp_dry_phys = real(dp_dry, kind_phys) + end if + + !Call physics routine using local vriables with matching kinds: + call get_sum_species_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,active_species_idx,sum_species_phys, & + dp_dry=dp_dry_phys) + + !Set output variables back to dynamics kind: + sum_species = real(sum_species_phys, kind_dyn) + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(sum_species_phys) -#endif + if (allocated(dp_dry_phys)) then + deallocate(dp_dry_phys) + end if + + end if !kind check end subroutine get_sum_species ! @@ -522,25 +675,12 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp ! fact converts to dry mixing ratio: tracer/fact real(kind_dyn), intent(in), optional :: mbarv_in(i0:i1,j0:j1,1:k1) ! 
composition dependent atmosphere mean mass -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces, & - sponge_factor,kmvis,kmcnd,ntrac, & - tracer, & - fact=fact, & - active_species_idx_dycore=active_species_idx_dycore, & - mbarv_in=mbarv_in) - -#else - !Declare local variables: - real(kind_phys) :: temp_phys(i0:i1,j0:j1,nlev) - real(kind_phys) :: sponge_factor_phys(1:k1) - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac) - real(kind_phys) :: kmvis_phys(i0:i1,j0:j1,1:k1+get_at_interfaces) - real(kind_phys) :: kmcnd_phys(i0:i1,j0:j1,1:k1+get_at_interfaces) + real(kind_phys), allocatable :: temp_phys(:,:,:) + real(kind_phys), allocatable :: sponge_factor_phys(:) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: kmvis_phys(:,:,:) + real(kind_phys), allocatable :: kmcnd_phys(:,:,:) real(kind_phys), allocatable :: fact_phys(:,:,:) real(kind_phys), allocatable :: mbarv_in_phys(:,:,:) @@ -548,52 +688,97 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_molecular_diff_coef (dyn)' - !Set local variables: - temp_phys = real(temp, kind_phys) - tracer_phys = real(tracer, kind_phys) - sponge_factor_phys = real(sponge_factor, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(fact)) then - allocate(fact_phys(i0:i1,j0:j1,k1), stat=iret) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces, & + sponge_factor,kmvis,kmcnd,ntrac, & + tracer, & + fact=fact, & + active_species_idx_dycore=active_species_idx_dycore, & + mbarv_in=mbarv_in) + + else + + !Allocate local variables: + allocate(temp_phys(i0:i1,j0:j1,nlev), stat=iret) call check_allocate(iret, subname, & - 'fact_phys(i0:i1,j0:j1,k1)', & + 'temp_phys(i0:i1,j0:j1,nlev)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - fact_phys = real(fact, kind_phys) - end if - if (present(mbarv_in)) then - allocate(mbarv_in_phys(i0:i1,j0:j1,1:k1), stat=iret) + allocate(sponge_factor_phys(1:k1), stat=iret) call check_allocate(iret, subname, & - 'mbarv_in_phys(i0:i1,j0:j1,1:k1)', & + 'sponge_factor_phys(1:k1)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - mbarv_in_phys = real(mbarv_in, kind_phys) - end if + allocate(tracer_phys(i0:i1,j0:j1,nlev,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,nlev,1:ntrac)', & + file=__FILE__, line=__LINE__) - !Call physics routine using local vriables with matching kinds: - call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp_phys,get_at_interfaces, & - sponge_factor_phys,kmvis_phys,kmcnd_phys,ntrac, & - tracer_phys, & - fact=fact_phys, & - active_species_idx_dycore=active_species_idx_dycore,& - mbarv_in=mbarv_in_phys) + allocate(kmvis_phys(i0:i1,j0:j1,1:k1+get_at_interfaces), stat=iret) + call check_allocate(iret, subname, & + 'kmvis_phys(i0:i1,j0:j1,1:k1+get_at_interfaces)', & + file=__FILE__, line=__LINE__) - !Set output variables back to dynamics kind: - kmvis = real(kmvis_phys, kind_dyn) - kmcnd = real(kmcnd_phys, kind_dyn) + allocate(kmcnd_phys(i0:i1,j0:j1,1:k1+get_at_interfaces), stat=iret) + call check_allocate(iret, subname, & + 'kmcnd_phys(i0:i1,j0:j1,1:k1+get_at_interfaces)', & + 
file=__FILE__, line=__LINE__) - !Deallocate variables: - if (allocated(fact_phys)) then - deallocate(fact_phys) - end if + !Set local variables: + temp_phys = real(temp, kind_phys) + tracer_phys = real(tracer, kind_phys) + sponge_factor_phys = real(sponge_factor, kind_phys) + + if (present(fact)) then + allocate(fact_phys(i0:i1,j0:j1,k1), stat=iret) + call check_allocate(iret, subname, & + 'fact_phys(i0:i1,j0:j1,k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + fact_phys = real(fact, kind_phys) + end if + if (present(mbarv_in)) then + allocate(mbarv_in_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'mbarv_in_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + mbarv_in_phys = real(mbarv_in, kind_phys) + end if + + !Call physics routine using local vriables with matching kinds: + call get_molecular_diff_coef_phys(i0,i1,j0,j1,k1,nlev,temp_phys,get_at_interfaces, & + sponge_factor_phys,kmvis_phys,kmcnd_phys,ntrac, & + tracer_phys, & + fact=fact_phys, & + active_species_idx_dycore=active_species_idx_dycore,& + mbarv_in=mbarv_in_phys) + + !Set output variables back to dynamics kind: + kmvis = real(kmvis_phys, kind_dyn) + kmcnd = real(kmcnd_phys, kind_dyn) + + !Deallocate variables: + deallocate(temp_phys) + deallocate(sponge_factor_phys) + deallocate(tracer_phys) + deallocate(kmvis_phys) + deallocate(kmcnd_phys) - if (allocated(mbarv_in_phys)) then - deallocate(mbarv_in_phys) - end if + if (allocated(fact_phys)) then + deallocate(fact_phys) + end if + if (allocated(mbarv_in_phys)) then + deallocate(mbarv_in_phys) + end if -#endif + end if !kind check end subroutine get_molecular_diff_coef ! @@ -617,41 +802,80 @@ subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvi real(kind_dyn), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient real(kind_dyn), intent(out) :: rho_ref(k0:k1) !reference density -#ifndef DYN_PHYS_KIND_DIFF + !Declare local variables: + real(kind_phys), allocatable :: press_phys(:) + real(kind_phys), allocatable :: sponge_factor_phys(:) + real(kind_phys), allocatable :: kmvis_ref_phys(:) + real(kind_phys), allocatable :: kmcnd_ref_phys(:) + real(kind_phys), allocatable :: rho_ref_phys(:) - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_molecular_diff_coef_reference_phys(k0,k1,tref,press,& - sponge_factor,& - kmvis_ref,kmcnd_ref,rho_ref) + real(kind_phys) :: tref_phys -#else + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_molecular_diff_coef_reference (dyn)' - !Declare local variables: - real(kind_phys) :: tref_phys - real(kind_phys) :: press_phys(k0:k1) - real(kind_phys) :: sponge_factor_phys(k0:k1) - real(kind_phys) :: kmvis_ref_phys(k0:k1) - real(kind_phys) :: kmcnd_ref_phys(k0:k1) - real(kind_phys) :: rho_ref_phys(k0:k1) - - !Set local variables: - tref_phys = real(tref, kind_phys) - press_phys = real(press, kind_phys) - sponge_factor_phys = real(sponge_factor, kind_phys) - - !Call physics routine using local vriables with matching kinds: - call get_molecular_diff_coef_reference_phys(k0,k1,tref_phys,press_phys,& - sponge_factor_phys,& - kmvis_ref_phys,kmcnd_ref_phys,& - rho_ref_phys) - - !Set output variables back to dynamics kind: - tref = real(tref_phys, kind_dyn) - press = real(press_phys, kind_dyn) - sponge_factor = real(sponge_factor_phys, kind_dyn) - -#endif + !Check if kinds are different: + if 
(kind_phys == kind_dyn) then + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_molecular_diff_coef_reference_phys(k0,k1,tref,press,& + sponge_factor,& + kmvis_ref,kmcnd_ref,rho_ref) + + else + + !Allocate local variables: + allocate(press_phys(k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'press_phys(k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(sponge_factor_phys(k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'sponge_factor_phys(k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(kmvis_ref_phys(k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'kmvis_ref_phys(k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(kmcnd_ref_phys(k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'kmcnd_ref_phys(k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(rho_ref_phys(k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'rho_ref_phys(k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + tref_phys = real(tref, kind_phys) + press_phys = real(press, kind_phys) + sponge_factor_phys = real(sponge_factor, kind_phys) + + !Call physics routine using local vriables with matching kinds: + call get_molecular_diff_coef_reference_phys(k0,k1,tref_phys,press_phys,& + sponge_factor_phys,& + kmvis_ref_phys,kmcnd_ref_phys,& + rho_ref_phys) + + !Set output variables back to dynamics kind: + kmvis_ref = real(kmvis_ref_phys, kind_dyn) + kmcnd_ref = real(kmcnd_ref_phys, kind_dyn) + rho_ref = real(rho_ref_phys, kind_dyn) + + !Deallocate variables: + deallocate(press_phys) + deallocate(sponge_factor_phys) + deallocate(kmvis_ref_phys) + deallocate(kmcnd_ref_phys) + deallocate(rho_ref_phys) + + end if !kind check end subroutine get_molecular_diff_coef_reference ! 
@@ -685,94 +909,117 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_ real(kind_phys),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1) real(kind_phys),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1) -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop, & - dp_dry,tracer_mass, & - rho_dry=rho_dry, & - rhoi_dry=rhoi_dry, & - active_species_idx_dycore=active_species_idx_dycore, & - pint_out=pint_out, & - pmid_out=pmid_out) - -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,ntrac) - real(kind_phys) :: temp_phys(i0:i1,j0:j1,1:nlev) - real(kind_phys) :: ptop_phys - real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,nlev) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: temp_phys(:,:,:) + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) real(kind_phys), allocatable :: rho_dry_phys(:,:,:) real(kind_phys), allocatable :: rhoi_dry_phys(:,:,:) real(kind_phys), allocatable :: pint_out_phys(:,:,:) real(kind_phys), allocatable :: pmid_out_phys(:,:,:) + real(kind_phys) :: ptop_phys + !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_rho_dry (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) - temp_phys = real(temp, kind_phys) - ptop_phys = real(ptop, kind_phys) - dp_dry_phys = real(dp_dry, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(rho_dry)) then - allocate(rho_dry_phys(i0:i1,j0:j1,1:k1), stat=iret) - call check_allocate(iret, subname, & - 'rho_dry_phys(i0:i1,j0:j1,1:k1)', & - file=__FILE__, line=__LINE__) - end if - if (present(rhoi_dry)) then - allocate(rhoi_dry_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop, & + dp_dry,tracer_mass, & + rho_dry=rho_dry, & + rhoi_dry=rhoi_dry, & + active_species_idx_dycore=active_species_idx_dycore, & + pint_out=pint_out, & + pmid_out=pmid_out) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,nlev,ntrac), stat=iret) call check_allocate(iret, subname, & - 'rhoi_dry_phys(i0:i1,j0:j1,1:k1+1)', & + 'tracer_phys(i0:i1,j0:j1,nlev,ntrac)', & file=__FILE__, line=__LINE__) - end if - if (present(pint_out)) then - allocate(pint_out_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + allocate(temp_phys(i0:i1,j0:j1,1:nlev), stat=iret) call check_allocate(iret, subname, & - 'pint_out_phys(i0:i1,j0:j1,1:k1+1)', & + 'temp_phys(i0:i1,j0:j1,1:nlev)', & file=__FILE__, line=__LINE__) - end if - if (present(pmid_out)) then - allocate(pmid_out_phys(i0:i1,j0:j1,1:k1), stat=iret) + + allocate(dp_dry_phys(i0:i1,j0:j1,nlev), stat=iret) call check_allocate(iret, subname, & - 'pmid_out_phys(i0:i1,j0:j1,1:k1)', & + 'dp_dry_phys(i0:i1,j0:j1,nlev)', & file=__FILE__, line=__LINE__) - end if - - !Call physics routine using local vriables with matching kinds: - call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer_phys,temp_phys, & - ptop_phys, dp_dry_phys,tracer_mass, & - rho_dry=rho_dry_phys, & - rhoi_dry=rhoi_dry_phys, & - active_species_idx_dycore=active_species_idx_dycore, & - pint_out=pint_out_phys, & - pmid_out=pmid_out_phys) - - !Set output variables back to dynamics kind: - if (present(rho_dry)) then - rho_dry = real(rho_dry_phys, kind_dyn) - 
deallocate(rho_dry_phys) - end if - if (present(rhoi_dry)) then - rhoi_dry = real(rhoi_dry_phys, kind_dyn) - deallocate(rhoi_dry_phys) - end if - if (present(pint_out)) then - pint_out = real(pint_out_phys, kind_dyn) - deallocate(pint_out_phys) - end if - if (present(pmid_out)) then - pmid_out = real(pmid_out_phys, kind_dyn) - deallocate(pmid_out_phys) - end if - -#endif + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + temp_phys = real(temp, kind_phys) + ptop_phys = real(ptop, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + + if (present(rho_dry)) then + allocate(rho_dry_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'rho_dry_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + end if + if (present(rhoi_dry)) then + allocate(rhoi_dry_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'rhoi_dry_phys(i0:i1,j0:j1,1:k1+1)', & + file=__FILE__, line=__LINE__) + + end if + if (present(pint_out)) then + allocate(pint_out_phys(i0:i1,j0:j1,1:k1+1), stat=iret) + call check_allocate(iret, subname, & + 'pint_out_phys(i0:i1,j0:j1,1:k1+1)', & + file=__FILE__, line=__LINE__) + end if + if (present(pmid_out)) then + allocate(pmid_out_phys(i0:i1,j0:j1,1:k1), stat=iret) + call check_allocate(iret, subname, & + 'pmid_out_phys(i0:i1,j0:j1,1:k1)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: + call get_rho_dry_phys(i0,i1,j0,j1,k1,nlev,ntrac,tracer_phys,temp_phys, & + ptop_phys, dp_dry_phys,tracer_mass, & + rho_dry=rho_dry_phys, & + rhoi_dry=rhoi_dry_phys, & + active_species_idx_dycore=active_species_idx_dycore, & + pint_out=pint_out_phys, & + pmid_out=pmid_out_phys) + + !Set output variables back to dynamics kind: + if (present(rho_dry)) then + rho_dry = real(rho_dry_phys, kind_dyn) + deallocate(rho_dry_phys) + end if + if (present(rhoi_dry)) then + rhoi_dry = real(rhoi_dry_phys, kind_dyn) + deallocate(rhoi_dry_phys) + end if + if (present(pint_out)) then + pint_out = real(pint_out_phys, kind_dyn) + deallocate(pint_out_phys) + end if + if (present(pmid_out)) then + pmid_out = real(pmid_out_phys, kind_dyn) + deallocate(pmid_out_phys) + end if + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(temp_phys) + deallocate(dp_dry_phys) + + end if !kind check end subroutine get_rho_dry ! @@ -796,64 +1043,91 @@ subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pm real(kind_dyn), intent(out) :: gz(i0:i1,j0:j1,nlev) ! geopotential real(kind_dyn), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! 
mid-level pressure -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,& - pmid=pmid) - -#else - !Declare local variables: - real(kind_phys) :: dp_phys(i0:i1,j0:j1,nlev) - real(kind_phys) :: T_v_phys(i0:i1,j0:j1,nlev) - real(kind_phys) :: R_dry_phys(i0:i1,j0:j1,nlev) - real(kind_phys) :: phis_phys(i0:i1,j0:j1) + real(kind_phys), allocatable :: dp_phys(:,:,:) + real(kind_phys), allocatable :: T_v_phys(:,:,:) + real(kind_phys), allocatable :: R_dry_phys(:,:,:) + real(kind_phys), allocatable :: phis_phys(:,:) + real(kind_phys), allocatable :: gz_phys(:,:,:) + real(kind_phys), allocatable :: pmid_phys(:,:,:) + real(kind_phys) :: ptop_phys - real(kind_phys) :: gz_phys(i0:i1,j0:j1,nlev) - real(kind_phys), allocatable :: pmid_phys(:,:,nlev) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_gz_given_dp_Tv_Rdry (dyn)' - !Set local variables: - dp_phys = real(dp, kind_phys) - T_v_phys = real(T_v, kind_phys) - R_dry_phys = real(R_dry, kind_phys) - phis_phys = real(phis, kind_phys) - ptop_phys = real(ptop, kind_phys) - - if (present(pmid)) then - !Allocate variable if optional argument is present: - allocate(pmid_phys(i0:i1,j0:j1,nlev), stat=iret) - call check_allocate(iret, subname, 'pmid_phys(i0:i1,j0:j1,nlev)', & + !Check if kinds are different: + if (kind_phys == kind_dyn) then + + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,& + pmid=pmid) + + else + + !Allocate local variables: + allocate(dp_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'dp_phys(i0:i1,j0:j1,nlev)', & file=__FILE__, line=__LINE__) - end if - !Call physics routine using local vriables with matching kinds: - if (present(pmid)) then + allocate(T_v_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'T_v_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + + allocate(R_dry_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'R_dry_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + + allocate(phis_phys(i0:i1,j0:j1), stat=iret) + call check_allocate(iret, subname, & + 'phis_phys(i0:i1,j0:j1)', & + file=__FILE__, line=__LINE__) + + allocate(gz_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'gz_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + dp_phys = real(dp, kind_phys) + T_v_phys = real(T_v, kind_phys) + R_dry_phys = real(R_dry, kind_phys) + phis_phys = real(phis, kind_phys) + ptop_phys = real(ptop, kind_phys) + + if (present(pmid)) then + !Allocate variable if optional argument is present: + allocate(pmid_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, 'pmid_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp_phys,T_v_phys, & R_dry_phys,phis_phys,ptop_phys,gz_phys, & pmid=pmid_phys) - pmid = real(pmid_phys, kind_dyn) + !Set output variables back to dynamics kind: + gz = real(gz_phys, kind_dyn) - else - call get_gz_given_dp_Tv_Rdry_phys(i0,i1,j0,j1,nlev,dp_phys,T_v_phys, & - R_dry_phys,phis_phys,ptop_phys,gz_phys) - end if + if (present(pmid)) then + pmid = real(pmid_phys, kind_dyn) + 
deallocate(pmid_phys) + end if - !Set output variables back to dynamics kind: - gz = real(gz_phys, kind_dyn) + !Deallocate variables: + deallocate(dp_phys) + deallocate(T_v_phys) + deallocate(R_dry_phys) + deallocate(phis_phys) + deallocate(gz_phys) - if (present(pmid)) then - pmid = real(pmid_phys, kind_dyn) - deallocate(pmid_phys) - end if - -#endif + end if !kind check end subroutine ! @@ -889,18 +1163,9 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q ! integer, optional, intent(in) :: active_species_idx_dycore(:) -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v, & - temp=temp,dp_dry=dp_dry,sum_q=sum_q, & - active_species_idx_dycore=active_species_idx_dycore) -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0:k1,ntrac) - real(kind_phys) :: T_v_phys(i0:i1,j0:j1,k0:k1) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: T_v_phys(:,:,:) real(kind_phys), allocatable :: temp_phys(:,:,:) real(kind_phys), allocatable :: dp_dry_phys(:,:,:) real(kind_phys), allocatable :: sum_q_phys(:,:,:) @@ -909,59 +1174,83 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_virtual_temp (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(temp)) then - !Allocate variable if optional argument is present: - allocate(temp_phys(i0:i1,j0:j1,k0:k1), stat=iret) - call check_allocate(iret, subname, 'temp_phys(i0:i1,j0:j1,k0:k1)', & - file=__FILE__, line=__LINE__) + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v, & + temp=temp,dp_dry=dp_dry,sum_q=sum_q, & + active_species_idx_dycore=active_species_idx_dycore) - !Set optional local variable: - temp_phys = real(temp, kind_phys) - end if + else - if (present(dp_dry)) then - !Allocate variable if optional argument is present: - allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) - call check_allocate(iret, subname, 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0:k1,ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,k0:k1,ntrac)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - dp_dry_phys = real(dp_dry, kind_phys) - end if - - if (present(sum_q)) then - !Allocate variable if optional argument is present: - allocate(sum_q_phys(i0:i1,j0:j1,k0:k1), stat=iret) - call check_allocate(iret, subname, 'sum_q_phys(i0:i1,j0:j1,k0:k1)', & + allocate(T_v_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'T_v_phys(i0:i1,j0:j1,k0:k1)', & file=__FILE__, line=__LINE__) - end if - !Call physics routine using local vriables with matching kinds: - call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,T_v_phys, & - temp=temp_phys,dp_dry=dp_dry_phys,sum_q=sum_q_phys, & - active_species_idx_dycore=active_species_idx_dycore) - - !Set output variables back to dynamics kind: - T_v = real(T_v_phys, kind_dyn) - - if (present(sum_q)) then - sum_q = real(sum_q_phys, kind_dyn) - deallocate(sum_q_phys) - end if - - !Deallocate variables: - if (allocated(temp_phys)) then - deallocate(temp_phys) - end if - - 
if (allocated(dp_dry_phys)) then - deallocate(dp_dry_phys) - end if - -#endif + !Set local variables: + tracer_phys = real(tracer, kind_phys) + + if (present(temp)) then + !Allocate variable if optional argument is present: + allocate(temp_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'temp_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + temp_phys = real(temp, kind_phys) + end if + + if (present(dp_dry)) then + !Allocate variable if optional argument is present: + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + dp_dry_phys = real(dp_dry, kind_phys) + end if + + if (present(sum_q)) then + !Allocate variable if optional argument is present: + allocate(sum_q_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, 'sum_q_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: + call get_virtual_temp_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_phys,T_v_phys, & + temp=temp_phys,dp_dry=dp_dry_phys,sum_q=sum_q_phys, & + active_species_idx_dycore=active_species_idx_dycore) + + !Set output variables back to dynamics kind: + T_v = real(T_v_phys, kind_dyn) + + if (present(sum_q)) then + sum_q = real(sum_q_phys, kind_dyn) + deallocate(sum_q_phys) + end if + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(T_v_phys) + + if (allocated(temp_phys)) then + deallocate(temp_phys) + end if + if (allocated(dp_dry_phys)) then + deallocate(dp_dry_phys) + end if + + end if !kind check end subroutine get_virtual_temp ! @@ -982,49 +1271,65 @@ subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_speci real(kind_dyn), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R real(kind_dyn), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry, & - fact=fact) -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) - real(kind_phys) :: R_dry_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys), allocatable :: fact_phys(i0:i1,j0:j1,k0_trac:k1_trac) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: R_dry_phys(:,:,:) + real(kind_phys), allocatable :: fact_phys(:,:,:) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_R_dry (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(fact)) then - !Allocate variable if optional argument is present: - allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret) - call check_allocate(iret, subname, 'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', & + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry, & + fact=fact) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 
'tracer_phys(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)', & file=__FILE__, line=__LINE__) - !Set optional local variable: - fact_phys = real(fact, kind_phys) - end if + allocate(R_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'R_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + + if (present(fact)) then + !Allocate variable if optional argument is present: + allocate(fact_phys(i0:i1,j0:j1,k0_trac:k1_trac), stat=iret) + call check_allocate(iret, subname, 'fact_phys(i0:i1,j0:j1,k0_trac:k1_trac)', & + file=__FILE__, line=__LINE__) + + !Set optional local variable: + fact_phys = real(fact, kind_phys) + end if - !Call physics routine using local vriables with matching kinds: - call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys, & - active_species_idx_dycore,R_dry_phys,fact=fact_phys) + !Call physics routine using local vriables with matching kinds: + call get_R_dry_phys(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer_phys, & + active_species_idx_dycore,R_dry_phys,fact=fact_phys) - !Set output variables back to dynamics kind: - R_dry = real(R_dry_phys, kind_dyn) + !Set output variables back to dynamics kind: + R_dry = real(R_dry_phys, kind_dyn) - !Deallocate variables: - if (allocated(fact_phys)) then - deallocate(fact_phys) - end if + !Deallocate variables: + deallocate(tracer_phys) + deallocate(R_dry_phys) -#endif + if (allocated(fact_phys)) then + deallocate(fact_phys) + end if + + end if !kind check end subroutine get_R_dry ! @@ -1052,54 +1357,78 @@ subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_i real(kind_dyn), intent(out) :: exner(i0:i1,j0:j1,nlev) real(kind_dyn), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev) ! 
for efficiency when a routine needs this variable -#ifndef DYN_PHYS_KIND_DIFF - - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_exner_phys(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,& - dp_dry,ptop,p00,inv_exner,exner,poverp0=poverp0) -#else - !Declare local variables: - real(kind_phys) :: tracer_phys(i0:i1,j0:j1,nlev,1:ntrac) - real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,nlev) + real(kind_phys), allocatable :: tracer_phys(:,:,:,:) + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) + real(kind_phys), allocatable :: exner_phys(:,:,:) + real(kind_phys), allocatable :: poverp0_phys(:,:,:) + real(kind_phys) :: ptop_phys real(kind_phys) :: p00_phys - real(kind_phys) :: exner_phys(i0:i1,j0:j1,nlev) - real(kind_phys), allocatable :: poverp0_phys(:,:,:) !check_allocate variables: integer :: iret !allocate status integer character(len=*), parameter :: subname = 'get_exner (dyn)' - !Set local variables: - tracer_phys = real(tracer, kind_phys) - dp_dry_phys = real(dp_dry, kind_phys) - ptop_phys = real(ptop, kind_phys) - p00_phys = real(p00, kind_phys) + !Check if kinds are different: + if (kind_phys == kind_dyn) then - if (present(poverp0)) then - !Allocate variable if optional argument is present: - allocate(poverp0_phys(i0:i1,j0:j1,nlev), stat=iret) - call check_allocate(iret, subname, 'poverp0_phys(i0:i1,j0:j1,nlev)', & + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_exner_phys(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,& + dp_dry,ptop,p00,inv_exner,exner,poverp0=poverp0) + + else + + !Allocate local variables: + allocate(tracer_phys(i0:i1,j0:j1,nlev,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_phys(i0:i1,j0:j1,nlev,1:ntrac)', & file=__FILE__, line=__LINE__) - end if - !Call physics routine using local vriables with matching kinds: - call get_exner(i0,i1,j0,j1,nlev,ntrac,tracer_phys,mixing_ratio,active_species_idx,& - dp_dry_phys,ptop_phys,p00_phys,inv_exner,exner_phys, & - poverp0=poverp0_phys) + allocate(dp_dry_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) - !Set optional output variables back to dynamics kind: - if (present(poverp0)) then - poverp0 = real(poverp0_phys, kind_dyn) - deallocate(poverp0_phys) - end if + allocate(exner_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, & + 'exner_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + tracer_phys = real(tracer, kind_phys) + dp_dry_phys = real(dp_dry, kind_phys) + ptop_phys = real(ptop, kind_phys) + p00_phys = real(p00, kind_phys) + + if (present(poverp0)) then + !Allocate variable if optional argument is present: + allocate(poverp0_phys(i0:i1,j0:j1,nlev), stat=iret) + call check_allocate(iret, subname, 'poverp0_phys(i0:i1,j0:j1,nlev)', & + file=__FILE__, line=__LINE__) + end if + + !Call physics routine using local vriables with matching kinds: + call get_exner_phys(i0,i1,j0,j1,nlev,ntrac,tracer_phys,mixing_ratio,active_species_idx,& + dp_dry_phys,ptop_phys,p00_phys,inv_exner,exner_phys, & + poverp0=poverp0_phys) + + !Set optional output variables back to dynamics kind: + if (present(poverp0)) then + poverp0 = real(poverp0_phys, kind_dyn) + deallocate(poverp0_phys) + end if + + !Deallocate variables: + deallocate(tracer_phys) + deallocate(dp_dry_phys) + deallocate(exner_phys) - !Set output variables back to dynamics 
kind: - exner = real(exner_phys, kind_dyn) + !Set output variables back to dynamics kind: + exner = real(exner_phys, kind_dyn) -#endif + end if !kind check end subroutine get_exner ! @@ -1130,33 +1459,67 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th ! integer, optional, dimension(:), intent(in) :: active_species_idx_dycore -#ifndef DYN_PHYS_KIND_DIFF + !Declare local variables: + real(kind_phys), allocatable :: tracer_mass_phys(:,:,:,:) + real(kind_phys), allocatable :: temp_phys(:,:,:) + real(kind_phys), allocatable :: dp_dry_phys(:,:,:) + real(kind_phys), allocatable :: thermal_energy_phys(:,:,:) + + !check_allocate variables: + integer :: iret !allocate status integer + character(len=*), parameter :: subname = 'get_thermal_energy (dyn)' - !The dynamics and physics kind is the same, so just call the physics - !routine directly: - call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy,& - active_species_idx_dycore=active_species_idx_dycore) -#else + !Check if kinds are different: + if (kind_phys == kind_dyn) then - !Declare local variables: - real(kind_phys) :: tracer_mass_phys(i0:i1,j0:j1,k0:k1,ntrac) - real(kind_phys) :: temp_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys) :: dp_dry_phys(i0:i1,j0:j1,k0:k1) - real(kind_phys) :: thermal_energy_phys(i0:i1,j0:j1,k0:k1) - - !Set local variables: - tracer_mass_phys = real(tracer_mass, kind_phys) - temp_phys = real(temp, kind_phys) - dp_dry_phys = real(dp_dry_phys, kind_phys) - - !Call physics routine using local vriables with matching kinds: - call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,temp_phys,& - dp_dry_phys,thermal_energy_phys,& - active_species_idx_dycore=active_species_idx_dycore) - - !Set output variables back to dynamics kind: - thermal_energy = real(thermal_energy_phys, kind_dyn) -#endif + !The dynamics and physics kind is the same, so just call the physics + !routine directly: + call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy,& + active_species_idx_dycore=active_species_idx_dycore) + + else + + !Allocate local variables: + allocate(tracer_mass_phys(i0:i1,j0:j1,k0:k1,1:ntrac), stat=iret) + call check_allocate(iret, subname, & + 'tracer_mass_phys(i0:i1,j0:j1,nlev,1:ntrac)', & + file=__FILE__, line=__LINE__) + + allocate(temp_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'temp_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(dp_dry_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'dp_dry_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + allocate(thermal_energy_phys(i0:i1,j0:j1,k0:k1), stat=iret) + call check_allocate(iret, subname, & + 'thermal_energy_phys(i0:i1,j0:j1,k0:k1)', & + file=__FILE__, line=__LINE__) + + !Set local variables: + tracer_mass_phys = real(tracer_mass, kind_phys) + temp_phys = real(temp, kind_phys) + dp_dry_phys = real(dp_dry_phys, kind_phys) + + !Call physics routine using local vriables with matching kinds: + call get_thermal_energy_phys(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass_phys,temp_phys,& + dp_dry_phys,thermal_energy_phys,& + active_species_idx_dycore=active_species_idx_dycore) + + !Set output variables back to dynamics kind: + thermal_energy = real(thermal_energy_phys, kind_dyn) + + !Deallocate variables: + deallocate(tracer_mass_phys) + deallocate(temp_phys) + deallocate(dp_dry_phys) + deallocate(thermal_energy_phys) + + end if !kind check end subroutine get_thermal_energy 
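Note: the hunks above all follow the same refactor. The compile-time #ifndef DYN_PHYS_KIND_DIFF branches are replaced by a run-time if (kind_phys == kind_dyn) check, and the fixed-size kind_phys temporaries become allocatable work arrays whose allocation status is verified with check_allocate. The short Python/NumPy analogue below is included only as an illustration of that wrapper pattern; the names (dyn_wrapper, physics_routine, DYN_DTYPE, PHYS_DTYPE) are invented for the sketch and the NumPy dtypes merely stand in for the Fortran kinds.

import numpy as np

# Illustration only: PHYS_DTYPE/DYN_DTYPE stand in for kind_phys/kind_dyn, and
# physics_routine/dyn_wrapper are invented names that mirror the shape of the
# Fortran wrappers above (e.g. get_molecular_diff_coef and its *_phys twin).
PHYS_DTYPE = np.float64
DYN_DTYPE = np.float32

def physics_routine(temp, tracer):
    # Pretend physics-side routine that expects PHYS_DTYPE arrays.
    return temp * tracer.sum(axis=-1)

def dyn_wrapper(temp, tracer):
    # Dynamics-side wrapper: branch at run time on whether the kinds match.
    if DYN_DTYPE == PHYS_DTYPE:
        # Kinds are the same, so call the physics routine directly.
        return physics_routine(temp, tracer)
    # Kinds differ: make physics-precision working copies (the Fortran code
    # allocates kind_phys arrays and checks the status with check_allocate),
    temp_phys = temp.astype(PHYS_DTYPE)
    tracer_phys = tracer.astype(PHYS_DTYPE)
    # call the physics routine with matching precision,
    out_phys = physics_routine(temp_phys, tracer_phys)
    # and cast the result back to the dynamics precision before returning.
    return out_phys.astype(DYN_DTYPE)

temp = np.ones((4, 4, 3), dtype=DYN_DTYPE)
tracer = np.ones((4, 4, 3, 2), dtype=DYN_DTYPE)
print(dyn_wrapper(temp, tracer).dtype)   # float32
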
diff --git a/src/dynamics/utils/dynconst.F90 b/src/dynamics/utils/dynconst.F90 index f15bc593..403c984d 100644 --- a/src/dynamics/utils/dynconst.F90 +++ b/src/dynamics/utils/dynconst.F90 @@ -6,7 +6,7 @@ module dynconst use physconst, only: phys_pi=>pi implicit none - public + private !Physical constants: @@ -16,27 +16,30 @@ module dynconst !never change: !circle's circumference/diameter [unitless] - real(kind_dyn), parameter :: pi = real(phys_pi, kind_dyn) - + real(kind_dyn), parameter, public :: pi = real(phys_pi, kind_dyn) ! radius of earth [m] - real(kind_dyn), protected :: rearth + real(kind_dyn), protected, public :: rearth ! reciprocal of earth's radius [1/m] - real(kind_dyn), protected :: ra + real(kind_dyn), protected, public :: ra ! earth's rotation rate [rad/sec] - real(kind_dyn), protected :: omega + real(kind_dyn), protected, public :: omega ! gravitational acceleration [m/s**2] - real(kind_dyn), protected :: gravit + real(kind_dyn), protected, public :: gravit ! specific heat of dry air [J/K/kg] - real(kind_dyn), protected :: cpair + real(kind_dyn), protected, public :: cpair ! Dry air gas constant [J/K/kg] - real(kind_dyn), protected :: rair + real(kind_dyn), protected, public :: rair ! reference temperature [K] - real(kind_dyn), protected :: tref + real(kind_dyn), protected, public :: tref ! reference lapse rate [K/m] - real(kind_dyn), protected :: lapse_rate + real(kind_dyn), protected, public :: lapse_rate ! R/Cp - real(kind_dyn), protected :: cappa + real(kind_dyn), protected, public :: cappa + + + !Public routines: + public :: dynconst_init !============================================================================== CONTAINS diff --git a/test/.pylintrc b/test/.pylintrc index 124343ea..89b8030f 100644 --- a/test/.pylintrc +++ b/test/.pylintrc @@ -411,7 +411,7 @@ max-locals=40 max-parents=7 # Maximum number of public methods for a class (see R0904). 
-max-public-methods=20 +max-public-methods=25 # Maximum number of return / yield for function / method body max-returns=6 diff --git a/test/unit/cam_config_unit_tests.py b/test/unit/cam_config_unit_tests.py index 0f4e82cc..029a6c91 100644 --- a/test/unit/cam_config_unit_tests.py +++ b/test/unit/cam_config_unit_tests.py @@ -312,7 +312,7 @@ def test_config_ccpp_phys_set_check_single_suite(self): #Create namelist file: with open("test.txt", "w", encoding='UTF-8') as test_fil: test_fil.write('!Namelist test file\n') - test_fil.write('physics_suite = "adiabatic"\n') + test_fil.write('physics_suite = "kessler"\n') #Run ccpp_phys_set config method: self.test_config_cam.ccpp_phys_set(cam_nml_attr_dict, "test.txt") @@ -340,7 +340,7 @@ def test_config_ccpp_phys_set_check_multi_suite(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Create namelist file: with open("test.txt", "w", encoding='UTF-8') as test_fil: @@ -369,7 +369,7 @@ def test_config_ccpp_phys_set_missing_phys(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Set error message: ermsg = "No 'physics_suite' variable is present in user_nl_cam.\n" @@ -392,9 +392,9 @@ def test_config_ccpp_phys_set_missing_phys(self): #Remove text file: os.remove("test.txt") - #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - #Check "ccpp_phys_set" missing equals-sign error-handling - #++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + #Check "ccpp_phys_set" multiple namelist entries error-handling + #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ def test_config_ccpp_phys_set_two_phys(self): @@ -405,7 +405,7 @@ def test_config_ccpp_phys_set_two_phys(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Set error message: ermsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n" @@ -442,7 +442,7 @@ def test_config_ccpp_phys_set_missing_equals(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Set error message: ermsg = "No equals (=) sign was found with the 'physics_suite' variable." @@ -478,7 +478,7 @@ def test_config_ccpp_phys_set_two_equals(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Set error message: ermsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." @@ -515,7 +515,7 @@ def test_config_ccpp_phys_set_no_physics_suite_match(self): """ #Create namelist attribute dictionary: - cam_nml_attr_dict = dict() + cam_nml_attr_dict = {} #Set error message: ermsg = "physics_suite specified in user_nl_cam, 'cam6', doesn't match any suites\n" diff --git a/test/unit/sample_files/physics_types_complete.F90 b/test/unit/sample_files/physics_types_complete.F90 index 07bfe6b4..de6af294 100644 --- a/test/unit/sample_files/physics_types_complete.F90 +++ b/test/unit/sample_files/physics_types_complete.F90 @@ -191,7 +191,7 @@ subroutine physics_types_complete_tstep_init() character(len=*), parameter :: subname = "physics_types_complete_tstep_init" ! standard_var: Standard non ddt variable - standard_var = 0 + standard_var = 0.0 ! latitude: Latitude phys_state%latitude = 0._kind_phys From 9887dd91446874193a44b937657ad32b6465fb40 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 20 Sep 2021 21:05:03 -0600 Subject: [PATCH 41/45] Fix pylint errors. 
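Most of the changes in this patch are mechanical pylint fixes: passing an explicit encoding to open(), preferring list/dict literals over the list() and dict() constructors, dropping unused locals (e.g. atm_ncpl), and assigning intentionally ignored return values to "_". The sketch below only illustrates those conventions; the function names and data in it are made up and are not CAM code.

def write_paths(filepath_src, paths):
    # pylint "unspecified-encoding": always pass the encoding explicitly.
    with open(filepath_src, "w", encoding="utf-8") as filepath:
        filepath.write("\n".join(paths))
        filepath.write("\n")

def collect_names(items):
    # Prefer literals over the list()/dict() constructors.
    names = []
    attrs = {}
    for key, value in items:
        names.append(key)
        attrs[key] = value
    return names, attrs

# A value that is deliberately ignored is assigned to "_" instead of being
# bound to an unused local variable:
names, _ = collect_names([("rundir", "/scratch"), ("debug", False)])
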
--- cime_config/buildlib | 2 +- cime_config/buildnml | 1 - src/data/generate_registry_data.py | 32 ++++++++++++++---------------- test/.pylintrc | 6 +++++- test/unit/write_init_unit_tests.py | 12 +++++------ 5 files changed, 26 insertions(+), 27 deletions(-) diff --git a/cime_config/buildlib b/cime_config/buildlib index 84bee0a9..5076036f 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -123,7 +123,7 @@ def _build_cam(): # End if # Write Filepath text file - with open(filepath_src, "w") as filepath: + with open(filepath_src, "w", encoding='utf-8') as filepath: filepath.write("\n".join(paths)) filepath.write("\n") # End with diff --git a/cime_config/buildnml b/cime_config/buildnml index 83797d4d..b9417701 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -98,7 +98,6 @@ def buildnml(case, caseroot, compname): # End if srcroot = case.get_value("SRCROOT") rundir = case.get_value("RUNDIR") - atm_ncpl = case.get_value("ATM_NCPL") cam_namelist_opts = case.get_value("CAM_NAMELIST_OPTS") cam_nml_use_case = case.get_value("CAM_NML_USE_CASE") debug = case.get_value("DEBUG") diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 77d8a0ca..2de28a48 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -119,7 +119,7 @@ def is_ddt_type(self, ttype): def known_ddt_names(self): """Return a list of the known DDT types in this registry""" - ddt_names = list() + ddt_names = [] for key in self: ddt = self[key].ddt if ddt: @@ -152,7 +152,7 @@ def __init__(self, elem_node, local_name, dimensions, known_types, self.__long_name = '' self.__initial_value = '' self.__ic_names = None - self.__elements = list() + self.__elements = [] self.__protected = protected self.__index_name = index_name self.__local_index_name = local_index_name @@ -184,12 +184,10 @@ def __init__(self, elem_node, local_name, dimensions, known_types, # end if (just ignore other tags) # end for - # pylint: disable=bad-continuation if ((not self.initial_value) and (self.allocatable == VarBase.__pointer_type_str)): self.__initial_value = VarBase.__pointer_def_init # end if - # pylint: enable=bad-continuation def write_metadata(self, outfile): """Write out this variable as CCPP metadata""" @@ -406,9 +404,9 @@ def __init__(self, elem_node, parent_name, dimensions, known_types, local_index_name = var.local_name # Find the location of this element's index found = False - my_dimensions = list() - my_index = list() - my_local_index = list() + my_dimensions = [] + my_index = [] + my_local_index = [] for dim_ind, dim in enumerate(dimensions): if dimensions[dim_ind] == pos: found = True @@ -502,12 +500,12 @@ def __init__(self, var_node, known_types, vdict, logger): else: protected = False # end if - my_dimensions = list() + my_dimensions = [] self.__def_dims_str = "" for attrib in var_node: if attrib.tag == 'dimensions': my_dimensions = [x.strip() for x in attrib.text.split(' ') if x] - def_dims = list() # Dims used for variable declarations + def_dims = [] # Dims used for variable declarations for dim in my_dimensions: if dim.count(':') > 1: emsg = "Illegal dimension string, '{},' in '{}'" @@ -517,7 +515,7 @@ def __init__(self, var_node, known_types, vdict, logger): if allocatable in ("", "parameter", "target"): # We need to find a local variable for every dimension dimstrs = [x.strip() for x in dim.split(':')] - ldimstrs = list() + ldimstrs = [] for ddim in dimstrs: lname = Variable.constant_dimension(ddim) if not lname: @@ -827,7 +825,7 @@ def 
__init__(self, name, ttype, logger): self.__name = name self.__type = ttype self.__logger = logger - self.__standard_names = list() + self.__standard_names = [] self.__dimensions = set() # All known dimensions for this dictionary @property @@ -992,7 +990,7 @@ def __init__(self, ddt_node, known_types, var_dict, dycore, config, logger): """ self.__type = ddt_node.get('type') self.__logger = logger - self.__data = list() + self.__data = [] extends = ddt_node.get('extends', default=None) if extends is None: self.__extends = None @@ -1149,7 +1147,7 @@ def __init__(self, file_node, known_types, dycore, config, self.__type = file_node.get('type') self.__known_types = known_types self.__ddts = OrderedDict() - self.__use_statements = list() + self.__use_statements = [] self.__generate_code = gen_code self.__file_path = file_path for obj in file_node: @@ -1231,7 +1229,7 @@ def write_source(self, outdir, indent, logger): with FortranWriter(ofilename, "w", file_desc, self.name, indent=indent) as outfile: # Use statements (if any) - module_list = list() # tuple of (module, type) + module_list = [] # tuple of (module, type) for var in self.__var_dict.variable_list(): mod = var.module if mod and (mod.lower() != self.name.lower()): @@ -1438,7 +1436,7 @@ def metadata_file_to_files(file_path, known_types, dycore, config, logger): registry File object. """ known_ddts = known_types.known_ddt_names() - mfiles = list() + mfiles = [] if os.path.exists(file_path): meta_tables = parse_metadata_file(file_path, known_ddts, logger) logger.info("Parsing metadata_file, '{}'".format(file_path)) @@ -1486,7 +1484,7 @@ def metadata_file_to_files(file_path, known_types, dycore, config, logger): vnode_str += '>' dims = var.get_dimensions() if dims: - vdims = list() + vdims = [] for dim in dims: if dim[0:18] == 'ccpp_constant_one:': vdims.append(dim[18:]) @@ -1532,7 +1530,7 @@ def write_registry_files(registry, dycore, config, outdir, src_mod, src_root, Traceback (most recent call last): CCPPError: Unknown registry object type, 'variable' """ - files = list() + files = [] known_types = TypeRegistry() for section in registry: sec_name = section.get('name') diff --git a/test/.pylintrc b/test/.pylintrc index 89b8030f..90588cb8 100644 --- a/test/.pylintrc +++ b/test/.pylintrc @@ -54,7 +54,11 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=import-error, too-many-statements, too-many-lines, too-many-locals, bad-whitespace + +#Note: If python 3.5 is ever dropped as a testing option +# then 'consider-using-f-string' should be re-enabled -JN + +disable=import-error, too-many-statements, too-many-lines, too-many-locals, bad-whitespace, consider-using-f-string # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/test/unit/write_init_unit_tests.py b/test/unit/write_init_unit_tests.py index b998cbc1..e0a32729 100644 --- a/test/unit/write_init_unit_tests.py +++ b/test/unit/write_init_unit_tests.py @@ -34,7 +34,7 @@ if PY3: __FILE_OPEN = (lambda x: open(x, 'r', encoding='utf-8')) else: - __FILE_OPEN = (lambda x: open(x, 'r')) + __FILE_OPEN = (lambda x: open(x, 'r', encoding='utf-8')) # End if #Check for all necessary directories: @@ -476,8 +476,6 @@ def test_no_horiz_var_write_init(self): host_files = [model_host, out_meta] # Setup write_init_files inputs: - check_init_in = os.path.join(_INIT_SAMPLES_DIR, "phys_vars_init_check_simple.F90") - phys_input_in = os.path.join(_INIT_SAMPLES_DIR, "physics_inputs_simple.F90") check_init_out = os.path.join(_TMP_DIR, "phys_vars_init_check_no_horiz.F90") phys_input_out = os.path.join(_TMP_DIR, "physics_inputs_no_horiz.F90") @@ -499,10 +497,10 @@ def test_no_horiz_var_write_init(self): # Run test with self.assertRaises(ValueError) as verr: - retmsg = write_init.write_init_files(files, _TMP_DIR, 3, - cap_datafile, logger, - phys_check_filename="phys_vars_init_check_no_horiz.F90", - phys_input_filename="physics_inputs_no_horiz.F90") + _ = write_init.write_init_files(files, _TMP_DIR, 3, + cap_datafile, logger, + phys_check_filename="phys_vars_init_check_no_horiz.F90", + phys_input_filename="physics_inputs_no_horiz.F90") # Check exception message emsg = "Variable 'air_pressure_at_sea_level' needs at least one registered dimension" \ From 82bd95d296f15c2c0bbcb5598d9e09cfd855d6df Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 20 Sep 2021 21:16:29 -0600 Subject: [PATCH 42/45] Fix doctest failure in Python 3.5 --- cime_config/cam_config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index db67e214..46d05a67 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -15,6 +15,8 @@ import argparse import os.path +from collections import OrderedDict + #----------------------------------- # Import CAM-specific python modules #----------------------------------- @@ -806,7 +808,7 @@ def __check_type(self, val): valid_type = self.valid_type # Create empty dictionary to store errors: - bad_val_types = {} + bad_val_types = OrderedDict() if valid_type == "str": #All list entries should be strings: From a012973cc94a7dbeb811a61745f210dd6448b300 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Mon, 20 Sep 2021 22:10:54 -0600 Subject: [PATCH 43/45] Modify registry to work with null dycore. --- src/data/registry.xml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/data/registry.xml b/src/data/registry.xml index 71401f3a..14440642 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -291,13 +291,14 @@ .true. + air_temperature + x_wind + y_wind + lagrangian_tendency_of_air_pressure + constituent_mixing_ratio + surface_pressure_of_dry_air geopotential_at_surface - air_temperature - x_wind - y_wind - lagrangian_tendency_of_air_pressure - constituent_mixing_ratio pressure_thickness_of_dry_air frontogenesis_function frontogenesis_angle From 61182fb5e42d21bf8b2a673bb4cb8278f07d53c1 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Fri, 24 Sep 2021 15:05:30 -0600 Subject: [PATCH 44/45] Address third round of review comments and suggestions. 
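This revision moves the detailed doctests onto the _check_integer_val and _check_string_val helpers, has those helpers validate their own valid_vals arguments, and adds a _TYPE_CHECK_FUNCTIONS dispatch dictionary mapping "int" and "str" to the matching checker. The sketch below shows one way such a dispatch table can be used to validate list entries without duplicating per-type logic; the checker bodies are heavily simplified stand-ins for the real functions (which, as in the diff, return an error string or None), and check_list_elements is a hypothetical helper rather than code from this patch.

def _check_integer_val(name, val, valid_vals=None):
    # Simplified: the real checker also accepts (min, max) tuples and
    # validates the form of valid_vals itself.
    if not isinstance(val, int):
        return "ERROR: value for '{}' must be an integer".format(name)
    if valid_vals is not None and val not in valid_vals:
        return "ERROR: '{}' value '{}' not in {}".format(name, val, valid_vals)
    return None

def _check_string_val(name, val, valid_vals=None):
    # Simplified: the real checker also accepts compiled regular expressions.
    if not isinstance(val, str):
        return "ERROR: value for '{}' must be a string".format(name)
    if valid_vals is not None and val not in valid_vals:
        return "ERROR: '{}' value '{}' not in {}".format(name, val, valid_vals)
    return None

# Dispatch table analogous to the _TYPE_CHECK_FUNCTIONS added in this patch:
_TYPE_CHECK_FUNCTIONS = {"int": _check_integer_val, "str": _check_string_val}

def check_list_elements(name, values, valid_type, valid_vals=None):
    # Run the appropriate checker over every list entry and collect the
    # error messages (an empty list means every entry was valid).
    check = _TYPE_CHECK_FUNCTIONS[valid_type]
    return [err for err in (check(name, val, valid_vals) for val in values) if err]

print(check_list_elements("test", [1, 2, 5], "int", valid_vals=[1, 2, 3]))
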
--- cime_config/cam_config.py | 503 ++++++++++++++--------------- src/data/generate_registry_data.py | 6 +- src/data/registry.xml | 29 +- src/dynamics/se/dp_coupling.F90 | 67 ++-- src/dynamics/se/dyn_comp.F90 | 4 - 5 files changed, 302 insertions(+), 307 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 46d05a67..84cbca8b 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -75,34 +75,103 @@ def _check_integer_val(name, val, valid_vals=None): tuple -> If a tuple, then there must be only two values, which define a possible range of values, e.g. (min, max). If only one value is provided, then only a minimum (or maximum) value will be - enforced, depending on if the tuple is (x,) or (,x). + enforced, depending on if the tuple is (x, None) or (None ,x). + + Doctests: + + Please note that "successful" validation tests are done in the ConfigInteger doctests. + + 1. Check that using a non-integer value throws an error: + >>> _check_integer_val("test", 5.0, valid_vals=None) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Value being checked in 'check_integer_val' must be an integer type, not ''. + + 2. Check that using a valid_vals option that is not a list or tuple throws an error: + >>> _check_integer_val("test", 5, valid_vals="test_vals") #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Valid values for integers must by provided as either a tuple or a list, not ''. + + 3. Check that using non-integer values inside the valid_vals list or tuple throws an error: + >>> _check_integer_val("test", 5, valid_vals=[1,2,5,"test_val"]) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Valid value, 'test_val', for variable 'test', must be either None or an integer. Currently it is ''. + + 4. Check that using a tuple with only one entry throws an error: + >>> _check_integer_val("test", 5, valid_vals=(1,)) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: Valid values tuple for variable, 'test', must have two elements, not '1' elements. + + 5. Check that using a tuple with more than two entries throws an error: + >>> _check_integer_val("test", 5, valid_vals=(1,2,5)) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: Valid values tuple for variable, 'test', must have two elements, not '3' elements. + + 6. Check that using a tuple with only Nones throws an error: + >>> _check_integer_val("test", 5, valid_vals=(None,None)) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigValError: ERROR: Valid values tuple for variable, 'test', must contain at least one integer. + + 7. Check that an integer less than the tuple min is "invalid": + >>> _check_integer_val("test", 5, valid_vals=(6,None)) + "ERROR: Value, '5', provided for variable, 'test', is less than minimum valid value, '6'" + + 8. Check that an integer greater than the tuple max is "invalid": + >>> _check_integer_val("test", 5, valid_vals=(None,4)) + "ERROR: Value, '5', provided for variable, 'test', is greater than max valid value, '4'" + + 9. Check that an integer outside min/max tuple range is "invalid": + >>> _check_integer_val("test", 5, valid_vals=(10,13)) + "ERROR: Value, '5', provided for variable, 'test', is outside valid value range, '(10, 13)'" + + 10. 
Check that an integer not included in the list is "invalid": + >>> _check_integer_val("test", 5, valid_vals=[1,2,3,4]) + "ERROR: Value, '5', provided for variable, 'test', does not match any of the valid values: '[1, 2, 3, 4]'" + """ # Make sure that provided value is an integer: if not isinstance(val, int): - emsg = "ERROR: Value being checked in 'check_integer_val'" - emsg += "must be an integer type, not '{}'" + emsg = "ERROR: Value being checked in 'check_integer_val' " + emsg += "must be an integer type, not '{}'." raise CamConfigTypeError(emsg.format(type(val))) # End if # Only check the given value if valid_vals is not "None" if valid_vals is not None: + # Check if valid values is a tuple if isinstance(valid_vals, tuple): + + # Check that all tuple elements are integers: + for valid_val in valid_vals: + if valid_val is not None and not isinstance(valid_val, int): + emsg = ("ERROR: Valid value, '{}', for variable '{}', must be " + "either None or an integer. Currently it is '{}'.") + raise CamConfigTypeError(emsg.format(valid_val, name, type(valid_val))) + # End if + # End for + # Check that length of valid values tuple is 2 if len(valid_vals) != 2: emsg = ("ERROR: Valid values tuple for variable, " - "'{}', must have two elements, not '{}' elements") + "'{}', must have two elements, not '{}' elements.") raise CamConfigValError(emsg.format(name, len(valid_vals))) # End if + if valid_vals[0] is None: # If first valid value is "None", then just check that # given value is less than second valid value, and # that second value is an integer if valid_vals[1] is None: emsg = "ERROR: Valid values tuple for variable, '{}', " - emsg += "must contain at least one integer" + emsg += "must contain at least one integer." raise CamConfigValError(emsg.format(name)) # End if if val > valid_vals[1]: @@ -128,7 +197,18 @@ def _check_integer_val(name, val, valid_vals=None): return emsg.format(val, name, valid_vals) # End if # End if + elif isinstance(valid_vals, list): + + # Check that all tuple elements are integers: + for valid_val in valid_vals: + if valid_val is not None and not isinstance(valid_val, int): + emsg = ("ERROR: Valid value, '{}', for variable '{}', must be " + "either None or an integer. Currently it is '{}'.") + raise CamConfigTypeError(emsg.format(valid_val, name, type(valid_val))) + # End if + # End for + # If valid_vals is a list, then just check that the given value # matches one of the valid values in the list if not val in valid_vals: @@ -136,10 +216,11 @@ def _check_integer_val(name, val, valid_vals=None): emsg += "does not match any of the valid values: '{}'" return emsg.format(val, name, valid_vals) # End if + else: # valid_vals is neither a list nor a tuple, so throw an error: emsg = "ERROR: Valid values for integers must by provided as " - emsg = "either a tuple or a list, not '{}'." + emsg += "either a tuple or a list, not '{}'." raise CamConfigTypeError(emsg.format(type(valid_vals))) # End if @@ -162,12 +243,42 @@ def _check_string_val(name, val, valid_vals=None): regex -> If a compiled regular expression, then check that the provided value is matched by the regular expression. + + Doctests: + + Please note that "successful" validation tests are done in the ConfigString doctests. + + 1. Check that using a non-string value throws an error: + >>> _check_string_val("test", [5], valid_vals=None) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Value being checked in 'check_string_val' must be a string type, not ''. + + 2. 
Check that using a valid_vals option that is not None, a list, or a regex throws an error: + >>> _check_string_val("test", "test_val", valid_vals=5) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Valid values for strings must by provided as either a regular expression or a list, not ''. + + 3. Check that using non-string values inside the valid_vals list throws an error: + >>> _check_string_val("test", "1", valid_vals=["1","2","5",6]) #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + CamConfigTypeError: ERROR: Valid value, '6', for variable 'test', must be a string. Currently it is ''. + + 9. Check that a string that doesn't match the provided regex is "invalid": + >>> _check_string_val("test", "test_val", valid_vals=re.compile(r"foo")) + "ERROR: Value, 'test_val', provided for variable, 'test', does not match the valid regular expression." + + 10. Check that a string not included in the list is "invalid": + >>> _check_string_val("test", "test_val", valid_vals=["1","2","3","4"]) + "ERROR: Value, 'test_val', provided for variable, 'test', does not match any of the valid values: '['1', '2', '3', '4']'" """ - # Make sure that provided value is an integer: + # Make sure that provided value is a string: if not isinstance(val, str): - emsg = "ERROR: Value being checked in 'check_string_val'" - emsg += "must be a string type, not '{}'" + emsg = "ERROR: Value being checked in 'check_string_val' " + emsg += "must be a string type, not '{}'." raise CamConfigTypeError(emsg.format(type(val))) # End if @@ -177,6 +288,16 @@ def _check_string_val(name, val, valid_vals=None): # If a list, then check that the given value # matches one of the valid values in the list if isinstance(valid_vals, list): + + # Check that all list elements are strings: + for valid_val in valid_vals: + if not isinstance(valid_val, str): + emsg = ("ERROR: Valid value, '{}', for variable '{}', must be " + "a string. Currently it is '{}'.") + raise CamConfigTypeError(emsg.format(valid_val, name, type(valid_val))) + # End if + # End for + if not val in valid_vals: emsg = "ERROR: Value, '{}', provided for variable, '{}', " emsg += "does not match any of the valid values: '{}'" @@ -194,8 +315,8 @@ def _check_string_val(name, val, valid_vals=None): else: # valid_vals is neither a list nor a regex, so throw an error: emsg = "ERROR: Valid values for strings must by provided as " - emsg = "either a regular expression or a list, not '{}'" - return emsg.format(type(valid_vals)) + emsg += "either a regular expression or a list, not '{}'." + raise CamConfigTypeError(emsg.format(type(valid_vals))) # End if # End if @@ -203,11 +324,14 @@ def _check_string_val(name, val, valid_vals=None): # Return nothing if value is valid return None +# Helper function to better generalize config value checking: +_TYPE_CHECK_FUNCTIONS = {"int" : _check_integer_val, "str" : _check_string_val} + ############################################################################### # CAM configure option classes ############################################################################### -class ConfigGen: +class ConfigGen(object): """ Generic configuration class used to @@ -344,54 +468,6 @@ class ConfigInteger(ConfigGen): >>> ConfigInteger("test", "test object description", 5, [4, 5, 6], is_nml_attr=True).is_nml_attr True - 2. 
Check that valid_vals must be None, a tuple, or a list: - - >>> ConfigInteger("test", "test object description", 5, "valid_vals").valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigTypeError: ERROR: The valid values for variable, 'test', must either be None, a list, or a tuple, not - - 3. Check that elements in list/tuple must be type None or integer: - - >>> ConfigInteger("test", "Test object description", 5, (1, 5.0)).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigTypeError: ERROR: Valid value for variable, 'test', must be either None or an integer. Currently it is - - 4. Evaluate if the "check_value" method works properly: - - With tuple length < 2: - >>> ConfigInteger("test", "Test object description", 5, (1,)).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Valid values tuple for variable, 'test', must have two elements, not '1' elements - - With tuple length > 2: - >>> ConfigInteger("test", "Test object description", 5, (1, 2, 10)).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Valid values tuple for variable, 'test', must have two elements, not '3' elements - - With tuple full of Nones: - >>> ConfigInteger("test", "Test object description", 5, (None, None)).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Valid values tuple for variable, 'test', must contain at least one integer - - With given value less than valid minimum: - >>> ConfigInteger("test", "Test object description", 5, (6, None)).value #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Value, '5', provided for variable, 'test', is less than minimum valid value, '6' - - With given value more than valid maximum: - >>> ConfigInteger("test", "Test object description", 5, (None, 4)).value #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Value, '5', provided for variable, 'test', is greater than max valid value, '4' - - With given value outside valid range: - >>> ConfigInteger("test", "Test object description", 5, (1, 4)).value #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: Error: Value, '5', provided for variable, 'test', is outside valid value range, '(1, 4)' - - With given value not present in valid value list: - >>> ConfigInteger("test,", "Test object description", 5, [3, 4, 6]).value #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: ERROR: Value, '5', provided for variable, 'test', does not match any of the valid values: '[3, 4, 6]' """ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): @@ -399,31 +475,13 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): # Add generic attributes super(ConfigInteger, self).__init__(name, desc, is_nml_attr=is_nml_attr) - # Check that "valid_vals" is either "None", a list, or a tuple - if valid_vals is not None: - if not isinstance(valid_vals, (list, tuple)): - emsg = ("ERROR: The valid values for variable, '{}', " - "must either be None, a list, or a tuple, not {}") - raise CamConfigTypeError(emsg.format(name, type(valid_vals))) - # End if - - # If list or tuple, check that all entries are either - # "None" or integers - for valid_val in valid_vals: - if valid_val is not None and not 
isinstance(valid_val, int): - emsg = ("ERROR: Valid value for variable, '{}', must be " - "either None or an integer. Currently it is {}") - raise CamConfigTypeError(emsg.format(name, - type(valid_val))) - # End if - # End for - # End if - - # If ok, then add valid_vals to object + # Add valid_vals to object self.__valid_vals = valid_vals - # Next, check that provided value is "valid" based on the - # valid values list or tuple + # Check that provided value is "valid" based on the + # valid values list or tuple. Note that this function + # also checks valid_vals itself to ensure that it is + # of the correct type and format. self.__check_value(val) # If everything is ok, then add provided value to object @@ -519,30 +577,6 @@ class ConfigString(ConfigGen): >>> ConfigString("test", "test_object description", "test_val", re.compile(r"test_val"), is_nml_attr=True).is_nml_attr True - 2. Check that valid_vals must be either None, a list, or a regular expression: - - >>> ConfigString("test", "test object description", "test_val", "test_valid_vals").valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigTypeError: ERROR: The valid values for variable, 'test', must either be None, a list, or a regex object, not - - 3. Check that if valid_vals is a list, all elements must be strings: - - >>> ConfigString("test", "test object description", "test_val", ["test_val", 5]).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigTypeError: ERROR: All valid value list options for variable, 'test', must be strings. - - 4. Evaluate if the "check_value" method works properly: - - With given value not present in valid value list: - >>> ConfigString("test", "test object description", "test_val", ["real_val", "other_val"]).valid_vals #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: ERROR: Value, 'test_val', provided for variable, 'test', does not match any of the valid values: '['real_val', 'other_val']' - - With given value not matching the valid regular expression: - >>> ConfigString("test", "test object description", "test_val", re.compile(r"real_val")).value #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CamConfigValError: ERROR: Value, 'test_val', provided for variable, 'test', does not match the valid regular expression - """ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): @@ -550,30 +584,14 @@ def __init__(self, name, desc, val, valid_vals=None, is_nml_attr=False): # Add generic attributes super(ConfigString, self).__init__(name, desc, is_nml_attr=is_nml_attr) - # Check if Valid_vals is not None - if valid_vals is not None: - # If not None, check if valid_vals is either a list or a - # regular expression (regex) object - if not isinstance(valid_vals, (list, REGEX_TYPE)): - emsg = "ERROR: The valid values for variable, '{}', must " - emsg += "either be None, a list, or a regex object, not {}" - raise CamConfigTypeError(emsg.format(name, type(valid_vals))) - # End if - if isinstance(valid_vals, list): - # If list, check that every entry is a string - if not all(isinstance(n, str) for n in valid_vals): - emsg = "ERROR: All valid value list options for " - emsg += "variable, '{}', must be strings." 
- raise CamConfigTypeError(emsg.format(name)) - # End if - # End if - # End if - # If ok, then add valid_vals to object self.__valid_vals = valid_vals # Next, check that provided value is "valid" based on the - # valid values list or regular expression + # valid values list or regular expression. Note that this + # function also checks valid_vals itself to ensure that it + # is of the correct type and format. + self.__check_value(val) # If everything is ok, then add provided value to object @@ -691,25 +709,11 @@ class ConfigList(ConfigGen): ... CamConfigValError: ERROR: valid values can only be used if valid_type is 'int' or 'str', not 'None'. - 7. Check that ConfigList with a "valid_vals" type that doesn't match "valid_type='int'" fails with the correct error: - >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="int", valid_vals={'a':1}).value #doctest: +ELLIPSIS - Traceback (most recent call last): - ... - CamConfigTypeError: ERROR: the valid values provided must be either in the form of a list - or a tuple in order to be used with integer elements, not ''. - - 8. Check that ConfigList with a "valid_vals" type that doesn't match "valid_type='str'" fails with the correct error: - >>> ConfigList("test", "test object description", ["a", "b", "c"], valid_type="str", valid_vals={'a':1}).value #doctest: +ELLIPSIS - Traceback (most recent call last): - ... - CamConfigTypeError: ERROR: the valid values provided must be either in the form of a list - or a regular expression in order to be used with string elements, not ''. - - 9. check that ConfigList with a list that matches the "valid_vals" entry works as expected: + 7. check that ConfigList with a list that matches the "valid_vals" entry works as expected: >>> ConfigList("test", "test object description", [1, 2, 3], valid_type="int", valid_vals=(0,5)).value [1, 2, 3] - 10. check that ConfigList with a list that does not mach the "valid_vals" entry fails wit hthe correct error: + 8. check that ConfigList with a list that does not mach the "valid_vals" entry fails with the correct error: >>> ConfigList("test", "test object description", ["1", "b", "c"], valid_type="str", valid_vals=["1","2","3"]).value #doctest: +ELLIPSIS Traceback (most recent call last): ... @@ -734,35 +738,17 @@ def __init__(self, name, desc, val, valid_type=None, valid_vals=None): # End if # End if + # Check that the valid values option is only being used with a valid type: + if valid_vals is not None and valid_type not in ["int", "str"]: + # Currently valid values can only be used with strings or integers, + # so throw an error: + emsg = "ERROR: valid values can only be used if valid_type is 'int' or 'str', not '{}'." + raise CamConfigValError(emsg.format(valid_type)) + # If ok, then add valid_type and valid_vals to object self.__valid_type = valid_type self.__valid_vals = valid_vals - #Check that the valid values option can be used with the valid type: - if self.__valid_vals is not None: - # If only integers are allowed ,then make sure valid vals is - # either a list or a tuple: - if valid_type == "int": - if not isinstance(valid_vals, list) and not isinstance(valid_vals, tuple): - emsg = "ERROR: the valid values provided must be either in the form of a list" - emsg += "\nor a tuple in order to be used with integer elements, not '{}'." 
- raise CamConfigTypeError(emsg.format(type(valid_vals))) - # End if - - # If only strings are allowed ,then make sure valid vals is - # either a list or a regular expression: - elif valid_type == "str": - if not isinstance(valid_vals, list) and not isinstance(valid_vals, REGEX_TYPE): - emsg = "ERROR: the valid values provided must be either in the form of a list" - emsg += "\nor a regular expression in order to be used with string elements, not '{}'." - raise CamConfigTypeError(emsg.format(type(valid_vals))) - # End if - else: - # Currently valid values can only be used with strings or integers, - # so throw an error: - emsg = "ERROR: valid values can only be used if valid_type is 'int' or 'str', not '{}'." - raise CamConfigValError(emsg.format(valid_type)) - # Next, check that provided list entry types and values are "valid" # based on the valid type and valid values provided: if self.__valid_type is not None: @@ -810,42 +796,42 @@ def __check_type(self, val): # Create empty dictionary to store errors: bad_val_types = OrderedDict() + good_type = "??" if valid_type == "str": #All list entries should be strings: + good_type = "string" for list_entry in val: if not isinstance(list_entry, str): bad_val_types[str(list_entry)] = str(type(list_entry)) - - #If bad values dictionary is non-empty, then raise error: - if bad_val_types: - emsg = "ERROR: The following list entries, provided for variable," - emsg += " '{}', are not strings, but instead are:\n".format(self.name) - for key_str, type_str in bad_val_types.items(): - emsg += "'{}': type='{}'\n".format(key_str, type_str) - raise CamConfigValError(emsg) - # End if - + # end if + # end for elif valid_type == "int": #All list entries should be integers: + good_type = "int" for list_entry in val: if not isinstance(list_entry, int): bad_val_types[str(list_entry)] = str(type(list_entry)) - - #If bad values dictionary is non-empty, then raise error: - if bad_val_types: - emsg = "ERROR: The following list entries, provided for variable," - emsg += " '{}', are not integers, but instead are:\n".format(self.name) - for key_str, type_str in bad_val_types.items(): - emsg += "'{}': type='{}'\n".format(key_str, type_str) - raise CamConfigValError(emsg) - # End if - + # end if + # end for else: #Invalid option given for "valid_type", so raise error: emsg = "ERROR: '{}' is not a recognized option for 'valid_type'." emsg += " Please use either 'int' or 'str'." 
raise CamConfigValError(emsg.format(valid_type)) - + # End if + #If bad values dictionary is non-empty, then raise error: + if bad_val_types: + if len(bad_val_types) > 1: + emsg = "ERROR: The following list entries, provided for variable," + emsg += " '{}', are not {}s, but instead are:\n".format(self.name, good_type) + else: + emsg = "ERROR: The following list entry, provided for variable," + emsg += " '{}', is not a {}, but instead is: ".format(self.name, good_type) + # end if + for key_str, type_str in bad_val_types.items(): + emsg += "'{}': type='{}'\n".format(key_str, type_str) + # end for + raise CamConfigValError(emsg) # End if #++++++++++++++++++++++++ @@ -862,36 +848,28 @@ def __check_values(self, list_vals): bad_val_msgs = [] # Check if valid type is string or integer - if self.valid_type == "int": - for val in list_vals: - #Check if integer value in list is valid - bad_val_msg = _check_integer_val(self.name, val, - valid_vals=self.valid_vals) - - # If return value is not None, then add - # to bad value list - if bad_val_msg: - bad_val_msgs.append(bad_val_msg) - # End if - - elif self.valid_type == "str": + if self.valid_type in _TYPE_CHECK_FUNCTIONS: for val in list_vals: - # Check if string value in list is valid - bad_val_msg = _check_string_val(self.name, val, - valid_vals=self.valid_vals) - + #Check if integer or string value in list is valid + bad_val_msg = _TYPE_CHECK_FUNCTIONS[self.valid_type](self.name, val, + valid_vals=self.valid_vals) # If return value is not None, then add # to bad value list if bad_val_msg: bad_val_msgs.append(bad_val_msg) # End if - # End if + # end for + else: + emsg = "Internal Error: Bad valid_type, '{}'" + raise CamConfigTypeError(emsg.format(self.valid_type)) + # end if # If bad values are present, then raise an error if bad_val_msgs: emsg = "The following errors were found for a list-type config variable:\n" emsg += "\n\n".join(bad_val_msgs) raise CamConfigValError(emsg) + # End if #++++++++++++++++++++++++ @@ -1257,8 +1235,8 @@ def __init__(self, case, case_log): else: analy_ic_val = 0 #Don't use Analytic ICs - analy_ic_desc = ["Switch to turn on analytic initial conditions for the dynamics state:", - "0 => no", + analy_ic_desc = ["Switch to turn on analytic initial conditions for the dynamics state: ", + "0 => no ", "1 => yes."] self.create_config("analytic_ic", analy_ic_desc, @@ -1694,60 +1672,69 @@ def ccpp_phys_set(self, cam_nml_attr_dict, user_nl_file): with open(user_nl_file, 'r') as nl_file: #Read lines in file: nl_user_lines = nl_file.readlines() - - #Break out "physics_suite" lines: - phys_suite_lines = [] - for line in nl_user_lines: - #Must check if line.lstrip is non-empty first, - #Otherwise blank spaces in user_nl_cam will - #cause problems: - if line.lstrip(): - if line.lstrip()[0] != '!' and 'physics_suite' in line: - phys_suite_lines.append([x.strip() for x in line.split('=')]) - - if not phys_suite_lines: - #If there is no "physics_suite" line, - #then check if there is only one physics suite option: - if len(phys_suites) == 1: - #If so, then just use the only possible suite option: - phys_suite_val = phys_suites[0] - else: - #If more than one option, then raise an error: - emsg = "No 'physics_suite' variable is present in user_nl_cam.\n" - emsg += "This is required if more than one suite is listed\n" - emsg += "in CAM_CONFIG_OPTS." 
- raise CamConfigValError(emsg) + #End with + + #Break out "physics_suite" lines: + phys_suite_lines = [] + for line in nl_user_lines: + #Must check if line.lstrip is non-empty first, + #Otherwise blank spaces in user_nl_cam will + #cause problems: + if line.lstrip(): + if line.lstrip()[0] != '!' and 'physics_suite' in line: + phys_suite_lines.append([x.strip() for x in line.split('=')]) + #End if + #End if + #End for + + if not phys_suite_lines: + #If there is no "physics_suite" line, + #then check if there is only one physics suite option: + if len(phys_suites) == 1: + #If so, then just use the only possible suite option: + phys_suite_val = phys_suites[0] else: + #If more than one option, then raise an error: + emsg = "No 'physics_suite' variable is present in user_nl_cam.\n" + emsg += "This is required if more than one suite is listed\n" + emsg += "in CAM_CONFIG_OPTS." + raise CamConfigValError(emsg) + #End if + else: - #If there is more than one "physics_suite" entry, then throw an error: - if len(phys_suite_lines) > 1: - emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n" - emsg += "Only one 'physics_suite' line is allowed." - raise CamConfigValError(emsg) - - #The split string list exists inside another, otherwise empty list, so extract - #from empty list: - phys_suite_list = phys_suite_lines[0] - - if len(phys_suite_list) == 1: - #If there is only one string entry, then it means the equals (=) sign was never found: - emsg = "No equals (=) sign was found with the 'physics_suite' variable." - raise CamConfigValError(emsg) - - if len(phys_suite_list) > 2: - #If there is more than two entries, it means there were two or more equals signs: - emsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." - raise CamConfigValError(emsg) + #If there is more than one "physics_suite" entry, then throw an error: + if len(phys_suite_lines) > 1: + emsg = "More than one 'physics_suite' variable is present in user_nl_cam.\n" + emsg += "Only one 'physics_suite' line is allowed." + raise CamConfigValError(emsg) + #End if - #Remove quotation marks around physics_suite entry, if any: - phys_suite_val = phys_suite_list[1].strip(''' "' ''') + #The split string list exists inside another, otherwise empty list, so extract + #from empty list: + phys_suite_list = phys_suite_lines[0] - #Check that physics suite specified is actually in config list: - if phys_suite_val not in phys_suites: - emsg = "physics_suite specified in user_nl_cam, '{}', doesn't match any suites\n" - emsg += "listed in CAM_CONFIG_OPTS" - raise CamConfigValError(emsg.format(phys_suite_val)) + if len(phys_suite_list) == 1: + #If there is only one string entry, then it means the equals (=) sign was never found: + emsg = "No equals (=) sign was found with the 'physics_suite' variable." + raise CamConfigValError(emsg) + #End if + if len(phys_suite_list) > 2: + #If there is more than two entries, it means there were two or more equals signs: + emsg = "There must only be one equals (=) sign in the 'physics_suite' namelist line." 
+ raise CamConfigValError(emsg) + #End if + + #Remove quotation marks around physics_suite entry, if any: + phys_suite_val = phys_suite_list[1].strip(''' "' ''') + + #Check that physics suite specified is actually in config list: + if phys_suite_val not in phys_suites: + emsg = "physics_suite specified in user_nl_cam, '{}', doesn't match any suites\n" + emsg += "listed in CAM_CONFIG_OPTS" + raise CamConfigValError(emsg.format(phys_suite_val)) + #End if + #End if (phys_suite_lines check). #Add new namelist attribute to dictionary: cam_nml_attr_dict["phys_suite"] = phys_suite_val diff --git a/src/data/generate_registry_data.py b/src/data/generate_registry_data.py index 2de28a48..0f662f52 100755 --- a/src/data/generate_registry_data.py +++ b/src/data/generate_registry_data.py @@ -235,9 +235,9 @@ def write_initial_value(self, outfile, indent, init_var, ddt_str, if self.var_type.lower() == 'real': init_val = 'nan' elif self.var_type.lower() == 'integer': - init_val = 'HUGE(1)' + init_val = 'unset_int' elif self.var_type.lower() == 'character': - init_val = '""' + init_val = 'unset_str' elif self.var_type.lower() == 'complex': init_val = '(nan, nan)' else: @@ -1229,7 +1229,7 @@ def write_source(self, outdir, indent, logger): with FortranWriter(ofilename, "w", file_desc, self.name, indent=indent) as outfile: # Use statements (if any) - module_list = [] # tuple of (module, type) + module_list = [] # elements are a tuple, (module, type) for var in self.__var_dict.variable_list(): mod = var.module if mod and (mod.lower() != self.name.lower()): diff --git a/src/data/registry.xml b/src/data/registry.xml index 14440642..970bb445 100644 --- a/src/data/registry.xml +++ b/src/data/registry.xml @@ -11,6 +11,8 @@ + + air_temperature + dry_static_energy x_wind y_wind lagrangian_tendency_of_air_pressure constituent_mixing_ratio - - surface_pressure_of_dry_air - geopotential_at_surface - pressure_thickness_of_dry_air - frontogenesis_function - frontogenesis_angle + pressure_thickness + pressure_thickness_of_dry_air + reciprocal_of_pressure_thickness + reciprocal_of_pressure_thickness_of_dry_air + air_pressure + air_pressure_of_dry_air + ln_of_air_pressure + ln_of_air_pressure_of_dry_air + air_pressure_at_interface + air_pressure_of_dry_air_at_interface + ln_of_air_pressure_at_interface + ln_of_air_pressure_of_dry_air_at_interface + surface_air_pressure + surface_pressure_of_dry_air + geopotential_at_surface + geopotential_height + geopotential_height_at_interface + inverse_exner_function_wrt_surface_pressure + frontogenesis_function + frontogenesis_angle total_tendency_of_air_temperature diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 983cb491..46264f0d 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -53,7 +53,6 @@ subroutine d_p_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_out) ! dry air mass. use gravity_waves_sources, only: gws_src_fnct - use dyn_comp, only: frontgf_idx, frontga_idx use hycoef, only: hyai, ps0 use test_fvm_mapping, only: test_mapping_overwrite_dyn_state, test_mapping_output_phys_state @@ -310,8 +309,6 @@ end subroutine d_p_coupling subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, tl_qdp) - use physics_types, only: pdel - ! Convert the physics output state into the dynamics input state. 
use test_fvm_mapping, only: test_mapping_overwrite_tendencies use test_fvm_mapping, only: test_mapping_output_mapped_tendencies @@ -410,7 +407,7 @@ subroutine p_d_coupling(cam_runtime_opts, phys_state, phys_tend, dyn_in, tl_f, t do ilyr = 1, pver do icol=1, pcols !Apply adjustment only to water vapor: - factor = pdel(icol,ilyr)/phys_state%pdeldry(icol,ilyr) + factor = phys_state%pdel(icol,ilyr)/phys_state%pdeldry(icol,ilyr) phys_state%q(icol,ilyr,ix_qv) = factor*phys_state%q(icol,ilyr,ix_qv) phys_state%q(icol,ilyr,ix_cld_liq) = factor*phys_state%q(icol,ilyr,ix_cld_liq) phys_state%q(icol,ilyr,ix_rain) = factor*phys_state%q(icol,ilyr,ix_rain) @@ -576,9 +573,7 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! Finally compute energy and water column integrals of the physics input state. ! use constituents, only: qmin - use physics_types, only: pintdry, lnpintdry, rpdeldry, pmiddry - use physics_types, only: lnpmiddry, pdel, ps, pint, pmid, lnpint, lnpmid - use physics_types, only: rpdel, exner, zi, zm, lagrangian_vertical, dse + use physics_types, only: lagrangian_vertical use physconst, only: cpair, gravit, zvir, cappa, rairv, physconst_update use shr_const_mod, only: shr_const_rwv use geopotential_t, only: geopotential_t_run @@ -619,30 +614,30 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i = 1, pcols ! Set model-top values: - phys_state%psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(phys_state%pdeldry(i,:)) - pintdry(i,1) = real(hyai(1)*ps0, kind_phys) + phys_state%psdry(i) = real(hyai(1)*ps0, kind_phys) + sum(phys_state%pdeldry(i,:)) + phys_state%pintdry(i,1) = real(hyai(1)*ps0, kind_phys) end do ! Calculate (natural) logarithm: - call shr_vmath_log(pintdry(1:pcols,1), & - lnpintdry(1:pcols,1), pcols) + call shr_vmath_log(phys_state%pintdry(1:pcols,1), & + phys_state%lnpintdry(1:pcols,1), pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols ! Calculate dry pressure variables for rest of column: - pintdry(i,k+1) = pintdry(i,k) + phys_state%pdeldry(i,k) - rpdeldry(i,k) = 1._kind_phys/phys_state%pdeldry(i,k) - pmiddry(i,k) = 0.5_kind_phys*(pintdry(i,k+1) + & - pintdry(i,k)) + phys_state%pintdry(i,k+1) = phys_state%pintdry(i,k) + phys_state%pdeldry(i,k) + phys_state%rpdeldry(i,k) = 1._kind_phys/phys_state%pdeldry(i,k) + phys_state%pmiddry(i,k) = 0.5_kind_phys*(phys_state%pintdry(i,k+1) + & + phys_state%pintdry(i,k)) end do ! Calculate (natural) logarithms: - call shr_vmath_log(pintdry(1:pcols,k+1),& - lnpintdry(1:pcols,k+1), pcols) + call shr_vmath_log(phys_state%pintdry(1:pcols,k+1),& + phys_state%lnpintdry(1:pcols,k+1), pcols) - call shr_vmath_log(pmiddry(1:pcols,k), & - lnpmiddry(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pmiddry(1:pcols,k), & + phys_state%lnpmiddry(1:pcols,k), pcols) end do ! wet pressure variables (should be removed from physics!) @@ -653,7 +648,7 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! to be consistent with total energy formula in physic's check_energy module only ! include water vapor in in moist dp factor_array(i,k) = 1._kind_phys+phys_state%q(i,k,ix_qv) - pdel(i,k) = phys_state%pdeldry(i,k)*factor_array(i,k) + phys_state%pdel(i,k) = phys_state%pdeldry(i,k)*factor_array(i,k) end do end do @@ -662,29 +657,29 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !$omp parallel do num_threads(horz_num_threads) private (i) do i=1, pcols ! 
Set model-top values assuming zero moisture: - ps(i) = pintdry(i,1) - pint(i,1) = pintdry(i,1) + phys_state%ps(i) = phys_state%pintdry(i,1) + phys_state%pint(i,1) = phys_state%pintdry(i,1) end do !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i=1, pcols ! Calculate wet (total) pressure variables for rest of column: - pint(i,k+1) = pint(i,k) + pdel(i,k) - pmid(i,k) = (pint(i,k+1) + pint(i,k))/2._kind_phys - ps(i) = ps(i) + pdel(i,k) + phys_state%pint(i,k+1) = phys_state%pint(i,k) + phys_state%pdel(i,k) + phys_state%pmid(i,k) = (phys_state%pint(i,k+1) + phys_state%pint(i,k))/2._kind_phys + phys_state%ps(i) = phys_state%ps(i) + phys_state%pdel(i,k) end do ! Calculate (natural) logarithms: - call shr_vmath_log(pint(1:pcols,k), lnpint(1:pcols,k), pcols) - call shr_vmath_log(pmid(1:pcols,k), lnpmid(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pint(1:pcols,k), phys_state%lnpint(1:pcols,k), pcols) + call shr_vmath_log(phys_state%pmid(1:pcols,k), phys_state%lnpmid(1:pcols,k), pcols) end do - call shr_vmath_log(pint(1:pcols,pverp),lnpint(1:pcols,pverp),pcols) + call shr_vmath_log(phys_state%pint(1:pcols,pverp),phys_state%lnpint(1:pcols,pverp),pcols) !$omp parallel do num_threads(horz_num_threads) private (k, i) do k = 1, nlev do i = 1, pcols - rpdel(i,k) = 1._kind_phys/pdel(i,k) - exner(i,k) = (pint(i,pver+1)/pmid(i,k))**cappa + phys_state%rpdel(i,k) = 1._kind_phys/phys_state%pdel(i,k) + phys_state%exner(i,k) = (phys_state%pint(i,pver+1)/phys_state%pmid(i,k))**cappa end do end do @@ -764,10 +759,10 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) !Call geopotential_t CCPP scheme: call geopotential_t_run(pver, lagrangian_vertical, pver, 1, & - pverp, 1, lnpint, pint, & - pmid, pdel, & - rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & - rairv, gravit, zvirv, zi, zm, pcols, & + pverp, 1, phys_state%lnpint, phys_state%pint, & + phys_state%pmid, phys_state%pdel, & + phys_state%rpdel, phys_state%t, phys_state%q(:,:,ix_qv), & + rairv, gravit, zvirv, phys_state%zi, phys_state%zm, pcols, & errflg, errmsg) !NOTE: Should dry static energy be done in CCPP physics suite? -JN: @@ -775,8 +770,8 @@ subroutine derived_phys_dry(cam_runtime_opts, phys_state, phys_tend) ! Compute initial dry static energy, include surface geopotential do k = 1, pver do i = 1, pcols - dse(i,k) = cpair*phys_state%t(i,k) & - + gravit*zm(i,k) + phys_state%phis(i) + phys_state%dse(i,k) = cpair*phys_state%t(i,k) & + + gravit*phys_state%zm(i,k) + phys_state%phis(i) end do end do diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 893fda4e..2944b572 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -72,10 +72,6 @@ module dyn_comp ! Namelist logical, public, protected :: write_restart_unstruct -! Frontogenesis indices -integer, public :: frontgf_idx = -1 -integer, public :: frontga_idx = -1 - ! constituent indices for waccm-x dry air properties integer, public, protected :: & ixo = -1, & From d66cb95aea6dae7fc4255042168a71c7f25718c9 Mon Sep 17 00:00:00 2001 From: Jesse Nusbaumer Date: Sun, 26 Sep 2021 21:54:29 -0600 Subject: [PATCH 45/45] Address final round of review comments and suggestions. 
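
For reference, the aggregated-error pattern this patch introduces in
_check_integer_val can be sketched as the stand-alone snippet below. The
helper name and example values are invented for illustration, and the real
code raises CamConfigTypeError (with per-entry formatting) rather than a
plain ValueError:

    # Illustrative sketch only: gather every bad 'valid_vals' entry,
    # then raise a single error listing all of them.
    def check_int_valid_vals(name, valid_vals):
        emsg = ""
        for valid_val in valid_vals:
            if not isinstance(valid_val, int):
                emsg += ("ERROR: Valid value, '{}', for variable '{}', "
                         "must be an integer. Currently it is '{}'.\n"
                         ).format(valid_val, name, type(valid_val))
        if emsg:
            raise ValueError(emsg)  # cam_config raises CamConfigTypeError here

    check_int_valid_vals("test", [1, 5.0])  # raises: 5.0 is not an integer
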
--- cime_config/cam_config.py | 30 ++++++++++++++++++++---------- src/dynamics/utils/dyn_thermo.F90 | 8 ++++---- src/physics/utils/physics_grid.F90 | 4 ++++ 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/cime_config/cam_config.py b/cime_config/cam_config.py index 84cbca8b..a2bb15ba 100644 --- a/cime_config/cam_config.py +++ b/cime_config/cam_config.py @@ -97,7 +97,8 @@ def _check_integer_val(name, val, valid_vals=None): >>> _check_integer_val("test", 5, valid_vals=[1,2,5,"test_val"]) #doctest: +ELLIPSIS Traceback (most recent call last): ... - CamConfigTypeError: ERROR: Valid value, 'test_val', for variable 'test', must be either None or an integer. Currently it is ''. + CamConfigTypeError: ERROR: Valid value, 'test_val', for variable 'test', must be an integer. Currently it is ''. + 4. Check that using a tuple with only one entry throws an error: >>> _check_integer_val("test", 5, valid_vals=(1,)) #doctest: +ELLIPSIS @@ -148,14 +149,18 @@ def _check_integer_val(name, val, valid_vals=None): # Check if valid values is a tuple if isinstance(valid_vals, tuple): - # Check that all tuple elements are integers: + # Check that all tuple elements are either None or integers + emsg = "" for valid_val in valid_vals: if valid_val is not None and not isinstance(valid_val, int): - emsg = ("ERROR: Valid value, '{}', for variable '{}', must be " - "either None or an integer. Currently it is '{}'.") - raise CamConfigTypeError(emsg.format(valid_val, name, type(valid_val))) + emsg += "ERROR: Valid value, '{}', for variable '{}', must be " + emsg += "either None or an integer. Currently it is '{}'.\n" + emsg = emsg.format(valid_val, name, type(valid_val)) # End if # End for + if emsg: + raise CamConfigTypeError(emsg) + # end if # Check that length of valid values tuple is 2 if len(valid_vals) != 2: @@ -200,14 +205,19 @@ def _check_integer_val(name, val, valid_vals=None): elif isinstance(valid_vals, list): - # Check that all tuple elements are integers: + # Check that all list elements are integers + emsg = "" for valid_val in valid_vals: - if valid_val is not None and not isinstance(valid_val, int): - emsg = ("ERROR: Valid value, '{}', for variable '{}', must be " - "either None or an integer. Currently it is '{}'.") - raise CamConfigTypeError(emsg.format(valid_val, name, type(valid_val))) + if not isinstance(valid_val, int): + emsg += "ERROR: Valid value, '{}', for variable '{}', " + emsg += "must be an integer. Currently it is '{}'.\n" + emsg = emsg.format(valid_val, name, type(valid_val)) # End if # End for + if emsg: + raise CamConfigTypeError(emsg) + # end if + # If valid_vals is a list, then just check that the given value # matches one of the valid values in the list diff --git a/src/dynamics/utils/dyn_thermo.F90 b/src/dynamics/utils/dyn_thermo.F90 index 1fde6612..f8300477 100644 --- a/src/dynamics/utils/dyn_thermo.F90 +++ b/src/dynamics/utils/dyn_thermo.F90 @@ -383,20 +383,20 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac ! array bounds real(kind_dyn), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) !tracers; quantity specified by mixing_ratio arg integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio - ! 2 => tracer is mass (q*dp) + ! 2 => tracer is mass (q*dp) integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array real(kind_dyn), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! 
dry pressure level thickness real(kind_dyn), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness real(kind_dyn), optional,intent(out) :: ps(:,:) ! surface pressure (if ps present then ptop - ! must be present) - real(kind_dyn), optional,intent(in) :: ptop ! pressure at model top + ! must be present) + real(kind_dyn), optional,intent(in) :: ptop ! pressure at model top !Declare local variables: real(kind_phys), allocatable :: tracer_phys(:,:,:,:) real(kind_phys), allocatable :: dp_dry_phys(:,:,:) real(kind_phys), allocatable :: dp_phys(:,:,:) real(kind_phys), allocatable :: ps_phys(:,:) - real(kind_phys), allocatable :: ptop_phys + real(kind_phys), allocatable :: ptop_phys !Allocatable in order to indicate "presence" to get_dp_phys subroutine. !check_allocate variables: integer :: iret !allocate status integer diff --git a/src/physics/utils/physics_grid.F90 b/src/physics/utils/physics_grid.F90 index 49936a2a..0528cb0c 100644 --- a/src/physics/utils/physics_grid.F90 +++ b/src/physics/utils/physics_grid.F90 @@ -46,6 +46,10 @@ module physics_grid public :: local_index_p ! local column index of a physics column public :: get_grid_dims ! return grid dimensions + ! Private subroutines + private :: check_phys_input !Checks that physics grid is initialized and that + !provided physics column index is valid. + ! The identifier for the physics grid integer, parameter, public :: phys_decomp = 100
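
A usage sketch for the ConfigInteger validation touched above may help when
exercising these checks by hand. The variable names and values below are
illustrative only, and the import path assumes cime_config/ (and
cam_config.py's own dependencies) are importable; none of this is part of
the patches themselves:

    # Hedged usage sketch of ConfigInteger's valid_vals handling.
    import sys
    sys.path.append("cime_config")  # assumption: run from the repository root
    from cam_config import ConfigInteger

    # A (min, max) tuple bounds the value; a None entry relaxes that bound.
    nlev = ConfigInteger("nlev", "Number of vertical levels", 30, (1, None),
                         is_nml_attr=True)
    print(nlev.value)        # -> 30

    # A list restricts the value to an explicit set of integers.
    analytic_ic = ConfigInteger("analytic_ic",
                                "Use analytic initial conditions", 0, [0, 1])
    print(analytic_ic.value) # -> 0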