diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE index 7a696e24f..8e59f465c 100644 --- a/.github/PULL_REQUEST_TEMPLATE +++ b/.github/PULL_REQUEST_TEMPLATE @@ -16,7 +16,7 @@ One or more paragraphs describing the problem, solution, and required changes. ## TESTS CONDUCTED: If there are changes to the build or source code, the tests below must be conducted. Contact a repository manager if you need assistance. -- [ ] Compile branch on all Tier 1 machines using Intel (Orion, Jet, Hera and WCOSS2). +- [ ] Compile branch on all Tier 1 machines using Intel (Orion, Jet, Hera, Hercules and WCOSS2). - [ ] Compile branch on Hera using GNU. - [ ] Compile branch in 'Debug' mode on WCOSS2. - [ ] Run unit tests locally on any Tier 1 machine. diff --git a/.github/workflows/ubuntu_intel.yaml b/.github/workflows/ubuntu_intel.yaml index f936c08f1..7c882c313 100644 --- a/.github/workflows/ubuntu_intel.yaml +++ b/.github/workflows/ubuntu_intel.yaml @@ -50,14 +50,14 @@ jobs: sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list sudo apt-get update - sudo apt-get install intel-oneapi-dev-utilities intel-oneapi-mpi-devel intel-oneapi-openmp intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic + sudo apt-get install intel-oneapi-dev-utilities intel-oneapi-mpi-devel intel-oneapi-openmp intel-oneapi-compiler-fortran-2023.2.1 intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic-2023.2.1 echo "source /opt/intel/oneapi/setvars.sh" >> ~/.bash_profile # Install dependencies using Spack - name: install-dependencies-with-spack if: steps.cache-env.outputs.cache-hit != 'true' run: | - git clone -c feature.manyFiles=true https://github.com/NOAA-EMC/spack.git + git clone -c feature.manyFiles=true https://github.com/JCSDA/spack.git source spack/share/spack/setup-env.sh sed "s/\[intel, gcc@10:10, apple-clang@14\]/\[intel\]/g" ufs_utils/ci/spack.yaml > 
spack_ci.yaml spack env create ufs_utils-env spack_ci.yaml diff --git a/README.md b/README.md index 9e64ba59a..fa857f1f8 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ It also uses the following repositories: ## Installing -On Orion, Jet, Hera and WCOSS2 do the following: +On Orion, Hercules, Jet, Hera and WCOSS2 do the following: 1) Set the 'fixed' directories using the `link_fixdirs.sh` script in `./fix`. Usage: `./link_fixdirs.sh $RUN_ENVIR $machine`, diff --git a/ci/spack.yaml b/ci/spack.yaml index 8711ee2cc..61693f700 100644 --- a/ci/spack.yaml +++ b/ci/spack.yaml @@ -10,7 +10,7 @@ spack: - g2@3.4.5 - ip@4.4.0 precision=d - nemsio@2.5.4 - - sp@2.3.3 + - sp@2.5.0 - w3emc@2.10.0 - sfcio@1.4.1 - sigio@2.3.2 @@ -19,4 +19,4 @@ spack: - esmf@8.4.2~debug~xerces+external-parallelio view: true concretizer: - unify: true + unify: when_possible diff --git a/cmake/LibMPI.cmake b/cmake/LibMPI.cmake index f5a4d5f35..83dbe8df8 100644 --- a/cmake/LibMPI.cmake +++ b/cmake/LibMPI.cmake @@ -94,6 +94,17 @@ function (platform_name RETURN_VARIABLE) set (${RETURN_VARIABLE} "orion" PARENT_SCOPE) + elseif (SITENAME MATCHES "^Hercules-login-1.HPC.MsState.Edu" OR + SITENAME MATCHES "^Hercules-login-2.HPC.MsState.Edu" OR + SITENAME MATCHES "^Hercules-login-3.HPC.MsState.Edu" OR + SITENAME MATCHES "^Hercules-login-4.HPC.MsState.Edu" OR + SITENAME MATCHES "^hercules-login-1.hpc.msstate.edu" OR + SITENAME MATCHES "^hercules-login-2.hpc.msstate.edu" OR + SITENAME MATCHES "^hercules-login-3.hpc.msstate.edu" OR + SITENAME MATCHES "^hercules-login-4.hpc.msstate.edu") + + set (${RETURN_VARIABLE} "hercules" PARENT_SCOPE) + elseif (SITENAME MATCHES "^cheyenne1.cheyenne.ucar.edu" OR SITENAME MATCHES "^cheyenne1.cheyenne.ucar.edu" OR SITENAME MATCHES "^cheyenne2.cheyenne.ucar.edu" OR diff --git a/cmake/mpiexec.hercules b/cmake/mpiexec.hercules new file mode 100755 index 000000000..332b33e29 --- /dev/null +++ b/cmake/mpiexec.hercules @@ -0,0 +1,15 @@ +#!/bin/bash +# +# Arguments: +# +# $1 - 
Number of MPI Tasks +# $2+ - Executable and its arguments +# + +ACCOUNT= +QOS=debug + +NP=$1 +shift + +srun -A $ACCOUNT -q $QOS -n $NP $@ diff --git a/driver_scripts/driver_grid.hercules.sh b/driver_scripts/driver_grid.hercules.sh new file mode 100644 index 000000000..cd4afee51 --- /dev/null +++ b/driver_scripts/driver_grid.hercules.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +#SBATCH -J fv3_grid_driver +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o log.fv3_grid_driver +#SBATCH -e log.fv3_grid_driver +#SBATCH --nodes=2 --ntasks-per-node=15 +#SBATCH -q debug +#SBATCH -t 00:30:00 + +#----------------------------------------------------------------------- +# Driver script to create a cubic-sphere based model grid on Hercules. +# +# Produces the following files (netcdf, each tile in separate file): +# 1) 'mosaic' and 'grid' files containing lat/lon and other +# records that describe the model grid. +# 2) 'oro' files containing land mask, terrain and gravity +# wave drag fields. +# 3) Surface climo fields, such as soil type, vegetation +# greenness and albedo. +# +# Note: The sfc_climo_gen program only runs with an +# mpi task count that is a multiple of six. This is +# an ESMF library requirement. Large grids may require +# tasks spread across multiple nodes. The orography code +# benefits from threads. +# +# To run, do the following: +# +# 1) Set "C" resolution, "res" - Example: res=96. +# 2) Set grid type ("gtype"). Valid choices are +# "uniform" - global uniform grid +# "stretch" - global stretched grid +# "nest" - global stretched grid with nest +# "regional_gfdl" - stand-alone gfdl regional grid +# "regional_esg" - stand-alone extended Schmidt gnomonic +# (esg) regional grid +# 3) For "uniform" and "regional_gfdl" grids - to include lake +# fraction and depth, set "add_lake" to true, and the +# "lake_cutoff" value. 
+# 4) For "stretch" and "nest" grids, set the stretching factor - +# "stretch_fac", and center lat/lon of highest resolution +# tile - "target_lat" and "target_lon". +# 5) For "nest" grids, set the refinement ratio - "refine_ratio", +# the starting/ending i/j index location within the parent +# tile - "istart_nest", "jstart_nest", "iend_nest", "jend_nest" +# 6) For "regional_gfdl" grids, set the "halo". Default is three +# rows/columns. +# 7) For "regional_esg" grids, set center lat/lon of grid, +# - "target_lat/lon" - the i/j dimensions - "i/jdim", the +# x/y grid spacing - "delx/y", and halo. +# 8) Set working directory - TEMP_DIR - and path to the repository +# clone - home_dir. +# 9) To use the GSL orographic drag suite, set 'make_gsl_orog' to true. +# 10) Set 'soil_veg_src' and 'veg_type_src' to choose the +# soil type and vegetation type data. +# 11) Submit script: "sbatch $script". +# 12) All files will be placed in "out_dir". +# +#----------------------------------------------------------------------- + +set -x + +source ../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../modulefiles +module load build.$target.intel +module list + +#----------------------------------------------------------------------- +# Set grid specs here. +#----------------------------------------------------------------------- + +export gtype=uniform # 'uniform', 'stretch', 'nest', + # 'regional_gfdl', 'regional_esg' + +export make_gsl_orog=false # When 'true' will output 'oro' files for + # the GSL orographic drag suite. + +export vegsoilt_frac='.false.' # When true, outputs percent of each + # soil and veg type category and a + # dominant category. When false, only + # outputs the dominant category. A + # Fortran logical, so include the dots. + +export veg_type_src="viirs.v3.igbp.30s" # Vegetation type data. 
+ # For viirs-based vegetation type data, set to: + # 1) "viirs.v3.igbp.30s" for global 30s data + # For the modis-based data, set to: + # 1) "modis.igbp.0.05" for global 0.05-deg data + # 2) "modis.igbp.0.03" for global 0.03-deg data + # 3) "modis.igbp.conus.30s" for CONUS 30s data + # 4) "modis.igbp.nh.30s" for N Hemis 30s data + # 5) "modis.igbp.30s" for global 30s data + +export soil_type_src="bnu.v3.30s" # Soil type data. + # For Beijing Normal Univ. data, set to: + # 1) "bnu.v3.30s" for global 30s data. + # For STATSGO soil type data, set to: + # 1) "statsgo.0.05" for global 0.05-deg data + # 2) "statsgo.0.03" for global 0.03-deg data + # 3) "statsgo.conus.30s" for CONUS 30s data + # 4) "statsgo.nh.30s" for NH 30s data + # 5) "statsgo.30s" for global 30s data + +# choose dataset sources for lakefrac & lakedepth so that lake_data_srce=LakeFrac_LakeDepth; +# available options are 'MODISP_GLDBV3', 'MODISP_GLOBATHY', 'VIIRS_GLDBV3', 'VIIRS_GLOBATHY' & 'GLDBV3' +export lake_data_srce=MODISP_GLDBV3 + +if [ $gtype = uniform ]; then + export res=96 + export add_lake=true # Add lake frac and depth to orography data. + export lake_cutoff=0.50 # return 0 if lake_frac < lake_cutoff & add_lake=T + export binary_lake=1 # return 1 if lake_frac >= lake_cutoff & add_lake=T + export ocn=${ocn:-"025"} # use one of "025", "050", "100", "500". Cannot be empty +elif [ $gtype = stretch ]; then + export res=96 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=35.5 # Center latitude of the highest resolution tile +elif [ $gtype = nest ] || [ $gtype = regional_gfdl ]; then + export add_lake=false # Add lake frac and depth to orography data. 
+ export lake_cutoff=0.20 # lake frac < lake_cutoff ignored when add_lake=T + export res=768 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=38.5 # Center latitude of the highest resolution tile + export refine_ratio=3 # The refinement ratio + export istart_nest=123 # Starting i-direction index of nest grid in parent tile supergrid + export jstart_nest=331 # Starting j-direction index of nest grid in parent tile supergrid + export iend_nest=1402 # Ending i-direction index of nest grid in parent tile supergrid + export jend_nest=1194 # Ending j-direction index of nest grid in parent tile supergrid + export halo=3 # Lateral boundary halo +elif [ $gtype = regional_esg ] ; then + export res=-999 # equivalent resolution is computed + export target_lon=-97.5 # Center longitude of grid + export target_lat=35.5 # Center latitude of grid + export idim=301 # Dimension of grid in 'i' direction + export jdim=200 # Dimension of grid in 'j' direction + export delx=0.0585 # Grid spacing (in degrees) in the 'i' direction + # on the SUPERGRID (which has twice the resolution of + # the model grid). The physical grid spacing in the 'i' + # direction is related to delx as follows: + # distance = 2*delx*(circumf_Earth/360 deg) + export dely=0.0585 # Grid spacing (in degrees) in the 'j' direction. + export halo=3 # number of row/cols for halo +fi + +#----------------------------------------------------------------------- +# Check paths. +# home_dir - location of repository. +# TEMP_DIR - working directory. +# out_dir - where files will be placed upon completion. +#----------------------------------------------------------------------- + +export home_dir=$SLURM_SUBMIT_DIR/.. 
+export TEMP_DIR=/work/noaa/stmp/$LOGNAME/fv3_grid.$gtype +export out_dir=/work/noaa/stmp/$LOGNAME/my_grids + +#----------------------------------------------------------------------- +# Should not need to change anything below here. +#----------------------------------------------------------------------- + +export APRUN=time +export APRUN_SFC=srun +export OMP_NUM_THREADS=24 +export OMP_STACKSIZE=2048m + +ulimit -a +ulimit -s unlimited + +#----------------------------------------------------------------------- +# Start script. +#----------------------------------------------------------------------- + +$home_dir/ush/fv3gfs_driver_grid.sh + +exit diff --git a/fix/link_fixdirs.sh b/fix/link_fixdirs.sh index 0d460d843..00c17a10b 100755 --- a/fix/link_fixdirs.sh +++ b/fix/link_fixdirs.sh @@ -9,7 +9,7 @@ set -ex # 'nco' (copies data). # # $machine - is the machine. Choices are: -# 'wcoss2', 'hera', 'jet', 'orion', 's4' +# 'wcoss2', 'hera', 'jet', 'orion', 'hercules', 's4' RUN_ENVIR=${1} machine=${2} @@ -17,7 +17,7 @@ machine=${2} if [ $# -lt 2 ]; then set +x echo '***ERROR*** must specify two arguements: (1) RUN_ENVIR, (2) machine' - echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | s4 )' + echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 )' exit 1 fi @@ -28,10 +28,10 @@ if [ $RUN_ENVIR != emc -a $RUN_ENVIR != nco ]; then exit 1 fi -if [ $machine != wcoss2 -a $machine != hera -a $machine != jet -a $machine != orion -a $machine != s4 ]; then +if [ $machine != wcoss2 -a $machine != hera -a $machine != jet -a $machine != orion -a $machine != s4 -a $machine != hercules ]; then set +x echo '***ERROR*** unsupported machine' - echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | s4 )' + echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 )' exit 1 fi @@ -48,7 +48,7 @@ if [ $machine = "hera" ]; then FIX_DIR="/scratch1/NCEPDEV/global/glopara/fix" elif [ 
$machine = "jet" ]; then FIX_DIR="/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix" -elif [ $machine = "orion" ]; then +elif [ $machine = "orion" -o $machine = "hercules" ]; then FIX_DIR="/work/noaa/global/glopara/fix" elif [ $machine = "wcoss2" ]; then FIX_DIR="/lfs/h2/emc/global/noscrub/emc.global/FIX/fix" diff --git a/modulefiles/build.hercules.intel.lua b/modulefiles/build.hercules.intel.lua new file mode 100644 index 000000000..161fe7255 --- /dev/null +++ b/modulefiles/build.hercules.intel.lua @@ -0,0 +1,61 @@ +help([[ +Load environment to compile UFS_UTILS on Hercules using Intel +]]) + +prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/modulefiles/Core") + +stack_intel_ver=os.getenv("stack_intel_ver") or "2021.9.0" +load(pathJoin("stack-intel", stack_intel_ver)) + +stack_impi_ver=os.getenv("stack_impi_ver") or "2021.9.0" +load(pathJoin("stack-intel-oneapi-mpi", stack_impi_ver)) + +cmake_ver=os.getenv("cmake_ver") or "3.23.1" +load(pathJoin("cmake", cmake_ver)) + +bacio_ver=os.getenv("bacio_ver") or "2.4.1" +load(pathJoin("bacio", bacio_ver)) + +g2_ver=os.getenv("g2_ver") or "3.4.5" +load(pathJoin("g2", g2_ver)) + +ip_ver=os.getenv("ip_ver") or "4.3.0" +load(pathJoin("ip", ip_ver)) + +nemsio_ver=os.getenv("nemsio_ver") or "2.5.4" +load(pathJoin("nemsio", nemsio_ver)) + +sp_ver=os.getenv("sp_ver") or "2.3.3" +load(pathJoin("sp", sp_ver)) + +w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" +load(pathJoin("w3emc", w3emc_ver)) + +sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +load(pathJoin("sfcio", sfcio_ver)) + +sigio_ver=os.getenv("sigio_ver") or "2.3.2" +load(pathJoin("sigio", sigio_ver)) + +zlib_ver=os.getenv("zlib_ver") or "1.2.13" +load(pathJoin("zlib", zlib_ver)) + +png_ver=os.getenv("png_ver") or "1.6.37" +load(pathJoin("libpng", png_ver)) + +netcdf_c_ver=os.getenv("netcdf_c_ver") or "4.9.2" +load(pathJoin("netcdf-c", netcdf_c_ver)) + +netcdf_fortran_ver=os.getenv("netcdf_fortran_ver") or "4.6.0" 
+load(pathJoin("netcdf-fortran", netcdf_fortran_ver)) + +nccmp_ver=os.getenv("nccmp_ver") or "1.9.0.1" +load(pathJoin("nccmp", nccmp_ver)) + +esmf_ver=os.getenv("esmf_ver") or "8.4.2" +load(pathJoin("esmf", esmf_ver)) + +nco_ver=os.getenv("nco_ver") or "5.0.6" +load(pathJoin("nco", nco_ver)) + +whatis("Description: UFS_UTILS build environment") diff --git a/reg_tests/chgres_cube/driver.hercules.sh b/reg_tests/chgres_cube/driver.hercules.sh new file mode 100755 index 000000000..80ff82a17 --- /dev/null +++ b/reg_tests/chgres_cube/driver.hercules.sh @@ -0,0 +1,236 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run the chgres_cube consistency tests on Hercules. +# +# Set WORK_DIR to a general working location outside the UFS_UTILS directory. +# The exact working directory (OUTDIR) will be WORK_DIR/reg_tests/chgres-cube. +# Set the PROJECT_CODE and QUEUE as appropriate. To see which projects you +# are authorized to use, type "saccount_params". +# +# Invoke the script with no arguments. A series of daily-chained +# consistency tests will be submitted. To check the queue, type: +# "squeue -u $LOGNAME". +# +# The run output will be stored in OUTDIR. Log output from the suite +# will be in LOG_FILE. Once the suite has completed, a summary is +# placed in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg. 
+# +#----------------------------------------------------------------------------- + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module list + +ulimit -s unlimited + +export OUTDIR="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}" +export OUTDIR="${OUTDIR}/reg-tests/chgres-cube" + +PROJECT_CODE="${PROJECT_CODE:-nesdis-rdo2}" +QUEUE="${QUEUE:-batch}" + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. HOMEufs is the root +# directory of your UFS_UTILS clone. HOMEreg contains the input data +# and baseline data for each test. +#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEufs=$PWD/../.. + +export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/chgres_cube + +LOG_FILE=consistency.log +SUM_FILE=summary.log +rm -f $SUM_FILE ${LOG_FILE}* + +export OMP_STACKSIZE=1024M + +export APRUN=srun + +export machine=hercules + +export NCCMP=${NCCMP:-nccmp} + +rm -fr $OUTDIR + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 warm restart files. +#----------------------------------------------------------------------------- + +LOG_FILE1=${LOG_FILE}01 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.restart \ + -o $LOG_FILE1 -e $LOG_FILE1 ./c96.fv3.restart.sh) + +#----------------------------------------------------------------------------- +# Initialize C192 using FV3 tiled history files. 
+#----------------------------------------------------------------------------- + +LOG_FILE2=${LOG_FILE}02 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ + --open-mode=append -o $LOG_FILE2 -e $LOG_FILE2 ./c192.fv3.history.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +LOG_FILE3=${LOG_FILE}03 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ + --open-mode=append -o $LOG_FILE3 -e $LOG_FILE3 ./c96.fv3.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS sigio/sfcio files. +#----------------------------------------------------------------------------- + +LOG_FILE4=${LOG_FILE}04 +export OMP_NUM_THREADS=6 # needs to match cpus-per-task +TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 --mem=75G -t 0:25:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ + --open-mode=append -o $LOG_FILE4 -e $LOG_FILE4 ./c96.gfs.sigio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS gaussian nemsio files. 
+#----------------------------------------------------------------------------- + +LOG_FILE5=${LOG_FILE}05 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ + --open-mode=append -o $LOG_FILE5 -e $LOG_FILE5 ./c96.gfs.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize regional C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +LOG_FILE6=${LOG_FILE}06 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.regional \ + --open-mode=append -o $LOG_FILE6 -e $LOG_FILE6 ./c96.regional.sh) + +#----------------------------------------------------------------------------- +# Initialize global C192 using GFS GRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE7=${LOG_FILE}07 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ + --open-mode=append -o $LOG_FILE7 -e $LOG_FILE7 ./c192.gfs.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize global C96 using FV3 gaussian netcdf files. +#----------------------------------------------------------------------------- + +LOG_FILE8=${LOG_FILE}08 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ + --open-mode=append -o $LOG_FILE8 -e $LOG_FILE8 ./c96.fv3.netcdf.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 WAM IC using FV3 gaussian netcdf files. 
+#----------------------------------------------------------------------------- + +LOG_FILE9=${LOG_FILE}09 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST9=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=100G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ + --open-mode=append -o $LOG_FILE9 -e $LOG_FILE9 ./c96.fv3.netcdf2wam.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 25-KM USING GFS GRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE10=${LOG_FILE}10 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST10=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2 \ + --open-mode=append -o $LOG_FILE10 -e $LOG_FILE10 ./25km.conus.gfs.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. +#----------------------------------------------------------------------------- + +LOG_FILE11=${LOG_FILE}11 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2 \ + --open-mode=append -o $LOG_FILE11 -e $LOG_FILE11 ./3km.conus.hrrr.gfssdf.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. 
+#----------------------------------------------------------------------------- + +LOG_FILE12=${LOG_FILE}12 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2 \ + --open-mode=append -o $LOG_FILE12 -e $LOG_FILE12 ./3km.conus.hrrr.newsfc.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . +#----------------------------------------------------------------------------- + +LOG_FILE13=${LOG_FILE}13 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST13=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2 \ + --open-mode=append -o $LOG_FILE13 -e $LOG_FILE13 ./13km.conus.nam.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . +#----------------------------------------------------------------------------- + +LOG_FILE14=${LOG_FILE}14 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST14=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2 \ + --open-mode=append -o $LOG_FILE14 -e $LOG_FILE14 ./13km.conus.rap.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . 
+#----------------------------------------------------------------------------- + +LOG_FILE15=${LOG_FILE}15 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST15=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2 \ + --open-mode=append -o $LOG_FILE15 -e $LOG_FILE15 ./13km.na.gfs.ncei.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. +#----------------------------------------------------------------------------- + +LOG_FILE16=${LOG_FILE}16 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST16=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2 \ + --open-mode=append -o $LOG_FILE16 -e $LOG_FILE16 ./25km.conus.gfs.pbgrib2.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using GEFS GRIB2 file. +#----------------------------------------------------------------------------- + +LOG_FILE17=${LOG_FILE}17 +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST17=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ + --open-mode=append -o $LOG_FILE17 -e $LOG_FILE17 ./c96.gefs.grib2.sh) + +#----------------------------------------------------------------------------- +# Create summary log. 
+#----------------------------------------------------------------------------- + +sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ + --open-mode=append -q $QUEUE \ + -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16:$TEST17 << EOF +#!/bin/bash +grep -a '<<<' ${LOG_FILE}* > $SUM_FILE +EOF + +exit 0 diff --git a/reg_tests/cpld_gridgen/rt.sh b/reg_tests/cpld_gridgen/rt.sh index 2b5614a92..70d11c048 100755 --- a/reg_tests/cpld_gridgen/rt.sh +++ b/reg_tests/cpld_gridgen/rt.sh @@ -151,6 +151,16 @@ elif [[ $target = orion ]]; then PARTITION=orion ulimit -s unlimited SBATCH_COMMAND="./cpld_gridgen.sh" +elif [[ $target = hercules ]]; then + STMP=${STMP:-/work/noaa/stmp/$USER} + export MOM6_FIXDIR=/work/noaa/global/glopara/fix/mom6/20220805 + BASELINE_ROOT=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/cpld_gridgen/baseline_data + ACCOUNT=${ACCOUNT:-nems} + QUEUE=${QUEUE:-batch} + NCCMP=nccmp + PARTITION=hercules + ulimit -s unlimited + SBATCH_COMMAND="./cpld_gridgen.sh" elif [[ $target = jet ]]; then STMP=${STMP:-/lfs4/HFIP/h-nems/$USER} export MOM6_FIXDIR=/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/mom6/20220805 diff --git a/reg_tests/global_cycle/driver.hercules.sh b/reg_tests/global_cycle/driver.hercules.sh new file mode 100755 index 000000000..d8cb1048d --- /dev/null +++ b/reg_tests/global_cycle/driver.hercules.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run global_cycle consistency tests on Hercules. +# +# Set $WORK_DIR to your working directory. Set the project code and +# and queue as appropriate. +# +# Invoke the script from command line as follows: ./$script +# +# Log output is placed in consistency.log??. A summary is +# placed in summary.log +# +# A test fails when its output does not match the baseline files +# as determined by the 'nccmp' utility. 
This baseline files are +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module list + +ulimit -s unlimited + +export WORK_DIR="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}" + +PROJECT_CODE="${PROJECT_CODE:-fv3-cpu}" +QUEUE="${QUEUE:-batch}" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export DATA_DIR="${WORK_DIR}/reg-tests/global-cycle" + +export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/global_cycle + +export OMP_NUM_THREADS_CY=2 + +export APRUNCY="srun" + +export NWPROD=$PWD/../.. + +reg_dir=$PWD + +LOG_FILE=consistency.log01 +export DATA="${DATA_DIR}/test1" +export COMOUT=$DATA +TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c768.fv3gfs \ + -o $LOG_FILE -e $LOG_FILE ./C768.fv3gfs.sh) + +LOG_FILE=consistency.log02 +export DATA="${DATA_DIR}/test2" +export COMOUT=$DATA +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.lndincsoilnoahmp.sh) + +LOG_FILE=consistency.log03 +export DATA="${DATA_DIR}/test3" +export COMOUT=$DATA +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c768.lndincsnow \ + -o $LOG_FILE -e $LOG_FILE ./C768.lndincsnow.sh) + +LOG_FILE=consistency.log +sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ + --open-mode=append -q $QUEUE -d\ + afterok:$TEST1:$TEST2:$TEST3 << EOF +#!/bin/bash +grep -a '<<<' ${LOG_FILE}* > 
summary.log +EOF + +exit diff --git a/reg_tests/grid_gen/driver.hercules.sh b/reg_tests/grid_gen/driver.hercules.sh new file mode 100755 index 000000000..a97d8cc25 --- /dev/null +++ b/reg_tests/grid_gen/driver.hercules.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run grid generation consistency tests on Hercules. +# +# Set WORK_DIR to your working directory. Set the PROJECT_CODE and QUEUE +# as appropriate. To see which projects you are authorized to use, +# type "saccount_params" (after a 'module load contrib noaatools'). +# +# Invoke the script with no arguments. A set of tests will +# be submitted to run in parallel. To check the queue, type: +# "squeue -u $LOGNAME". +# +# Log output from each test will be placed in its own LOG_FILE. +# Once the suite has completed, a summary is placed in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg +# +#----------------------------------------------------------------------------- + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module list + +set -x +ulimit -s unlimited + +export WORK_DIR="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}" +export WORK_DIR="${WORK_DIR}/reg-tests/grid-gen" +QUEUE="${QUEUE:-batch}" +PROJECT_CODE=${PROJECT_CODE:-fv3-cpu} + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log +SUM_FILE=summary.log +export home_dir=$PWD/../.. 
+export APRUN=time +export APRUN_SFC=srun +export OMP_STACKSIZE=2048m +export OMP_NUM_THREADS=24 + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/grid_gen/baseline_data + +rm -fr $WORK_DIR + +#----------------------------------------------------------------------------- +# C96 uniform grid +#----------------------------------------------------------------------------- + +LOG_FILE1=${LOG_FILE}01 +TEST1=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.uniform \ + -o $LOG_FILE1 -e $LOG_FILE1 ./c96.uniform.sh) + +#----------------------------------------------------------------------------- +# C96 uniform grid using viirs vegetation and bnu soil type data. +#----------------------------------------------------------------------------- + +LOG_FILE2=${LOG_FILE}02 +TEST2=$(sbatch --parsable --ntasks-per-node=15 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.viirs.bnu \ + -o $LOG_FILE2 -e $LOG_FILE2 ./c96.viirs.bnu.sh) + +#----------------------------------------------------------------------------- +# GFDL regional grid +#----------------------------------------------------------------------------- + +LOG_FILE3=${LOG_FILE}03 +TEST3=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J gfdl.regional \ + -o $LOG_FILE3 -e $LOG_FILE3 ./gfdl.regional.sh) + +#----------------------------------------------------------------------------- +# ESG regional grid (output dominant soil/vegetation type). 
+#----------------------------------------------------------------------------- + +LOG_FILE4=${LOG_FILE}04 +TEST4=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J esg.regional \ + -o $LOG_FILE4 -e $LOG_FILE4 ./esg.regional.sh) + +#----------------------------------------------------------------------------- +# ESG regional grid (output percent of each soil and vegetation type and +# the dominant category). +#----------------------------------------------------------------------------- + +LOG_FILE5=${LOG_FILE}05 +TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J esg.regional.pct.cat \ + -o $LOG_FILE5 -e $LOG_FILE5 ./esg.regional.pct.cat.sh) + +#----------------------------------------------------------------------------- +# Regional grid with GSL gravity wave drag fields. +#----------------------------------------------------------------------------- + +LOG_FILE6=${LOG_FILE}06 +TEST6=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \ + -o $LOG_FILE6 -e $LOG_FILE6 ./regional.gsl.gwd.sh) + +#----------------------------------------------------------------------------- +# Create summary log. +#----------------------------------------------------------------------------- + +sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \ + -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6 << EOF +#!/bin/bash +grep -a '<<<' ${LOG_FILE}* > $SUM_FILE +EOF diff --git a/reg_tests/ice_blend/driver.hercules.sh b/reg_tests/ice_blend/driver.hercules.sh new file mode 100755 index 000000000..f541ac367 --- /dev/null +++ b/reg_tests/ice_blend/driver.hercules.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run ice_blend consistency test on Hercules. +# +# Set $DATA to your working directory. 
Set the project code (SBATCH -A) +# and queue (SBATCH -q) as appropriate. +# +# Invoke the script as follows: sbatch $script +# +# Log output is placed in consistency.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline file is +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +#SBATCH -J ice_blend +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o consistency.log +#SBATCH -e consistency.log +#SBATCH --ntasks=1 +#SBATCH -q debug +#SBATCH -t 00:03:00 + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.intel +module list + +ulimit -s unlimited + +export DATA="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}" +export DATA="${DATA}/reg-tests/ice-blend" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. 
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+export WGRIB=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/grib-util-1.3.0-wenl3in/bin/wgrib
+export WGRIB2=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/wgrib2-3.1.1-v7xhwos/bin/wgrib2
+export COPYGB=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/grib-util-1.3.0-wenl3in/bin/copygb
+export COPYGB2=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/grib-util-1.3.0-wenl3in/bin/copygb2
+export CNVGRIB=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/grib-util-1.3.0-wenl3in/bin/cnvgrib
+
+export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/ice_blend
+#export HOMEreg=/work/noaa/global/dhuber/noscrub/ufs_utils/reg_tests/ice_blend
+export HOMEgfs=$PWD/../..
+ +rm -fr $DATA + +./ice_blend.sh + +exit 0 diff --git a/reg_tests/rt.sh b/reg_tests/rt.sh index 1bf12f987..b22aeb73b 100755 --- a/reg_tests/rt.sh +++ b/reg_tests/rt.sh @@ -73,8 +73,8 @@ cd fix cd ../reg_tests -#if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] || [[ $target == "wcoss2" ]] ; then -if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] ; then +#if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] || [[ $target == "hercules" ]] || [[ $target == "wcoss2" ]] ; then +if [[ $target == "orion" ]] || [[ $target == "jet" ]] || [[ $target == "hera" ]] || [[ $target == "hercules" ]] ; then cd cpld_gridgen export ACCOUNT=$PROJECT_CODE @@ -116,7 +116,7 @@ done for dir in weight_gen ice_blend; do cd $dir - if [[ $target == "hera" ]] || [[ $target == "jet" ]] || [[ $target == "orion" ]] || [[ $target == "s4" ]] ; then + if [[ $target == "hera" ]] || [[ $target == "jet" ]] || [[ $target == "orion" ]] || [[ $target == "s4" ]] || [[ $target == "hercules" ]] ; then sbatch -A ${PROJECT_CODE} ./driver.$target.sh elif [[ $target == "wcoss2" ]] ; then qsub -v WORK_DIR ./driver.$target.sh diff --git a/reg_tests/snow2mdl/driver.hercules.sh b/reg_tests/snow2mdl/driver.hercules.sh new file mode 100755 index 000000000..ea64eecbf --- /dev/null +++ b/reg_tests/snow2mdl/driver.hercules.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run snow2mdl consistency tests on Hercules. +# +# Set $DATA_ROOT to your working directory. Set the project code +# and queue as appropriate. +# +# Invoke the script as follows: ./$script +# +# Log output is placed in consistency.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline files are +# stored in HOMEreg. 
+#
+#-----------------------------------------------------------------------------
+
+set -x
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.intel
+module list
+
+ulimit -s unlimited
+
+export DATA_ROOT="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}"
+export DATA_ROOT="${DATA_ROOT}/reg-tests/snow2mdl"
+
+PROJECT_CODE="${PROJECT_CODE:-fv3-cpu}"
+QUEUE="${QUEUE:-batch}"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+rm -fr $DATA_ROOT
+
+export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/snow2mdl
+#export HOMEreg=/work/noaa/global/dhuber/noscrub/ufs_utils/reg_tests/snow2mdl
+export HOMEgfs=$PWD/../..
+export WGRIB=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/grib-util-1.3.0-wenl3in/bin/wgrib
+export WGRIB2=/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.5.0/envs/unified-env/install/intel/2021.9.0/wgrib2-3.1.1-v7xhwos/bin/wgrib2
+
+# The first test mimics GFS OPS.
+
+export DATA="${DATA_ROOT}/test.ops"
+TEST1=$(sbatch --parsable -J snow.ops -A $PROJECT_CODE -o consistency.log \
+  -e consistency.log --ntasks=1 -q $QUEUE -t 00:03:00 ./snow2mdl.ops.sh)
+
+# This tests the afwa global grib2 data.
+
+export DATA="${DATA_ROOT}/test.global"
+TEST2=$(sbatch --parsable -J snow.global -A $PROJECT_CODE -o consistency.log \
+  -e consistency.log --ntasks=1 -q $QUEUE -t 00:03:00 --open-mode=append \
+  -d afterok:$TEST1 ./snow2mdl.global.sh)
+
+# Create the summary file. 
+ +sbatch --ntasks=1 -t 0:01:00 -A $PROJECT_CODE -J snow_summary -o consistency.log -e consistency.log \ + --open-mode=append -q $QUEUE -d afterok:$TEST2 << EOF +#!/bin/bash +grep -a '<<<' consistency.log > summary.log +EOF + +exit 0 diff --git a/reg_tests/weight_gen/driver.hercules.sh b/reg_tests/weight_gen/driver.hercules.sh new file mode 100755 index 000000000..369796d61 --- /dev/null +++ b/reg_tests/weight_gen/driver.hercules.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run weight_gen consistency test on Hercules. +# +# Set $DATA to your working directory. Set the project code (SBATCH -A) +# and queue (SBATCH -q) as appropriate. +# +# Invoke the script as follows: sbatch $script +# +# Log output is placed in consistency.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline files +# as determined by the 'nccmp' command. The baseline file is +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +#SBATCH -J weight_gen +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o consistency.log +#SBATCH -e consistency.log +#SBATCH --ntasks=1 +#SBATCH -q debug +#SBATCH -t 00:03:00 + +set -x + +compiler=${compiler:-"intel"} + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.$compiler +module list + +export DATA="${WORK_DIR:-/work/noaa/stmp/$LOGNAME}" +export DATA="${DATA}/reg-tests/weight_gen" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. 
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+export HOMEreg=/work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/weight_gen
+#export HOMEreg=/work/noaa/global/dhuber/noscrub/ufs_utils/reg_tests/weight_gen
+export HOMEufs=$PWD/../..
+
+./weight_gen.sh
+
+exit 0
diff --git a/sorc/machine-setup.sh b/sorc/machine-setup.sh
index 6e73630e7..e7dfd2d09 100644
--- a/sorc/machine-setup.sh
+++ b/sorc/machine-setup.sh
@@ -87,6 +87,9 @@ elif [[ -d /lustre && -d /ncrc ]] ; then
 elif [[ "$(hostname)" =~ "Orion" ]]; then
   target="orion"
   module purge
+elif [[ "$(hostname)" =~ "hercules" || "$(hostname)" =~ "Hercules" ]]; then
+  target="hercules"
+  module purge
 elif [[ -d /work/00315 && -d /scratch/00315 ]] ; then
   target=stampede
   module purge
diff --git a/util/weight_gen/run.hercules.sh b/util/weight_gen/run.hercules.sh
new file mode 100755
index 000000000..59e7841c7
--- /dev/null
+++ b/util/weight_gen/run.hercules.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+#SBATCH -J weight_gen
+#SBATCH -A fv3-cpu
+#SBATCH --open-mode=truncate
+#SBATCH -o log
+#SBATCH -e log
+#SBATCH --ntasks=1
+#SBATCH -q debug
+#SBATCH -t 00:03:00
+
+#-------------------------------------------------------------------------------
+#
+# Run the weight_gen program on Hercules.
+#
+# Set WORK_DIR to your working directory.
+#
+# Set CRES to your desired resolution. 
Valid choices are:
+# - C48 => 192x94 and 192x96 gaussian
+# - C96 => 384x192 and 384x194 gaussian
+# - C128 => 512x256 and 512x258 gaussian
+# - C192 => 768x384 and 768x386 gaussian
+# - C384 => 1536x768 and 1536x770 gaussian
+# - C768 => 3072x1536 and 3072x1538 gaussian
+# - C1152 => 4608x2304 and 4608x2306 gaussian
+# - C3072 => 12288x6144 and 12288x6146 gaussian
+#
+# To run this script, do: 'sbatch $script'
+#
+#-------------------------------------------------------------------------------
+
+set -x
+
+UFS_DIR=$PWD/../..
+source $UFS_DIR/sorc/machine-setup.sh > /dev/null 2>&1
+module use $UFS_DIR/modulefiles
+module load build.$target.intel
+module list
+
+export CRES="C48"
+
+export WORK_DIR=/work/noaa/stmp/$USER/weight_gen
+
+${UFS_DIR}/util/weight_gen/weight_gen.sh
+
+exit