diff --git a/modulefiles/build.s4.intel b/modulefiles/build.s4.intel deleted file mode 100644 index 37cdf2a0c..000000000 --- a/modulefiles/build.s4.intel +++ /dev/null @@ -1,27 +0,0 @@ -#%Module##################################################### -## Build and run module for S4 -############################################################# - -module load license_intel/S4 -module use /data/prod/hpc-stack/modulefiles/stack -module load hpc/1.1.0 -module load hpc-intel/18.0.4 -module load hpc-impi/18.0.4 - -module load bacio/2.4.1 -module load g2/3.4.1 -module load ip/3.3.3 -module load nemsio/2.5.2 -module load sp/2.3.3 -module load w3nco/2.4.1 -module load sfcio/1.4.1 -module load sigio/2.3.2 - -module load jasper/2.0.22 -module load zlib/1.2.11 -module load png/1.6.35 - -module load hdf5/1.10.6 -module load netcdf/4.7.4 -module load nccmp/1.8.7.0 -module load esmf/8_1_0_beta_snapshot_27 diff --git a/modulefiles/build.s4.intel.lua b/modulefiles/build.s4.intel.lua new file mode 100644 index 000000000..c9418d420 --- /dev/null +++ b/modulefiles/build.s4.intel.lua @@ -0,0 +1,59 @@ +help([[ +Load environment to compile UFS_UTILS on S4 using Intel +]]) + +load(pathJoin("license_intel","S4")) +prepend_path("MODULEPATH", "/data/prod/hpc-stack/modulefiles/stack") + +hpc_ver=os.getenv("hpc_ver") or "1.2.0" +load(pathJoin("hpc", hpc_ver)) + +hpc_intel_ver=os.getenv("hpc_intel_ver") or "2022.1" +load(pathJoin("hpc-intel", hpc_intel_ver)) + +impi_ver=os.getenv("impi_ver") or "2022.1" +load(pathJoin("hpc-impi", impi_ver)) + +bacio_ver=os.getenv("bacio_ver") or "2.4.1" +load(pathJoin("bacio", bacio_ver)) + +g2_ver=os.getenv("g2_ver") or "3.4.5" +load(pathJoin("g2", g2_ver)) + +ip_ver=os.getenv("ip_ver") or "3.3.3" +load(pathJoin("ip", ip_ver)) + +nemsio_ver=os.getenv("nemsio_ver") or "2.5.4" +load(pathJoin("nemsio", nemsio_ver)) + +sp_ver=os.getenv("sp_ver") or "2.3.3" +load(pathJoin("sp", sp_ver)) + +w3nco_ver=os.getenv("w3nco_ver") or "2.4.1" +load(pathJoin("w3nco", w3nco_ver)) + +sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +load(pathJoin("sfcio", sfcio_ver)) + +sigio_ver=os.getenv("sigio_ver") or "2.3.2" +load(pathJoin("sigio", sigio_ver)) + +zlib_ver=os.getenv("zlib_ver") or "1.2.11" +load(pathJoin("zlib", zlib_ver)) + +png_ver=os.getenv("png_ver") or "1.6.35" +load(pathJoin("libpng", png_ver)) + +hdf5_ver=os.getenv("hdf5_ver") or "1.10.6" +load(pathJoin("hdf5", hdf5_ver)) + +netcdf_ver=os.getenv("netcdf_ver") or "4.7.4" +load(pathJoin("netcdf", netcdf_ver)) + +nccmp_ver=os.getenv("nccmp_ver") or "1.8.9.0" +load(pathJoin("nccmp", nccmp_ver)) + +esmf_ver=os.getenv("esmf_ver") or "8.2.1b04" +load(pathJoin("esmf", esmf_ver)) + +whatis("Description: UFS_UTILS build environment") diff --git a/reg_tests/chgres_cube/driver.s4.sh b/reg_tests/chgres_cube/driver.s4.sh new file mode 100755 index 000000000..4e6e60119 --- /dev/null +++ b/reg_tests/chgres_cube/driver.s4.sh @@ -0,0 +1,224 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run the chgres_cube consistency tests on S4. +# +# Set WORK_DIR to a general working location outside the UFS_UTILS directory. +# The exact working directory (OUTDIR) will be WORK_DIR/reg_tests/chgres-cube. +# Set the PROJECT_CODE and QUEUE as appropriate. To see which projects you +# are authorized to use, type +# "sacctmgr show assoc Users= format=account,user,qos" +# +# Invoke the script with no arguments. A series of daisy-chained +# jobs will be submitted. To check the queue, type: +# "squeue -u USERNAME". 
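+#
+# For example (values are illustrative; use a project code and queue
+# you are authorized for):
+#
+#   WORK_DIR=/scratch/short/users/$LOGNAME PROJECT_CODE=star QUEUE=s4 ./driver.s4.sh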
+# +# The run output will be stored in OUTDIR. Log output from the suite +# will be in LOG_FILE. Once the suite has completed, a summary is +# placed in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg. +# +#----------------------------------------------------------------------------- + +set -x + +compiler=${compiler:-"intel"} + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +module use ../../modulefiles +module load build.$target.$compiler +module list + +export OUTDIR="${WORK_DIR:-/scratch/short/users/$LOGNAME}" +export OUTDIR="${OUTDIR}/reg-tests/chgres-cube" + +PROJECT_CODE="${PROJECT_CODE:-star}" +QUEUE="${QUEUE:-s4}" + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. HOMEufs is the root +# directory of your UFS_UTILS clone. HOMEreg contains the input data +# and baseline data for each test. +#----------------------------------------------------------------------------- + +export UPDATE_BASELINE="FALSE" +#export UPDATE_BASELINE="TRUE" + +if [ "$UPDATE_BASELINE" = "TRUE" ]; then + source ../get_hash.sh +fi + +export HOMEufs=$PWD/../.. + +export HOMEreg=/data/users/dhuber/save/nems/role.ufsutils/ufs_utils/reg_tests/chgres_cube + +LOG_FILE=consistency.log +SUM_FILE=summary.log +rm -f $LOG_FILE* $SUM_FILE + +export OMP_STACKSIZE=1024M + +export APRUN=srun +export NCCMP=${NCCMP:-nccmp} +rm -fr $OUTDIR + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 warm restart files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log01 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.restart \ + -o $LOG_FILE -e $LOG_FILE ./c96.fv3.restart.sh) + +#----------------------------------------------------------------------------- +# Initialize C192 using FV3 tiled history files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log02 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ + -o $LOG_FILE -e $LOG_FILE ./c192.fv3.history.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log03 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ + -o $LOG_FILE -e $LOG_FILE ./c96.fv3.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS sigio/sfcio files. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log04 +export OMP_NUM_THREADS=6 # should match cpus-per-task +TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ + -o $LOG_FILE -e $LOG_FILE ./c96.gfs.sigio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS gaussian nemsio files. 
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log05
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \
+      -o $LOG_FILE -e $LOG_FILE ./c96.gfs.nemsio.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize regional C96 using FV3 gaussian nemsio files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log06
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.regional \
+      -o $LOG_FILE -e $LOG_FILE ./c96.regional.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize C96 using FV3 gaussian netcdf files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log07
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST7=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \
+      -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize global C192 using GFS GRIB2 files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log08
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \
+      -o $LOG_FILE -e $LOG_FILE ./c192.gfs.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 25-km CONUS grid using GFS GRIB2 files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log09
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST9=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 3-km CONUS grid using HRRR GRIB2 file with GFS physics.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log10
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST10=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.gfssdf.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 3-km CONUS grid using HRRR GRIB2 file with GSD physics and
+# surface variables from the file.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log11
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.newsfc.grib2.sh)
+
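+# (Note: "sbatch --parsable" prints only the Slurm job ID, which is
+# captured in each TESTn variable and used to build the "afterok"
+# dependency list for the summary job below.)
+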
+#-----------------------------------------------------------------------------
+# Initialize 13-km CONUS grid using NAM GRIB2 file with GFS physics.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log12
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./13km.conus.nam.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 13-km CONUS grid using RAP GRIB2 file with GSD physics.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log13
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./13km.conus.rap.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 13-km NA grid using NCEI GFS GRIB2 file with GFS physics.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log14
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST14=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./13km.na.gfs.ncei.grib2.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize C96 WAM IC using FV3 gaussian netcdf files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log15
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST15=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \
+      -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf2wam.sh)
+
+#-----------------------------------------------------------------------------
+# Initialize 25-km CONUS grid using GFS PGRIB2+BGRIB2 files.
+#-----------------------------------------------------------------------------
+
+LOG_FILE=consistency.log16
+export OMP_NUM_THREADS=1 # should match cpus-per-task
+TEST16=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \
+      -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.pbgrib2.sh)
+
+#-----------------------------------------------------------------------------
+# Create summary log.
+#-----------------------------------------------------------------------------
+LOG_FILE=consistency.log
+sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \
+       --open-mode=append -q $QUEUE -d\
+       afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF
+#!/bin/bash
+grep -a '<<<' $LOG_FILE* > $SUM_FILE
+EOF
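+
+# Once all sixteen jobs succeed, SUM_FILE should hold one '<<<' line per
+# test. The exact wording comes from each test script; a line such as
+# "<<< C96 FV3 RESTART TEST PASSED. >>>" is illustrative only.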
+
+exit 0
diff --git a/reg_tests/global_cycle/driver.s4.sh b/reg_tests/global_cycle/driver.s4.sh
new file mode 100755
index 000000000..439925c7e
--- /dev/null
+++ b/reg_tests/global_cycle/driver.s4.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+#-----------------------------------------------------------------------------
+#
+# Run global_cycle consistency test on S4.
+#
+# Set $WORK_DIR to your working directory. Set the project code
+# and queue as appropriate.
+#
+# Invoke the script from the command line as follows: ./$script
+#
+# Log output is placed in consistency.log??. A summary is
+# placed in summary.log.
+#
+# A test fails when its output does not match the baseline files
+# as determined by the 'nccmp' utility. The baseline files are
+# stored in HOMEreg.
+#
+#-----------------------------------------------------------------------------
+
+set -x
+
+compiler=${compiler:-"intel"}
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.$compiler
+module list
+
+WORK_DIR="${WORK_DIR:-/scratch/short/users/$LOGNAME}"
+
+PROJECT_CODE="${PROJECT_CODE:-star}"
+QUEUE="${QUEUE:-batch}"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
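+
+# To regenerate the baselines instead of comparing against them, flip
+# the toggle above:
+#
+#   export UPDATE_BASELINE="TRUE"
+#
+# get_hash.sh then records the git hash of the code that produced the
+# new baseline (see ../get_hash.sh for the details).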
+
+DATA_DIR="${WORK_DIR}/reg-tests/global-cycle"
+
+export HOMEreg=/data/users/dhuber/save/nems/role.ufsutils/ufs_utils/reg_tests/global_cycle
+
+export OMP_NUM_THREADS_CY=2
+
+export APRUNCY="srun"
+
+export NWPROD=$PWD/../..
+
+reg_dir=$PWD
+
+LOG_FILE=consistency.log01
+export DATA="${DATA_DIR}/test1"
+export COMOUT=$DATA
+TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c768.fv3gfs \
+      -o $LOG_FILE -e $LOG_FILE ./C768.fv3gfs.sh)
+
+LOG_FILE=consistency.log02
+export DATA="${DATA_DIR}/test2"
+export COMOUT=$DATA
+TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c768.lndincsoil \
+      -o $LOG_FILE -e $LOG_FILE ./C768.lndincsoil.sh)
+
+LOG_FILE=consistency.log03
+export DATA="${DATA_DIR}/test3"
+export COMOUT=$DATA
+TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c768.lndincsnow \
+      -o $LOG_FILE -e $LOG_FILE ./C768.lndincsnow.sh)
+
+LOG_FILE=consistency.log
+sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J summary -o $LOG_FILE -e $LOG_FILE \
+       --open-mode=append -q $QUEUE -d\
+       afterok:$TEST1:$TEST2:$TEST3 << EOF
+#!/bin/bash
+grep -a '<<<' ${LOG_FILE}* > summary.log
+EOF
+
+exit
diff --git a/reg_tests/grid_gen/driver.s4.sh b/reg_tests/grid_gen/driver.s4.sh
new file mode 100755
index 000000000..190c22c9c
--- /dev/null
+++ b/reg_tests/grid_gen/driver.s4.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+#-----------------------------------------------------------------------------
+#
+# Run grid generation consistency tests on S4.
+#
+# Set WORK_DIR to your working directory. Set the PROJECT_CODE and QUEUE
+# as appropriate. To see which projects you are authorized to use,
+# type
+# "sacctmgr show assoc Users= format=account,user,qos"
+#
+# Invoke the script with no arguments. A series of daisy-chained
+# jobs will be submitted. To check the queue, type:
+# "squeue -u USERNAME".
+#
+# Log output from the suite will be in LOG_FILE. Once the suite
+# has completed, a summary is placed in SUM_FILE.
+#
+# A test fails when its output does not match the baseline files as
+# determined by the "nccmp" utility. The baseline files are stored in
+# HOMEreg.
+#
+#-----------------------------------------------------------------------------
+
+compiler=${compiler:-"intel"}
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.$compiler
+module list
+
+set -x
+
+export WORK_DIR="${WORK_DIR:-/scratch/short/users/$LOGNAME}"
+export WORK_DIR="${WORK_DIR}/reg-tests/grid-gen"
+QUEUE="${QUEUE:-s4}"
+PROJECT_CODE="${PROJECT_CODE:-star}"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below here.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+LOG_FILE=consistency.log
+SUM_FILE=summary.log
+export home_dir=$PWD/../..
+export APRUN=time
+export APRUN_SFC=srun
+export OMP_STACKSIZE=2048m
+export machine=S4
+export HOMEreg=/data/users/dhuber/save/nems/role.ufsutils/ufs_utils/reg_tests/grid_gen/baseline_data
+
+ulimit -a
+#ulimit -s unlimited
+
+rm -fr $WORK_DIR
+
+export OMP_NUM_THREADS=24
+
+#-----------------------------------------------------------------------------
+# C96 uniform grid
+#-----------------------------------------------------------------------------
+
+TEST1=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.uniform \
+      -o $LOG_FILE -e $LOG_FILE ./c96.uniform.sh)
+
+#-----------------------------------------------------------------------------
+# C96 uniform grid using VIIRS vegetation data.
+#-----------------------------------------------------------------------------
+
+TEST2=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.viirs.vegt \
+      -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST1 ./c96.viirs.vegt.sh)
+
+#-----------------------------------------------------------------------------
+# GFDL regional grid
+#-----------------------------------------------------------------------------
+
+TEST3=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J gfdl.regional \
+      -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST2 ./gfdl.regional.sh)
+
+#-----------------------------------------------------------------------------
+# ESG regional grid
+#-----------------------------------------------------------------------------
+
+TEST4=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J esg.regional \
+      -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST3 ./esg.regional.sh)
+
+#-----------------------------------------------------------------------------
+# Regional GSL gravity wave drag test.
+#-----------------------------------------------------------------------------
+
+TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \
+      -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST4 ./regional.gsl.gwd.sh)
+
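+# (These five jobs share one log file and run strictly in sequence via
+# "-d afterok", so a failed step typically leaves the remaining jobs
+# pending on a dependency that can never be satisfied; check with
+# "squeue -u USERNAME".)
+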
+#-----------------------------------------------------------------------------
+# Create summary log.
+#-----------------------------------------------------------------------------
+
+sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \
+       --open-mode=append -q $QUEUE -d afterok:$TEST5 << EOF
+#!/bin/bash
+grep -a '<<<' $LOG_FILE > $SUM_FILE
+EOF
diff --git a/reg_tests/ice_blend/driver.s4.sh b/reg_tests/ice_blend/driver.s4.sh
new file mode 100755
index 000000000..7aee14a02
--- /dev/null
+++ b/reg_tests/ice_blend/driver.s4.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+#-----------------------------------------------------------------------------
+#
+# Run ice_blend consistency test on S4.
+#
+# Set $DATA to your working directory. Set the project code (SBATCH -A)
+# and queue (SBATCH -q) as appropriate.
+#
+# Invoke the script as follows: sbatch $script
+#
+# Log output is placed in consistency.log. A summary is
+# placed in summary.log.
+#
+# The test fails when its output does not match the baseline file
+# as determined by the 'cmp' command. The baseline file is
+# stored in HOMEreg.
+#
+#-----------------------------------------------------------------------------
+
+#SBATCH -J ice_blend
+#SBATCH -A s4
+#SBATCH --open-mode=truncate
+#SBATCH -o consistency.log
+#SBATCH -e consistency.log
+#SBATCH --ntasks=1
+#SBATCH -q s4
+#SBATCH -t 00:03:00
+
+set -x
+
+compiler=${compiler:-"intel"}
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.$compiler
+module list
+
+export DATA="${WORK_DIR:-/scratch/short/users/$LOGNAME}"
+export DATA="${DATA}/reg-tests/ice-blend"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+export WGRIB=/data/prod/hpc-stack/intel-2022.1/grib_util/1.2.2/bin/wgrib
+export WGRIB2=/data/prod/hpc-stack/intel-2022.1/wgrib2/2.0.8/bin/wgrib2
+export COPYGB=/data/prod/hpc-stack/intel-2022.1/grib_util/1.2.2/bin/copygb
+export COPYGB2=/data/prod/hpc-stack/intel-2022.1/grib_util/1.2.2/bin/copygb2
+export CNVGRIB=/data/prod/hpc-stack/intel-2022.1/grib_util/1.2.2/bin/cnvgrib
+
+export HOMEreg=/data/users/dhuber/save/nems/role.ufsutils/ufs_utils/reg_tests/ice_blend
+export HOMEgfs=$PWD/../..
+
+rm -fr $DATA
+
+./ice_blend.sh
+
+exit 0
diff --git a/reg_tests/rt.sh b/reg_tests/rt.sh
index bf196f576..b16d98c07 100755
--- a/reg_tests/rt.sh
+++ b/reg_tests/rt.sh
@@ -91,7 +91,7 @@ fi
 for dir in ice_blend; do
   cd $dir
 
-  if [[ $target == "hera" ]] || [[ $target == "jet" ]] || [[ $target == "orion" ]]; then
+  if [[ $target == "hera" ]] || [[ $target == "jet" ]] || [[ $target == "orion" ]] || [[ $target == "s4" ]] ; then
     sbatch -A ${PROJECT_CODE} ./driver.$target.sh
   elif [[ $target == "wcoss_dell_p3" ]] || [[ $target == "wcoss_cray" ]]; then
     cat ./driver.$target.sh | bsub -P ${PROJECT_CODE}
diff --git a/reg_tests/snow2mdl/driver.s4.sh b/reg_tests/snow2mdl/driver.s4.sh
new file mode 100755
index 000000000..608a19584
--- /dev/null
+++ b/reg_tests/snow2mdl/driver.s4.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+#-----------------------------------------------------------------------------
+#
+# Run snow2mdl consistency test on S4.
+#
+# Set $DATA_ROOT to your working directory. Set the project code
+# and queue as appropriate.
+#
+# Invoke the script as follows: ./$script
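+#
+# For example (values are illustrative; use a project code and queue
+# you are authorized for):
+#
+#   WORK_DIR=/scratch/short/users/$LOGNAME PROJECT_CODE=star QUEUE=s4 ./driver.s4.sh
+#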
+# Log output is placed in consistency.log. A summary is
+# placed in summary.log.
+#
+# The test fails when its output does not match the baseline file
+# as determined by the 'cmp' command. The baseline file is
+# stored in HOMEreg.
+#
+#-----------------------------------------------------------------------------
+
+set -x
+
+compiler=${compiler:-"intel"}
+
+source ../../sorc/machine-setup.sh > /dev/null 2>&1
+module use ../../modulefiles
+module load build.$target.$compiler
+module list
+
+DATA_ROOT="${WORK_DIR:-/scratch/short/users/$LOGNAME}"
+DATA_ROOT="${DATA_ROOT}/reg-tests/snow2mdl"
+
+PROJECT_CODE="${PROJECT_CODE:-star}"
+QUEUE="${QUEUE:-s4}"
+
+#-----------------------------------------------------------------------------
+# Should not have to change anything below.
+#-----------------------------------------------------------------------------
+
+export UPDATE_BASELINE="FALSE"
+#export UPDATE_BASELINE="TRUE"
+
+if [ "$UPDATE_BASELINE" = "TRUE" ]; then
+  source ../get_hash.sh
+fi
+
+export HOMEreg=/data/users/dhuber/save/nems/role.ufsutils/ufs_utils/reg_tests/snow2mdl
+export HOMEgfs=$PWD/../..
+export WGRIB=/data/prod/hpc-stack/intel-2022.1/grib_util/1.2.2/bin/wgrib
+export WGRIB2=/data/prod/hpc-stack/intel-2022.1/wgrib2/2.0.8/bin/wgrib2
+
+# The first test mimics GFS OPS.
+
+export DATA="${DATA_ROOT}/test.ops"
+TEST1=$(sbatch --parsable -J snow.ops -A ${PROJECT_CODE} -o consistency.log -e consistency.log \
+      --ntasks=1 -q ${QUEUE} -t 00:03:00 ./snow2mdl.ops.sh)
+
+# The second test is for the new AFWA global GRIB2 data.
+
+export DATA="${DATA_ROOT}/test.global"
+TEST2=$(sbatch --parsable -J snow.global -A ${PROJECT_CODE} -o consistency.log -e consistency.log \
+      --ntasks=1 -q ${QUEUE} -t 00:03:00 -d afterok:$TEST1 ./snow2mdl.global.sh)
+
+# Create summary file.
+
+sbatch --nodes=1 -t 0:01:00 -A ${PROJECT_CODE} -J snow_summary -o consistency.log -e consistency.log \
+       --open-mode=append -q ${QUEUE} -d afterok:$TEST2 << EOF
+#!/bin/bash
+grep -a '<<<' consistency.log > summary.log
+EOF
+
+exit 0