Commit f7bf9557 authored by Guillaume Samson

Merge branch '61-continuous-integration-tests-with-sette' into 'main'

Resolve "continuous integration tests with SETTE"

Closes #61

See merge request nemo/nemo!98
parents b2787cb1 ac0146d6
Showing 463 additions and 240 deletions
# HPC & configs lists to be tested with SETTE
.parallel_HPC_CFG:
parallel:
matrix:
- HPC: [irene,jean-zay]
CONFIG: [ORCA2_ICE_PISCES,ORCA2_OFF_PISCES,AMM12,AGRIF,WED025,GYRE_PISCES,SAS,ORCA2_ICE_OBS]
# HPC & testcases lists to be tested with SETTE
.parallel_HPC_TST:
parallel:
matrix:
- HPC: [irene,jean-zay]
CONFIG: [SWG,ICE_AGRIF,OVERFLOW,LOCK_EXCHANGE,VORTEX,ISOMIP+]
# variables declared hereafter will be available in all jobs
# these 2 variables are needed for each HPC to be tested:
# - SETTE_CFG: SETTE param.cfg file path
# - SETTE_MOD: SETTE modules list
variables:
# JEAN-ZAY
JZ_SETTE_CFG: "/linkhome/rech/genloc01/reee217/nemo_CI/param.cfg"
JZ_SETTE_MOD: "intel-compilers/19.1.3 intel-mpi/2019.9 hdf5/1.12.0-mpi netcdf-c/4.7.4-mpi netcdf-fortran/4.5.3-mpi git/2.25.0"
# IRENE
IR_SETTE_CFG: "/ccc/cont005/dsku/leger/home/user/locean/massons/nemo_CI/param.cfg"
IR_SETTE_MOD: "intel/19.0.5.281 mpi/openmpi/4.0.2 flavor/hdf5/parallel flavor/buildmpi/openmpi/4.0 netcdf-c/4.6.0 netcdf-fortran/4.4.4 hdf5/1.8.20 boost/1.69.0 blitz/0.10 c++/gnu/7.3.0 c/gnu/7.3.0"
# default (deliberately wrong) values
# HPC & configs lists to be tested with SETTE
.parallel_HPC_CFG:
parallel:
matrix:
- HPC: [default]
CONFIG: [default]
# HPC & testcases lists to be tested with SETTE
.parallel_HPC_TST:
parallel:
matrix:
- HPC: [default]
CONFIG: [default]
# HPC & configs lists to be tested with SETTE
.parallel_HPC_CFG:
parallel:
matrix:
- HPC: [aa,belenos]
#CONFIG: [ORCA2_ICE_PISCES,ORCA2_OFF_PISCES,AMM12,AGRIF_DEMO,WED025,GYRE_PISCES,ORCA2_SAS_ICE,ORCA2_ICE_OBS]
CONFIG: [ORCA2_ICE_PISCES,ORCA2_OFF_PISCES,AMM12,WED025,GYRE_PISCES,ORCA2_SAS_ICE,ORCA2_ICE_OBS]
# HPC & testcases lists to be tested with SETTE
.parallel_HPC_TST:
parallel:
matrix:
- HPC: [aa,belenos]
#CONFIG: [SWG,ICE_AGRIF,OVERFLOW,LOCK_EXCHANGE,VORTEX,ISOMIP+]
CONFIG: [SWG,OVERFLOW,LOCK_EXCHANGE,VORTEX,ISOMIP+]
# variables declared hereafter will be available in all jobs
# these 2 variables are needed for each HPC to be tested:
# - SETTE_CFG: SETTE param.cfg file path
# - SETTE_MOD: SETTE modules list
variables:
# METEO-FRANCE BELENOS (INTEL_2018+INTEL-MPI)
BL_SETTE_CFG: "/home/ext/mr/smer/samsong/SRC/NEMO/main/sette/param.ci"
BL_SETTE_MOD: "gcc/9.2.0 intel/2018.5.274 intelmpi/2018.5.274 phdf5/1.8.18 netcdf_par/4.7.1_V2 xios/trunk/r2320_intel-impi git/2.27.0"
# ECMWF AA (INTEL_2021+OPEN-MPI)
AA_SETTE_CFG: "/home/ar0s/SRC/NEMO/main/sette/param.ci"
AA_SETTE_MOD: "prgenv/intel intel/2021.4.0 openmpi/4.1.1.1 hdf5-parallel/1.10.6 netcdf4-parallel/4.7.4 xios/trunk/r2320_intel-ompi"
# MERCATOR KARA (TODO)
#KR_SETTE_CFG: "/kara/data1/gsamson/SRC/NEMO/nemo/sette/param.ci"
#KR_SETTE_MOD: "gcc/9.3.0 openmpi/4.0.5_gcc9.3.0 hdf5/1.8.18_gcc9.3.0 netcdf/4.7.1_gcc9.3.0 xios/trunk_rev2136_gcc9.3.0 git/2.34.1"
#----------#
# SETTE CI #
#----------#
include:
# default values will be overwritten depending on commit author
- local: '.gitlab-ci-default.yml'
- local: '.gitlab-ci-cnrs.yml'
rules:
- if: '$CI_COMMIT_AUTHOR =~ /.*masson.*/'
- local: '.gitlab-ci-mercator.yml'
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
#- if: '$CI_COMMIT_AUTHOR =~ /.*samson.*/ || $CI_COMMIT_AUTHOR =~ /.*chanut.*/'
.cache: &cache_CFG
key: ${CONFIG}_${CI_COMMIT_SHORT_SHA}_${CI_PIPELINE_ID}
paths:
- cfgs/${CONFIG}_ST/EXP*
- cfgs/${CONFIG}_ST/BLD/bin
- cfgs/${CONFIG}_ST/cpp_${CONFIG}_ST.fcm
.cache: &cache_TST
key: ${CONFIG}_${CI_COMMIT_SHORT_SHA}_${CI_PIPELINE_ID}
paths:
- tests/${CONFIG}_ST/EXP*
- tests/${CONFIG}_ST/BLD/bin
- tests/${CONFIG}_ST/cpp_${CONFIG}_ST.fcm
stages:
- compile
- run
- check
before_script:
# the following commands will be executed at the beginning of each job
# they are needed to copy param.cfg to the SETTE directory and to load the modules
- declare -A HOST_NAME=( ["kara"]="KR" ["aa"]="AA" ["belenoslogin"]="BL" ["irene"]="IR" ["jean-zay"]="JZ" )
- SETTE_MOD=${HOST_NAME[${HOSTNAME//[0-9,-,.]*}]}_SETTE_MOD
- if [ -d ${HOME}/modules ]; then module use ${HOME}/modules; fi
- module purge || true # force to always have 0 as error code (needed for irene)
- module load ${!SETTE_MOD}
- set -x # debug prints
- SETTE_CFG=${HOST_NAME[${HOSTNAME//[0-9,-,.]*}]}_SETTE_CFG
- cp -av ${!SETTE_CFG} sette/param.cfg
- git config core.filemode false # ignore git(lab) symlinks permissions changes when pushing then pulling cache ("old mode 100755 -> new mode 100644")
- SETTE_OPT="-b -u -w"
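The per-HPC variables above follow a naming convention: a short machine prefix (JZ, IR, BL, AA, KR) plus _SETTE_CFG or _SETTE_MOD, which before_script resolves at run time through bash indirect expansion. A minimal stand-alone sketch of that lookup (the hostname and module string below are hypothetical; the CI files define the real values):
# sketch of the prefix lookup done in before_script; values here are made up
declare -A HOST_NAME=( ["irene"]="IR" ["jean-zay"]="JZ" )
HOSTNAME="jean-zay2"                           # hypothetical login node name
host_key=${HOSTNAME//[0-9,-,.]*}               # drop everything from the first digit, comma or dot -> "jean-zay"
JZ_SETTE_MOD="intel-compilers/19.1.3 intel-mpi/2019.9"   # stands in for the CI variable
SETTE_MOD=${HOST_NAME[${host_key}]}_SETTE_MOD  # build the variable *name*, e.g. "JZ_SETTE_MOD"
echo "modules to load: ${!SETTE_MOD}"          # ${!var} dereferences that name to its value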
# JOBS
# sette_cmp_cfgs : compile reference configurations
# sette_cmp_tsts : compile test cases
# sette_run_cfgs : run reference configurations
# sette_run_tsts : run test cases
# sette_rpt_cfgs : check reference configurations results
# sette_rpt_tsts : check test cases results
sette_cmp_cfgs:
stage: compile
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
extends: .parallel_HPC_CFG # HPC + configs loop
cache:
<<: *cache_CFG
policy: push
tags:
- $HPC
script:
- cd sette
- ./sette.sh ${SETTE_OPT} -n ${CONFIG} -x "COMPILE" | tee -a sette_cmp_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- cd -
artifacts:
paths:
- sette/sette_cmp_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
when: always
sette_cmp_tsts:
stage: compile
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
extends: .parallel_HPC_TST # HPC + tests loop
cache:
<<: *cache_TST
policy: push
tags:
- $HPC
script:
- cd sette
- ./sette.sh ${SETTE_OPT} -n ${CONFIG} -x "COMPILE" | tee -a sette_cmp_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- cd -
artifacts:
paths:
- sette/sette_cmp_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
when: always
sette_run_cfgs:
stage: run
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
needs: [sette_cmp_cfgs] # limited to 50 jobs
dependencies: [] # do not retrieve previous artifacts
extends: .parallel_HPC_CFG # HPC + configs loop
cache:
<<: *cache_CFG
policy: pull
tags:
- $HPC
script:
- cd sette
- ./sette.sh ${SETTE_OPT} -n ${CONFIG} -x "RESTART REPRO PHYOPTS CORRUPT" | tee -a sette_run_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- cd -
artifacts:
paths:
- sette/sette_run_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- cfgs/${CONFIG}_ST/*/sette.*.out
- cfgs/${CONFIG}_ST/*/sette.*.err
- cfgs/${CONFIG}_ST/*/ocean.output*
when: always
sette_run_tsts:
stage: run
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
needs: [sette_cmp_tsts] # limited to 50 jobs
dependencies: [] # do not retrieve previous artifacts
extends: .parallel_HPC_TST # HPC + tests loop
cache:
<<: *cache_TST
policy: pull
tags:
- $HPC
script:
- cd sette
- ./sette.sh ${SETTE_OPT} -n ${CONFIG} -x "RESTART REPRO PHYOPTS CORRUPT" | tee -a sette_run_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- cd -
artifacts:
paths:
- sette/sette_run_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
- tests/${CONFIG}_ST/*/sette.*.out
- tests/${CONFIG}_ST/*/sette.*.err
- tests/${CONFIG}_ST/*/ocean.output*
when: always
sette_rpt_cfgs:
stage: check
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
needs: [sette_run_cfgs] # limited to 50 jobs
dependencies: [] # do not retrieve previous artifacts
extends: .parallel_HPC_CFG # HPC + configs loop
tags:
- $HPC
script:
- cd sette
- ./sette_rpt.sh -b -u -n "${CONFIG}" | tee -a sette_rpt_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
artifacts:
paths:
- sette/sette_rpt_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
when: always
sette_rpt_tsts:
stage: check
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
needs: [sette_run_tsts] # limited to 50 jobs
dependencies: [] # do not retrieve previous artifacts
extends: .parallel_HPC_TST # HPC + tests loop
tags:
- $HPC
script:
- cd sette
- ./sette_rpt.sh -b -u -n "${CONFIG}" | tee -a sette_rpt_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
artifacts:
paths:
- sette/sette_rpt_${CONFIG}_${CI_COMMIT_SHORT_SHA}_${HPC}.log
when: always
\ No newline at end of file
[![pipeline status](https://forge.nemo-ocean.eu/nemo/nemo/badges/61-continuous-integration-tests-with-sette/pipeline.svg)](https://forge.nemo-ocean.eu/nemo/nemo/-/commits/61-continuous-integration-tests-with-sette)
**Contents**
.. contents::
:local:
.. _`Former web platform forge`: https://forge.ipsl.jussieu.fr/nemo
.. _`NEMO users' guide`: https://sites.nemo-ocean.io/user-guide
.. _`Migration Guide`: https://sites.nemo-ocean.io/user-guide/migration.html
.. _`Change list`: https://sites.nemo-ocean.io/user-guide/changes.html
.. _`Test case repository`: https://github.com/NEMO-ocean/NEMO-examples
.. _`How to cite`: https://www.nemo-ocean.eu/bibliography/how-to-cite/
.. _`NEMO forums`: https://nemo-ocean.discourse.group
.. _`NEMO newsletter`: https://listes.ipsl.fr/sympa/subscribe/nemo-newsletter
.. _`NEMO publications`: https://www.nemo-ocean.eu/bibliography/publications/add
.. _`NEMO projects`: https://www.nemo-ocean.eu/projects/add
.. _`Special Issue`: https://gmd.copernicus.org/articles/special_issue40.html
.. _`NEMO System Team wiki`: https://forge.nemo-ocean.eu/developers/home/-/wikis/Home
.. _`NEMO ocean engine`: https://zenodo.org/record/1464816
.. _`NEMO Tracers engine`: https://zenodo.org/record/1471700
.. _`NEMO Sea Ice engine`: https://zenodo.org/record/1471689
.. _`PISCES`: https://www.pisces-community.org/index.php/model-description/
**Welcome to NEMO home page!**
NEMO (*Nucleus for European Modelling of the Ocean*) is a state-of-the-art modelling
framework for research activities and forecasting services in ocean and climate sciences,
developed in a sustainable way by the NEMO European consortium since 2008.
This page is intended to help you get started with the NEMO platform and to introduce you
to the different levels of information available. It starts here with NEMO release 4.2.0.
Reminder: our `Former web platform forge`_ (SVN+Trac) contains the previous documentation
and releases made available from the beginning of the project up to NEMO 4.0.
Getting started
===============
Getting your hands on NEMO: the first steps are described in detail in the
`NEMO users' guide`_ . This explains how to download the code, build the environment,
create the executable, and perform a first integration.
If you are already using a previous release of NEMO, please refer to the
`Migration Guide`_, which aims to help you make the move to 4.2.0.
The above users' guides cover in detail what is available from GitLab and supported by the
NEMO System Team. Aside from this web platform, a set of test cases is also available from the
`Test case repository`_. These test cases can be useful for students, outreach, and
exploring specific aspects of NEMO with light configurations. The web page also allows you
to submit test cases you have developed and want to share with the community. Feel free to
contribute!
Project documentation
=====================
Reference manuals fully describing NEMO's three main components
* |OCE| models the ocean {thermo}dynamics and solves the primitive equations (`./src/OCE <./src/OCE>`_)
* |ICE| simulates sea-ice {thermo}dynamics, brine inclusions and subgrid-scale thickness
variations (`./src/ICE <./src/ICE>`_)
* |MBG| models the {on,off}line oceanic tracers transport
(`./src/TOP <./src/TOP>`_); and `PISCES`_ models the biogeochemical processes (`./src/TOP/PISCES <./src/TOP/PISCES>`_)
are available from Zenodo:
============ ======================== =====
Component Reference Manual DOI
============ ======================== =====
|NEMO-OCE| `NEMO ocean engine`_ .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.6334656.svg
:target: https://doi.org/10.5281/zenodo.6334656
|NEMO-ICE| `NEMO Sea Ice engine`_ .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3878122.svg
:target: https://doi.org/10.5281/zenodo.3878122
|NEMO-MBG| `NEMO Tracers engine`_ .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.1471700.svg
:target: https://doi.org/10.5281/zenodo.1471700
============ ======================== =====
These reference manuals are the publications that should be cited in your own
publications. Please visit `How to cite`_ for details.
New features of the 4.2.0 release are described in the `Change list`_ section of the `NEMO users' guide`_.
Asking questions, and exchanging information
============================================
- Register once and use the `NEMO forums`_ on Discourse to share and discuss with the NEMO community.
- Register once to receive the `NEMO newsletter`_ by email: recommended for all
users to receive the major announcements from the project (new releases, open meetings and
other important information). Low traffic: about ten messages a year.
Contributing to NEMO visibility: projects and publications
==========================================================
Please help us justify the NEMO development efforts by:
- Adding your publications using NEMO and its outputs to the `NEMO publications`_ page
- Describing your project using NEMO on the `NEMO projects`_ page
NEMO also has a `Special Issue`_ in the open-access journal
Geoscientific Model Development (GMD) from the European Geosciences Union.
Its main scope is to collect relevant manuscripts covering various topics and
to provide a single portal to assess the model's potential and evolution.
Contributing to NEMO development
================================
NEMO strives to be written in a way which allows the easy incorporation of developments.
You are welcome to contribute to the development of the NEMO Shared reference. NEMO
development is driven by the NEMO Consortium, which plans and steers NEMO's sustainable
development in order to keep a reliable, evolving framework. Development is organised and
scheduled through a five-year development strategy, working groups, and the yearly workplan
of the development team (the NEMO System Team). More information is
available on the `NEMO System Team wiki`_.
Disclaimer
==========
The NEMO source code is freely available and distributed under the
`CeCILL v2.0 license <./LICENSE>`_ (GNU GPL compatible).
You can use, modify and/or redistribute the software under its terms,
but users are provided only with a limited warranty, and the software's authors and
successive licensors have only limited liability.
......@@ -55,7 +55,8 @@
%CPP cpp
%FC mpifort -c -cpp
%FCFLAGS -march=core-avx2 -i4 -r8 -O3 -fp-model strict -fno-alias -align array64byte
%PROD_FCFLAGS -march=core-avx2 -i4 -r8 -O3 -fp-model strict -fno-alias -align array64byte
%DEBUG_FCFLAGS -march=core-avx2 -i4 -r8 -g -O0 -debug all -traceback -fp-model strict -ftrapuv -check all,noarg_temp_created -fpe-all0 -ftz -init=arrays,snan,huge
%FFLAGS %FCFLAGS
%LD mpifort
%LDFLAGS
......
......@@ -9,6 +9,7 @@ use strict;
use warnings;
use Fcm::Timer qw{timestamp_command};
use File::Spec;
# Function declarations
sub catfile;
......
......@@ -203,12 +203,12 @@ if [ -n "${x_n}" ]; then
# Look for already-existing new config
NEW_CONF=${x_n}
if [ -n "${x_t}" ]; then
NEW_DIR=$( find ${x_t} -maxdepth 1 -type d -name ${x_n} 2>/dev/null )
NEW_DIR=$( find -L ${x_t} -maxdepth 1 -type d -name ${x_n} 2>/dev/null )
else
NEW_DIR=$( find ${MAIN_DIR}/cfgs ${MAIN_DIR}/tests -maxdepth 1 -type d -name ${NEW_CONF} 2>/dev/null )
NEW_DIR=$( find -L ${MAIN_DIR}/cfgs ${MAIN_DIR}/tests -maxdepth 1 -type d -name ${NEW_CONF} 2>/dev/null )
fi
NEW_NB=$( echo ${NEW_DIR} | wc -w )
NEW_CMP=$( grep -l "^${NEW_CONF} " ${MAIN_DIR}/cfgs/*_cfgs.txt ${MAIN_DIR}/tests/*_cfgs.txt )
NEW_CMP=$( grep -l "^${NEW_CONF} " ${MAIN_DIR}/cfgs/*_cfgs.txt ${MAIN_DIR}/tests/*_cfgs.txt | tail -1 )
# Define if new config needs to be created or not
if [[ ${NEW_NB:-0} -eq 0 || -z "${NEW_CMP}" ]]; then
......@@ -389,7 +389,7 @@ if [ "${NBR_PRC}" -gt 0 ]; then
fcm build --ignore-lock -v ${x_v} -j ${NBR_PRC} ${NEMO_TDIR}/${CUR_CONF}/BLD/${USEBLD} || exit 1
if [ -f ${NEMO_TDIR}/${CUR_CONF}/BLD/bin/nemo.exe ]; then
ln -sfv ${NEMO_TDIR}/${CUR_CONF}/BLD/bin/nemo.exe ${NEMO_TDIR}/${CUR_CONF}/EXP00/nemo
(cd ${NEMO_TDIR}/${CUR_CONF}/EXP00; ln -sfv ../BLD/bin/nemo.exe ./nemo)
else
exit 1
fi
......
......@@ -66,7 +66,12 @@ then
do
if [[ -L ${f} && $( readlink -f ${f} ) =~ "SHARED" ]]
then
\ln -sf $( readlink -f ${f} ) ${1}/EXP00/$( basename ${f} ) # keep link from SHARED
# create absolute (resp. relative) symlinks if the config directory is outside (resp. inside) the nemo directory
if [[ $(dirname ${1}) != $(dirname ${2}) ]]; then
\ln -sf $( readlink -f ${f} ) ${1}/EXP00/$( basename ${f} ) # keep link from SHARED
else
(cd ${1}/EXP00; \ln -sf ../../../cfgs/SHARED/$(basename $(readlink -f ${f}) ) $( basename ${f} ))
fi
else
\cp ${f} ${1}/EXP00/.
fi
......
......@@ -57,10 +57,8 @@ set -o posix
# * creation
#
#-
[ ! -d ${3}/${2} ] && \mkdir ${3}/${2}
[ ! -d ${3}/${2}/BLD ] && \mkdir ${3}/${2}/BLD
[ ! -d ${3}/${2}/BLD ] && \mkdir -p ${3}/${2}/BLD
[ ! -d ${1}/${2}/BLD ] && ln -sf ${3}/${2}/BLD ${1}/${2}/BLD
# enforce presence of cpp_tools.fcm (write a blank one if not present in the tools directory)
# cp instead of ln to avoid overwriting the previous tool cpp_XXX.fcm file when compiling a tool without a cpp_YYY.fcm file.
[ -f ${3}/${2}/cpp_${2}.fcm ] && ln -sf -f ${3}/${2}/cpp_${2}.fcm ${3}/${2}/BLD/cpp_tools.fcm || echo 'bld::tool::fppkeys ' > ${3}/${2}/BLD/cpp_tools.fcm
rm -f ${1}/${CUR_CONF}/BLD/fcm.bld.lock
......@@ -3,12 +3,12 @@
#SBATCH -J sette
#SBATCH -o sette.%j.out
#SBATCH -e sette.%j.err
#SBATCH --parsable
#SBATCH -N 1
#SBATCH --time=0-00:20:00
#SBATCH --time=0-00:30:00
#SBATCH --account=frtomerc
#SBATCH --partition=par
#SBATCH --qos=np
##SBATCH --ntasks=NPROCS
#SBATCH --ntasks=TOTAL_NPROCS
set -x
......@@ -73,23 +73,17 @@ module list
sed -i "s/one_file/multiple_file/" file_def_nemo-*.xml
#
# Run the parallel MPI executable
#
echo "Running time ${MPIRUN} ./nemo"
#
if [ $XCORES -gt 0 ]; then
#
# Run MPMD case
#
#XIOS will run on a separate node so will run in parallel queue
if [ ! -f ./xios_server.exe ] && [ -f ${XIO_HOME}/bin/xios_server.exe ]; then
if [ $XCORES -gt 0 ] && [ ! -f ./xios_server.exe ] && [ -f ${XIO_HOME}/bin/xios_server.exe ]; then
cp ${XIO_HOME}/bin/xios_server.exe .
fi
if [ ! -f ./xios_server.exe ]; then
echo "./xios_server.exe not found"
echo "run aborted"
exit
fi
cat > ./config.file <<-EOF
cat > ./config.file <<-EOF
0-$((OCORES-1)) ./nemo
${OCORES}-$((TOTAL_NPROCS-1)) ./xios_server.exe
EOF
......
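In the MPMD branch above, the heredoc writes an srun --multi-prog configuration file that maps MPI rank ranges to executables. As a worked example with hypothetical core counts, OCORES=124 and TOTAL_NPROCS=128 (so 4 ranks are left for the XIOS server), config.file would contain:
0-123 ./nemo
124-127 ./xios_server.exe
and the job is then launched with srun --ntasks=128 --multi-prog config.file, as the templates below do via ${MPIRUN}.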
......@@ -41,8 +41,8 @@
#
# modules to load
module purge
module use /home/ext/mr/smer/samsong/modules
module load gcc/9.2.0 intel/2018.5.274 intelmpi/2018.5.274 phdf5/1.8.18 netcdf_par/4.7.1_V2 xios/trunk/rev2320-impi
module use ~samsong/modules
module load gcc/9.2.0 intel/2018.5.274 intelmpi/2018.5.274 phdf5/1.8.18 netcdf_par/4.7.1_V2 xios/trunk/r2320_intel-impi
export XIO_HOME=${XIOS_DIR}
# Do not remove or change the following line
......
#!/usr/bin/env bash
#SBATCH -J sette
#SBATCH -o sette.%j.out
#SBATCH -e sette.%j.err
#SBATCH --parsable
#SBATCH --exclusive
#SBATCH -N 1
#SBATCH -p normal256
#SBATCH --time=01:00:00
##SBATCH --time=00:15:00
##SBATCH -A smer
#SBATCH -A cmems
##SBATCH --qos=normal
#SBATCH --qos=coper
set -x
# Test specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file).
#
echo " ";
OCORES=NPROCS
XCORES=NXIOPROCS
O_PER_NODE=32
X_PER_NODE=8
if [ $XCORES -le $X_PER_NODE ]; then X_PER_NODE=$XCORES; fi
if [ $OCORES -le $O_PER_NODE ]; then O_PER_NODE=$OCORES; fi
export SETTE_DIR=DEF_SETTE_DIR
###############################################################
#
export MPIRUN="srun"
#
# load sette functions (only post_test_tidyup needed)
#
. ${SETTE_DIR}/all_functions.sh
###############################################################
#
# modules to load
module purge
module use ~samsong/modules
module load gcc/9.2.0 intel/2018.5.274 openmpi/intel/4.0.2.2 hdf5-1.8.18-ompi netcdf-4.7.1-ompi_V2 xios/trunk/r2320_intel-ompi
export OMPI_CC=icc
export OMPI_CXX=icpc
export OMPI_FC=ifort
export OMPI_F77=ifort
export OMPI_F90=ifort
# Do not remove or change the following line
# BODY
#
# Test specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file). Note that the number of compute nodes required
# is also set by the fcm_job.sh on the PBS select header line above.
#
# These variables are needed by post_test_tidyup function in all_functions.sh
#
export XIO_HOME=${XIOS_DIR}
export INPUT_DIR=DEF_INPUT_DIR
export CONFIG_DIR=DEF_CONFIG_DIR
export TOOLS_DIR=DEF_TOOLS_DIR
export NEMO_VALIDATION_DIR=DEF_NEMO_VALIDATION
export NEW_CONF=DEF_NEW_CONF
export CMP_NAM=DEF_CMP_NAM
export TEST_NAME=DEF_TEST_NAME
export EXE_DIR=DEF_EXE_DIR
ulimit -a
ulimit -s unlimited
#
# end of set up
###############################################################
#
# change to the working directory
#
cd ${EXE_DIR}
echo Running on host `hostname`
echo Time is `date`
echo Directory is `pwd`
#
# Run the parallel MPI executable
#
if [ $XCORES -gt 0 ]; then
#
# Run MPMD case
#
#XIOS will run on a separate node so will run in parallel queue
if [ $XCORES -gt 0 ] && [ ! -f ./xios_server.exe ] && [ -f ${XIO_HOME}/bin/xios_server.exe ]; then
cp ${XIO_HOME}/bin/xios_server.exe .
fi
cat > ./config.file <<-EOF
0-$((OCORES-1)) ./nemo
${OCORES}-$((TOTAL_NPROCS-1)) ./xios_server.exe
EOF
$MPIRUN --ntasks=TOTAL_NPROCS --multi-prog config.file
else
#
# Run SPMD case
#
$MPIRUN --ntasks=TOTAL_NPROCS ./nemo
fi
#
#
post_test_tidyup
# END_BODY
# Do not remove or change the previous line
exit
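The "Do not hand edit" comments refer to how this template is instantiated: fcm_job.sh substitutes the placeholder tokens (NPROCS, NXIOPROCS, TOTAL_NPROCS and the DEF_* paths) with sed before submitting the job. A rough sketch of that kind of substitution, with hypothetical values and file names (the exact commands used by fcm_job.sh may differ):
# hypothetical illustration only: copy the template, then fill in the placeholders
cp batch_template.sh run_sette_test.job
sed -i -e "s/TOTAL_NPROCS/128/" \
       -e "s/NXIOPROCS/4/" \
       -e "s/NPROCS/124/" \
       -e "s:DEF_SETTE_DIR:/path/to/sette:" run_sette_test.job
# TOTAL_NPROCS is replaced before NPROCS so the shorter token does not clobber the longer one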
......@@ -175,24 +175,11 @@ clean_config() {
# define validation dir
set_valid_dir () {
if [ ${DETACHED_HEAD} == "no" ] ; then
REVISION_NB=`git -C ${MAIN_DIR} rev-parse --short HEAD`
else
REVISION_NB=${DETACHED_CMIT}
fi
REVISION_NB=`git -C ${MAIN_DIR} rev-parse --short HEAD`
REV_DATE0="`git -C ${MAIN_DIR} log -1 | grep Date | sed -e 's/.*Date: *//' -e's/ +.*$//'`"
REV_DATE=`${DATE_CONV}"${REV_DATE0}" +"%y%j"`
REVISION_NB=${REV_DATE}_${REVISION_NB}
if [ ${#REVISION_NB} -eq 0 ]
then
echo "some problems with git rev-list command"
echo "some problems with git rev-list command" >> ${SETTE_DIR}/output.sette
REVISION_NB=`date +%Y%m%d`
echo "put in ${REVISION_NB} date"
echo "put in ${REVISION_NB} date" >> ${SETTE_DIR}/output.sette
else
REVISION_NB=${REV_DATE}_${NEMO_REV}
echo "value of revision number of NEMOGCM: ${REVISION_NB}"
fi
localchanges=`git -C ${MAIN_DIR} status --short -uno | wc -l`
if [[ $localchanges > 0 ]] ; then
REVISION_NB=${REVISION_NB}+
......
......@@ -93,11 +93,16 @@ cd ${EXE_DIR}
if [ "${INTERACT_FLAG}" == "yes" ]; then
eval ${JOB_FILE}
else if [ "${INTERACT_FLAG}" == "no" ]; then
if [[ "${BATCH_COMMAND_PAR}" =~ "sbatch" ]]; then
BATCH_NAME0="${BATCH_NAME}_${config}_${TEST_NAME}"
BATCH_COMMAND_PAR="sbatch -J ${BATCH_NAME0} --parsable"
BATCH_COMMAND_SEQ=${BATCH_COMMAND_PAR}
fi
# submit job to batch system
if [ "${NB_PROC}" == "1" ]; then
eval ${BATCH_COMMAND_SEQ} ${JOB_FILE} ; echo ${BATCH_COMMAND_SEQ} ${JOB_FILE}
BATCH_LST+=( $( ${BATCH_COMMAND_SEQ} ${JOB_FILE} ) ) ; echo ${BATCH_COMMAND_SEQ} ${JOB_FILE}
else
eval ${BATCH_COMMAND_PAR} ${JOB_FILE} ; echo ${BATCH_COMMAND_PAR} ${JOB_FILE}
BATCH_LST+=( $( ${BATCH_COMMAND_PAR} ${JOB_FILE} ) ) ; echo ${BATCH_COMMAND_PAR} ${JOB_FILE}
fi
fi
fi
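The change above stops eval-ing the submission command and instead captures its output: with sbatch --parsable the command prints only the job ID, so BATCH_LST ends up holding the IDs of every submitted run, which sette.sh can then poll (see the squeue -j loop added to sette.sh further down). A minimal sketch of the pattern, assuming a Slurm system and hypothetical job scripts:
BATCH_LST=()
for job in job_a.slurm job_b.slurm; do               # hypothetical job scripts
    BATCH_LST+=( $( sbatch --parsable ${job} ) )     # --parsable prints just the job ID
done
# wait until none of the submitted jobs is still queued or running
while [ $( squeue -j $(echo ${BATCH_LST[@]} | tr ' ' ',') -h -o %j | wc -l ) -ne 0 ]; do
    sleep 10
done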
File moved
File moved
......@@ -35,8 +35,6 @@ BATCH_NAME=${SETTE_BATCH_NAME:-sette}
FORCING_DIR=${SETTE_FORCING_DIR:-$WORKDIR/FORCING}
# validation files storing
NEMO_VALIDATION_DIR=${SETTE_NEMO_VALIDATION_DIR:-$MAIN_DIR}/NEMO_VALIDATION
# input files storing (namelist, iodef ...) (DO NOT CHANGE)
INPUT_DIR=${CONFIG_DIR}/${NEW_CONF}/EXP00
# optional custom SETTE tests directory
#export CUSTOM_DIR=/path/to/custom/sette/tests
# ------------------------------------------------------------------------------------------
......@@ -72,11 +70,13 @@ fi
#
# TYPES OF TESTS TO PERFORM
# Note an attempt will be made to compile each configuration even if none of these tests are activated
export DO_COMPILE=0
export DO_RESTART=0
export DO_REPRO=0
export DO_CORRUPT=0
export DO_PHYOPTS=0
TEST_TYPES=(${SETTE_TEST_TYPES[@]:-"RESTART REPRO PHYOPTS CORRUPT"})
TEST_TYPES=(${SETTE_TEST_TYPES[@]:-"COMPILE RESTART REPRO PHYOPTS CORRUPT"})
if [[ ${TEST_TYPES[*]} =~ .*COMPILE.* ]]; then export DO_COMPILE=1 ;fi
if [[ ${TEST_TYPES[*]} =~ .*RESTART.* ]]; then export DO_RESTART=1 ;fi
if [[ ${TEST_TYPES[*]} =~ .*REPRO.* ]]; then export DO_REPRO=1 ;fi
if [[ ${TEST_TYPES[*]} =~ .*CORRUPT.* ]]; then export DO_CORRUPT=1 ;fi
......
......@@ -9,7 +9,8 @@ export CMPL_CORES=8 # Number of threads to use for compiling
export SETTE_STG="_ST" # Base suffix to append to configuration name
NEMO_DEBUG=""
dry_run=0
NO_REPORT=0
SETTE_REPORT=0
WAIT_SETTE=0
#
# controls for some common namelist, run-time options:
#
......@@ -36,60 +37,36 @@ export USER_INPUT='yes' # Default: yes => request user input on decisions
# 1. regarding mismatched options
# 2. regarding incompatible options
# 3. regarding creation of directories
#
# Check that git branch is usable
export DETACHED_HEAD="no"
git -C ${MAIN_DIR} branch --show-current >& /dev/null
if [[ $? == 0 ]] ; then
# subdirectory below NEMO_VALIDATION_DIR defaults to branchname
export SETTE_SUB_VAL="$(git -C ${MAIN_DIR} branch --show-current)"
if [ -z $SETTE_SUB_VAL ] ; then
# Probably on a detached HEAD (possibly testing an old commit).
# Verify this and try to recover original commit
MORE_INFO="$(git branch -a | head -1l | sed -e's/.*(//' -e 's/)//' )"
if [[ "${MORE_INFO}" == *"detached"* ]] ; then
export DETACHED_HEAD="yes"
export DETACHED_CMIT=$( echo \\${MORE_INFO} | awk '{print $NF}' )
# There is no robust way to recover a branch name in a detached state
# so just use the commit with a prefix
export SETTE_SUB_VAL="detached_"${DETACHED_CMIT}
else
export SETTE_SUB_VAL="Unknown"
fi
fi
export SETTE_THIS_BRANCH=${SETTE_SUB_VAL}
else
# subdirectory below NEMO_VALIDATION_DIR defaults to "MAIN"
export SETTE_SUB_VAL="MAIN"
export SETTE_THIS_BRANCH="Unknown"
fi
export SETTE_THIS_BRANCH=$(git log -1 --pretty=%D HEAD | sed 's|.*origin/||g;s|, .*||g;s|.*-> ||g' )
export SETTE_SUB_VAL=${SETTE_THIS_BRANCH}
export NEMO_REV=$(git -C ${MAIN_DIR} rev-parse --short HEAD 2> /dev/null)
# Parse command-line arguments
if [ $# -gt 0 ]; then
while getopts n:x:v:g:cybrshTqQteiACFNXua option; do
while getopts n:x:v:g:cybrshTqQteiACFNXuaw option; do
case $option in
c) export SETTE_CLEAN_CONFIGS='yes'
export SETTE_SYNC_CONFIGS='yes'
echo "-c: Configuration ${SETTE_TEST_CONFIGS[@]} will be cleaned; this option enforces also synchronisation"
echo "-c: Configuration(s) ${SETTE_TEST_CONFIGS[@]} will be cleaned; this option enforces also synchronisation"
echo "";;
y) dry_run=1
echo "";;
b) NEMO_DEBUG="-b"
echo "-b: Nemo will be compiled with DEBUG options"
echo "-b: Nemo will be compiled with DEBUG options if available in ARCH file"
echo "";;
r) SETTE_REPORT=1
echo "-r: Sette report will be printed once jobs are finished"
WAIT_SETTE=1
echo "";;
r) NO_REPORT=1
w) WAIT_SETTE=1
echo "-w: Sette will wait for jobs to finish"
echo "";;
s) export SETTE_SYNC_CONFIGS='yes'
echo "-s: MY_SRC and EXP00 in ${SETTE_TEST_CONFIGS[@]} will be synchronised with the MY_SRC and EXPREF from the reference configuration"
echo "";;
n) OPTSTR="$OPTARG"
export SETTE_TEST_CONFIGS=(${OPTSTR})
echo "=================================="
if [ ${#SETTE_TEST_CONFIGS[@]} -gt 1 ]; then
echo "-n: Configurations ${SETTE_TEST_CONFIGS[@]} will be tested if they are available"
else
echo "-n: Configuration ${SETTE_TEST_CONFIGS[@]} will be tested if it is available"
fi
SETTE_TEST_CONFIGS=(${OPTSTR})
echo "-n: Configuration(s) ${SETTE_TEST_CONFIGS[@]} will be tested if they are available"
echo "";;
g) case $OPTARG in
[0-9,a-z,A-Z] ) echo "-g: Using ${SETTE_STG}${OPTARG} as the configuration suffix";;
......@@ -154,7 +131,7 @@ if [ $# -gt 0 ]; then
echo '-A to run tests in attached (SPMD) mode (default: MPMD with key_xios)'
echo '-n "CFG1_to_test CFG2_to_test ..." to test some specific configurations'
echo '-x "TEST_type TEST_type ..." to specify particular type(s) of test(s) to run after compilation'
echo ' TEST_type choices are: RESTART REPRO CORRUPT PHYSICS - anything else will COMPILE only'
echo ' TEST_type choices are: COMPILE RESTART REPRO CORRUPT PHYSICS - anything else will COMPILE only'
echo '-v "subdir" optional validation record subdirectory to be created below NEMO_VALIDATION_DIR'
echo '-g "group_suffix" single character suffix to be appended to the standard _ST suffix used'
echo ' for SETTE-built configurations (needed if sette.sh invocations may overlap)'
......@@ -163,6 +140,8 @@ if [ $# -gt 0 ]; then
echo '-b to compile Nemo with debug options (only if %DEBUG_FCFLAGS is defined in your arch file)'
echo '-c to clean each configuration'
echo '-s to synchronise the sette MY_SRC and EXP00 with the reference MY_SRC and EXPREF'
echo '-w to wait for Sette jobs to finish'
echo '-r to print Sette report after Sette jobs completion'
echo '-u to run sette.sh without any user interaction. This means no checks on creating'
echo ' directories etc. i.e. no safety net!' ; exit 42 ;;
esac
......@@ -172,7 +151,6 @@ fi
#
# Get SETTE parameters
. ./param.cfg
#
# Set the common compile keys to add or delete based on command-line arguments:
#
......@@ -222,6 +200,7 @@ fi
echo "Carrying out the following tests : ${TEST_TYPES[@]}"
echo "requested by the command : "$cmd $cmdargs
echo "on branch : "$SETTE_THIS_BRANCH
echo "on revision : "$NEMO_REV
printf "%-33s : %s\n" USING_TIMING $USING_TIMING
printf "%-33s : %s\n" USING_ICEBERGS $USING_ICEBERGS
printf "%-33s : %s\n" USING_ABL $USING_ABL
......@@ -243,7 +222,6 @@ echo ""
# Option compatibility tests
#
if [ ${USING_MPMD} == "yes" ] && [ ${USING_XIOS} == "no" ] ; then echo "Incompatible choices. MPMD mode requires the XIOS server" ; exit ; fi
if [ ${dry_run} -eq 1 ] ; then echo "dryrun only: no tests performed" ; exit ; fi
# run sette on reference configuration
......@@ -268,28 +246,49 @@ if [[ $? != 0 ]]; then
exit 42
fi
if [ ${NO_REPORT} -ne 0 ] ; then exit ; fi
# wait for sette jobs to finish
if [[ ${WAIT_SETTE} -eq 1 && "${TEST_TYPES[@]}" =~ (RESTART|REPRO|CORRUPT|PHYSICS) ]]; then
echo ""
echo "-------------------------------------------------------------"
echo "wait for sette jobs to finish..."
echo "-------------------------------------------------------------"
echo ""
NRUN=999
nit=0
while [[ $NRUN -ne 0 && $nit -le 1080 ]]; do
nit=$((nit+1))
if [[ "${BATCH_STAT}" == "squeue" && -n "${BATCH_LST[@]}" ]]; then
BATCH_STAT="squeue -j $(echo ${BATCH_LST[@]} | tr ' ' ',') -h -o %j"
NRUN=$( ${BATCH_STAT} | wc -l )
echo "currently running jobs: "$(${BATCH_STAT})
else
NRUN=$( ${BATCH_STAT} | grep ${BATCH_NAME} | wc -l )
fi
if [[ $NRUN -ne 0 ]]; then
echo $NRUN "sette jobs still in queue or running ..."
sleep 10
else
echo "all sette runs completed"
break
fi
done
fi
# run sette report
echo ""
echo "-------------------------------------------------------------"
echo "./sette_rpt.sh (script will wait all nemo_sette run are done)"
echo "-------------------------------------------------------------"
echo ""
NRUN=999
NIT=0
while [[ $NRUN -ne 0 && $nit -le 1080 ]]; do
nit=$((nit+1))
NRUN=$( ${BATCH_STAT} | grep ${BATCH_NAME} | wc -l )
if [[ $NRUN -ne 0 ]]; then
printf "%-3d %s\r" $NRUN 'nemo_sette runs still in queue or running ...';
else
printf "%-50s\n" " "
./sette_rpt.sh ${NEMO_DEBUG} ${SETTE_SUB_VAL:+-v ${SETTE_SUB_VAL}}
exit
if [ ${SETTE_REPORT} -eq 1 ] ; then
echo ""
echo "-------------------------------------------------------------"
echo "./sette_rpt.sh"
echo "-------------------------------------------------------------"
./sette_rpt.sh ${NEMO_DEBUG} -n "${TEST_CONFIGS[*]}"
if [[ $? != 0 ]]; then
echo ""
echo "-----------------------------------------------------------------"
echo "./sette_rpt.sh didn't finish properly, need investigations"
echo "-----------------------------------------------------------------"
echo ""
exit 42
fi
sleep 10
done
printf "\n"
echo ""
echo "Something wrong happened, it tooks more than 3 hours to run all the sette tests"
echo ""
fi
exit 0