#!/usr/bin/env bash
#SBATCH -J sette
#SBATCH -o sette.%j.out
#SBATCH -e sette.%j.err
#SBATCH --export=ALL
#SBATCH --parsable
#SBATCH --exclusive
#SBATCH -N 1
#SBATCH -p normal256
#SBATCH --time=01:00:00
##SBATCH --time=00:15:00
##SBATCH -A smer
#SBATCH -A cmems
##SBATCH --qos=normal
#SBATCH --qos=coper
#
# Test specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file).
#
  echo " ";
  OCORES=NPROCS        # total ocean (nemo) MPI tasks; placeholder substituted by fcm_job.sh
  XCORES=NXIOPROCS     # total XIOS server tasks; placeholder substituted by fcm_job.sh
  O_PER_NODE=32        # maximum ocean tasks per node
  X_PER_NODE=8         # maximum XIOS tasks per node
# Never request more tasks per node than the job actually uses
  if [ "$XCORES" -le "$X_PER_NODE" ]; then X_PER_NODE="$XCORES"; fi
  if [ "$OCORES" -le "$O_PER_NODE" ]; then O_PER_NODE="$OCORES"; fi
  export SETTE_DIR=DEF_SETTE_DIR
###############################################################
#
# load sette functions (only post_test_tidyup needed)
#
  . "${SETTE_DIR}/all_functions.sh"
###############################################################
#
# modules to load
  module purge
  module use /home/ext/mr/smer/samsong/modules
  module load gcc/9.2.0 intel/2018.5.274 intelmpi/2018.5.274 phdf5/1.8.18 netcdf_par/4.7.1_V2 xios/trunk/rev2320-impi
# NOTE(review): assumes the xios module exports XIOS_DIR into the environment — confirm
  export XIO_HOME=${XIOS_DIR}
# Do not remove or change the following line
# BODY
#
# Test specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file). Note that the number of compute nodes required
# is also set by the fcm_job.sh on the PBS select header line above.
#
# These variables are needed by the post_test_tidyup function in all_functions.sh
# (the DEF_* placeholders are substituted by fcm_job.sh via sed)
  export INPUT_DIR=DEF_INPUT_DIR
  export CONFIG_DIR=DEF_CONFIG_DIR
  export TOOLS_DIR=DEF_TOOLS_DIR
  export NEMO_VALIDATION_DIR=DEF_NEMO_VALIDATION
  export NEW_CONF=DEF_NEW_CONF
  export CMP_NAM=DEF_CMP_NAM
  export TEST_NAME=DEF_TEST_NAME
  export EXE_DIR=DEF_EXE_DIR
  ulimit -a
  ulimit -s unlimited
#
# end of set up
###############################################################
#
# change to the working directory; abort if it does not exist
#
  cd "$EXE_DIR" || { echo "cannot cd to $EXE_DIR" >&2; exit 1; }
  echo "Running on host $(hostname)"
  echo "Time is $(date)"
  echo "Directory is $(pwd)"
#
  if [ "$XCORES" -gt 0 ]; then
#
# Run MPMD case (nemo + detached XIOS servers)
#
# XIOS will run on a separate node so will run in parallel queue
# Stage the xios_server executable alongside nemo if it is not already here
     if [ ! -f ./xios_server.exe ] && [ -f "${XIO_HOME}/bin/xios_server.exe" ]; then
        cp "${XIO_HOME}/bin/xios_server.exe" .
     fi
     if [ ! -f ./xios_server.exe ]; then
        echo "./xios_server.exe not found"
        echo "run aborted"
        # non-zero status so the scheduler/caller sees the failure
        exit 1
     fi
#
# Previous srun-based launch kept for reference:
#    cat > mpmd.conf <<EOF
#0-$((OCORES-1)) ./nemo
#${OCORES}-39 ./xios_server.exe
#EOF
     cat > mpmd.conf <<EOF
-n ${OCORES} ./nemo
-n ${XCORES} ./xios_server.exe
EOF
#
#    echo time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n 40 --multi-prog ./mpmd.conf
#    time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n 40 --multi-prog ./mpmd.conf
     echo time mpiexec.hydra -configfile ./mpmd.conf
     time mpiexec.hydra -configfile ./mpmd.conf
#
  else
#
# Run SPMD case (nemo only, attached XIOS if any)
#
#    echo time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n ${OCORES} ./nemo
#    time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n ${OCORES} ./nemo
     echo time mpiexec.hydra -n ${OCORES} ./nemo
     time mpiexec.hydra -n ${OCORES} ./nemo
  fi
#
# archive results and clean up (defined in all_functions.sh)
  post_test_tidyup
# END_BODY
# Do not remove or change the previous line
  exit