From 660d5e57c91849379c40aedcda312a7cd5d0cf85 Mon Sep 17 00:00:00 2001
From: sebastien masson <smasson@amphitrite.locean-ipsl.upmc.fr>
Date: Mon, 28 Feb 2022 10:17:58 +0100
Subject: [PATCH] update DIA doc + minor cleaning of old cpp keys

---
 doc/NEMO_manual_state.txt             | 10 +--
 doc/latex/NEMO/subfiles/chap_DIA.tex  | 91 ++++++++++++++-------------
 doc/latex/NEMO/subfiles/chap_SBC.tex  |  2 +-
 doc/latex/NEMO/subfiles/chap_misc.tex | 25 --------
 doc/latex/global/coding_rules.tex     |  3 +-
 src/OCE/DIA/diaptr.F90                |  2 +-
 src/OCE/DOM/dtatsd.F90                |  1 -
 src/OCE/IOM/iom.F90                   |  2 +-
 src/OCE/LBC/mpp_lnk_icb_generic.h90   |  4 +-
 src/OCE/SBC/cpl_oasis3.F90            |  1 -
 tests/ADIAB_WAVE/EXPREF/namelist_cfg  |  4 +-
 tests/ISOMIP+/MY_SRC/dtatsd.F90       |  1 -
 12 files changed, 59 insertions(+), 87 deletions(-)

diff --git a/doc/NEMO_manual_state.txt b/doc/NEMO_manual_state.txt
index 59064573..42e5bff0 100644
--- a/doc/NEMO_manual_state.txt
+++ b/doc/NEMO_manual_state.txt
@@ -5,23 +5,23 @@ namage namagrif namalb namc14_fcg namc14_sbc namc14_typ namc1d namc1d_dyndmp nam
 chap_DYN.tex: key{dynspg\_exp} key{dynspg\_flt} key{dynspg\_ts} key{trddyn} key{trdvor} key{vvl} mdl{dynspg\_flt} mdl{zpsdhe} nlst{namsplit} np{ln\_bt\_nn\_auto} np{ln\_dynhpg\_djc} np{ln\_dynhpg\_imp} np{ln\_dynhpg\_isf} np{ln\_dynhpg\_prj} np{ln\_dynhpg\_sco} np{ln\_dynhpg\_vec} np{ln\_dynhpg\_zco} np{ln\_dynhpg\_zps} np{ln\_dynldf\_bilap} np{ln\_dynvor\_con} np{ln\_dynzad\_zts} np{ln\_traadv\_tvd\_zts} np{ln\_wd\_dl\_ramp} np{ln\_zdfexp} np{nn\_atfp} np{nn\_dynhpg\_rst} np{nn\_ice\_embd} np{nn\_zdfexp} 
 introduction.tex: 
 chap_model_basics_zstar.tex: key{dynspg\_exp} key{dynspg\_flt} key{dynspg\_ts} key{vvl} mdl{dynspg\_flt} np{rdtbt} np{rnu} 
-chap_DIA.tex: key{diaar5} key{diadct} key{diaharm} key{diahth} key{diainstant} key{floats} key{IOM} key{iomput} key{mpp\_mpi} key{netcdf4} key{trdmld\_trc} key{trdtrc} key{vvl} ngn{namdia\_harm} np{init\_float\_ariane} np{iom\_put} np{jpnflnewflo} np{nb\_ana} np{nit000} np{nitend} np{nn\_debug} np{nn\_diacfl} np{tname} rou{iom\_rstput} 
+chap_DIA.tex: key{diaar5} key{diadct} key{diaharm} key{diahth} key{diainstant} key{floats} key{xios} key{netcdf4} key{trdmld\_trc} key{trdtrc} key{vvl} ngn{namdia\_harm} np{init\_float\_ariane} rou{iom\_put} np{jpnflnewflo} np{nb\_ana} np{nit000} np{nitend} np{nn\_debug} np{nn\_diacfl} np{tname} rou{iom\_rstput} 
 annex_E.tex: key{traldf\_eiv} 
 chap_TRA.tex: jp{lk\_vvl} key{trabbl} key{traTTT} key{zdfddm} mdl{traldf\_lap} ngn{namtra\_bbc} np{ln\_flxqsr} np{ln\_sco} np{ln\_traadv\_NONE} np{ln\_traldf} np{ln\_traldf\_grif} np{ln\_traldf\_NONE} np{ln\_tsd\_tradmp} np{ln\_useCT} np{ln\_zco} np{ln\_zdfexp} np{ln\_zps} np{nn\_chdta} np{nn\_eos} np{nn\_fct\_zts} np{nn\_geoflx\_cst} np{nn\_zdfexp} rou{traldf\_blp} rou{traldf\_lap} 
 chap_DIU.tex: 
 chap_ASM.tex: key{asminc} ngn{namasm} 
 chap_ZDF.tex: key{dynspg\_exp} key{dynspg\_flt} key{dynspg\_ts} key{trabbl} key{vvl} key{zdf} key{zdfcst} key{zdfddm} key{zdfgls} key{zdfosm} key{zdfric} key{zdftke} key{zdftmx} key{zdftmx\_new} mdl{dynbfr} mdl{dynzdf\_imp} mdl{zdfbfr} mdl{zdfini} mdl{zdfkpp} ngn{namzdf\_ddm} ngn{namzdf\_tmx} ngn{namzdf\_tmx\_new} nlst{nambfr} nlst{namzdf_ddm} nlst{namzdf_tmx} nlst{namzdf_tmx_new} np{ln\_bfr2d} np{ln\_bfrimp} np{ln\_crban} np{ln\_tmx\_itf} np{ln\_tranpc} np{ln\_zdfexp} np{ln\_zdftmx\_itf} np{nn\_botfr} np{nn\_clo} np{nn\_ediff} np{nn\_ediss} np{nn\_tkebc\_bot} np{nn\_tkebc\_surf} np{rn\_avevd} np{rn\_bfeb2} np{rn\_bfri1} np{rn\_bfri2} np{rn\_bfri2\_max} np{rn\_bfrien} np{rn\_bfrz0} np{rn\_htmx} np{rn\_me} np{rn\_n2min} np{rn\_tfe} np{rn\_tfe\_itf} np{rn\_tfri2} np{rn\_tfri2\_max} np{rn\_tfrz0} rou{zdf\_bfr} 
 chap_OBS.tex: 
-chap_misc.tex: key{mpp\_mpi} key{nosignedzero} key{vectopt\_loop} np{iom\_get} np{jpjdta} np{jpjglo} np{nn\_bench} np{nn\_bit\_cmp} np{open\_ocean\_jstart} 
+chap_misc.tex: key{nosignedzero} np{iom\_get} np{jpjdta} np{jpjglo} np{nn\_bench} np{nn\_bit\_cmp} np{open\_ocean\_jstart} 
 chap_LDF.tex: hf{dynldf\_cNd} hf{ldfdyn\_substitute} hf{ldftra\_substitute} hf{traldf\_c1d} hf{traldf\_cNd} key{dynldf\_c1d} key{dynldf\_c2d} key{dynldf\_c3d} key{traldf\_c1d} key{traldf\_c2d} key{traldf\_c3d} key{traldf\_cNd} key{traldf\_eiv} mdl{ldfdyn\_c2d} mdl{ldfeiv} mdl{traadv\_eiv} np{ln\_dynldf\_bilap} np{ln\_sco} np{nn\_eos} np{rn\_aeih\_0} np{rn\_aeiv} np{rn\_aeiv\_0} np{rn\_ahm0} np{rn\_ahmb0} np{rn\_aht0} np{rn\_ahtb0} np{traldf\_grif} np{traldf\_grif\_iso} rou{ldf\_dyn\_c2d\_orca} rou{ldfslp\_init} 
-chap_LBC.tex: jp{jpreci} key{mpp\_mpi} np{jpiglo} np{jpindt} np{jpinft} np{jpjglo} np{jpjnob} np{nbdysegn} np{nn\_bdy\_jpk} np{nn\_msh} np{nn\_tra} rou{inimpp2} 
-chap_DOM.tex: key{mpp\_mpi} ngn{namzgr} ngn{namzgr\_sco} nlst{namzgr} nlst{namzgr_sco} np{jpiglo} np{jpjglo} np{jpkglo} np{ln\_sco} np{ln\_sigcrit} np{ln\_s\_SF12} np{ln\_s\_SH94} np{ln\_tsd\_ini} np{ln\_zco} np{ln\_zps} np{nn\_bathy} np{nn\_msh} np{ppa0} np{ppa1} np{ppacr} np{ppdzmin} np{pphmax} np{ppkth} np{ppsur} np{rn\_alpha} np{rn\_bb} np{rn\_e3zps\_min} np{rn\_e3zps\_rat} np{rn\_hc} np{rn\_rmax} np{rn\_sbot\_max} np{rn\_sbot\_min} np{rn\_theta} np{rn\_zb\_a} np{rn\_zb\_b} np{rn\_zs} rou{istate\_t\_s} 
+chap_LBC.tex: jp{jpreci} key{mpi\_off} np{jpiglo} np{jpindt} np{jpinft} np{jpjglo} np{jpjnob} np{nbdysegn} np{nn\_bdy\_jpk} np{nn\_msh} np{nn\_tra} 
+chap_DOM.tex: ngn{namzgr} ngn{namzgr\_sco} nlst{namzgr} nlst{namzgr_sco} np{jpiglo} np{jpjglo} np{jpkglo} np{ln\_sco} np{ln\_sigcrit} np{ln\_s\_SF12} np{ln\_s\_SH94} np{ln\_tsd\_ini} np{ln\_zco} np{ln\_zps} np{nn\_bathy} np{nn\_msh} np{ppa0} np{ppa1} np{ppacr} np{ppdzmin} np{pphmax} np{ppkth} np{ppsur} np{rn\_alpha} np{rn\_bb} np{rn\_e3zps\_min} np{rn\_e3zps\_rat} np{rn\_hc} np{rn\_rmax} np{rn\_sbot\_max} np{rn\_sbot\_min} np{rn\_theta} np{rn\_zb\_a} np{rn\_zb\_b} np{rn\_zs} rou{istate\_t\_s} 
 chap_conservation.tex: key{\_} 
 annex_iso.tex: key{trabbl} key{traldf\_eiv} np{ln\_traldf\_eiv} np{ln\_traldf\_gdia} 
 chap_time_domain.tex: np{ln\_zdfexp} np{nn\_dynhpg\_rst} np{nn\_zdfexp} 
 annex_C.tex: 
 chap_CONFIG.tex: key{agrif} key{c1d} key{dynspg\_ts} key{orca\_r12} key{orca\_r8} key{vvl} key{zdfgls} ngn{namusr\_def} np{jpiglo} np{jpizoom} np{jpjglo} np{jpjzoom} np{ln\_bench} np{nn\_GYRE} np{ORCA\_index} 
-chap_SBC.tex: key{cice} key{coupled} key{cpl\_carbon\_cycle} key{lim3} key{mpp\_mpi} key{nemocice\_decomp} key{oasis3} key{pisces} key{top} key{vvl} mdl{sbcana} mdl{sbc\_ana\_gyre} mdl{sbcsas} ngn{namsbc\_ana} ngn{namsbc\_clio} ngn{namsbc\_core} nlst{namsbc_ana} nlst{namsbc_clio} nlst{namsbc_core} np{clim} np{ln\_ana} np{ln\_blk\_clio} np{ln\_blk\_core} np{ln\_clio} np{ln\_core} np{ln\_rnf\_temp} np{nn\_nit000} np{nn\_tau000} np{nn\_test\_box} np{nz\_rnf} np{rn\_emp0} np{rn\_fiscpl} np{rn\_qns0} np{rn\_qsr0} np{rn\_utau0} np{rn\_vtau0} 
+chap_SBC.tex: key{cice} key{coupled} key{cpl\_carbon\_cycle} key{lim3} key{nemocice\_decomp} key{oasis3} key{pisces} key{top} key{vvl} mdl{sbcana} mdl{sbc\_ana\_gyre} mdl{sbcsas} ngn{namsbc\_ana} ngn{namsbc\_clio} ngn{namsbc\_core} nlst{namsbc_ana} nlst{namsbc_clio} nlst{namsbc_core} np{clim} np{ln\_ana} np{ln\_blk\_clio} np{ln\_blk\_core} np{ln\_clio} np{ln\_core} np{ln\_rnf\_temp} np{nn\_nit000} np{nn\_tau000} np{nn\_test\_box} np{nz\_rnf} np{rn\_emp0} np{rn\_fiscpl} np{rn\_qns0} np{rn\_qsr0} np{rn\_utau0} np{rn\_vtau0} 
 chap_STO.tex: 
 
 ¤ Namelist parameters unfollowing naming conventions (^[cdlnr]n_* or uppercase somewhere)
diff --git a/doc/latex/NEMO/subfiles/chap_DIA.tex b/doc/latex/NEMO/subfiles/chap_DIA.tex
index b6508dbe..e56f8597 100644
--- a/doc/latex/NEMO/subfiles/chap_DIA.tex
+++ b/doc/latex/NEMO/subfiles/chap_DIA.tex
@@ -47,7 +47,7 @@ The information is printed from within the code on the logical unit \texttt{numo
 To locate these prints, use the UNIX command "\textit{grep -i numout}" in the source code directory.
 
 By default, diagnostic output files are written in NetCDF format.
-Since version 3.2, when defining \key{iomput}, an I/O server has been added which
+When \key{xios} is defined, an I/O server is available which
 provides more flexibility in the choice of the fields to be written as well as how
 the writing work is distributed over the processors in massively parallel computing.
 A complete description of the use of this I/O server is presented in the next section.
@@ -55,12 +55,12 @@ A complete description of the use of this I/O server is presented in the next se
 %\cmtgm{                    % start of gmcomment
 
 %% =================================================================================================
-\section{Standard model output (IOM)}
+\section{Standard model output (IOMPUT)}
 \label{sec:DIA_iom}
 
 Since version 3.2, iomput is the \NEMO\ output interface of choice.
 It has been designed to be simple to use, flexible and efficient.
-The two main purposes of iomput are:
+The two main purposes of \rou{iom\_put} are:
 
 \begin{enumerate}
 \item The complete and flexible control of the output files through external XML files adapted by
@@ -82,29 +82,25 @@ aspects of the diagnostic output stream, such as:
 \item Control over metadata via a large XML "database" of possible output fields.
 \end{itemize}
 
-In addition, iomput allows the user to add in the code the output of any new variable (scalar, 2D or 3D)
+In addition, iomput allows the user to output any new variable (scalar, 1D, 2D or 3D) from the code
 in a very easy way.
-All details of iomput functionalities are listed in the following subsections.
-Examples of the XML files that control the outputs can be found in:
-\path{cfgs/ORCA2_ICE_PISCES/EXPREF/iodef.xml},
-\path{cfgs/SHARED/field_def_nemo-oce.xml},
-\path{cfgs/SHARED/field_def_nemo-pisces.xml},
-\path{cfgs/SHARED/field_def_nemo-ice.xml} and \path{cfgs/SHARED/domain_def_nemo.xml}. \\
+All details of \rou{iom\_put} functionalities are listed in the following subsections.
+An example of the main XML file that controls the outputs can be found in \path{cfgs/ORCA2_ICE_PISCES/EXPREF/iodef.xml}.\\
 
-The second functionality targets output performance when running in parallel (\key{mpp\_mpi}).
-Iomput provides the possibility to specify N dedicated I/O processes (in addition to the \NEMO\ processes)
+The second functionality targets output performance when running in parallel.
+XIOS allows the user to specify N dedicated I/O processes (in addition to the \NEMO\ processes)
 to collect and write the outputs.
 With an appropriate choice of N by the user, the bottleneck associated with the writing of
 the output files can be greatly reduced.
 
-In version 3.6, the \rou{iom\_put} interface depends on
-an external code called \href{https://forge.ipsl.jussieu.fr/ioserver/browser/XIOS/branchs/xios-2.5}{XIOS-2.5}
+In version 4.2, the \rou{iom\_put} interface depends on
+an external code called \href{https://forge.ipsl.jussieu.fr/ioserver/browser/XIOS/trunk}{XIOS-trunk}.
 %(use of revision 618 or higher is required).
 This new IO server can take advantage of the parallel I/O functionality of NetCDF4 to
 create a single output file and therefore to bypass the rebuilding phase.
 Note that writing in parallel into the same NetCDF files requires that your NetCDF4 library is linked to
 an HDF5 library that has been correctly compiled (\ie\ with the configure option $--$enable-parallel).
-Note that the files created by iomput through XIOS are incompatible with NetCDF3.
+Note that the files created by \rou{iom\_put} through XIOS are incompatible with NetCDF3.
 All post-processsing and visualization tools must therefore be compatible with NetCDF4 and not only NetCDF3.
 
 Even if not using the parallel I/O functionality of NetCDF4, using N dedicated I/O servers,
@@ -141,15 +137,13 @@ Note, however, that \textbf{\NEMO\ will not read restart generated by XIOS when
 have to be rebuild before continuing the run. This option aims to reduce number of restart files generated by \NEMO\ only,
 and may be useful when there is a need to change number of processors used to run simulation.
 
-An older versions of XIOS do not support reading functionality. It's recommended to use at least XIOS2@1451.
-
 %% =================================================================================================
 \subsection{XIOS: XML Inputs-Outputs Server}
 
 %% =================================================================================================
 \subsubsection{Attached or detached mode?}
 
-Iomput is based on \href{http://forge.ipsl.jussieu.fr/ioserver/wiki}{XIOS},
+\rou{Iom\_put} is based on \href{http://forge.ipsl.jussieu.fr/ioserver/wiki}{XIOS},
 the io\_server developed by Yann Meurdesoif from IPSL.
 The behaviour of the I/O subsystem is controlled by settings in the external XML files listed above.
 Key settings in the iodef.xml file are the tags associated with each defined file.
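+
+As an illustration, here is a minimal sketch of the tag that switches between the two modes, assuming it is declared in the "xios" context of iodef.xml as in the reference configurations:
+\begin{xmllines}
+<context id="xios">
+  <variable_definition>
+    <!-- false: attached mode, each NEMO process writes its own output files         -->
+    <!-- true : detached mode, dedicated xios_server.exe processes collect and write -->
+    <variable id="using_server" type="bool">true</variable>
+  </variable_definition>
+</context>
+\end{xmllines}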
@@ -211,17 +205,6 @@ See the XML basics section below for more details on XML syntax and rules.
     example  \\
     \hline
     \hline
-    buffer\_size                                                            &
-    buffer size used by XIOS to send data from \NEMO\ to XIOS.
-    Larger is more efficient.
-    Note that needed/used buffer sizes are summarized at the end of the job &
-    25000000 \\
-    \hline
-    buffer\_server\_factor\_size                                            &
-    ratio between \NEMO\ and XIOS buffer size.
-    Should be 2.                                                            &
-    2        \\
-    \hline
     info\_level                                                             &
     verbosity level (0 to 100)                                              &
     0        \\
@@ -263,10 +246,10 @@ simply need to be activated by including the required output in a file definitio
 To add new output variables, all 4 of the following steps must be taken.
 
 \begin{enumerate}
-\item in \NEMO\ code, add a \forcode{CALL iom_put( 'identifier', array )} where you want to output a 2D or 3D array.
+\item in \NEMO\ code, add a \forcode{CALL iom_put( 'identifier', array )} where you want to output an array.
 \item If necessary, add \forcode{USE iom ! I/O manager library} to the list of used modules in
   the upper part of your module.
-\item in the field\_def.xml file, add the definition of your variable using the same identifier you used in the f90 code
+\item in the appropriate \path{cfgs/SHARED/field_def_nemo-....xml} file, add the definition of your variable, using the same identifier as in the f90 code
   (see subsequent sections for a details of the XML syntax and rules).
   For example:
 \begin{xmllines}
@@ -279,18 +262,21 @@ To add new output variables, all 4 of the following steps must be taken.
 \end{xmllines}
 Note your definition must be added to the field\_group whose reference grid is consistent with the size of
 the array passed to iomput.
-The grid\_ref attribute refers to definitions set in iodef.xml which, in turn,
-reference grids and axes either defined in the code
-(iom\_set\_domain\_attr and iom\_set\_axis\_attr in \mdl{iom}) or defined in the domain\_def.xml file.
+The grid\_ref attribute refers to definitions set in grid\_def\_nemo.xml which, in turn,
+reference domains and axes either defined in the code
+(iom\_set\_domain\_attr and iom\_set\_axis\_attr in \mdl{iom}) or defined in the domain\_def\_nemo.xml and axis\_def\_nemo.xml files.
 \eg:
 \begin{xmllines}
-<grid id="grid_T_3D" domain_ref="grid_T" axis_ref="deptht"/>
+  <grid id="grid_T_3D" >
+    <domain domain_ref="grid_T" />
+    <axis axis_ref="deptht" />
+  </grid>
 \end{xmllines}
 Note, if your array is computed within the surface module each \np{nn_fsbc}{nn\_fsbc} time\_step,
 add the field definition within the field\_group defined with the id "SBC":
 \xmlcode{<field_group id="SBC" ...>} which has been defined with the correct frequency of operations
 (iom\_set\_field\_attr in \mdl{iom})
-\item add your field in one of the output files defined in iodef.xml
+\item add your field to one of the output files defined in file\_def\_nemo-*.xml
   (again see subsequent sections for syntax and rules)
 \begin{xmllines}
 <file id="file1" .../>
@@ -425,16 +411,31 @@ In \NEMO, we used the following contexts (that can be defined in any order):
 
 The XML file can be split in different parts to improve its readability and facilitate its use.
 The inclusion of XML files into the main XML file can be done through the attribute src:
-\xmlline|<context src="./nemo_def.xml" />|
+\begin{xmllines}
+<context id="nemo" src="./context_nemo.xml"/>
+\end{xmllines}
 
-\noindent In \NEMO, by default, the field definition is done in 3 separate files (
+\noindent In \NEMO, by default, the field definition is done in 4 separate files (
 \path{cfgs/SHARED/field_def_nemo-oce.xml},
-\path{cfgs/SHARED/field_def_nemo-pisces.xml} and
-\path{cfgs/SHARED/field_def_nemo-ice.xml} ) and the  domain definition is done in another file ( \path{cfgs/SHARED/domain_def_nemo.xml} )
+\path{cfgs/SHARED/field_def_nemo-pisces.xml}, \path{cfgs/SHARED/field_def_nemo-innerttrc.xml} and
+\path{cfgs/SHARED/field_def_nemo-ice.xml} ) and the grid definition is done in 3 other files (\path{cfgs/SHARED/grid_def_nemo.xml}, \path{cfgs/SHARED/axis_def_nemo.xml} and \path{cfgs/SHARED/domain_def_nemo.xml})
 that
-are included in the main iodef.xml file through the following commands:
+are included in each context\_nemo.xml file through the following commands:
 \begin{xmllines}
-<context id="nemo" src="./context_nemo.xml"/>
+<!-- Fields definition -->
+    <field_definition src="./field_def_nemo-oce.xml"      />   <!--  NEMO ocean dynamics               -->
+    <field_definition src="./field_def_nemo-ice.xml"      />   <!--  NEMO ocean sea ice                -->
+    <field_definition src="./field_def_nemo-pisces.xml"   />   <!--  NEMO ocean biogeochemical         -->
+    <field_definition src="./field_def_nemo-innerttrc.xml"/>   <!--  NEMO ocean inert passive tracer   -->
+
+<!-- Files definition -->
+    <file_definition src="./file_def_nemo-oce.xml"/>       <!--  NEMO ocean dynamics                   -->
+    <file_definition src="./file_def_nemo-ice.xml"/>       <!--  NEMO ocean sea ice                    -->
+    <file_definition src="./file_def_nemo-innerttrc.xml"/> <!--  NEMO ocean inert passive tracer       -->
+
+    <axis_definition src="./axis_def_nemo.xml"/>           <!-- Axis definition -->
+    <domain_definition src="./domain_def_nemo.xml"/>       <!-- Domain definition -->
+    <grid_definition   src="./grid_def_nemo.xml"/>         <!-- Grids definition -->
 \end{xmllines}
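+
+As an illustration of what one of these included files may contain, here is a minimal sketch of a file definition in the spirit of file\_def\_nemo-oce.xml (the field, frequencies and file names below are only examples):
+\begin{xmllines}
+<file_definition type="multiple_file" name="@expname@_@freq@" sync_freq="10d" min_digits="4">
+  <file_group id="5d" output_freq="5d" output_level="10" enabled=".TRUE.">  <!-- 5-day files -->
+    <file id="file1" name_suffix="_grid_T" description="ocean T grid variables">
+      <field field_ref="toce" name="thetao" />  <!-- temperature, as defined in field_def_nemo-oce.xml -->
+    </file>
+  </file_group>
+</file_definition>
+\end{xmllines}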
 
 %% =================================================================================================
@@ -1415,7 +1416,7 @@ each processing region.
   \label{tab:DIA_NC4}
 \end{table}
 
-When \key{iomput} is activated with \key{netcdf4} chunking and compression parameters for fields produced via
+When \key{xios} is activated with \key{netcdf4}, chunking and compression parameters for fields produced via
 \rou{iom\_put} calls are set via an equivalent and identically named namelist to \nam{nc4}{nc4} in
 \textit{xmlio\_server.def}.
 Typically this namelist serves the mean files whilst the \nam{nc4}{nc4} in the main namelist file continues to
@@ -1438,7 +1439,7 @@ Each trend of the dynamics and/or temperature and salinity time evolution equati
 \mdl{trddyn} and/or \mdl{trdtra} modules (see TRD directory) just after their computation
 (\ie\ at the end of each \textit{dyn....F90} and/or \textit{tra....F90} routines).
 This capability is controlled by options offered in \nam{trd}{trd} namelist.
-Note that the output are done with XIOS, and therefore the \key{iomput} is required.
+Note that the outputs are written with XIOS, and therefore \key{xios} is required.
 
 What is done depends on the \nam{trd}{trd} logical set to \forcode{.true.}:
 
@@ -1550,7 +1551,7 @@ Output data can be written in ascii files (\np[=.true.]{ln_flo_ascii}{ln\_flo\_a
 In that case, output filename is trajec\_float.
 
 Another possiblity of writing format is Netcdf (\np[=.false.]{ln_flo_ascii}{ln\_flo\_ascii}) with
-\key{iomput} and outputs selected in iodef.xml.
+\key{xios} and outputs selected in iodef.xml.
 Here it is an example of specification to put in files description section:
 
 \begin{xmllines}
diff --git a/doc/latex/NEMO/subfiles/chap_SBC.tex b/doc/latex/NEMO/subfiles/chap_SBC.tex
index d48e5e25..8b022f76 100644
--- a/doc/latex/NEMO/subfiles/chap_SBC.tex
+++ b/doc/latex/NEMO/subfiles/chap_SBC.tex
@@ -1651,7 +1651,7 @@ The amount of information is controlled by two integer parameters:
 Iceberg trajectories can also be written out and this is enabled by setting \np{nn_sample_rate}{nn\_sample\_rate}~$>$~0.
 A non-zero value represents how many timesteps between writes of information into the output file.
 These output files are in NETCDF format.
-When \key{mpp\_mpi} is defined, each output file contains only those icebergs in the corresponding processor.
+When running with multiple processes, each output file contains only the icebergs handled by the corresponding process.
 Trajectory points are written out in the order of their parent iceberg in the model's "linked list" of icebergs.
 So care is needed to recreate data for individual icebergs,
 since its trajectory data may be spread across multiple files.
diff --git a/doc/latex/NEMO/subfiles/chap_misc.tex b/doc/latex/NEMO/subfiles/chap_misc.tex
index ab279184..8b4a9479 100644
--- a/doc/latex/NEMO/subfiles/chap_misc.tex
+++ b/doc/latex/NEMO/subfiles/chap_misc.tex
@@ -300,31 +300,6 @@ The self-compensated summation method should be used in all summation in i- and/
 See \mdl{closea} module for an example.
 Note also that this implementation may be sensitive to the optimization level.
 
-%% =================================================================================================
-\subsection{MPP scalability}
-\label{subsec:MISC_mppsca}
-
-The default method of communicating values across the north-fold in distributed memory applications (\key{mpp\_mpi})
-uses a \textsc{MPI\_ALLGATHER} function to exchange values from each processing region in
-the northern row with every other processing region in the northern row.
-This enables a global width array containing the top 4 rows to be collated on every northern row processor and then
-folded with a simple algorithm.
-Although conceptually simple, this "All to All" communication will hamper performance scalability for
-large numbers of northern row processors.
-From version 3.4 onwards an alternative method is available which only performs direct "Peer to Peer" communications
-between each processor and its immediate "neighbours" across the fold line.
-This is achieved by using the default \textsc{MPI\_ALLGATHER} method during initialisation to
-help identify the "active" neighbours.
-Stored lists of these neighbours are then used in all subsequent north-fold exchanges to
-restrict exchanges to those between associated regions.
-The collated global width array for each region is thus only partially filled but is guaranteed to
-be set at all the locations actually required by each individual for the fold operation.
-This alternative method should give identical results to the default \textsc{ALLGATHER} method and
-is recommended for large values of \np{jpni}{jpni}.
-The new method is activated by setting \np{ln_nnogather}{ln\_nnogather} to be true (\nam{mpp}{mpp}).
-The reproducibility of results using the two methods should be confirmed for each new,
-non-reference configuration.
-
 %% =================================================================================================
 \section{Model optimisation, control print and benchmark}
 \label{sec:MISC_opt}
diff --git a/doc/latex/global/coding_rules.tex b/doc/latex/global/coding_rules.tex
index 7d52fc4f..dc220da1 100644
--- a/doc/latex/global/coding_rules.tex
+++ b/doc/latex/global/coding_rules.tex
@@ -563,8 +563,7 @@ Missing rule on structure name??
 
 \subsection{Bounds checking}
 
-\NEMO\ is able to run when an array bounds checking option is enabled
-(provided the cpp key \texttt{key\_vectopt\_loop} is not defined). \\
+\NEMO\ is able to run when an array bounds checking option is enabled. \\
 Thus, constructs of the following form are disallowed:
 
 \begin{forlines}
diff --git a/src/OCE/DIA/diaptr.F90 b/src/OCE/DIA/diaptr.F90
index 180a2eb0..501e54f6 100644
--- a/src/OCE/DIA/diaptr.F90
+++ b/src/OCE/DIA/diaptr.F90
@@ -463,7 +463,7 @@ CONTAINS
 
       ! l_diaptr is defined with iom_use
       !   --> dia_ptr_init must be done after the call to iom_init
-      !   --> cannot be .TRUE. without cpp key: key_iom -->  nbasin define by iom_init is initialized
+      !   --> cannot be .TRUE. without cpp key: key_xios -->  nbasin defined by iom_init is initialized
       l_diaptr = iom_use( 'zomsf'    ) .OR. iom_use( 'zotem'    ) .OR. iom_use( 'zosal'    ) .OR.  &
          &       iom_use( 'zosrf'    ) .OR. iom_use( 'sopstove' ) .OR. iom_use( 'sophtove' ) .OR.  &
          &       iom_use( 'sopstbtr' ) .OR. iom_use( 'sophtbtr' ) .OR. iom_use( 'sophtadv' ) .OR.  &
diff --git a/src/OCE/DOM/dtatsd.F90 b/src/OCE/DOM/dtatsd.F90
index e4059c25..7ffccc4c 100644
--- a/src/OCE/DOM/dtatsd.F90
+++ b/src/OCE/DOM/dtatsd.F90
@@ -128,7 +128,6 @@ CONTAINS
       !!
       !! ** Method  : - call fldread routine
       !!              - ORCA_R2: add some hand made alteration to read data
-      !!              - 'key_orca_lev10' interpolates on 10 times more levels
       !!              - s- or mixed z-s coordinate: vertical interpolation on model mesh
       !!              - ln_tsd_dmp=F: deallocates the T-S data structure
       !!                as T-S data are no are used
diff --git a/src/OCE/IOM/iom.F90 b/src/OCE/IOM/iom.F90
index 336385b3..dbccf2c7 100644
--- a/src/OCE/IOM/iom.F90
+++ b/src/OCE/IOM/iom.F90
@@ -1376,7 +1376,7 @@ CONTAINS
             RETURN
          ENDIF
          !
-      ELSE        ! read using XIOS. Only if KEY_IOMPUT is defined
+      ELSE        ! read using XIOS. Only if key_xios is defined
 #if defined key_xios
 !would be good to be able to check which context is active and swap only if current is not restart
          idvar = iom_varid( kiomid, cdvar )
diff --git a/src/OCE/LBC/mpp_lnk_icb_generic.h90 b/src/OCE/LBC/mpp_lnk_icb_generic.h90
index b2718466..8798f3e0 100644
--- a/src/OCE/LBC/mpp_lnk_icb_generic.h90
+++ b/src/OCE/LBC/mpp_lnk_icb_generic.h90
@@ -37,8 +37,8 @@
       INTEGER  ::   jl   ! dummy loop indices
       INTEGER  ::   imigr, iihom, ijhom        ! local integers
       INTEGER  ::   ipreci, iprecj             !   -       -
-      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
-      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
+      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for mpi_isend
+      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for mpi_isend
       !!
       REAL(PRECISION), DIMENSION(1-kexti:jpi+kexti,nn_hls+kextj,2) ::   r2dns, r2dsn
       REAL(PRECISION), DIMENSION(1-kextj:jpj+kextj,nn_hls+kexti,2) ::   r2dwe, r2dew
diff --git a/src/OCE/SBC/cpl_oasis3.F90 b/src/OCE/SBC/cpl_oasis3.F90
index ebdd68fc..0147ffda 100644
--- a/src/OCE/SBC/cpl_oasis3.F90
+++ b/src/OCE/SBC/cpl_oasis3.F90
@@ -16,7 +16,6 @@ MODULE cpl_oasis3
 
    !!----------------------------------------------------------------------
    !!   'key_oasis3'                    coupled Ocean/Atmosphere via OASIS3-MCT
-   !!   'key_oa3mct_v3'                 to be added for OASIS3-MCT version 3
    !!----------------------------------------------------------------------
    !!   cpl_init     : initialization of coupled mode communication
    !!   cpl_define   : definition of grid and fields
diff --git a/tests/ADIAB_WAVE/EXPREF/namelist_cfg b/tests/ADIAB_WAVE/EXPREF/namelist_cfg
index 1fffb700..e180de22 100644
--- a/tests/ADIAB_WAVE/EXPREF/namelist_cfg
+++ b/tests/ADIAB_WAVE/EXPREF/namelist_cfg
@@ -432,13 +432,13 @@
 !!======================================================================
 !!                  ***  Miscellaneous namelists  ***                 !!
 !!                                                                    !!
-!!   nammpp            Massively Parallel Processing                    ("key_mpp_mpi")
+!!   nammpp            Massively Parallel Processing
 !!   namctl            Control prints                                   (default: OFF)
 !!   namsto            Stochastic parametrization of EOS                (default: OFF)
 !!======================================================================
 !
 !-----------------------------------------------------------------------
-&nammpp        !   Massively Parallel Processing                        ("key_mpp_mpi)
+&nammpp        !   Massively Parallel Processing
 !-----------------------------------------------------------------------
 /
 !-----------------------------------------------------------------------
diff --git a/tests/ISOMIP+/MY_SRC/dtatsd.F90 b/tests/ISOMIP+/MY_SRC/dtatsd.F90
index eb04a744..cdee4ac5 100644
--- a/tests/ISOMIP+/MY_SRC/dtatsd.F90
+++ b/tests/ISOMIP+/MY_SRC/dtatsd.F90
@@ -153,7 +153,6 @@ CONTAINS
       !!
       !! ** Method  : - call fldread routine
       !!              - ORCA_R2: add some hand made alteration to read data
-      !!              - 'key_orca_lev10' interpolates on 10 times more levels
       !!              - s- or mixed z-s coordinate: vertical interpolation on model mesh
       !!              - ln_tsd_dmp=F: deallocates the T-S data structure
       !!                as T-S data are no are used
-- 
GitLab