diff --git a/cfgs/AGRIF_DEMO/EXPREF/namelist_cfg b/cfgs/AGRIF_DEMO/EXPREF/namelist_cfg
index 94bb5c091f998f6dd59cc4c92c6429fd76b3bed3..363ab44a9456f880a5c337a0d4236422c79f60a5 100644
--- a/cfgs/AGRIF_DEMO/EXPREF/namelist_cfg
+++ b/cfgs/AGRIF_DEMO/EXPREF/namelist_cfg
@@ -93,7 +93,7 @@
    ln_traqsr   = .true.    !  Light penetration in the ocean            (T => fill namtra_qsr)
    ln_ssr      = .true.    !  Sea Surface Restoring on T and/or S       (T => fill namsbc_ssr)
    ln_rnf      = .true.    !  runoffs                                   (T => fill namsbc_rnf)
-   nn_fwb      = 0         !  FreshWater Budget: =0 unchecked
+   nn_fwb      = 1         !  FreshWater Budget: =1 global mean of e-p-r set to zero
 /
 !-----------------------------------------------------------------------
 &namsbc_blk    !   namsbc_blk  generic Bulk formula                     (ln_blk =T)
diff --git a/cfgs/SHARED/field_def_nemo-oce.xml b/cfgs/SHARED/field_def_nemo-oce.xml
index 240c3913dad841c05331260dd38de1297f7c6f62..8c6362f30e2177c69d9db0a82e0fc0d486eac802 100644
--- a/cfgs/SHARED/field_def_nemo-oce.xml
+++ b/cfgs/SHARED/field_def_nemo-oce.xml
@@ -795,6 +795,7 @@ that are available in the tidal-forcing implementation (see
     <field id="sshtot"     long_name="global mean ssh"                              standard_name="global_average_sea_level_change"                unit="m"    />
     <field id="sshsteric"  long_name="global mean ssh steric"                       standard_name="global_average_steric_sea_level_change"         unit="m"    />
     <field id="sshthster"  long_name="global mean ssh thermosteric"                 standard_name="global_average_thermosteric_sea_level_change"   unit="m"    />
+    <field id="sshice"     long_name="global mean ssh equivalent for ice and snow"  standard_name="global_average_ice_snow_sea_level_change"       unit="m"    />
     <field id="masstot"    long_name="global total mass"                            standard_name="sea_water_mass"                                 unit="kg"   />
     <field id="temptot"    long_name="global mean temperature"                      standard_name="sea_water_potential_temperature"                unit="degC" />
     <field id="saltot"     long_name="global mean salinity"                         standard_name="sea_water_salinity"                             unit="1e-3" />
diff --git a/cfgs/SHARED/namelist_ref b/cfgs/SHARED/namelist_ref
index dcd680dde927413ae84a44c71a542489f49a9658..72affc4a899d521b5ddca5da6cdb6475cafa0901 100644
--- a/cfgs/SHARED/namelist_ref
+++ b/cfgs/SHARED/namelist_ref
@@ -667,8 +667,13 @@
 &namsbc_fwb    !   freshwater-budget adjustment                         (nn_fwb > 0)
 !-----------------------------------------------------------------------
    rn_fwb0     = 0.0          ! Initial freshwater adjustment flux [kg/m2/s] (nn_fwb = 2)
+   nn_fwb_voltype = 1         ! = 1 : Control ICE+OCEAN volume
+      !                       ! = 2 : Control     OCEAN volume
+   ln_hvolg_var = .false.     ! = T : Set an analytical variation of volume:
+      rn_hvolg_amp = 17.e-3      ! Peak-to-peak seasonal variation (m)
+      rn_hvolg_trd =  0.0        ! Trend (m/s)
+      nn_hvolg_mth = 8           ! Month when volume anomaly crosses 0 (1-12)
 /
-
 !!======================================================================
 !!               ***  Lateral boundary condition  ***                 !!
 !!                                                                    !!
diff --git a/ext/AGRIF/AGRIF_FILES/modcluster.F90 b/ext/AGRIF/AGRIF_FILES/modcluster.F90
index 88aab47c442812295deb5766ad751e22c406a42e..9ca8a56d960bc5541b48b80fcacc1aee5f16ce66 100644
--- a/ext/AGRIF/AGRIF_FILES/modcluster.F90
+++ b/ext/AGRIF/AGRIF_FILES/modcluster.F90
@@ -1179,6 +1179,8 @@ recursive subroutine Agrif_Init_Hierarchy_Parallel_2 ( g, procname )
     do is = 1, g % child_seq % nb_seqs
 !
         parcours => Agrif_seq_select_child(g,is)
+
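+        ! With parallel sisters, this process may not belong to any grid of the current sequence: skip it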
+        if (.not.associated(parcours)) cycle
 !
 !     Instanciation of the variables of the current grid
         call Agrif_Instance(parcours % gr)
diff --git a/ext/AGRIF/AGRIF_FILES/modmpp.F90 b/ext/AGRIF/AGRIF_FILES/modmpp.F90
index c7a028643baffba758234e7bab8a52fff1cd22c6..47587284bbe6c6adfc0a8c3ef6a9a2e778353cae 100644
--- a/ext/AGRIF/AGRIF_FILES/modmpp.F90
+++ b/ext/AGRIF/AGRIF_FILES/modmpp.F90
@@ -38,6 +38,9 @@ module Agrif_Mpp
     integer, private :: Agrif_MPI_prec
 !
     private :: Agrif_get_proc_info
+
+    integer :: Agrif_GlobProcRank   ! rank of this process in the global communicator
+    integer :: Agrif_totalprocs     ! total number of MPI processes
 !
 contains
 !
@@ -76,6 +79,11 @@ subroutine Agrif_MPI_Init ( comm )
     else
         call Agrif_MPI_switch_comm(MPI_COMM_WORLD)
     endif
+
+! Global rank
+    Agrif_GlobProcRank = Agrif_ProcRank    
+! Total number of cores
+    Agrif_totalprocs = Agrif_Nbprocs
 !
     Agrif_Mygrid % communicator = Agrif_mpi_comm
 !
diff --git a/ext/AGRIF/AGRIF_FILES/modseq.F90 b/ext/AGRIF/AGRIF_FILES/modseq.F90
index c1136067d64a2a9fca26bd72bb8fb44284662dc2..6518a0577676c926dd06dc8631ffb295b105bab4 100644
--- a/ext/AGRIF/AGRIF_FILES/modseq.F90
+++ b/ext/AGRIF/AGRIF_FILES/modseq.F90
@@ -5,6 +5,8 @@ module Agrif_seq
     use Agrif_Arrays
 !
     implicit none
+
+    integer, private, parameter :: subdomain_minwidth = 6   ! minimum subdomain width (in grid points) allowed when decomposing a grid
 !
 contains
 !
@@ -108,6 +110,10 @@ subroutine Agrif_seq_build_required_proclist ( grid )
         proc_rect => proc_rect % next
 !
     enddo
+    if (agrif_debug_parallel_sisters) then
+        if (Agrif_GlobProcRank == 0) print *,'Grid ',grid%fixedrank,' requires'
+        if (Agrif_GlobProcRank == 0) call Agrif_pl_print(grid % required_proc_list)
+    endif
 !---------------------------------------------------------------------------------------------------
 end subroutine Agrif_seq_build_required_proclist
 !===================================================================================================
@@ -175,11 +181,19 @@ subroutine Agrif_seq_create_proc_sequences ( grid )
 ! Create sequence structure
     cur_seq = 0
     grid % child_seq => Agrif_seq_allocate_list(nb_seqs)
+    if (agrif_debug_parallel_sisters) then
+        if (Agrif_GlobProcRank == 0) print *,'GRID SEQ = ',grid%fixedrank,nb_seqs
+    endif
     child_p => sorted_child_list % first
     do while ( associated(child_p) )
         if ( cur_seq /= child_p % gr % seq_num ) then
             cur_seq = child_p % gr % seq_num
         endif
+        if (agrif_debug_parallel_sisters) then
+            if (Agrif_GlobProcRank == 0) then
+                print *,'Grid = ',child_p % gr %fixedrank,' Seq = ',child_p % gr % seq_num
+            endif
+        endif
         call Agrif_seq_add_grid(grid % child_seq,cur_seq,child_p% gr)
         child_p => child_p % next
     enddo
@@ -446,13 +460,24 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
     type(Agrif_Proc),      pointer  :: proc
     type(Agrif_Proc_p),    pointer  :: pp
     type(Agrif_Proc), dimension(:), allocatable, target :: procarray
-    type(Agrif_Grid), dimension(:), allocatable         :: gridarray
+    type(Agrif_PGrid), dimension(:), allocatable         :: gridarray
     type(Agrif_Sequence_List), pointer :: seqlist
-    real,dimension(:),allocatable :: grid_costs
+    real,dimension(:),allocatable :: grid_costs, grid_sizes
+    real :: total_grid_sizes
     integer,dimension(:), allocatable :: nbprocs_per_grid
-    integer :: i1, i2, j1, j2
+    logical,dimension(:), allocatable :: proc_was_required
+    integer :: i1, i2, j1, j2, i, j
     real :: max_cost
     integer :: max_index
+    integer,dimension(2) :: closest_nb_procs
+    integer :: number_of_procs_toremove, number_of_procs_toadd, add_possible, number_of_procs_added, nproc
+    logical,dimension(:,:),allocatable :: ispossibletoadd
+    logical :: inform_possible_nbprocs
+    type(Agrif_Grid)                    , pointer :: save_grid
+    integer :: maximum_number_of_procs_to_add, ip2, ip3
+    integer :: imax, jmax, deltaij
+
+
 !
     seqlist => coarse_grid % child_seq
     if ( .not. associated(seqlist) ) return
@@ -466,6 +491,11 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
 !
 ! For each sequence...
     do is = 1,seqlist % nb_seqs
+        if (agrif_debug_parallel_sisters) then
+            if (Agrif_GlobProcRank == 0) then
+                print *,'Sequence = ',is
+            endif
+        endif
 !
         proclist => seqlist % sequences(is) % proclist
         gridlist => seqlist % sequences(is) % gridlist
@@ -478,6 +508,7 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
         ngrids = gridlist % nitems
         allocate(gridarray(1:ngrids))
         allocate(grid_costs(1:ngrids))
+        allocate(grid_sizes(1:ngrids))
         allocate(nbprocs_per_grid(1:ngrids))
 
       nbprocs_per_grid = 0
@@ -487,7 +518,7 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
         ig = 0
         do while ( associated(gp) )
             grid => gp % gr
-            ig = ig+1 ; gridarray(ig) = grid
+            ig = ig+1 ; gridarray(ig)%gr => grid
             pp => grid % required_proc_list % first
             do while ( associated(pp) )
                 procarray( pp % proc % pn+1 ) % grid_id = grid % fixedrank
@@ -503,13 +534,32 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
 
 ! Estimate current costs
 
+        total_grid_sizes = 0.
         do ig = 1, ngrids
-          i1 = gridarray(ig)%ix(1)
-          i2 = gridarray(ig)%ix(1)+gridarray(ig)%nb(1)/gridarray(ig)%spaceref(1)-1
-          j1 = gridarray(ig)%ix(2)
-          j2 = gridarray(ig)%ix(2)+gridarray(ig)%nb(2)/gridarray(ig)%spaceref(2)-1
+          i1 = gridarray(ig)%gr%ix(1)
+          i2 = gridarray(ig)%gr%ix(1)+gridarray(ig)%gr%nb(1)/gridarray(ig)%gr%spaceref(1)-1
+          j1 = gridarray(ig)%gr%ix(2)
+          j2 = gridarray(ig)%gr%ix(2)+gridarray(ig)%gr%nb(2)/gridarray(ig)%gr%spaceref(2)-1
+
+          grid_sizes(ig) = (i2-i1+1)*(j2-j1+1)
+          total_grid_sizes = total_grid_sizes + grid_sizes(ig)
+
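+          ! Temporarily make this child grid the current AGRIF instance so the cost estimate sees its geometry (restored below)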
+          save_grid => Agrif_Curgrid
+          Call Agrif_instance(gridarray(ig)%gr)
+
           Call Agrif_estimate_parallel_cost(i1,i2,j1,j2,nbprocs_per_grid(ig),grid_costs(ig))
-          grid_costs(ig) = grid_costs(ig) * gridarray(ig)%timeref(1)
+
+          Call Agrif_instance(save_grid)
+
+          grid_costs(ig) = grid_costs(ig) * gridarray(ig)%gr%timeref(1)
+        enddo
+
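+        ! Flag the processors explicitly required by a grid: only the non-required ones may be released when adjusting counts below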
+        allocate(proc_was_required(proclist%nitems))
+        proc_was_required = .TRUE.
+        do ip = 1,proclist%nitems
+            if ( procarray( ip ) % grid_id == 0 ) then
+                proc_was_required(ip) = .FALSE.
+            endif
         enddo
 
         ig = 1
@@ -526,18 +576,220 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
               enddo
 
               ig = max_index
-              procarray( ip ) % grid_id = gridarray(ig) % fixedrank
 
-              nbprocs_per_grid(ig) =  nbprocs_per_grid(ig) + 1
-              i1 = gridarray(ig)%ix(1)
-              i2 = gridarray(ig)%ix(1)+gridarray(ig)%nb(1)/gridarray(ig)%spaceref(1)-1
-              j1 = gridarray(ig)%ix(2)
-              j2 = gridarray(ig)%ix(2)+gridarray(ig)%nb(2)/gridarray(ig)%spaceref(2)-1
+
+              i1 = gridarray(ig)%gr%ix(1)
+              i2 = gridarray(ig)%gr%ix(1)+gridarray(ig)%gr%nb(1)/gridarray(ig)%gr%spaceref(1)-1
+              j1 = gridarray(ig)%gr%ix(2)
+              j2 = gridarray(ig)%gr%ix(2)+gridarray(ig)%gr%nb(2)/gridarray(ig)%gr%spaceref(2)-1
+              if (agrif_debug_parallel_sisters) then
+                  if (Agrif_GlobProcRank == 0) then
+                      print *,'Grid = ',ig,' Cost = ',grid_costs(ig)
+                      print *,'Total number of procs = ',nbprocs_per_grid(ig) + 1
+                      print *,'Size = ',gridarray(ig)%gr%nb(1),gridarray(ig)%gr%nb(2),(i2-i1+1)*(j2-j1+1)
+                      print *,'Total Size = ',nint(total_grid_sizes)
+                  endif
+              endif
+
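+              ! Count the processors not yet assigned to any grid (grid_id == 0)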
+              maximum_number_of_procs_to_add = 1
+              do ip2=ip+1,proclist%nitems
+                 if (procarray(ip2)%grid_id == 0) then
+                         maximum_number_of_procs_to_add = maximum_number_of_procs_to_add + 1
+                 endif
+              enddo
+
+              if (agrif_debug_parallel_sisters) then
+                  if (Agrif_GlobProcRank == 0) then
+                         print *,'Number of procs remaining = ',maximum_number_of_procs_to_add
+                  endif
+              endif
+
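+              ! Heuristic: give this grid a share of the remaining processors roughly proportional to half its relative size, at least one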
+              maximum_number_of_procs_to_add =                          &
+                      max(maximum_number_of_procs_to_add/               &
+                      (nint(2.*total_grid_sizes/grid_sizes(ig))),1)
+
+              ip3 = 0
+              ip2 = ip
+              do while (ip3 < maximum_number_of_procs_to_add)
+                 if (procarray(ip2)%grid_id == 0) then
+                       ip3 = ip3 + 1
+                       procarray( ip2 ) % grid_id = gridarray(ig) %gr % fixedrank
+                 endif 
+                 ip2 = ip2 + 1
+              enddo
+              if (agrif_debug_parallel_sisters) then
+                  if (Agrif_GlobProcRank == 0) then
+                         print *,'Number of procs added = ',maximum_number_of_procs_to_add
+                  endif
+              endif
+
+              !procarray( ip ) % grid_id = gridarray(ig) %gr % fixedrank
+
+              !nbprocs_per_grid(ig) =  nbprocs_per_grid(ig) + 1
+              nbprocs_per_grid(ig) =  nbprocs_per_grid(ig) + maximum_number_of_procs_to_add
+
+              save_grid => Agrif_Curgrid
+              Call Agrif_instance(gridarray(ig)%gr)
+
               Call Agrif_estimate_parallel_cost(i1,i2,j1,j2,nbprocs_per_grid(ig),grid_costs(ig))
-              grid_costs(ig) = grid_costs(ig) * gridarray(ig)%timeref(1)
+
+              Call Agrif_instance(save_grid)
+
+              grid_costs(ig) = grid_costs(ig) * gridarray(ig)%gr%timeref(1)
+              if (agrif_debug_parallel_sisters) then
+                  if (Agrif_GlobProcRank == 0) then
+                         print *,'Current grid costs = ',grid_costs(1:ngrids)
+                  endif
+              endif
 
             endif
         enddo
+
+! Adjust the number of processors per grid
+        if (agrif_debug_parallel_sisters) then
+            if (Agrif_GlobProcRank == 0) then
+                print *,'Now adjust the number of procs'
+            endif
+        endif
+        do ig = 1,ngrids
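+            ! Find the i x j processor layout closest to (without exceeding) the current count, with subdomains at least subdomain_minwidth points wide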
+            imax = gridarray(ig)%gr%nb(1)/subdomain_minwidth
+            jmax = gridarray(ig)%gr%nb(2)/subdomain_minwidth
+            closest_nb_procs=-1
+            deltaij=huge(1)
+            do j=1,jmax
+              do i=1,min(imax,nbprocs_per_grid(ig)/j)
+                if ((nbprocs_per_grid(ig)-i*j) < deltaij) then
+                        deltaij = nbprocs_per_grid(ig)-i*j
+                        closest_nb_procs(1)=i
+                        closest_nb_procs(2)=j
+                endif
+              enddo
+            enddo
+
+            if (agrif_debug_parallel_sisters) then
+                if (Agrif_GlobProcRank == 0) then
+                    print *,'The grid ',ig,' has ',nbprocs_per_grid(ig),' procs'
+                    print *,'The closest decomposition is ',closest_nb_procs(1)*closest_nb_procs(2), &
+                           closest_nb_procs(1),closest_nb_procs(2)
+                endif
+            endif
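+            ! Release the excess processors, never removing those originally required by the grid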
+            number_of_procs_toremove = nbprocs_per_grid(ig) - closest_nb_procs(1)*closest_nb_procs(2)
+            do while (number_of_procs_toremove > 0)
+                do ip = 1,proclist%nitems
+                  if ((procarray( ip ) % grid_id == gridarray(ig)%gr % fixedrank).AND.(.NOT. proc_was_required(ip))) then
+                    procarray( ip ) % grid_id = 0
+                    number_of_procs_toremove = number_of_procs_toremove - 1
+                    nbprocs_per_grid(ig) = nbprocs_per_grid(ig) - 1
+                    exit
+                  endif
+                enddo
+            enddo
+            if (agrif_debug_parallel_sisters) then
+                if (Agrif_GlobProcRank == 0) then
+                    print *,'After correction : The grid ',ig,' has ',nbprocs_per_grid(ig),' procs'
+                endif
+            endif
+        enddo
+
+! Now add the remaining procs
+        ! Count the number of procs to add
+        number_of_procs_toadd = 0
+        do ip = 1,proclist%nitems
+            if ( procarray( ip ) % grid_id == 0 ) then
+                number_of_procs_toadd = number_of_procs_toadd + 1
+            endif
+        enddo
+
+        inform_possible_nbprocs = .FALSE.
+
+        if (number_of_procs_toadd > 0) then
+            ! allocate(ispossibletoadd(ngrids,-2*number_of_procs_toadd:2*number_of_procs_toadd))
+            ! ispossibletoadd = .FALSE.
+            ! do ig=1,ngrids
+            !     mask_possible_procs = .TRUE.
+            !     do j = 1,Agrif_maxprocs1D
+            !         if (gridarray(ig)%nb(2)/j < subdomain_minwidth) mask_possible_procs(:,j) = .FALSE.
+            !     enddo
+            !     do i = 1, Agrif_maxprocs1D
+            !         if (gridarray(ig)%nb(1)/i < subdomain_minwidth) mask_possible_procs(i,:) = .FALSE.
+            !     enddo
+            !     do j=-2*number_of_procs_toadd,2*number_of_procs_toadd
+            !         add_possible=minval(abs(possible_procs-(nbprocs_per_grid(ig)+j)),mask=mask_possible_procs)
+            !         ispossibletoadd(ig,j) = add_possible == 0
+            !     enddo
+            ! enddo
+
+            
+            ! if (Agrif_GlobProcRank==0) then
+            !     print *,'Possible add'
+            ! do ig=1,ngrids
+              
+            !   do j=-2*number_of_procs_toadd,2*number_of_procs_toadd
+            !     if (ispossibletoadd(ig,j)) then
+            !         print *,'On grid = ',ig,' , ',j,' is ok'
+            !     endif
+            !   enddo
+            ! enddo
+            ! endif
+            !deallocate(ispossibletoadd)
+
+
+
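+        ! Place the remaining processors, largest batch first, on the first grid whose decomposition accepts the increased count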
+        do while (number_of_procs_toadd > 0)
+            donproc: do nproc = number_of_procs_toadd, 1, -1
+                number_of_procs_added = 0
+                do ig = 1,ngrids
+                ! test the possibility to add to grid ig
+                    imax = gridarray(ig)%gr%nb(1)/subdomain_minwidth
+                    jmax = gridarray(ig)%gr%nb(2)/subdomain_minwidth
+                    add_possible=1
+                    externj: do j=1,jmax
+                      do i=1,imax
+                        if (i*j == nbprocs_per_grid(ig)+nproc) then
+                          add_possible = 0
+                          exit externj
+                        endif
+                      enddo
+                    enddo externj
+                    if (add_possible == 0) then
+                        do ip = 1,proclist%nitems
+                            if ( procarray( ip ) % grid_id == 0 ) then
+                                procarray( ip ) % grid_id =gridarray(ig)%gr % fixedrank
+                                nbprocs_per_grid(ig) = nbprocs_per_grid(ig) + 1
+                                number_of_procs_added = number_of_procs_added + 1
+                                if (agrif_debug_parallel_sisters) then
+                                    if (Agrif_GlobProcRank == 0) then
+                                        print *,'The proc can be added to the grid ',ig
+                                    endif
+                                endif
+                                if (number_of_procs_added == nproc) exit donproc
+                            endif
+                        enddo
+                        exit
+                    endif
+                enddo
+            end do donproc
+            if (number_of_procs_added == 0) then
+                inform_possible_nbprocs = .TRUE.
+                exit
+            endif
+            number_of_procs_toadd = number_of_procs_toadd - nproc
+            if (agrif_debug_parallel_sisters) then
+                if (Agrif_GlobProcRank == 0) then
+                    print *,'Remaining number of procs to add = ',number_of_procs_toadd
+                endif
+            endif
+        enddo
+        endif
+
+        if (inform_possible_nbprocs) then
+            if (agrif_debug_parallel_sisters) then
+                if (Agrif_GlobProcRank == 0) then
+                    print *,'Impossible to add more procs'
+                endif
+                stop
+            endif
+        endif
 !
 !     Allocate proc nums to each grid
         gp => gridlist % first
@@ -553,7 +805,13 @@ subroutine Agrif_seq_allocate_procs_to_childs ( coarse_grid )
         enddo
 !
 !     Clean up
-        deallocate(procarray, gridarray, grid_costs, nbprocs_per_grid)
+        deallocate(procarray, gridarray, grid_costs, grid_sizes, nbprocs_per_grid, proc_was_required)
+
+        if (agrif_debug_parallel_sisters) then
+            if (Agrif_GlobProcRank == 0) then
+                print *, 'Exiting modseq'
+            endif
+        endif
 !
     enddo
 !---------------------------------------------------------------------------------------------------
@@ -571,15 +829,20 @@ subroutine Agrif_seq_create_communicators ( grid )
     type(Agrif_Proc),  pointer          :: proc
     integer     :: i, ierr
     integer     :: current_comm, comm_seq, color_seq
+    integer, parameter :: color_seq_not_used = 100000000
+    integer, parameter :: color_seq_not_initialized = -100
 !
     seqlist => grid % child_seq
     if ( .not. associated(seqlist) ) return
 !
     current_comm = grid % communicator
-    color_seq = MPI_COMM_NULL
+    
 !
 ! For each sequence, split the current communicator into as many groups as needed.
     do i = 1,seqlist % nb_seqs
+        color_seq = color_seq_not_initialized ! color_seq must be positive:
+                                              ! initialize it to color_seq_not_initialized (<0)
+                                              ! so that MPI_COMM_SPLIT raises an error if it is never set
 !
 !     Loop over each proclist to give a color to the current process
         gridp => seqlist % sequences(i) % gridlist % first
@@ -599,8 +862,20 @@ subroutine Agrif_seq_create_communicators ( grid )
             gridp => gridp % next
         enddo grid_loop
 !
-        call MPI_COMM_SPLIT(current_comm, color_seq, Agrif_ProcRank, comm_seq, ierr)
-        gridp % gr % communicator = comm_seq
+        if (color_seq == color_seq_not_initialized) then
+            if (agrif_debug_parallel_sisters) then
+                print *,'The proc ',Agrif_Procrank,' is not integrating sequence ',i
+            endif
+            color_seq = color_seq_not_used ! color for non working processors
+        endif
+      !  print *,'sequence = ',i,' current_comm = ',current_comm, 'color_seq = ',color_seq
+        if (color_seq /= color_seq_not_used) then
+           call MPI_COMM_SPLIT(current_comm, color_seq, Agrif_ProcRank, comm_seq, ierr)
+       !    print *,'grid ',gridp % gr % fixedrank,' has communicator ',comm_seq
+             gridp % gr % communicator = comm_seq
+        else
+            call MPI_COMM_SPLIT(current_comm, MPI_UNDEFINED, Agrif_ProcRank, comm_seq, ierr)
+        endif
 !
     enddo
 !---------------------------------------------------------------------------------------------------
@@ -626,6 +901,10 @@ function Agrif_seq_select_child ( g, is ) result ( gridp )
         endif
         gridp => gridp % next
     enddo
+    if (.not.associated(gridp)) then
+        nullify(gridp) ! may happen if not all the processors are in the current sequence
+        return
+    endif
     write(*,'("### Error Agrif_seq_select_child : no grid found in sequence ",i0," (mother G",i0,") for P",i0)')&
         is, g%fixedrank, Agrif_Procrank
     stop
diff --git a/ext/AGRIF/AGRIF_FILES/modtypes.F90 b/ext/AGRIF/AGRIF_FILES/modtypes.F90
index d00cd3e8fa4b4ecdd8d04bb961593a51993a3292..a8a2250fd30ab3b79f3b1c588d8bc4e3b818e7fd 100644
--- a/ext/AGRIF/AGRIF_FILES/modtypes.F90
+++ b/ext/AGRIF/AGRIF_FILES/modtypes.F90
@@ -433,6 +433,7 @@ end type Agrif_Variables_List
     logical :: agrif_debug = .false.        ! may be activaded in users subroutine for debugging purposes
     logical :: agrif_debug_interp = .false. ! may be activaded in users subroutine for debugging interpolations
     logical :: agrif_debug_update = .false. ! may be activaded in users subroutine for debugging updates
+    logical :: agrif_debug_parallel_sisters = .true. ! may be activated in user subroutines for debugging Parallel_Sisters
 
 ! If a grand mother grid is present
     logical :: agrif_coarse = .false.
diff --git a/ext/AGRIF/AGRIF_FILES/modutil.F90 b/ext/AGRIF/AGRIF_FILES/modutil.F90
index 619bcbf9cde9ecc422522eefc958489c33aab3d8..a6aa2c3971fe8825ddb24022e8de21e269af276d 100644
--- a/ext/AGRIF/AGRIF_FILES/modutil.F90
+++ b/ext/AGRIF/AGRIF_FILES/modutil.F90
@@ -540,9 +540,9 @@ recursive subroutine Agrif_Integrate_Parallel ( g, procname )
     integer                     :: i, k, is
 !
 !   Instanciation of the variables of the current grid
-    if ( g % fixedrank /= 0 ) then
+!    if ( g % fixedrank /= 0 ) then
         call Agrif_Instance(g)
-    endif
+!    endif
 !
 ! One step on the current grid
     call procname ()
@@ -632,6 +632,8 @@ recursive subroutine Agrif_Integrate_ChildGrids ( procname )
     else
 ! Continue only if the grid has defined sequences of child integrations.
     if ( .not. associated(save_grid % child_seq) ) return
+
+!    if (Agrif_GlobProcRank == 0) print *,'number of sequences = ',save_grid % child_seq % nb_seqs
 !
     do is = 1, save_grid % child_seq % nb_seqs
 !
@@ -653,10 +655,10 @@ recursive subroutine Agrif_Integrate_ChildGrids ( procname )
 !
     enddo
     endif
-#endif 
+#endif
 
     call Agrif_Instance(save_grid)
-    
+
 !---------------------------------------------------------------------------------------------------
 end subroutine Agrif_Integrate_ChildGrids
 !===================================================================================================
@@ -722,7 +724,7 @@ recursive subroutine Agrif_Integrate_Child_Parallel ( g, procname )
 !
 !     For each sequence, a given processor does integrate only on grid.
         gridp => Agrif_seq_select_child(g,is)
-        call Agrif_Integrate_Child_Parallel(gridp % gr, procname)
+        if (associated(gridp)) call Agrif_Integrate_Child_Parallel(gridp % gr, procname)
 !
     enddo
 !
@@ -775,7 +777,7 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
        coarse_timeref(1:Agrif_Probdim) = rhot
     endif
     close(nunit)
-    
+
     Agrif_UseSpecialValue = .FALSE.
     Agrif_UseSpecialValueFineGrid = .FALSE.
     Agrif_SpecialValue = 0.
@@ -844,7 +846,7 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
 !
 !   Total number of fixed grids
     Agrif_nbfixedgrids = 0
-    
+
 ! If a grand mother grid is declared
 
     if (agrif_coarse) then
@@ -852,7 +854,7 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
 
       Agrif_Coarsegrid % ngridstep = 0
       Agrif_Coarsegrid % grid_id   = -9999
-      
+
     do i = 1, Agrif_Probdim
         Agrif_Coarsegrid%spaceref(i) = coarse_spaceref(i)
         Agrif_Coarsegrid%timeref(i) = coarse_timeref(i)
@@ -864,13 +866,13 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
         Agrif_Coarsegrid % NearRootBorder(i) = .true.
         Agrif_Coarsegrid % DistantRootBorder(i) = .true.
         Agrif_Coarsegrid % nb(i) =Agrif_mygrid%nb(i) / coarse_spaceref(i)
-    enddo      
+    enddo
 
 !   The root coarse grid is a fixed grid
     Agrif_Coarsegrid % fixed = .TRUE.
 !   Level of the root grid
     Agrif_Coarsegrid % level = -1
-    
+
     Agrif_Coarsegrid % grand_mother_grid = .true.
 
 !   Number of the grid pointed by Agrif_Mygrid (root coarse grid)
@@ -878,17 +880,17 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
 !
 !   Number of the root grid as a fixed grid
     Agrif_Coarsegrid % fixedrank = -9999
-    
+
       Agrif_Mygrid%parent => Agrif_Coarsegrid
-      
+
 ! Not used but required to prevent seg fault
       Agrif_Coarsegrid%parent => Agrif_Mygrid
-      
+
       call Agrif_Create_Var(Agrif_Coarsegrid)
 
 ! Reset to null
       Nullify(Agrif_Coarsegrid%parent)
-      
+
       Agrif_Coarsegrid%child_list%nitems = 1
       allocate(Agrif_Coarsegrid%child_list%first)
       allocate(Agrif_Coarsegrid%child_list%last)
@@ -896,7 +898,7 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
       Agrif_Coarsegrid%child_list%last%gr => Agrif_Mygrid
 
     endif
-    
+
     return
 
 98  INQUIRE(FILE='AGRIF_FixedGrids.in',EXIST=BEXIST)
@@ -907,7 +909,7 @@ subroutine Agrif_Init_Grids ( procname1, procname2 )
         print*,'Error opening file AGRIF_FixedGrids.in'
         STOP
     endif
-    
+
 !---------------------------------------------------------------------------------------------------
 end subroutine Agrif_Init_Grids
 !===================================================================================================
@@ -1104,7 +1106,11 @@ subroutine Agrif_Step_Child_adj ( procname )
 !---------------------------------------------------------------------------------------------------
     procedure(step_proc)        :: procname !< Subroutine to call on each grid
 !
-    call Agrif_Integrate_Child_adj(Agrif_Mygrid,procname)
+    if (Agrif_Parallel_sisters) then
+      call Agrif_Integrate_Child_Parallel_adj(Agrif_Mygrid,procname)
+    else
+      call Agrif_Integrate_Child_adj(Agrif_Mygrid,procname)
+    endif
 !
     if ( Agrif_Mygrid % child_list % nitems > 0 ) call Agrif_Instance(Agrif_Mygrid)
 !
@@ -1143,6 +1149,36 @@ recursive subroutine Agrif_Integrate_Child_adj ( g, procname )
 !---------------------------------------------------------------------------------------------------
 end subroutine Agrif_Integrate_Child_adj
 !===================================================================================================
+!> Manages the backward time integration of the grid hierarchy (Parallel sisters case)
+!! Recursive subroutine; calls Agrif_Init::Agrif_Instance & Agrif_Step_adj.
+!---------------------------------------------------------------------------------------------------
+recursive subroutine Agrif_Integrate_Child_Parallel_adj ( g, procname )
+!---------------------------------------------------------------------------------------------------
+    type(Agrif_Grid),pointer   :: g          !< Pointer on the current grid
+    procedure(step_proc)       :: procname   !< Subroutine to call on each grid
+
+    type(Agrif_PGrid), pointer  :: gridp    ! Pointer for the recursive procedure
+    integer :: is, ierr
+!
+!
+    if (associated(g % child_seq) ) then
+!
+      do is = 1, g % child_seq % nb_seqs
+!
+!     For each sequence, a given processor integrates only one grid.
+        gridp => Agrif_seq_select_child(g,is)
+        if (associated(gridp)) call Agrif_Integrate_Child_Parallel_adj(gridp % gr, procname)
+!
+      enddo
+    endif
+!
+    call Agrif_Instance(g)
+!
+!   One step on the current grid
+    call procname()
+!---------------------------------------------------------------------------------------------------
+end subroutine Agrif_Integrate_Child_Parallel_adj
+!===================================================================================================
 !
 !===================================================================================================
 
diff --git a/ext/IOIPSL/src/histcom.f90 b/ext/IOIPSL/src/histcom.f90
index 4d5a17419466b4a1d36476c1edba3e06cc5ef69e..0e82aeabddc223ce31afacb104e111af32d5d87e 100644
--- a/ext/IOIPSL/src/histcom.f90
+++ b/ext/IOIPSL/src/histcom.f90
@@ -74,7 +74,7 @@ MODULE histcom
 !-
 ! Fixed parameter
 !-
-  INTEGER,PARAMETER :: nb_files_max=20,nb_var_max=400, &
+  INTEGER,PARAMETER :: nb_files_max=100,nb_var_max=400, &
  &                     nb_hax_max=5,nb_zax_max=10,nbopp_max=10
   REAL,PARAMETER :: missing_val=nf90_fill_real
   INTEGER,PARAMETER,PUBLIC :: &
diff --git a/src/ICE/iceupdate.F90 b/src/ICE/iceupdate.F90
index a973312f04364781465403618f175cf52c6170ed..6c8221ca1e91590ca2e03a47a22c99f8c5a1e707 100644
--- a/src/ICE/iceupdate.F90
+++ b/src/ICE/iceupdate.F90
@@ -187,7 +187,6 @@ CONTAINS
          snwice_fmass (ji,jj) = ( snwice_mass(ji,jj) - snwice_mass_b(ji,jj) ) * r1_Dt_ice
 
       END_2D
-
       ! Storing the transmitted variables
       !----------------------------------
       fr_i  (:,:)   = at_i(:,:)             ! Sea-ice fraction
@@ -446,12 +445,15 @@ CONTAINS
                CALL iom_get( numrir, jpdom_auto, 'snwice_mass_b', snwice_mass_b )
             ELSE                                     ! start from rest
                IF(lwp) WRITE(numout,*) '   ==>>   previous run without snow-ice mass output then set it'
-               snwice_mass  (:,:) = tmask(:,:,1) * ( rhos * vt_s(:,:) + rhoi * vt_i(:,:) )
+               snwice_mass  (:,:) = tmask(:,:,1) * ( rhos * vt_s(:,:) + rhoi * vt_i(:,:) & 
+                                                &  + rhow * (vt_ip(:,:) + vt_il(:,:))  )
                snwice_mass_b(:,:) = snwice_mass(:,:)
             ENDIF
          ELSE                                   !* Start from rest
+!JC: I think this is useless with what is now done in ice_istate
             IF(lwp) WRITE(numout,*) '   ==>>   start from rest: set the snow-ice mass'
-            snwice_mass  (:,:) = tmask(:,:,1) * ( rhos * vt_s(:,:) + rhoi * vt_i(:,:) )
+            snwice_mass  (:,:) = tmask(:,:,1) * ( rhos * vt_s(:,:) + rhoi * vt_i(:,:) & 
+                                             &  + rhow * (vt_ip(:,:) + vt_il(:,:))  )
             snwice_mass_b(:,:) = snwice_mass(:,:)
          ENDIF
          !
diff --git a/src/NST/agrif_all_update.F90 b/src/NST/agrif_all_update.F90
index 753bee7cd3ae087392792fe42a48addd6127ab73..5389c0c2cd240cef1d33241ba0488bfccc93f2b2 100644
--- a/src/NST/agrif_all_update.F90
+++ b/src/NST/agrif_all_update.F90
@@ -42,6 +42,11 @@ CONTAINS
       !
       IF (lwp.AND.lk_agrif_debug) Write(*,*) ' --> START AGRIF UPDATE from grid Number',Agrif_Fixed()
       !
+      ! Update the computational domain mask once:
+      IF (lk_agrif_fstep) THEN
+         CALL Agrif_Update_Variable(tmask_id,locupdate=(/ nn_shift_bar,-2/), procname = update_tmask_agrif)
+      ENDIF
+      !
       CALL Agrif_Update_ssh()                      ! Update sea level
       !
       IF (.NOT.ln_linssh) CALL Agrif_Update_vvl()  ! Update scale factors
diff --git a/src/NST/agrif_ice_update.F90 b/src/NST/agrif_ice_update.F90
index d142e9ab77275f1233f07d188420e8696e2465e2..4557ef513ef4fa52c4f0e9bef05b97039c5ac8a4 100644
--- a/src/NST/agrif_ice_update.F90
+++ b/src/NST/agrif_ice_update.F90
@@ -24,6 +24,7 @@ MODULE agrif_ice_update
    USE sbc_oce
    USE agrif_oce
    USE ice
+   USE sbc_ice , ONLY : snwice_mass
    USE agrif_ice 
    USE phycst , ONLY: rt0
 
@@ -101,21 +102,21 @@ CONTAINS
       IF( before ) THEN
          jm = 1
          DO jl = 1, jpl
-            ptab(i1:i2,j1:j2,jm  ) = a_i (i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+1) = v_i (i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+2) = v_s (i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+3) = sv_i(i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+4) = oa_i(i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+5) = a_ip(i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+6) = v_ip(i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+7) = v_il(i1:i2,j1:j2,jl)
-            ptab(i1:i2,j1:j2,jm+8) = t_su(i1:i2,j1:j2,jl)
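+            ! Weight the child fields by the fractional cell area e1e2t_frac so that the update onto the parent grid is area-consistent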
+            ptab(i1:i2,j1:j2,jm  ) = a_i (i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2) 
+            ptab(i1:i2,j1:j2,jm+1) = v_i (i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+2) = v_s (i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+3) = sv_i(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+4) = oa_i(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+5) = a_ip(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+6) = v_ip(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+7) = v_il(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
+            ptab(i1:i2,j1:j2,jm+8) = t_su(i1:i2,j1:j2,jl) * e1e2t_frac(i1:i2,j1:j2)
             jm = jm + 9
             DO jk = 1, nlay_s
-               ptab(i1:i2,j1:j2,jm) = e_s(i1:i2,j1:j2,jk,jl)   ;   jm = jm + 1
+               ptab(i1:i2,j1:j2,jm) = e_s(i1:i2,j1:j2,jk,jl) * e1e2t_frac(i1:i2,j1:j2)   ;   jm = jm + 1
             END DO
             DO jk = 1, nlay_i
-               ptab(i1:i2,j1:j2,jm) = e_i(i1:i2,j1:j2,jk,jl)   ;   jm = jm + 1
+               ptab(i1:i2,j1:j2,jm) = e_i(i1:i2,j1:j2,jk,jl) * e1e2t_frac(i1:i2,j1:j2)   ;   jm = jm + 1
             END DO
          END DO
          !
@@ -164,6 +165,17 @@ CONTAINS
          DO jl = 1, jpl
             WHERE( tmask(i1:i2,j1:j2,1) == 0._wp )   t_su(i1:i2,j1:j2,jl) = rt0   ! to avoid a division by 0 in sbcblk.F90
          END DO
+
+         ! new mass per unit area  
+         vt_s (i1:i2,j1:j2) = SUM(  v_s(i1:i2,j1:j2,:), dim=3 )
+         vt_i (i1:i2,j1:j2) = SUM(  v_i(i1:i2,j1:j2,:), dim=3 )
+         vt_ip(i1:i2,j1:j2) = SUM( v_ip(i1:i2,j1:j2,:), dim=3 )
+         vt_il(i1:i2,j1:j2) = SUM( v_il(i1:i2,j1:j2,:), dim=3 )
+
+         snwice_mass(i1:i2,j1:j2) = tmask(i1:i2,j1:j2,1) * ( rhos *   vt_s(i1:i2,j1:j2)  & 
+                                  &                        + rhoi *   vt_i(i1:i2,j1:j2)  &
+                                  &                        + rhow * (vt_ip(i1:i2,j1:j2)  & 
+                                  &                                + vt_il(i1:i2,j1:j2)) )
          
       ENDIF
       !
diff --git a/src/NST/agrif_oce.F90 b/src/NST/agrif_oce.F90
index 01d8034632dc960c82e8c04363a562fdc5837bd7..691e418458b9c7635bc5806bdf9c04e8d38eaeda 100644
--- a/src/NST/agrif_oce.F90
+++ b/src/NST/agrif_oce.F90
@@ -80,7 +80,7 @@ MODULE agrif_oce
    INTEGER, PUBLIC :: unb_interp_id, vnb_interp_id, ub2b_interp_id, vb2b_interp_id
    INTEGER, PUBLIC :: ub2b_update_id, vb2b_update_id, unb_update_id, vnb_update_id
    INTEGER, PUBLIC :: ub2b_cor_id, vb2b_cor_id, sshn_id
-   INTEGER, PUBLIC :: sshn_frc_id
+   INTEGER, PUBLIC :: sshn_frc_id, tmask_id
    INTEGER, PUBLIC :: e3t_id, e3u_id, e3v_id, e3f_id
    INTEGER, PUBLIC :: r3t_id, r3u_id, r3v_id, r3f_id
    INTEGER, PUBLIC :: scales_t_id
diff --git a/src/NST/agrif_oce_interp.F90 b/src/NST/agrif_oce_interp.F90
index 3d816e5847f837114064fe987b1aeef59884d009..221450eb7d8474a06bd3e0e75d7061a05b747d9c 100644
--- a/src/NST/agrif_oce_interp.F90
+++ b/src/NST/agrif_oce_interp.F90
@@ -52,6 +52,7 @@ MODULE agrif_oce_interp
    PUBLIC   interp_e1e2t_frac, interp_e2u_frac, interp_e1v_frac
    PUBLIC   agrif_istate_oce, agrif_istate_ssh   ! called by icestate.F90 and domvvl.F90
    PUBLIC   agrif_check_bat
+   PUBLIC   interp_tmask_agrif
 
    INTEGER ::   bdy_tinterp = 0
 
@@ -1159,6 +1160,26 @@ CONTAINS
    END SUBROUTINE interpsshn_frc
 
 
+   SUBROUTINE interp_tmask_agrif( ptab, i1, i2, j1, j2, before )
+      !!----------------------------------------------------------------------
+      !!               ***  ROUTINE interp_tmask_agrif  ***
+      !!
+      !!               set tmask_agrif = 0 over ghost points 
+      !!
+      !!----------------------------------------------------------------------  
+      INTEGER                         , INTENT(in   ) ::   i1, i2, j1, j2
+      REAL(wp), DIMENSION(i1:i2,j1:j2), INTENT(inout) ::   ptab
+      LOGICAL                         , INTENT(in   ) ::   before
+      !
+      !!----------------------------------------------------------------------  
+      !
+      IF(.NOT.before) THEN
+         tmask_agrif(i1:i2,j1:j2) = 0._wp 
+      ENDIF
+      !
+   END SUBROUTINE interp_tmask_agrif
+
+
    SUBROUTINE interpun( ptab, i1, i2, j1, j2, k1, k2, m1, m2, before )
       !!----------------------------------------------------------------------
       !!                  *** ROUTINE interpun ***
diff --git a/src/NST/agrif_oce_update.F90 b/src/NST/agrif_oce_update.F90
index 72fb2eca14f143e5fdcbe8b22978ac1ece46c82a..3de6bdc211095f2462a5e31e932b89708d06e732 100644
--- a/src/NST/agrif_oce_update.F90
+++ b/src/NST/agrif_oce_update.F90
@@ -36,6 +36,7 @@ MODULE agrif_oce_update
 
    PUBLIC   Agrif_Update_Tra, Agrif_Update_Dyn, Agrif_Update_vvl, Agrif_Update_ssh
    PUBLIC   Agrif_Check_parent_bat
+   PUBLIC   update_tmask_agrif
 
    !! * Substitutions
 #  include "domzgr_substitute.h90"
@@ -1944,6 +1945,24 @@ CONTAINS
       ENDIF
       !
    END SUBROUTINE check_parent_e3v0
+
+
+   SUBROUTINE update_tmask_agrif( tabres, i1, i2, j1, j2, before )
+      !!----------------------------------------------------------------------
+      !!                   *** ROUTINE update_tmask_agrif ***
+      !!----------------------------------------------------------------------
+      INTEGER                         , INTENT(in   ) ::   i1, i2, j1, j2
+      REAL(wp), DIMENSION(i1:i2,j1:j2), INTENT(inout) ::   tabres
+      LOGICAL                         , INTENT(in   ) ::   before
+      !!
+      !!----------------------------------------------------------------------
+      ! 
+      IF( .NOT.before ) THEN
+         tmask_agrif(i1:i2,j1:j2)  = 0._wp 
+      ENDIF
+      !
+   END SUBROUTINE update_tmask_agrif
+
 #else
    !!----------------------------------------------------------------------
    !!   Empty module                                          no AGRIF zoom
diff --git a/src/NST/agrif_user.F90 b/src/NST/agrif_user.F90
index 2bdc85c5dd683eb011b0ce1e7e0cba9272e7a8a1..cdace1b36ed8b1fe1373f7850a7f111cb7120d02 100644
--- a/src/NST/agrif_user.F90
+++ b/src/NST/agrif_user.F90
@@ -26,6 +26,9 @@
       USE nemogcm
       !!----------------------------------------------------------------------
       !
+
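+      ! Child grids retrieve the communicator created by the AGRIF parallel-sisters splitting before NEMO's initialization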
+      mpi_comm_oce = Agrif_MPI_get_grid_comm()
+
       CALL nemo_init       !* Initializations of each fine grid
 # if defined key_RK3
       Kbb_a = Nbb; Kmm_a = Nbb; Krhs_a = Nrhs
@@ -103,6 +106,7 @@
       CALL agrif_declare_variable((/1,1    /),(/ind2-1,ind3-1    /),(/'x','y'        /),(/1,1    /),(/jpi,jpj        /),        r3f_id)
 #endif
       CALL agrif_declare_variable((/2,2,0  /),(/ind2  ,ind3  ,0  /),(/'x','y','N'    /),(/1,1,1  /),(/jpi,jpj,jpk    /),e3t0_interp_id)
+      CALL agrif_declare_variable((/2,2    /),(/ind2  ,ind3      /),(/'x','y'        /),(/1,1    /),(/jpi,jpj        /),      tmask_id)
       CALL agrif_declare_variable((/2,2    /),(/ind2  ,ind3      /),(/'x','y'        /),(/1,1    /),(/jpi,jpj        /),       mbkt_id)
       CALL agrif_declare_variable((/2,2    /),(/ind2  ,ind3      /),(/'x','y'        /),(/1,1    /),(/jpi,jpj        /),        ht0_id)
       CALL agrif_declare_variable((/2,2    /),(/ind2  ,ind3      /),(/'x','y'        /),(/1,1    /),(/jpi,jpj        /), e1e2t_frac_id)   
@@ -131,6 +135,7 @@
 #endif
       CALL Agrif_Set_bcinterp(e3t0_interp_id,interp =AGRIF_linear  )
       CALL Agrif_Set_interp  (e3t0_interp_id,interp =AGRIF_linear  )
+      CALL Agrif_Set_bcinterp(      tmask_id,interp =AGRIF_constant)
       CALL Agrif_Set_bcinterp(       mbkt_id,interp =AGRIF_constant)
       CALL Agrif_Set_interp  (       mbkt_id,interp =AGRIF_constant)
       CALL Agrif_Set_bcinterp(        ht0_id,interp =AGRIF_constant)
@@ -172,6 +177,7 @@
       ! extend the interpolation zone by 1 more point than necessary:
       ! RB check here
       CALL Agrif_Set_bc( e3t0_interp_id, (/-nn_sponge_len*imaxrho-2,ind1-1/) )
+      CALL Agrif_Set_bc(       tmask_id, (/-imaxrho*nn_shift_bar,ind1-1/) )
       CALL Agrif_Set_bc(        mbkt_id, (/-nn_sponge_len*imaxrho-2,ind1-1/) )
       CALL Agrif_Set_bc(         ht0_id, (/-nn_sponge_len*imaxrho-2,ind1-1/) )
       CALL Agrif_Set_bc(  e1e2t_frac_id, (/-nn_sponge_len*imaxrho-2,ind1-1/) )
@@ -202,6 +208,7 @@
       CALL Agrif_Set_Updatetype(        r3f_id,update  = Agrif_Update_Copy   )
 #endif 
 #endif      
+      CALL Agrif_Set_Updatetype(      tmask_id,update  = AGRIF_Update_Average)
 
       CALL Agrif_Set_ExternalMapping(nemo_mapping)
       !
@@ -246,6 +253,8 @@
       ht0_parent( :,:) = 0._wp
       mbkt_parent(:,:) = 0
       !
+      ! Build tmask_agrif such that it is zero outside the barotropic dynamical interface:
+      CALL Agrif_Bc_variable(tmask_id ,calledweight=1.,procname=interp_tmask_agrif)
 !     CALL Agrif_Bc_variable(ht0_id ,calledweight=1.,procname=interpht0 )
 !     CALL Agrif_Bc_variable(mbkt_id,calledweight=1.,procname=interpmbkt)
       CALL Agrif_Init_Variable(ht0_id,        procname=interpht0 )
@@ -1128,6 +1137,11 @@
       !!                 *** ROUTINE Agrif_estimate_parallel_cost ***
       !!----------------------------------------------------------------------
       USE par_oce
+      USE domain         ! domain initialization   (dom_init & dom_cfg routines)
+      USE usrdef_nam     ! user defined configuration namelist
+      USE lib_mpp        ! distributed memory computing
+      USE mppini         ! shared/distributed memory setting (mpp_init routine)
+      USE iom            ! nemo I/O library
       !!
       IMPLICIT NONE
       !
@@ -1137,7 +1151,92 @@
       REAL(wp), INTENT(out) :: grid_cost
       !!----------------------------------------------------------------------
       !
+      INTEGER :: nprocx, nprocy
+      LOGICAL :: lwm
+      INTEGER ::   ios
+      INTEGER :: number_of_land_subdomains
+      LOGICAL ::   ln_listonly
+      INTEGER, ALLOCATABLE, DIMENSION(:,:  ) ::   iimppt, ijpi, ipproc
+      INTEGER, ALLOCATABLE, DIMENSION(:,:  ) ::   ijmppt, ijpj
+      INTEGER :: ji,jj
+      REAL(wp) :: max_grid_cost
+
+      NAMELIST/namcfg/ ln_read_cfg, cn_domcfg, ln_closea, ln_write_cfg, cn_domcfg_out, ln_use_jattr
+      NAMELIST/nammpp/ jpni, jpnj, nn_hls, ln_nnogather, ln_listonly, nn_comm
+
+      ! If not using land suppression information: simple uniform cost estimate
+      ! (the early return below disables the land-aware estimate that follows)
       grid_cost = REAL(imax-imin+1,wp)*REAL(jmax-jmin+1,wp) / REAL(nbprocs,wp)
+      return
+
+      narea = Agrif_GlobProcRank + 1    ! mpprank: the rank of proc (0 --> mppsize -1 )
+      lwm = (narea == 1)                ! control of output namelists
+
+      CALL load_nml( numnam_ref,        'namelist_ref',                                           -1, lwm )
+      CALL load_nml( numnam_cfg,        'namelist_cfg',                                           -1, lwm )
+
+      READ  ( numnam_ref, namcfg, IOSTAT = ios, ERR = 903 )
+903   IF( ios /= 0 )   CALL ctl_nam ( ios , 'namcfg in reference namelist' )
+      READ  ( numnam_cfg, namcfg, IOSTAT = ios, ERR = 904 )
+904   IF( ios >  0 )   CALL ctl_nam ( ios , 'namcfg in configuration namelist' )
+
+      IF( ln_read_cfg ) THEN            ! Read sizes in domain configuration file
+         CALL domain_cfg ( cn_cfg, nn_cfg, Ni0glo, Nj0glo, jpkglo, l_Iperio, l_Jperio, l_NFold, c_NFtype )
+      ELSE                              ! user-defined namelist
+         CALL usr_def_nam( cn_cfg, nn_cfg, Ni0glo, Nj0glo, jpkglo, l_Iperio, l_Jperio, l_NFold, c_NFtype )
+      ENDIF
+
+      READ  ( numnam_ref, nammpp, IOSTAT = ios, ERR = 901 )
+901   IF( ios /= 0 )   CALL ctl_nam ( ios , 'nammpp in reference namelist' )
+      READ  ( numnam_cfg, nammpp, IOSTAT = ios, ERR = 902 )
+902   IF( ios >  0 )   CALL ctl_nam ( ios , 'nammpp in configuration namelist' )
+      !
+      nn_hls = MAX(1, nn_hls)   ! nn_hls must be > 0
+
+      jpiglo = Ni0glo + 2 * nn_hls
+      jpjglo = Nj0glo + 2 * nn_hls
+
+      mppsize = Agrif_totalprocs
+
+      IF(               ln_read_cfg ) CALL iom_open( cn_domcfg,    numbot )
+      IF( ln_bdy .AND. ln_mask_file ) CALL iom_open( cn_mask_file, numbdy )
+
+
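+      ! Ask NEMO's partitioner for the most balanced nprocx x nprocy layout for nbprocs cores (knbcnt returns the number of land-only subdomains)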
+      CALL bestpartition( nbprocs, nprocx, nprocy, knbcnt = number_of_land_subdomains )
+
+! Reinitializations
+
+      IF(               ln_read_cfg ) CALL iom_close( numbot )
+      IF( ln_bdy .AND. ln_mask_file ) CALL iom_close( numbdy )
+
+      numbot = -1
+      numbdy = -1
+
+      DEALLOCATE(numnam_ref)
+      DEALLOCATE(numnam_cfg)
+
+      ALLOCATE(iimppt(nprocx,nprocy), ijmppt(nprocx,nprocy), ijpi(nprocx,nprocy), ijpj(nprocx,nprocy))
+
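+      ! Compute the subdomain sizes of this decomposition: the cost is taken as the area of the largest subdomain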
+      CALL mpp_basesplit( jpiglo, jpjglo, nn_hls, nprocx, nprocy, jpimax, jpjmax, iimppt, ijmppt, ijpi, ijpj )
+
+      max_grid_cost = 0.
+
+      DO jj=1,nprocy
+        DO ji=1,nprocx
+          max_grid_cost=max(max_grid_cost,1.*ijpi(ji,jj)*ijpj(ji,jj))
+        ENDDO
+      ENDDO
+
+
+      DEALLOCATE(iimppt, ijmppt, ijpi, ijpj)
+
+      !grid_cost = REAL(imax-imin+1,wp)*REAL(jmax-jmin+1,wp) / REAL(nprocx*nprocy,wp)
+
+      !if (Agrif_GlobProcRank == 0) then
+      !        print *,'MAXCOST = ',grid_cost,max_grid_cost
+      !endif
+      grid_cost = max_grid_cost
+
+
       !
    END SUBROUTINE Agrif_estimate_parallel_cost
 
diff --git a/src/OCE/DIA/diaar5.F90 b/src/OCE/DIA/diaar5.F90
index c90cea702ea532aee148d302057cc161672bdf5a..82fe74e622840226583e015e1227f11568090417 100644
--- a/src/OCE/DIA/diaar5.F90
+++ b/src/OCE/DIA/diaar5.F90
@@ -21,6 +21,8 @@ MODULE diaar5
    USE iom            ! I/O manager library
    USE fldread        ! type FLD_N
    USE timing         ! preformance summary
+   USE sbc_oce , ONLY : nn_ice
+   USE sbc_ice , ONLY : snwice_mass, snwice_mass_b
 
    IMPLICIT NONE
    PRIVATE
@@ -73,7 +75,7 @@ CONTAINS
       !
       INTEGER  ::   ji, jj, jk, iks, ikb                      ! dummy loop arguments
       REAL(wp) ::   zvolssh, zvol, zssh_steric, zztmp, zarho, ztemp, zsal, zmass, zsst
-      REAL(wp) ::   zaw, zbw, zrw
+      REAL(wp) ::   zaw, zbw, zrw, ztf
       !
       REAL(wp), ALLOCATABLE, DIMENSION(:,:)     :: zarea_ssh , zbotpres       ! 2D workspace
       REAL(wp), ALLOCATABLE, DIMENSION(:,:)     :: z2d, zpe                   ! 2D workspace
@@ -126,6 +128,20 @@ CONTAINS
          !
       ENDIF
 
+      IF( iom_use( 'sshice' ) ) THEN
+         !                                         ! total volume of ice+snow 
+         IF( nn_ice == 0 ) THEN
+            zvolssh = 0._wp
+         ELSE
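+            ! Interpolate in time between the before and now snow-ice mass, which is only updated every nn_fsbc time steps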
+            ztf = REAL(MOD( kt-1, nn_fsbc ), wp) / REAL(nn_fsbc, wp)
+            z2d = ztf * snwice_mass(:,:) + (1._wp - ztf) * snwice_mass_b(:,:)
+            zvolssh = glob_sum( 'diaar5', e1e2t(:,:) * z2d(:,:) * r1_rho0 )
+         ENDIF
+
+         CALL iom_put( 'sshice', zvolssh / area_tot )
+         !
+      ENDIF
+
       IF( iom_use( 'botpres' ) .OR. iom_use( 'sshthster' )  .OR. iom_use( 'sshsteric' )  ) THEN
          !
          ztsn(:,:,:,jp_tem) = ts(:,:,:,jp_tem,Kmm)                    ! thermosteric ssh
@@ -371,7 +387,7 @@ CONTAINS
          &  iom_use( 'uadv_salttr' ) .OR. iom_use( 'udiff_salttr' ) .OR. &
          &  iom_use( 'vadv_heattr' ) .OR. iom_use( 'vdiff_heattr' ) .OR. &
          &  iom_use( 'vadv_salttr' ) .OR. iom_use( 'vdiff_salttr' ) .OR. &
-         &  iom_use( 'rhop' )  ) L_ar5 = .TRUE.
+         &  iom_use( 'rhop' ) .OR. iom_use( 'sshice' )  ) L_ar5 = .TRUE.
 
       IF( l_ar5 ) THEN
          !
diff --git a/src/OCE/DOM/dom_oce.F90 b/src/OCE/DOM/dom_oce.F90
index 71d351a9a8b5d84d08fe69f7fff2fe05dc13c8d1..cb50b13560d118f597c7d90340529ee53d7690c0 100644
--- a/src/OCE/DOM/dom_oce.F90
+++ b/src/OCE/DOM/dom_oce.F90
@@ -198,7 +198,8 @@ MODULE dom_oce
    REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:), TARGET ::   tmask, umask, vmask, wmask, fmask   !: land/ocean mask at T-, U-, V-, W- and F-pts
    REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:), TARGET ::   wumask, wvmask                      !: land/ocean mask at WU- and WV-pts
    REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:), TARGET ::   fe3mask                             !: land/ocean mask at F-pts
-   REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: tmask_upd, umask_upd, vmask_upd                 !: land/ocean mask at F-pts
+   REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: tmask_upd, umask_upd, vmask_upd                 !: agrif mask defining the barotropic update area
+   REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: tmask_agrif                !: agrif mask at T-points excluding ghosts and updated areas
 
    !!----------------------------------------------------------------------
    !! calendar variables
@@ -342,7 +343,8 @@ CONTAINS
          !
 #if defined key_agrif
       ii = ii+1
-      ALLOCATE( tmask_upd(jpi,jpj) , umask_upd(jpi,jpj), vmask_upd(jpi,jpj) , STAT=ierr(ii) )
+      ALLOCATE( tmask_upd(jpi,jpj) , umask_upd(jpi,jpj), vmask_upd(jpi,jpj), & 
+         &      tmask_agrif(jpi,jpj), STAT=ierr(ii) )
 #endif
       !
       dom_oce_alloc = MAXVAL(ierr)
diff --git a/src/OCE/DOM/dommsk.F90 b/src/OCE/DOM/dommsk.F90
index 904fec9b03f78748a6aa4e12da5f2420d1f7428d..362b11e3e48d4bfad0279a14b9b72c8847554aeb 100644
--- a/src/OCE/DOM/dommsk.F90
+++ b/src/OCE/DOM/dommsk.F90
@@ -158,6 +158,15 @@ CONTAINS
       ! In case of a coarsened grid, account her for possibly aditionnal  
       ! masked points; these have been read in the mesh file and stored in mbku, mbkv, mbkf
       DO_2D( 0, 0, 0, 0 )
+         ! Ugly patch to accommodate the barotropic case (jpk=2)
+         ! but with possible masked faces in the coarsening case
+         ! A better way would be to keep track of mbku/v/f=0 until here
+         ! but these have been assigned a minimum of 1. TBC
+         IF (jpk>2) THEN   
+            IF (mbku(ji,jj)==1) umask(ji,jj,:) = 0._wp
+            IF (mbkv(ji,jj)==1) vmask(ji,jj,:) = 0._wp
+            IF (mbkf(ji,jj)==1) fmask(ji,jj,:) = 0._wp
+         ENDIF
          IF ( MAXVAL(umask(ji,jj,:))/=0._wp )  umask(ji,jj,mbku(ji,jj)+1:jpk) = 0._wp
          IF ( MAXVAL(vmask(ji,jj,:))/=0._wp )  vmask(ji,jj,mbkv(ji,jj)+1:jpk) = 0._wp
          IF ( MAXVAL(fmask(ji,jj,:))/=0._wp )  fmask(ji,jj,mbkf(ji,jj)+1:jpk) = 0._wp
@@ -225,6 +234,10 @@ CONTAINS
       tmask_upd(:,:) = 0._wp
       umask_upd(:,:) = 0._wp
       vmask_upd(:,:) = 0._wp
+      !
+      ! Reset mask defining the actual computational domain
+      ! e.g. excluding ghosts and updated cells.
+      tmask_agrif(:,:) = 1._wp
 #endif     
       !
    END SUBROUTINE dom_msk
diff --git a/src/OCE/DYN/dynspg_ts.F90 b/src/OCE/DYN/dynspg_ts.F90
index 21e76f5b5a4758e22cd407f945ea01e2e96dfb43..6e51d97c41e5a1784fb383ea2fce397daaa02e9c 100644
--- a/src/OCE/DYN/dynspg_ts.F90
+++ b/src/OCE/DYN/dynspg_ts.F90
@@ -1149,6 +1149,12 @@ CONTAINS
          zcu(ji,jj) = SQRT( grav * MAX(ht_0(ji,jj),0._wp) * (zxr2 + zyr2) )
       END_2D
       !
+#if defined key_agrif
+      ! Discard points that are not stepped by the 2d mode:
+      zcu(Nis0:Nie0,Njs0:Nje0) = zcu(Nis0:Nie0,Njs0:Nje0) &
+                               & * (1._wp - tmask_upd(Nis0:Nie0,Njs0:Nje0))
+#endif
+      !
       zcmax = MAXVAL( zcu(Nis0:Nie0,Njs0:Nje0) )
       CALL mpp_max( 'dynspg_ts', zcmax )
 
diff --git a/src/OCE/IOM/iom_def.F90 b/src/OCE/IOM/iom_def.F90
index 5a842c4cc61427fa686a9162af3d1c1082f6d565..f8251e4a9b893358ac41c2e6a5f453330012eb07 100644
--- a/src/OCE/IOM/iom_def.F90
+++ b/src/OCE/IOM/iom_def.F90
@@ -25,7 +25,7 @@ MODULE iom_def
    INTEGER, PARAMETER, PUBLIC ::   jp_i2    = 203      !: write INTEGER(2)
    INTEGER, PARAMETER, PUBLIC ::   jp_i1    = 204      !: write INTEGER(1)
 
-   INTEGER, PARAMETER, PUBLIC ::   jpmax_files  = 100  !: maximum number of simultaneously opened file
+   INTEGER, PARAMETER, PUBLIC ::   jpmax_files  = 200  !: maximum number of simultaneously opened file
    INTEGER, PARAMETER, PUBLIC ::   jpmax_vars   = 1200 !: maximum number of variables in one file
    INTEGER, PARAMETER, PUBLIC ::   jpmax_dims   =  4   !: maximum number of dimensions for one variable
    INTEGER, PARAMETER, PUBLIC ::   jpmax_digits =  9   !: maximum number of digits for the cpu number in the file name
diff --git a/src/OCE/LBC/lib_mpp.F90 b/src/OCE/LBC/lib_mpp.F90
index 0bcde50b8b6b0f5ec11e9d5d80bdee7fed706e1d..bf839de5ac0f224d52f6194f47efa110fd8ff1f8 100644
--- a/src/OCE/LBC/lib_mpp.F90
+++ b/src/OCE/LBC/lib_mpp.F90
@@ -135,9 +135,7 @@ MODULE lib_mpp
 
    INTEGER, PUBLIC ::   mppsize        ! number of process
    INTEGER, PUBLIC ::   mpprank        ! process number  [ 0 - size-1 ]
-!$AGRIF_DO_NOT_TREAT
    INTEGER, PUBLIC ::   mpi_comm_oce   ! opa local communicator
-!$AGRIF_END_DO_NOT_TREAT
 
    INTEGER :: MPI_SUMDD
 
@@ -261,17 +259,17 @@ CONTAINS
             mpi_comm_oce = localComm
          ENDIF
       ELSE
-         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, ierr)
-         IF( ierr /= MPI_SUCCESS ) CALL ctl_stop( 'STOP', ' lib_mpp: Error in routine mpi_comm_dup' )
-      ENDIF
-
 # if defined key_agrif
-      IF( Agrif_Root() ) THEN
-         CALL Agrif_MPI_Init(mpi_comm_oce)
-      ELSE
-         CALL Agrif_MPI_set_grid_comm(mpi_comm_oce)
-      ENDIF
+          IF( Agrif_Root() ) THEN
 # endif
+              CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, ierr)
+              IF( ierr /= MPI_SUCCESS ) CALL ctl_stop( 'STOP', ' lib_mpp: Error in routine mpi_comm_dup' )
+# if defined key_agrif
+          ELSE
+              mpi_comm_oce = Agrif_MPI_get_grid_comm()
+          ENDIF
+# endif
+      ENDIF
 
       CALL mpi_comm_rank( mpi_comm_oce, mpprank, ierr )
       CALL mpi_comm_size( mpi_comm_oce, mppsize, ierr )
diff --git a/src/OCE/LBC/mppini.F90 b/src/OCE/LBC/mppini.F90
index cd10d88720a34d3cc70f05e0ed2dcbfe8691be9f..7af3c0fe8fbf7cc329dc63a64fe8bbf606538340 100644
--- a/src/OCE/LBC/mppini.F90
+++ b/src/OCE/LBC/mppini.F90
@@ -35,6 +35,9 @@ MODULE mppini
    PUBLIC   mpp_getnum     ! called by prtctl
    PUBLIC   mpp_basesplit  ! called by prtctl
    PUBLIC   mpp_is_ocean   ! called by prtctl
+#ifdef key_agrif
+   PUBLIC   bestpartition  ! called by Agrif_estimate_parallel_cost
+#endif
 
    INTEGER ::   numbot = -1   ! 'bottom_level' local logical unit
    INTEGER ::   numbdy = -1   ! 'bdy_msk'      local logical unit
@@ -730,22 +733,36 @@ CONTAINS
 #else
          iszitst = ( Ni0glo + (ji-1) ) / ji + 2*nn_hls   ! max subdomain i-size
 #endif
-         IF( iszitst < isziref .AND. iszitst >= iszimin ) THEN
-            isziref = iszitst
-            inbimax = inbimax + 1
-            inbi0(inbimax) = ji
-            iszi0(inbimax) = isziref
+         IF( iszitst >= iszimin ) THEN
+#ifdef key_agrif
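+            ! With parallel sister grids, candidate decompositions of equal
+            ! subdomain size are kept as well (<= instead of <): each grid must
+            ! be able to match the core count it was assigned, not only the
+            ! globally optimal partition.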
+            IF ( ( (.NOT.Agrif_Root()) .AND. Agrif_Parallel_Sisters .AND. (iszitst <= isziref) ) &
+                  .OR. (iszitst < isziref) ) THEN
+#else
+            IF (iszitst < isziref) THEN
+#endif
+               isziref = iszitst
+               inbimax = inbimax + 1
+               inbi0(inbimax) = ji
+               iszi0(inbimax) = isziref
+            ENDIF
          ENDIF
 #if defined key_nemocice_decomp
          iszjtst = ( ny_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls    ! first  dim.
 #else
          iszjtst = ( Nj0glo + (ji-1) ) / ji + 2*nn_hls   ! max subdomain j-size
 #endif
-         IF( iszjtst < iszjref .AND. iszjtst >= iszjmin ) THEN
-            iszjref = iszjtst
-            inbjmax = inbjmax + 1
-            inbj0(inbjmax) = ji
-            iszj0(inbjmax) = iszjref
+         IF( iszjtst >= iszjmin ) THEN
+#ifdef key_agrif
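+            ! Same relaxed test as for the i-direction above.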
+            IF ( ( (.NOT.Agrif_Root()) .AND. Agrif_Parallel_Sisters .AND. (iszjtst <= iszjref) ) &
+                  .OR. (iszjtst < iszjref) ) THEN
+#else
+            IF (iszjtst < iszjref) THEN
+#endif
+               iszjref = iszjtst
+               inbjmax = inbjmax + 1
+               inbj0(inbjmax) = ji
+               iszj0(inbjmax) = iszjref
+            ENDIF
          ENDIF
       END DO
       IF( inbimax == 0 ) THEN
@@ -805,7 +822,13 @@ CONTAINS
       iszij = jpiglo*jpjglo+1                                   ! default: larger than global domain
       DO WHILE( inbij <= inbijmax )                             ! if we did not reach the max of inbij1
          ii = MINLOC(iszij1, mask = inbij1 == inbij, dim = 1)   ! warning: send back the first occurence if multiple results
-         IF ( iszij1(ii) < iszij ) THEN
+         IF ( ( iszij1(ii) < iszij )    &
+#ifdef key_agrif
+            ! if key_agrif is defined, we keep the partition if the number of cores is correct
+            ! even if the perimeter is larger than the best partition
+            .OR.(Agrif_Parallel_Sisters.AND.(inbij == knbij))  &
+#endif
+            ) THEN
             ii = MINLOC( iszi1+iszj1, mask = iszij1 == iszij1(ii) .AND. inbij1 == inbij, dim = 1)  ! select the smaller perimeter if multiple min
             isz0 = isz0 + 1
             indexok(isz0) = ii
diff --git a/src/OCE/SBC/sbcfwb.F90 b/src/OCE/SBC/sbcfwb.F90
index 58a17f028f4a7cc404859b8c7c4a66f98cd04406..3cb6d7e241160ea7fe667cf78dad3213d056a5b6 100644
--- a/src/OCE/SBC/sbcfwb.F90
+++ b/src/OCE/SBC/sbcfwb.F90
@@ -8,6 +8,8 @@ MODULE sbcfwb
    !!            3.0  ! 2006-08  (G. Madec)  Surface module
    !!            3.2  ! 2009-07  (C. Talandier) emp mean s spread over erp area 
    !!            3.6  ! 2014-11  (P. Mathiot  ) add ice shelf melting
+   !!            4.2  ! 2022-12  (J. Chanut   ) Compatibility with AGRIF
+   !!                                            + analytical cycle 
    !!----------------------------------------------------------------------
 
    !!----------------------------------------------------------------------
@@ -17,10 +19,13 @@ MODULE sbcfwb
    USE dom_oce        ! ocean space and time domain
    USE sbc_oce        ! surface ocean boundary condition
    USE isf_oce , ONLY : fwfisf_cav, fwfisf_par                    ! ice shelf melting contribution
-   USE sbc_ice , ONLY : snwice_mass, snwice_mass_b, snwice_fmass
+   USE sbc_ice , ONLY : snwice_mass, snwice_mass_b, snwice_fmass
    USE phycst         ! physical constants
    USE sbcrnf         ! ocean runoffs
    USE sbcssr         ! Sea-Surface damping terms
+#if defined key_agrif
+   USE agrif_oce , ONLY: Kmm_a
+#endif
    !
    USE in_out_manager ! I/O manager
    USE iom            ! IOM
@@ -34,11 +39,23 @@ MODULE sbcfwb
 
    PUBLIC   sbc_fwb    ! routine called by step
 
-   REAL(wp) ::   rn_fwb0   ! initial freshwater adjustment flux [kg/m2/s] (nn_fwb = 2 only)
+   REAL(wp) ::   rn_fwb0     ! initial freshwater adjustment flux [kg/m2/s] (nn_fwb = 2 only)
+   INTEGER  ::   nn_fwb_voltype ! Volume to control: = 1 ice+ocean, = 2 ocean only
+   LOGICAL  ::   ln_hvolg_var ! Prescribe a time-varying global volume
+   REAL(wp) ::   rn_hvolg_amp ! Global seasonal volume height amplitude (m)
+   REAL(wp) ::   rn_hvolg_trd ! Global volume height trend (m/s)
+   INTEGER  ::   nn_hvolg_mth ! Month when global volume height starts to rise
    REAL(wp) ::   a_fwb     ! annual domain averaged freshwater budget from the previous year
    REAL(wp) ::   a_fwb_b   ! annual domain averaged freshwater budget from the year before or at initial state
-   REAL(wp) ::   a_fwb_ini ! initial domain averaged freshwater budget
    REAL(wp) ::   area      ! global mean ocean surface (interior domain)
+   REAL(wp) ::   emp_corr  ! current, globally constant, emp correction
+   REAL(wp) ::   emp_ext   ! prescribed emp flux
+   REAL(wp) ::   hvolg_n, hvolg_a  ! Now and future equivalent height to prescribe 
+#if defined key_agrif
+!$AGRIF_DO_NOT_TREAT
+   REAL(wp), ALLOCATABLE, DIMENSION(:) :: agrif_tmp ! temporary array holding values for each grid
+!$AGRIF_END_DO_NOT_TREAT
+#endif
 
    !!----------------------------------------------------------------------
    !! NEMO/OCE 4.0 , NEMO Consortium (2018)
@@ -66,15 +83,16 @@ CONTAINS
       INTEGER, INTENT( in ) ::   kn_fwb   ! ocean time-step index
       INTEGER, INTENT( in ) ::   Kmm      ! ocean time level index
       !
-      INTEGER  ::   ios, inum, ikty       ! local integers
-      REAL(wp) ::   z_fwf, z_fwf_nsrf, zsum_fwf, zsum_erp                ! local scalars
-      REAL(wp) ::   zsurf_neg, zsurf_pos, zsurf_tospread, zcoef          !   -      -
+      INTEGER  ::   ios, inum, ikty, igrid       ! local integers
+      INTEGER  ::   ji, jj, istart, iend, jstart, jend
+      REAL(wp) ::   z_fwf, z_fwf_nsrf, zsum_fwf, zsum_erp         ! local scalars
+      REAL(wp) ::   zsurf_neg, zsurf_pos, zsurf_tospread          !   -      -
       REAL(wp), ALLOCATABLE, DIMENSION(:,:) ::   ztmsk_neg, ztmsk_pos, z_wgt ! 2D workspaces
       REAL(wp), ALLOCATABLE, DIMENSION(:,:) ::   ztmsk_tospread, zerp_cor    !   -      -
       REAL(wp)   ,DIMENSION(1) ::   z_fwfprv  
       COMPLEX(dp),DIMENSION(1) ::   y_fwfnow  
       !
-      NAMELIST/namsbc_fwb/rn_fwb0
+      NAMELIST/namsbc_fwb/rn_fwb0, nn_fwb_voltype, ln_hvolg_var, rn_hvolg_amp, rn_hvolg_trd, nn_hvolg_mth
       !!----------------------------------------------------------------------
       !
       IF( kt == nit000 ) THEN
@@ -83,112 +101,321 @@ CONTAINS
          READ( numnam_cfg, namsbc_fwb, IOSTAT = ios, ERR = 902 )
 902      IF( ios >  0 ) CALL ctl_nam( ios, 'namsbc_fwb in configuration namelist' )
          IF(lwm) WRITE( numond, namsbc_fwb )
+#if defined key_agrif
+         IF ( .NOT.Agrif_Root() ) THEN ! Inherit namelist values from the parent grid
+            nn_fwb_voltype = Agrif_parent(nn_fwb_voltype)   
+            rn_fwb0        = Agrif_parent(rn_fwb0)
+            ln_hvolg_var   = Agrif_parent(ln_hvolg_var)
+            rn_hvolg_amp   = Agrif_parent(rn_hvolg_amp)
+            rn_hvolg_trd   = Agrif_parent(rn_hvolg_trd)
+            nn_hvolg_mth   = Agrif_parent(nn_hvolg_mth)
+         ENDIF
+#endif
+
          IF(lwp) THEN
             WRITE(numout,*)
             WRITE(numout,*) 'sbc_fwb : FreshWater Budget correction'
             WRITE(numout,*) '~~~~~~~'
-            IF( kn_fwb == 1 )   WRITE(numout,*) '          instantaneously set to zero'
-            IF( kn_fwb == 3 )   WRITE(numout,*) '          fwf set to zero and spread out over erp area'
-            IF( kn_fwb == 2 ) THEN
-               WRITE(numout,*) '          adjusted from previous year budget'
-               WRITE(numout,*)
+            SELECT CASE ( kn_fwb )
+            CASE ( 1 )
+               WRITE(numout,*) '          nn_fwb = 1: Volume set to zero at each time step'
+               WRITE(numout,*) '          => uniform correction to emp'
+            CASE ( 2 )
+               WRITE(numout,*) '          nn_fwb = 2: Volume adjusted from previous year budget'
+               WRITE(numout,*) '          => uniform correction to emp'
                WRITE(numout,*) '   Namelist namsbc_fwb'
                WRITE(numout,*) '      Initial freshwater adjustment flux [kg/m2/s] = ', rn_fwb0
-            END IF
+            CASE( 3 )
+               WRITE(numout,*) '          nn_fwb = 3: Volume set to zero at each time step'
+               WRITE(numout,*) '          => non-uniform correction proportional to erp'
+            CASE DEFAULT
+               CALL ctl_stop( 'sbc_fwb : wrong nn_fwb value for the FreshWater Budget correction, choose either 1, 2 or 3' )
+            END SELECT
+            !
+            SELECT CASE ( nn_fwb_voltype )
+            CASE( 1 )
+               WRITE(numout,*) ' '
+               WRITE(numout,*) '          nn_fwb_voltype = 1: Control ICE + OCEAN volume'
+               WRITE(numout,*) ' '
+            CASE( 2 )
+               WRITE(numout,*) ' '
+               WRITE(numout,*) '          nn_fwb_voltype = 2: Control OCEAN volume'
+               WRITE(numout,*) ' '
+            CASE DEFAULT
+               CALL ctl_stop( 'sbc_fwb : wrong nn_fwb_voltype value for the FreshWater Budget correction, choose either 1 or 2' )
+            END SELECT
+            !
+            IF (ln_hvolg_var) THEN
+               WRITE(numout,*) ' '
+               WRITE(numout,*) '          ln_hvolg_var = T: specify the global volume variation'
+               WRITE(numout,*) '          Seasonal height amplitude [m]:               ', rn_hvolg_amp
+               WRITE(numout,*) '          Volume anomaly crosses zero at month (1:12): ', nn_hvolg_mth
+               WRITE(numout,*) '          Trend [m/s]:                                 ', rn_hvolg_trd
+               WRITE(numout,*) ' '
+            ELSE
+               WRITE(numout,*) ' '
+               WRITE(numout,*) '          ln_hvolg_var = F: no specification of the volume variation '
+               WRITE(numout,*) ' '
+            ENDIF   
          ENDIF
          !
          IF( kn_fwb == 3 .AND. nn_sssr /= 2 )   CALL ctl_stop( 'sbc_fwb: nn_fwb = 3 requires nn_sssr = 2, we stop ' )
          IF( kn_fwb == 3 .AND. ln_isfcav    )   CALL ctl_stop( 'sbc_fwb: nn_fwb = 3 with ln_isfcav = .TRUE. not working, we stop ' )
+#if defined key_agrif
+         IF((kn_fwb == 3).AND.(Agrif_maxlevel()/=0)) CALL ctl_stop( 'sbc_fwb: nn_fwb = 3 not yet implemented with AGRIF zooms ' ) 
+#endif
          !
-         area = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask(:,:,1))           ! interior global domain surface
+         IF ( Agrif_Root() ) THEN
+#if defined key_agrif
+            ALLOCATE(agrif_tmp(Agrif_nb_fine_grids()+1))
+            agrif_tmp(:) = 1.e+40_dp                     ! Initialize to a big value
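+            ! Each processor fills only the entries of the grids it integrates;
+            ! the global MIN below then recovers every grid's value everywhere,
+            ! since untouched entries keep the 1.e+40 sentinel.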
+            agrif_tmp(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:)) ! Coarse grid value
+            CALL Agrif_step_child_adj(glob_sum_area_agrif)                    ! Get value over child grids
+            CALL mpp_min('sbcfwb', agrif_tmp(:)) ! Required with // sisters to populate the value of each grid on each processor
+            area = SUM(agrif_tmp)                ! Sum over all grids
+            IF (lwp) WRITE(numout,*) 'Domain area for each agrif grid (km**2):'
+            DO igrid = 1, Agrif_nb_fine_grids() + 1
+               IF (lwp) WRITE(numout,*) '                                        ', igrid, agrif_tmp(igrid)/1000._wp/1000._wp
+            END DO
+#else
+            area = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask(:,:,1))         ! interior global domain surface
+#endif
+            IF (lwp) WRITE(numout,*) 'Total Domain area (km**2):', area/1000._wp/1000._wp
+            !
+            IF ( ln_hvolg_var ) THEN
+               ! get global ssh at "now" time step:
+               CALL set_hglo_ana(kt,  0, rn_hvolg_amp, nn_hvolg_mth, rn_hvolg_trd, hvolg_a)
+            ELSE
+               hvolg_n  = 0._wp
+               hvolg_a  = 0._wp
+               emp_ext  = 0._wp
+            ENDIF   
+         ENDIF
          ! isf cavities are excluded because it can feedback to the melting with generation of inhibition of plumes
          ! and in case of no melt, it can generate HSSW.
          !
          IF( nn_ice == 0 ) THEN
             snwice_mass_b(:,:) = 0.e0               ! no sea-ice model is being used : no snow+ice mass
-            snwice_mass  (:,:) = 0.e0
             snwice_fmass (:,:) = 0.e0
          ENDIF
          !
       ENDIF
 
+
+      ! If needed, define the volume change to prescribe:
+#if defined key_agrif
+      IF ( Agrif_Root() ) THEN
+#endif
+         IF ( ln_hvolg_var ) THEN
+            IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+               hvolg_n  = hvolg_a
+               CALL set_hglo_ana(kt,  kn_fsbc, rn_hvolg_amp, nn_hvolg_mth, rn_hvolg_trd, hvolg_a)
+               ! prescribed volume change leads to the following freshwater flux:
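+               ! (emp is positive for a net freshwater loss, so a rising target
+               !  height, hvolg_a > hvolg_n, yields a negative flux in kg/m2/s)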
+               emp_ext = -rho0 * (hvolg_a - hvolg_n) / ( rn_Dt * REAL(kn_fsbc, wp) ) 
+            ENDIF   
+         ENDIF
+#if defined key_agrif
+      ENDIF
+#endif
+
       SELECT CASE ( kn_fwb )
       !
-      CASE ( 1 )                             !==  global mean fwf set to zero  ==!
-         !
-         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
-            y_fwfnow(1) = local_sum( e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:) - snwice_fmass(:,:) ) )
-            CALL mpp_delay_sum( 'sbcfwb', 'fwb', y_fwfnow(:), z_fwfprv(:), kt == nitend - nn_fsbc + 1 )
-            z_fwfprv(1) = z_fwfprv(1) / area
-            zcoef = z_fwfprv(1) * rcp
-            emp(:,:) = emp(:,:) - z_fwfprv(1)        * tmask(:,:,1)
-            qns(:,:) = qns(:,:) + zcoef * sst_m(:,:) * tmask(:,:,1) ! account for change to the heat budget due to fw correction
-            ! outputs
-            IF( iom_use('hflx_fwb_cea') )  CALL iom_put( 'hflx_fwb_cea', zcoef * sst_m(:,:) * tmask(:,:,1) )
-            IF( iom_use('vflx_fwb_cea') )  CALL iom_put( 'vflx_fwb_cea', z_fwfprv(1)        * tmask(:,:,1) )
-         ENDIF
+      CASE ( 1 )                             !==  set volume at each time step  ==!
          !
-      CASE ( 2 )                             !==  fw adjustment based on fw budget at the end of the previous year  ==!
-         !                                                simulation is supposed to start 1st of January
-         IF( kt == nit000 ) THEN                                                                 ! initialisation
-            !                                                                                    ! set the fw adjustment (a_fwb)
-            IF ( ln_rstart .AND. iom_varid( numror, 'a_fwb_b', ldstop = .FALSE. ) > 0     &      !    as read from restart file
-               &           .AND. iom_varid( numror, 'a_fwb',   ldstop = .FALSE. ) > 0 ) THEN
-               IF(lwp)   WRITE(numout,*) 'sbc_fwb : reading freshwater-budget from restart file'
-               CALL iom_get( numror, 'a_fwb_b', a_fwb_b )
-               CALL iom_get( numror, 'a_fwb'  , a_fwb )
+#if defined key_agrif                                            
+         IF ( Agrif_Root() ) THEN
+            IF ( Agrif_maxlevel()==0 ) THEN
+               ! No child grid, correct "now" fluxes (i.e. as in the "no agrif" case)
+#endif
+               IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+                  SELECT CASE (nn_fwb_voltype)
+                  CASE( 1 )
+                     z_fwfprv(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * (  emp(:,:) - rnf(:,:)                & 
+                               &                                      - fwfisf_cav(:,:) - fwfisf_par(:,:) & 
+                               &                                      - snwice_fmass(:,:) ) )
+                     !y_fwfnow(1) = local_sum( e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:) - snwice_fmass(:,:) ) )
+                     !CALL mpp_delay_sum( 'sbcfwb', 'fwb', y_fwfnow(:), z_fwfprv(:), kt == nitend - nn_fsbc + 1 )
+                  CASE( 2 )
+                     z_fwfprv(1)  = glob_sum( 'sbcfwb', e1e2t(:,:) * (  emp(:,:) - rnf(:,:)                & 
+                                &                                      - fwfisf_cav(:,:) - fwfisf_par(:,:) ))
+                     !y_fwfnow(1) = local_sum( e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:)  ) )
+                     !CALL mpp_delay_sum( 'sbcfwb', 'fwb', y_fwfnow(:), z_fwfprv(:), kt == nitend - nn_fsbc + 1 )
+                  END SELECT
+                  emp_corr = emp_ext - z_fwfprv(1) / area
+               ENDIF
+#if defined key_agrif
+            ELSE
                !
-               a_fwb_ini = a_fwb_b
-            ELSE                                                                                 !    as specified in namelist
-               IF(lwp)   WRITE(numout,*) 'sbc_fwb : setting freshwater-budget from namelist rn_fwb0'
-               a_fwb   = rn_fwb0
-               a_fwb_b = 0._wp   ! used only the first year then it is replaced by a_fwb_ini
+               ! Volume is here corrected according to the budget computed in the past,
+               ! i.e. between the last two consecutive calls to the surface module. Hence,
+               ! the volume is allowed to drift slightly over the current time step.
+               !
+               IF( kt == nit000 ) THEN                                                               ! initialisation
+                  !                                                                                  ! 
+                  IF ( ln_rstart .AND. iom_varid( numror, 'a_fwb',      ldstop = .FALSE. ) > 0     & ! read from restart file
+                     &           .AND. iom_varid( numror, 'emp_corr',   ldstop = .FALSE. ) > 0 ) THEN
+                     IF(lwp)   WRITE(numout,*) 'sbc_fwb : reading freshwater-budget from restart file'
+                     CALL iom_get( numror, 'a_fwb'     , a_fwb    )
+                     CALL iom_get( numror, 'emp_corr'  , emp_corr )
+                     !                                                                                 
+                  ELSE                                                                               
+                     emp_corr = 0._wp
+                     a_fwb    = 999._wp
+                     a_fwb_b  = 999._wp   
+                  END IF
+                  !
+                  IF(lwp)   WRITE(numout,*)
+                  IF(lwp)   WRITE(numout,*)'sbc_fwb : initial freshwater correction flux = ', emp_corr , 'kg/m2/s'
+                  !
+               ENDIF
                !
-               a_fwb_ini = glob_sum( 'sbcfwb', e1e2t(:,:) * ( ssh(:,:,Kmm) + snwice_mass(:,:) * r1_rho0 ) ) &
-                  &      * rho0 / ( area * rday * REAL(nyear_len(1), wp) )
+               IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+                  a_fwb_b   = a_fwb                            ! time swap
+                  agrif_tmp(:) = 1.e+40_dp                     ! Initialize to a big value
+                  SELECT CASE (nn_fwb_voltype)
+                  CASE( 1 )
+                     agrif_tmp(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ( ssh(:,:,Kmm) + snwice_mass_b(:,:) * r1_rho0 ))
+                  CASE( 2 )
+                     agrif_tmp(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ssh(:,:,Kmm))
+                  END SELECT
+                  CALL Agrif_step_child_adj(glob_sum_volume_agrif) ! Get value over child grids
+                  CALL mpp_min('sbcfwb', agrif_tmp(:)) ! Required with // sisters to populate the value of each grid on each processor
+                  a_fwb = SUM(agrif_tmp) * rho0 / area ! Sum over all grids
+                  IF ( a_fwb_b == 999._wp ) a_fwb_b = a_fwb
+                  emp_corr = (a_fwb - a_fwb_b) / ( rn_Dt * REAL(kn_fsbc, wp) ) + emp_corr + emp_ext
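+                  ! Incremental correction: previous flux, plus the mass drift
+                  ! observed over the last coupling window converted to kg/m2/s,
+                  ! plus the externally prescribed flux emp_ext.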
+!!                  IF (lwp) WRITE(numout,*) 'Averaged liquid height (m) and flux correction (kg/m2/s):', kt, a_fwb * r1_rho0, emp_corr
+               ENDIF    
+               !   
+            ENDIF 
+         ELSE ! child grid if any
+            IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+                emp_corr  =  Agrif_parent(emp_corr) 
+            ENDIF  
+         ENDIF
+#endif
+         !
+         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN        ! correct the freshwater fluxes on all grids
+            emp(:,:) = emp(:,:) + emp_corr * tmask(:,:,1)
+            qns(:,:) = qns(:,:) - emp_corr * rcp * sst_m(:,:) * tmask(:,:,1) ! account for change to the heat budget due to fw correction
+         ENDIF
+
+         IF ( Agrif_Root() ) THEN
+            ! Output restart information (root grid only)
+            IF( lrst_oce ) THEN
+               IF(lwp) WRITE(numout,*)
+               IF(lwp) WRITE(numout,*) 'sbc_fwb : writing FW-budget adjustment to ocean restart file at it = ', kt
+               IF(lwp) WRITE(numout,*) '~~~~'
+               CALL iom_rstput( kt, nitrst, numrow, 'a_fwb'  ,   a_fwb   )
+               CALL iom_rstput( kt, nitrst, numrow, 'emp_corr', emp_corr )
             END IF
             !
-            IF(lwp)   WRITE(numout,*)
-            IF(lwp)   WRITE(numout,*)'sbc_fwb : freshwater-budget at the end of previous year = ', a_fwb    , 'kg/m2/s'
-            IF(lwp)   WRITE(numout,*)'          freshwater-budget at initial state            = ', a_fwb_ini, 'kg/m2/s'
+            IF( kt == nitend .AND. lwp ) THEN
+               WRITE(numout,*) 'sbc_fwb : freshwater-budget correction flux at the end of simulation = ', emp_corr, 'kg/m2/s'
+            END IF
+         END IF
+
+         ! outputs
+         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+            IF( iom_use('hflx_fwb_cea') )  CALL iom_put( 'hflx_fwb_cea', -emp_corr * rcp * sst_m(:,:) * tmask(:,:,1) )
+            IF( iom_use('vflx_fwb_cea') )  CALL iom_put( 'vflx_fwb_cea', -emp_corr * tmask(:,:,1) )
+         ENDIF   
+         !
+      CASE ( 2 )                             !==  set volume annual trend  ==!
+         ! 
+         IF ( Agrif_Root() ) THEN
+            IF( kt == nit000 ) THEN                                                                 ! initialisation
+               !                                                                                    ! 
+               IF ( ln_rstart .AND. iom_varid( numror, 'a_fwb',      ldstop = .FALSE. ) > 0     &   ! read from restart file
+                  &           .AND. iom_varid( numror, 'a_fwb_b',    ldstop = .FALSE. ) > 0     & 
+                  &           .AND. iom_varid( numror, 'emp_corr',   ldstop = .FALSE. ) > 0 ) THEN
+                  IF(lwp)   WRITE(numout,*) 'sbc_fwb : reading freshwater-budget from restart file'
+                  CALL iom_get( numror, 'a_fwb'     , a_fwb    )
+                  CALL iom_get( numror, 'a_fwb_b'   , a_fwb_b  )
+                  CALL iom_get( numror, 'emp_corr'  , emp_corr )
+               ELSE                                                                                 !    as specified in namelist
+                  IF(lwp)   WRITE(numout,*) 'sbc_fwb : setting freshwater-budget from namelist rn_fwb0'
+                  emp_corr = rn_fwb0
+                  a_fwb    = 999._wp
+                  a_fwb_b  = 999._wp    
+               END IF
+               !
+               IF(lwp)   WRITE(numout,*)
+               IF(lwp)   WRITE(numout,*)'sbc_fwb : initial freshwater correction flux = ', emp_corr , 'kg/m2/s'
+               !
+            ENDIF
             !
-         ELSE
             ! at the end of year n:
-            ikty = nyear_len(1) * 86400 / NINT(rn_Dt)
-            IF( MOD( kt, ikty ) == 0 ) THEN   ! Update a_fwb at the last time step of a year
-               !                                It should be the first time step of a year MOD(kt-1,ikty) but then the restart would be wrong
-               !                                Hence, we make a small error here but the code is restartable
-               a_fwb_b = a_fwb_ini
+            ikty = NINT( nyear_len(1) * rday / rn_Dt )
+            IF( MOD( kt-1, ikty ) == 0 ) THEN   ! Update a_fwb at the first time step of each year
+               a_fwb_b = a_fwb
                ! mean sea level taking into account ice+snow
-               a_fwb   = glob_sum( 'sbcfwb', e1e2t(:,:) * ( ssh(:,:,Kmm) + snwice_mass(:,:) * r1_rho0 ) )
-               a_fwb   = a_fwb * rho0 / ( area * rday * REAL(nyear_len(1), wp) )   ! convert in kg/m2/s
+#if defined key_agrif
+               agrif_tmp(:) = 1.e+40_dp                     ! Initialize to a big value
+               SELECT CASE (nn_fwb_voltype)
+               CASE( 1 )
+                  agrif_tmp(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ( ssh(:,:,Kmm) + snwice_mass_b(:,:) * r1_rho0 ))
+               CASE( 2 )
+                  agrif_tmp(1) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ssh(:,:,Kmm) )
+               END SELECT
+               CALL Agrif_step_child_adj(glob_sum_volume_agrif) ! Get value over child grids
+               CALL mpp_min('sbcfwb', agrif_tmp(:)) ! Required with // sisters to populate the value of each grid on each processor
+               a_fwb = SUM(agrif_tmp) ! Sum over all grids
+#else          
+               SELECT CASE (nn_fwb_voltype)
+               CASE( 1 )     
+                  a_fwb   = glob_sum( 'sbcfwb', e1e2t(:,:) * ( ssh(:,:,Kmm) + snwice_mass_b(:,:) * r1_rho0 ) )
+               CASE( 2 )
+                  a_fwb   = glob_sum( 'sbcfwb', e1e2t(:,:) * ssh(:,:,Kmm)  )
+               END SELECT
+#endif
+               a_fwb = a_fwb * rho0 / area - hvolg_n * rho0
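+               ! (the prescribed analytical height hvolg_n is removed so that
+               !  only the unintended part of the drift is corrected)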
+               !
+               ! Special case if less than a full year has been run:
+               ! the namelist value rn_fwb0 still applies
+               IF ( a_fwb_b == 999._wp ) a_fwb_b = a_fwb
+               !
+               emp_corr = ( a_fwb - a_fwb_b ) / ( rday * REAL(nyear_len(0), wp) ) + emp_corr
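+               ! a_fwb is a mean mass per unit area (kg/m2): the year-to-year
+               ! difference divided by the length of the past year gives the
+               ! extra flux (kg/m2/s) needed to cancel the annual drift.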
+               IF(lwp)   WRITE(numout,*)
+               IF(lwp)   WRITE(numout,*)'sbc_fwb : Compute new global mass at step = ', kt
+               IF(lwp)   WRITE(numout,*)'sbc_fwb : New      averaged liquid height (ocean + snow + ice) = ',    a_fwb * r1_rho0, 'm'
+               IF(lwp)   WRITE(numout,*)'sbc_fwb : Previous averaged liquid height (ocean + snow + ice) = ',  a_fwb_b * r1_rho0, 'm'
+               IF(lwp)   WRITE(numout,*)'sbc_fwb : Implied freshwater-budget correction flux = ', emp_corr , 'kg/m2/s'
             ENDIF
-            !
+#if defined key_agrif
+         ELSE ! child grid if any
+            IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
+                emp_corr  =  Agrif_parent(emp_corr) 
+            ENDIF
+#endif
          ENDIF
          !
-         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN         ! correct the freshwater fluxes using previous year budget minus initial state
-            zcoef = ( a_fwb - a_fwb_b )
-            emp(:,:) = emp(:,:) + zcoef * tmask(:,:,1)
-            qns(:,:) = qns(:,:) - zcoef * rcp * sst_m(:,:) * tmask(:,:,1) ! account for change to the heat budget due to fw correction
+         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN         ! correct the freshwater fluxes
+            emp(:,:) = emp(:,:) + emp_corr * tmask(:,:,1)
+            qns(:,:) = qns(:,:) - emp_corr * rcp * sst_m(:,:) * tmask(:,:,1) ! account for change to the heat budget due to fw correction
             ! outputs
-            IF( iom_use('hflx_fwb_cea') )  CALL iom_put( 'hflx_fwb_cea', -zcoef * rcp * sst_m(:,:) * tmask(:,:,1) )
-            IF( iom_use('vflx_fwb_cea') )  CALL iom_put( 'vflx_fwb_cea', -zcoef * tmask(:,:,1) )
-         ENDIF
-         ! Output restart information
-         IF( lrst_oce ) THEN
-            IF(lwp) WRITE(numout,*)
-            IF(lwp) WRITE(numout,*) 'sbc_fwb : writing FW-budget adjustment to ocean restart file at it = ', kt
-            IF(lwp) WRITE(numout,*) '~~~~'
-            CALL iom_rstput( kt, nitrst, numrow, 'a_fwb_b', a_fwb_b )
-            CALL iom_rstput( kt, nitrst, numrow, 'a_fwb',   a_fwb   )
-         END IF
-         !
-         IF( kt == nitend .AND. lwp ) THEN
-            WRITE(numout,*) 'sbc_fwb : freshwater-budget at the end of simulation (year now) = ', a_fwb  , 'kg/m2/s'
-            WRITE(numout,*) '          freshwater-budget at initial state                    = ', a_fwb_b, 'kg/m2/s'
+            IF( iom_use('hflx_fwb_cea') )  CALL iom_put( 'hflx_fwb_cea', -emp_corr * rcp * sst_m(:,:) * tmask(:,:,1) )
+            IF( iom_use('vflx_fwb_cea') )  CALL iom_put( 'vflx_fwb_cea', -emp_corr * tmask(:,:,1) )
          ENDIF
+
+         IF ( Agrif_Root() ) THEN
+            ! Output restart information (root grid only)
+            IF( lrst_oce ) THEN
+               IF(lwp) WRITE(numout,*)
+               IF(lwp) WRITE(numout,*) 'sbc_fwb : writing FW-budget adjustment to ocean restart file at it = ', kt
+               IF(lwp) WRITE(numout,*) '~~~~'
+               CALL iom_rstput( kt, nitrst, numrow, 'a_fwb',   a_fwb   )
+               CALL iom_rstput( kt, nitrst, numrow, 'a_fwb_b', a_fwb_b )
+               CALL iom_rstput( kt, nitrst, numrow, 'emp_corr',emp_corr)
+            END IF
+            !
+            IF( kt == nitend .AND. lwp ) THEN
+               WRITE(numout,*)'sbc_fwb : Previous year     averaged liquid height (ocean + snow + ice) = ',   a_fwb * r1_rho0, 'm'
+               WRITE(numout,*)'sbc_fwb : Year before last  averaged liquid height (ocean + snow + ice) = ', a_fwb_b * r1_rho0, 'm'
+               WRITE(numout,*)'sbc_fwb : freshwater-budget correction flux = ', emp_corr, 'kg/m2/s'
+            END IF
+         END IF 
          !
-      CASE ( 3 )                             !==  global fwf set to zero and spread out over erp area  ==!
+      CASE ( 3 )                   !==  set volume at each time step and spread out the correction over erp area  ==!
          !
          ALLOCATE( ztmsk_neg(jpi,jpj) , ztmsk_pos(jpi,jpj) , ztmsk_tospread(jpi,jpj) , z_wgt(jpi,jpj) , zerp_cor(jpi,jpj) )
          !
@@ -196,8 +423,13 @@ CONTAINS
             ztmsk_pos(:,:) = tmask_i(:,:)                      ! Select <0 and >0 area of erp
             WHERE( erp < 0._wp )   ztmsk_pos = 0._wp
             ztmsk_neg(:,:) = tmask_i(:,:) - ztmsk_pos(:,:)
-            !                                                  ! fwf global mean (excluding ocean to ice/snow exchanges) 
-            z_fwf     = glob_sum( 'sbcfwb', e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:) - snwice_fmass(:,:) ) ) / area
+            !                                                  ! fwf global mean (excluding ocean to ice/snow exchanges)
+            SELECT CASE (nn_fwb_voltype)
+            CASE( 1 )  
+               z_fwf     = -emp_ext + glob_sum( 'sbcfwb', e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:) - snwice_fmass(:,:) ) ) / area
+            CASE( 2 )
+               z_fwf     = -emp_ext + glob_sum( 'sbcfwb', e1e2t(:,:) * ( emp(:,:) - rnf(:,:) - fwfisf_cav(:,:) - fwfisf_par(:,:) ) ) / area
+            END SELECT
             !            
             IF( z_fwf < 0._wp ) THEN         ! spread out over >0 erp area to increase evaporation
                zsurf_pos = glob_sum( 'sbcfwb', e1e2t(:,:)*ztmsk_pos(:,:) )
@@ -245,12 +477,77 @@ CONTAINS
          ENDIF
          DEALLOCATE( ztmsk_neg , ztmsk_pos , ztmsk_tospread , z_wgt , zerp_cor )
          !
-      CASE DEFAULT                           !==  you should never be there  ==!
-         CALL ctl_stop( 'sbc_fwb : wrong nn_fwb value for the FreshWater Budget correction, choose either 1, 2 or 3' )
-         !
       END SELECT
       !
    END SUBROUTINE sbc_fwb
 
+
+   SUBROUTINE set_hglo_ana(kt, koffset, rn_sshm_amp, nn_sshm_mth, rn_sshm_trd, phglo)
+      !!---------------------------------------------------------------------
+      !!                   ***  Set the global volume  ***
+      !!  
+      !!    Define the globally averaged equivalent volume height analytically 
+      !!----------------------------------------------------------------------
+      INTEGER,  INTENT(in)  :: kt, koffset
+      REAL(wp), INTENT(in)  :: rn_sshm_amp, rn_sshm_trd
+      INTEGER,  INTENT(in)  :: nn_sshm_mth
+      REAL(wp), INTENT(out) :: phglo
+      !
+      REAL(wp)              :: zt, zf0, zr_nsy
+      !!----------------------------------------------------------------------    
+      !
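+      ! Sketch of the closed form evaluated below:
+      !    h = (A/2) * SIN( 2*pi*t_cal/T - phi0 ) + trend * t_run
+      ! with A = rn_sshm_amp (peak-to-peak), T = one year,
+      ! phi0 = 2*pi*(nn_sshm_mth-1)/12, t_cal the calendar time (seasonal
+      ! cycle) and t_run the elapsed run time (trend), so that the anomaly
+      ! crosses zero upward at the start of month nn_sshm_mth.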
+      IF ( nleapy==1 ) THEN 
+         zr_nsy = 1._wp / (    365.25_wp * rday )
+      ELSE 
+         zr_nsy = 1._wp / ( nyear_len(1) * rday )
+      ENDIF
+      !
+      ! Time at "Now" time step (i.e. 0.5*dt seconds in the past compared to what nsec_year gives)
+      zt  = REAL(nsec1jan000 + nsec_year, wp) + (REAL(koffset,wp) - 0.5_wp) * rn_Dt
+      zf0 = 2._wp * rpi * REAL(nn_sshm_mth-1, wp) / 12._wp  ! Phase lag
+      ! 
+      phglo  = 0.5_wp * rn_sshm_amp * SIN(2._wp * rpi * zr_nsy * zt - zf0)  & 
+             &        + rn_sshm_trd * (kt + koffset - 1) * rn_Dt    
+
+   END SUBROUTINE set_hglo_ana 
+
+
+#if defined key_agrif
+   SUBROUTINE glob_sum_area_agrif()
+      !!---------------------------------------------------------------------
+      !!           ***  compute area with embedded zooms ***
+      !!----------------------------------------------------------------------
+      INTEGER :: igrid
+
+      IF (Agrif_root()) RETURN
+
+      igrid = agrif_fixed() + 1
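+      ! (Agrif_Fixed() numbers fixed grids from 0 on the root grid, hence the
+      !  +1 offset: entry 1 of agrif_tmp holds the root-grid value)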
+      agrif_tmp(igrid) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:))
+
+   END SUBROUTINE glob_sum_area_agrif   
+
+   SUBROUTINE glob_sum_volume_agrif()
+      !!---------------------------------------------------------------------
+      !!           ***  Compute volume with embedded zooms  ***
+      !!----------------------------------------------------------------------
+      INTEGER :: igrid
+      !
+      IF (Agrif_root()) RETURN
+
+      igrid = agrif_fixed() + 1
+      IF (( nn_ice==2 ).OR.(nn_fwb_voltype==1)) THEN
+         ! NB: we use "now" value for snwice_mass over child grids since it has not been updated yet at the time
+         !     this call is made (e.g. when starting a new step over the parent grid)
+         agrif_tmp(igrid) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ( ssh(:,:,Kmm_a) + snwice_mass(:,:) * r1_rho0 ))
+      ELSE
+         agrif_tmp(igrid) = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask_agrif(:,:) * ssh(:,:,Kmm_a) )
+      ENDIF
+
+   END SUBROUTINE glob_sum_volume_agrif 
+
+#endif
+
    !!======================================================================
 END MODULE sbcfwb
diff --git a/src/OCE/SBC/sbcmod.F90 b/src/OCE/SBC/sbcmod.F90
index 4960c48972ae21d82a7d0c829261641d8635b63b..dbbb7c71dca93e597d3650d6cafb49c4c04a9699 100644
--- a/src/OCE/SBC/sbcmod.F90
+++ b/src/OCE/SBC/sbcmod.F90
@@ -125,6 +125,10 @@ CONTAINS
 #endif
 #if ! defined key_si3
       IF( nn_ice == 2 )    nn_ice = 0  ! without key key_si3 you cannot use si3...
+#endif
+#if defined key_agrif
+      ! In the case of an AGRIF zoom, the freshwater budget is controlled by the parent grid:
+      IF (.NOT.Agrif_Root()) nn_fwb = Agrif_Parent(nn_fwb) 
 #endif
       !
       !
diff --git a/src/OCE/nemogcm.F90 b/src/OCE/nemogcm.F90
index 9e5ebf364cb019878d34e1d4fd7fc44a94021891..7fd8bc84a7f28dce2a89ab3cd5d4b2e777062449 100644
--- a/src/OCE/nemogcm.F90
+++ b/src/OCE/nemogcm.F90
@@ -119,7 +119,10 @@ CONTAINS
       !!----------------------------------------------------------------------
       !
 #if defined key_agrif
-      CALL Agrif_Init_Grids()      ! AGRIF: set the meshes
+#if defined key_agrif_psisters
+      Agrif_Parallel_sisters = .TRUE.  ! Activate parallel sisters
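+      ! With parallel sisters, grids sharing the same parent are integrated
+      ! concurrently on disjoint subsets of processes (see lib_mpp and mppini).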
+#endif
+      CALL Agrif_Init_Grids()          ! AGRIF: set the meshes
 #endif
       !                            !-----------------------!
       CALL nemo_init               !==  Initialisations  ==!
@@ -525,6 +528,13 @@ CONTAINS
       !
       IF( ln_timing    )   CALL timing_stop( 'nemo_init')
       !
+
+# if defined key_agrif
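+      ! Hand the ocean communicator to AGRIF once initialisation is complete;
+      ! child grids retrieve theirs through Agrif_MPI_get_grid_comm (lib_mpp).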
+      IF( Agrif_Root() ) THEN
+          CALL Agrif_MPI_Init(mpi_comm_oce)
+      ENDIF
+# endif
+
    END SUBROUTINE nemo_init
 
 
diff --git a/tests/ADIAB_WAVE/.DS_Store b/tests/ADIAB_WAVE/.DS_Store
deleted file mode 100644
index 1481a5d1a07b7f36b15e1620f027634b832c5c93..0000000000000000000000000000000000000000
Binary files a/tests/ADIAB_WAVE/.DS_Store and /dev/null differ
diff --git a/tools/DOMAINcfg/src/agrif_dom_update.F90 b/tools/DOMAINcfg/src/agrif_dom_update.F90
index 64a95f0809a285de78ec97042a04d6bb61cf6a09..72c7154e002b5c2799db6269356d338c3c669aca 100644
--- a/tools/DOMAINcfg/src/agrif_dom_update.F90
+++ b/tools/DOMAINcfg/src/agrif_dom_update.F90
@@ -10,7 +10,9 @@ MODULE agrif_dom_update
    PRIVATE
 
    REAL(wp), PARAMETER :: rminfrac = 0.98_wp ! Should be < 1
-   LOGICAL             :: l_match_area=.FALSE.   
+!$AGRIF_DO_NOT_TREAT
+   LOGICAL             :: l_match_area=.FALSE.
+!$AGRIF_END_DO_NOT_TREAT
 
    PUBLIC agrif_update_all
 
@@ -26,6 +28,15 @@ CONTAINS
 
       IF( Agrif_Root() ) return
       !
+      IF ( l_match_area ) THEN
+         l_match_area = .FALSE.
+         ! Fill land with values unweighted by surface mask:
+         CALL agrif_update_variable(e1e2t_upd_id, procname = update_e1e2t)
+         CALL agrif_update_variable(e2u_id,   locupdate1=(/-1,-2/) , procname = update_e2u)
+         CALL agrif_update_variable(e1v_id,   locupdate2=(/-1,-2/) , procname = update_e1v)
+         l_match_area = .TRUE.
+      ENDIF
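+      ! (this first pass, with l_match_area temporarily disabled, gives land
+      !  points a defined value; the area-matching updates below then
+      !  overwrite ocean points)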
+      !
       ! Update e1t and e2t (set grid cell area as over child grid):
       CALL agrif_update_variable(e1e2t_upd_id, procname = update_e1e2t)
       
@@ -152,7 +163,10 @@ CONTAINS
          mbku(i1:i2,j1:j2) = nint(ptab(i1:i2,j1:j2))
          
          WHERE ( mbku(i1:i2,j1:j2) .EQ. 0 )
+            ssumask(i1:i2,j1:j2) = 0._wp
             mbku(i1:i2,j1:j2)    = 1
+         ELSEWHERE
+            ssumask(i1:i2,j1:j2) = 1._wp
          END WHERE 
       ENDIF
       !
@@ -175,8 +189,11 @@ CONTAINS
          mbkv(i1:i2,j1:j2) = nint(ptab(i1:i2,j1:j2))
          
          WHERE ( mbkv(i1:i2,j1:j2) .EQ. 0 )
+            ssvmask(i1:i2,j1:j2) = 0._wp
             mbkv(i1:i2,j1:j2)    = 1
-         END WHERE 
+         ELSEWHERE
+            ssvmask(i1:i2,j1:j2) = 1._wp
+         END WHERE
       ENDIF
       !
    END SUBROUTINE update_mbkv