! NOTE(review): removed web-scraping residue (page navigation text and bare
! line numbers) that preceded the actual Fortran source and is not part of it.
MODULE obs_mpp
!!======================================================================
!! *** MODULE obs_mpp ***
!! Observation diagnostics: Various MPP support routines
!!======================================================================
!! History : 2.0 ! 2006-03 (K. Mogensen) Original code
!! - ! 2006-05 (K. Mogensen) Reformatted
!! - ! 2008-01 (K. Mogensen) add mpp_global_max
!! 3.6 ! 2015-01 (J. Waters) obs_mpp_find_obs_proc
!! rewritten to avoid global arrays
!!----------------------------------------------------------------------
# define mpivar mpi_double_precision
!!----------------------------------------------------------------------
!! obs_mpp_bcast_integer : Broadcast an integer array from a processor to all processors
!! obs_mpp_max_integer : Find maximum on all processors of each value in an integer on all processors
!! obs_mpp_find_obs_proc : Find processors which should hold the observations, avoiding global arrays
!! obs_mpp_sum_integers : Sum an integer array from all processors
!! obs_mpp_sum_integer : Sum an integer from all processors
!!----------------------------------------------------------------------
USE mpp_map, ONLY : mppmap
USE in_out_manager
#if ! defined key_mpi_off
USE lib_mpp, ONLY : mpi_comm_oce ! MPP library
#endif
IMPLICIT NONE
PRIVATE
PUBLIC obs_mpp_bcast_integer, & !: Broadcast an integer array from a proc to all procs
& obs_mpp_max_integer, & !: Find maximum across processors in an integer array
& obs_mpp_find_obs_proc, & !: Find processors which should hold the observations
& obs_mpp_sum_integers, & !: Sum an integer array from all processors
& obs_mpp_sum_integer, & !: Sum an integer from all processors
& mpp_alltoall_int, &
& mpp_alltoallv_int, &
& mpp_alltoallv_real, &
& mpp_global_max
!!----------------------------------------------------------------------
!! NEMO/OCE 4.0 , NEMO Consortium (2018)
!! $Id: obs_mpp.F90 14275 2021-01-07 12:13:16Z smasson $
!! Software governed by the CeCILL license (see ./LICENSE)
!!----------------------------------------------------------------------
CONTAINS
SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
!!----------------------------------------------------------------------
!! *** ROUTINE obs_mpp_bcast_integer ***
!!
!! ** Purpose : Send array kvals from processor kroot to all processors
!!
!! ** Method : MPI broadcast
!!
!! ** Action : This does only work for MPI.
!! MPI_COMM_OCE needs to be replace for OASIS4.!
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: kno ! Number of elements in array
INTEGER , INTENT(in ) :: kroot ! Processor to send data
INTEGER, DIMENSION(kno), INTENT(inout) :: kvals ! Array to send on kroot, receive for non-kroot
!
#if ! defined key_mpi_off
!
INTEGER :: ierr ! MPI return status (not checked; default MPI error handler aborts)
!
INCLUDE 'mpif.h'
!!----------------------------------------------------------------------
! Call the MPI library to broadcast data
CALL mpi_bcast( kvals, kno, mpi_integer, &
& kroot, mpi_comm_oce, ierr )
#else
! no MPI: empty routine. kvals is left unchanged, which is correct on a
! single processor since the "root" data is already local.
#endif
!
END SUBROUTINE obs_mpp_bcast_integer
SUBROUTINE obs_mpp_max_integer( kvals, kno )
!!----------------------------------------------------------------------
!! *** ROUTINE obs_mpp_max_integer ***
!!
!! ** Purpose : Find maximum across processors in an integer array.
!!
!! ** Method : MPI all reduce.
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!! MPI_COMM_OCE needs to be replace for OASIS4.!
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: kno ! Number of elements in array
INTEGER, DIMENSION(kno), INTENT(inout) :: kvals ! In: local values; out: element-wise maximum over all processors
!
#if ! defined key_mpi_off
!
INTEGER :: ierr ! MPI return status (not checked)
INTEGER, DIMENSION(kno) :: ivals ! Receive buffer (mpi_allreduce must not alias in/out without MPI_IN_PLACE)
!
INCLUDE 'mpif.h'
!!----------------------------------------------------------------------
! Call the MPI library to find the maximum across processors
CALL mpi_allreduce( kvals, ivals, kno, mpi_integer, &
& mpi_max, mpi_comm_oce, ierr )
kvals(:) = ivals(:)
#else
! no MPI: empty routine (the local values are already the global maximum)
#endif
END SUBROUTINE obs_mpp_max_integer
SUBROUTINE obs_mpp_find_obs_proc( kobsp,kno )
!!----------------------------------------------------------------------
!! *** ROUTINE obs_mpp_find_obs_proc ***
!!
!! ** Purpose : From the array kobsp containing the results of the
!! grid search on each processor the processor return a
!! decision of which processors should hold the observation.
!!
!! ** Method : Synchronize the processor number for each obs using
!! obs_mpp_max_integer. If an observation exists on two
!! processors it will be allocated to the lower numbered
!! processor.
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: kno ! Number of observations
INTEGER, DIMENSION(kno), INTENT(inout) :: kobsp ! In: owning processor or -1 if not found locally
! Out: lowest owning processor, or -1 if found nowhere
!
#if ! defined key_mpi_off
!
!
INTEGER :: ji, isum
INTEGER, DIMENSION(kno) :: iobsp ! Working copy of kobsp
!!
!!
iobsp(:)=kobsp(:)
! Map "not found" (-1) to a sentinel larger than any processor number so
! it loses against any real processor in the minimum computed below.
WHERE( iobsp(:) == -1 )
iobsp(:) = 9999999
END WHERE
! Negate so that a MAX reduction yields the MINIMUM processor number:
! max(-p) over processors == -min(p). This assigns each observation to
! the lowest-numbered processor that found it.
iobsp(:)=-1*iobsp(:)
CALL obs_mpp_max_integer( iobsp, kno )
kobsp(:)=-1*iobsp(:)
! Observations still at the sentinel were found by no processor:
! restore -1 and count them as grid-search failures.
isum=0
DO ji = 1, kno
IF ( kobsp(ji) == 9999999 ) THEN
isum=isum+1
kobsp(ji)=-1
ENDIF
ENDDO
IF ( isum > 0 ) THEN
IF (lwp) WRITE(numout,*) isum, ' observations failed the grid search.'
IF (lwp) WRITE(numout,*)'If ln_grid_search_lookup=.TRUE., try reducing grid_search_res'
ENDIF
#else
! no MPI: empty routine (single processor already holds the decision)
#endif
END SUBROUTINE obs_mpp_find_obs_proc
SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
!!----------------------------------------------------------------------
!! *** ROUTINE obs_mpp_sum_integers ***
!!
!! ** Purpose : Sum an integer array element-wise over all processors.
!!
!! ** Method : MPI all reduce.
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: kno ! Number of elements in the arrays
INTEGER, DIMENSION(kno), INTENT(in ) :: kvalsin ! Local contributions
INTEGER, DIMENSION(kno), INTENT( out) :: kvalsout ! Element-wise sum over all processors
!
#if ! defined key_mpi_off
!
INTEGER :: ierr ! MPI return status (not checked)
!
INCLUDE 'mpif.h'
!!----------------------------------------------------------------------
!
!-----------------------------------------------------------------------
! Call the MPI library to find the sum across processors
!-----------------------------------------------------------------------
CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer, &
& mpi_sum, mpi_comm_oce, ierr )
#else
!-----------------------------------------------------------------------
! For no-MPP just return input values
!-----------------------------------------------------------------------
kvalsout(:) = kvalsin(:)
#endif
!
END SUBROUTINE obs_mpp_sum_integers
SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
!!----------------------------------------------------------------------
!! *** ROUTINE obs_mpp_sum_integer ***
!!
!! ** Purpose : Sum a single integer over all processors
!!
!! ** Method : MPI all reduce.
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER, INTENT(in ) :: kvalin ! Local contribution
INTEGER, INTENT( out) :: kvalout ! Sum over all processors
!
#if ! defined key_mpi_off
!
INTEGER :: ierr ! MPI return status (not checked)
!
INCLUDE 'mpif.h'
!!----------------------------------------------------------------------
!
!-----------------------------------------------------------------------
! Call the MPI library to find the sum across processors
!-----------------------------------------------------------------------
CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer, &
& mpi_sum, mpi_comm_oce, ierr )
#else
!-----------------------------------------------------------------------
! For no-MPP just return input values
!-----------------------------------------------------------------------
kvalout = kvalin
#endif
!
END SUBROUTINE obs_mpp_sum_integer
SUBROUTINE mpp_global_max( pval )
!!----------------------------------------------------------------------
!! *** ROUTINE mpp_global_max ***
!!
!! ** Purpose : Get the maximum value across processors for a global
!! real array
!!
!! ** Method : MPI allreduce
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
! NOTE(review): the MPI datatype comes from the mpivar macro
! (mpi_double_precision) — assumes wp is double precision; confirm.
REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(inout) :: pval ! In: local values; out: element-wise global maximum
!
INTEGER :: ierr ! MPI return status (not checked)
!
#if ! defined key_mpi_off
!
INCLUDE 'mpif.h'
REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE :: zcp ! Send buffer (in/out must not alias without MPI_IN_PLACE)
!!----------------------------------------------------------------------
! Copy data for input to MPI
ALLOCATE( &
& zcp(jpiglo,jpjglo) &
& )
zcp(:,:) = pval(:,:)
! Call the MPI library to find the element-wise maximum globally
CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar, &
& mpi_max, mpi_comm_oce, ierr )
DEALLOCATE( &
& zcp &
& )
#else
! no MPI: empty routine (the local values are already the global maximum)
#endif
!
END SUBROUTINE mpp_global_max
SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
!!----------------------------------------------------------------------
!! *** ROUTINE mpp_alltoall_int ***
!!
!! ** Purpose : all to all with a fixed count per processor.
!!
!! ** Method : MPI alltoall
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: kno ! Number of elements exchanged with each processor
INTEGER, DIMENSION(kno*jpnij), INTENT(in ) :: kvalsin ! Data to send (kno elements per destination)
INTEGER, DIMENSION(kno*jpnij), INTENT( out) :: kvalsout ! Received data (kno elements per source)
!!
INTEGER :: ierr ! MPI return status (not checked)
!
#if ! defined key_mpi_off
!
INCLUDE 'mpif.h'
!-----------------------------------------------------------------------
! Call the MPI library to do the all to all operation of the data
!-----------------------------------------------------------------------
CALL mpi_alltoall( kvalsin, kno, mpi_integer, &
& kvalsout, kno, mpi_integer, &
& mpi_comm_oce, ierr )
#else
!-----------------------------------------------------------------------
! For no-MPP just return input values
!-----------------------------------------------------------------------
kvalsout = kvalsin
#endif
!
END SUBROUTINE mpp_alltoall_int
SUBROUTINE mpp_alltoallv_int( kvalsin, knoin , kinv , kvalsout, &
& knoout, koutv )
!!----------------------------------------------------------------------
!! *** ROUTINE mpp_alltoallv_int ***
!!
!! ** Purpose : all to all with variable counts (integer version).
!!
!! ** Method : MPI alltoallv
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in) :: knoin ! Total number of elements to send (sum of kinv)
INTEGER , INTENT(in) :: knoout ! Total number of elements to receive (sum of koutv)
INTEGER, DIMENSION(jpnij), INTENT(IN) :: kinv, koutv ! Per-processor send / receive counts
INTEGER, DIMENSION(knoin) , INTENT(in ) :: kvalsin ! Data to send, grouped by destination
INTEGER, DIMENSION(knoout), INTENT( out) :: kvalsout ! Received data, grouped by source
!!
INTEGER :: ierr ! MPI return status (not checked)
INTEGER :: jproc ! Loop index over processors
!
#if ! defined key_mpi_off
!
INCLUDE 'mpif.h'
INTEGER, DIMENSION(jpnij) :: irdsp, isdsp ! Receive / send displacements
!-----------------------------------------------------------------------
! Compute displacements as running sums of the per-processor counts
!-----------------------------------------------------------------------
irdsp(1) = 0
isdsp(1) = 0
DO jproc = 2, jpnij
isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
END DO
!-----------------------------------------------------------------------
! Call the MPI library to do the all to all operation of the data
!-----------------------------------------------------------------------
CALL mpi_alltoallv( kvalsin, kinv, isdsp, mpi_integer, &
& kvalsout, koutv, irdsp, mpi_integer, &
& mpi_comm_oce, ierr )
#else
!-----------------------------------------------------------------------
! For no-MPP just return input values
!-----------------------------------------------------------------------
kvalsout = kvalsin
#endif
!
END SUBROUTINE mpp_alltoallv_int
SUBROUTINE mpp_alltoallv_real( pvalsin, knoin , kinv , pvalsout, &
& knoout, koutv )
!!----------------------------------------------------------------------
!! *** ROUTINE mpp_alltoallv_real ***
!!
!! ** Purpose : all to all with variable counts (real version).
!!
!! ** Method : MPI alltoallv
!!
!! ** Action : This does only work for MPI.
!! It does not work for SHMEM.
!!
!! References : http://www.mpi-forum.org
!!----------------------------------------------------------------------
INTEGER , INTENT(in ) :: knoin ! Total number of elements to send (sum of kinv)
INTEGER , INTENT(in ) :: knoout ! Total number of elements to receive (sum of koutv)
! INTENT(in) added for consistency with mpp_alltoallv_int (counts are read-only)
INTEGER , DIMENSION(jpnij) , INTENT(in ) :: kinv, koutv ! Per-processor send / receive counts
REAL(wp), DIMENSION(knoin) , INTENT(in ) :: pvalsin ! Data to send, grouped by destination
REAL(wp), DIMENSION(knoout), INTENT( out) :: pvalsout ! Received data, grouped by source
!!
INTEGER :: ierr ! MPI return status (not checked)
INTEGER :: jproc ! Loop index over processors
!
#if ! defined key_mpi_off
!
INCLUDE 'mpif.h'
INTEGER, DIMENSION(jpnij) :: irdsp, isdsp ! Receive / send displacements
!!----------------------------------------------------------------------
!
!-----------------------------------------------------------------------
! Compute displacements as running sums of the per-processor counts
!-----------------------------------------------------------------------
irdsp(1) = 0
isdsp(1) = 0
DO jproc = 2, jpnij
isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
END DO
!-----------------------------------------------------------------------
! Call the MPI library to do the all to all operation of the data
! NOTE(review): mpivar expands to mpi_double_precision — assumes wp is
! double precision; confirm against the build configuration.
!-----------------------------------------------------------------------
CALL mpi_alltoallv( pvalsin, kinv, isdsp, mpivar, &
& pvalsout, koutv, irdsp, mpivar, &
& mpi_comm_oce, ierr )
#else
!-----------------------------------------------------------------------
! For no-MPP just return input values
!-----------------------------------------------------------------------
pvalsout = pvalsin
#endif
!
END SUBROUTINE mpp_alltoallv_real
!!======================================================================
END MODULE obs_mpp