EMPIRE DA  v1.9.1
Data assimilation codes using EMPIRE communication
minimal_model_v5.f90
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! minimal_model_v5 sets up the EMPIRE communicators and exchanges a
! dummy state vector with the DA code, for testing purposes
!
!The MIT License (MIT)
!
!Copyright (c) 2015 Philip A. Browne
!
!Permission is hereby granted, free of charge, to any person obtaining a copy
!of this software and associated documentation files (the "Software"), to deal
!in the Software without restriction, including without limitation the rights
!to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
!copies of the Software, and to permit persons to whom the Software is
!furnished to do so, subject to the following conditions:
!
!The above copyright notice and this permission notice shall be included in all
!copies or substantial portions of the Software.
!
!THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
!IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
!FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
!AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
!LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
!OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
!SOFTWARE.
!
!Email: p.browne@reading.ac.uk
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

program minimal_model_v5
 implicit none

 include 'mpif.h'
 integer :: mpi_err
 integer :: world_rank
 integer :: world_size
 integer, parameter :: mdl_num_proc=16             ! MPI processes per model instance
 integer, parameter :: ensemble_members_per_proc=2 ! ensemble members handled by each model instance
 integer :: mdl_mpi_comm
 integer :: temp_mdls_comm
 integer :: temp_mdls_rank
 integer :: mdl_rank
 integer :: da
 integer, parameter :: rk = kind(1.0d0)
 real(kind=rk), allocatable, dimension(:) :: state_vector
 integer :: state_dim
 integer :: i,j
 integer :: cpl_root
 integer :: instance_rank
 integer :: particle_rank
 integer :: nda
 integer :: nens
 integer :: temp_mdls_size
 integer :: temp_cpl_comm
 integer :: first_ptcl
 integer :: final_ptcl
! integer :: cpl_mpi_comm
 integer, dimension(ensemble_members_per_proc) :: cpl_mpi_comms ! one coupling communicator per local ensemble member
 integer :: null_mpi_comm
 real(kind=rk), dimension(0) :: send_null
 integer :: total_timesteps
 integer :: tag

 call mpi_init(mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_init successful'
 else
    print*,'mpi_init unsuccessful'
 end if


 call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_rank successful'
    print*,'world_rank = ',world_rank
 else
    print*,'mpi_comm_rank unsuccessful'
 end if


 call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_size successful'
    print*,'world_size = ',world_size
 else
    print*,'mpi_comm_size unsuccessful'
 end if


 cpl_root = world_size-1
 print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
 da = 0


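 ! da = 0 is the colour the model processes use in the first
 ! mpi_comm_split below; the DA code (running in the same
 ! mpi_comm_world) is assumed to use a different colour, so that split
 ! separates the models from the DA code.  The two mpi_allreduce calls
 ! take the maximum of mdl_num_proc and ensemble_members_per_proc over
 ! mpi_comm_world, so every process, including the DA code, ends up
 ! with the same values.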
 call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
      &,mpi_comm_world,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_allreduce successful'
    print*,'i = ',i
 else
    print*,'mpi_allreduce unsuccessful'
 end if


 call mpi_allreduce(ensemble_members_per_proc,i,1,mpi_integer,mpi_max&
      &,mpi_comm_world,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_allreduce successful'
    print*,'i = ',i
 else
    print*,'mpi_allreduce unsuccessful'
 end if


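 ! split mpi_comm_world by the colour da: all model processes end up
 ! together in temp_mdls_comm, ordered by their world rank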
 print*,'first split happening with the inputs: ',mpi_comm_world,da,world_rank
 call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
      &,mpi_err)
 print*,'first split happened with the outputs: ', mpi_comm_world&
      &,da,world_rank,temp_mdls_comm,mpi_err
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_split successful: temp_mdls_comm created: ',temp_mdls_comm
 else
    print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
 end if

 call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_size successful'
    print*,'temp_mdls_size = ',temp_mdls_size
 else
    print*,'mpi_comm_size unsuccessful'
 end if


 if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
    print*,'MINIMAL MODEL LAUNCH ERROR.'
    print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
         &model'
    stop
 end if


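 ! nda = number of processes belonging to the DA code, i.e. everything
 ! in mpi_comm_world that is not a model process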
 nda = world_size-temp_mdls_size
 print*,'nda = ',nda
 if(nda .lt. 1) then
    print*,'MINIMAL MODEL COMMS v5 ERROR: nda is less than 1.'
    print*,'Make sure you launch with a DA CODE'
    stop
 end if



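 ! nens = total number of ensemble members: each block of mdl_num_proc
 ! model processes is one model instance, and each instance handles
 ! ensemble_members_per_proc ensemble members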
 nens = (temp_mdls_size/mdl_num_proc)*ensemble_members_per_proc
 print*,'nens = ',nens
 call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_rank successful'
    print*,'temp_mdls_rank = ',temp_mdls_rank
 else
    print*,'mpi_comm_rank unsuccessful'
 end if


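 ! instance_rank numbers the model instances: consecutive blocks of
 ! mdl_num_proc ranks in temp_mdls_comm share an instance_rank, and
 ! the split below turns each block into its own model communicator
 ! mdl_mpi_comm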
 instance_rank = temp_mdls_rank/mdl_num_proc

 call mpi_comm_split(temp_mdls_comm,instance_rank,temp_mdls_rank&
      &,mdl_mpi_comm,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_split successful: mdl_mpi_comm created ',mdl_mpi_comm
 else
    print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
 end if



 call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
 if(mpi_err .eq. 0) then
    print*,'mpi_comm_rank successful'
    print*,'mdl_rank = ',mdl_rank
 else
    print*,'mpi_comm_rank unsuccessful'
 end if


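 ! cpl_root is, at this point, the index of the DA process this model
 ! instance couples to: the model instances are mapped onto the nda DA
 ! processes in (nearly) equal contiguous blocks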
 cpl_root = nda*instance_rank/(nens/ensemble_members_per_proc)
 print*,'cpl_root = ',cpl_root

 if(cpl_root .lt. 0) then
    print*,'MINIMAL MODEL COMMS v5 ERROR: cpl_root is less than 0.'
    print*,'Make sure you launch with a DA CODE'
    stop
 end if


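 ! first_ptcl and final_ptcl are the range of ensemble members
 ! (particles) owned by the DA process this instance couples to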
 ! the particle range is computed twice: the integer expressions below
 ! are immediately overwritten by the ceiling-based ones, which give
 ! the same result here
 first_ptcl = -((-cpl_root)*nens/nda)
 final_ptcl = -((-cpl_root-1)*nens/nda)-1

 first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
 final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1


 print*,'range of particles = ',first_ptcl,final_ptcl


 !check to see if mod(n_mdl_instances,npfs) == 0:
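 ! if the number of model instances does not divide evenly among the
 ! DA processes, every particle gets its own split of mpi_comm_world
 ! ("sequential splitting"); otherwise mpi_comm_world is first split
 ! by cpl_root into temp_cpl_comm and the per-particle splits are done
 ! on that smaller communicator.  Either way this instance keeps one
 ! coupling communicator per local ensemble member in cpl_mpi_comms
 ! and frees the communicators for particles it does not own.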
 if(mod(temp_mdls_size/mdl_num_proc,nda) .ne. 0 ) then
    print*,'EMPIRE MESSAGE: SEQUENTIAL SPLITTING OF MPI_COMM_WORLD'
    j = 0

    do i = 0,nens-1
       print*,'i = ',i
       print*,'instance_rank*ensemble_members_per_proc = ',instance_rank*ensemble_members_per_proc
       print*,'(instance_rank+1)*ensemble_members_per_proc -1 = '&
            &,(instance_rank+1)*ensemble_members_per_proc -1
       if(i .ge. instance_rank*ensemble_members_per_proc .and. &
            i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
          ! this instance owns particle i: keep the communicator
          j = j + 1
          call mpi_comm_split(mpi_comm_world,1,temp_mdls_rank&
               &,cpl_mpi_comms(j),mpi_err)
          print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
       else
          ! not our particle: take part in the split, then discard it
          print*,'doing null splitting'
          call mpi_comm_split(mpi_comm_world,0,temp_mdls_rank&
               &,null_mpi_comm,mpi_err)
          print*,'created null_mpi_comm'
          call mpi_comm_free(null_mpi_comm,mpi_err)
          print*,'freed up null_mpi_comm'
       end if


    end do



 else
    print*,''
    print*,''
    print*,''
    print*,''
    print*,''

    print*,'doing first split based on pfrank',cpl_root
    print*,'doing first split...key = ',temp_mdls_rank
    temp_cpl_comm = 0
    print*,mpi_comm_world,cpl_root,temp_mdls_rank&
         &,temp_cpl_comm,mpi_err
    call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank&
         &,temp_cpl_comm,mpi_err)
    if(mpi_err .eq. 0) then
       print*,'mpi_comm_split successful: temp_cpl_comm created ',temp_cpl_comm
    else
       print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
    end if

    print*,'finished first split'
    print*,''
    print*,''
    print*,''
    print*,''
    print*,''


    j = 0

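    ! loop over the particles owned by our DA process; keep a coupling
    ! communicator for each particle this instance handles and discard
    ! the rest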
    do i = first_ptcl,final_ptcl
       print*,'i = ',i
       if(i .ge. instance_rank*ensemble_members_per_proc .and. &
            i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
          j = j + 1
          call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
               &,cpl_mpi_comms(j),mpi_err)
          print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
       else
          print*,'doing null splitting'
          call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
               &,null_mpi_comm,mpi_err)
          print*,'created null_mpi_comm'
          call mpi_comm_free(null_mpi_comm,mpi_err)
          print*,'freed up null_mpi_comm'
       end if


    end do




 end if


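 ! cpl_root is now reinterpreted as the rank of the DA process within
 ! each coupling communicator: the model processes occupy ranks
 ! 0..mdl_num_proc-1, and the DA process is expected at rank
 ! mdl_num_proc, acting as root of the gather/scatter calls below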
 cpl_root = mdl_num_proc



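 ! each model rank holds a different-sized piece of the distributed
 ! state vector; the sizes are arbitrary and simply exercise an uneven
 ! decomposition across the mdl_num_proc model processes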
 select case(mdl_rank)
 case(0)
    state_dim = 1
 case(1)
    state_dim = 3
 case(2)
    state_dim = 2
 case(3)
    state_dim = 5
 case(4)
    state_dim = 1
 case(5,6,7,8,9,10,11,12,13,14,15)
    state_dim = 2
 case default
    print*,'minimal_model_v5 error: no state_dim defined for mdl_rank ',mdl_rank
    stop
 end select
 allocate(state_vector(state_dim))




 ! fill the local piece of the state with recognisable values
 state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)

 print*,'state_vector = '
 print*,state_vector

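 ! tell the DA process how big this rank's piece of the state is: a
 ! gather of state_dim to cpl_root on each coupling communicator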
 do j = 1,ensemble_members_per_proc
    print*,'doing a gather on cpl_mpi_comms(j)',cpl_mpi_comms(j),cpl_root
    call mpi_gather(state_dim,1,mpi_integer,state_dim&
         &,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
    print*,'finished the gather on cpl_mpi_comms(j)'
 end do


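 ! the number of timesteps to run comes from a plain text file called
 ! "timesteps" in the working directory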
 print*,'Reading total_timesteps from file timesteps: '
 open(11,file='timesteps',action='read',status='old')
 read(11,*) total_timesteps
 close(11)

 do j = 1,ensemble_members_per_proc
    !send the state to da code with mpi_gatherv
    call mpi_gatherv(state_vector,state_dim,mpi_double_precision,state_vector&
         &,state_dim,state_dim,mpi_double_precision,cpl_root&
         &,cpl_mpi_comms(j),mpi_err)
 end do

 do j = 1,ensemble_members_per_proc
    !get the state back from da code with mpi_scatterv
    call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
         &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comms(j),mpi_err)
    !get the tag from the da code
    call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
    print*,'Received tag = ',tag
 end do
 print*,'state = ',state_vector


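 ! main loop: at every timestep send the current state to the DA code
 ! and receive the updated state and tag back, once per local ensemble
 ! member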
 do i = 1,total_timesteps
    print*,'Timestep = ',i

    do j = 1,ensemble_members_per_proc
       !send the state to da code with mpi_gatherv
       call mpi_gatherv(state_vector,state_dim,mpi_double_precision&
            &,state_vector&
            &,state_dim,state_dim,mpi_double_precision,cpl_root&
            &,cpl_mpi_comms(j),mpi_err)
    end do

    do j = 1,ensemble_members_per_proc
       !get the state back from da code with mpi_scatterv
       call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
            &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comms(j),mpi_err)
       !get the tag from the da code
       call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
       print*,'Received tag = ',tag
       print*,'state = ',state_vector
    end do
 end do


 call mpi_finalize(mpi_err)
 print*,'MINIMAL_MODEL_v5 finished nicely'
 print*,'my communicators were', cpl_mpi_comms(:)
end program minimal_model_v5