EMPIRE DA v1.9.1
Data assimilation codes using EMPIRE communication
minimal_model_comms_v5.f90
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! minimal_model_comms_v5 just sets up the communicators for EMPIRE, for
! testing purposes
!
!The MIT License (MIT)
!
!Copyright (c) 2015 Philip A. Browne
!
!Permission is hereby granted, free of charge, to any person obtaining a copy
!of this software and associated documentation files (the "Software"), to deal
!in the Software without restriction, including without limitation the rights
!to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
!copies of the Software, and to permit persons to whom the Software is
!furnished to do so, subject to the following conditions:
!
!The above copyright notice and this permission notice shall be included in all
!copies or substantial portions of the Software.
!
!THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
!IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
!FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
!AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
!LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
!OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
!SOFTWARE.
!
!Email: p.browne@reading.ac.uk
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
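! Overview of the communicator setup performed below (the matching DA-side
! calls are assumed and are not shown in this file):
!   1. matched mpi_allreduce calls publish mdl_num_proc and
!      ensemble_members_per_proc across mpi_comm_world;
!   2. mpi_comm_split on the flag da separates model processes from DA
!      processes;
!   3. further splits build one communicator per model instance
!      (mdl_mpi_comm) and one coupling communicator per ensemble member
!      (cpl_mpi_comms), each shared with a DA process;
!   4. a final mpi_gather on each coupling communicator reports the local
!      state dimension to the DA side.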

program minimal_model_comms_v5
  implicit none

  include 'mpif.h'
  integer :: mpi_err
  integer :: world_rank
  integer :: world_size
  integer, parameter :: mdl_num_proc=5
  integer, parameter :: ensemble_members_per_proc=4
  integer :: mdl_mpi_comm
  integer :: temp_mdls_comm
  integer :: temp_mdls_rank
  integer :: mdl_rank
  integer :: da
  integer, parameter :: rk = kind(1.0d0)
  real(kind=rk), allocatable, dimension(:) :: state_vector
  integer :: state_dim
  integer :: i,j
  integer :: cpl_root
  integer :: instance_rank
  integer :: particle_rank
  integer :: nda
  integer :: nens
  integer :: temp_mdls_size
  integer :: temp_cpl_comm
  integer :: first_ptcl
  integer :: final_ptcl
!  integer :: cpl_mpi_comm
  integer, dimension(ensemble_members_per_proc) :: cpl_mpi_comms
  integer :: null_mpi_comm

  call mpi_init(mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_init successful'
  else
     print*,'mpi_init unsuccessful'
  end if


  call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'world_rank = ',world_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if


  call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'world_size = ',world_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if


  cpl_root = world_size-1
  print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
  da = 0
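
  ! da = 0 marks these processes as model tasks; the DA code is assumed to
  ! pass a nonzero colour to the matching mpi_comm_split further below,
  ! which separates the two sides into their own communicators.

  ! Handshake with the DA side: the two matched mpi_allreduce calls that
  ! follow publish mdl_num_proc and ensemble_members_per_proc to every
  ! process on mpi_comm_world (each receives the maximum contributed value).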

  call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
     print*,'i = ',i
  else
     print*,'mpi_allreduce unsuccessful'
  end if


  call mpi_allreduce(ensemble_members_per_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
     print*,'i = ',i
  else
     print*,'mpi_allreduce unsuccessful'
  end if

  call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_mdls_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
  end if

  call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'temp_mdls_size = ',temp_mdls_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if


  if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
     print*,'MINIMAL MODEL LAUNCH ERROR.'
     print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
          &model'
     stop
  end if


  nda = world_size-temp_mdls_size
  print*,'nda = ',nda
  if(nda .lt. 1) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: nda is less than 1.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if


  nens = (temp_mdls_size/mdl_num_proc)*ensemble_members_per_proc
  print*,'nens = ',nens
  call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'temp_mdls_rank = ',temp_mdls_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if

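  ! Each consecutive block of mdl_num_proc ranks forms one model instance;
  ! instance_rank is used as the colour for the per-instance split below.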
  instance_rank = temp_mdls_rank/mdl_num_proc

  call mpi_comm_split(temp_mdls_comm,instance_rank,temp_mdls_rank&
       &,mdl_mpi_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: mdl_mpi_comm created'
  else
     print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
  end if


  call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'mdl_rank = ',mdl_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if

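  ! Map this model instance to the DA process that will own its particles:
  ! nens/ensemble_members_per_proc is the number of model instances, so the
  ! instances are spread as evenly as possible over the nda DA ranks.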
  cpl_root = nda*instance_rank/(nens/ensemble_members_per_proc)
  print*,'cpl_root = ',cpl_root

  if(cpl_root .lt. 0) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: cpl_root is less than 0.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if

  first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
  final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1
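  ! Worked example: nens = 8 particles over nda = 3 DA processes gives the
  ! ranges 0-2, 3-5 and 6-7 for cpl_root = 0, 1 and 2 respectively.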

  print*,'range of particles = ',first_ptcl,final_ptcl


  !check to see if mod(n_mdl_instances,npfs) == 0:
  if(mod(temp_mdls_size/mdl_num_proc,nda) .ne. 0 ) then
     print*,'EMPIRE MESSAGE: SEQUENTIAL SPLITTING OF MPI_COMM_WORLD'
     j = 0
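
     ! Sequential splitting: every process must take part in all nens
     ! collective splits of mpi_comm_world, in the same order. This model
     ! instance joins with colour 1 for each particle it owns, and with
     ! colour 0 otherwise, freeing the unwanted communicator straight away.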
     do i = 0,nens-1
        print*,'i = ',i
        print*,'instance_rank*ensemble_members_per_proc = ',instance_rank*ensemble_members_per_proc
        print*,'(instance_rank+1)*ensemble_members_per_proc -1 = '&
             &,(instance_rank+1)*ensemble_members_per_proc -1
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
           j = j + 1
           call mpi_comm_split(mpi_comm_world,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(',j,') with number',cpl_mpi_comms(j)
        else
           print*,'doing null splitting'
           call mpi_comm_split(mpi_comm_world,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created null_mpi_comm'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if

     end do


  else

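     ! Grouped splitting: first split mpi_comm_world by cpl_root so that
     ! temp_cpl_comm holds one DA process together with the model instances
     ! coupled to it, then do one split per owned particle inside it.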
     print*,'doing first split based on cpl_root = ',cpl_root
     call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank&
          &,temp_cpl_comm,mpi_err)
     if(mpi_err .eq. 0) then
        print*,'mpi_comm_split successful: temp_cpl_comm created'
     else
        print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
     end if

     print*,'finished first split'

     j = 0

     do i = first_ptcl,final_ptcl
        print*,'i = ',i
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
           j = j + 1
           call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(',j,') with number',cpl_mpi_comms(j)
        else
           print*,'doing null splitting'
           call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created null_mpi_comm'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if

     end do

  end if

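  ! Within each cpl_mpi_comms(j) the model processes occupy ranks
  ! 0..mdl_num_proc-1 (the split key was temp_mdls_rank); the coupled DA
  ! process is assumed to take the next rank, so mdl_num_proc becomes the
  ! new coupling root.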
  cpl_root = mdl_num_proc

  select case(mdl_rank)
  case(0)
     state_dim = 1
  case(1)
     state_dim = 3
  case(2)
     state_dim = 2
  case(3)
     state_dim = 5
  case(4)
     state_dim = 1
  case default
     print*,'MINIMAL MODEL COMMS v5 ERROR: unexpected mdl_rank = ',mdl_rank
     stop
  end select
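  ! The per-rank sizes above (1,3,2,5,1) are arbitrary test values; across
  ! the mdl_num_proc = 5 model processes the distributed state has 12
  ! elements in total.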
  allocate(state_vector(state_dim))


  state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)
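  ! Each entry identifies its owner: model rank r holds the values
  ! 10*r+1, 10*r+2, ..., so gathered output is easy to check by eye.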

  print*,'state_vector = '
  print*,state_vector

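  ! Report the local state_dim to the DA process on each coupling
  ! communicator. mpi_gather only uses its receive arguments on the root
  ! (here the DA process at rank cpl_root), so reusing state_dim as the
  ! receive buffer on these non-root ranks is safe.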
  do j = 1,ensemble_members_per_proc
     print*,'doing a gather on cpl_mpi_comms(',j,')',cpl_mpi_comms(j),cpl_root
     call mpi_gather(state_dim,1,mpi_integer,state_dim&
          &,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
     print*,'finished the gather on cpl_mpi_comms(',j,')'
  end do

  call mpi_finalize(mpi_err)
  print*,'MINIMAL_MODEL_COMMS_v5 finished nicely'
end program minimal_model_comms_v5
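
! Example MPMD launch (the executable names here are hypothetical; the DA
! side must be started in the same mpirun so both share mpi_comm_world):
!
!   mpirun -np 10 ./minimal_model_comms_v5 : -np 2 ./empire
!
! This gives temp_mdls_size = 10, i.e. two model instances of
! mdl_num_proc = 5 processes each, nda = 2 DA processes and
! nens = 2*ensemble_members_per_proc = 8 ensemble members.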