EMPIRE DA  v1.9.1
Data assimilation codes using EMPIRE communication
 All Classes Files Functions Variables Pages
minimal_model_v2.f90
Go to the documentation of this file.
1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2 ! minimal_model_comms just sets up communicators for empire for
3 ! testing purposes
4 !
5 !The MIT License (MIT)
6 !
7 !Copyright (c) 2015 Philip A. Browne
8 !
9 !Permission is hereby granted, free of charge, to any person obtaining a copy
10 !of this software and associated documentation files (the "Software"), to deal
11 !in the Software without restriction, including without limitation the rights
12 !to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 !copies of the Software, and to permit persons to whom the Software is
14 !furnished to do so, subject to the following conditions:
15 !
16 !The above copyright notice and this permission notice shall be included in all
17 !copies or substantial portions of the Software.
18 !
19 !THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 !IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 !FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 !AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 !LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 !OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 !SOFTWARE.
26 !
27 !Email: p.browne@reading.ac.uk
28 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
29 
!> minimal_model_comms_v2: sets up EMPIRE communicators for testing
!> purposes, then repeatedly exchanges a state vector with a DA code
!> over the coupling communicator.
!> (Reconstructed the `program` statement — original file line 30 — which
!> the scraped copy had displaced to after `end program`.)
program minimal_model_comms_v2
  implicit none

  include 'mpif.h'

  integer :: mpi_err                        ! return code of each MPI call
  integer :: world_rank                     ! rank on mpi_comm_world
  integer :: world_size                     ! size of mpi_comm_world
  integer, parameter :: mdl_num_proc=5      ! processes per model instance
  integer :: mdl_mpi_comm                   ! communicator within one model instance
  integer :: temp_mdls_comm                 ! communicator over all model processes
  integer :: temp_mdls_rank                 ! rank within temp_mdls_comm
  integer :: mdl_rank                       ! rank within mdl_mpi_comm
  integer :: da                             ! split colour: 0 marks a model process
  integer, parameter :: rk = kind(1.0d0)    ! double-precision real kind
  real(kind=rk), allocatable, dimension(:) :: state_vector
  integer :: state_dim                      ! local length of state_vector
  integer :: i
  integer :: cpl_root                       ! root rank for coupling collectives
  integer :: particle_rank                  ! ensemble member this process belongs to
  integer :: nda                            ! number of DA processes
  integer :: nens                           ! number of ensemble members
  integer :: temp_mdls_size                 ! total number of model processes
  integer :: temp_cpl_comm                  ! intermediate model+DA communicator
  integer :: first_ptcl                     ! first particle handled by this cpl_root
  integer :: final_ptcl                     ! last particle handled by this cpl_root
  integer :: cpl_mpi_comm                   ! model <-> DA coupling communicator
  integer :: null_mpi_comm                  ! throwaway communicator from null splits
  real(kind=rk), dimension(0) :: send_null  ! zero-length send buffer for scatterv
  integer :: total_timesteps                ! read from the 'timesteps' file
  integer :: tag                            ! control tag broadcast by the DA code
  integer :: unit_ts                        ! unit for the 'timesteps' file (was hard-coded 11)
  integer :: ios                            ! iostat for file access

  call mpi_init(mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_init successful'
  else
     print*,'mpi_init unsuccessful'
  end if


  call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'world_rank = ',world_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if


  call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'world_size = ',world_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if


  cpl_root = world_size-1
  print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
  da = 0


  ! handshake with the DA side: both sides agree on the max of mdl_num_proc
  call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
     print*,'i = ',i
  else
     print*,'mpi_allreduce unsuccessful'
  end if


  ! colour 0 (da) collects every model process into temp_mdls_comm
  call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_mdls_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
  end if

  call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'temp_mdls_size = ',temp_mdls_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if


  if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
     print*,'MINIMAL MODEL LAUNCH ERROR.'
     print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
          &model'
     stop
  end if


  ! the remaining mpi_comm_world processes must be the DA code
  nda = world_size-temp_mdls_size
  if(nda .lt. 1) then
     print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if


  nens = temp_mdls_size/mdl_num_proc
  call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'temp_mdls_rank = ',temp_mdls_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if


  ! consecutive groups of mdl_num_proc processes form one particle
  particle_rank = temp_mdls_rank/mdl_num_proc

  call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
       &,mdl_mpi_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: mdl_mpi_comm created'
  else
     print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
  end if


  call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'mdl_rank = ',mdl_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if


  ! which DA process is responsible for this particle
  cpl_root = nda*particle_rank/nens
  print*,'cpl_root = ',cpl_root

  if(cpl_root .lt. 0) then
     print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if

  call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank,temp_cpl_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_cpl_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
  end if


  ! range of particles assigned to this cpl_root
  ! (removed a dead integer-arithmetic version of these two bounds that
  ! was immediately overwritten by the ceiling-based formulas)
  first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
  final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1

  print*,'range of particles = ',first_ptcl,final_ptcl


  ! every particle in the range performs one split on temp_cpl_comm;
  ! only the split matching our own particle yields cpl_mpi_comm, the
  ! other splits produce throwaway communicators which are freed at once
  do i = first_ptcl,final_ptcl
     print*,'i = ',i,' particle_rank = ',particle_rank
     if(i .eq. particle_rank) then
        call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
             &,cpl_mpi_comm,mpi_err)
        print*,'created cpl_mpi_comm'
     else
        print*,'doing null splitting'
        call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
             &,null_mpi_comm,mpi_err)
        print*,'created mpi_comm_null'
        call mpi_comm_free(null_mpi_comm,mpi_err)
        print*,'freed up null_mpi_comm'
     end if
  end do

  ! within cpl_mpi_comm the DA process sits after the mdl_num_proc
  ! model ranks, so it is the root of all coupling collectives
  cpl_root = mdl_num_proc


  ! each model rank holds a different slice of the state
  select case(mdl_rank)
  case(0)
     state_dim = 1
  case(1)
     state_dim = 3
  case(2)
     state_dim = 2
  case(3)
     state_dim = 5
  case(4)
     state_dim = 1
  case default
     print*,'it was at this point, model realised, he fucked up'
     stop
  end select
  allocate(state_vector(state_dim))

  ! recognisable per-rank values: rank r gets 10*r + (1, 2, ...)
  state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)

  print*,'state_vector = '
  print*,state_vector

  ! tell the DA root how long each rank's slice is
  ! (recv arguments are ignored here: model ranks are never the root of
  ! cpl_mpi_comm, so passing state_dim twice is harmless)
  print*,'doing a gather on cpl_mpi_comm'
  call mpi_gather(state_dim,1,mpi_integer,state_dim&
       &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
  print*,'finished the gather on cpl_mpi_comm'


  print*,'Reading total_timesteps from file timesteps: '
  ! newunit= instead of a hard-coded unit number; fail with a clear
  ! message if the file is missing rather than a bare runtime error
  open(newunit=unit_ts,file='timesteps',action='read',status='old',iostat=ios)
  if(ios .ne. 0) then
     print*,'MINIMAL MODEL COMMS v2 ERROR: cannot open file timesteps'
     stop
  end if
  read(unit_ts,*) total_timesteps
  close(unit_ts)

  ! initial exchange before timestepping begins
  call exchange_with_da()

  do i = 1,total_timesteps
     print*,'Timestep = ',i
     call exchange_with_da()
  end do

  call mpi_finalize(mpi_err)
  print*,'MINIMAL_MODEL_COMMS_v2 finished nicely'

contains

  !> One coupling exchange: send the local state slice to the DA root,
  !> receive the (possibly updated) state back, then receive the control
  !> tag broadcast by the DA code. Factored out of two verbatim copies.
  subroutine exchange_with_da()
    !send the state to da code with mpi_gatherv
    ! (recvcounts/displs are ignored on these non-root ranks)
    call mpi_gatherv(state_vector,state_dim,mpi_double_precision,state_vector&
         &,state_dim,state_dim,mpi_double_precision,cpl_root&
         &,cpl_mpi_comm,mpi_err)

    !get the state back from da code with mpi_scatterv
    call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
         &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
    !get the tag from the da code
    call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
    print*,'Received tag = ',tag
  end subroutine exchange_with_da

end program minimal_model_comms_v2