  integer, parameter :: mdl_num_proc=16
  integer, parameter :: ensemble_members_per_proc=2
  integer :: mdl_mpi_comm
  integer :: temp_mdls_comm
  integer :: temp_mdls_rank
  integer, parameter :: rk = kind(1.0d0)
  real(kind=rk), allocatable, dimension(:) :: state_vector
  integer :: instance_rank
  integer :: particle_rank
  integer :: temp_mdls_size
  integer :: temp_cpl_comm
  integer, dimension(ensemble_members_per_proc) :: cpl_mpi_comms
  integer :: null_mpi_comm
  real(kind=rk), dimension(0) :: send_null
  integer :: total_timesteps
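
  ! What follows is the EMPIRE-style start-up sequence: initialise MPI,
  ! query the world layout, then carve mpi_comm_world into model and
  ! coupling communicators before exchanging state with the DA code.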
  call mpi_init(mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_init successful'
  else
     print*,'mpi_init unsuccessful'
  end if
  call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'world_rank = ',world_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'world_size = ',world_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  cpl_root = world_size-1
  print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size

  ! colour used in the world split below; 0 is assumed here to mark model
  ! processes, with the DA code splitting on a different colour
  da = 0
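
  ! The two allreduces that follow are collective over mpi_comm_world, so
  ! both the model and the DA processes take part; presumably this is how
  ! the DA side learns mdl_num_proc and ensemble_members_per_proc without
  ! any point-to-point traffic. On the model side the result i (the
  ! maximum) is only printed.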
  call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
     print*,'i = ',i
  else
     print*,'mpi_allreduce unsuccessful'
  end if
  call mpi_allreduce(ensemble_members_per_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
     print*,'i = ',i
  else
     print*,'mpi_allreduce unsuccessful'
  end if
  print*,'first split happening with the inputs: ', mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err
  call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err)
  print*,'first split happened with the outputs: ', mpi_comm_world&
       &,da,world_rank,temp_mdls_comm,mpi_err
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_mdls_comm created: ',temp_mdls_comm
  else
     print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
  end if
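
  ! After this split, temp_mdls_comm should contain only the processes that
  ! used colour da, i.e. every model process across all ensemble instances
  ! (assuming the DA code calls the matching split with a different colour).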
  call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'temp_mdls_size = ',temp_mdls_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
     print*,'MINIMAL MODEL LAUNCH ERROR.'
     print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
          &model'
     stop
  end if
  nda = world_size-temp_mdls_size
  if(nda .lt. 1) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: nda is less than 1.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
  nens = (temp_mdls_size/mdl_num_proc)*ensemble_members_per_proc
  print*,'nens = ',nens

  call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'temp_mdls_rank = ',temp_mdls_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  instance_rank = temp_mdls_rank/mdl_num_proc
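
  ! Integer division groups the model processes into instances of
  ! mdl_num_proc ranks each: with mdl_num_proc=16, temp_mdls_rank 0-15 maps
  ! to instance_rank 0, ranks 16-31 to instance_rank 1, and so on.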
  call mpi_comm_split(temp_mdls_comm,instance_rank,temp_mdls_rank&
       &,mdl_mpi_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: mdl_mpi_comm created ',mdl_mpi_comm
  else
     print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
  end if
  call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'mdl_rank = ',mdl_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  cpl_root = nda*instance_rank/(nens/ensemble_members_per_proc)
  print*,'cpl_root = ',cpl_root

  if(cpl_root .lt. 0) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: cpl_root is less than 0.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
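
  ! nens/ensemble_members_per_proc is the number of model instances, so the
  ! line above spreads the instances evenly over the nda DA processes: with
  ! nda=2 and four instances, instances 0-1 get cpl_root 0 and instances
  ! 2-3 get cpl_root 1. Note that cpl_root is reassigned further down to a
  ! rank within the coupling communicators.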
  first_ptcl = -((-cpl_root)*nens/nda)
  final_ptcl = -((-cpl_root-1)*nens/nda)-1

  ! the ceiling-based formulae below overwrite the integer-arithmetic
  ! versions above
  first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
  final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1

  print*,'range of particles = ',first_ptcl,final_ptcl
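
  ! Each DA process owns a contiguous block of particles. For example, with
  ! nens=4 and nda=2: cpl_root 0 owns particles 0-1 and cpl_root 1 owns
  ! particles 2-3.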
  if(mod(temp_mdls_size/mdl_num_proc,nda) .ne. 0 ) then
     print*,'EMPIRE MESSAGE: SEQUENTIAL SPLITTING OF MPI_COMM_WORLD'

     j = 1
     do i = 0,nens-1
        print*,'i = ',i,' particle_rank = ',particle_rank
        print*,'instance_rank*ensemble_members_per_proc = ',instance_rank*ensemble_members_per_proc
        print*,'(instance_rank+1)*ensemble_members_per_proc -1 = '&
             &,(instance_rank+1)*ensemble_members_per_proc -1
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then

           call mpi_comm_split(mpi_comm_world,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
           j = j+1
        else
           print*,'doing null splitting'
           call mpi_comm_split(mpi_comm_world,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created mpi_comm_null'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if
     end do
  else
     print*,'doing first split based on pfrank',cpl_root
     print*,'doing first split...key = ',temp_mdls_rank
     print*,mpi_comm_world,cpl_root,temp_mdls_rank&
          &,temp_cpl_comm,mpi_err
     call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank&
          &,temp_cpl_comm,mpi_err)
     if(mpi_err .eq. 0) then
        print*,'mpi_comm_split successful: temp_cpl_comm created ',temp_cpl_comm
     else
        print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
     end if
     print*,'finished first split'
     j = 1
     do i = first_ptcl,final_ptcl
        print*,'i = ',i,' particle_rank = ',particle_rank
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then

           call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
           j = j+1
        else
           print*,'doing null splitting'
           call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created mpi_comm_null'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if
     end do
  end if
  cpl_root = mdl_num_proc
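
  ! Assuming each coupling communicator holds the mdl_num_proc model
  ! processes of one instance (ranks 0 to mdl_num_proc-1, ordered by the
  ! temp_mdls_rank key) plus its DA process, the DA process sits at rank
  ! mdl_num_proc and acts as root for the collectives below.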
  select case(mdl_rank)
  ! (cases for mdl_rank 0-4 are elided in this listing; they presumably set
  !  rank-specific values of state_dim, which is allocated just below)
  case(5,6,7,8,9,10,11,12,13,14,15)
     ! (state_dim assignment elided in this listing)
  case default
     print*,'it was at this point, model realised, he fucked up'
     stop
  end select
  allocate(state_vector(state_dim))

  state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)

  print*,'state_vector = '
  print*,state_vector
  do j = 1,ensemble_members_per_proc
     print*,'doing a gather on cpl_mpi_comms(j)',cpl_mpi_comms(j),cpl_root
     call mpi_gather(state_dim,1,mpi_integer,state_dim&
          &,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
     print*,'finished the gather on cpl_mpi_comms(j)'
  end do
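
  ! This gather hands each model process's state_dim to the DA root on
  ! every coupling communicator. The receive arguments of mpi_gather are
  ! ignored on non-root ranks, so reusing state_dim as the receive buffer
  ! is harmless; presumably the DA code uses the gathered sizes to build
  ! the counts and displacements for the gatherv/scatterv exchanges below.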
  print*,'Reading total_timesteps from file timesteps: '
  open(11,file='timesteps',action='read',status='old')
  read(11,*) total_timesteps
  close(11)
  do j = 1,ensemble_members_per_proc
     ! send the state to the DA code
     call mpi_gatherv(state_vector,state_dim,mpi_double_precision,state_vector&
          &,state_dim,state_dim,mpi_double_precision,cpl_root&
          &,cpl_mpi_comms(j),mpi_err)
  end do
  do j = 1,ensemble_members_per_proc
     ! receive the state back from the DA code
     call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
          &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comms(j),mpi_err)
     ! receive the control tag from the DA code
     call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
     print*,'Received tag = ',tag
  end do
  print*,'state = ',state_vector
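
  ! This is the model half of the coupling exchange: mpi_gatherv pushes
  ! this process's slice of the state to the DA root, mpi_scatterv brings
  ! the (possibly updated) slice back, and mpi_bcast delivers a control
  ! tag. On non-root ranks the receive-side arguments of mpi_gatherv and
  ! the send-side arguments of mpi_scatterv are ignored, which is why the
  ! zero-length send_null array and the placeholder counts are safe here.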
  do i = 1,total_timesteps
     print*,'Timestep = ',i

     do j = 1,ensemble_members_per_proc
        ! send the state to the DA code
        call mpi_gatherv(state_vector,state_dim,mpi_double_precision&
             &,state_vector&
             &,state_dim,state_dim,mpi_double_precision,cpl_root&
             &,cpl_mpi_comms(j),mpi_err)
     end do

     do j = 1,ensemble_members_per_proc
        ! receive the state back from the DA code
        call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
             &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comms(j),mpi_err)
        ! receive the control tag from the DA code
        call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
        print*,'Received tag = ',tag
     end do
     print*,'state = ',state_vector
  end do
  call mpi_finalize(mpi_err)
  print*,'MINIMAL_MODEL_v5 finished nicely'
  print*,'my communicators were', cpl_mpi_comms(:)