  integer, parameter :: mdl_num_proc=5
  integer :: mdl_mpi_comm
  integer :: temp_mdls_comm
  integer :: temp_mdls_rank
  integer, parameter :: rk = kind(1.0d0)
  real(kind=rk), allocatable, dimension(:) :: state_vector
  integer :: particle_rank
  integer :: temp_mdls_size
  integer :: temp_cpl_comm
  integer :: cpl_mpi_comm
  integer :: null_mpi_comm
  real(kind=rk), dimension(0) :: send_null
  integer :: total_timesteps
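  ! Other variables referenced below (mdl_rank, state_dim, world_rank,
  ! world_size, mpi_err, da, i, cpl_root, nda, nens, first_ptcl, final_ptcl,
  ! tag) are assumed to be declared earlier in this program.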
  call mpi_init(mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_init successful'
  else
     print*,'mpi_init unsuccessful'
  end if
  call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'world_rank = ',world_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'world_size = ',world_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  cpl_root = world_size-1
  print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
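  ! Agree on the maximum number of processes per model instance across
  ! mpi_comm_world; presumably this acts as a handshake with the DA code.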
  call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
  else
     print*,'mpi_allreduce unsuccessful'
  end if
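  ! Split mpi_comm_world by the colour da, which is assumed to separate the
  ! model processes from the DA processes; temp_mdls_comm holds the models.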
  call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_mdls_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
  end if
  call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'temp_mdls_size = ',temp_mdls_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
     print*,'MINIMAL MODEL LAUNCH ERROR.'
     print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
          &model'
     stop
  end if
  nda = world_size-temp_mdls_size
  if(nda .lt. 1) then
     print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
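  ! Each ensemble member (particle) runs on mdl_num_proc model processes.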
  nens = temp_mdls_size/mdl_num_proc
  call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'temp_mdls_rank = ',temp_mdls_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  particle_rank = temp_mdls_rank/mdl_num_proc
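  ! Give each particle its own model communicator by splitting on particle_rank.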
  call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
       &,mdl_mpi_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: mdl_mpi_comm created'
  else
     print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
  end if
  call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'mdl_rank = ',mdl_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
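  ! Map this particle to one of the nda DA processes; cpl_root is the
  ! (provisional) index of the DA process this particle couples to.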
  cpl_root = nda*particle_rank/nens
  print*,'cpl_root = ',cpl_root

  if(cpl_root .lt. 0) then
     print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
  call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank,temp_cpl_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_cpl_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
  end if
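  ! Work out the range of particles that share this coupling root.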
  ! integer-division version, immediately superseded by the ceiling-based
  ! formulae below
  first_ptcl = -((-cpl_root)*nens/nda)
  final_ptcl = -((-cpl_root-1)*nens/nda)-1

  first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
  final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1

  print*,'range of particles = ',first_ptcl,final_ptcl
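  ! One split of temp_cpl_comm per particle owned by this coupling root; only
  ! the split matching this particle_rank is kept as cpl_mpi_comm, the others
  ! are freed immediately. The DA side is assumed to take part in each split.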
  do i = first_ptcl,final_ptcl
     print*,'i = ',i,' particle_rank = ',particle_rank
     if(i .eq. particle_rank) then
        call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
             &,cpl_mpi_comm,mpi_err)
        print*,'created cpl_mpi_comm'
     else
        print*,'doing null splitting'
        call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
             &,null_mpi_comm,mpi_err)
        print*,'created mpi_comm_null'
        call mpi_comm_free(null_mpi_comm,mpi_err)
        print*,'freed up null_mpi_comm'
     end if
  end do
  cpl_root = mdl_num_proc
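  ! Within cpl_mpi_comm the model processes occupy ranks 0 to mdl_num_proc-1,
  ! so the DA process is assumed to sit at rank mdl_num_proc.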
  select case(mdl_rank)
  ! per-rank settings of state_dim omitted here
  case default
     print*,'it was at this point the model realised something had gone wrong'
     print*,'unexpected mdl_rank = ',mdl_rank
     stop
  end select

  allocate(state_vector(state_dim))
  state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)

  print*,'state_vector = '
  print*,state_vector
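  ! Report the local state dimension to the DA root; the receive arguments of
  ! mpi_gather are ignored on non-root ranks, so state_dim can be reused there.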
  print*,'doing a gather on cpl_mpi_comm'
  call mpi_gather(state_dim,1,mpi_integer,state_dim&
       &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
  print*,'finished the gather on cpl_mpi_comm'
  print*,'Reading total_timesteps from file timesteps: '
  open(11,file='timesteps',action='read',status='old')
  read(11,*) total_timesteps
  close(11)
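  ! Initial exchange with the DA code: send the local state (gatherv), receive
  ! the possibly updated state back (scatterv; send_null is a zero-length send
  ! buffer on this side), then receive a control tag (bcast).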
  call mpi_gatherv(state_vector,state_dim,mpi_double_precision,state_vector&
       &,state_dim,state_dim,mpi_double_precision,cpl_root&
       &,cpl_mpi_comm,mpi_err)

  call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
       &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)

  call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
  print*,'Received tag = ',tag
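  ! The same gatherv/scatterv/bcast exchange is repeated once per timestep.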
  do i = 1,total_timesteps
     print*,'Timestep = ',i

     call mpi_gatherv(state_vector,state_dim,mpi_double_precision,state_vector&
          &,state_dim,state_dim,mpi_double_precision,cpl_root&
          &,cpl_mpi_comm,mpi_err)

     call mpi_scatterv(send_null,0,0,mpi_double_precision,state_vector&
          &,state_dim,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)

     call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
     print*,'Received tag = ',tag
  end do
  call mpi_finalize(mpi_err)
  print*,'MINIMAL_MODEL_COMMS_v2 finished nicely'

end program minimal_model_comms_v2