  integer, parameter :: mdl_num_proc=5
  integer, parameter :: ensemble_members_per_proc=4
  integer :: mdl_mpi_comm
  integer :: temp_mdls_comm
  integer :: temp_mdls_rank
  integer, parameter :: rk = kind(1.0d0)
  real(kind=rk), allocatable, dimension(:) :: state_vector
  integer :: instance_rank
  integer :: particle_rank
  integer :: temp_mdls_size
  integer :: temp_cpl_comm
  integer, dimension(ensemble_members_per_proc) :: cpl_mpi_comms
  integer :: null_mpi_comm
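  ! Communicator layout built below: mpi_comm_world is first split into
  ! model processes and DA processes; the model processes are then split
  ! into instances of mdl_num_proc processes each (mdl_mpi_comm); finally
  ! one coupling communicator per ensemble member handled by this
  ! instance is stored in cpl_mpi_comms.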
  call mpi_init(mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_init successful'
  else
     print*,'mpi_init unsuccessful'
  end if
  call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'world_rank = ',world_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
  call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'world_size = ',world_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  cpl_root = world_size-1
  print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
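  ! The two allreduce calls below share mdl_num_proc and
  ! ensemble_members_per_proc with every rank on mpi_comm_world (the DA
  ! side included), using mpi_max as the reduction; the reduced value is
  ! stored in i and only the error code is checked here.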
  call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
  else
     print*,'mpi_allreduce unsuccessful'
  end if
  call mpi_allreduce(ensemble_members_per_proc,i,1,mpi_integer,mpi_max&
       &,mpi_comm_world,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_allreduce successful'
  else
     print*,'mpi_allreduce unsuccessful'
  end if
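  ! Split mpi_comm_world by the colour da (assumed to be set earlier in
  ! the file, outside this excerpt) so that the model processes end up
  ! together in temp_mdls_comm, separate from the DA processes.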
  call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
       &,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: temp_mdls_comm created'
  else
     print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
  end if
  call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_size successful'
     print*,'temp_mdls_size = ',temp_mdls_size
  else
     print*,'mpi_comm_size unsuccessful'
  end if
  if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
     print*,'MINIMAL MODEL LAUNCH ERROR.'
     print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
          &model'
     stop
  end if
  nda = world_size-temp_mdls_size
  if(nda .lt. 1) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: nda is less than 1.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
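  ! nda is the number of DA processes; nens is the total number of
  ! ensemble members, since each model instance (a group of mdl_num_proc
  ! processes) handles ensemble_members_per_proc members.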
  nens = (temp_mdls_size/mdl_num_proc)*ensemble_members_per_proc
  print*,'nens = ',nens
  call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'temp_mdls_rank = ',temp_mdls_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
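  ! Consecutive groups of mdl_num_proc model processes form one model
  ! instance; instance_rank identifies the instance, and the split below
  ! gives each instance its own internal communicator, mdl_mpi_comm.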
  instance_rank = temp_mdls_rank/mdl_num_proc
  call mpi_comm_split(temp_mdls_comm,instance_rank,temp_mdls_rank&
       &,mdl_mpi_comm,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_split successful: mdl_mpi_comm created'
  else
     print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
  end if
  call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
  if(mpi_err .eq. 0) then
     print*,'mpi_comm_rank successful'
     print*,'mdl_rank = ',mdl_rank
  else
     print*,'mpi_comm_rank unsuccessful'
  end if
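  ! cpl_root is the DA process this instance couples to: the instances are
  ! spread evenly over the nda DA processes
  ! (nens/ensemble_members_per_proc is the number of instances).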
  cpl_root = nda*instance_rank/(nens/ensemble_members_per_proc)
  print*,'cpl_root = ',cpl_root
  if(cpl_root .lt. 0) then
     print*,'MINIMAL MODEL COMMS v5 ERROR: cpl_root is less than 0.'
     print*,'Make sure you launch with a DA CODE'
     stop
  end if
  first_ptcl = -((-cpl_root)*nens/nda)
  final_ptcl = -((-cpl_root-1)*nens/nda)-1

  first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
  final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1
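  ! Both pairs of assignments partition the nens particles over the nda
  ! DA processes; the second, ceiling-based pair is the one that takes
  ! effect. For example, with nens = 8 and nda = 3 the ceiling form gives
  ! the ranges 0-2, 3-5 and 6-7 for cpl_root = 0, 1 and 2.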
  print*,'range of particles = ',first_ptcl,final_ptcl
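  ! Two ways of building the per-member coupling communicators follow. If
  ! the model instances do not divide evenly over the DA processes, every
  ! rank in mpi_comm_world splits once per particle (sequential
  ! splitting); otherwise mpi_comm_world is first split by cpl_root into
  ! temp_cpl_comm and the per-member splits are done on that smaller
  ! communicator.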
  if(mod(temp_mdls_size/mdl_num_proc,nda) .ne. 0 ) then
     print*,'EMPIRE MESSAGE: SEQUENTIAL SPLITTING OF MPI_COMM_WORLD'
     ! mpi_comm_split is collective over mpi_comm_world, so every process
     ! calls it once per particle, keeping only the communicators for the
     ! particles owned by this model instance
     j = 1
     do i = 0,nens-1
        print*,'i = ',i,' particle_rank = ',particle_rank
        print*,'instance_rank*ensemble_members_per_proc = ',instance_rank*ensemble_members_per_proc
        print*,'(instance_rank+1)*ensemble_members_per_proc -1 = '&
             &,(instance_rank+1)*ensemble_members_per_proc -1
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
           call mpi_comm_split(mpi_comm_world,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
           j = j + 1
        else
           print*,'doing null splitting'
           call mpi_comm_split(mpi_comm_world,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created mpi_comm_null'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if
     end do
  else
     print*,'doing first split based on pfrank',cpl_root
     call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank&
          &,temp_cpl_comm,mpi_err)
     if(mpi_err .eq. 0) then
        print*,'mpi_comm_split successful: temp_cpl_comm created'
     else
        print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
     end if
     print*,'finished first split'

     j = 1
     do i = first_ptcl,final_ptcl
        print*,'i = ',i,' particle_rank = ',particle_rank
        if(i .ge. instance_rank*ensemble_members_per_proc .and. &
             i .le. (instance_rank+1)*ensemble_members_per_proc -1) then
           call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
                &,cpl_mpi_comms(j),mpi_err)
           print*,'created cpl_mpi_comms(j) with number',cpl_mpi_comms(j)
           j = j + 1
        else
           print*,'doing null splitting'
           call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
                &,null_mpi_comm,mpi_err)
           print*,'created mpi_comm_null'
           call mpi_comm_free(null_mpi_comm,mpi_err)
           print*,'freed up null_mpi_comm'
        end if
     end do
  end if
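  ! Within each coupling communicator the mdl_num_proc model processes
  ! are presumed to be ordered before the DA process, so the DA process
  ! sits at rank mdl_num_proc and acts as the root for the exchanges
  ! below.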
  cpl_root = mdl_num_proc
  select case(mdl_rank)
  ! (case branches setting state_dim for ranks 0 to mdl_num_proc-1 are
  !  omitted from this excerpt)
  case default
     print*,'it was at this point, model realised, he fucked up'
     stop
  end select

  allocate(state_vector(state_dim))
  state_vector = 10*mdl_rank + (/ (real(i,rk), i = 1,state_dim) /)

  print*,'state_vector = '
  print*,state_vector
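  ! Handshake with the DA side: each model process sends its state_dim to
  ! the root of every coupling communicator it belongs to. On non-root
  ! ranks mpi_gather ignores the receive arguments, so state_dim is
  ! passed there only as a placeholder.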
  do j = 1,ensemble_members_per_proc
     print*,'doing a gather on cpl_mpi_comm(j)',cpl_mpi_comms(j),cpl_root
     call mpi_gather(state_dim,1,mpi_integer,state_dim&
          &,1,mpi_integer,cpl_root,cpl_mpi_comms(j),mpi_err)
     print*,'finished the gather on cpl_mpi_comms(j)'
  end do
  call mpi_finalize(mpi_err)
  print*,'MINIMAL_MODEL_COMMS_v5 finished nicely'
end program minimal_model_comms_v5