! program linear: state and coupling variables
real(kind=kind(1.0D0)), dimension(:), allocatable :: x
integer, dimension(2) :: data
integer :: mpi_err,mdl_rank,cpl_root,cpl_mpi_comm
integer, dimension(MPI_STATUS_SIZE) :: mpi_status
real(kind=kind(1.0d0)), dimension(0) :: send_null
call mpi_recv(data,2,mpi_integer,cpl_root,mpi_any_tag,cpl_mpi_comm&
     &,mpi_status,mpi_err)
call mpi_gatherv(x,3,mpi_double_precision,x&
     &,3,3,mpi_double_precision,cpl_root&
     &,cpl_mpi_comm,mpi_err)

call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
     &,3,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)

call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
call mpi_gatherv(x,3,mpi_double_precision,x&
     &,3,3,mpi_double_precision,cpl_root&
     &,cpl_mpi_comm,mpi_err)

call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
     &,3,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)

call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
elseif(tag .eq. 2) then
   ! ...
elseif(tag .eq. 3) then
   ! ...
else
   print*,'Linear model error: unknown MPI_TAG: ',tag
end if

call mpi_finalize(mpi_err)
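The listing omits the control flow that ties these exchanges together. As a rough sketch only (the loop structure, the call to f and the meaning of the individual tag values are assumptions made here for illustration, not taken from the listing), the model side of the exchange is typically arranged like this:

   ! Sketch only: assumed shape of the tag-driven exchange loop.
   do
      x = f(3,x)                       ! advance the state (call assumed)
      call mpi_gatherv(x,3,mpi_double_precision,x&
           &,3,3,mpi_double_precision,cpl_root&
           &,cpl_mpi_comm,mpi_err)     ! hand the state to the DA code
      call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
           &,3,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err) ! receive it back
      call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
      if(tag .gt. 1) exit              ! assumed: tags other than 1 end the integration
   end do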
! function f(n, x): argument and result declarations
integer, intent(in) :: n
real(kind=kind(1.0D0)), intent(in), dimension(n) :: x
real(kind=kind(1.0D0)), dimension(n) :: f
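The body of f does not appear in the listing. For the "simple linear model" described at the end of this listing it would reduce to a single array assignment; the sketch below is illustrative only, and the coefficient is an assumption rather than the value used by the actual program:

   ! Sketch only: an illustrative linear map; the real coefficient is not shown in the listing.
   f = 0.5d0*x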
! subroutine initialise_mpi_v2(mdl_rank, cpl_root, cpl_mpi_comm)
integer, intent(out) :: mdl_rank
integer, intent(out) :: cpl_root
integer, intent(out) :: cpl_mpi_comm

integer, parameter :: mdl_num_proc=1
integer :: mdl_mpi_comm

integer :: world_rank
integer :: world_size

integer :: temp_mdls_size
integer :: temp_cpl_comm
integer :: temp_mdls_comm
integer :: temp_mdls_rank

integer :: particle_rank

integer :: first_ptcl
integer :: final_ptcl
integer :: null_mpi_comm

logical :: msg=.true.
call mpi_init(mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_init successful'
else
   print*,'mpi_init unsuccessful'
end if
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_rank successful'
   if(msg) print*,'world_rank = ',world_rank
else
   print*,'mpi_comm_rank unsuccessful'
end if
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_size successful'
   if(msg) print*,'world_size = ',world_size
else
   print*,'mpi_comm_size unsuccessful'
end if
cpl_root = world_size-1

print*,'rank = ',world_rank,' on mpi_comm_world which has &
call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
     &,mpi_comm_world,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_allreduce successful'
   if(msg) print*,'i = ',i
else
   print*,'mpi_allreduce unsuccessful'
end if
call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
     &,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_split successful: temp_mdls_comm created'
else
   print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
end if
call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_size successful'
   if(msg) print*,'temp_mdls_size = ',temp_mdls_size
else
   print*,'mpi_comm_size unsuccessful'
end if
if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
   print*,'MINIMAL MODEL LAUNCH ERROR.'
   print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
nda = world_size-temp_mdls_size
if(nda .lt. 1) then
   print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
   print*,'Make sure you launch with a DA CODE'
end if
nens = temp_mdls_size/mdl_num_proc
call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_rank successful'
   if(msg) print*,'temp_mdls_rank = ',temp_mdls_rank
else
   print*,'mpi_comm_rank unsuccessful'
end if
particle_rank = temp_mdls_rank/mdl_num_proc

call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
     &,mdl_mpi_comm,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_split successful: mdl_mpi_comm created'
else
   print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
end if
call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_rank successful'
   if(msg) print*,'mdl_rank = ',mdl_rank
else
   print*,'mpi_comm_rank unsuccessful'
end if
cpl_root = nda*particle_rank/nens
if(msg) print*,'cpl_root = ',cpl_root

if(cpl_root .lt. 0) then
   print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
   print*,'Make sure you launch with a DA CODE'
end if
call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank,temp_cpl_comm,mpi_err)
if(mpi_err .eq. 0) then
   if(msg) print*,'mpi_comm_split successful: temp_cpl_comm created'
else
   print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
end if
first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1

if(msg) print*,'range of particles = ',first_ptcl,final_ptcl
do i = first_ptcl,final_ptcl
   if(msg) print*,'i = ',i,' particle_rank = ',particle_rank
   if(i .eq. particle_rank) then
      call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
           &,cpl_mpi_comm,mpi_err)
      if(msg) print*,'created cpl_mpi_comm'
   else
      if(msg) print*,'doing null splitting'
      call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
           &,null_mpi_comm,mpi_err)
      if(msg) print*,'created mpi_comm_null'
      call mpi_comm_free(null_mpi_comm,mpi_err)
      if(msg) print*,'freed up null_mpi_comm'
   end if
end do
cpl_root = mdl_num_proc
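To make the mapping above concrete, take nens = 4 ensemble members and nda = 2 DA processes (illustrative numbers only). With integer division, particles 0 and 1 obtain cpl_root = 2*0/4 = 0 and 2*1/4 = 0, while particles 2 and 3 obtain cpl_root = 1; conversely, a model process assigned cpl_root = 0 loops over particles first_ptcl = ceiling(0*4/2) = 0 to final_ptcl = ceiling(1*4/2)-1 = 1, and one assigned cpl_root = 1 loops over particles 2 to 3, so each DA process couples to a contiguous block of particles. The final assignment cpl_root = mdl_num_proc presumably reflects that, inside the newly created cpl_mpi_comm, the mdl_num_proc model processes occupy ranks 0 to mdl_num_proc-1 and the coupled DA process sits at rank mdl_num_proc, but that ordering is not shown in the listing.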
! subroutine empire_process_dimensions(N, cpl_root, cpl_mpi_comm)
integer, intent(in) :: N
integer, intent(in) :: cpl_root
integer, intent(in) :: cpl_mpi_comm
logical :: msg = .true.

if(msg) print*,'called empire_process_dimensions'
call mpi_gather(n,1,mpi_integer,n&
     &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
if(msg) print*,'finished the gather on cpl_mpi_comm for empire_process_dimensions'
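Note that only the root of an mpi_gather (here the DA process at rank cpl_root in cpl_mpi_comm) uses the receive arguments, so passing n as both send and receive buffer is harmless on the model side: the call simply reports the model's state dimension N to the DA code.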
Procedures in this listing:

program linear: implements a simple linear model of no use to anyone but for testing and debugging purposes...
real(kind=kind(1.0d0)), dimension(n) function f(n, x)
subroutine initialise_mpi_v2(mdl_rank, cpl_root, cpl_mpi_comm)
subroutine empire_process_dimensions(N, cpl_root, cpl_mpi_comm)
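As a final orientation aid, here is a minimal sketch of how these entry points would be wired together on the model side; the ordering and the state dimension of 3 (inferred from the gatherv/scatterv counts above) are assumptions rather than lines taken from the source:

   ! Sketch only: assumed start-up and shut-down sequence for the model process.
   call initialise_mpi_v2(mdl_rank, cpl_root, cpl_mpi_comm)   ! join the coupling communicators
   allocate(x(3))                                             ! state dimension inferred, not shown in the listing
   call empire_process_dimensions(3, cpl_root, cpl_mpi_comm)  ! report the state size to the DA code
   ! ... tag-driven exchange loop as sketched earlier ...
   call mpi_finalize(mpi_err)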