program lorenz96   ! program name assumed for this excerpt
  implicit none
  include 'mpif.h'

  real(kind=kind(1.0D0)) :: dt=1.0d-2
  real(kind=kind(1.0D0)), allocatable, dimension(:) :: x,k1,k2,k3,k4
  real(kind=kind(1.0d0)) :: F=8.0d0
  integer :: n=40                          ! default state dimension (assumed)
  integer :: total_timesteps=100
  integer :: t
  integer :: tag
  integer :: ios
  logical :: l96_exists
  namelist /l96/ n,F,dt,total_timesteps    ! group contents assumed from usage
  integer :: mpi_err,mdl_rank,cpl_root,cpl_mpi_comm
  real(kind=kind(1.0d0)), dimension(0) :: send_null
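  ! mdl_rank, cpl_root and cpl_mpi_comm are set by initialise_mpi_v2 below;
  ! send_null is a zero-length buffer: in the mpi_scatterv exchanges the
  ! model contributes no send data and only receives its own state back
  ! from the DA code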
  ! attach to the EMPIRE DA code and build the coupling communicators
  call initialise_mpi_v2(mdl_rank,cpl_root,cpl_mpi_comm)

  ! optionally override the defaults from the l96.nml namelist file
  inquire(file='l96.nml', exist=l96_exists)
  if(l96_exists) then
     open(32,file='l96.nml',iostat=ios,action='read')
     if(ios .ne. 0) stop 'Cannot open l96.nml'
     read(32,nml=l96)
     close(32)
  end if
  ! report the state-vector size to the DA code, then allocate storage
  call empire_process_dimensions(n,cpl_root,cpl_mpi_comm)
  allocate(x(n),k1(n),k2(n),k3(n),k4(n))
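  ! an initial condition is needed before the first exchange; the values
  ! below are an assumed placeholder (rest state F plus a small kick)
  x = F
  x(n/2) = F + 0.01d0

  ! initial handshake with the DA code: gatherv sends the state to the
  ! DA process (cpl_root), the zero-length scatterv receives the state
  ! back (possibly perturbed into an ensemble member), and the bcast
  ! receives a control tag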
  call mpi_gatherv(x,n,mpi_double_precision,x&
       &,n,n,mpi_double_precision,cpl_root&
       &,cpl_mpi_comm,mpi_err)
  call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
       &,n,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
  call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
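  ! main time loop: one classical fourth-order Runge-Kutta step per
  ! iteration, x_{n+1} = x_n + (dt/6)*(k1 + 2*k2 + 2*k3 + k4)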
  do t = 1,total_timesteps
     k1 = g(x, n, F)
     k2 = g(x + 0.5d0*dt*k1, n, F)
     k3 = g(x + 0.5d0*dt*k2, n, F)
     k4 = g(x + dt*k3, n, F)
     x = x + dt*( k1 + 2.0d0*( k2 + k3 ) + k4 )/6.0d0
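     ! after each step, repeat the exchange: send the state, receive the
     ! (possibly updated) state, and receive the tag with which the DA
     ! code steers the model (e.g. telling it when to stop)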
     call mpi_gatherv(x,n,mpi_double_precision,x&
          &,n,n,mpi_double_precision,cpl_root&
          &,cpl_mpi_comm,mpi_err)
     call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
          &,n,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
     call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
  end do
  call mpi_finalize(mpi_err)

contains
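  ! g evaluates the Lorenz '96 right-hand side
  !    dx_j/dt = ( x_{j+1} - x_{j-2} )*x_{j-1} - x_j + F
  ! on a cyclic domain, with the wrap-around handled by modulo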
  function g(x, N, F)
    integer, intent(in) :: N
    real(kind=kind(1.0D0)), intent(in), dimension(0:N-1) :: x
    real(kind=kind(1.0D0)), dimension(0:N-1) :: g
    real(kind=kind(1.0D0)), intent(in) :: F
    integer :: j
    do j = 0,N-1
       g(j) = ( x(modulo(j+1,N)) - x(modulo(j-2,N)) )*&
            &x(modulo(j-1,N)) - x(j) + F
    end do
  end function g
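  ! initialise_mpi_v2 carves the per-instance and coupling communicators
  ! out of mpi_comm_world: mdl_mpi_comm for the processes of this model
  ! instance, and cpl_mpi_comm shared with the DA process responsible for
  ! this ensemble member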
  subroutine initialise_mpi_v2(mdl_rank,cpl_root,cpl_mpi_comm)
    integer, intent(out) :: mdl_rank
    integer, intent(out) :: cpl_root
    integer, intent(out) :: cpl_mpi_comm

    integer, parameter :: mdl_num_proc=1
    integer :: mdl_mpi_comm
    integer :: mpi_err
    integer :: world_rank
    integer :: world_size
    integer :: da
    integer :: temp_mdls_size
    integer :: temp_cpl_comm
    integer :: temp_mdls_comm
    integer :: temp_mdls_rank
    integer :: nda
    integer :: nens
    integer :: particle_rank
    integer :: i
    integer :: first_ptcl
    integer :: final_ptcl
    integer :: null_mpi_comm
    logical :: msg=.true.
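    ! every MPI call below is error-checked; msg toggles the verbose
    ! progress messages, which help when debugging a coupled launch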
    call mpi_init(mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_init successful'
    else
       print*,'mpi_init unsuccessful'
    end if
    call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_rank successful'
       if(msg) print*,'world_rank = ',world_rank
    else
       print*,'mpi_comm_rank unsuccessful'
    end if
    call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_size successful'
       if(msg) print*,'world_size = ',world_size
    else
       print*,'mpi_comm_size unsuccessful'
    end if
    ! provisional value; refined below once the ensemble layout is known
    cpl_root = world_size-1
    print*,'rank = ',world_rank,' on mpi_comm_world which has &
         &size ',world_size
    ! agree on the largest per-instance process count across the whole run
    call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
         &,mpi_comm_world,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_allreduce successful'
       if(msg) print*,'i = ',i
    else
       print*,'mpi_allreduce unsuccessful'
    end if
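    ! split mpi_comm_world into model and DA processes: all model processes
    ! pass the same colour, and the DA code is assumed to make the matching
    ! split with a different colour, so temp_mdls_comm contains only the
    ! model processes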
    da = 0   ! model-side colour for the split (assumed; the DA side differs)
    call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
         &,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_split successful: temp_mdls_comm created'
    else
       print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
    end if
    call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_size successful'
       if(msg) print*,'temp_mdls_size = ',temp_mdls_size
    else
       print*,'mpi_comm_size unsuccessful'
    end if
    if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
       print*,'MINIMAL MODEL LAUNCH ERROR.'
       print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
            &model'
       stop
    end if
    ! the DA processes are whatever is left of mpi_comm_world
    nda = world_size-temp_mdls_size
    if(nda .lt. 1) then
       print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
       print*,'Make sure you launch with a DA CODE'
       stop
    end if
    nens = temp_mdls_size/mdl_num_proc
    call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_rank successful'
       if(msg) print*,'temp_mdls_rank = ',temp_mdls_rank
    else
       print*,'mpi_comm_rank unsuccessful'
    end if
    particle_rank = temp_mdls_rank/mdl_num_proc
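    ! ranks sharing a particle_rank (one block of mdl_num_proc processes)
    ! form one ensemble member; the split below gives each member its own
    ! model communicator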
    call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
         &,mdl_mpi_comm,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_split successful: mdl_mpi_comm created'
    else
       print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
    end if
    call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_rank successful'
       if(msg) print*,'mdl_rank = ',mdl_rank
    else
       print*,'mpi_comm_rank unsuccessful'
    end if
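    ! assign this ensemble member to one of the nda DA processes:
    ! member p is coupled to DA process floor(nda*p/nens)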
    cpl_root = nda*particle_rank/nens
    if(msg) print*,'cpl_root = ',cpl_root
    if(cpl_root .lt. 0) then
       print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
       print*,'Make sure you launch with a DA CODE'
       stop
    end if
    call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank&
         &,temp_cpl_comm,mpi_err)
    if(mpi_err .eq. 0) then
       if(msg) print*,'mpi_comm_split successful: temp_cpl_comm created'
    else
       print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
    end if
    first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
    final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1
    if(msg) print*,'range of particles = ',first_ptcl,final_ptcl
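    ! all processes sharing temp_cpl_comm must take part in the same
    ! sequence of mpi_comm_split calls, one per particle attached to this
    ! DA process; a model keeps the communicator from its own iteration
    ! (colour 1) and immediately frees the dummy ones (colour 0)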
    do i = first_ptcl,final_ptcl
       if(msg) print*,'i = ',i,' particle_rank = ',particle_rank
       if(i .eq. particle_rank) then
          call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
               &,cpl_mpi_comm,mpi_err)
          if(msg) print*,'created cpl_mpi_comm'
       else
          if(msg) print*,'doing null splitting'
          call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
               &,null_mpi_comm,mpi_err)
          if(msg) print*,'created mpi_comm_null'
          call mpi_comm_free(null_mpi_comm,mpi_err)
          if(msg) print*,'freed up null_mpi_comm'
       end if
    end do
    ! on cpl_mpi_comm the model ranks come first, so the DA process sits
    ! at rank mdl_num_proc
    cpl_root = mdl_num_proc
  end subroutine initialise_mpi_v2
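  ! empire_process_dimensions reports the model's state-vector size to the
  ! DA code via a gather on the coupling communicator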
  subroutine empire_process_dimensions(N,cpl_root,cpl_mpi_comm)
    integer, intent(in) :: N
    integer, intent(in) :: cpl_root
    integer, intent(in) :: cpl_mpi_comm
    integer :: mpi_err
    logical :: msg = .true.
    if(msg) print*,'called empire_process_dimensions'
    call mpi_gather(n,1,mpi_integer,n&
         &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
    if(msg) print*,'finished the gather on cpl_mpi_comm for &
         &empire_process_dimensions'
  end subroutine empire_process_dimensions

end program
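! A coupled run pairs this model with an EMPIRE DA executable in a single
! MPMD launch. A hypothetical example (executable names and process counts
! are assumptions, not taken from this listing):
!
!   mpirun -np 4 ./lorenz96 : -np 1 ./empire
!
! This would start four model copies (nens = 4 with mdl_num_proc = 1) and
! one DA process (nda = 1).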