program lorenz96_hidden_v2
  use mpi
  implicit none
  integer :: n       ! state dimension, supplied via l96.nml
  integer :: t
  integer :: ios
  integer :: tag
  logical :: l96_exists
  real(kind=kind(1.0D0)) :: dt=2.5d-3
  real(kind=kind(1.0D0)), allocatable, dimension(:,:) :: x,k1,k2,k3,k4
  real(kind=kind(1.0d0)) :: F=8.0d0
  real(kind=kind(1.0d0)) :: alpha=0.5d0
  real(kind=kind(1.0d0)) :: delta=0.5d0
  real(kind=kind(1.0d0)) :: epsilon=2.5d-3
  real(kind=kind(1.0d0)) :: gamma=0.1d0
  integer :: total_timesteps=1000
  integer :: mpi_err,mdl_rank,cpl_root,cpl_mpi_comm
  real(kind=kind(1.0d0)), dimension(0) :: send_null

  call initialise_mpi_v2(mdl_rank,cpl_root,cpl_mpi_comm)  ! assumed placement of this call
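  ! l96.nml, when present, overrides the default model parameters set above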
  inquire(file='l96.nml', exist=l96_exists)
  if(l96_exists) then
    open(32,file='l96.nml',iostat=ios,action='read')
    if(ios .ne. 0) stop 'Cannot open l96.nml'
  end if
  allocate(x(n,3),k1(n,3),k2(n,3),k3(n,3),k4(n,3))
  call empire_process_dimensions(n,cpl_root,cpl_mpi_comm)  ! assumed placement of this call
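  ! hand the initial state to the DA code: each model task contributes its
  ! n-element slice of x(:,1) to a gatherv rooted on its coupled DA process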
  call mpi_gatherv(x(:,1),n,mpi_double_precision,x(:,1)&
       &,n,n,mpi_double_precision,cpl_root&
       &,cpl_mpi_comm,mpi_err)
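  ! receive the state handed back by the DA code; the model sends nothing
  ! here, hence the zero-length send_null buffer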
  call mpi_scatterv(send_null,0,0,mpi_double_precision,x(:,1)&
       &,n,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
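  ! the DA code broadcasts a control tag that steers the model run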
  call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
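  ! integrate forward with the classical fourth-order Runge-Kutta scheme:
  ! four evaluations of the tendency g, then a weighted update of x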
  do t = 1,total_timesteps
    k1 = g(x              , n, F, alpha, delta, epsilon, gamma)
    k2 = g(x + 0.5d0*dt*k1, n, F, alpha, delta, epsilon, gamma)
    k3 = g(x + 0.5d0*dt*k2, n, F, alpha, delta, epsilon, gamma)
    k4 = g(x + dt*k3      , n, F, alpha, delta, epsilon, gamma)
    x = x + dt*(k1 + 2.0d0*(k2 + k3) + k4)/6.0d0
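    ! after each step, exchange the state with the DA code as above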
    call mpi_gatherv(x(:,1),n,mpi_double_precision,x(:,1)&
         &,n,n,mpi_double_precision,cpl_root&
         &,cpl_mpi_comm,mpi_err)
    call mpi_scatterv(send_null,0,0,mpi_double_precision,x(:,1)&
         &,n,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
    call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
  end do
  call mpi_finalize(mpi_err)

contains
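  ! right-hand side of the coupled "hidden" Lorenz 96 system: column 1 holds
  ! the standard Lorenz 96 variables, column 2 a hidden field coupled in via
  ! delta, and column 3 (assumed here) the time derivative of column 2, so
  ! the hidden field obeys a damped wave equation forced by column 1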
  function g(X, N, F, alpha, delta, epsilon, gamma)
    real(kind=kind(1.0D0)), intent(in), dimension(N,3) :: X
    integer, intent(in) :: N
    real(kind=kind(1.0D0)), dimension(N,3) :: g
    real(kind=kind(1.0D0)), intent(in) :: F
    real(kind=kind(1.0D0)), intent(in) :: alpha
    real(kind=kind(1.0D0)), intent(in) :: delta
    real(kind=kind(1.0D0)), intent(in) :: epsilon
    real(kind=kind(1.0D0)), intent(in) :: gamma
    integer :: j
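    ! the domain is cyclic, so the wrap-around points 1, 2 and n are written
    ! out explicitly; the interior is handled by the loop below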
    g(1,1) = (1.0d0-delta)*(x(2,1)-x(n-1,1))*x(n,1) + delta*(x(n,1)*x(2,2)-x(n-1,1)*x(n,2)) - x(1,1) + f
    g(1,2) = x(1,3)  ! assumed: column 3 stores the time derivative of column 2
    g(1,3) = (-x(1,2) + alpha**2*(x(2,2)-2*x(1,2)+x(n,2)) + x(1,1) - gamma*epsilon**2*x(1,3))/(epsilon**2)
    g(2,1) = (1.0d0-delta)*(x(3,1)-x(n,1))*x(1,1) + delta*(x(1,1)*x(3,2)-x(n,1)*x(1,2)) - x(2,1) + f
    g(2,2) = x(2,3)  ! assumed, as above
    g(2,3) = (-x(2,2) + alpha**2*(x(3,2)-2*x(2,2)+x(1,2)) + x(2,1) - gamma*epsilon**2*x(2,3))/(epsilon**2)
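    ! interior points: the same stencil, no wrap-around needed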
    do j = 3, n-1  ! assumed bounds: j=1, 2 and n are the wrap-around cases handled explicitly
      g(j,1) = (1.0d0-delta)*(x(j+1,1)-x(j-2,1))*x(j-1,1) + delta*(x(j-1,1)*x(j+1,2)-x(j-2,1)*x(j-1,2)) - x(j,1) + f
      g(j,2) = x(j,3)  ! assumed, as above
      g(j,3) = (-x(j,2) + alpha**2*(x(j+1,2)-2*x(j,2)+x(j-1,2)) + x(j,1) - gamma*epsilon**2*x(j,3))/(epsilon**2)
    end do
    g(n,1) = (1.0d0-delta)*(x(1,1)-x(n-2,1))*x(n-1,1) + delta*(x(n-1,1)*x(1,2)-x(n-2,1)*x(n-1,2)) - x(n,1) + f
    g(n,2) = x(n,3)  ! assumed, as above
    g(n,3) = (-x(n,2) + alpha**2*(x(1,2)-2*x(n,2)+x(n-1,2)) + x(n,1) - gamma*epsilon**2*x(n,3))/(epsilon**2)
  end function g
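  ! build the communicators that couple this model to the EMPIRE DA code:
  ! split mpi_comm_world into model and DA tasks, group the model tasks into
  ! particles (ensemble members), then join each particle with the DA
  ! process responsible for it in cpl_mpi_comm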
  subroutine initialise_mpi_v2(mdl_rank, cpl_root, cpl_mpi_comm)
    integer, intent(out) :: mdl_rank
    integer, intent(out) :: cpl_root
    integer, intent(out) :: cpl_mpi_comm

    integer, parameter :: mdl_num_proc=1
    integer :: mdl_mpi_comm
    integer :: world_rank
    integer :: world_size
    integer :: temp_mdls_size
    integer :: temp_cpl_comm
    integer :: temp_mdls_comm
    integer :: temp_mdls_rank
    integer :: particle_rank
    integer :: first_ptcl
    integer :: final_ptcl
    integer :: null_mpi_comm
    integer :: i
    integer :: da=0   ! assumed: colour 0 marks model (not DA) processes in the world split
    integer :: nda    ! number of DA processes
    integer :: nens   ! ensemble size (number of particles)
    logical :: msg=.true.
    call mpi_init(mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_init successful'
    else
      print*,'mpi_init unsuccessful'
    end if
    call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_rank successful'
      if(msg) print*,'world_rank = ',world_rank
    else
      print*,'mpi_comm_rank unsuccessful'
    end if
    call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_size successful'
      if(msg) print*,'world_size = ',world_size
    else
      print*,'mpi_comm_size unsuccessful'
    end if
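    ! provisional coupling root: the last rank on mpi_comm_world; it is
    ! recomputed below once the particle layout is known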
    cpl_root = world_size-1
    print*,'rank = ',world_rank,' on mpi_comm_world which has size ',world_size
    call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
         &,mpi_comm_world,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_allreduce successful'
      if(msg) print*,'i = ',i
    else
      print*,'mpi_allreduce unsuccessful'
    end if
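    ! split mpi_comm_world by colour: all model tasks pass da and end up
    ! together in temp_mdls_comm, separated from the DA code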
    call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
         &,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_split successful: temp_mdls_comm created'
    else
      print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
    end if
    call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_size successful'
      if(msg) print*,'temp_mdls_size = ',temp_mdls_size
    else
      print*,'mpi_comm_size unsuccessful'
    end if
    if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
      print*,'MINIMAL MODEL LAUNCH ERROR.'
      print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the model'
      stop
    end if
    nda = world_size-temp_mdls_size
    if(nda .lt. 1) then
      print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
      print*,'Make sure you launch with a DA CODE'
      stop
    end if
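    ! each particle (ensemble member) consists of mdl_num_proc model processes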
    nens = temp_mdls_size/mdl_num_proc
    call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_rank successful'
      if(msg) print*,'temp_mdls_rank = ',temp_mdls_rank
    else
      print*,'mpi_comm_rank unsuccessful'
    end if
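    ! consecutive blocks of mdl_num_proc ranks form one particle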
    particle_rank = temp_mdls_rank/mdl_num_proc
    call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
         &,mdl_mpi_comm,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_split successful: mdl_mpi_comm created'
    else
      print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
    end if
    call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_rank successful'
      if(msg) print*,'mdl_rank = ',mdl_rank
    else
      print*,'mpi_comm_rank unsuccessful'
    end if
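    ! distribute the nens particles evenly over the nda DA processes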
    cpl_root = nda*particle_rank/nens
    if(msg) print*,'cpl_root = ',cpl_root
    if(cpl_root .lt. 0) then
      print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
      print*,'Make sure you launch with a DA CODE'
      stop
    end if
    call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank,temp_cpl_comm,mpi_err)
    if(mpi_err .eq. 0) then
      if(msg) print*,'mpi_comm_split successful: temp_cpl_comm created'
    else
      print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
    end if
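    ! particles first_ptcl..final_ptcl are served by the same DA process and
    ! therefore share temp_cpl_comm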
    first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
    final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1
    if(msg) print*,'range of particles = ',first_ptcl,final_ptcl
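    ! mpi_comm_split is collective over temp_cpl_comm, so every member must
    ! take part in the split for every particle in the range; splits that do
    ! not match our own particle go to a throwaway communicator that is
    ! freed immediately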
    do i = first_ptcl,final_ptcl
      if(msg) print*,'i = ',i,' particle_rank = ',particle_rank
      if(i .eq. particle_rank) then
        call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
             &,cpl_mpi_comm,mpi_err)
        if(msg) print*,'created cpl_mpi_comm'
      else
        if(msg) print*,'doing null splitting'
        call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
             &,null_mpi_comm,mpi_err)
        if(msg) print*,'created mpi_comm_null'
        call mpi_comm_free(null_mpi_comm,mpi_err)
        if(msg) print*,'freed up null_mpi_comm'
      end if
    end do
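    ! within cpl_mpi_comm the DA process sits after the mdl_num_proc model
    ! ranks, so the coupling root is rank mdl_num_proc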
    cpl_root = mdl_num_proc
  end subroutine initialise_mpi_v2
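  ! report the size of the model state to the DA code with a single gather
  ! on the coupling communicator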
  subroutine empire_process_dimensions(N, cpl_root, cpl_mpi_comm)
    integer, intent(in) :: N
    integer, intent(in) :: cpl_root
    integer, intent(in) :: cpl_mpi_comm
    logical :: msg = .true.
    if(msg) print*,'called empire_process_dimensions'
    call mpi_gather(n,1,mpi_integer,n&
         &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
    if(msg) print*,'finished the gather on cpl_mpi_comm for empire_process_dimensions'
  end subroutine empire_process_dimensions

end program lorenz96_hidden_v2