EMPIRE DA  v1.9.1
Data assimilation codes using EMPIRE communication
Lorenz63_empire_v2.f90
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Lorenz63_empire_v2.f90 Implements Lorenz 1963 model with EMPIRE coupling
!
!The MIT License (MIT)
!
!Copyright (c) 2014 Philip A. Browne
!
!Permission is hereby granted, free of charge, to any person obtaining a copy
!of this software and associated documentation files (the "Software"), to deal
!in the Software without restriction, including without limitation the rights
!to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
!copies of the Software, and to permit persons to whom the Software is
!furnished to do so, subject to the following conditions:
!
!The above copyright notice and this permission notice shall be included in all
!copies or substantial portions of the Software.
!
!THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
!IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
!FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
!AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
!LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
!OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
!SOFTWARE.
!
!Email: p.browne@reading.ac.uk
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

program lorenz63_v2
 implicit none
 include 'mpif.h'
 real(kind=kind(1.0D0)) :: t,sigma,rho,beta,dt,tstart,tstop
 real(kind=kind(1.0D0)), dimension(3) :: x,k1,k2,k3,k4
 integer :: mpi_err,mdl_rank,cpl_root,cpl_mpi_comm
 integer :: tag
 real(kind=kind(1.0d0)), dimension(0) :: send_null
 call initialise_mpi_v2(mdl_rank,cpl_root,cpl_mpi_comm)
 tstart =0.0d0 ; dt = 0.01d0 ; tstop = real(40*100)*dt
 sigma = 10.0d0 ; rho = 28.0d0 ; beta = 8.0d0 /3.0d0
 x = (/ 1.508870d0, -1.531271d0 , 25.46091d0 /)
 call empire_process_dimensions(3,cpl_root,cpl_mpi_comm)

 !send the state to da code with mpi_gatherv
 call mpi_gatherv(x,3,mpi_double_precision,x&
 &,3,3,mpi_double_precision,cpl_root&
 &,cpl_mpi_comm,mpi_err)

 !get the state back from da code with mpi_scatterv
 call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
 &,3,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
 !get the tag from the da code
 call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
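
 ! In the exchanges above the model process is never the root of
 ! cpl_mpi_comm, so the receive-side arguments of mpi_gatherv and the
 ! send-side arguments of mpi_scatterv (including send_null and the
 ! scalar counts and displacements) are ignored by MPI; only the DA
 ! code, as root, supplies meaningful values there. The broadcast tag
 ! tells the model how to proceed (see the select case inside the time
 ! loop below).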
2 continue
 t = tstart
 do; if ( t .ge. tstop -1.0d-10) exit
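 ! advance the state one step of length dt with the classical
 ! fourth-order Runge-Kutta scheme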
 k1 = f(x , sigma , rho , beta )
 k2 = f(x +0.5d0 * dt * k1 , sigma , rho , beta )
 k3 = f(x +0.5d0 * dt * k2 , sigma , rho , beta )
 k4 = f(x + dt * k3 , sigma , rho , beta )
 x = x + dt *( k1 + 2.0d0 *( k2 + k3 ) + k4 )/6.0d0

 !send the state to da code with mpi_gatherv
 call mpi_gatherv(x,3,mpi_double_precision,x&
 &,3,3,mpi_double_precision,cpl_root&
 &,cpl_mpi_comm,mpi_err)

 !get the state back from da code with mpi_scatterv
 call mpi_scatterv(send_null,0,0,mpi_double_precision,x&
 &,3,mpi_double_precision,cpl_root,cpl_mpi_comm,mpi_err)
 !get the tag from the da code
 call mpi_bcast(tag,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)

 t = t + dt
 print*,x(1),x(2),x(3)
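 ! the tag broadcast by the DA code controls the run:
 !   2 -> restart the integration from tstart
 !   3 -> finish and call mpi_finalize
 !   anything else -> continue time stepping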
 select case(tag)
 case(2)
 go to 2
 case(3)
 go to 3
 case default
 end select

 end do
3 continue
 call mpi_finalize(mpi_err)
contains
 function f (x , sigma , rho , beta )
 implicit none
 real(kind=kind(1.0D0)),intent(in),dimension (3) :: x
 real(kind=kind(1.0D0)),dimension(3) :: f
 real(kind=kind(1.0D0)),intent(in) :: sigma , rho , beta
 f = (/sigma *(x(2)-x(1)),x(1)*(rho-x(3)) -x(2),x(1)*x(2)-beta*x(3)/)
 end function f
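
 ! f evaluates the right-hand side of the Lorenz (1963) system
 !   dx/dt = sigma*(y - x)
 !   dy/dt = x*(rho - z) - y
 !   dz/dt = x*y - beta*z
 ! with the classical chaotic parameters sigma = 10, rho = 28 and
 ! beta = 8/3 set in the main program.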

 subroutine initialise_mpi_v2(mdl_rank,cpl_root,cpl_mpi_comm)
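 ! Sets up the EMPIRE coupling communicators. The model processes split
 ! away from the DA processes on mpi_comm_world (using colour da=0 on the
 ! model side), are grouped into particles of mdl_num_proc processes, and
 ! a final split builds cpl_mpi_comm, which joins this particle with the
 ! DA process that will assimilate it. On return, mdl_rank is this
 ! process's rank within its particle, cpl_root is the rank of the DA
 ! process in cpl_mpi_comm and cpl_mpi_comm is the coupling communicator.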

 implicit none

 include 'mpif.h'
 integer, intent(out) :: mdl_rank
 integer, intent(out) :: cpl_root
 integer, intent(out) :: cpl_mpi_comm

 integer, parameter :: mdl_num_proc=1
 integer :: mdl_mpi_comm

 integer :: mpi_err
 integer :: world_rank
 integer :: world_size

 integer :: temp_mdls_size
 integer :: temp_cpl_comm
 integer :: temp_mdls_comm
 integer :: temp_mdls_rank
 integer :: da
 integer :: i
 integer :: particle_rank
 integer :: nda
 integer :: nens
 integer :: first_ptcl
 integer :: final_ptcl
 integer :: null_mpi_comm

 logical :: msg=.true.

 call mpi_init(mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_init successful'
 else
 print*,'mpi_init unsuccessful'
 end if

 call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_rank successful'
 if(msg) print*,'world_rank = ',world_rank
 else
 print*,'mpi_comm_rank unsuccessful'
 end if

 call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_size successful'
 if(msg) print*,'world_size = ',world_size
 else
 print*,'mpi_comm_size unsuccessful'
 end if

 cpl_root = world_size-1
 if(msg) then
 print*,'rank = ',world_rank,' on mpi_comm_world which has &
 &size ',world_size
 end if

 da = 0

 call mpi_allreduce(mdl_num_proc,i,1,mpi_integer,mpi_max&
 &,mpi_comm_world,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_allreduce successful'
 if(msg) print*,'i = ',i
 else
 print*,'mpi_allreduce unsuccessful'
 end if

 call mpi_comm_split(mpi_comm_world,da,world_rank,temp_mdls_comm&
 &,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_split successful: temp_mdls_comm created'
 else
 print*,'mpi_comm_split unsuccessful: temp_mdls_comm not created'
 end if

 call mpi_comm_size(temp_mdls_comm,temp_mdls_size,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_size successful'
 if(msg) print*,'temp_mdls_size = ',temp_mdls_size
 else
 print*,'mpi_comm_size unsuccessful'
 end if

 if(mod(temp_mdls_size,mdl_num_proc) .ne. 0) then
 print*,'MINIMAL MODEL LAUNCH ERROR.'
 print*,'MUST LAUNCH A MULTIPLE OF ',mdl_num_proc,' copies of the &
 &model'
 stop
 end if

 nda = world_size-temp_mdls_size
 if(nda .lt. 1) then
 print*,'MINIMAL MODEL COMMS v2 ERROR: nda is less than 1.'
 print*,'Make sure you launch with a DA CODE'
 stop
 end if

 nens = temp_mdls_size/mdl_num_proc
 call mpi_comm_rank(temp_mdls_comm,temp_mdls_rank,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_rank successful'
 if(msg) print*,'temp_mdls_rank = ',temp_mdls_rank
 else
 print*,'mpi_comm_rank unsuccessful'
 end if

 particle_rank = temp_mdls_rank/mdl_num_proc

 call mpi_comm_split(temp_mdls_comm,particle_rank,temp_mdls_rank&
 &,mdl_mpi_comm,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_split successful: mdl_mpi_comm created'
 else
 print*,'mpi_comm_split unsuccessful: mdl_mpi_comm not created'
 end if

 call mpi_comm_rank(mdl_mpi_comm,mdl_rank,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_rank successful'
 if(msg) print*,'mdl_rank = ',mdl_rank
 else
 print*,'mpi_comm_rank unsuccessful'
 end if

 cpl_root = nda*particle_rank/nens
 if(msg) print*,'cpl_root = ',cpl_root

 if(cpl_root .lt. 0) then
 print*,'MINIMAL MODEL COMMS v2 ERROR: cpl_root is less than 0.'
 print*,'Make sure you launch with a DA CODE'
 stop
 end if

 call mpi_comm_split(mpi_comm_world,cpl_root,temp_mdls_rank,temp_cpl_comm,mpi_err)
 if(mpi_err .eq. 0) then
 if(msg) print*,'mpi_comm_split successful: temp_cpl_comm created'
 else
 print*,'mpi_comm_split unsuccessful: temp_cpl_comm not created'
 end if

 first_ptcl = ceiling(real(cpl_root)*real(nens)/real(nda))
 final_ptcl = ceiling(real(cpl_root+1)*real(nens)/real(nda))-1

 if(msg) print*,'range of particles = ',first_ptcl,final_ptcl

 do i = first_ptcl,final_ptcl
 if(msg) print*,'i = ',i,' particle_rank = ',particle_rank
 if(i .eq. particle_rank) then
 call mpi_comm_split(temp_cpl_comm,1,temp_mdls_rank&
 &,cpl_mpi_comm,mpi_err)
 if(msg) print*,'created cpl_mpi_comm'
 else
 if(msg) print*,'doing null splitting'
 call mpi_comm_split(temp_cpl_comm,0,temp_mdls_rank&
 &,null_mpi_comm,mpi_err)
 if(msg) print*,'created mpi_comm_null'
 call mpi_comm_free(null_mpi_comm,mpi_err)
 if(msg) print*,'freed up null_mpi_comm'
 end if

 end do

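 ! redefine cpl_root as the rank of the DA process within cpl_mpi_comm:
 ! the model processes of this particle occupy ranks 0 .. mdl_num_proc-1,
 ! so the DA process is expected at rank mdl_num_proc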
 cpl_root = mdl_num_proc

 end subroutine initialise_mpi_v2

 subroutine empire_process_dimensions(N,cpl_root,cpl_mpi_comm)
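 ! Sends the size of the model state vector (here N = 3) to the DA code
 ! by gathering it onto the root of cpl_mpi_comm (the DA process); the
 ! receive-side arguments are ignored on the model side.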
 implicit none
 include 'mpif.h'
 integer, intent(in) :: N
 integer, intent(in) :: cpl_root
 integer, intent(in) :: cpl_mpi_comm
 integer :: mpi_err
 logical :: msg = .true.

 if(msg) print*,'called empire_process_dimensions'
 call mpi_gather(n,1,mpi_integer,n&
 &,1,mpi_integer,cpl_root,cpl_mpi_comm,mpi_err)
 if(msg) print*,'finished the gather on cpl_mpi_comm for empire_process_dimensions'
 end subroutine empire_process_dimensions

end program lorenz63_v2
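
This model is intended to be run MPMD-style in the same MPI job as an EMPIRE
data assimilation executable, so that the model and DA processes share
mpi_comm_world and the communicator splits in initialise_mpi_v2 can pair each
particle with a DA process. As an illustrative sketch only (the executable
names and process counts are assumptions that depend on your build and
experiment), a launch might look like

  mpirun -np 4 ./lorenz63_v2 : -np 1 ./empire

subject to the checks in initialise_mpi_v2: the number of model processes must
be a multiple of mdl_num_proc and at least one DA process must be present.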