! Module variables: communicators and decomposition arrays used for EMPIRE coupling
integer :: cpl_mpi_comm                                    ! coupling communicator between the DA process and the models (single-communicator versions)
integer :: pf_mpi_comm                                     ! communicator between the DA (particle filter) processes
integer, allocatable, dimension(:) :: gblcount             ! number of particles owned by each DA process
integer, allocatable, dimension(:) :: gbldisp              ! offset of each DA process's block of particles
integer, allocatable, dimension(:) :: particles            ! particles owned by this DA process
integer, allocatable, dimension(:) :: cpl_mpi_comms        ! coupling communicators, one per particle owned by this process
integer, allocatable, dimension(:) :: state_dims           ! state dimension held by each process of a coupled model instance
integer, allocatable, dimension(:) :: state_displacements  ! offsets of those chunks within the full state vector
integer, allocatable, dimension(:) :: obs_dims             ! observation dimension held by each process of pf_member_comm
integer, allocatable, dimension(:) :: obs_displacements    ! offsets of those chunks within the full observation vector
integer :: mdl_num_proc                                    ! number of MPI processes used by each model instance
integer :: pf_member_comm                                  ! communicator between DA processes that share an ensemble member
integer :: pf_ens_comm                                     ! communicator across the ensemble on the DA side
integer :: pf_ens_rank                                     ! rank on pf_ens_comm
integer :: pf_ens_size                                     ! size of pf_ens_comm
integer :: pf_member_rank                                  ! rank on pf_member_comm
integer :: pf_member_size                                  ! size of pf_member_comm
! initialise_mpi: dispatch on the chosen comm_version
select case(comm_version)
! ... (cases for the implemented comm_versions omitted here)
case default
   write(emp_e,*) 'ERROR: comm_version ',comm_version,' not implemented.'
   write(emp_e,*) 'STOPPING.'
   stop
end select
! initialise_mpi_v1 (comm_version 1)
integer :: couple_colour
integer :: world_size

if(comm_version .eq. 2) then
   ! ...
end if

call mpi_init(mpi_err)
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
call mpi_comm_split(mpi_comm_world,da,world_rank,pf_mpi_comm,mpi_err)
call mpi_comm_rank(pf_mpi_comm,pfrank,mpi_err)
call mpi_comm_size(pf_mpi_comm,npfs,mpi_err)
call mpi_comm_split(mpi_comm_world,couple_colour,world_size,cpl_mpi_comm,mpi_err)
call mpi_comm_rank(cpl_mpi_comm,myrank,mpi_err)
call mpi_comm_size(cpl_mpi_comm,nens,mpi_err)

print*,'nens = ',nens
print*,'npfs = ',npfs

! number of particles owned by this DA process
count = ceiling(real((myrank-nens+1)*nens)/real(npfs)) -&
     & ceiling(real((myrank-nens)*nens)/real(npfs))

allocate(pf%particles(count))
allocate(   particles(count))

pf%particles = (/ (i, i = ceiling(real((myrank-nens)*nens)/real(npfs)),&
     & ceiling(real((myrank-nens+1)*nens)/real(npfs))-1) /)+1
particles = pf%particles

allocate(gblcount(npfs))
allocate(gbldisp(npfs))

call mpi_allgather(count,1,mpi_integer,gblcount,1,mpi_integer&
     &,pf_mpi_comm,mpi_err)

gbldisp(1) = 0
do i = 2,npfs
   gbldisp(i) = gbldisp(i-1) + gblcount(i-1)
end do

print*,'PF_rank = ',pfrank,' and I own particles ',pf%particles

pf_ens_comm = pf_mpi_comm
! initialise_mpi_v2 (comm_version 2): one coupling communicator per ensemble member
integer :: world_size
integer, parameter :: da = 1
integer, parameter :: rk = kind(1.0d0)
integer :: first_ptcl
integer :: final_ptcl
integer :: tmp_cpl_comm

call mpi_init(mpi_err)
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
print*,'EMPIRE: rank = ',world_rank,' on mpi_comm_world which has size ',world_size

! learn how many processes each model instance uses
call mpi_allreduce(0,mdl_num_proc,1,mpi_integer,mpi_max&
     &,mpi_comm_world,mpi_err)

if(mdl_num_proc .lt. 1) then
   write(emp_e,*) 'EMPIRE COMMS v2 ERROR: mdl_num_proc < 1'
   write(emp_e,*) 'mdl_num_proc = ',mdl_num_proc
   write(emp_e,*) 'THIS SUGGESTS YOU HAVE NOT LINKED TO A MODEL. STOP.'
   stop
end if
print*,'mdl_num_proc = ',mdl_num_proc

! split the DA processes from the model processes
call mpi_comm_split(mpi_comm_world,da,world_rank,pf_mpi_comm,mpi_err)
call mpi_comm_size(pf_mpi_comm,npfs,mpi_err)
call mpi_comm_rank(pf_mpi_comm,pfrank,mpi_err)

mdl_procs = world_size - npfs
print*,'npfs = ',npfs
print*,'mdl_procs = ',mdl_procs

nens = mdl_procs/mdl_num_proc
print*,'nens = ',nens

! particles assigned to this DA process
first_ptcl = ceiling(real(pfrank  )*real(nens)/real(npfs))
final_ptcl = ceiling(real(pfrank+1)*real(nens)/real(npfs)) - 1
particles = (/ (i, i = first_ptcl,final_ptcl) /)
print*,'range of particles = ',first_ptcl,final_ptcl
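The ceiling expressions above split the nens particles as evenly as possible over the npfs DA processes (block sizes differ by at most one). A small standalone illustration, not part of EMPIRE: with nens = 10 and npfs = 3 the owned ranges come out as 0 to 3, 4 to 6 and 7 to 9.

program partition_example
  implicit none
  integer, parameter :: nens = 10, npfs = 3
  integer :: pfrank, first_ptcl, final_ptcl
  do pfrank = 0,npfs-1
     first_ptcl = ceiling(real(pfrank  )*real(nens)/real(npfs))
     final_ptcl = ceiling(real(pfrank+1)*real(nens)/real(npfs)) - 1
     print*,'pfrank ',pfrank,' owns particles ',first_ptcl,' to ',final_ptcl
  end do
end program partition_example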
! create one coupling communicator per particle owned by this DA process
call mpi_comm_split(mpi_comm_world,pfrank,world_size+pfrank&
     &,tmp_cpl_comm,mpi_err)
print*,'split and created tmp_cpl_comm'

cnt = final_ptcl - first_ptcl + 1
if(cnt .lt. 1) then
   write(emp_e,*) 'EMPIRE ERROR: YOU HAVE LAUNCHED MORE EMPIRE DA PROCESSES'
   write(emp_e,*) 'EMPIRE ERROR: THAN MODELS. I AM REDUNDANT AND STOPPING.'
   write(emp_e,*) 'EMPIRE ERROR: RECONSIDER HOW YOU EXECUTE NEXT TIME. xx'
   stop
end if

allocate(cpl_mpi_comms(cnt))

do i = 1,cnt
   call mpi_comm_split(tmp_cpl_comm,1,world_size,cpl_mpi_comms(i)&
        &,mpi_err)
   write(*,'(A,i3.3,A)') 'created cpl_mpi_comms(',i,')'
end do

cpl_rank = mdl_num_proc
call mpi_comm_free(tmp_cpl_comm,mpi_err)

! gather the size of the state chunk stored on each model process
allocate(state_dims(mdl_num_proc+1))
allocate(state_displacements(mdl_num_proc+1))

print*,'doing a gather on cpl_mpi_comm'
do i = 1,cnt
   call mpi_gather(state_dim,1,mpi_integer,state_dims&
        &,1,mpi_integer,cpl_rank,cpl_mpi_comms(i),mpi_err)
end do
print*,'state_dims = ',state_dims

state_displacements = 0
do i = 2,mdl_num_proc
   state_displacements(i:) = state_displacements(i:) + state_dims(i-1)
end do
print*,'state_displacements = ',state_displacements

state_dim = sum(state_dims)
print*,'total state_dim = ',state_dim

allocate(gblcount(npfs))
allocate(gbldisp(npfs))

call mpi_allgather(cnt,1,mpi_integer,gblcount,1,mpi_integer&
     &,pf_mpi_comm,mpi_err)
if(mpi_err .eq. 0) then
   print*,'mpi_allgather successful: gblcount known on all da processes'
   print*,'gblcount = ',gblcount
else
   print*,'mpi_allgather unsuccessful'
end if

gbldisp(1) = 0
do i = 2,npfs
   gbldisp(i) = gbldisp(i-1) + gblcount(i-1)
end do
pf_ens_comm = pf_mpi_comm
pf%particles = particles + 1
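gblcount and gbldisp, filled just above, are the counts/displacements pair that describes how the ensemble is spread over the DA processes. A hedged sketch of how they might be used, continuing the nens = 10, npfs = 3 example; wt and wt_all are illustrative names for a per-particle quantity and its gathered counterpart, not EMPIRE variables.

! continuing the nens = 10, npfs = 3 example above, the allgather leaves
!   gblcount = (/ 4, 3, 3 /)   ! particles owned by DA ranks 0, 1, 2
!   gbldisp  = (/ 0, 4, 7 /)   ! offset of each rank's block of particles
! so one value per particle (wt, illustrative) could be collected onto DA
! rank 0 with a varying-count gather:
call mpi_gatherv(wt,cnt,mpi_double_precision,wt_all,gblcount,gbldisp&
     &,mpi_double_precision,0,pf_mpi_comm,mpi_err)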
! initialise_mpi_v3 (comm_version 3): as v2, but each ensemble member may be
! handled by several DA processes, giving both pf_member_comm and pf_ens_comm
integer :: world_size
integer, parameter :: da = 1
integer, parameter :: rk = kind(1.0d0)
integer :: first_ptcl
integer :: final_ptcl
integer :: tmp_cpl_comm
integer :: tmp_cpl_comm2
integer :: tmp_cpl_rank
integer :: tmp_cpl_colour2
integer :: pf_member_colour
integer :: pf_ens_colour
integer :: status(mpi_status_size)

call mpi_init(mpi_err)
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
print*,'EMPIRE: rank = ',world_rank,' on mpi_comm_world which has size ',world_size

call mpi_allreduce(0,mdl_num_proc,1,mpi_integer,mpi_max&
     &,mpi_comm_world,mpi_err)

if(mdl_num_proc .lt. 1) then
   write(emp_e,*) 'EMPIRE COMMS v3 ERROR: mdl_num_proc < 1'
   write(emp_e,*) 'mdl_num_proc = ',mdl_num_proc
   write(emp_e,*) 'THIS SUGGESTS YOU HAVE NOT LINKED TO A MODEL. STOP.'
   stop
end if
print*,'mdl_num_proc = ',mdl_num_proc

call mpi_comm_split(mpi_comm_world,da,world_rank,pf_mpi_comm,mpi_err)
call mpi_comm_size(pf_mpi_comm,npfs,mpi_err)
call mpi_comm_rank(pf_mpi_comm,pfrank,mpi_err)

! communicator between DA processes that share the same ensemble member
pf_member_colour = pfrank/mdl_num_proc
call mpi_comm_split(pf_mpi_comm,pf_member_colour,pfrank&
     &,pf_member_comm,mpi_err)
call mpi_comm_rank(pf_member_comm,pf_member_rank,mpi_err)

! communicator across the ensemble
pf_ens_colour = mod(pfrank,mdl_num_proc)
call mpi_comm_split(pf_mpi_comm,pf_ens_colour,pfrank,pf_ens_comm&
     &,mpi_err)
call mpi_comm_size(pf_ens_comm,pf_ens_size,mpi_err)
call mpi_comm_rank(pf_ens_comm,pf_ens_rank,mpi_err)

mdl_procs = world_size - npfs
print*,'npfs = ',npfs
print*,'mdl_procs = ',mdl_procs

nens = mdl_procs/mdl_num_proc
print*,'nens = ',nens

first_ptcl = ceiling(real(pf_member_colour  )*real(nens)/real(pf_ens_size))
final_ptcl = ceiling(real(pf_member_colour+1)*real(nens)/real(pf_ens_size)) - 1
particles = (/ (i, i = first_ptcl,final_ptcl) /)
print*,'range of particles = ',first_ptcl,final_ptcl

cnt = final_ptcl - first_ptcl + 1
if(cnt .lt. 1) then
   write(emp_e,*) 'EMPIRE ERROR: YOU HAVE LAUNCHED MORE EMPIRE DA PROCESSES'
   write(emp_e,*) 'EMPIRE ERROR: THAN MODELS. I AM REDUNDANT AND STOPPING.'
   write(emp_e,*) 'EMPIRE ERROR: RECONSIDER HOW YOU EXECUTE NEXT TIME. xx'
   stop
end if

call mpi_comm_split(mpi_comm_world,pf_member_colour,world_size+pf_member_colour&
     &,tmp_cpl_comm,mpi_err)
print*,'split and created tmp_cpl_comm'

call mpi_comm_rank(tmp_cpl_comm,tmp_cpl_rank,mpi_err)
tmp_cpl_colour2 = mod(tmp_cpl_rank,mdl_num_proc)
call mpi_comm_split(tmp_cpl_comm,tmp_cpl_colour2,tmp_cpl_rank&
     &,tmp_cpl_comm2,mpi_err)

allocate(cpl_mpi_comms(cnt))

do i = 1,cnt
   call mpi_comm_split(tmp_cpl_comm2,1,world_size,cpl_mpi_comms(i)&
        &,mpi_err)
   write(*,'(A,i3.3,A,i0)') 'created cpl_mpi_comms(',i,') ',cpl_mpi_comms(i)
end do

call mpi_comm_free(tmp_cpl_comm ,mpi_err)
call mpi_comm_free(tmp_cpl_comm2,mpi_err)

! receive the local state dimension from the models
do i = 1,cnt
   call mpi_recv(state_dim,1,mpi_integer,0,mpi_any_tag&
        &,cpl_mpi_comms(i),status,mpi_err)
end do
print*,'state_dim = ',state_dim

call mpi_allreduce(state_dim,state_dim_g,1,mpi_integer,mpi_sum&
     &,pf_member_comm,mpi_err)
print*,'total state vector size = ',state_dim_g

allocate(gblcount(pf_ens_size))
allocate(gbldisp(pf_ens_size))

call mpi_allgather(cnt,1,mpi_integer,gblcount,1,mpi_integer&
     &,pf_ens_comm,mpi_err)
if(mpi_err .eq. 0) then
   print*,'mpi_allgather successful: gblcount known on all da processes'
   print*,'gblcount = ',gblcount
else
   print*,'mpi_allgather unsuccessful'
end if

gbldisp(1) = 0
do i = 2,pf_ens_size
   gbldisp(i) = gbldisp(i-1) + gblcount(i-1)
end do

pf%particles = particles + 1
! initialise_mpi_v4 (comm_version 4): the model is run as a subroutine of
! EMPIRE, so nens is read from a namelist rather than deduced from the
! launched processes
integer :: world_size
integer, parameter :: rk = kind(1.0d0)
integer :: first_ptcl
integer :: final_ptcl
logical :: file_exists
namelist/comms_v4/nens

call mpi_init(mpi_err)
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
print*,'EMPIRE: rank = ',world_rank,' on mpi_comm_world which has size ',world_size

! read nens from pf_parameters.dat, falling back to empire.nml
inquire(file='pf_parameters.dat',exist=file_exists)
if(file_exists) then
   open(unit_nml,file='pf_parameters.dat',iostat=ios,action='read'&
        &,status='old')
   if(ios .ne. 0) then
      write(emp_e,*) 'Cannot open pf_parameters.dat'
      stop
   end if
else
   inquire(file='empire.nml',exist=file_exists)
   if(file_exists) then
      open(unit_nml,file='empire.nml',iostat=ios,action='read'&
           &,status='old')
      if(ios .ne. 0) then
         write(emp_e,*) 'Cannot open empire.nml'
         stop
      end if
   else
      write(emp_e,*) 'ERROR: cannot find pf_parameters.dat or empire.nml'
      stop
   end if
end if

read(unit_nml,nml=comms_v4)

if( nens .lt. 1 ) then
   write(emp_e,*) 'EMPIRE ERROR: __________initialise_mpi_v4_____________'
   write(emp_e,*) 'EMPIRE ERROR: nens is less than 1... nens = ',nens
   write(emp_e,*) 'EMPIRE ERROR: please correctly specify this in empire.nml'
   stop
end if

if (npfs .gt. nens) then
   write(emp_e,*) 'EMPIRE ERROR: __________initialise_mpi_v4_____________'
   write(emp_e,*) 'EMPIRE ERROR: npfs is greater than nens...'
   write(emp_e,*) 'EMPIRE ERROR: npfs = ',npfs,' nens = ',nens
   stop
end if

first_ptcl = ceiling(real(pfrank  )*real(nens)/real(npfs))
final_ptcl = ceiling(real(pfrank+1)*real(nens)/real(npfs)) - 1
particles = (/ (i, i = first_ptcl,final_ptcl) /)
print*,'range of particles = ',first_ptcl,final_ptcl

pf_mpi_comm = mpi_comm_world
pf_ens_comm = mpi_comm_world

cnt = final_ptcl - first_ptcl + 1

allocate(gblcount(npfs))
allocate(gbldisp(npfs))

do i = 1,npfs
   gblcount(i) = ceiling(real(i  )*real(nens)/real(npfs)) &
        &- ceiling(real(i-1)*real(nens)/real(npfs))
end do

gbldisp(1) = 0
do i = 2,npfs
   gbldisp(i) = gbldisp(i-1) + gblcount(i-1)
end do

pf%particles = particles + 1
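comm_version 4 therefore gets its ensemble size from the comms_v4 namelist read above, so the run is configured by a small file rather than by how many model executables are launched. A minimal empire.nml (or pf_parameters.dat) entry might look like the following; the value 32 is purely illustrative.

&comms_v4
   nens = 32
/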
! initialise_mpi_v5 (comm_version 5): as v2, but each model instance holds
! several ensemble members (nens_per_instance)
integer :: world_size
integer, parameter :: da = 1
integer, parameter :: rk = kind(1.0d0)
integer :: first_ptcl
integer :: final_ptcl
integer :: tmp_cpl_comm
integer :: n_mdl_instances
integer :: nens_per_instance

call mpi_init(mpi_err)
call mpi_comm_rank(mpi_comm_world,world_rank,mpi_err)
call mpi_comm_size(mpi_comm_world,world_size,mpi_err)
print*,'EMPIRE: rank = ',world_rank,' on mpi_comm_world which has size ',world_size

call mpi_allreduce(0,mdl_num_proc,1,mpi_integer,mpi_max&
     &,mpi_comm_world,mpi_err)

if(mdl_num_proc .lt. 1) then
   write(emp_e,*) 'EMPIRE COMMS v5 ERROR: mdl_num_proc < 1'
   write(emp_e,*) 'mdl_num_proc = ',mdl_num_proc
   write(emp_e,*) 'THIS SUGGESTS YOU HAVE NOT LINKED TO A MODEL. STOP.'
   stop
end if
print*,'mdl_num_proc = ',mdl_num_proc

call mpi_allreduce(0,nens_per_instance,1,mpi_integer,mpi_max&
     &,mpi_comm_world,mpi_err)
if(nens_per_instance .lt. 1) then
   write(emp_e,*) 'EMPIRE COMMS v5 ERROR: nens_per_instance < 1'
   write(emp_e,*) 'nens_per_instance = ',nens_per_instance
   write(emp_e,*) 'THIS SUGGESTS YOU HAVE NOT LINKED TO A MODEL. STOP.'
   stop
end if
print*,'nens_per_instance = ',nens_per_instance

call mpi_comm_split(mpi_comm_world,da,world_rank,pf_mpi_comm,mpi_err)
call mpi_comm_size(pf_mpi_comm,npfs,mpi_err)
call mpi_comm_rank(pf_mpi_comm,pfrank,mpi_err)

mdl_procs = world_size - npfs
print*,'npfs = ',npfs
print*,'mdl_procs = ',mdl_procs

n_mdl_instances = mdl_procs/mdl_num_proc
print*,'n_mdl_instances = ',n_mdl_instances
nens = n_mdl_instances*nens_per_instance
print*,'nens = ',nens

first_ptcl = ceiling(real(pfrank  )*real(nens)/real(npfs))
final_ptcl = ceiling(real(pfrank+1)*real(nens)/real(npfs)) - 1
particles = (/ (i, i = first_ptcl,final_ptcl) /)
print*,'range of particles = ',first_ptcl,final_ptcl

cnt = final_ptcl - first_ptcl + 1
if(cnt .lt. 1) then
   write(emp_e,*) 'EMPIRE ERROR: YOU HAVE LAUNCHED MORE EMPIRE DA PROCESSES'
   write(emp_e,*) 'EMPIRE ERROR: THAN MODELS. I AM REDUNDANT AND STOPPING.'
   write(emp_e,*) 'EMPIRE ERROR: RECONSIDER HOW YOU EXECUTE NEXT TIME. xx'
   stop
end if

allocate(cpl_mpi_comms(cnt))

print*,'you have got to this point:',mod(n_mdl_instances,npfs)
if(mod(n_mdl_instances,npfs) .ne. 0 ) then
   ! model instances cannot be divided evenly between the DA processes, so
   ! build the coupling communicators with sequential splits of mpi_comm_world
   print*,'EMPIRE V5: SEQUENTIAL SPLITTING ON MPI_COMM_WORLD...'
   print*,'nens = ',nens
   print*,'first_ptcl = ',first_ptcl
   print*,'final_ptcl = ',final_ptcl
   ! ... (loop over the whole ensemble, with j indexing this process's
   !      cpl_mpi_comms, omitted)
   if( i .ge. first_ptcl .and. i .le. final_ptcl) then
      call mpi_comm_split(mpi_comm_world,1,world_size,cpl_mpi_comms(j)&
           &,mpi_err)
      write(*,'(A,i3.3,A)') 'created cpl_mpi_comms(',j,')'
   else
      call mpi_comm_split(mpi_comm_world,0,world_size,tmp_cpl_comm,mpi_err)
      call mpi_comm_free(tmp_cpl_comm,mpi_err)
   end if
else
   print*,'doing first split based on pfrank',pfrank
   call mpi_comm_split(mpi_comm_world,pfrank,world_size,tmp_cpl_comm,mpi_err)
   print*,'finished first split mpi_err = ',mpi_err
   do i = 1,cnt
      call mpi_comm_split(tmp_cpl_comm,1,world_size,cpl_mpi_comms(i)&
           &,mpi_err)
      write(*,'(A,i3.3,A)') 'created cpl_mpi_comms(',i,')'
   end do
   call mpi_comm_free(tmp_cpl_comm,mpi_err)
end if
print*,'EMPIRE: all communicators made'

cpl_rank = mdl_num_proc

allocate(state_dims(mdl_num_proc+1))
allocate(state_displacements(mdl_num_proc+1))

print*,'doing a gather on cpl_mpi_comm'
do i = 1,cnt
   print*,'cpl_mpi_comms(i) = ',cpl_mpi_comms(i),cpl_rank
   call mpi_gather(state_dim,1,mpi_integer,state_dims&
        &,1,mpi_integer,cpl_rank,cpl_mpi_comms(i),mpi_err)
end do
print*,'state_dims = ',state_dims

state_displacements = 0
do i = 2,mdl_num_proc
   state_displacements(i:) = state_displacements(i:) + state_dims(i-1)
end do
print*,'state_displacements = ',state_displacements

state_dim = sum(state_dims)
print*,'total state_dim = ',state_dim

allocate(gblcount(npfs))
allocate(gbldisp(npfs))

call mpi_allgather(cnt,1,mpi_integer,gblcount,1,mpi_integer&
     &,pf_mpi_comm,mpi_err)
if(mpi_err .eq. 0) then
   print*,'mpi_allgather successful: gblcount known on all da processes'
   print*,'gblcount = ',gblcount
else
   print*,'mpi_allgather unsuccessful'
end if

gbldisp(1) = 0
do i = 2,npfs
   gbldisp(i) = gbldisp(i-1) + gblcount(i-1)
end do

pf_ens_comm = pf_mpi_comm
pf%particles = particles + 1
! send_all_models: send the state vectors out to the models
integer, intent(in) :: stateDim
integer, intent(in) :: nrhs
real(kind=kind(1.0d0)), intent(in), dimension(stateDim,nrhs) :: x
integer, intent(in) :: tag
real(kind=kind(1.0d0)), dimension(0) :: send_null

! (case labels below are inferred from the communicator used in each branch)
select case(comm_version)
case(1)
   do k = 1,nrhs
      particle = particles(k)
      call mpi_send(x(:,k),stateDim,mpi_double_precision&
           &,particle-1,tag,cpl_mpi_comm,mpi_err)
   end do
case(2,5)
   do k = 1,nrhs
      call mpi_scatterv(x(:,k),state_dims,state_displacements&
           &,mpi_double_precision,send_null,0,mpi_double_precision&
           &,cpl_rank,cpl_mpi_comms(k),mpi_err)
      call mpi_bcast(tag,1,mpi_integer,cpl_rank,cpl_mpi_comms(k)&
           &,mpi_err)
   end do
case(3)
   do k = 1,nrhs
      call mpi_send(x(:,k),stateDim,mpi_double_precision&
           &,0,tag,cpl_mpi_comms(k),mpi_err)
   end do
case(4)
   do k = 1,nrhs
      particle = particles(k)
      call model_as_subroutine_start(x(:,k),particle,tag)   ! inferred from the routine list below
   end do
case default
   write(emp_e,*) 'EMPIRE ERROR: THIS ISNT BACK TO THE FUTURE. empire_v&
        &ersion not yet implemented'
end select
! recv_all_models: receive the state vectors back from the models
integer, intent(in) :: stateDim
integer, intent(in) :: nrhs
real(kind=kind(1.0d0)), intent(out), dimension(stateDim,nrhs) :: x
integer, dimension(MPI_STATUS_SIZE) :: mpi_status
real(kind=kind(1.0d0)), dimension(0) :: send_null

! (case labels below are inferred from the communicator used in each branch)
select case(comm_version)
case(1)
   do k = 1,nrhs
      particle = particles(k)
      call mpi_recv(x(:,k), stateDim, mpi_double_precision, &
           particle-1, mpi_any_tag, cpl_mpi_comm, mpi_status, mpi_err)
   end do
case(2,5)
   do k = 1,nrhs
      call mpi_gatherv(send_null,0,mpi_double_precision,x(:,k)&
           &,state_dims,state_displacements,mpi_double_precision,cpl_rank&
           &,cpl_mpi_comms(k),mpi_err)
   end do
case(3)
   do k = 1,nrhs
      call mpi_recv(x(:,k), stateDim, mpi_double_precision, &
           0, mpi_any_tag, cpl_mpi_comms(k), mpi_status, mpi_err)
   end do
case(4)
   do k = 1,nrhs
      particle = particles(k)
      call model_as_subroutine_return(x(:,k),particle)   ! inferred from the routine list below
   end do
case default
   write(emp_e,*) 'EMPIRE ERROR: THIS ISNT BACK TO THE FUTURE PART 2. empire_v&
        &ersion not yet implemented'
end select
! irecv_all_models: non-blocking version of recv_all_models; the caller
! completes the returned requests later
integer, intent(in) :: stateDim
integer, intent(in) :: nrhs
real(kind=kind(1.0d0)), intent(out), dimension(stateDim,nrhs) :: x
integer, dimension(nrhs), intent(inout) :: requests
real(kind=kind(1.0d0)), dimension(0) :: send_null
integer, dimension(MPI_STATUS_SIZE) :: mpi_status

! (case labels below are inferred from the communicator used in each branch)
select case(comm_version)
case(1)
   do k = 1,nrhs
      particle = particles(k)
      call mpi_irecv(x(:,k), stateDim, mpi_double_precision, &
           particle-1, mpi_any_tag, cpl_mpi_comm, requests(k), mpi_err)
   end do
case(2,5)
   do k = 1,nrhs
      call mpi_gatherv(send_null,0,mpi_double_precision,x(:,k)&
           &,state_dims,state_displacements,mpi_double_precision,cpl_rank&
           &,cpl_mpi_comms(k),mpi_err)
      ! the gatherv above is blocking; the zero-size self isend/recv pair
      ! gives the caller a request that is already satisfied
      call mpi_isend(send_null,0,mpi_double_precision,cpl_rank&
           &,1,cpl_mpi_comms(k),requests(k),mpi_err)
      call mpi_recv(send_null,0,mpi_double_precision,cpl_rank&
           &,1,cpl_mpi_comms(k),mpi_status, mpi_err)
   end do
case(3)
   do k = 1,nrhs
      call mpi_irecv(x(:,k), stateDim, mpi_double_precision, &
           0, mpi_any_tag, cpl_mpi_comms(k), requests(k), mpi_err)
   end do
case(4)
   do k = 1,nrhs
      particle = particles(k)
      call model_as_subroutine_return(x(:,k),particle)   ! inferred from the routine list below
      requests(k) = mpi_request_null
   end do
case default
   write(emp_e,*) 'EMPIRE ERROR: THIS ISNT BACK TO THE FUTURE PART 3. empire_v&
        &ersion not yet implemented'
end select
! verify_sizes: record how the state and observation vectors are split across
! pf_member_comm for the chosen comm_version
integer :: mpi_err,i

select case(comm_version)
! ...
   state_dim_g = state_dim
   pf_ens_rank = pfrank
! ...
   if(allocated(obs_dims)) deallocate(obs_dims)
   allocate(obs_dims(pf_member_size))
   if(allocated(obs_displacements)) deallocate(obs_displacements)
   allocate(obs_displacements(pf_member_size))

   ! observation decomposition across the processes of pf_member_comm
   call mpi_allgather(obs_dim,1,mpi_integer,obs_dims,1&
        &,mpi_integer,pf_member_comm,mpi_err)

   obs_displacements(1) = 0
   if(pf_member_size .gt. 1) then
      do i = 2,pf_member_size
         obs_displacements(i) = obs_displacements(i-1) + obs_dims(i-1)
      end do
   end if

   ! state decomposition across the processes of pf_member_comm
   if(.not. allocated(state_dims)) then
      allocate(state_dims(pf_member_size))
      allocate(state_displacements(pf_member_size))
   end if

   call mpi_allgather(state_dim,1,mpi_integer,state_dims,1&
        &,mpi_integer,pf_member_comm,mpi_err)

   state_displacements(1) = 0
   if(pf_member_size .gt. 1) then
      do i = 2,pf_member_size
         state_displacements(i) = state_displacements(i-1) +&
              & state_dims(i-1)
      end do
   end if
case default
   print*,'EMPIRE ERROR: COMM VERSION IN VERIFY SIZES NOT IMPLEMENTED'
end select
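The obs_dims and obs_displacements arrays built here describe how the observation vector is spread over pf_member_comm. A hedged sketch of how they might be used to assemble the full vector on every process of that communicator, assuming the routine can see the module variables (for example by being contained in the same module); the routine and argument names are illustrative and y_full should have size sum(obs_dims).

subroutine assemble_observations(y_local, y_full)
  ! y_local : this process's slice of the observation vector
  ! y_full  : the assembled observation vector, identical on every process
  !           of pf_member_comm
  real(kind=kind(1.0d0)), intent(in),  dimension(:) :: y_local
  real(kind=kind(1.0d0)), intent(out), dimension(:) :: y_full
  integer :: mpi_err
  call mpi_allgatherv(y_local,size(y_local),mpi_double_precision,y_full&
       &,obs_dims,obs_displacements,mpi_double_precision,pf_member_comm,mpi_err)
end subroutine assemble_observations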
Related subroutines and modules:

subroutine user_mpi_recv(stateDim, nrhs, x)
subroutine user_mpi_send(stateDim, nrhs, x, tag)
subroutine send_all_models(stateDim, nrhs, x, tag): subroutine to send all the model states to the models
Module containing EMPIRE coupling data.
Module that stores the information about the output from EMPIRE.
subroutine deallocate_data
Module that stores the information about the timestepping process.
subroutine recv_all_models(stateDim, nrhs, x): subroutine to receive all the model states from the models after ...
subroutine initialise_mpi_v4: subroutine to initialise empire communicators when the model is to be a subroutine itself ...
subroutine initialise_mpi(mdl_id, cpl_root, cpl_mpi_comm)
subroutine initialise_mpi_v1: subroutine to make EMPIRE connections and save details into the pf_control module
subroutine user_initialise_mpi: subroutine to initialise MPI in a special way if the model is weird, like HadCM3 for example...
Module that stores the dimensions of the observation and state spaces.
subroutine initialise_mpi_v2(mdl_rank, cpl_root, cpl_mpi_comm)
subroutine initialise_mpi_v3: subroutine to initialise an even newer version of empire
Module to store the parameter comm_version to control the communication pattern that empire will use...
subroutine initialise_mpi_v5: subroutine to initialise the empire communication pattern similarly to v2 but with multiple ensemble members ...
subroutine model_as_subroutine_start(x, particle, tag): subroutine to increment the model when the model is a subroutine of empire. This is a comms_v4 routine...
Module pf_control holds all the information to control the main program
subroutine model_as_subroutine_return(x, particle): subroutine to initialise and return the state from the model
subroutine irecv_all_models(stateDim, nrhs, x, requests): subroutine to receive all the model states from the models after ...
subroutine user_mpi_irecv(stateDim, nrhs, x, requests)
subroutine timestep_data_set_no_analysis: subroutine to flag that the current ensemble is not an analysis
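As a closing illustration, the routines above combine into a simple driver loop on the DA side, with nrhs equal to the number of particles owned locally (cnt in the listings). This is a hedged sketch only: the timestep count, the tag value, the choice of initialise_mpi_v2 and the way the initial states reach x are all illustrative.

real(kind=kind(1.0d0)), allocatable, dimension(:,:) :: x
integer :: t, tag

call initialise_mpi_v2                        ! or whichever comm_version is in use
allocate(x(state_dim,cnt))                    ! one column per locally owned particle
! ... obtaining the initial states from the models is model-wrapper dependent ...

do t = 1,n_timesteps                          ! n_timesteps is illustrative
   tag = 1                                    ! illustrative instruction passed to the models
   call send_all_models(state_dim,cnt,x,tag)  ! push the states out
   call recv_all_models(state_dim,cnt,x)      ! collect the advanced states
   ! ... assimilation of observations / reweighting would happen here ...
end do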