Divide blocks among processors. Initial work on boundaries.

BLOCK STRUCTURE: MPI

 - divide the initial block structure among all processors and remove
   non-local data blocks

DATA I/O

 - store the total number of blocks (nblocks) and the number of data
   blocks (dblocks) in the output files

BOUNDARY CONDITIONS

 - initial work on the boundary conditions for the new block structure
Grzegorz Kowal, 2009-09-14 19:15:21 -03:00
commit ac188d32e4, parent bf5e332ce1
4 changed files with 272 additions and 271 deletions
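
The diff is easier to follow with the commit's block layout in mind: light metadata blocks (block_meta, chained through list_meta) live on every process, while the heavy payload (block_data, chained through list_data) is kept only for locally owned blocks. Below is a minimal sketch of the pieces the diff actually touches; any type or field name that does not appear in the diff itself is an assumption, not the real definition from the blocks module:

  module blocks_sketch
    implicit none
    integer, parameter :: ndims = 3, nsides = 2, nfaces = 2

    ! per-face neighbor record; the diff reads %id, %cpu and %ptr from it
    type :: neighbor_sketch
      integer :: id  = -1                                ! -1 marks an external boundary
      integer :: cpu = -1                                ! process that owns the neighbor
      type(block_meta_sketch), pointer :: ptr => null()  ! direct pointer to its metadata
    end type neighbor_sketch

    ! light metadata, present on all processes
    type :: block_meta_sketch
      integer :: id, cpu, level
      type(neighbor_sketch) :: neigh(ndims,nsides,nfaces)
      type(block_data_sketch), pointer :: data => null() ! freed for non-local blocks
      type(block_meta_sketch), pointer :: next => null() ! list_meta is walked via next
    end type block_meta_sketch

    ! heavy payload, kept only for local blocks
    type :: block_data_sketch
      real, allocatable :: u(:,:,:,:)                    ! variables, nv x im x jm x km
    end type block_data_sketch
  end module blocks_sketch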


@@ -43,7 +43,8 @@ module boundaries
   subroutine boundary
     use config  , only : im, jm, km
-    use blocks  , only : nv => nvars, block, plist, ndims, get_pointer, nblocks
+    use blocks  , only : nv => nvars, ndims, nsides, nfaces &
+                       , block_meta, list_meta, get_pointer, nblocks
     use error   , only : print_error
     use mpitools, only : ncpus, ncpu, msendi, mrecvi, msendf, mrecvf, mallreducemaxl
@@ -67,28 +68,28 @@ module boundaries
 ! local pointers
 !
-  type(block), pointer :: pblock, pneigh
+  type(block_meta), pointer :: pblock, pneigh
 !
 !-------------------------------------------------------------------------------
 !
-#ifdef MPI
-! allocate temporary arrays; since we have two blocks per boundary and 4 sides
-! of each block we need to increase the second dimension
+! #ifdef MPI
+! ! allocate temporary arrays; since we have two blocks per boundary and 4 sides
+! ! of each block we need to increase the second dimension
+! !
+!   allocate(cn  (0:ncpus-1))
+!   allocate(ll  (0:ncpus-1,0:ncpus-1))
+!   allocate(iblk(0:ncpus-1,2**(NDIMS-1)*NDIMS*nblocks,6))
 !
-  allocate(cn  (0:ncpus-1))
-  allocate(ll  (0:ncpus-1,0:ncpus-1))
-  allocate(iblk(0:ncpus-1,2**(NDIMS-1)*NDIMS*nblocks,6))
-! reset the local arrays storing blocks to exchange
-!
-  cn(:)       = 0
-  ll(:,:)     = 0
-  iblk(:,:,:) = 0
-#endif /* MPI */
+! ! reset the local arrays storing blocks to exchange
+! !
+!   cn(:)       = 0
+!   ll(:,:)     = 0
+!   iblk(:,:,:) = 0
+! #endif /* MPI */
 ! iterate over all blocks and perform boundary update
 !
-  pblock => plist
+  pblock => list_meta
   do while (associated(pblock))
 ! if the current block is a leaf...
@@ -98,70 +99,101 @@ module boundaries
 ! iterate over all neighbor blocks
 !
       do i = 1, ndims
-        do j = 1, 2
-          do k = 1, 2
+        do j = 1, nsides
+          do k = 1, nfaces
-            if (pblock%neigh(i,j,k)%id .eq. -1) then
-! neighbor is not associated, it means that we have non periodic boundary here
+! associate pointer to the neighbor
 !
-              if (k .eq. 1) &
-                call bnd_spec(pblock, i, j, k)
+            pneigh => pblock%neigh(i,j,k)%ptr
-            else
-#ifdef MPI
-! neighbor associated; exchange boundaries
+! check if neighbor is associated, if yes exchange boundaries, if not call
+! specific boundary conditions
 !
-              if (pblock%neigh(i,j,k)%cpu .eq. ncpu) then
-#endif /* MPI */
+            if (associated(pneigh)) then
-! neighbor is on the same CPU, update
+! check if the neighbor is on the same cpu
 !
-              pneigh => get_pointer(pblock%neigh(i,j,k)%id)
+              if (pneigh%cpu .eq. ncpu) then
-! calculate the difference of current and neighbor levels
+! calculate the difference of current and neighbor level
 !
                 dl = pblock%level - pneigh%level
-! depending on the level difference
-!
-                select case(dl)
-                case(-1) ! restriction and prolongation
-                  call bnd_rest(pblock, pneigh, i, j, k)
-                case(0) ! the same level, copying
-                  if (k .eq. 1) &
-                    call bnd_copy(pblock, pneigh, i, j, k)
-                case(1) ! prolongation is handled by bnd_rest
-                case default
-                  call print_error("boundaries::boundary", "Level difference unsupported!")
-                end select
+! ! depending on the level difference
+! !
+!               select case(dl)
+!               case(-1) ! restriction and prolongation
+!                 call bnd_rest(pblock, pneigh, i, j, k)
+!               case(0) ! the same level, copying
+!                 if (k .eq. 1) &
+!                   call bnd_copy(pblock, pneigh, i, j, k)
+!               case(1) ! prolongation is handled by bnd_rest
+!               case default
+!                 call print_error("boundaries::boundary", "Level difference unsupported!")
+!               end select
+              else
+              endif
+! #ifdef MPI
+! ! neighbor associated; exchange boundaries
+! !
+!           if (pblock%neigh(i,j,k)%cpu .eq. ncpu) then
+! #endif /* MPI */
+!
+! ! neighbor is on the same CPU, update
+! !
+!             pneigh => get_pointer(pblock%neigh(i,j,k)%id)
+!
+! ! calculate the difference of current and neighbor levels
+! !
+!             dl = pblock%level - pneigh%level
+!
+! ! depending on the level difference
+! !
+!             select case(dl)
+!             case(-1) ! restriction and prolongation
+!               call bnd_rest(pblock, pneigh, i, j, k)
+!             case(0) ! the same level, copying
+!               if (k .eq. 1) &
+!                 call bnd_copy(pblock, pneigh, i, j, k)
+!             case(1) ! prolongation is handled by bnd_rest
+!             case default
+!               call print_error("boundaries::boundary", "Level difference unsupported!")
+!             end select
+!
+! #ifdef MPI
+!           else
+!
+! ! in the array 'info' we store IDs of all blocks which have to be updated from
+! ! the blocks laying on the other processors
+! !
+! ! get the processor number of neighbor
+! !
+!             p = pblock%neigh(i,j,k)%cpu
+!
+! ! increase the number of blocks to retrieve from that CPU
+! !
+!             cn(p) = cn(p) + 1
+!
+! ! fill out the info array
+! !
+!             iblk(p,cn(p),1) = pblock%id              ! 1: local block ID
+!             iblk(p,cn(p),2) = pblock%level           ! 2: local block level
+!             iblk(p,cn(p),3) = pblock%neigh(i,j,k)%id ! 3: neighbor block ID
+!             iblk(p,cn(p),4) = i                      ! 4: directions of boundary
+!             iblk(p,cn(p),5) = j                      ! 5: side at the boundary
+!             iblk(p,cn(p),6) = k                      ! 6: part of the boundary
+!
+!           endif
+! #endif /* MPI */
-#ifdef MPI
             else
-! in the array 'info' we store IDs of all blocks which have to be updated from
-! the blocks laying on the other processors
+! neighbor is not associated, it means that we have non periodic boundary here
 !
-! get the processor number of neighbor
-!
-              p = pblock%neigh(i,j,k)%cpu
-! increase the number of blocks to retrieve from that CPU
-!
-              cn(p) = cn(p) + 1
-! fill out the info array
-!
-              iblk(p,cn(p),1) = pblock%id              ! 1: local block ID
-              iblk(p,cn(p),2) = pblock%level           ! 2: local block level
-              iblk(p,cn(p),3) = pblock%neigh(i,j,k)%id ! 3: neighbor block ID
-              iblk(p,cn(p),4) = i                      ! 4: directions of boundary
-              iblk(p,cn(p),5) = j                      ! 5: side at the boundary
-              iblk(p,cn(p),6) = k                      ! 6: part of the boundary
-            endif
-#endif /* MPI */
+!           if (k .eq. 1) &
++!             call bnd_spec(pblock, i, j, k)
             endif
           end do
@@ -176,162 +208,162 @@ module boundaries
     end do
-#ifdef MPI
-! TODO: 1) update info globally, write an MPI subroutine to sum the variable
-!          'info' over all processes
-!       2) then iterate over all source and destination processes and send/receive
-!          blocks
-!       3) after receiving the block call bnd_copy, bnd_rest, or bnd_prol to update
-!          the boundary of destination block
+! #ifdef MPI
+! ! TODO: 1) update info globally, write an MPI subroutine to sum the variable
+! !          'info' over all processes
+! !       2) then iterate over all source and destination processes and send/receive
+! !          blocks
+! !       3) after receiving the block call bnd_copy, bnd_rest, or bnd_prol to update
+! !          the boundary of destination block
+! !
+!   do i = 0, ncpus-1
+!     if (cn(i) .gt. 0) then
+!       allocate(ibuf(cn(i),1))
+!       l = 1
+!       ibuf(1,1) = iblk(i,1,1)
+!       do p = 2, cn(i)
+!         lf = .true.
+!         do k = 1, l
+!           lf = lf .and. (iblk(i,p,1) .ne. ibuf(k,1))
+!         end do
 !
-  do i = 0, ncpus-1
-    if (cn(i) .gt. 0) then
-      allocate(ibuf(cn(i),1))
-      l = 1
-      ibuf(1,1) = iblk(i,1,1)
-      do p = 2, cn(i)
-        lf = .true.
-        do k = 1, l
-          lf = lf .and. (iblk(i,p,1) .ne. ibuf(k,1))
-        end do
-        if (lf) then
-          l = l + 1
-          ibuf(l,1) = iblk(i,p,1)
-        endif
-      end do
-      ll(ncpu,i) = l
-      deallocate(ibuf)
-    endif
-  end do
-! update number of blocks across all processes
+!         if (lf) then
+!           l = l + 1
+!           ibuf(l,1) = iblk(i,p,1)
+!         endif
+!       end do
+!       ll(ncpu,i) = l
+!       deallocate(ibuf)
+!     endif
+!   end do
 !
-  call mallreducemaxl(size(ll),ll)
-!  if (ncpu .eq. 0) print *, ll
-! allocate buffer for IDs and levels
+! ! update number of blocks across all processes
+! !
+!   call mallreducemaxl(size(ll),ll)
 !
-  allocate(ibuf(maxval(ll),2))
-  do i = 0, ncpus-1
-    do j = 0, ncpus-1
-      if (ll(i,j) .gt. 0) then
-! get the tag for communication
+! !  if (ncpu .eq. 0) print *, ll
 !
-        itag = 10*(i * ncpus + j) + 1111
-! allocate space for variables
+! ! allocate buffer for IDs and levels
+! !
+!   allocate(ibuf(maxval(ll),2))
 !
-        allocate(rbuf(ll(i,j),nv,im,jm,km))
-! if i == ncpu we are sending the data
+!   do i = 0, ncpus-1
+!     do j = 0, ncpus-1
+!       if (ll(i,j) .gt. 0) then
 !
-        if (i .eq. ncpu) then
-! find all blocks to send from this process
+! ! get the tag for communication
+! !
+!         itag = 10*(i * ncpus + j) + 1111
 !
-          l = 1
-          ibuf(l,1:2) = iblk(j,1,1:2)
-          do p = 2, cn(j)
-            lf = .true.
-            do k = 1, l
-              lf = lf .and. (iblk(j,p,1) .ne. ibuf(k,1))
-            end do
-            if (lf) then
-              l = l + 1
-              ibuf(l,1:2) = iblk(j,p,1:2)
-            endif
-          end do
-! send block IDs and levels
+! ! allocate space for variables
+! !
+!         allocate(rbuf(ll(i,j),nv,im,jm,km))
 !
-          l = ll(i,j)
-          call msendi(size(ibuf(1:l,:)), j, itag, ibuf(1:l,:))
-! fill the buffer with data
+! ! if i == ncpu we are sending the data
+! !
+!         if (i .eq. ncpu) then
 !
-          do l = 1, ll(i,j)
-            pblock => get_pointer(ibuf(l,1))
-            rbuf(l,:,:,:,:) = pblock%u(:,:,:,:)
-          end do
-! send data
+! ! find all blocks to send from this process
+! !
+!           l = 1
+!           ibuf(l,1:2) = iblk(j,1,1:2)
+!           do p = 2, cn(j)
+!             lf = .true.
+!             do k = 1, l
+!               lf = lf .and. (iblk(j,p,1) .ne. ibuf(k,1))
+!             end do
+!             if (lf) then
+!               l = l + 1
+!               ibuf(l,1:2) = iblk(j,p,1:2)
+!             endif
+!           end do
 !
-          call msendf(size(rbuf), j, itag+1, rbuf)
-        endif
-! if j == ncpu we are receiving the data
+! ! send block IDs and levels
+! !
+!           l = ll(i,j)
+!           call msendi(size(ibuf(1:l,:)), j, itag, ibuf(1:l,:))
 !
-        if (j .eq. ncpu) then
-! receive block IDs and levels
+! ! fill the buffer with data
+! !
+!           do l = 1, ll(i,j)
+!             pblock => get_pointer(ibuf(l,1))
 !
-          l = ll(i,j)
-          call mrecvi(size(ibuf(1:l,:)), i, itag, ibuf(1:l,:))
-! receive data
+!             rbuf(l,:,:,:,:) = pblock%u(:,:,:,:)
+!           end do
 !
-          call mrecvf(size(rbuf(1:l,:,:,:,:)), i, itag+1, rbuf(1:l,:,:,:,:))
-! iterate over all blocks
+! ! send data
+! !
+!           call msendf(size(rbuf), j, itag+1, rbuf)
 !
-          do p = 1, cn(i)
-! get pointer to the local block
+!         endif
 !
-            pblock => get_pointer(iblk(i,p,1))
-! find the position of block iblk(i,p,3) in ibuf
+! ! if j == ncpu we are receiving the data
+! !
+!         if (j .eq. ncpu) then
 !
-            l = 1
-            do while(ibuf(l,1) .ne. iblk(i,p,3) .and. l .le. ll(i,j))
-              l = l + 1
-            end do
-! get the level difference
+! ! receive block IDs and levels
+! !
+!           l = ll(i,j)
+!           call mrecvi(size(ibuf(1:l,:)), i, itag, ibuf(1:l,:))
 !
-            dl = pblock%level - ibuf(l,2)
-! update boundaries
+! ! receive data
+! !
+!           call mrecvf(size(rbuf(1:l,:,:,:,:)), i, itag+1, rbuf(1:l,:,:,:,:))
 !
-            select case(dl)
-            case(-1) ! restriction
-              call bnd_rest_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
-            case(0) ! the same level, copying
-              if (iblk(i,p,6) .eq. 1) &
-                call bnd_copy_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
-            case(1) ! prolongation
-              if (iblk(i,p,6) .eq. 1) &
-                call bnd_prol_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),pblock%pos(3-iblk(i,p,4)))
-            case default
-              call print_error("boundaries::boundary", "Level difference unsupported!")
-            end select
-          end do
-        endif
-! deallocate buffers
+! ! iterate over all blocks
+! !
+!           do p = 1, cn(i)
 !
-        deallocate(rbuf)
-      endif
-    end do
-  end do
-! deallocate temporary arrays
+! ! get pointer to the local block
+! !
+!             pblock => get_pointer(iblk(i,p,1))
 !
-  deallocate(ibuf)
-  deallocate(iblk)
-  deallocate(ll)
-  deallocate(cn)
-#endif /* MPI */
+! ! find the position of block iblk(i,p,3) in ibuf
+! !
+!             l = 1
+!             do while(ibuf(l,1) .ne. iblk(i,p,3) .and. l .le. ll(i,j))
+!               l = l + 1
+!             end do
+!
+! ! get the level difference
+! !
+!             dl = pblock%level - ibuf(l,2)
+!
+! ! update boundaries
+! !
+!             select case(dl)
+!             case(-1) ! restriction
+!               call bnd_rest_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
+!             case(0) ! the same level, copying
+!               if (iblk(i,p,6) .eq. 1) &
+!                 call bnd_copy_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
+!             case(1) ! prolongation
+!               if (iblk(i,p,6) .eq. 1) &
+!                 call bnd_prol_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),pblock%pos(3-iblk(i,p,4)))
+!             case default
+!               call print_error("boundaries::boundary", "Level difference unsupported!")
+!             end select
+!
+!           end do
+!
+!         endif
+!
+! ! deallocate buffers
+! !
+!         deallocate(rbuf)
+!
+!       endif
+!
+!     end do
+!   end do
+!
+! ! deallocate temporary arrays
+! !
+!   deallocate(ibuf)
+!   deallocate(iblk)
+!   deallocate(ll)
+!   deallocate(cn)
+! #endif /* MPI */
 !-------------------------------------------------------------------------------
 !
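
The commented-out exchange above follows the three-step plan in the TODO: make the per-pair block counts globally consistent, loop over (sender, receiver) pairs, and apply bnd_copy/bnd_rest/bnd_prol on arrival. A compact sketch of that skeleton, using a plain MPI_Allreduce in place of the module's mallreducemaxl wrapper (whose interface is not shown in this diff) and leaving the actual sends and receives as comments:

  program boundary_exchange_sketch
    use mpi
    implicit none
    integer :: ierr, ncpu, ncpus, i, j, itag
    integer, allocatable :: ll(:,:)

    call mpi_init(ierr)
    call mpi_comm_rank(mpi_comm_world, ncpu, ierr)
    call mpi_comm_size(mpi_comm_world, ncpus, ierr)

    allocate(ll(0:ncpus-1,0:ncpus-1))
    ll(:,:) = 0
    ! ... each process fills ll(ncpu,:) with the number of blocks it needs ...

    ! step 1: make the exchange table globally consistent (mallreducemaxl above)
    call mpi_allreduce(mpi_in_place, ll, size(ll), mpi_integer, mpi_max, &
                       mpi_comm_world, ierr)

    ! step 2: every (sender i, receiver j) pair with ll(i,j) > 0 exchanges blocks
    do i = 0, ncpus-1
      do j = 0, ncpus-1
        if (ll(i,j) .gt. 0) then
          itag = 10*(i * ncpus + j) + 1111   ! tag scheme from the code above
          ! if (i .eq. ncpu) send block IDs/levels, then block data, to j
          ! if (j .eq. ncpu) receive them from i and, depending on the level
          !   difference, call bnd_copy_u, bnd_rest_u, or bnd_prol_u (step 3)
        endif
      end do
    end do

    call mpi_finalize(ierr)
  end program boundary_exchange_sketch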


@@ -84,7 +84,7 @@ module evolution
 ! update boundaries
 !
     call start_timer(4)
-!   call boundary
+    call boundary
     call stop_timer(4)
 ! reset maximum speed


@@ -39,7 +39,7 @@ module io
 !
   subroutine write_data(ftype, nfile, nproc)
-    use blocks  , only : block_data, list_data, nv => nvars
+    use blocks  , only : block_data, list_data, nv => nvars, nblocks, dblocks
     use config  , only : ncells, nghost, ngrids, igrids, jgrids, kgrids &
                        , im, jm, km, maxlev, xmin, xmax, ymin, ymax, zmin, zmax
     use error   , only : print_error
@@ -175,6 +175,14 @@ module io
     call h5awrite_f(aid, H5T_NATIVE_INTEGER, ncpu, am, err)
     call h5aclose_f(aid, err)
+    call h5acreate_f(gid, 'nblocks', H5T_NATIVE_INTEGER, sid, aid, err)
+    call h5awrite_f(aid, H5T_NATIVE_INTEGER, nblocks, am, err)
+    call h5aclose_f(aid, err)
+    call h5acreate_f(gid, 'dblocks', H5T_NATIVE_INTEGER, sid, aid, err)
+    call h5awrite_f(aid, H5T_NATIVE_INTEGER, dblocks, am, err)
+    call h5aclose_f(aid, err)
     call h5sclose_f(sid, err)
     call h5gclose_f(gid, err)
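
Each new attribute follows the same create/write/close triplet already used for ncpu; in write_data the group identifier gid, the dataspace sid, and the dims array am are set up earlier in the routine. A self-contained sketch of the pattern against a bare file (the file name, scalar dataspace setup, and example value are illustrative, not from the commit):

  program attr_sketch
    use hdf5
    implicit none
    integer(hid_t)   :: fid, sid, aid
    integer(hsize_t) :: am(1) = (/ 1 /)
    integer          :: err, nblocks = 42   ! example value only

    call h5open_f(err)
    call h5fcreate_f('attrs.h5', H5F_ACC_TRUNC_F, fid, err)
    call h5screate_f(H5S_SCALAR_F, sid, err)                  ! scalar dataspace
    call h5acreate_f(fid, 'nblocks', H5T_NATIVE_INTEGER, sid, aid, err)
    call h5awrite_f(aid, H5T_NATIVE_INTEGER, nblocks, am, err)
    call h5aclose_f(aid, err)
    call h5sclose_f(sid, err)
    call h5fclose_f(fid, err)
    call h5close_f(err)
  end program attr_sketch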


@@ -53,7 +53,7 @@ module mesh
                        , ncells, maxlev
     use blocks  , only : block_meta, block_data, list_meta, list_data &
                        , list_allocated, init_blocks, clear_blocks &
-                       , refine_block, get_pointer &
+                       , refine_block, deallocate_datablock, get_pointer &
                        , block, nchild, ndims, plist, last_id, nblocks, nleafs, nsides, nfaces
     use error   , only : print_info, print_error
     use mpitools, only : is_master, ncpu, ncpus
@@ -63,7 +63,7 @@ module mesh
 ! local pointers
 !
-    type(block_meta), pointer :: pmeta_block, pneigh
+    type(block_meta), pointer :: pmeta_block, pneigh, pnext
     type(block_data), pointer :: pdata_block
 ! local variables
@@ -228,74 +228,35 @@ module mesh
     end do
 #ifdef MPI
-! ! divide all blocks between all processes
-! !
-!   l = 0
-!   pblock => plist
-!   do while (associated(pblock))
+! divide blocks between all processes
 !
-! ! assign the cpu to the current block
-! !
-!     pblock%cpu = l * ncpus / nblocks
+  l = 0
+  pmeta_block => list_meta
+  do while (associated(pmeta_block))
+! assign the cpu to the current block
 !
-! ! assign pointer to the next block
-! !
-!     pblock => pblock%next
+    pmeta_block%cpu = l * ncpus / nblocks
+! assign pointer to the next block
 !
-!     l = l + 1
-!   end do
+    pmeta_block => pmeta_block%next
+    l = l + 1
+  end do
+! remove all data blocks which don't belong to the current process
 !
-! ! update the cpu field of the neighbors, parent and children
-! !
-!   pblock => plist
-!   do while (associated(pblock))
-!
-! ! update neighbors
-! !
-!     do i = 1, ndims
-!       do j = 1, 2
-!         do k = 1, 2
-!
-!           pneigh => get_pointer(pblock%neigh(i,j,k)%id)
-!
-!           if (associated(pneigh)) &
-!             pblock%neigh(i,j,k)%cpu = pneigh%cpu
-!
-!         end do
-!       end do
-!     end do
-!
-! ! update parent
-! !
-!     pparent => get_pointer(pblock%parent%id)
-!     if (associated(pparent)) &
-!       pblock%parent%cpu = pparent%cpu
-!
-! ! update children
-! !
-!     do p = 1, nchild
-!       pchild => get_pointer(pblock%child(p)%id)
-!
-!       if (associated(pchild)) &
-!         pblock%child(p)%cpu = pchild%cpu
-!     end do
-!
-! ! assign pointer to the next block
-! !
-!     pblock => pblock%next
-!   end do
-!
-! ! remove all blocks which don't belong to the current process
-! !
-!   pblock => plist
-!   do while (associated(pblock))
-!     pnext => pblock%next
-!
-!     if (pblock%cpu .ne. ncpu) &
-!       call deallocate_block(pblock)
-!
-!     pblock => pnext
-!   end do
+  pmeta_block => list_meta
+  do while (associated(pmeta_block))
+    pnext => pmeta_block%next
+    if (pmeta_block%cpu .ne. ncpu) then
+      call deallocate_datablock(pmeta_block%data)
+    end if
+    pmeta_block => pnext
+  end do
 #endif /* MPI */
 ! print information
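
The assignment pmeta_block%cpu = l * ncpus / nblocks hands each process a contiguous, nearly equal slice of the block list. A tiny standalone illustration of the mapping (the counts below are made-up example values, not from the commit):

  program distribution_sketch
    implicit none
    integer :: l, cpu
    integer, parameter :: nblocks = 10, ncpus = 4   ! example sizes only
    do l = 0, nblocks - 1
      cpu = l * ncpus / nblocks          ! integer division, as in the mesh hunk above
      print '(a,i3,a,i2)', 'block', l, ' -> cpu', cpu
    end do
  end program distribution_sketch

With 10 blocks on 4 processes this yields slices of sizes 3, 2, 3, 2 (cpus 0,0,0,1,1,2,2,2,3,3). Since only the data payload is deallocated for foreign blocks, every process still keeps the full metadata tree for neighbor lookups.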