Divide blocks among processors. Initial work on boundaries.

BLOCK STRUCTURE: MPI

 - divide the initial block structure among all processes and remove the
   non-local data blocks (see the sketch below)
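
A minimal sketch of the distribution rule, written as a hypothetical standalone
program: a plain array stands in for the real linked list of block_meta
objects, and nblocks, ncpus and ncpu carry made-up example values rather than
the code's actual variables.

! a simplified, hypothetical sketch of the distribution rule used in the mesh
! module below; plain arrays instead of the real block_meta list
program block_distribution

  implicit none

  integer, parameter :: nblocks = 10   ! total number of blocks (example value)
  integer, parameter :: ncpus   = 4    ! number of MPI processes (example value)
  integer, parameter :: ncpu    = 1    ! rank of the current process (example value)
  integer            :: cpu(nblocks)   ! owner process of each block
  integer            :: l

! assign an owner to every block using the same rule as in the mesh module,
! cpu = l * ncpus / nblocks, with l counted from 0
!
  do l = 0, nblocks - 1
    cpu(l+1) = l * ncpus / nblocks
  end do

! blocks owned by another process keep their meta data, but their data blocks
! would be deallocated on this process
!
  do l = 1, nblocks
    if (cpu(l) .ne. ncpu) then
      print *, 'block', l, 'on cpu', cpu(l), '- data block freed here'
    else
      print *, 'block', l, 'on cpu', cpu(l), '- kept on this process'
    end if
  end do

end program block_distribution

With these example numbers the rule gives blocks 1-3 to process 0, blocks 4-5
to process 1, blocks 6-8 to process 2 and blocks 9-10 to process 3, so every
process ends up with two or three consecutive blocks.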

DATA I/O

 - dump the total number of blocks and the number of data blocks to the output
   files (see the sketch below)
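
A standalone sketch of the new attribute dump: it uses the same
h5acreate_f/h5awrite_f calls as the io module below, but the file name, group
name and the two counts are invented for the illustration.

! a minimal, hypothetical sketch of writing 'nblocks' and 'dblocks' as scalar
! integer attributes of an HDF5 group; names and values are example only
program dump_block_counts

  use hdf5
  implicit none

  integer          :: err
  integer(hid_t)   :: fid, gid, sid, aid
  integer(hsize_t) :: am(1) = (/ 1 /)
  integer          :: nblocks = 128, dblocks = 32   ! example counts

  call h5open_f(err)
  call h5fcreate_f('blocks.h5', H5F_ACC_TRUNC_F, fid, err)
  call h5gcreate_f(fid, 'attributes', gid, err)
  call h5screate_f(H5S_SCALAR_F, sid, err)

! total number of meta blocks
!
  call h5acreate_f(gid, 'nblocks', H5T_NATIVE_INTEGER, sid, aid, err)
  call h5awrite_f(aid, H5T_NATIVE_INTEGER, nblocks, am, err)
  call h5aclose_f(aid, err)

! number of data blocks stored by this process
!
  call h5acreate_f(gid, 'dblocks', H5T_NATIVE_INTEGER, sid, aid, err)
  call h5awrite_f(aid, H5T_NATIVE_INTEGER, dblocks, am, err)
  call h5aclose_f(aid, err)

  call h5sclose_f(sid, err)
  call h5gclose_f(gid, err)
  call h5fclose_f(fid, err)
  call h5close_f(err)

end program dump_block_counts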

BOUNDARY CONDITIONS

 - initial work on the boundary conditions for the new block structure (see
   the sketch below)
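
A toy sketch of the reworked neighbor test: the type below is invented and
heavily simplified (a single neighbor pointer instead of the real
neigh(ndims,nsides,nfaces) array of the blocks module), but it shows the same
idea, an external boundary is now detected with associated(pneigh) instead of
a neighbor ID equal to -1.

! a toy, hypothetical sketch of the new neighbor handling
program boundary_sketch

  implicit none

  type :: block_meta
    integer                   :: level = 1
    integer                   :: cpu   = 0
    type(block_meta), pointer :: neigh => null()
  end type block_meta

  type(block_meta), target  :: a, b
  type(block_meta), pointer :: pblock, pneigh
  integer :: dl

! block 'a' has a neighbor, block 'b' touches the external boundary
!
  a%neigh => b

! the update follows the stored pointer and tests its association
!
  pblock => a
  pneigh => pblock%neigh
  if (associated(pneigh)) then
    dl = pblock%level - pneigh%level
    print *, 'neighbor present, level difference =', dl, '-> copy/restrict/prolong'
  else
    print *, 'no neighbor -> apply the specific boundary condition'
  end if

  pblock => b
  pneigh => pblock%neigh
  if (associated(pneigh)) then
    print *, 'neighbor present'
  else
    print *, 'no neighbor -> apply the specific boundary condition'
  end if

end program boundary_sketch
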
Grzegorz Kowal 2009-09-14 19:15:21 -03:00
parent bf5e332ce1
commit ac188d32e4
4 changed files with 272 additions and 271 deletions


@@ -43,7 +43,8 @@ module boundaries
 subroutine boundary
 
   use config  , only : im, jm, km
-  use blocks  , only : nv => nvars, block, plist, ndims, get_pointer, nblocks
+  use blocks  , only : nv => nvars, ndims, nsides, nfaces                     &
+                     , block_meta, list_meta, get_pointer, nblocks
   use error   , only : print_error
   use mpitools, only : ncpus, ncpu, msendi, mrecvi, msendf, mrecvf, mallreducemaxl
@@ -67,28 +68,28 @@ module boundaries
 ! local pointers
 !
-  type(block), pointer :: pblock, pneigh
+  type(block_meta), pointer :: pblock, pneigh
 !
 !-------------------------------------------------------------------------------
 !
-#ifdef MPI
-! allocate temporary arrays; since we have two blocks per boundary and 4 sides
-! of each block we need to increase the second dimension
-!
-  allocate(cn  (0:ncpus-1))
-  allocate(ll  (0:ncpus-1,0:ncpus-1))
-  allocate(iblk(0:ncpus-1,2**(NDIMS-1)*NDIMS*nblocks,6))
-
-! reset the local arrays storing blocks to exchange
-!
-  cn(:)       = 0
-  ll(:,:)     = 0
-  iblk(:,:,:) = 0
-#endif /* MPI */
+! #ifdef MPI
+! ! allocate temporary arrays; since we have two blocks per boundary and 4 sides
+! ! of each block we need to increase the second dimension
+! !
+!   allocate(cn  (0:ncpus-1))
+!   allocate(ll  (0:ncpus-1,0:ncpus-1))
+!   allocate(iblk(0:ncpus-1,2**(NDIMS-1)*NDIMS*nblocks,6))
+! !
+! ! reset the local arrays storing blocks to exchange
+! !
+!   cn(:)       = 0
+!   ll(:,:)     = 0
+!   iblk(:,:,:) = 0
+! #endif /* MPI */
 
 ! iterate over all blocks and perform boundary update
 !
-  pblock => plist
+  pblock => list_meta
   do while (associated(pblock))
 
 ! if the current block is a leaf...
@@ -98,70 +99,101 @@ module boundaries
 ! iterate over all neighbor blocks
 !
           do i = 1, ndims
-            do j = 1, 2
-              do k = 1, 2
-                if (pblock%neigh(i,j,k)%id .eq. -1) then
-
-! neighbor is not associated, it means that we have non periodic boundary here
-!
-                  if (k .eq. 1) &
-                    call bnd_spec(pblock, i, j, k)
-                else
-#ifdef MPI
-! neighbor associated; exchange boundaries
-!
-                  if (pblock%neigh(i,j,k)%cpu .eq. ncpu) then
-#endif /* MPI */
-
-! neighbor is on the same CPU, update
-!
-                    pneigh => get_pointer(pblock%neigh(i,j,k)%id)
-
-! calculate the difference of current and neighbor levels
-!
-                    dl = pblock%level - pneigh%level
-
-! depending on the level difference
-!
-                    select case(dl)
-                    case(-1) ! restriction and prolongation
-                      call bnd_rest(pblock, pneigh, i, j, k)
-                    case(0)  ! the same level, copying
-                      if (k .eq. 1) &
-                        call bnd_copy(pblock, pneigh, i, j, k)
-                    case(1)  ! prolongation is handled by bnd_rest
-                    case default
-                      call print_error("boundaries::boundary", "Level difference unsupported!")
-                    end select
-#ifdef MPI
-                  else
-
-! in the array 'info' we store IDs of all blocks which have to be updated from
-! the blocks laying on the other processors
-!
-! get the processor number of neighbor
-!
-                    p = pblock%neigh(i,j,k)%cpu
-
-! increase the number of blocks to retrieve from that CPU
-!
-                    cn(p) = cn(p) + 1
-
-! fill out the info array
-!
-                    iblk(p,cn(p),1) = pblock%id              ! 1: local block ID
-                    iblk(p,cn(p),2) = pblock%level           ! 2: local block level
-                    iblk(p,cn(p),3) = pblock%neigh(i,j,k)%id ! 3: neighbor block ID
-                    iblk(p,cn(p),4) = i                      ! 4: directions of boundary
-                    iblk(p,cn(p),5) = j                      ! 5: side at the boundary
-                    iblk(p,cn(p),6) = k                      ! 6: part of the boundary
-
-                  endif
-#endif /* MPI */
-                endif
+            do j = 1, nsides
+              do k = 1, nfaces
+
+! associate pointer to the neighbor
+!
+                pneigh => pblock%neigh(i,j,k)%ptr
+
+! check if neighbor is associated, if yes exchange boundaries, if not call
+! specific boundary conditions
+!
+                if (associated(pneigh)) then
+
+! check if the neighbor is on the same cpu
+!
+                  if (pneigh%cpu .eq. ncpu) then
+
+! calculate the difference of current and neighbor level
+!
+                    dl = pblock%level - pneigh%level
+
+! ! depending on the level difference
+! !
+!                     select case(dl)
+!                     case(-1) ! restriction and prolongation
+!                       call bnd_rest(pblock, pneigh, i, j, k)
+!                     case(0)  ! the same level, copying
+!                       if (k .eq. 1) &
+!                         call bnd_copy(pblock, pneigh, i, j, k)
+!                     case(1)  ! prolongation is handled by bnd_rest
+!                     case default
+!                       call print_error("boundaries::boundary", "Level difference unsupported!")
+!                     end select
+
+                  else
+
+                  endif
+
+! #ifdef MPI
+! ! neighbor associated; exchange boundaries
+! !
+!                   if (pblock%neigh(i,j,k)%cpu .eq. ncpu) then
+! #endif /* MPI */
+!
+! ! neighbor is on the same CPU, update
+! !
+!                     pneigh => get_pointer(pblock%neigh(i,j,k)%id)
+!
+! ! calculate the difference of current and neighbor levels
+! !
+!                     dl = pblock%level - pneigh%level
+!
+! ! depending on the level difference
+! !
+!                     select case(dl)
+!                     case(-1) ! restriction and prolongation
+!                       call bnd_rest(pblock, pneigh, i, j, k)
+!                     case(0)  ! the same level, copying
+!                       if (k .eq. 1) &
+!                         call bnd_copy(pblock, pneigh, i, j, k)
+!                     case(1)  ! prolongation is handled by bnd_rest
+!                     case default
+!                       call print_error("boundaries::boundary", "Level difference unsupported!")
+!                     end select
+!
+! #ifdef MPI
+!                   else
+!
+! ! in the array 'info' we store IDs of all blocks which have to be updated from
+! ! the blocks laying on the other processors
+! !
+! ! get the processor number of neighbor
+! !
+!                     p = pblock%neigh(i,j,k)%cpu
+!
+! ! increase the number of blocks to retrieve from that CPU
+! !
+!                     cn(p) = cn(p) + 1
+!
+! ! fill out the info array
+! !
+!                     iblk(p,cn(p),1) = pblock%id              ! 1: local block ID
+!                     iblk(p,cn(p),2) = pblock%level           ! 2: local block level
+!                     iblk(p,cn(p),3) = pblock%neigh(i,j,k)%id ! 3: neighbor block ID
+!                     iblk(p,cn(p),4) = i                      ! 4: directions of boundary
+!                     iblk(p,cn(p),5) = j                      ! 5: side at the boundary
+!                     iblk(p,cn(p),6) = k                      ! 6: part of the boundary
+!
+!                   endif
+! #endif /* MPI */
+
+                else
+
+! neighbor is not associated, it means that we have non periodic boundary here
+!
+!                   if (k .eq. 1) &
+!                     call bnd_spec(pblock, i, j, k)
+
+                endif
               end do
@@ -176,162 +208,162 @@ module boundaries
   end do
 
-#ifdef MPI
-! TODO: 1) update info globally, write an MPI subroutine to sum the variable
-!          'info' over all processes
-!       2) then iterate over all source and destination processes and send/receive
-!          blocks
-!       3) after receiving the block call bnd_copy, bnd_rest, or bnd_prol to update
-!          the boundary of destination block
-!
-  do i = 0, ncpus-1
-    if (cn(i) .gt. 0) then
-      allocate(ibuf(cn(i),1))
-      l = 1
-      ibuf(1,1) = iblk(i,1,1)
-      do p = 2, cn(i)
-        lf = .true.
-        do k = 1, l
-          lf = lf .and. (iblk(i,p,1) .ne. ibuf(k,1))
-        end do
-        if (lf) then
-          l = l + 1
-          ibuf(l,1) = iblk(i,p,1)
-        endif
-      end do
-      ll(ncpu,i) = l
-      deallocate(ibuf)
-    endif
-  end do
-
-! update number of blocks across all processes
-!
-  call mallreducemaxl(size(ll),ll)
-
-!   if (ncpu .eq. 0) print *, ll
-
-! allocate buffer for IDs and levels
-!
-  allocate(ibuf(maxval(ll),2))
-
-  do i = 0, ncpus-1
-    do j = 0, ncpus-1
-      if (ll(i,j) .gt. 0) then
-
-! get the tag for communication
-!
-        itag = 10*(i * ncpus + j) + 1111
-
-! allocate space for variables
-!
-        allocate(rbuf(ll(i,j),nv,im,jm,km))
-
-! if i == ncpu we are sending the data
-!
-        if (i .eq. ncpu) then
-
-! find all blocks to send from this process
-!
-          l = 1
-          ibuf(l,1:2) = iblk(j,1,1:2)
-          do p = 2, cn(j)
-            lf = .true.
-            do k = 1, l
-              lf = lf .and. (iblk(j,p,1) .ne. ibuf(k,1))
-            end do
-            if (lf) then
-              l = l + 1
-              ibuf(l,1:2) = iblk(j,p,1:2)
-            endif
-          end do
-
-! send block IDs and levels
-!
-          l = ll(i,j)
-          call msendi(size(ibuf(1:l,:)), j, itag, ibuf(1:l,:))
-
-! fill the buffer with data
-!
-          do l = 1, ll(i,j)
-            pblock => get_pointer(ibuf(l,1))
-
-            rbuf(l,:,:,:,:) = pblock%u(:,:,:,:)
-          end do
-
-! send data
-!
-          call msendf(size(rbuf), j, itag+1, rbuf)
-
-        endif
-
-! if j == ncpu we are receiving the data
-!
-        if (j .eq. ncpu) then
-
-! receive block IDs and levels
-!
-          l = ll(i,j)
-          call mrecvi(size(ibuf(1:l,:)), i, itag, ibuf(1:l,:))
-
-! receive data
-!
-          call mrecvf(size(rbuf(1:l,:,:,:,:)), i, itag+1, rbuf(1:l,:,:,:,:))
-
-! iterate over all blocks
-!
-          do p = 1, cn(i)
-
-! get pointer to the local block
-!
-            pblock => get_pointer(iblk(i,p,1))
-
-! find the position of block iblk(i,p,3) in ibuf
-!
-            l = 1
-            do while(ibuf(l,1) .ne. iblk(i,p,3) .and. l .le. ll(i,j))
-              l = l + 1
-            end do
-
-! get the level difference
-!
-            dl = pblock%level - ibuf(l,2)
-
-! update boundaries
-!
-            select case(dl)
-            case(-1) ! restriction
-              call bnd_rest_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
-            case(0)  ! the same level, copying
-              if (iblk(i,p,6) .eq. 1) &
-                call bnd_copy_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
-            case(1)  ! prolongation
-              if (iblk(i,p,6) .eq. 1) &
-                call bnd_prol_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),pblock%pos(3-iblk(i,p,4)))
-            case default
-              call print_error("boundaries::boundary", "Level difference unsupported!")
-            end select
-
-          end do
-
-        endif
-
-! deallocate buffers
-!
-        deallocate(rbuf)
-
-      endif
-    end do
-  end do
-
-! deallocate temporary arrays
-!
-  deallocate(ibuf)
-  deallocate(iblk)
-  deallocate(ll)
-  deallocate(cn)
-#endif /* MPI */
+! #ifdef MPI
+! ! TODO: 1) update info globally, write an MPI subroutine to sum the variable
+! !          'info' over all processes
+! !       2) then iterate over all source and destination processes and send/receive
+! !          blocks
+! !       3) after receiving the block call bnd_copy, bnd_rest, or bnd_prol to update
+! !          the boundary of destination block
+! !
+!   do i = 0, ncpus-1
+!     if (cn(i) .gt. 0) then
+!       allocate(ibuf(cn(i),1))
+!       l = 1
+!       ibuf(1,1) = iblk(i,1,1)
+!       do p = 2, cn(i)
+!         lf = .true.
+!         do k = 1, l
+!           lf = lf .and. (iblk(i,p,1) .ne. ibuf(k,1))
+!         end do
+!         if (lf) then
+!           l = l + 1
+!           ibuf(l,1) = iblk(i,p,1)
+!         endif
+!       end do
+!       ll(ncpu,i) = l
+!       deallocate(ibuf)
+!     endif
+!   end do
+!
+! ! update number of blocks across all processes
+! !
+!   call mallreducemaxl(size(ll),ll)
+!
+! !   if (ncpu .eq. 0) print *, ll
+!
+! ! allocate buffer for IDs and levels
+! !
+!   allocate(ibuf(maxval(ll),2))
+!
+!   do i = 0, ncpus-1
+!     do j = 0, ncpus-1
+!       if (ll(i,j) .gt. 0) then
+!
+! ! get the tag for communication
+! !
+!         itag = 10*(i * ncpus + j) + 1111
+!
+! ! allocate space for variables
+! !
+!         allocate(rbuf(ll(i,j),nv,im,jm,km))
+!
+! ! if i == ncpu we are sending the data
+! !
+!         if (i .eq. ncpu) then
+!
+! ! find all blocks to send from this process
+! !
+!           l = 1
+!           ibuf(l,1:2) = iblk(j,1,1:2)
+!           do p = 2, cn(j)
+!             lf = .true.
+!             do k = 1, l
+!               lf = lf .and. (iblk(j,p,1) .ne. ibuf(k,1))
+!             end do
+!             if (lf) then
+!               l = l + 1
+!               ibuf(l,1:2) = iblk(j,p,1:2)
+!             endif
+!           end do
+!
+! ! send block IDs and levels
+! !
+!           l = ll(i,j)
+!           call msendi(size(ibuf(1:l,:)), j, itag, ibuf(1:l,:))
+!
+! ! fill the buffer with data
+! !
+!           do l = 1, ll(i,j)
+!             pblock => get_pointer(ibuf(l,1))
+!
+!             rbuf(l,:,:,:,:) = pblock%u(:,:,:,:)
+!           end do
+!
+! ! send data
+! !
+!           call msendf(size(rbuf), j, itag+1, rbuf)
+!
+!         endif
+!
+! ! if j == ncpu we are receiving the data
+! !
+!         if (j .eq. ncpu) then
+!
+! ! receive block IDs and levels
+! !
+!           l = ll(i,j)
+!           call mrecvi(size(ibuf(1:l,:)), i, itag, ibuf(1:l,:))
+!
+! ! receive data
+! !
+!           call mrecvf(size(rbuf(1:l,:,:,:,:)), i, itag+1, rbuf(1:l,:,:,:,:))
+!
+! ! iterate over all blocks
+! !
+!           do p = 1, cn(i)
+!
+! ! get pointer to the local block
+! !
+!             pblock => get_pointer(iblk(i,p,1))
+!
+! ! find the position of block iblk(i,p,3) in ibuf
+! !
+!             l = 1
+!             do while(ibuf(l,1) .ne. iblk(i,p,3) .and. l .le. ll(i,j))
+!               l = l + 1
+!             end do
+!
+! ! get the level difference
+! !
+!             dl = pblock%level - ibuf(l,2)
+!
+! ! update boundaries
+! !
+!             select case(dl)
+!             case(-1) ! restriction
+!               call bnd_rest_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
+!             case(0)  ! the same level, copying
+!               if (iblk(i,p,6) .eq. 1) &
+!                 call bnd_copy_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),iblk(i,p,6))
+!             case(1)  ! prolongation
+!               if (iblk(i,p,6) .eq. 1) &
+!                 call bnd_prol_u(pblock,rbuf(l,:,:,:,:),iblk(i,p,4),iblk(i,p,5),pblock%pos(3-iblk(i,p,4)))
+!             case default
+!               call print_error("boundaries::boundary", "Level difference unsupported!")
+!             end select
+!
+!           end do
+!
+!         endif
+!
+! ! deallocate buffers
+! !
+!         deallocate(rbuf)
+!
+!       endif
+!     end do
+!   end do
+!
+! ! deallocate temporary arrays
+! !
+!   deallocate(ibuf)
+!   deallocate(iblk)
+!   deallocate(ll)
+!   deallocate(cn)
+! #endif /* MPI */
 
 !-------------------------------------------------------------------------------
 !


@@ -84,7 +84,7 @@ module evolution
 ! update boundaries
 !
     call start_timer(4)
-!     call boundary
+    call boundary
     call stop_timer(4)
 
 ! reset maximum speed


@@ -39,7 +39,7 @@ module io
 !
   subroutine write_data(ftype, nfile, nproc)
 
-    use blocks  , only : block_data, list_data, nv => nvars
+    use blocks  , only : block_data, list_data, nv => nvars, nblocks, dblocks
     use config  , only : ncells, nghost, ngrids, igrids, jgrids, kgrids       &
                        , im, jm, km, maxlev, xmin, xmax, ymin, ymax, zmin, zmax
     use error   , only : print_error
@@ -175,6 +175,14 @@ module io
     call h5awrite_f(aid, H5T_NATIVE_INTEGER, ncpu, am, err)
     call h5aclose_f(aid, err)
 
+    call h5acreate_f(gid, 'nblocks', H5T_NATIVE_INTEGER, sid, aid, err)
+    call h5awrite_f(aid, H5T_NATIVE_INTEGER, nblocks, am, err)
+    call h5aclose_f(aid, err)
+
+    call h5acreate_f(gid, 'dblocks', H5T_NATIVE_INTEGER, sid, aid, err)
+    call h5awrite_f(aid, H5T_NATIVE_INTEGER, dblocks, am, err)
+    call h5aclose_f(aid, err)
+
     call h5sclose_f(sid, err)
     call h5gclose_f(gid, err)


@@ -53,7 +53,7 @@ module mesh
                        , ncells, maxlev
     use blocks  , only : block_meta, block_data, list_meta, list_data         &
                        , list_allocated, init_blocks, clear_blocks            &
-                       , refine_block, get_pointer                            &
+                       , refine_block, deallocate_datablock, get_pointer      &
                        , block, nchild, ndims, plist, last_id, nblocks, nleafs, nsides, nfaces
     use error   , only : print_info, print_error
     use mpitools, only : is_master, ncpu, ncpus
@@ -63,7 +63,7 @@ module mesh
 ! local pointers
 !
-    type(block_meta), pointer :: pmeta_block, pneigh
+    type(block_meta), pointer :: pmeta_block, pneigh, pnext
     type(block_data), pointer :: pdata_block
 
 ! local variables
@@ -228,74 +228,35 @@ module mesh
     end do
 
 #ifdef MPI
-! ! divide all blocks between all processes
-! !
-!   l = 0
-!   pblock => plist
-!   do while (associated(pblock))
-!
-! ! assign the cpu to the current block
-! !
-!     pblock%cpu = l * ncpus / nblocks
-!
-! ! assign pointer to the next block
-! !
-!     pblock => pblock%next
-!
-!     l = l + 1
-!   end do
-!
-! ! update the cpu field of the neighbors, parent and children
-! !
-!   pblock => plist
-!   do while (associated(pblock))
-!
-! ! update neighbors
-! !
-!     do i = 1, ndims
-!       do j = 1, 2
-!         do k = 1, 2
-!
-!           pneigh => get_pointer(pblock%neigh(i,j,k)%id)
-!
-!           if (associated(pneigh)) &
-!             pblock%neigh(i,j,k)%cpu = pneigh%cpu
-!
-!         end do
-!       end do
-!     end do
-!
-! ! update parent
-! !
-!     pparent => get_pointer(pblock%parent%id)
-!     if (associated(pparent)) &
-!       pblock%parent%cpu = pparent%cpu
-!
-! ! update children
-! !
-!     do p = 1, nchild
-!       pchild => get_pointer(pblock%child(p)%id)
-!
-!       if (associated(pchild)) &
-!         pblock%child(p)%cpu = pchild%cpu
-!     end do
-!
-! ! assign pointer to the next block
-! !
-!     pblock => pblock%next
-!   end do
-!
-! ! remove all blocks which don't belong to the current process
-! !
-!   pblock => plist
-!   do while (associated(pblock))
-!     pnext => pblock%next
-!
-!     if (pblock%cpu .ne. ncpu) &
-!       call deallocate_block(pblock)
-!
-!     pblock => pnext
-!   end do
+! divide blocks between all processes
+!
+    l = 0
+    pmeta_block => list_meta
+    do while (associated(pmeta_block))
+
+! assign the cpu to the current block
+!
+      pmeta_block%cpu = l * ncpus / nblocks
+
+! assign pointer to the next block
+!
+      pmeta_block => pmeta_block%next
+
+      l = l + 1
+    end do
+
+! remove all data blocks which don't belong to the current process
+!
+    pmeta_block => list_meta
+    do while (associated(pmeta_block))
+      pnext => pmeta_block%next
+
+      if (pmeta_block%cpu .ne. ncpu) then
+        call deallocate_datablock(pmeta_block%data)
+      end if
+
+      pmeta_block => pnext
+    end do
 #endif /* MPI */
 
 ! print information