This is actually relatively straightforward to do (if you already know something about BLACS/ScaLAPACK and MPI-IO!), but even then the documentation, even online, is, as you've discovered, somewhat poor.
The first thing to know about MPI-IO is that it lets you use ordinary MPI data types to specify each process's "view" of the file, and then read only the data that falls into that view. At our centre we have slides and source code for a half-day course on parallel I/O; the first third or so covers the basics of MPI-IO. There are slides here and example source code here.
The second thing to know is that MPI has a built-in way of creating "distributed array" data types, one combination of which lets you lay out a block-cyclic data distribution; that's discussed in general terms in my answer to this question: What is the difference between darray and subarray in MPI?.
So that means that if you have a binary file containing a big matrix, you can read it in with MPI-IO using MPI_Type_create_darray and it will be distributed among the tasks in a block-cyclic way. Then it's just a matter of making the BLACS or ScaLAPACK calls. An example program using ScaLAPACK's psgemv for matrix-vector multiplication, rather than psgemm, is listed in my answer to a question on the Computational Science Stack Exchange.
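To make the read step concrete before the full program, here is a minimal sketch of just that part (the file name 'matrix.dat', the matrix size, and the block size are made-up values for illustration; the file is assumed to hold only the n*n reals with no header; error checking is omitted): create the darray type describing the blocks this rank owns, set it as the file view, and do a collective read.
program darray_read_sketch
use mpi
implicit none
integer, parameter :: n = 1024, nb = 64 ! assumed global matrix size and block size
integer :: rank, nprocs, ierr, fh, filetype, locsize, nelements
integer, dimension(2) :: pdims, dims, distribs, dargs
integer, dimension(MPI_STATUS_SIZE) :: mpistatus
integer(kind=MPI_OFFSET_KIND) :: disp
real, dimension(:), allocatable :: mylocal
call MPI_Init(ierr)
call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
! let MPI pick a 2d process grid
pdims = 0
call MPI_Dims_create(nprocs, 2, pdims, ierr)
! block-cyclic distribution of an n x n array in nb x nb blocks
dims = [n, n]
distribs = [MPI_DISTRIBUTE_CYCLIC, MPI_DISTRIBUTE_CYCLIC]
dargs = [nb, nb]
call MPI_Type_create_darray(nprocs, rank, 2, dims, distribs, dargs, &
pdims, MPI_ORDER_FORTRAN, MPI_REAL, filetype, ierr)
call MPI_Type_commit(filetype, ierr)
! the type's size in bytes tells us how much of the file lands on this rank
call MPI_Type_size(filetype, locsize, ierr)
nelements = locsize/4
allocate(mylocal(nelements))
! set the view so each rank sees only its own blocks, then read collectively
disp = 0
call MPI_File_open(MPI_COMM_WORLD, 'matrix.dat', MPI_MODE_RDONLY, MPI_INFO_NULL, fh, ierr)
call MPI_File_set_view(fh, disp, MPI_REAL, filetype, 'native', MPI_INFO_NULL, ierr)
call MPI_File_read_all(fh, mylocal, nelements, MPI_REAL, mpistatus, ierr)
call MPI_File_close(fh, ierr)
call MPI_Type_free(filetype, ierr)
call MPI_Finalize(ierr)
end program darray_read_sketch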
To give you an idea of how the pieces fit together, the following is a simple program which reads in a binary file containing a matrix (first the size of the square matrix, N, then the N^2 elements) and then calculates the eigenvalues and vectors using ScaLAPACK's (newer) pssyevr routine. It combines the MPI-IO, darray, and ScaLAPACK stuff. It's in Fortran, but the function calls are the same in C-based languages.
!
! Use MPI-IO to read a diagonal matrix distributed block-cyclically,
! use Scalapack to calculate its eigenvalues, and compare
! to expected results.
!
program darray
use mpi
implicit none
integer :: n, nb ! problem size and block size
integer :: myArows, myAcols ! size of local subset of global array
real :: p
real, dimension(:), allocatable :: myA, myZ
real, dimension(:), allocatable :: work
integer, dimension(:), allocatable :: iwork
real, dimension(:), allocatable :: eigenvals
real, dimension(:), allocatable :: expected
integer :: worksize, totwork, iworksize
integer, external :: numroc ! blacs routine
integer :: me, procs, icontxt, prow, pcol, myrow, mycol ! blacs data
integer :: info ! scalapack return value
integer, dimension(9) :: ides_a, ides_z ! scalapack array desc
integer :: clock
real :: calctime, iotime
character(len=128) :: filename
integer :: mpirank
integer :: ierr
integer, dimension(2) :: pdims, dims, distribs, dargs
integer :: infile
integer, dimension(MPI_STATUS_SIZE) :: mpistatus
integer :: darray
integer :: locsize, nelements
integer(kind=MPI_ADDRESS_KIND) :: lb, locextent
integer(kind=MPI_OFFSET_KIND) :: disp
integer :: nargs
integer :: m, nz
! Initialize MPI (for MPI-IO)
call MPI_Init(ierr)
call MPI_Comm_size(MPI_COMM_WORLD,procs,ierr)
call MPI_Comm_rank(MPI_COMM_WORLD,mpirank,ierr)
! May as well get the process grid from MPI_Dims_create
pdims = 0
call MPI_Dims_create(procs, 2, pdims, ierr)
prow = pdims(1)
pcol = pdims(2)
! get filename
nargs = command_argument_count()
if (nargs /= 1) then
print *,'Usage: darray filename'
print *,' Where filename = name of binary matrix file.'
call MPI_Abort(MPI_COMM_WORLD,1,ierr)
endif
call get_command_argument(1, filename)
! find the size of the array we'll be using
call tick(clock)
call MPI_File_open(MPI_COMM_WORLD, trim(filename), MPI_MODE_RDONLY, MPI_INFO_NULL, infile, ierr)
call MPI_File_read_all(infile,n,1,MPI_INTEGER,mpistatus,ierr)
call MPI_File_close(infile,ierr)
! create the darray that will read in the data.
nb = 64
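! shrink the block size if 64x64 blocks would be too big for this matrix on this process grid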
if (nb > (N/prow)) nb = N/prow
if (nb > (N/pcol)) nb = N/pcol
dims = [n,n]
distribs = [MPI_DISTRIBUTE_CYCLIC, MPI_DISTRIBUTE_CYCLIC]
dargs = [nb, nb]
call MPI_Type_create_darray(procs, mpirank, 2, dims, distribs, dargs, &
pdims, MPI_ORDER_FORTRAN, MPI_REAL, darray, ierr)
call MPI_Type_commit(darray,ierr)
call MPI_Type_size(darray, locsize, ierr)
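! the darray type's size in bytes is how much of the file this rank owns; 4 bytes per default real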
nelements = locsize/4
call MPI_Type_get_extent(darray, lb, locextent, ierr)
! Initialize local arrays
allocate(myA(nelements))
allocate(myZ(nelements))
allocate(eigenvals(n), expected(n))
! read in the data
call MPI_File_open(MPI_COMM_WORLD, trim(filename), MPI_MODE_RDONLY, MPI_INFO_NULL, infile, ierr)
disp = 4 ! skip N = 4 bytes
call MPI_File_set_view(infile, disp, MPI_REAL, darray, "native", MPI_INFO_NULL, ierr)
call MPI_File_read_all(infile, myA, nelements, MPI_REAL, mpistatus, ierr)
call MPI_File_close(infile,ierr)
iotime = tock(clock)
if (mpirank == 0) then
print *,'I/O time = ', iotime
endif
! Initialize blacs processor grid
call tick(clock)
call blacs_pinfo (me,procs)
call blacs_get (-1, 0, icontxt)
call blacs_gridinit(icontxt, 'R', prow, pcol)
call blacs_gridinfo(icontxt, prow, pcol, myrow, mycol)
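! numroc (a ScaLAPACK tool routine) gives the number of rows/columns of the global matrix stored locally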
myArows = numroc(n, nb, myrow, 0, prow)
myAcols = numroc(n, nb, mycol, 0, pcol)
! Construct local arrays
! Global structure: matrix A of n rows and n columns
! Prepare array descriptors for ScaLAPACK
call descinit( ides_a, n, n, nb, nb, 0, 0, icontxt, myArows, info )
call descinit( ides_z, n, n, nb, nb, 0, 0, icontxt, myArows, info )
! Call ScaLAPACK library routine
allocate(work(1), iwork(1))
iwork(1) = -1
work(1) = -1.
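! workspace query: lwork = liwork = -1 makes pssyevr return the sizes it needs in work(1) and iwork(1)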
call pssyevr( 'V', 'A', 'U', n, myA, 1, 1, ides_a, -1.e20, +1.e20, 1, n, &
m, nz, eigenvals, myZ, 1, 1, ides_z, work, -1, &
iwork, -1, info )
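! pad the queried (minimum) workspace sizes by roughly 50%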
worksize = int(work(1))/2*3
iworksize = iwork(1)/2*3
print *, 'Local workspace ', worksize
call MPI_Reduce(worksize, totwork, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
if (mpirank == 0) print *, ' total work space ', totwork
call MPI_Reduce(iworksize, totwork, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
if (mpirank == 0) print *, ' total iwork space ', totwork
deallocate(work,iwork)
allocate(work(worksize),iwork(iworksize))
call pssyevr( 'V', 'A', 'U', n, myA, 1, 1, ides_a, -1.e20, +1.e20, 1, n, &
m, nz, eigenvals, myZ, 1, 1, ides_z, work, worksize, &
iwork, iworksize, info )
if (info /= 0) then
print *, 'Error: info = ', info
else if (mpirank == 0) then
print *, 'Calculated ', m, ' eigenvalues and ', nz, ' eigenvectors.'
endif
! Deallocate the local arrays
deallocate(myA, myZ)
deallocate(work, iwork)
! End blacs for processors that are used
call blacs_gridexit(icontxt)
calctime = tock(clock)
! calculated the expected eigenvalues for a particular test matrix
p = 3. + sqrt((4. * n - 3.) * (n - 1.)*3./(n+1.))
expected(1) = p/(n*(5.-2.*n))
expected(2) = 6./(p*(n + 1.))
expected(3:n) = 1.
! Print results
if (me == 0) then
if (info /= 0) then
print *, 'Error -- info = ', info
endif
print *,'Eigenvalues L_infty err = ', &
maxval(abs(eigenvals-expected))
print *,'Compute time = ', calctime
endif
deallocate(eigenvals, expected)
call MPI_Finalize(ierr)
contains
subroutine tick(t)
integer, intent(OUT) :: t
call system_clock(t)
end subroutine tick
! returns time in seconds from now to time described by t
real function tock(t)
integer, intent(in) :: t
integer :: now, clock_rate
call system_clock(now,clock_rate)
tock = real(now - t)/real(clock_rate)
end function tock
end program darray