MPI, MPI-IO tutorials by 바죠


http://www.nersc.gov/nusers/help/tutorials/

http://www-unix.mcs.anl.gov/mpi/tutorial/gropp/talk.html

http://www-unix.mcs.anl.gov/mpi/tutorial/mpiintro/index.htm

http://www.personal.leeds.ac.uk/~bgy1mm/MPITutorial/MPIHome.html

Using MPI with "include 'mpif.h'" is outdated and should no longer be done. Instead, use "use mpi", or better still (for the full functionality), "use mpi_f08".

Fortran 90 + MPI 3.0 or later : use mpi_f08
Fortran 90 + MPI before 3.0   : use mpi
Fortran 77 + any MPI          : include 'mpif.h'
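
For example, a minimal "hello world" using the mpi_f08 interface (this sketch is for illustration only and is not one of the tutorials listed below) looks like this:

program hello_f08
    use mpi_f08        ! MPI 3.0+ Fortran 2008 bindings with compile-time argument checking
    implicit none
    integer :: myrank, nprocs, ierr

    call MPI_Init(ierr)                               ! ierr is optional with mpi_f08
    call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)  ! MPI_COMM_WORLD is type(MPI_Comm) here
    call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
    print *, 'rank ', myrank, ' of ', nprocs
    call MPI_Finalize(ierr)
end program hello_f08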

mpiintro.ppt
tutorial_mpi.ps
message_passing_with_mpi.pdf
intro_parallel_prog_using_mpi.pdf
mpiguide.ps
mpi_uk.ps
mpi_day2.pdf
mpi_course.pdf





! example of parallel MPI write into a single file, in Fortran 
PROGRAM main 
    ! Fortran 90 users can (and should) use 
    !     use mpi 
    ! instead of include 'mpif.h' if their MPI implementation provides a 
    ! mpi module. 
    include 'mpif.h' 
 
    integer ierr, i, myrank, BUFSIZE, thefile 
    parameter (BUFSIZE=100) 
    integer buf(BUFSIZE) 
    integer(kind=MPI_OFFSET_KIND) disp 
 
    call MPI_INIT(ierr) 
    call MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierr) 
 
    do i = 1, BUFSIZE 
        buf(i) = myrank * BUFSIZE + i 
    enddo 
    call MPI_FILE_OPEN(MPI_COMM_WORLD, 'testfile', & 
                       MPI_MODE_WRONLY + MPI_MODE_CREATE, & 
                       MPI_INFO_NULL, thefile, ierr) 
    ! assume 4-byte integers 
    disp = myrank * BUFSIZE * 4 
    call MPI_FILE_SET_VIEW(thefile, disp, MPI_INTEGER, & 
                           MPI_INTEGER, 'native', & 
                           MPI_INFO_NULL, ierr) 
    call MPI_FILE_WRITE(thefile, buf, BUFSIZE, MPI_INTEGER, & 
                        MPI_STATUS_IGNORE, ierr) 
    call MPI_FILE_CLOSE(thefile, ierr) 
    call MPI_FINALIZE(ierr) 
 
END PROGRAM main 
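
The displacement above assumes 4-byte integers. A more portable variant (a small sketch, not part of the original example; intsize is a new variable introduced here) queries the element size at run time with MPI_TYPE_SIZE:

    ! query the size of MPI_INTEGER instead of hardcoding 4 bytes
    integer intsize
    call MPI_TYPE_SIZE(MPI_INTEGER, intsize, ierr)
    disp = myrank * BUFSIZE * intsize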

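The corresponding calls in C; here the file is opened with MPI_COMM_SELF, so each process opens the file on its own rather than collectively, writes nints integers, and then reads them back: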
#include <mpi.h>
...
    MPI_File fh;
    MPI_Status status;
...
    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                   MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_write(fh, buf, nints, MPI_INT, &status);
    MPI_File_close(&fh);

    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_read(fh, buf, nints, MPI_INT, &status);
    MPI_File_close(&fh);
...


---------------------------------------------------------------------------------------------------------------------

        program  create_file
  !**************************************************************************
  !  This is a Fortran 90 program to write data directly to a file by each
  !  member of an MPI group.  It is suitable for large jobs which will not
  !  fit into core memory (such as "out of core" solvers)  
  !
  !  Copyright by the Trustees of Indiana University 2005
  !***************************************************************************
        USE MPI
        integer, parameter :: kind_val = 4
        integer, parameter  :: filesize = 40
        integer :: realsize = 4
        integer ::  rank, ierr, fh, nprocs, num_reals
        integer ::  i, region
        real (kind = kind_val) :: datum
        integer, dimension (MPI_STATUS_SIZE) :: status
        integer (kind = MPI_OFFSET_KIND) :: offset, empty
  !  Set filename to output datafile
        character (len = *), parameter :: filename = "/u/ac/rays/new_data.dat"
        real (kind = kind_val), dimension ( : ), allocatable  :: bucket
  !  Basic MPI set-up
        call MPI_INIT(ierr)
        call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
        call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)
  !  Sanity print
         print*, "myid is ", rank
  !  Carve out a piece of the output file and create a data bucket
         empty = 0
         region = filesize / (nprocs )
         offset = ( region * rank )
         allocate (bucket(region))
  !  There is no guarantee that an old file will be clobbered,  so wipe out any previous output file
          if (rank .eq. 0) then
                  call MPI_File_delete(filename, MPI_INFO_NULL, ierr)
          endif
  !  Set the file handle to an initial value (this should not be required)
           fh = 0
  !  Open the output file

           call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)

  !  Wait on everyone to catch up.
           call MPI_BARRIER(MPI_COMM_WORLD, ierr)
  !  Do some work and fill up the data bucket
           call random_seed()
           do i = 1, region
               call random_number(datum)
               bucket(i) = datum * 1000000. * (rank + 1)
               print *, " bucket  ",i ,"= ", bucket(i)
           enddo
  !  Basic "belt and suspenders" insurance that everyone's file pointer is at the beginning of the output file.
            call MPI_FILE_SET_VIEW(fh, empty, MPI_REAL4, MPI_REAL4, 'native', MPI_INFO_NULL, ierr)
  !  Send the data bucket to the output file in the proper place
            call MPI_FILE_WRITE_AT(fh, offset, bucket, region, MPI_REAL4, status, ierr)
  !  Wait on everyone to finish and close up shop
           call MPI_BARRIER(MPI_COMM_WORLD, ierr)
           call MPI_FILE_CLOSE(fh, ierr)
           call MPI_FINALIZE(ierr)
           end  program  create_file
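
Note that because MPI_FILE_SET_VIEW installs MPI_REAL4 as the etype, the offset passed to MPI_FILE_WRITE_AT is counted in REAL4 elements relative to that view, not in bytes, which is why offset = region * rank is correct here.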


program  read_file
  !**************************************************************************
  !  This is a Fortran 90 program to read data directly from a file by each
  !  member of an MPI group.  It is suitable for large jobs which will not
  !  fit into core memory (such as "out of core" solvers)  
  !
  !  Copyright by the Trustees of Indiana University 2005
  !***************************************************************************
          USE MPI
        integer, parameter :: kind_val = 4
        integer,  parameter  :: filesize = 40
        integer :: realsize = 4
        integer ::  rank, ierr, fh, nprocs, num_reals
        integer ::  i, region
        integer, dimension (MPI_STATUS_SIZE) :: status
        integer (kind = MPI_OFFSET_KIND) :: offset, empty
  !  Set filename to output datafile
        character (len = *), parameter :: filename = "/u/ac/rays/new_data.dat"
        real (kind = kind_val), dimension ( : ), allocatable  :: bucket
  !  Basic MPI set-up
        call MPI_INIT(ierr)
        call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
        call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)
  !  Carve out a piece of the output file and create a data bucket
        empty = 0
        region = filesize / (nprocs )
        offset = (region * rank )
        allocate (bucket(region))
  !  Sanity print
        print*, "myid is ", rank
  !  Set the file handle to an initial value (this should not be required)
        fh = 0
  !  Open the output file
        call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, fh, ierr)
  !  Wait on everyone to catch up.
        call MPI_BARRIER(MPI_COMM_WORLD, ierr)
  !  Basic "belt and suspenders" insurance that everyone's file pointer is at the beginning of the output file.
  !  The displacement argument must have kind MPI_OFFSET_KIND, so pass empty (= 0) rather than a default-integer literal.
         call MPI_FILE_SET_VIEW(fh, empty, MPI_REAL4, MPI_REAL4, 'native', MPI_INFO_NULL, ierr)
  !  Read only the section of the data file each process needs and put data in the data bucket.
         call MPI_FILE_READ_AT(fh, offset, bucket, region, MPI_REAL4, status, ierr)
  !  We could check the values received in the bucket (debug hint)
  !
  !      do i = 1, region
  !         print *, "my id is ", rank, " and my ", i, "number is ", bucket(i)
  !      enddo

  !  Wait on everyone to finish and close up shop
        call MPI_BARRIER(MPI_COMM_WORLD, ierr) 
        call MPI_FILE_CLOSE(fh, ierr)
        call MPI_FINALIZE(ierr)
        end  program  read_file


 #include <stdio.h>
  #include <stdlib.h>
  #include <mpi.h>       /* Include the MPI definitions */

  void ErrorMessage(int error, int rank, char* string)
  {
          fprintf(stderr, "Process %d: Error %d in %s\n", rank, error, string);
          MPI_Finalize();
          exit(-1);
  }

  int main(int argc, char *argv[])
  {
    int start, end;
    int length;
    int error;
    char* buffer;
    int nprocs;
    int myrank;
    MPI_Status    status;
    MPI_File      fh;
    MPI_Offset    filesize;

    if (argc != 3)
    {
          fprintf(stderr, "Usage: %s FileToRead FileToWrite\n", argv[0]);
          exit(-1);
    }

    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

    /* Open file to read */
    error = MPI_File_open(MPI_COMM_WORLD, argv[1],
                  MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    if(error != MPI_SUCCESS) ErrorMessage(error, myrank, "MPI_File_open");

    /* Get the size of file */
    error = MPI_File_get_size(fh, &filesize);
    if(error != MPI_SUCCESS) ErrorMessage(error, myrank, "MPI_File_get_size");

    /* calculate the range for each process to read */
    length = filesize / nprocs;
    start = length * myrank;
    if (myrank == nprocs-1)
          end = filesize;
    else
          end = start + length;
    fprintf(stdout, "Proc %d: range = [%d, %d)\n", myrank, start, end);

    /* Allocate space */
    buffer = (char *)malloc((end - start) * sizeof(char));
    if (buffer == NULL) ErrorMessage(-1, myrank, "malloc");

    /* Each process read in data from the file */
    MPI_File_seek(fh, start, MPI_SEEK_SET);
    error = MPI_File_read(fh, buffer, end-start, MPI_BYTE, &status);
    if(error != MPI_SUCCESS) ErrorMessage(error, myrank, "MPI_File_read");

    /* close the file */
    MPI_File_close(&fh);

    /* Open file to write */
    error = MPI_File_open(MPI_COMM_WORLD, argv[2],
                  MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
    if(error != MPI_SUCCESS) ErrorMessage(error, myrank, "MPI_File_open");

    error = MPI_File_write_at(fh, start, buffer, end-start, MPI_BYTE, &status);
    if(error != MPI_SUCCESS) ErrorMessage(error, myrank, "MPI_File_write");

    /* close the file */
    MPI_File_close(&fh);

    /* Finalize MPI */
    MPI_Finalize();
    return 0;
  }
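
This program is typically compiled with mpicc and launched with mpirun (for example, mpirun -np 4), passing the input and output file names as the two command-line arguments.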

---------------------------------------------------------------------------------------------------------------------

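The final example approximates pi = integral of 4/(1+x*x) over [0,1] with the midpoint rule: para_range splits the index range across the ranks, each rank accumulates a partial sum, and MPI_REDUCE adds the partial sums into tsum on rank 0.
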
PROGRAM pi_integral
  include "mpif.h"

  integer, parameter:: num_steps=1000000000
  real(8) :: sum, step, x, pi, tsum
  integer :: i, ista, iend, nprocs, myrank, ierr

  call MPI_INIT(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD,nprocs,ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD,myrank,ierr)

  sum=0.0
  step=1./dble(num_steps)

  call para_range(1, num_steps, nprocs, myrank, ista, iend)
  print*, "myrank =", myrank, ":", ista," ~ ", iend
  do i=ista, iend
     x = (i-0.5)*step
     sum = sum + 4.0/(1.0+x*x)
  enddo

  call MPI_REDUCE(sum, tsum, 1, MPI_REAL8, MPI_SUM, 0, &
       MPI_COMM_WORLD, ierr)

  if(myrank ==0) then
     pi = step*tsum
     print*, "numerical  pi = ", pi
     print*, "analytical pi = ", dacos(-1.d0)
     print*, " Error = ", dabs(dacos(-1.d0)-pi)
  endif

  call MPI_FINALIZE(ierr)

end PROGRAM pi_integral

SUBROUTINE para_range(n1, n2, nprocs, irank, ista, iend)
  ! Split the loop range [n1, n2] into nearly equal contiguous blocks;
  ! rank irank is assigned the sub-range [ista, iend].
  iwork1 = (n2 - n1 + 1) / nprocs
  iwork2 = MOD(n2 - n1 + 1, nprocs)
  ista = irank * iwork1 + n1 + MIN(irank, iwork2)
  iend = ista + iwork1 - 1
  IF (iwork2 > irank) iend = iend + 1
END SUBROUTINE para_range
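
For example, with n1 = 1, n2 = 10, and nprocs = 3, para_range assigns iterations 1-4 to rank 0, 5-7 to rank 1, and 8-10 to rank 2; the single leftover iteration goes to the lowest rank.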




