program greetings

! Include the module with all the MPI parameters, interfaces, etc.

  use mpi

  implicit none

  integer :: myid, numprocs
  integer, parameter :: len = MPI_MAX_PROCESSOR_NAME + 1
  character(len) :: name, chbuf
  integer :: stat(MPI_STATUS_SIZE), ierr, i, dest, tag, reslen

!-------------- Startup phase (create parallel context) ---------------
!
! MPI_Init: Initialise the MPI context (must be called before any other
!           MPI routine). As with most MPI routines, the last argument
!           "ierr" returns the error status of the call.
!
  call MPI_Init(ierr)
!
! MPI_Comm_rank:  Returns the id (rank) of the calling process,
!                 with 0 <= myid < numprocs
! MPI_COMM_WORLD: Predefined communicator consisting of all the
!                 processes running when program execution starts
!
  call MPI_Comm_rank(MPI_COMM_WORLD, myid, ierr)
!
! MPI_Comm_size: Returns the number of processes
!
  call MPI_Comm_size(MPI_COMM_WORLD, numprocs, ierr)
!
! MPI_Get_processor_name: Returns the name of the machine on which the
!                         process is running, and its actual length in
!                         "reslen" (not important here!)
!
  call MPI_Get_processor_name(name, reslen, ierr)
!
!-------------- End of startup phase -----------------------------------

!-------------- Send machine names from slaves to master ---------------
!--------------         Master prints them out           ---------------
!
  if (myid /= 0) then
!
! MPI_Send: Send the message defined by address "name", length "len"
!           and data type "MPI_CHARACTER" to the process of rank "dest".
!           The communicator "MPI_COMM_WORLD" and the identifier "tag"
!           can be used to "code" the message, i.e. to make it unique.
!
     dest = 0
     tag  = myid
     call MPI_Send(name, len, MPI_CHARACTER, dest, tag, &
                   MPI_COMM_WORLD, ierr)

  else

     print*, ' There are ', numprocs, ' processes running'
     print*, ' Master process ', myid, ' runs on ', name
!
! MPI_Recv: Receive a message sent by MPI_Send. The first three
!           arguments give the address, length and data type of the
!           message, as before. Wildcards can be used for argument 4,
!           the source (MPI_ANY_SOURCE), and for argument 5, the tag
!           (MPI_ANY_TAG); otherwise the tag has to match the one set
!           by the sending process. The sixth argument is an integer
!           array of size MPI_STATUS_SIZE (at least 3) that returns
!           information about the message actually received, i.e.
!           source, tag, error, etc.
!
     do i = 1, numprocs-1
        call MPI_Recv(chbuf, len, MPI_CHARACTER, MPI_ANY_SOURCE, &
                      MPI_ANY_TAG, MPI_COMM_WORLD, stat, ierr)
        print*, ' Slave process ', stat(MPI_SOURCE), ' runs on ', chbuf
     end do

  end if
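!
! Note: besides the source, the status array returned by MPI_Recv above
! can also be queried for the tag, stat(MPI_TAG), and (via MPI_Get_count)
! for the number of elements actually received. A minimal sketch, assuming
! an additional integer variable "rcount" were declared:
!
!    call MPI_Get_count(stat, MPI_CHARACTER, rcount, ierr)
!    print*, ' Received ', rcount, ' characters with tag ', stat(MPI_TAG)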
!
!--------------------------- End sending -------------------------------
!

!
!-------------------------- Finishing MPI ------------------------------
!
! MPI_Finalize: The counterpart to MPI_Init. Has to be the last MPI call
!               in the program. It cleans up "unfinished business",
!               frees memory, etc.
!
  call MPI_Finalize(ierr)

end program greetings
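
! A typical way to compile and run this example (the exact wrapper and
! launcher names depend on the local MPI installation; the file name
! "greetings.f90" is only assumed here):
!
!    mpif90 -o greetings greetings.f90
!    mpirun -np 4 ./greetings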