From 7ee4bc2fd6b782de3a8024c06180f2fc30fd0ff3 Mon Sep 17 00:00:00 2001
From: leopekkas
Date: Fri, 28 Jul 2023 10:35:51 +0300
Subject: [PATCH] Fix MPI rank naming to be consistent in slides

---
 mpi/docs/03-special-variables.md            | 20 +++++++++---------
 mpi/docs/04-collectives.md                  | 21 +++++++++----------
 mpi/docs/06-user-defined-communicators.md   | 10 ++++-----
 .../08-communicator-topologies-cartesian.md |  6 +++---
 4 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/mpi/docs/03-special-variables.md b/mpi/docs/03-special-variables.md
index 2d00b28d9..bd7a99d28 100644
--- a/mpi/docs/03-special-variables.md
+++ b/mpi/docs/03-special-variables.md
@@ -20,7 +20,7 @@ lang: en
 ```fortran
-if (myid == 0) then
+if (rank == 0) then
   call mpi_send(message, msgsize, &
                 MPI_INTEGER, 1, &
                 42, MPI_COMM_WORLD, rc)
@@ -28,7 +28,7 @@ if (myid == 0) then
                 MPI_INTEGER, 1, &
                 42, MPI_COMM_WORLD, &
                 status, rc)
-else if (myid == 1) then
+else if (rank == 1) then
   call mpi_send(message, msgsize, &
                 MPI_INTEGER, 0, &
                 42, MPI_COMM_WORLD, rc)
@@ -43,8 +43,8 @@ else if (myid == 1) then
 ```fortran
 ! Modulo operation can be used for
 ! wrapping around
-dst = mod(myid + 1, ntasks)
-src = mod(myid - 1 + ntasks, ntasks)
+dst = mod(rank + 1, ntasks)
+src = mod(rank - 1 + ntasks, ntasks)
 
 call mpi_send(message, msgsize, &
               MPI_INTEGER, dst, &
@@ -64,7 +64,7 @@ call mpi_recv(recvBuf, arraysize, &
   chosen as the special task in scatter and gather type operations
 
 ```c++
-if (0 == myid) {
+if (0 == rank) {
   for (int i=1; i < ntasks; i++) {
     MPI_Send(&data, 1, MPI_INT, i, 42, MPI_COMM_WORLD);
   }
@@ -84,13 +84,13 @@ if (0 == myid) {
 # Example
 
 ```fortran
-dst = myid + 1
-src = myid - 1
+dst = rank + 1
+src = rank - 1
 
-if (myid == 0) then
+if (rank == 0) then
   src = MPI_PROC_NULL
 end if
-if (myid == ntasks - 1) then
+if (rank == ntasks - 1) then
   dst = MPI_PROC_NULL
 end if
@@ -115,7 +115,7 @@ call mpi_recv(message, msgsize, MPI_INTEGER, src, 42, MPI_COMM_WORLD, status, rc
 ```c++
-if (0 == myid) {
+if (0 == rank) {
   for (int i=1; i < ntasks; i++) {
     MPI_Recv(&data, 1, MPI_INT, MPI_ANY_SOURCE,
              42, MPI_COMM_WORLD, &status);
diff --git a/mpi/docs/04-collectives.md b/mpi/docs/04-collectives.md
index 784d7d94b..e46d3bb1b 100644
--- a/mpi/docs/04-collectives.md
+++ b/mpi/docs/04-collectives.md
@@ -23,7 +23,7 @@ lang: en
 ```fortran
-if (my_id == 0) then
+if (rank == 0) then
   do i = 1, ntasks-1
     call mpi_send(a, 1048576, &
                   MPI_REAL, i, tag, &
@@ -128,14 +128,14 @@ Assume 4 MPI tasks. What would the (full) program print?
 ```fortran
-if (my_id==0) then
+if (rank==0) then
   do i = 1, 16
     a(i) = i
   end do
 end if
 call mpi_bcast(a, 16, MPI_INTEGER, 0, &
                MPI_COMM_WORLD, rc)
-if (my_id==3) print *, a(:)
+if (rank==3) print *, a(:)
 ```
 
 **A)** `1 2 3 4`
@@ -146,14 +146,14 @@ if (my_id==3) print *, a(:)
 ```fortran
-if (my_id==0) then
+if (rank==0) then
   do i = 1, 16
     a(i) = i
   end do
 end if
 call mpi_scatter(a, 4, MPI_INTEGER, aloc, 4 &
                  MPI_INTEGER, 0, MPI_COMM_WORLD, rc)
-if (my_id==3) print *, aloc(:)
+if (rank==3) print *, aloc(:)
 ```
 
 **A)** `1 2 3 4`
@@ -197,7 +197,7 @@ MPI_Scatterv(`sendbuf`{.input}, `sendcounts`{.input}, `displs`{.input}, `sendtyp
 ```fortran
-if (my_id==0) then
+if (rank==0) then
   do i = 1, 10
     a(i) = i
   end do
@@ -208,7 +208,7 @@ displs(0:3) = [ 0, 1, 3, 6 ]
 
 call mpi_scatterv(a, scounts, &
        displs, MPI_INTEGER, &
-       aloc, scounts(my_id), &
+       aloc, scounts(rank), &
        MPI_INTEGER, 0, &
        MPI_COMM_WORLD, rc)
@@ -383,7 +383,7 @@ MPI_Alltoall(`sendbuf`{.input}, `sendcount`{.input}, `sendtype`{.input}, `recvbu
 ```fortran
-if (my_id==0) then
+if (rank==0) then
   do i = 1, 16
     a(i) = i
   end do
@@ -497,7 +497,7 @@ MPI_Allreduce(`sendbuf`{.input}, `recvbuf`{.output}, `count`{.input}, `datatype`
 ```fortran
 real :: a(1024), aloc(128)
 ...
-if (my_id==0) then
+if (rank==0) then
   call random_number(a)
 end if
 call mpi_scatter(a, 128, MPI_INTEGER, &
@@ -527,7 +527,7 @@ call mpi_allreduce(rloc, r, 1, MPI_REAL, &
 # Common mistakes with collectives
 
 - Using a collective operation within if-rank test:
-`if (my_id == 0) call mpi_bcast(...`
+`if (rank == 0) call mpi_bcast(...`
 - All the processes, both the root (the sender or the gatherer) and the rest (receivers or senders), must call the collective routine!
 - Assuming that all processes making a collective call would complete at the same time
@@ -546,4 +546,3 @@ call mpi_allreduce(rloc, r, 1, MPI_REAL, &
 # Summary
 
 ![](img/collective-patterns.png){.center width=100%}
-
diff --git a/mpi/docs/06-user-defined-communicators.md b/mpi/docs/06-user-defined-communicators.md
index bd1af5f80..a5ccdbc12 100644
--- a/mpi/docs/06-user-defined-communicators.md
+++ b/mpi/docs/06-user-defined-communicators.md
@@ -53,18 +53,18 @@ new communicators
 ```c
-if (myid%2 == 0) {
+if (rank%2 == 0) {
     color = 1;
 } else {
     color = 2;
 }
 
 MPI_Comm_split(MPI_COMM_WORLD, color,
-               myid, &subcomm);
+               rank, &subcomm);
 
 MPI_Comm_rank(subcomm, &mysubid);
 
 printf ("I am rank %d in MPI_COMM_WORLD, but"
-        "%d in Comm %d.\n", myid, mysubid,
+        "%d in Comm %d.\n", rank, mysubid,
         color);
 ```
@@ -93,13 +93,13 @@ I am rank 1 in MPI_COMM_WORLD, but 0 in Comm 2.

 ```c
-if (myid%2 == 0) {
+if (rank%2 == 0) {
     color = 1;
 } else {
     color = 2;
 }
 
 MPI_Comm_split(MPI_COMM_WORLD, color,
-               myid, &subcomm);
+               rank, &subcomm);
 
 MPI_Comm_rank(subcomm, &mysubid);
 MPI_Bcast(sendbuf, 8, MPI_INT, 0, subcomm);
diff --git a/mpi/docs/08-communicator-topologies-cartesian.md b/mpi/docs/08-communicator-topologies-cartesian.md
index 269915494..a4fed27a3 100644
--- a/mpi/docs/08-communicator-topologies-cartesian.md
+++ b/mpi/docs/08-communicator-topologies-cartesian.md
@@ -132,8 +132,8 @@ period=(/ .true., .false. /)
 
 call mpi_dims_create(ntasks, 2, dims, rc)
 call mpi_cart_create(mpi_comm_world, 2, dims, period, .true., comm2d, rc)
-call mpi_comm_rank(comm2d, my_id, rc)
-call mpi_cart_coords(comm2d, my_id, 2, coords, rc)
+call mpi_comm_rank(comm2d, rank, rc)
+call mpi_cart_coords(comm2d, rank, 2, coords, rc)
 ```
@@ -252,4 +252,4 @@ call mpi_sendrecv(buf(1,1), 1, rowtype, nbr_up, tag_up, &
 - Neighborhood collectives enable communication between neighbours in process
   topology with a single MPI call
-- Neighborhood communication provides optimization opportunities for MPI library
\ No newline at end of file
+- Neighborhood communication provides optimization opportunities for MPI library
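
For context, the convention this patch settles on is to store the result of `MPI_Comm_rank` in a variable named `rank` (with the process count in `ntasks`). A minimal, self-contained sketch of that pattern in C (illustrative only; the file name and output text are assumptions, not taken from the patched slides):

```c
/* rank_hello.c - illustrative sketch of the naming convention:
 * the MPI rank is stored in "rank", the process count in "ntasks". */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, ntasks;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    /* id of this process in MPI_COMM_WORLD */
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);  /* total number of processes */

    printf("Hello from rank %d of %d\n", rank, ntasks);

    MPI_Finalize();
    return 0;
}
```

Built and run with, for example, `mpicc rank_hello.c -o rank_hello && mpirun -np 4 ./rank_hello`.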
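The renamed communicator-splitting example in 06-user-defined-communicators.md uses the same `rank` variable both as the MPI_COMM_WORLD rank and as the ordering key passed to `MPI_Comm_split`. A self-contained sketch of that pattern (even/odd coloring as in the slide; the surrounding boilerplate is assumed, not taken from the course material):

```c
/* comm_split_sketch.c - illustrative: split MPI_COMM_WORLD by rank parity,
 * reusing "rank" as the key so sub-communicator ordering follows world rank. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, mysubid, color;
    MPI_Comm subcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Even world ranks form Comm 1, odd world ranks form Comm 2. */
    color = (rank % 2 == 0) ? 1 : 2;

    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &subcomm);
    MPI_Comm_rank(subcomm, &mysubid);

    printf("I am rank %d in MPI_COMM_WORLD, but %d in Comm %d.\n",
           rank, mysubid, color);

    MPI_Comm_free(&subcomm);
    MPI_Finalize();
    return 0;
}
```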