Skip to content

Commit 9f5bf0a

Browse files
committed
Add MPI exercises and project description.
1 parent e818139 commit 9f5bf0a

File tree

9 files changed

+332
-0
lines changed

9 files changed

+332
-0
lines changed

7-mpi/demo/circular.cpp

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <mpi.h>
5+
6+
int main(int argc, char **argv)
7+
{
8+
int rank, num_procs;
9+
10+
/* Initialize the infrastructure necessary for communication */
11+
MPI_Init(&argc, &argv);
12+
13+
/* Identify this process */
14+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
15+
16+
/* Find out how many total processes are active */
17+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
18+
19+
std::string ping("ping");
20+
std::string rdata(4, ' ');
21+
int next_rank = (rank + 1) % num_procs;
22+
int prev_rank = ((rank + num_procs) - 1) % num_procs;
23+
printf("%d: send ping to %d...\n", rank, next_rank);
24+
MPI_Ssend(ping.data(), ping.size(), MPI_CHAR, next_rank, 0, MPI_COMM_WORLD);
25+
printf("%d: receive ping from %d...\n", rank, prev_rank);
26+
MPI_Recv(rdata.data(), rdata.size(), MPI_CHAR, prev_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
27+
28+
/* Tear down the communication infrastructure */
29+
MPI_Finalize();
30+
return 0;
31+
}

7-mpi/demo/hello_world.cpp

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
/*
2+
"Hello World" MPI Test Program (from Wikipedia)
3+
*/
4+
#include <assert.h>
5+
#include <stdio.h>
6+
#include <string.h>
7+
#include <mpi.h>
8+
9+
int main(int argc, char **argv)
10+
{
11+
char buf[256];
12+
int my_rank, num_procs;
13+
14+
/* Initialize the infrastructure necessary for communication */
15+
MPI_Init(&argc, &argv);
16+
17+
/* Identify this process */
18+
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
19+
20+
/* Find out how many total processes are active */
21+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
22+
23+
/* Until this point, all programs have been doing exactly the same.
24+
Here, we check the rank to distinguish the roles of the programs */
25+
if (my_rank == 0) {
26+
int other_rank;
27+
printf("We have %i processes.\n", num_procs);
28+
29+
/* Send messages to all other processes */
30+
for (other_rank = 1; other_rank < num_procs; other_rank++)
31+
{
32+
sprintf(buf, "Hello %i!", other_rank);
33+
MPI_Send(buf, 256, MPI_CHAR, other_rank,
34+
0, MPI_COMM_WORLD);
35+
}
36+
37+
/* Receive messages from all other processes */
38+
for (other_rank = 1; other_rank < num_procs; other_rank++)
39+
{
40+
MPI_Recv(buf, 256, MPI_CHAR, other_rank,
41+
0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
42+
printf("%s\n", buf);
43+
}
44+
45+
} else {
46+
/* Receive message from process #0 */
47+
MPI_Recv(buf, 256, MPI_CHAR, 0,
48+
0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
49+
assert(memcmp(buf, "Hello ", 6) == 0);
50+
51+
/* Send message to process #0 */
52+
sprintf(buf, "Process %i reporting for duty.", my_rank);
53+
MPI_Send(buf, 256, MPI_CHAR, 0,
54+
0, MPI_COMM_WORLD);
55+
56+
}
57+
58+
/* Tear down the communication infrastructure */
59+
MPI_Finalize();
60+
return 0;
61+
}

7-mpi/demo/order_guarantee.cpp

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <iostream>
5+
#include <mpi.h>
6+
7+
int main(int argc, char **argv)
8+
{
9+
int rank, num_procs;
10+
11+
/* Initialize the infrastructure necessary for communication */
12+
MPI_Init(&argc, &argv);
13+
14+
/* Identify this process */
15+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
16+
17+
/* Find out how many total processes are active */
18+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
19+
20+
std::string hello("hello");
21+
std::string world("world");
22+
23+
if(rank == 0) {
24+
MPI_Send(hello.data(), hello.size(), MPI_CHAR, 1, 0, MPI_COMM_WORLD);
25+
MPI_Send(world.data(), world.size(), MPI_CHAR, 1, 0, MPI_COMM_WORLD);
26+
}
27+
else if(rank == 1) {
28+
MPI_Recv(hello.data(), hello.size(), MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
29+
MPI_Recv(world.data(), world.size(), MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
30+
std::cout << "We should receive hello world, and we received: " << hello << " " << world << std::endl;
31+
}
32+
33+
/* Tear down the communication infrastructure */
34+
MPI_Finalize();
35+
return 0;
36+
}

7-mpi/demo/progress_guarantee.cpp

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <iostream>
5+
#include <mpi.h>
6+
7+
int main(int argc, char **argv)
8+
{
9+
int rank, num_procs;
10+
11+
/* Initialize the infrastructure necessary for communication */
12+
MPI_Init(&argc, &argv);
13+
14+
/* Identify this process */
15+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
16+
17+
/* Find out how many total processes are active */
18+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
19+
20+
std::string hello("hello");
21+
std::string world("world");
22+
int tag1 = 1;
23+
int tag2 = 2;
24+
if(rank == 0) {
25+
size_t buf_size = hello.size() * sizeof(char) + MPI_BSEND_OVERHEAD;
26+
char* b = (char*)malloc(buf_size);
27+
MPI_Buffer_attach(b, buf_size);
28+
MPI_Bsend(hello.data(), hello.size(), MPI_CHAR, 1, tag1, MPI_COMM_WORLD);
29+
MPI_Ssend(world.data(), world.size(), MPI_CHAR, 1, tag2, MPI_COMM_WORLD);
30+
}
31+
else if(rank == 1) {
32+
MPI_Recv(world.data(), world.size(), MPI_CHAR, 0, tag2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
33+
MPI_Recv(hello.data(), hello.size(), MPI_CHAR, 0, tag1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
34+
std::cout << "We should receive hello world, and we received: " << hello << " " << world << std::endl;
35+
}
36+
37+
/* Tear down the communication infrastructure */
38+
MPI_Finalize();
39+
return 0;
40+
}

7-mpi/exercises/ping_pong.cpp

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <mpi.h>
5+
6+
int main(int argc, char **argv)
7+
{
8+
int rank, num_procs;
9+
10+
/* Initialize the infrastructure necessary for communication */
11+
MPI_Init(&argc, &argv);
12+
13+
if(argc < 2) {
14+
printf("usage: %s <num-ping-pong>\n", argv[0]);
15+
MPI_Abort(MPI_COMM_WORLD, 1);
16+
}
17+
int num_ping_pong = std::stoi(argv[1]);
18+
19+
/* Identify this process */
20+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
21+
22+
/* Find out how many total processes are active */
23+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
24+
25+
if(num_procs % 2 != 0) {
26+
printf("This program must be ran with an even number of processes.\n");
27+
MPI_Abort(MPI_COMM_WORLD, 1);
28+
exit(1);
29+
}
30+
31+
/** TODO: processes exchange "ping" / "pong" messages `num_ping_pong` times. */
32+
33+
/* Tear down the communication infrastructure */
34+
MPI_Finalize();
35+
return 0;
36+
}

7-mpi/exercises/ping_pong_ring.cpp

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <iostream>
5+
#include <mpi.h>
6+
7+
int main(int argc, char **argv)
8+
{
9+
int rank, num_procs;
10+
11+
/* Initialize the infrastructure necessary for communication */
12+
MPI_Init(&argc, &argv);
13+
14+
if(argc < 2) {
15+
printf("usage: %s <num-ping-pong>\n", argv[0]);
16+
MPI_Abort(MPI_COMM_WORLD, 1);
17+
}
18+
int num_ping_pong = std::stoi(argv[1]);
19+
20+
/* Identify this process */
21+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
22+
23+
/* Find out how many total processes are active */
24+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
25+
26+
/** TODO: ping pong ring. */
27+
28+
/* Tear down the communication infrastructure */
29+
MPI_Finalize();
30+
return 0;
31+
}

7-mpi/exercises/prime.cpp

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <iostream>
5+
#include <mpi.h>
6+
7+
int main(int argc, char **argv)
8+
{
9+
int rank, num_procs;
10+
11+
/* Initialize the infrastructure necessary for communication */
12+
MPI_Init(&argc, &argv);
13+
14+
if(argc < 2) {
15+
printf("usage: %s <upper_bound_prime>\n", argv[0]);
16+
MPI_Abort(MPI_COMM_WORLD, 1);
17+
}
18+
int n = std::stoi(argv[1]);
19+
20+
/* Identify this process */
21+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
22+
23+
/* Find out how many total processes are active */
24+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
25+
26+
/** This is a sequential loop: need to be parallelized. */
27+
int totalPrimes = 0;
28+
bool prime;
29+
for(int i = 2; i <= n; i++) {
30+
prime = true;
31+
for(int j = 2; j < i; j++) {
32+
if((i % j) == 0){
33+
prime = false;
34+
break;
35+
}
36+
}
37+
totalPrimes += prime;
38+
}
39+
40+
/* Tear down the communication infrastructure */
41+
MPI_Finalize();
42+
return 0;
43+
}

7-mpi/exercises/stop_when.cpp

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
#include <cassert>
2+
#include <cstdio>
3+
#include <string>
4+
#include <mpi.h>
5+
6+
int main(int argc, char **argv)
7+
{
8+
int rank, num_procs;
9+
10+
/* Initialize the infrastructure necessary for communication */
11+
MPI_Init(&argc, &argv);
12+
13+
/* Identify this process */
14+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
15+
16+
/* Find out how many total processes are active */
17+
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
18+
19+
if(num_procs != 3) {
20+
printf("This program must be ran with 3 processes.\n");
21+
MPI_Abort(MPI_COMM_WORLD, 1);
22+
exit(1);
23+
}
24+
25+
/** TODO: Stop when either process 1 or 2 wakes up. */
26+
27+
/* Tear down the communication infrastructure */
28+
MPI_Finalize();
29+
return 0;
30+
}

README.md

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -162,3 +162,27 @@ contiguous lines of the global image. What do you observe in terms of speed-up c
162162
### [Optional⭐] N-Queens
163163
164164
* `6-pgas-chapel/exercises/nqueens.chpl`: Parallelize the N-queens code seen in a previous course using Chapel.
165+
166+
## MPI
167+
168+
* `7-mpi/exercises/ping_pong.cpp`: Given `N` processes, we pair processes `(0,1), (2,3), ...` such that `0` sends "ping" to `1`, which replies with "pong". This is repeated `num_ping_pong` times, as specified by a command-line argument.
169+
* `7-mpi/exercises/ping_pong_ring.cpp`: Given `N` processes organized in a ring, each process sends "ping" to its right neighbor, receives "ping" from its left neighbor, sends "pong" to its left neighbor and receives "pong" from its right neighbor. This is repeated `num_ping_pong` times, as specified by a command-line argument.
170+
* `7-mpi/exercises/stop_when.cpp`: Given 3 processes, processes 1 and 2 wait for a random number of seconds, and afterwards each sends a message to process 0 (can use `MPI_ANY_SOURCE`). Once process 0 receives a message, it broadcasts a termination message to everybody, and all processes *immediately* stop.
171+
* `7-mpi/exercises/sum.cpp`: Suppose we have `N` processes. The process 0 generates a random array `arr`. Using `MPI_Scatter`, the array is distributed among the processes. They all compute the local sum of the array. Using `MPI_Reduce` we retrieve the global sum and print it in the process 0.
172+
* Read [this paper](http://www1.cs.columbia.edu/~sedwards/papers/kahn1974semantics.pdf) on Kahn process networks.
173+
174+
### Project: Maze Creation and Testing
175+
176+
In this project, you will develop a parallel algorithm which creates a maze and tests how many paths can enter and exit the maze.
177+
178+
* Use randomized depth-first search to create a maze, or any other technique of your choice (see [here](https://en.wikipedia.org/wiki/Maze_generation_algorithm#Randomized_depth-first_search)).
179+
* Given the beginning and exit of the labyrinth, design three different algorithms with MPI (different communication and/or data-splitting strategies) finding *all different paths* in the labyrinth.
180+
* Compare the paths found (if the maze is small enough) or the number of paths found (otherwise) among the algorithms. You must obtain the same results, otherwise an algorithm is wrong.
181+
* Write a short report explaining the intuitions behind the three algorithms, and evaluate those algorithms on a set of generated mazes (from small to large).
182+
* Give hypotheses on why one algorithm works better than another.
183+
* The goal is to practice with MPI and different topologies/strategies. It is OK if your algorithm is slower than the others as long as you can explain why.
184+
* For students in the Master in HPC: it is mandatory to run some experiments on the HPC with the number of nodes >= 2.
185+
* The code you produce must be your own and *you cannot copy* an existing MPI algorithm from internet.
186+
* You can work in a team of 3; each member must design and implement a different algorithm, and you must explicitly say in the report who wrote which algorithm.
187+
* Send the project before the exam at pierre.talbot@uni.lu with the subject "[MHPC][PGC] Project MPI".
188+
* *Optional*: Use the [Boost.MPI library](https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html) to create your application.

0 commit comments

Comments
 (0)