-
Notifications
You must be signed in to change notification settings - Fork 0
/
mpi_dijkstra.c
343 lines (321 loc) · 15 KB
/
mpi_dijkstra.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
/*==============================================================================
Author: = Tim Siwula
License: = GPLv2
File: = mpi_dijkstra.c
Version: = 0.02
Created: = 10/28/2015
==============================================================================
Compile: = mpicc -g -Wall -o mpi_dijkstra mpi_dijkstra.c
Run: = mpiexec -n <p> ./mpi_dijkstra (on lab machines)
= csmpiexec -n <p> ./mpi_dijkstra (on the penguin cluster)
==============================================================================
Purpose: = Implement I/O functions that will be useful in
an MPI implementation of Dijkstra's algorithm.
In particular, the program creates an MPI_Datatype
that can be used to implement input and output of
a matrix that is distributed by block columns. It
also implements input and output functions that use
this datatype. Finally, it implements a function
that prints out a process' submatrix as a string.
This makes it more likely that printing the submatrix
assigned to one process will be printed without
interruption by another process.
==============================================================================
Input: = n: the number of rows and the number of columns
in the matrix
mat: the matrix: note that INFINITY should be
input as 1000000
==============================================================================
Output: = The submatrix assigned to each process and the
complete matrix printed from process 0. Both
print "i" instead of 1000000 for infinity.
==============================================================================
Note: = 1. The number of processes, p, should evenly divide n.
2. You should free the MPI_Datatype object created by
the program with a call to MPI_Type_free: see the
main function.
3. Example: Suppose the matrix is
0 1 2 3
4 0 5 6
7 8 0 9
8 7 6 0
Then if there are two processes, the matrix will be
distributed as follows:
Proc 0: 0 1 Proc 1: 2 3
4 0 5 6
7 8 0 9
8 7 6 0
.......................................................................... */
/* ========================================================================== */
/* External libraries */
/* ========================================================================== */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
/* .......................................................................... */
/* ========================================================================== */
/* Constants */
/* ========================================================================== */
#define MAX_STRING 10000
#define INFINITY 1000000
#define MAX_ELEMENT 20
/* .......................................................................... */
/* ========================================================================== */
/* Custom function definitions */
/* ========================================================================== */
int Read_n(int my_rank, MPI_Comm comm);
MPI_Datatype Build_blk_col_type(int n, int loc_n);
void Print_paths(int pred[], int n, int global_pred[], int my_rank, int loc_n);
void Print_dists(int dist[], int n, int my_rank, int loc_n,
int global_dist[]);
int Find_min_dist(int loc_dist[], int loc_known[], int loc_n, int my_rank,
int* min_dist_p);
void Dijkstra(int loc_mat[], int loc_dist[], int loc_pred[], int loc_n, int n,
int my_rank);
void Read_matrix(int loc_mat[], int n, int loc_n,
MPI_Datatype blk_col_mpi_t, int my_rank, MPI_Comm comm);
/* .......................................................................... */
/* ========================================================================== */
/* Main( ) */
/* ========================================================================== */
int main(int argc, char **argv)
{
    int n, loc_n, p, my_rank;
    int *loc_mat, *loc_dist, *loc_pred, *global_dist, *global_pred;
    MPI_Datatype blk_col_mpi_t;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    n = Read_n(my_rank, MPI_COMM_WORLD);

    /* The block-column distribution requires p to evenly divide n
       (see the header note); enforce it instead of silently dropping
       the trailing columns. */
    if (n % p != 0) {
        if (my_rank == 0)
            fprintf(stderr,
                    "The number of processes (%d) must evenly divide n (%d).\n",
                    p, n);
        MPI_Finalize();
        return 1;
    }
    loc_n = n/p;

    /* Each process stores an n x loc_n block column of the matrix plus
       its loc_n-element slices of the distance and predecessor arrays;
       process 0 also gathers the full n-element results. */
    loc_mat     = malloc(n*loc_n*sizeof(int));
    global_dist = malloc(n*sizeof(int));
    global_pred = malloc(n*sizeof(int));
    loc_dist    = malloc(loc_n*sizeof(int));
    loc_pred    = malloc(loc_n*sizeof(int));
    if (loc_mat == NULL || global_dist == NULL || global_pred == NULL ||
        loc_dist == NULL || loc_pred == NULL) {
        fprintf(stderr, "Process %d: out of memory.\n", my_rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    blk_col_mpi_t = Build_blk_col_type(n, loc_n);

    Read_matrix(loc_mat, n, loc_n, blk_col_mpi_t, my_rank, MPI_COMM_WORLD);
    Dijkstra(loc_mat, loc_dist, loc_pred, loc_n, n, my_rank);
    Print_dists(loc_dist, n, my_rank, loc_n, global_dist);
    Print_paths(loc_pred, n, global_pred, my_rank, loc_n);

    free(loc_mat);
    free(global_dist);
    free(global_pred);
    free(loc_dist);
    free(loc_pred);
    MPI_Type_free(&blk_col_mpi_t);
    MPI_Finalize();
    return 0;
}  /* main */
/* .......................................................................... */
/* ============================================================================
Function: = Dijkstra
Purpose: = Apply Dijkstra's algorithm to the matrix mat for each
= process.
==============================================================================
Input arg: = 1. loc_mat: adjacency matrix of graph for each process.
= 2. loc_dist: each processors cheapest edge.
= 3. loc_pred: each processors previous vertex in path.
= 4. loc_n: each processors range of work.
= 5. n: the number of vertices.
= 6. my_rank: each processors rank.
==============================================================================
Output arg: = 1. dist: dist[v] = distance 0 to v.
= 2. pred: pred[v] = predecessor of v on a shortest path
= 0->v.
============================================================================ */
void Dijkstra(int loc_mat[], int loc_dist[], int loc_pred[], int loc_n, int n,
int my_rank){
int i, loc_u, loc_v, *loc_known, new_dist, min_dist_p;
loc_known = malloc(loc_n*sizeof(int));
loc_dist[0] = 0; loc_pred[0] = 0; loc_known[0] = 1;
for (loc_v = 0; loc_v < loc_n; loc_v++){
loc_dist[loc_v] = loc_mat[0*loc_n + loc_v];
loc_pred[loc_v] = 0;
loc_known[loc_v] = 0;
}
if (my_rank == 0) loc_known[0] = 1;
for (i = 1; i < n; i++){
loc_u = Find_min_dist(loc_dist, loc_known, loc_n, my_rank, &min_dist_p);
if (loc_u/loc_n == my_rank)
loc_known[loc_u % loc_n] = 1;
for (loc_v = 0; loc_v < loc_n; loc_v++){
if (!loc_known[loc_v]){
new_dist = min_dist_p + loc_mat[loc_u*loc_n + loc_v];
if (new_dist < loc_dist[loc_v]){
loc_dist[loc_v] = new_dist; // update vertex
loc_pred[loc_v] = loc_u;
}}}} /* for i */
free(loc_known);
} /* Dijkstra */
/* .......................................................................... */
/* ============================================================================
Function: = Find_min_dist
Purpose: = Find the vertex u with minimum distance to 0
(dist[u]) among the vertices whose distance
to 0 is not known.
==============================================================================
Input arg: = 1. loc_dist: dist[v] = current estimate of distance
0->v
= 2. loc_known: whether the minimum distance 0-> is known
= 3. loc_n: each processors range of work.
= 4. min_dist_p: the current minimum distance.
==============================================================================
Output arg: = 1. loc_u: The vertex u whose distance to 0, dist[u]
is a minimum among vertices whose distance
to 0 is not known.
============================================================================ */
/* Return the GLOBAL vertex u whose tentative distance from 0 is minimal
 * among all vertices (on all processes) not yet known; store that distance
 * through min_dist_p.  Collective: every process must call this. */
int Find_min_dist(int loc_dist[], int loc_known[], int loc_n, int my_rank,
      int* min_dist_p) {
   int v;
   int best_local = -1;            /* local index of this process' candidate */
   int best_dist = INFINITY+1;     /* sentinel larger than any real distance */
   int my_min[2], glbl_min[2];     /* {distance, global vertex} pairs        */

   /* Scan this process' vertices for the closest unknown one. */
   for (v = 0; v < loc_n; v++) {
      if (!loc_known[v] && loc_dist[v] < best_dist) {
         best_dist = loc_dist[v];
         best_local = v;
      }
   }

   if (best_local == -1) {
      /* Every local vertex is already known: contribute a sentinel pair
         that cannot win the reduction. */
      my_min[0] = INFINITY+1;
      my_min[1] = -1;
   } else {
      my_min[0] = loc_dist[best_local];
      my_min[1] = best_local + my_rank*loc_n;  /* local -> global vertex */
   }

   /* MINLOC over (distance, vertex) pairs: smallest distance wins, ties
      broken by the smaller global vertex number. */
   MPI_Allreduce(my_min, glbl_min, 1, MPI_2INT, MPI_MINLOC, MPI_COMM_WORLD);

   *min_dist_p = glbl_min[0];
   return glbl_min[1];             /* the global minimum vertex */
}  /* Find_min_dist */
/* .......................................................................... */
/*-------------------------------------------------------------------
* Function: Print_dists
* Purpose: Print the length of the shortest path from 0 to each
* vertex
* In args: n: the number of vertices
* dist: distances from 0 to each vertex v: dist[v]
* is the length of the shortest path 0->v
*/
/* Gather every process' block of distances onto process 0 and print the
 * length of the shortest path from 0 to each vertex there. */
void Print_dists(int loc_dist[], int n, int my_rank, int loc_n,
      int global_dist[]) {
   int vertex;

   MPI_Gather(loc_dist, loc_n, MPI_INT, global_dist, loc_n,
         MPI_INT, 0, MPI_COMM_WORLD);

   if (my_rank != 0) return;   /* only the root prints */

   printf("The distance from 0 to each vertex is:\n");
   printf(" v dist 0->v\n");
   printf("---- ---------\n");
   for (vertex = 1; vertex < n; vertex++)
      printf("%3d %4d\n", vertex, global_dist[vertex]);
   printf("\n");
}  /* Print_dists */
/*-------------------------------------------------------------------
* Function: Print_paths
* Purpose: Print the shortest path from 0 to each vertex
* In args: n: the number of vertices
* pred: list of predecessors: pred[v] = u if
* u precedes v on the shortest path 0->v
*/
/* Gather every process' block of predecessors onto process 0 and print
 * the shortest path 0 -> v for each vertex v there. */
void Print_paths(int pred[], int n, int global_pred[], int my_rank, int loc_n ){
   int v, w, top;
   int *stack = malloc(n*sizeof(int));  /* holds one reversed path */

   MPI_Gather(pred, loc_n, MPI_INT, global_pred, loc_n,
         MPI_INT, 0, MPI_COMM_WORLD);

   if (my_rank == 0)
   {
      printf(" v Path 0->v\n");
      printf("---- ---------\n");
      for (v = 1; v < n; v++) {
         printf("%3d: ", v);

         /* Walk the predecessor chain back to the source, pushing each
            vertex so the path comes out source-first when popped. */
         top = 0;
         for (w = v; w != 0; w = global_pred[w])
            stack[top++] = w;

         printf("0 ");
         while (top > 0)
            printf("%d ", stack[--top]);
         printf("\n");
      }
   }
   free(stack);
}  /* Print_paths */
/*---------------------------------------------------------------------
* Function: Read_n
* Purpose: Read in the number of rows in the matrix on process 0
* and broadcast this value to the other processes
* In args: my_rank: the calling process' rank
* comm: Communicator containing all calling processes
* Ret val: n: the number of rows in the matrix
*/
/* Read the matrix dimension n on process 0 and broadcast it to every
 * process in comm.  Aborts the job on invalid input: the previous version
 * ignored the scanf result, so a non-numeric entry left n uninitialized
 * (undefined behavior) and broadcast garbage. */
int Read_n(int my_rank, MPI_Comm comm) {
   int n = 0;
   if (my_rank == 0)
   {
      printf("Enter the number of vertices for your matrix.\n");
      if (scanf("%d", &n) != 1 || n <= 0) {
         fprintf(stderr, "Invalid vertex count.\n");
         MPI_Abort(comm, 1);
      }
   }
   MPI_Bcast(&n, 1, MPI_INT, 0, comm);
   return n;
}  /* Read_n */
/*---------------------------------------------------------------------
* Function: Build_blk_col_type
* Purpose: Build an MPI_Datatype that represents a block column of
* a matrix
* In args: n: number of rows in the matrix and the block column
* loc_n = n/p: number cols in the block column
* Ret val: blk_col_mpi_t: MPI_Datatype that represents a block
* column
*/
/* Build and commit an MPI_Datatype describing one n x loc_n block column
 * of an n x n row-major int matrix.  Caller must MPI_Type_free it. */
MPI_Datatype Build_blk_col_type(int n, int loc_n) {
   MPI_Aint lb, extent;
   MPI_Datatype one_row_mpi_t;     /* loc_n contiguous ints: one block-column row */
   MPI_Datatype unsized_bc_mpi_t;  /* the block column with its natural extent    */
   MPI_Datatype blk_col_mpi_t;     /* final type: block column, one-row extent    */

   /* Measure the extent of a single row of loc_n ints. */
   MPI_Type_contiguous(loc_n, MPI_INT, &one_row_mpi_t);
   MPI_Type_get_extent(one_row_mpi_t, &lb, &extent);

   /* n blocks of loc_n ints, stride n ints between block starts: this
      selects one block column out of the row-major matrix. */
   MPI_Type_vector(n, loc_n, n, MPI_INT, &unsized_bc_mpi_t);

   /* Shrink the extent to one row so that in a scatter, consecutive block
      columns start loc_n ints apart instead of a whole matrix apart. */
   MPI_Type_create_resized(unsized_bc_mpi_t, lb, extent,
         &blk_col_mpi_t);
   MPI_Type_commit(&blk_col_mpi_t);

   /* The building-block types are no longer needed once the final
      type has been committed. */
   MPI_Type_free(&one_row_mpi_t);
   MPI_Type_free(&unsized_bc_mpi_t);

   return blk_col_mpi_t;
}  /* Build_blk_col_type */
/*---------------------------------------------------------------------
* Function: Read_matrix
* Purpose: Read in an nxn matrix of ints on process 0, and
* distribute it among the processes so that each
* process gets a block column with n rows and n/p
* columns
* In args: n: the number of rows in the matrix and the submatrices
* loc_n = n/p: the number of columns in the submatrices
* blk_col_mpi_t: the MPI_Datatype used on process 0
* my_rank: the caller's rank in comm
* comm: Communicator consisting of all the processes
* Out arg: loc_mat: the calling process' submatrix (needs to be
* allocated by the caller)
*/
/* Read an n x n int matrix on process 0 and scatter it by block columns:
 * each process receives an n x loc_n submatrix into loc_mat (allocated by
 * the caller).  The previous version checked neither malloc nor scanf, so
 * an allocation failure crashed and bad input scattered uninitialized
 * values (undefined behavior); both now abort with a diagnostic. */
void Read_matrix(int loc_mat[], int n, int loc_n,
      MPI_Datatype blk_col_mpi_t, int my_rank, MPI_Comm comm) {
   int *mat = NULL, i, j;

   if (my_rank == 0)
   {
      printf("Please read in matrix into process 0.\n");
      mat = malloc(n*n*sizeof(int));
      if (mat == NULL) {
         fprintf(stderr, "Process 0: can't allocate the %d x %d matrix.\n",
               n, n);
         MPI_Abort(comm, 1);
      }
      for (i = 0; i < n; i++)
         for (j = 0; j < n; j++)
            if (scanf("%d", &mat[i*n + j]) != 1) {
               fprintf(stderr, "Bad matrix entry at row %d, col %d.\n", i, j);
               MPI_Abort(comm, 1);
            }
   }

   /* Root sends one blk_col_mpi_t per process; everyone receives the
      n*loc_n ints of their block column. */
   MPI_Scatter(mat, 1, blk_col_mpi_t,
         loc_mat, n*loc_n, MPI_INT, 0, comm);

   free(mat);   /* free(NULL) is a no-op on non-root processes */
}  /* Read_matrix */