@@ -52,13 +52,13 @@ int32_t jenkins_one_at_a_time_hash(char const* key, size_t len)
    return hash;
}

-/* =============== Fortran Wrappers for MPI_Init =============== */
+
static void MPI_Init_fortran_wrapper(MPI_Fint *ierr) {
    int argc = 0;
    char **argv = NULL;
    int _wrap_py_return_val = 0;

-    _wrap_py_return_val = MPI_Init(&argc, &argv);
+    _wrap_py_return_val = PMPI_Init(&argc, &argv);
    PMPI_Barrier(MPI_COMM_WORLD);

    // Measure the current time and TSC.
@@ -111,7 +111,59 @@ _EXTERN_C_ void mpi_init__(MPI_Fint *ierr) {
    fortran_init = 4;
    MPI_Init_fortran_wrapper(ierr);
}
-/* ================= End Wrappers for MPI_Init ================= */
+
+
+static void MPI_Init_thread_fortran_wrapper(MPI_Fint *argc, MPI_Fint ***argv, MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr) {
+    int _wrap_py_return_val = 0;
+
+    _wrap_py_return_val = PMPI_Init_thread((int *)argc, (char ***)argv, *required, (int *)provided);
+    PMPI_Barrier(MPI_COMM_WORLD);
+
+    // Measure the current time and TSC.
+    Tsc const tsc = fenced_rdtscp();
+    struct timeval timeofday;
+    gettimeofday(&timeofday, NULL);
+
+    // Set the rank of the current MPI process/thread
+    PMPI_Comm_rank(MPI_COMM_WORLD, &current_rank);
+
+    MpiCall const initthread = {
+        .kind = Initthread,
+        .time = timeofday.tv_sec + timeofday.tv_usec / 1e6,
+        .tsc = tsc,
+        .duration = 0,
+        .current_rank = current_rank,
+        .partner_rank = -1,
+        .nb_bytes_s = 0,
+        .nb_bytes_r = 0,
+        .comm = -1,
+        .req = -1,
+        .tag = -1,
+        .required_thread_lvl = *required,
+        .provided_thread_lvl = *provided,
+        .op_type = -1,
+        .finished = false,
+    };
+
+    register_mpi_call(initthread);
+    *ierr = _wrap_py_return_val;
+}
+
+_EXTERN_C_ void MPI_INIT_THREAD(MPI_Fint *argc, MPI_Fint ***argv, MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr) {
+    MPI_Init_thread_fortran_wrapper(argc, argv, required, provided, ierr);
+}
+
+_EXTERN_C_ void mpi_init_thread(MPI_Fint *argc, MPI_Fint ***argv, MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr) {
+    MPI_Init_thread_fortran_wrapper(argc, argv, required, provided, ierr);
+}
+
+_EXTERN_C_ void mpi_init_thread_(MPI_Fint *argc, MPI_Fint ***argv, MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr) {
+    MPI_Init_thread_fortran_wrapper(argc, argv, required, provided, ierr);
+}
+
+_EXTERN_C_ void mpi_init_thread__(MPI_Fint *argc, MPI_Fint ***argv, MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr) {
+    MPI_Init_thread_fortran_wrapper(argc, argv, required, provided, ierr);
+}

static void MPI_Finalize_fortran_wrapper(MPI_Fint *ierr) {
    int _wrap_py_return_val = 0;
@@ -147,7 +199,7 @@ static void MPI_Finalize_fortran_wrapper(MPI_Fint *ierr) {
        sort_all_traces();
    }

-    _wrap_py_return_val = MPI_Finalize();
+    _wrap_py_return_val = PMPI_Finalize();

    *ierr = _wrap_py_return_val;
}
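
Note on the change: switching these Fortran wrappers from MPI_Init/MPI_Finalize to PMPI_Init/PMPI_Finalize follows the standard MPI profiling-interface (PMPI) pattern, in which an interception layer exports the public MPI symbol and forwards to the PMPI_ entry point so the wrapped call never routes back through the tool's own wrappers. Below is a minimal, self-contained C sketch of that pattern, not this project's code; trace_event() is a hypothetical stand-in for its register_mpi_call() bookkeeping.

#include <mpi.h>
#include <stdio.h>

/* Hypothetical recording hook; the real tool builds an MpiCall record instead. */
static void trace_event(const char *name, int rank) {
    fprintf(stderr, "[rank %d] %s intercepted\n", rank, name);
}

/* The tool's MPI_Init overrides the MPI library's public symbol at link time.
 * It must forward to PMPI_Init, not MPI_Init, or it would re-enter itself. */
int MPI_Init(int *argc, char ***argv) {
    int ret = PMPI_Init(argc, argv);

    int rank = -1;
    PMPI_Comm_rank(MPI_COMM_WORLD, &rank);
    trace_event("MPI_Init", rank);
    return ret;
}

int MPI_Finalize(void) {
    int rank = -1;
    PMPI_Comm_rank(MPI_COMM_WORLD, &rank);
    trace_event("MPI_Finalize", rank);
    return PMPI_Finalize();  /* again, the PMPI_ entry point */
}

The four added mpi_init_thread spellings (MPI_INIT_THREAD, mpi_init_thread, mpi_init_thread_, mpi_init_thread__) cover the common Fortran name-mangling conventions, so the same C wrapper is reached regardless of which symbol the Fortran compiler emits.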