@@ -790,7 +790,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
790790 assert ( not kernel ().connection_manager .is_source_table_cleared () );
791791
792792 // assume all threads have some work to do
793- gather_completed_checker_[ tid ] .set_false ();
793+ gather_completed_checker_.set_false ( tid );
794794 assert ( gather_completed_checker_.all_false () );
795795
796796 const AssignedRanks assigned_ranks = kernel ().vp_manager .get_assigned_ranks ( tid );
@@ -802,7 +802,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
802802 {
803803 // assume this is the last gather round and change to false
804804 // otherwise
805- gather_completed_checker_[ tid ] .set_true ();
805+ gather_completed_checker_.set_true ( tid );
806806
807807#pragma omp master
808808 {
@@ -819,7 +819,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
819819 assigned_ranks, kernel ().mpi_manager .get_send_recv_count_target_data_per_rank () );
820820
821821 const bool gather_completed = collocate_target_data_buffers_ ( tid, assigned_ranks, send_buffer_position );
822- gather_completed_checker_[ tid ] .logical_and ( gather_completed );
822+ gather_completed_checker_.logical_and ( tid, gather_completed );
823823
824824 if ( gather_completed_checker_.all_true () )
825825 {
@@ -842,7 +842,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
842842#pragma omp barrier
843843
844844 const bool distribute_completed = distribute_target_data_buffers_ ( tid );
845- gather_completed_checker_[ tid ] .logical_and ( distribute_completed );
845+ gather_completed_checker_.logical_and ( tid, distribute_completed );
846846
847847 // resize mpi buffers, if necessary and allowed
848848 if ( gather_completed_checker_.any_false () and kernel ().mpi_manager .adaptive_target_buffers () )
@@ -864,7 +864,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
864864 assert ( not kernel ().connection_manager .is_source_table_cleared () );
865865
866866 // assume all threads have some work to do
867- gather_completed_checker_[ tid ] .set_false ();
867+ gather_completed_checker_.set_false ( tid );
868868 assert ( gather_completed_checker_.all_false () );
869869
870870 const AssignedRanks assigned_ranks = kernel ().vp_manager .get_assigned_ranks ( tid );
@@ -874,7 +874,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
874874 while ( gather_completed_checker_.any_false () )
875875 {
876876 // assume this is the last gather round and change to false otherwise
877- gather_completed_checker_[ tid ] .set_true ();
877+ gather_completed_checker_.set_true ( tid );
878878
879879#pragma omp master
880880 {
@@ -891,7 +891,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
891891 const bool gather_completed =
892892 collocate_target_data_buffers_compressed_ ( tid, assigned_ranks, send_buffer_position );
893893
894- gather_completed_checker_[ tid ] .logical_and ( gather_completed );
894+ gather_completed_checker_.logical_and ( tid, gather_completed );
895895
896896 if ( gather_completed_checker_.all_true () )
897897 {
@@ -916,7 +916,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
916916 // all data it is responsible for to buffers. Now combine with information on whether other ranks
917917 // have sent all their data. Note: All threads will return the same value for distribute_completed.
918918 const bool distribute_completed = distribute_target_data_buffers_ ( tid );
919- gather_completed_checker_[ tid ] .logical_and ( distribute_completed );
919+ gather_completed_checker_.logical_and ( tid, distribute_completed );
920920
921921 // resize mpi buffers, if necessary and allowed
922922 if ( gather_completed_checker_.any_false () and kernel ().mpi_manager .adaptive_target_buffers () )