@@ -17,19 +17,23 @@ static inline void reset_bitfields(tw_event *revent)
17
17
memset (& revent -> cv , 0 , sizeof (revent -> cv ));
18
18
}
19
19
20
- // To be used in `tw_sched_event_q`, `tw_scheduler_sequential`, `tw_sched_batch`, `tw_scheduler_optimistic`, and `tw_scheduler_optimistic_realtime`
20
+ // To be used instead of littering the file with #ifdefs all over. If this grows
21
+ // far too large, there might be a need to rethink how to implement the
22
+ // tie-breaker mechanism so that it can be deactivated
21
23
#ifdef USE_RAND_TIEBREAKER
22
24
#define PQ_MINUMUM (pe ) tw_pq_minimum_sig(pe->pq).recv_ts
23
25
#define CMP_KP_TO_EVENT_TIME (kp , e ) tw_event_sig_compare(kp->last_sig, e->sig)
24
26
#define CMP_GVT_HOOK_TO_NEXT_IN_QUEUE (pe ) tw_event_sig_compare(g_tw_trigger_gvt_hook.sig_at, tw_pq_minimum_sig(pe->pq))
25
27
#define TRIGGER_ROLLBACK_TO_EVENT_TIME (kp , e ) tw_kp_rollback_to_sig(kp, e->sig)
26
28
#define STIME_FROM_PE (pe ) TW_STIME_DBL(pe->GVT_sig.recv_ts)
29
+ #define STIME_FROM_KP (kp ) TW_STIME_DBL(kp->last_sig.recv_ts)
27
30
#else
28
31
#define PQ_MINUMUM (pe ) tw_pq_minimum(pe->pq)
29
32
#define CMP_KP_TO_EVENT_TIME (kp , e ) TW_STIME_CMP(kp->last_time, e->recv_ts)
30
33
#define CMP_GVT_HOOK_TO_NEXT_IN_QUEUE (pe ) (g_tw_trigger_gvt_hook.at - tw_pq_minimum(pe->pq))
31
34
#define TRIGGER_ROLLBACK_TO_EVENT_TIME (kp , e ) tw_kp_rollback_to(kp, e->recv_ts);
32
35
#define STIME_FROM_PE (pe ) TW_STIME_DBL(pe->GVT)
36
+ #define STIME_FROM_KP (kp ) TW_STIME_DBL(kp->last_time)
33
37
#endif
34
38
35
39
/**
@@ -556,7 +560,7 @@ void tw_scheduler_sequential(tw_pe * me) {
556
560
tw_clock const event_start = tw_clock_read ();
557
561
(* clp -> type -> event )(clp -> cur_state , & cev -> cv , tw_event_data (cev ), clp );
558
562
if (g_st_ev_trace == FULL_TRACE )
559
- st_collect_event_data (cev , tw_clock_read () / g_tw_clock_rate );
563
+ st_collect_event_data (cev , ( double ) tw_clock_read () / g_tw_clock_rate );
560
564
if (* clp -> type -> commit ) {
561
565
(* clp -> type -> commit )(clp -> cur_state , & cev -> cv , tw_event_data (cev ), clp );
562
566
}
@@ -617,12 +621,9 @@ void tw_scheduler_conservative(tw_pe * me) {
617
621
tw_sched_event_q (me );
618
622
tw_gvt_step2 (me );
619
623
620
- #ifdef USE_RAND_TIEBREAKER
621
- if (TW_STIME_DBL (me -> GVT_sig .recv_ts ) > g_tw_ts_end )
622
- #else
623
- if (TW_STIME_DBL (me -> GVT ) > g_tw_ts_end )
624
- #endif
624
+ if (STIME_FROM_PE (me ) > g_tw_ts_end ) {
625
625
break ;
626
+ }
626
627
627
628
// put "batch" loop directly here
628
629
/* Process g_tw_mblock events, or until the PQ is empty
@@ -641,12 +642,9 @@ void tw_scheduler_conservative(tw_pe * me) {
641
642
break ;
642
643
}
643
644
644
- #ifdef USE_RAND_TIEBREAKER
645
- if (TW_STIME_DBL (tw_pq_minimum_sig (me -> pq ).recv_ts ) >= TW_STIME_DBL (me -> GVT_sig .recv_ts ) + g_tw_lookahead )
646
- #else
647
- if (TW_STIME_DBL (tw_pq_minimum (me -> pq )) >= TW_STIME_DBL (me -> GVT ) + g_tw_lookahead )
648
- #endif
645
+ if (TW_STIME_DBL (PQ_MINUMUM (me )) >= STIME_FROM_PE (me ) + g_tw_lookahead ) {
649
646
break ;
647
+ }
650
648
651
649
start = tw_clock_read ();
652
650
if (!(cev = tw_pq_dequeue (me -> pq ))) {
@@ -665,21 +663,15 @@ void tw_scheduler_conservative(tw_pe * me) {
665
663
ckp = clp -> kp ;
666
664
me -> cur_event = cev ;
667
665
668
- #ifdef USE_RAND_TIEBREAKER
669
- if (tw_event_sig_compare (ckp -> last_sig , cev -> sig ) > 0 ) {
666
+ if (CMP_KP_TO_EVENT_TIME (ckp , cev ) > 0 ) {
670
667
tw_error (TW_LOC , "Found KP last time %lf > current event time %lf for LP %d, PE %lu"
671
668
"src LP %lu, src PE %lu" ,
672
- ckp -> last_sig . recv_ts , cev -> recv_ts , clp -> gid , clp -> pe -> id ,
669
+ STIME_FROM_KP ( ckp ) , cev -> recv_ts , clp -> gid , clp -> pe -> id ,
673
670
cev -> send_lp , cev -> send_pe );
674
671
}
672
+ #ifdef USE_RAND_TIEBREAKER
675
673
ckp -> last_sig = cev -> sig ;
676
674
#else
677
- if ( TW_STIME_CMP (ckp -> last_time , cev -> recv_ts ) > 0 ){
678
- tw_error (TW_LOC , "Found KP last time %lf > current event time %lf for LP %d, PE %lu"
679
- "src LP %lu, src PE %lu" ,
680
- ckp -> last_time , cev -> recv_ts , clp -> gid , clp -> pe -> id ,
681
- cev -> send_lp , cev -> send_pe );
682
- }
683
675
ckp -> last_time = cev -> recv_ts ;
684
676
#endif
685
677
@@ -1037,7 +1029,7 @@ void tw_scheduler_sequential_rollback_check(tw_pe * me) {
1037
1029
event_start = tw_clock_read ();
1038
1030
(* clp -> type -> event )(clp -> cur_state , & cev -> cv , tw_event_data (cev ), clp );
1039
1031
if (g_st_ev_trace == FULL_TRACE )
1040
- st_collect_event_data (cev , tw_clock_read () / g_tw_clock_rate );
1032
+ st_collect_event_data (cev , ( double ) tw_clock_read () / g_tw_clock_rate );
1041
1033
if (* clp -> type -> commit ) {
1042
1034
(* clp -> type -> commit )(clp -> cur_state , & cev -> cv , tw_event_data (cev ), clp );
1043
1035
}
0 commit comments