@@ -227,7 +227,7 @@ static void sigdie_handler(int sig, siginfo_t *info, void *context)
     uv_tty_reset_mode();
     if (sig == SIGILL)
         jl_show_sigill(context);
-    jl_critical_error(sig, jl_to_bt_context(context));
+    jl_critical_error(sig, jl_to_bt_context(context), jl_get_current_task());
     if (sig != SIGSEGV &&
         sig != SIGBUS &&
         sig != SIGILL) {
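The call sites in this diff now pass the current task as a third argument, so the corresponding declaration must have gained a task parameter. A prototype consistent with these call sites, inferred from the diff only (the actual declaration elsewhere in the tree may differ in exact spelling), would be:

/* Prototype inferred from the updated call sites in this diff; illustrative only. */
void jl_critical_error(int sig, bt_context_t *context, jl_task_t *ct);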
@@ -406,7 +406,7 @@ CFI_NORETURN
     // (unavoidable due to its async nature).
     // Try harder to exit each time if we get multiple exit requests.
     if (thread0_exit_count <= 1) {
-        jl_critical_error(thread0_exit_state - 128, NULL);
+        jl_critical_error(thread0_exit_state - 128, NULL, jl_current_task);
         jl_exit(thread0_exit_state);
     }
     else if (thread0_exit_count == 2) {
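For context, the `- 128` above assumes the listener recorded the fatal signal as `128 + sig` in `thread0_exit_state` (the usual shell-style encoding), so the subtraction recovers the original signal number for the report. A minimal sketch of that assumed convention, with illustrative helper names:

/* Minimal sketch of the assumed 128 + sig encoding behind `thread0_exit_state - 128`. */
static int encode_exit_state(int sig) { return 128 + sig; }   /* e.g. SIGSEGV (11) -> 139 */
static int decode_signal(int state)   { return state - 128; } /* 139 -> 11, passed to the error report */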
@@ -743,68 +743,68 @@ static void *signal_listener(void *arg)
         unw_context_t *signal_context;
         // sample each thread, round-robin style in reverse order
         // (so that thread zero gets notified last)
-        if (critical || profile)
+        if (critical || profile) {
             jl_lock_profile();
-        for (int i = jl_n_threads; i-- > 0; ) {
-            // notify thread to stop
-            jl_thread_suspend_and_get_state(i, &signal_context);
-
-            // do backtrace on thread contexts for critical signals
-            // this part must be signal-handler safe
-            if (critical) {
-                bt_size += rec_backtrace_ctx(bt_data + bt_size,
-                        JL_MAX_BT_SIZE / jl_n_threads - 1,
-                        signal_context, NULL);
-                bt_data[bt_size++].uintptr = 0;
-            }
-
-            // do backtrace for profiler
-            if (profile && running) {
-                if (jl_profile_is_buffer_full()) {
-                    // Buffer full: Delete the timer
-                    jl_profile_stop_timer();
+            for (int i = jl_n_threads; i-- > 0; ) {
+                // notify thread to stop
+                jl_thread_suspend_and_get_state(i, &signal_context);
+
+                // do backtrace on thread contexts for critical signals
+                // this part must be signal-handler safe
+                if (critical) {
+                    bt_size += rec_backtrace_ctx(bt_data + bt_size,
+                            JL_MAX_BT_SIZE / jl_n_threads - 1,
+                            signal_context, NULL);
+                    bt_data[bt_size++].uintptr = 0;
                 }
-                else {
-                    // unwinding can fail, so keep track of the current state
-                    // and restore from the SEGV handler if anything happens.
-                    jl_jmp_buf *old_buf = jl_get_safe_restore();
-                    jl_jmp_buf buf;
-
-                    jl_set_safe_restore(&buf);
-                    if (jl_setjmp(buf, 0)) {
-                        jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
-                    } else {
-                        // Get backtrace data
-                        bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur,
-                                bt_size_max - bt_size_cur - 1, signal_context, NULL);
-                    }
-                    jl_set_safe_restore(old_buf);
-
-                    jl_ptls_t ptls = jl_all_tls_states[i];
-
-                    // store threadid but add 1 as 0 is preserved to indicate end of block
-                    bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1;
-
-                    // store task id
-                    bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)ptls->current_task;
 
-                    // store cpu cycle clock
-                    bt_data_prof[bt_size_cur++].uintptr = cycleclock();
-
-                    // store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block
-                    bt_data_prof[bt_size_cur++].uintptr = ptls->sleep_check_state + 1;
+                // do backtrace for profiler
+                if (profile && running) {
+                    if (jl_profile_is_buffer_full()) {
+                        // Buffer full: Delete the timer
+                        jl_profile_stop_timer();
+                    }
+                    else {
+                        // unwinding can fail, so keep track of the current state
+                        // and restore from the SEGV handler if anything happens.
+                        jl_jmp_buf *old_buf = jl_get_safe_restore();
+                        jl_jmp_buf buf;
+
+                        jl_set_safe_restore(&buf);
+                        if (jl_setjmp(buf, 0)) {
+                            jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
+                        } else {
+                            // Get backtrace data
+                            bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur,
+                                    bt_size_max - bt_size_cur - 1, signal_context, NULL);
+                        }
+                        jl_set_safe_restore(old_buf);
+
+                        jl_ptls_t ptls = jl_all_tls_states[i];
+
+                        // store threadid but add 1 as 0 is preserved to indicate end of block
+                        bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1;
+
+                        // store task id
+                        bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)ptls->current_task;
+
+                        // store cpu cycle clock
+                        bt_data_prof[bt_size_cur++].uintptr = cycleclock();
+
+                        // store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block
+                        bt_data_prof[bt_size_cur++].uintptr = ptls->sleep_check_state + 1;
 
-                    // Mark the end of this block with two 0's
-                    bt_data_prof[bt_size_cur++].uintptr = 0;
-                    bt_data_prof[bt_size_cur++].uintptr = 0;
+                        // Mark the end of this block with two 0's
+                        bt_data_prof[bt_size_cur++].uintptr = 0;
+                        bt_data_prof[bt_size_cur++].uintptr = 0;
+                    }
                 }
+
+                // notify thread to resume
+                jl_thread_resume(i, sig);
             }
-
-            // notify thread to resume
-            jl_thread_resume(i, sig);
-        }
-        if (critical || profile)
             jl_unlock_profile();
+        }
 #ifndef HAVE_MACH
         if (profile && running) {
 #if defined(HAVE_TIMER)
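For reference, each profiler sample written in the loop above consists of the backtrace frames followed by a fixed per-thread trailer and a two-word terminator. A minimal sketch of that trailer layout, with hypothetical struct and field names that simply mirror the stores in the diff:

/* Illustrative layout of the per-thread trailer appended after each sample's
 * backtrace frames; names are hypothetical, values mirror the stores above. */
#include <stdint.h>

typedef struct {
    uintptr_t tid_plus_one;       /* ptls->tid + 1 (0 is reserved as the terminator) */
    void     *current_task;       /* (jl_value_t*)ptls->current_task */
    uintptr_t cycle_clock;        /* cycleclock() when the sample was taken */
    uintptr_t sleeping_plus_one;  /* ptls->sleep_check_state + 1 */
    uintptr_t end_marker[2];      /* two 0 words mark the end of the block */
} profile_sample_trailer_t;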