@@ -1295,7 +1295,7 @@ Thread::UserAbort(EEPolicy::ThreadAbortTypes abortType, DWORD timeout)
            // The thread being aborted may clear the TS_AbortRequested bit and the matching increment
            // of g_TrapReturningThreads behind our back. Increment g_TrapReturningThreads here
            // to ensure that we stop for the stack crawl even if the TS_AbortRequested bit is cleared.
-            ThreadStore::TrapReturningThreadsIncrement();
+            ThreadStore::IncrementTrapReturningThreads();
        }
        void NeedStackCrawl()
        {
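For readers following the rename, the comment above is the heart of the pattern: the aborting thread takes its own increment on g_TrapReturningThreads so the trap stays armed even if the target thread clears TS_AbortRequested (and releases the matching increment) concurrently. Below is a minimal standalone sketch of that idea, not CoreCLR code; the std::atomic counter, the scope guard, and the function names are illustrative stand-ins.

```cpp
#include <atomic>
#include <cstdio>

// Hypothetical stand-in for g_TrapReturningThreads.
std::atomic<int> g_trapReturningThreads{0};

void SlowPathForReturningThread()
{
    // In the runtime this is where a returning thread would block for a
    // suspension, abort, or stack crawl; here it is just a placeholder.
    std::puts("thread detoured through the slow path");
}

// What a returning thread conceptually does at a safepoint.
void OnReturnToManagedCode()
{
    if (g_trapReturningThreads.load(std::memory_order_acquire) != 0)
        SlowPathForReturningThread();
}

// The aborter holds its *own* increment for the duration of the stack crawl,
// so the counter stays nonzero even if the target clears its abort request
// (and drops the increment that came with it) in parallel.
struct StackCrawlScope
{
    StackCrawlScope()  { g_trapReturningThreads.fetch_add(1, std::memory_order_acq_rel); }
    ~StackCrawlScope() { g_trapReturningThreads.fetch_sub(1, std::memory_order_acq_rel); }
};

int main()
{
    StackCrawlScope scope;   // counter is nonzero while the crawl is in progress
    OnReturnToManagedCode(); // a returning thread still takes the slow path
}
```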
@@ -1310,7 +1310,7 @@ Thread::UserAbort(EEPolicy::ThreadAbortTypes abortType, DWORD timeout)
            if (m_NeedRelease)
            {
                m_NeedRelease = FALSE;
-                ThreadStore::TrapReturningThreadsDecrement();
+                ThreadStore::DecrementTrapReturningThreads();
                ThreadStore::SetStackCrawlEvent();
                m_pThread->ResetThreadState(TS_StackCrawlNeeded);
                if (!m_fHoldingThreadStoreLock)
@@ -1755,7 +1755,7 @@ void Thread::SetAbortRequestBit()
        }
        if (InterlockedCompareExchange((LONG*)&m_State, curValue|TS_AbortRequested, curValue) == curValue)
        {
-            ThreadStore::TrapReturningThreadsIncrement();
+            ThreadStore::IncrementTrapReturningThreads();

            break;
        }
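The hunk above shows the canonical compare-exchange retry loop: only the thread whose CAS actually installs TS_AbortRequested bumps the global counter, keeping bit transitions and counter updates one-to-one. A self-contained sketch of the same pattern using std::atomic follows; the bit value and the globals are assumptions for illustration, not the runtime's types.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical state bit and globals standing in for m_State / g_TrapReturningThreads.
constexpr std::uint32_t TS_AbortRequested = 0x00000001;

std::atomic<std::uint32_t> g_state{0};
std::atomic<int>           g_trapReturningThreads{0};

void SetAbortRequestBit()
{
    for (;;)
    {
        std::uint32_t cur = g_state.load(std::memory_order_relaxed);
        if (cur & TS_AbortRequested)
            break;                          // someone else already set it; no extra increment

        // Only the thread whose CAS actually flips the bit bumps the counter,
        // so the counter and the bit can never drift apart.
        if (g_state.compare_exchange_strong(cur, cur | TS_AbortRequested,
                                            std::memory_order_acq_rel,
                                            std::memory_order_relaxed))
        {
            g_trapReturningThreads.fetch_add(1, std::memory_order_acq_rel);
            break;
        }
        // CAS lost a race against an unrelated state change; retry with the fresh value.
    }
}

int main()
{
    SetAbortRequestBit();
    SetAbortRequestBit();   // second call sees the bit already set and does not increment
    return g_trapReturningThreads.load() == 1 ? 0 : 1;
}
```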
@@ -1771,7 +1771,7 @@ void Thread::RemoveAbortRequestBit()

#ifdef _DEBUG
    // There's a race between removing the TS_AbortRequested bit and decrementing g_TrapReturningThreads
-    // We may remove the bit, but before we have a chance to call ThreadStore::TrapReturningThreadsDecrement()
+    // We may remove the bit, but before we have a chance to call ThreadStore::DecrementTrapReturningThreads()
    // DbgFindThread() may execute, and find too few threads with the bit set.
    // To ensure the assert in DbgFindThread does not fire under such a race we set the ChgInFlight before hand.
    CounterHolder trtHolder(&g_trtChgInFlight);
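The comment describes a classic transient-inconsistency problem: between clearing the bit and decrementing the counter, a debug scan could observe a mismatch. CounterHolder bumps an "in flight" count beforehand so the scan tolerates that window. A rough standalone sketch of the technique is below; CounterScope, g_changesInFlight, and DebugValidate are hypothetical stand-ins for CounterHolder, g_trtChgInFlight, and DbgFindThread.

```cpp
#include <atomic>
#include <cassert>

// Hypothetical global standing in for g_trtChgInFlight.
std::atomic<int> g_changesInFlight{0};

// Minimal RAII holder in the spirit of CounterHolder: bump on entry,
// release on exit, so the transient window is always covered.
struct CounterScope
{
    explicit CounterScope(std::atomic<int>* counter) : m_counter(counter)
    { m_counter->fetch_add(1, std::memory_order_acq_rel); }
    ~CounterScope()
    { m_counter->fetch_sub(1, std::memory_order_acq_rel); }
    std::atomic<int>* m_counter;
};

// A debug check that tolerates transient mismatches: any discrepancy up to
// the number of in-flight changes is considered benign.
void DebugValidate(int trapCount, int threadsWithBitSet)
{
    int slack = g_changesInFlight.load(std::memory_order_acquire);
    assert(trapCount <= threadsWithBitSet + slack);
    (void)trapCount; (void)threadsWithBitSet; (void)slack;
}

void RemoveAbortRequestBitSketch()
{
    CounterScope inFlight(&g_changesInFlight); // taken *before* the bit/counter pair is touched
    // ... clear the bit, then decrement the trap counter; a concurrent
    // DebugValidate() sees the extra in-flight slack and does not fire.
}

int main()
{
    RemoveAbortRequestBitSketch();
    DebugValidate(/*trapCount*/ 0, /*threadsWithBitSet*/ 0);
}
```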
@@ -1785,7 +1785,7 @@ void Thread::RemoveAbortRequestBit()
        }
        if (InterlockedCompareExchange((LONG*)&m_State, curValue&(~TS_AbortRequested), curValue) == curValue)
        {
-            ThreadStore::TrapReturningThreadsDecrement();
+            ThreadStore::DecrementTrapReturningThreads();

            break;
        }
@@ -2113,7 +2113,7 @@ void Thread::RareDisablePreemptiveGC()
    if (ThreadStore::HoldingThreadStore(this))
    {
        // In theory threads should not try entering coop mode while holding TS lock,
-        // but some scenarios like GCCoopHackNoThread end up here
+        // but some scenarios like GCCoopHackNoThread and GCX_COOP_NO_THREAD_BROKEN end up here
        goto Exit;
    }
@@ -2143,7 +2143,7 @@ void Thread::RareDisablePreemptiveGC()
    {
#ifdef DEBUGGING_SUPPORTED
        // If debugger wants the thread to suspend, give the debugger precedence.
-        if ((m_State & TS_DebugSuspendPending) && !IsInForbidSuspendForDebuggerRegion())
+        if (HasThreadStateOpportunistic(TS_DebugSuspendPending) && !IsInForbidSuspendForDebuggerRegion())
        {
            EnablePreemptiveGC();
@@ -2214,7 +2214,7 @@ void Thread::RareDisablePreemptiveGC()
            continue;
        }

-        if (HasThreadState(TS_StackCrawlNeeded))
+        if (HasThreadStateOpportunistic(TS_StackCrawlNeeded))
        {
            EnablePreemptiveGC();
            ThreadStore::WaitForStackCrawlEvent();
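Both call sites above switch from HasThreadState to HasThreadStateOpportunistic, i.e. a cheap read of the state bits that may be momentarily stale, which is acceptable because the result only steers the thread toward a slow path that re-checks under proper synchronization. One way to model that distinction in portable C++ is sketched below; the bit value and memory orders are assumptions for illustration, not the runtime's definitions.

```cpp
#include <atomic>
#include <cstdint>

constexpr std::uint32_t TS_StackCrawlNeeded = 0x00000040; // hypothetical bit value

struct ThreadStateSketch
{
    std::atomic<std::uint32_t> m_state{0};

    // Synchronized read: pairs with release stores elsewhere, for callers
    // that must act on an up-to-date value.
    bool HasThreadState(std::uint32_t bit) const
    { return (m_state.load(std::memory_order_acquire) & bit) != 0; }

    // Opportunistic read: a relaxed load that may be momentarily stale.
    // Good enough when a false negative or positive only means taking (or
    // skipping) a detour that re-validates the state anyway.
    bool HasThreadStateOpportunistic(std::uint32_t bit) const
    { return (m_state.load(std::memory_order_relaxed) & bit) != 0; }
};

int main()
{
    ThreadStateSketch t;
    t.m_state.fetch_or(TS_StackCrawlNeeded, std::memory_order_release);
    return t.HasThreadStateOpportunistic(TS_StackCrawlNeeded) ? 0 : 1;
}
```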
@@ -2394,7 +2394,7 @@ void Thread::PulseGCMode()
// Indicate whether threads should be trapped when returning to the EE (i.e. disabling
// preemptive GC mode)
Volatile<LONG> g_fTrapReturningThreadsLock;
-void ThreadStore::TrapReturningThreadsIncrement()
+void ThreadStore::IncrementTrapReturningThreads()
{
    CONTRACTL {
        NOTHROW;
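The renamed IncrementTrapReturningThreads/DecrementTrapReturningThreads pair guards the counter with the small spin lock g_fTrapReturningThreadsLock rather than a bare interlocked add, presumably so the counter and any related bookkeeping change together as one unit. A standalone sketch of that shape is below; the std::atomic spin lock and the yield back-off are stand-ins for the runtime's own primitives, not its implementation.

```cpp
#include <atomic>
#include <thread>

// Hypothetical stand-ins for g_fTrapReturningThreadsLock / g_TrapReturningThreads.
std::atomic<long> g_trapLock{0};
long              g_trapReturningThreads = 0;   // only touched under g_trapLock

static void AcquireTrapLock()
{
    // Spin until the exchange observes the lock as free (0 -> 1).
    while (g_trapLock.exchange(1, std::memory_order_acquire) == 1)
        std::this_thread::yield();              // back off instead of burning the core
}

static void ReleaseTrapLock()
{
    g_trapLock.store(0, std::memory_order_release);
}

static void IncrementTrapReturningThreads()
{
    AcquireTrapLock();
    ++g_trapReturningThreads;                   // counter plus any related bookkeeping
    ReleaseTrapLock();                          // changes as a single unit
}

static void DecrementTrapReturningThreads()
{
    AcquireTrapLock();
    --g_trapReturningThreads;
    ReleaseTrapLock();
}

int main()
{
    IncrementTrapReturningThreads();
    DecrementTrapReturningThreads();
}
```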
@@ -2432,7 +2432,7 @@ void ThreadStore::TrapReturningThreadsIncrement()
    g_fTrapReturningThreadsLock = 0;
}

-void ThreadStore::TrapReturningThreadsDecrement()
+void ThreadStore::DecrementTrapReturningThreads()
{
    CONTRACTL {
        NOTHROW;
@@ -3210,14 +3210,14 @@ COR_PRF_SUSPEND_REASON GCSuspendReasonToProfSuspendReason(ThreadSuspend::SUSPEND
}
#endif // PROFILING_SUPPORTED

-int64_t QueryPerformanceCounter()
+static int64_t QueryPerformanceCounter()
{
    LARGE_INTEGER ts;
    QueryPerformanceCounter(&ts);
    return ts.QuadPart;
}

-int64_t QueryPerformanceFrequency()
+static int64_t QueryPerformanceFrequency()
{
    LARGE_INTEGER ts;
    QueryPerformanceFrequency(&ts);
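The two helpers above wrap the LARGE_INTEGER-based Win32 calls into plain int64_t returns; marking them static gives them internal linkage, keeping them private to this translation unit. The names overload the Win32 functions with a different parameter list, so the inner calls still resolve to the real API. A Windows-only usage sketch of the same wrapper idea follows; it is illustrative, not the file's code, and requires windows.h.

```cpp
#include <windows.h>
#include <cstdint>
#include <cstdio>

// Internal-linkage wrappers that return int64_t directly.
static int64_t QueryPerformanceCounter()
{
    LARGE_INTEGER ts;
    ::QueryPerformanceCounter(&ts);   // the Win32 call takes LARGE_INTEGER*
    return ts.QuadPart;
}

static int64_t QueryPerformanceFrequency()
{
    LARGE_INTEGER ts;
    ::QueryPerformanceFrequency(&ts);
    return ts.QuadPart;
}

int main()
{
    int64_t start = QueryPerformanceCounter();
    Sleep(10);                        // stand-in for real work
    int64_t elapsedTicks = QueryPerformanceCounter() - start;
    double  elapsedMs = 1000.0 * elapsedTicks / QueryPerformanceFrequency();
    std::printf("elapsed: %.3f ms\n", elapsedMs);
}
```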
@@ -3596,8 +3596,6 @@ void EnableStressHeapHelper()
}
#endif

-// We're done with our GC. Let all the threads run again.
-// By this point we've already unblocked most threads. This just releases the ThreadStore lock.
void ThreadSuspend::ResumeAllThreads(BOOL SuspendSucceeded)
{
    CONTRACTL {
@@ -5359,7 +5357,7 @@ void Thread::MarkForSuspension(ULONG bit)
    _ASSERTE((m_State & bit) == 0);

    InterlockedOr((LONG*)&m_State, bit);
-    ThreadStore::TrapReturningThreadsIncrement();
+    ThreadStore::IncrementTrapReturningThreads();
}

void Thread::UnmarkForSuspension(ULONG mask)
@@ -5378,7 +5376,7 @@ void Thread::UnmarkForSuspension(ULONG mask)
    _ASSERTE((m_State & ~mask) != 0);

    // we decrement the global first to be able to satisfy the assert from DbgFindThread
-    ThreadStore::TrapReturningThreadsDecrement();
+    ThreadStore::DecrementTrapReturningThreads();
    InterlockedAnd((LONG*)&m_State, mask);
}
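MarkForSuspension and UnmarkForSuspension order their two updates deliberately: set the bit before incrementing, and decrement before clearing the bit, so the global count never exceeds the number of threads carrying a suspension bit, which is the invariant a DbgFindThread-style scan asserts. A compact sketch of that ordering invariant is below; the atomics and the bit value are illustrative, not the runtime's definitions.

```cpp
#include <atomic>
#include <cstdint>
#include <cassert>

// Hypothetical stand-ins: one thread's state word and the global trap counter.
constexpr std::uint32_t TS_DebugSuspendPending = 0x00000002; // illustrative bit

std::atomic<std::uint32_t> g_state{0};
std::atomic<int>           g_trapReturningThreads{0};

// Invariant to preserve at every instant:
//     g_trapReturningThreads <= number of threads with a suspension bit set.
// Keeping it true is purely a matter of ordering the two updates.

void MarkForSuspension(std::uint32_t bit)
{
    g_state.fetch_or(bit, std::memory_order_acq_rel);               // bit first...
    g_trapReturningThreads.fetch_add(1, std::memory_order_acq_rel); // ...then counter
}

void UnmarkForSuspension(std::uint32_t mask)   // mask is the complement of the bit
{
    g_trapReturningThreads.fetch_sub(1, std::memory_order_acq_rel); // counter first...
    g_state.fetch_and(mask, std::memory_order_acq_rel);             // ...then clear the bit
}

int main()
{
    MarkForSuspension(TS_DebugSuspendPending);
    assert(g_trapReturningThreads.load() == 1);
    UnmarkForSuspension(~TS_DebugSuspendPending);
    assert(g_trapReturningThreads.load() == 0);
}
```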