Skip to content

Commit 8d06783

Browse files
Author: Mike McLaughlin

Commit title: Fix VS div-by-0 in DacEnumerableHashTable code (#112542)

Commit message:
* Fix VS div-by-0 in DacEnumerableHashTable code
* Code review feedback

1 parent: da6c473 — commit: 8d06783

File tree

1 file changed

+37
-32
lines changed

1 file changed

+37
-32
lines changed

src/coreclr/vm/dacenumerablehash.inl

Lines changed: 37 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -345,45 +345,50 @@ DPTR(VALUE) DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::BaseFindFirstEntryByHash
345345
do
346346
{
347347
DWORD cBuckets = GetLength(curBuckets);
348+
// DAC hardening for invalid process state
349+
#ifdef DACCESS_COMPILE
350+
if (cBuckets > 0)
351+
#endif
352+
{
353+
// Compute which bucket the entry belongs in based on the hash.
354+
// +2 to skip "length" and "next" slots
355+
DWORD dwBucket = iHash % cBuckets + SKIP_SPECIAL_SLOTS;
348356

349-
// Compute which bucket the entry belongs in based on the hash.
350-
// +2 to skip "length" and "next" slots
351-
DWORD dwBucket = iHash % cBuckets + SKIP_SPECIAL_SLOTS;
352-
353-
// Point at the first entry in the bucket chain that stores entries with the given hash code.
354-
PTR_VolatileEntry pEntry = VolatileLoadWithoutBarrier(&curBuckets[dwBucket]);
355-
TADDR expectedEndSentinel = ComputeEndSentinel(BaseEndSentinel(curBuckets), dwBucket);
357+
// Point at the first entry in the bucket chain that stores entries with the given hash code.
358+
PTR_VolatileEntry pEntry = VolatileLoadWithoutBarrier(&curBuckets[dwBucket]);
359+
TADDR expectedEndSentinel = ComputeEndSentinel(BaseEndSentinel(curBuckets), dwBucket);
356360

357-
// Walk the bucket chain one entry at a time.
358-
while (!IsEndSentinel(pEntry))
359-
{
360-
if (pEntry->m_iHashValue == iHash)
361+
// Walk the bucket chain one entry at a time.
362+
while (!IsEndSentinel(pEntry))
361363
{
362-
// We've found our match.
364+
if (pEntry->m_iHashValue == iHash)
365+
{
366+
// We've found our match.
363367

364-
// Record our current search state into the provided context so that a subsequent call to
365-
// BaseFindNextEntryByHash can pick up the search where it left off.
366-
pContext->m_pEntry = dac_cast<TADDR>(pEntry);
367-
pContext->m_curBuckets = curBuckets;
368-
pContext->m_expectedEndSentinel = dac_cast<TADDR>(expectedEndSentinel);
368+
// Record our current search state into the provided context so that a subsequent call to
369+
// BaseFindNextEntryByHash can pick up the search where it left off.
370+
pContext->m_pEntry = dac_cast<TADDR>(pEntry);
371+
pContext->m_curBuckets = curBuckets;
372+
pContext->m_expectedEndSentinel = dac_cast<TADDR>(expectedEndSentinel);
369373

370-
// Return the address of the sub-classes' embedded entry structure.
371-
return VALUE_FROM_VOLATILE_ENTRY(pEntry);
372-
}
374+
// Return the address of the sub-classes' embedded entry structure.
375+
return VALUE_FROM_VOLATILE_ENTRY(pEntry);
376+
}
373377

374-
// Move to the next entry in the chain.
375-
pEntry = VolatileLoadWithoutBarrier(&pEntry->m_pNextEntry);
376-
}
378+
// Move to the next entry in the chain.
379+
pEntry = VolatileLoadWithoutBarrier(&pEntry->m_pNextEntry);
380+
}
377381

378-
if (!AcceptableEndSentinel(pEntry, expectedEndSentinel))
379-
{
380-
// If we hit this logic, we've managed to hit a case where the linked list was in the process of being
381-
// moved to a new set of buckets while we were walking the list, and we walked part of the list of the
382-
// bucket in the old hash table (which is fine), and part of the list in the new table, which may not
383-
// be the correct bucket to walk. Most notably, the situation that can cause this will cause the list in
384-
// the old bucket to be missing items. Restart the lookup, as the linked list is unlikely to still be under
385-
// edit a second time.
386-
continue;
382+
if (!AcceptableEndSentinel(pEntry, expectedEndSentinel))
383+
{
384+
// If we hit this logic, we've managed to hit a case where the linked list was in the process of being
385+
// moved to a new set of buckets while we were walking the list, and we walked part of the list of the
386+
// bucket in the old hash table (which is fine), and part of the list in the new table, which may not
387+
// be the correct bucket to walk. Most notably, the situation that can cause this will cause the list in
388+
// the old bucket to be missing items. Restart the lookup, as the linked list is unlikely to still be under
389+
// edit a second time.
390+
continue;
391+
}
387392
}
388393

389394
// in a rare case if resize is in progress, look in the new table as well.

0 commit comments

Comments (0)