This repository was archived by the owner on Jan 23, 2023. It is now read-only.

Delete PlatformHelper class #42230

Merged · 1 commit · Oct 30, 2019

@@ -15,7 +15,6 @@
<Compile Include="System\Collections\Concurrent\OrderablePartitioner.cs" />
<Compile Include="System\Collections\Concurrent\Partitioner.cs" />
<Compile Include="System\Collections\Concurrent\PartitionerStatic.cs" />
<Compile Include="System\Collections\Concurrent\PlatformHelper.cs" />
<Compile Include="$(CommonPath)\CoreLib\System\Collections\Concurrent\IProducerConsumerCollectionDebugView.cs">
<Link>Common\CoreLib\System\Collections\Concurrent\IProducerConsumerCollectionDebugView.cs</Link>
</Compile>

@@ -1906,10 +1906,7 @@ private static void GetBucketAndLockNo(int hashcode, out int bucketNo, out int l
/// <summary>
/// The number of concurrent writes for which to optimize by default.
/// </summary>
- private static int DefaultConcurrencyLevel
- {
-     get { return PlatformHelper.ProcessorCount; }
- }
+ private static int DefaultConcurrencyLevel => Environment.ProcessorCount;

/// <summary>
/// Acquires all locks for this hash table, and increments locksAcquired by the number
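
The swap above works because Environment.ProcessorCount is cheap to read on recent runtimes (the value is cached by the runtime itself). The deleted PlatformHelper type existed to avoid repeated processor-count queries; below is a minimal sketch of that caching pattern, reconstructed only from the call sites in this diff — the class name, field names, and refresh interval are assumptions, not the removed source.

```csharp
using System;

// Hypothetical sketch of the kind of caching the deleted PlatformHelper provided:
// cache Environment.ProcessorCount and refresh it periodically. The type name,
// fields, and 30-second interval are assumptions for illustration only.
internal static class CachedProcessorCount
{
    private const int RefreshIntervalMs = 30_000;   // assumed refresh period
    private static volatile int s_cachedCount;
    private static volatile int s_lastRefreshTicks;

    internal static int ProcessorCount
    {
        get
        {
            int now = Environment.TickCount;
            int count = s_cachedCount;

            // Re-query when the cache is empty or stale.
            if (count == 0 || (now - s_lastRefreshTicks) >= RefreshIntervalMs)
            {
                count = Environment.ProcessorCount;
                s_cachedCount = count;
                s_lastRefreshTicks = now;
            }

            return count;
        }
    }
}
```

With a helper like that no longer needed, call sites such as DefaultConcurrencyLevel above read Environment.ProcessorCount directly.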

@@ -191,8 +191,7 @@ public static OrderablePartitioner<Tuple<long, long>> Create(long fromInclusive,
{
if (toExclusive <= fromInclusive) throw new ArgumentOutOfRangeException(nameof(toExclusive));
decimal range = (decimal)toExclusive - fromInclusive;
- long rangeSize = (long)(range /
-     (PlatformHelper.ProcessorCount * CoreOversubscriptionRate));
+ long rangeSize = (long)(range / (Environment.ProcessorCount * CoreOversubscriptionRate));
if (rangeSize == 0) rangeSize = 1;
return Partitioner.Create(CreateRanges(fromInclusive, toExclusive, rangeSize), EnumerablePartitionerOptions.NoBuffering); // chunk one range at a time
}

@@ -246,8 +245,7 @@ public static OrderablePartitioner<Tuple<int, int>> Create(int fromInclusive, in
{
if (toExclusive <= fromInclusive) throw new ArgumentOutOfRangeException(nameof(toExclusive));
long range = (long)toExclusive - fromInclusive;
- int rangeSize = (int)(range /
-     (PlatformHelper.ProcessorCount * CoreOversubscriptionRate));
+ int rangeSize = (int)(range / (Environment.ProcessorCount * CoreOversubscriptionRate));
if (rangeSize == 0) rangeSize = 1;
return Partitioner.Create(CreateRanges(fromInclusive, toExclusive, rangeSize), EnumerablePartitionerOptions.NoBuffering); // chunk one range at a time
}
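
In both Create overloads above, the range is carved into chunks of roughly range / (processors × oversubscription rate), so each core gets several ranges and idle workers can pick up the leftovers. A worked sketch of that arithmetic, using an assumed CoreOversubscriptionRate of 3 and an 8-core machine (both stand-in values, not taken from this diff):

```csharp
// Stand-in values for the worked example.
const int processorCount = 8;             // stand-in for Environment.ProcessorCount
const int CoreOversubscriptionRate = 3;   // assumed value of the constant used above

long fromInclusive = 0;
long toExclusive = 1_000_000;

decimal range = (decimal)toExclusive - fromInclusive;                          // 1,000,000
long rangeSize = (long)(range / (processorCount * CoreOversubscriptionRate));  // 41,666
if (rangeSize == 0) rangeSize = 1;        // guard for very small ranges

// The range is split into ~24 chunks (about 3 per core), which keeps every core
// busy without fragmenting the range into tiny pieces.
System.Console.WriteLine(rangeSize);
```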

@@ -591,7 +589,7 @@ internal InternalPartitionEnumerable(IEnumerator<TSource> sharedReader, bool use
{
// Time to allocate the fill buffer which is used to reduce the contention on the shared lock.
// First pick the buffer size multiplier. We use 4 for when there are more than 4 cores, and just 1 for below. This is based on empirical evidence.
- int fillBufferMultiplier = (PlatformHelper.ProcessorCount > 4) ? 4 : 1;
+ int fillBufferMultiplier = (Environment.ProcessorCount > 4) ? 4 : 1;

// and allocate the fill buffer using these two numbers
_fillBuffer = new KeyValuePair<long, TSource>[fillBufferMultiplier * Partitioner.GetDefaultChunkSize<TSource>()];
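
The fill buffer sized above trades memory for less contention on the shared reader lock: with more than four cores it spans four default chunks, otherwise one. A small sketch of that sizing decision; the helper name and the chunk size of 128 are assumptions, since the real size comes from Partitioner.GetDefaultChunkSize<TSource>():

```csharp
// Hypothetical helper mirroring the sizing logic above.
static int FillBufferLength(int processorCount, int defaultChunkSize)
{
    // Per the source comment, the larger buffer only pays off beyond 4 cores.
    int fillBufferMultiplier = (processorCount > 4) ? 4 : 1;
    return fillBufferMultiplier * defaultChunkSize;
}

// e.g. FillBufferLength(processorCount: 16, defaultChunkSize: 128) == 512
//      FillBufferLength(processorCount: 2,  defaultChunkSize: 128) == 128
```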

This file was deleted.

@@ -9,7 +9,6 @@
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<Compile Include="System\Threading\PlatformHelper.cs" />
<Compile Include="System\Threading\Tasks\Parallel.cs" />
<Compile Include="System\Threading\Tasks\ParallelETWProvider.cs" />
<Compile Include="System\Threading\Tasks\ParallelLoopState.cs" />

This file was deleted.

@@ -1040,7 +1040,7 @@ private static ParallelLoopResult ForWorker<TLocal>(

// initialize ranges with passed in loop arguments and expected number of workers
int numExpectedWorkers = (parallelOptions.EffectiveMaxConcurrencyLevel == -1) ?
-     PlatformHelper.ProcessorCount :
+     Environment.ProcessorCount :
parallelOptions.EffectiveMaxConcurrencyLevel;
RangeManager rangeManager = new RangeManager(fromInclusive, toExclusive, 1, numExpectedWorkers);

@@ -1302,7 +1302,7 @@ private static ParallelLoopResult ForWorker64<TLocal>(

// initialize ranges with passed in loop arguments and expected number of workers
int numExpectedWorkers = (parallelOptions.EffectiveMaxConcurrencyLevel == -1) ?
-     PlatformHelper.ProcessorCount :
+     Environment.ProcessorCount :
parallelOptions.EffectiveMaxConcurrencyLevel;
RangeManager rangeManager = new RangeManager(fromInclusive, toExclusive, 1, numExpectedWorkers);

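
Both ForWorker hunks above pick the expected worker count the same way: ParallelOptions.EffectiveMaxConcurrencyLevel reports -1 when no explicit MaxDegreeOfParallelism was set, and in that case the processor count is used instead. A minimal sketch of that selection (the helper name is hypothetical):

```csharp
using System;

static class WorkerCountExample
{
    // -1 means "no explicit MaxDegreeOfParallelism", so fall back to the core count.
    internal static int ExpectedWorkers(int effectiveMaxConcurrencyLevel) =>
        (effectiveMaxConcurrencyLevel == -1)
            ? Environment.ProcessorCount
            : effectiveMaxConcurrencyLevel;
}

// On an 8-core machine: ExpectedWorkers(-1) == 8, ExpectedWorkers(2) == 2.
```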

@@ -154,7 +154,7 @@ private static int GenerateCooperativeMultitaskingTaskTimeout()
{
// This logic ensures that we have a diversity of timeouts across worker tasks (100, 150, 200, 250, 100, etc)
// Otherwise all worker will try to timeout at precisely the same point, which is bad if the work is just about to finish.
- int period = PlatformHelper.ProcessorCount;
+ int period = Environment.ProcessorCount;
int pseudoRnd = Environment.TickCount;
return CooperativeMultitaskingTaskTimeout_Min + (pseudoRnd % period) * CooperativeMultitaskingTaskTimeout_Increment;
}
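
The period here is the processor count, so each worker's timeout lands on one of ProcessorCount distinct values spaced one increment apart, seeded by Environment.TickCount. A worked example with assumed constants (100 ms minimum and 50 ms increment, inferred from the "(100, 150, 200, 250, ...)" comment rather than taken from this diff):

```csharp
// Assumed constants, matching the values the source comment above implies.
const int CooperativeMultitaskingTaskTimeout_Min = 100;        // ms
const int CooperativeMultitaskingTaskTimeout_Increment = 50;   // ms

int period = 4;                // stand-in for Environment.ProcessorCount on a 4-core machine
int pseudoRnd = 123_456_789;   // stand-in for Environment.TickCount

int timeout = CooperativeMultitaskingTaskTimeout_Min
              + (pseudoRnd % period) * CooperativeMultitaskingTaskTimeout_Increment;

// 123,456,789 % 4 == 1, so timeout == 150 ms. Workers call this at different tick
// counts, so their timeouts spread across 100, 150, 200, 250 ms instead of all
// expiring at the same instant.
System.Console.WriteLine(timeout);
```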