Skip to content

Commit c32bf0c

Browse files
authored
chore: Simplify CometShuffleMemoryAllocator to use Spark unified memory allocator (apache#1063)
1 parent 9657b75 commit c32bf0c

File tree

10 files changed

+336
-148
lines changed

10 files changed

+336
-148
lines changed

common/src/main/scala/org/apache/comet/CometConf.scala

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -322,8 +322,10 @@ object CometConf extends ShimCometConf {
322322

323323
val COMET_COLUMNAR_SHUFFLE_MEMORY_SIZE: OptionalConfigEntry[Long] =
324324
conf("spark.comet.columnar.shuffle.memorySize")
325+
.internal()
325326
.doc(
326-
"The optional maximum size of the memory used for Comet columnar shuffle, in MiB. " +
327+
"Test-only config. This is only used to test Comet shuffle with Spark tests. " +
328+
"The optional maximum size of the memory used for Comet columnar shuffle, in MiB. " +
327329
"Note that this config is only used when `spark.comet.exec.shuffle.mode` is " +
328330
"`jvm`. Once allocated memory size reaches this config, the current batch will be " +
329331
"flushed to disk immediately. If this is not configured, Comet will use " +
@@ -335,8 +337,10 @@ object CometConf extends ShimCometConf {
335337

336338
val COMET_COLUMNAR_SHUFFLE_MEMORY_FACTOR: ConfigEntry[Double] =
337339
conf("spark.comet.columnar.shuffle.memory.factor")
340+
.internal()
338341
.doc(
339-
"Fraction of Comet memory to be allocated per executor process for Comet shuffle. " +
342+
"Test-only config. This is only used to test Comet shuffle with Spark tests. " +
343+
"Fraction of Comet memory to be allocated per executor process for Comet shuffle. " +
340344
"Comet memory size is specified by `spark.comet.memoryOverhead` or " +
341345
"calculated by `spark.comet.memory.overhead.factor` * `spark.executor.memory`.")
342346
.doubleConf
@@ -345,6 +349,17 @@ object CometConf extends ShimCometConf {
345349
"Ensure that Comet shuffle memory overhead factor is a double greater than 0")
346350
.createWithDefault(1.0)
347351

352+
val COMET_COLUMNAR_SHUFFLE_UNIFIED_MEMORY_ALLOCATOR_IN_TEST: ConfigEntry[Boolean] =
353+
conf("spark.comet.columnar.shuffle.unifiedMemoryAllocatorTest")
354+
.doc("Whether to use Spark unified memory allocator for Comet columnar shuffle in tests. " +
355+
"If not configured, Comet will use a test-only memory allocator for Comet columnar " +
356+
"shuffle when Spark test env detected. The test-only allocator is proposed to run with " +
357+
"Spark tests as these tests require on-heap memory configuration. " +
358+
"By default, this config is false.")
359+
.internal()
360+
.booleanConf
361+
.createWithDefault(false)
362+
348363
val COMET_COLUMNAR_SHUFFLE_BATCH_SIZE: ConfigEntry[Int] =
349364
conf("spark.comet.columnar.shuffle.batch.size")
350365
.internal()

docs/source/user-guide/configs.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@ Comet provides the following configuration settings.
2929
| spark.comet.columnar.shuffle.async.enabled | Whether to enable asynchronous shuffle for Arrow-based shuffle. | false |
3030
| spark.comet.columnar.shuffle.async.max.thread.num | Maximum number of threads on an executor used for Comet async columnar shuffle. This is the upper bound of total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task `spark.comet.columnar.shuffle.async.thread.num` is larger than this config. Comet will use this config as the number of shuffle threads per executor instead. | 100 |
3131
| spark.comet.columnar.shuffle.async.thread.num | Number of threads used for Comet async columnar shuffle per shuffle task. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
32-
| spark.comet.columnar.shuffle.memory.factor | Fraction of Comet memory to be allocated per executor process for Comet shuffle. Comet memory size is specified by `spark.comet.memoryOverhead` or calculated by `spark.comet.memory.overhead.factor` * `spark.executor.memory`. | 1.0 |
3332
| spark.comet.convert.csv.enabled | When enabled, data from Spark (non-native) CSV v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |
3433
| spark.comet.convert.json.enabled | When enabled, data from Spark (non-native) JSON v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |
3534
| spark.comet.convert.parquet.enabled | When enabled, data from Spark (non-native) Parquet v1 and v2 scans will be converted to Arrow format. Note that to enable native vectorized execution, both this config and 'spark.comet.exec.enabled' need to be enabled. | false |

spark/src/main/java/org/apache/spark/shuffle/comet/CometShuffleMemoryAllocator.java

Lines changed: 45 additions & 137 deletions
Original file line numberDiff line numberDiff line change
@@ -20,179 +20,87 @@
2020
package org.apache.spark.shuffle.comet;
2121

2222
import java.io.IOException;
23-
import java.util.BitSet;
2423

2524
import org.apache.spark.SparkConf;
2625
import org.apache.spark.memory.MemoryConsumer;
2726
import org.apache.spark.memory.MemoryMode;
28-
import org.apache.spark.memory.SparkOutOfMemoryError;
2927
import org.apache.spark.memory.TaskMemoryManager;
30-
import org.apache.spark.sql.internal.SQLConf;
31-
import org.apache.spark.unsafe.array.LongArray;
3228
import org.apache.spark.unsafe.memory.MemoryBlock;
33-
import org.apache.spark.unsafe.memory.UnsafeMemoryAllocator;
29+
import org.apache.spark.util.Utils;
3430

35-
import org.apache.comet.CometSparkSessionExtensions$;
31+
import org.apache.comet.CometConf$;
3632

3733
/**
3834
* A simple memory allocator used by `CometShuffleExternalSorter` to allocate memory blocks which
39-
* store serialized rows. We don't rely on Spark memory allocator because we need to allocate
40-
* off-heap memory no matter memory mode is on-heap or off-heap. This allocator is configured with
41-
* fixed size of memory, and it will throw `SparkOutOfMemoryError` if the memory is not enough.
42-
*
43-
* <p>Some methods are copied from `org.apache.spark.unsafe.memory.TaskMemoryManager` with
44-
* modifications. Most modifications are to remove the dependency on the configured memory mode.
35+
* store serialized rows. This class is simply an implementation of `MemoryConsumer` that delegates
36+
* memory allocation to the `TaskMemoryManager`. This requires that the `TaskMemoryManager` is
37+
* configured with `MemoryMode.OFF_HEAP`, i.e. it is using off-heap memory.
4538
*/
46-
public final class CometShuffleMemoryAllocator extends MemoryConsumer {
47-
private final UnsafeMemoryAllocator allocator = new UnsafeMemoryAllocator();
48-
49-
private final long pageSize;
50-
private final long totalMemory;
51-
private long allocatedMemory = 0L;
52-
53-
/** The number of bits used to address the page table. */
54-
private static final int PAGE_NUMBER_BITS = 13;
55-
56-
/** The number of entries in the page table. */
57-
private static final int PAGE_TABLE_SIZE = 1 << PAGE_NUMBER_BITS;
58-
59-
private final MemoryBlock[] pageTable = new MemoryBlock[PAGE_TABLE_SIZE];
60-
private final BitSet allocatedPages = new BitSet(PAGE_TABLE_SIZE);
39+
public final class CometShuffleMemoryAllocator extends CometShuffleMemoryAllocatorTrait {
40+
private static CometShuffleMemoryAllocatorTrait INSTANCE;
6141

62-
private static final int OFFSET_BITS = 51;
63-
private static final long MASK_LONG_LOWER_51_BITS = 0x7FFFFFFFFFFFFL;
64-
65-
private static CometShuffleMemoryAllocator INSTANCE;
66-
67-
public static synchronized CometShuffleMemoryAllocator getInstance(
42+
/**
43+
* Returns the singleton instance of `CometShuffleMemoryAllocator`. This method should be used
44+
* instead of the constructor to ensure that only one instance of `CometShuffleMemoryAllocator` is
45+
* created. For Spark tests, this returns `CometTestShuffleMemoryAllocator` which is a test-only
46+
* allocator that should not be used in production.
47+
*/
48+
public static CometShuffleMemoryAllocatorTrait getInstance(
6849
SparkConf conf, TaskMemoryManager taskMemoryManager, long pageSize) {
69-
if (INSTANCE == null) {
70-
INSTANCE = new CometShuffleMemoryAllocator(conf, taskMemoryManager, pageSize);
50+
boolean isSparkTesting = Utils.isTesting();
51+
boolean useUnifiedMemAllocator =
52+
(boolean)
53+
CometConf$.MODULE$.COMET_COLUMNAR_SHUFFLE_UNIFIED_MEMORY_ALLOCATOR_IN_TEST().get();
54+
55+
if (isSparkTesting && !useUnifiedMemAllocator) {
56+
synchronized (CometShuffleMemoryAllocator.class) {
57+
if (INSTANCE == null) {
58+
// CometTestShuffleMemoryAllocator handles pages by itself so it can be a singleton.
59+
INSTANCE = new CometTestShuffleMemoryAllocator(conf, taskMemoryManager, pageSize);
60+
}
61+
}
62+
return INSTANCE;
63+
} else {
64+
if (taskMemoryManager.getTungstenMemoryMode() != MemoryMode.OFF_HEAP) {
65+
throw new IllegalArgumentException(
66+
"CometShuffleMemoryAllocator should be used with off-heap "
67+
+ "memory mode, but got "
68+
+ taskMemoryManager.getTungstenMemoryMode());
69+
}
70+
71+
// CometShuffleMemoryAllocator stores pages in TaskMemoryManager which is not singleton,
72+
// but one instance per task. So we need to create a new instance for each task.
73+
return new CometShuffleMemoryAllocator(taskMemoryManager, pageSize);
7174
}
72-
73-
return INSTANCE;
7475
}
7576

76-
CometShuffleMemoryAllocator(SparkConf conf, TaskMemoryManager taskMemoryManager, long pageSize) {
77+
CometShuffleMemoryAllocator(TaskMemoryManager taskMemoryManager, long pageSize) {
7778
super(taskMemoryManager, pageSize, MemoryMode.OFF_HEAP);
78-
this.pageSize = pageSize;
79-
this.totalMemory =
80-
CometSparkSessionExtensions$.MODULE$.getCometShuffleMemorySize(conf, SQLConf.get());
81-
}
82-
83-
public synchronized long acquireMemory(long size) {
84-
if (allocatedMemory >= totalMemory) {
85-
throw new SparkOutOfMemoryError(
86-
"Unable to acquire "
87-
+ size
88-
+ " bytes of memory, current usage "
89-
+ "is "
90-
+ allocatedMemory
91-
+ " bytes and max memory is "
92-
+ totalMemory
93-
+ " bytes");
94-
}
95-
long allocationSize = Math.min(size, totalMemory - allocatedMemory);
96-
allocatedMemory += allocationSize;
97-
return allocationSize;
9879
}
9980

10081
public long spill(long l, MemoryConsumer memoryConsumer) throws IOException {
82+
// JVM shuffle writer does not support spilling for other memory consumers
10183
return 0;
10284
}
10385

104-
public synchronized LongArray allocateArray(long size) {
105-
long required = size * 8L;
106-
MemoryBlock page = allocate(required);
107-
return new LongArray(page);
108-
}
109-
110-
public synchronized void freeArray(LongArray array) {
111-
if (array == null) {
112-
return;
113-
}
114-
free(array.memoryBlock());
115-
}
116-
117-
public synchronized MemoryBlock allocatePage(long required) {
118-
long size = Math.max(pageSize, required);
119-
return allocate(size);
120-
}
121-
122-
private synchronized MemoryBlock allocate(long required) {
123-
if (required > TaskMemoryManager.MAXIMUM_PAGE_SIZE_BYTES) {
124-
throw new TooLargePageException(required);
125-
}
126-
127-
long got = acquireMemory(required);
128-
129-
if (got < required) {
130-
allocatedMemory -= got;
131-
132-
throw new SparkOutOfMemoryError(
133-
"Unable to acquire "
134-
+ required
135-
+ " bytes of memory, got "
136-
+ got
137-
+ " bytes. Available: "
138-
+ (totalMemory - allocatedMemory));
139-
}
140-
141-
int pageNumber = allocatedPages.nextClearBit(0);
142-
if (pageNumber >= PAGE_TABLE_SIZE) {
143-
allocatedMemory -= got;
144-
145-
throw new IllegalStateException(
146-
"Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages");
147-
}
148-
149-
MemoryBlock block = allocator.allocate(got);
150-
151-
block.pageNumber = pageNumber;
152-
pageTable[pageNumber] = block;
153-
allocatedPages.set(pageNumber);
154-
155-
return block;
86+
public synchronized MemoryBlock allocate(long required) {
87+
return this.allocatePage(required);
15688
}
15789

15890
public synchronized void free(MemoryBlock block) {
159-
if (block.pageNumber == MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) {
160-
// Already freed block
161-
return;
162-
}
163-
allocatedMemory -= block.size();
164-
165-
pageTable[block.pageNumber] = null;
166-
allocatedPages.clear(block.pageNumber);
167-
block.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;
168-
169-
allocator.free(block);
170-
}
171-
172-
public synchronized long getAvailableMemory() {
173-
return totalMemory - allocatedMemory;
91+
this.freePage(block);
17492
}
17593

17694
/**
17795
* Returns the offset in the page for the given page plus base offset address. Note that this
17896
* method assumes that the page number is valid.
17997
*/
18098
public long getOffsetInPage(long pagePlusOffsetAddress) {
181-
long offsetInPage = decodeOffset(pagePlusOffsetAddress);
182-
int pageNumber = TaskMemoryManager.decodePageNumber(pagePlusOffsetAddress);
183-
assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
184-
MemoryBlock page = pageTable[pageNumber];
185-
assert (page != null);
186-
return page.getBaseOffset() + offsetInPage;
187-
}
188-
189-
public long decodeOffset(long pagePlusOffsetAddress) {
190-
return pagePlusOffsetAddress & MASK_LONG_LOWER_51_BITS;
99+
return taskMemoryManager.getOffsetInPage(pagePlusOffsetAddress);
191100
}
192101

193102
public long encodePageNumberAndOffset(int pageNumber, long offsetInPage) {
194-
assert (pageNumber >= 0);
195-
return ((long) pageNumber) << OFFSET_BITS | offsetInPage & MASK_LONG_LOWER_51_BITS;
103+
return TaskMemoryManager.encodePageNumberAndOffset(pageNumber, offsetInPage);
196104
}
197105

198106
public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
/*
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing,
13+
* software distributed under the License is distributed on an
14+
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15+
* KIND, either express or implied. See the License for the
16+
* specific language governing permissions and limitations
17+
* under the License.
18+
*/
19+
20+
package org.apache.spark.shuffle.comet;
21+
22+
import org.apache.spark.memory.MemoryConsumer;
23+
import org.apache.spark.memory.MemoryMode;
24+
import org.apache.spark.memory.TaskMemoryManager;
25+
import org.apache.spark.unsafe.memory.MemoryBlock;
26+
27+
/** The base class for Comet JVM shuffle memory allocators. */
28+
public abstract class CometShuffleMemoryAllocatorTrait extends MemoryConsumer {
29+
protected CometShuffleMemoryAllocatorTrait(
30+
TaskMemoryManager taskMemoryManager, long pageSize, MemoryMode mode) {
31+
super(taskMemoryManager, pageSize, mode);
32+
}
33+
34+
public abstract MemoryBlock allocate(long required);
35+
36+
public abstract void free(MemoryBlock block);
37+
38+
public abstract long getOffsetInPage(long pagePlusOffsetAddress);
39+
40+
public abstract long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage);
41+
}

0 commit comments

Comments
 (0)