Commit 028e1b0

Review comments
1 parent 64e01d9 commit 028e1b0

5 files changed: 14 additions and 12 deletions

core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -373,8 +373,9 @@ class ApplicationCacheSuite extends SparkFunSuite with Logging with MockitoSugar
     when(request.getRequestURI()).thenReturn("http://localhost:18080/history/local-123/jobs/job/")
     when(request.getQueryString()).thenReturn("id=2")
     val resp = mock[HttpServletResponse]
-    when(resp.encodeRedirectURL(any())).thenAnswer((invocationOnMock: InvocationOnMock) =>
-      invocationOnMock.getArguments()(0).asInstanceOf[String])
+    when(resp.encodeRedirectURL(any())).thenAnswer { (invocationOnMock: InvocationOnMock) =>
+      invocationOnMock.getArguments()(0).asInstanceOf[String]
+    }
     filter.doFilter(request, resp, null)
     verify(resp).sendRedirect("http://localhost:18080/history/local-123/jobs/job/?id=2")
   }
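This commit switches multi-line Mockito answers from parenthesized lambdas to brace blocks, the usual Scala style for multi-line closures; the same pattern repeats in the files below. As a rough, self-contained illustration of the resulting style (the Greeter trait and ThenAnswerSketch object are hypothetical, not part of the commit):

import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, when}
import org.mockito.invocation.InvocationOnMock

// Hypothetical interface used only for this sketch.
trait Greeter {
  def greet(name: String): String
}

object ThenAnswerSketch {
  def main(args: Array[String]): Unit = {
    val greeter = mock(classOf[Greeter])
    // The function literal is SAM-converted to Mockito's Answer; a brace
    // block keeps multi-line answer bodies readable.
    when(greeter.greet(any[String]())).thenAnswer { (invocation: InvocationOnMock) =>
      val name = invocation.getArguments()(0).asInstanceOf[String]
      s"Hello, $name"
    }
    assert(greeter.greet("Spark") == "Hello, Spark")
  }
}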

core/src/test/scala/org/apache/spark/scheduler/BlacklistTrackerSuite.scala

Lines changed: 4 additions & 4 deletions
@@ -479,7 +479,7 @@ class BlacklistTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with M
   test("blacklisting kills executors, configured by BLACKLIST_KILL_ENABLED") {
     val allocationClientMock = mock[ExecutorAllocationClient]
     when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called"))
-    when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer((_: InvocationOnMock) =>
+    when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) =>
       // To avoid a race between blacklisting and killing, it is important that the nodeBlacklist
       // is updated before we ask the executor allocation client to kill all the executors
       // on a particular host.
@@ -488,7 +488,7 @@ class BlacklistTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with M
       } else {
         throw new IllegalStateException("hostA should be on the blacklist")
       }
-    )
+    }
     blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock)
 
     // Disable auto-kill. Blacklist an executor and make sure killExecutors is not called.
@@ -550,7 +550,7 @@ class BlacklistTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with M
   test("fetch failure blacklisting kills executors, configured by BLACKLIST_KILL_ENABLED") {
     val allocationClientMock = mock[ExecutorAllocationClient]
     when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called"))
-    when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer((_: InvocationOnMock) =>
+    when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) =>
       // To avoid a race between blacklisting and killing, it is important that the nodeBlacklist
       // is updated before we ask the executor allocation client to kill all the executors
       // on a particular host.
@@ -559,7 +559,7 @@ class BlacklistTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with M
       } else {
         throw new IllegalStateException("hostA should be on the blacklist")
      }
-    )
+    }
 
     conf.set(config.BLACKLIST_FETCH_FAILURE_ENABLED, true)
     blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock)
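In this suite the answer does more than return a canned value: it checks, at call time, that the node blacklist was updated before the kill was requested. A minimal sketch of that idea, assuming a hypothetical HostClient trait and a plain mutable set in place of the BlacklistTracker state, might look like:

import scala.collection.mutable
import org.mockito.Mockito.{mock, verify, when}
import org.mockito.invocation.InvocationOnMock

// Hypothetical collaborator standing in for ExecutorAllocationClient.
trait HostClient {
  def killExecutorsOnHost(host: String): Boolean
}

object OrderingAnswerSketch {
  def main(args: Array[String]): Unit = {
    val blacklistedHosts = mutable.Set.empty[String]
    val client = mock(classOf[HostClient])
    // The answer runs when the mock is called, so it can verify that the
    // caller updated its state before asking to kill executors on the host.
    when(client.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) =>
      if (blacklistedHosts.contains("hostA")) {
        true
      } else {
        throw new IllegalStateException("hostA should be on the blacklist")
      }
    }

    blacklistedHosts += "hostA"                 // code under test updates state first...
    assert(client.killExecutorsOnHost("hostA")) // ...then triggers the kill
    verify(client).killExecutorsOnHost("hostA")
  }
}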

core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala

Lines changed: 3 additions & 2 deletions
@@ -104,8 +104,9 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte
       temporaryFilesCreated += file
       (blockId, file)
     })
-    when(diskBlockManager.getFile(any[BlockId])).thenAnswer((invocation: InvocationOnMock) =>
-      blockIdToFileMap(invocation.getArguments.head.asInstanceOf[BlockId]))
+    when(diskBlockManager.getFile(any[BlockId])).thenAnswer { (invocation: InvocationOnMock) =>
+      blockIdToFileMap(invocation.getArguments.head.asInstanceOf[BlockId])
+    }
   }
 
   override def afterEach(): Unit = {

core/src/test/scala/org/apache/spark/storage/PartiallySerializedBlockSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -58,9 +58,9 @@ class PartiallySerializedBlockSuite
 
     val bbos: ChunkedByteBufferOutputStream = {
       val spy = Mockito.spy(new ChunkedByteBufferOutputStream(128, ByteBuffer.allocate))
-      Mockito.doAnswer((invocationOnMock: InvocationOnMock) =>
+      Mockito.doAnswer { (invocationOnMock: InvocationOnMock) =>
         Mockito.spy(invocationOnMock.callRealMethod().asInstanceOf[ChunkedByteBuffer])
-      ).when(spy).toChunkedByteBuffer
+      }.when(spy).toChunkedByteBuffer
       spy
     }
 
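This change uses Mockito's doAnswer(...).when(spy) ordering, which stubs a spy without invoking the real method during stubbing while the answer still delegates through callRealMethod(). A rough sketch of the same technique, with a hypothetical BufferBox class standing in for ChunkedByteBufferOutputStream:

import java.nio.ByteBuffer
import org.mockito.Mockito
import org.mockito.invocation.InvocationOnMock

// Hypothetical class used only for this sketch.
class BufferBox(size: Int) {
  def snapshot(): ByteBuffer = ByteBuffer.allocate(size)
}

object DoAnswerSpySketch {
  def main(args: Array[String]): Unit = {
    val spy = Mockito.spy(new BufferBox(128))
    // doAnswer(...).when(spy) stubs the spy without calling snapshot() while
    // the stub is being set up; the answer delegates via callRealMethod()
    // and wraps the real result before returning it.
    Mockito.doAnswer { (invocation: InvocationOnMock) =>
      val real = invocation.callRealMethod().asInstanceOf[ByteBuffer]
      real.asReadOnlyBuffer()
    }.when(spy).snapshot()

    assert(spy.snapshot().isReadOnly)
  }
}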

external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisCheckpointerSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -123,9 +123,9 @@ class KinesisCheckpointerSuite extends TestSuiteBase
   test("if checkpointing is going on, wait until finished before removing and checkpointing") {
     when(receiverMock.getLatestSeqNumToCheckpoint(shardId))
       .thenReturn(someSeqNum).thenReturn(someOtherSeqNum)
-    when(checkpointerMock.checkpoint(anyString)).thenAnswer((_: InvocationOnMock) =>
+    when(checkpointerMock.checkpoint(anyString)).thenAnswer { (_: InvocationOnMock) =>
      clock.waitTillTime(clock.getTimeMillis() + checkpointInterval.milliseconds / 2)
-    )
+    }
 
     kinesisCheckpointer.setCheckpointer(shardId, checkpointerMock)
     clock.advance(checkpointInterval.milliseconds)
