
Commit f90c8c7

Merge remote-tracking branch 'upstream/master' into SPARK-3278
2 parents: 0d14bd3 + f3bfc76

314 files changed (+4723, -4893 lines)


bin/spark-class

Lines changed: 1 addition & 1 deletion

@@ -148,7 +148,7 @@ fi
 if [[ "$1" =~ org.apache.spark.tools.* ]]; then
   if test -z "$SPARK_TOOLS_JAR"; then
     echo "Failed to find Spark Tools Jar in $FWDIR/tools/target/scala-$SPARK_SCALA_VERSION/" 1>&2
-    echo "You need to build Spark before running $1." 1>&2
+    echo "You need to run \"build/sbt tools/package\" before running $1." 1>&2
     exit 1
   fi
   CLASSPATH="$CLASSPATH:$SPARK_TOOLS_JAR"

core/src/main/java/org/apache/spark/JavaSparkListener.java

Lines changed: 97 additions & 0 deletions

@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark;
+
+import org.apache.spark.scheduler.SparkListener;
+import org.apache.spark.scheduler.SparkListenerApplicationEnd;
+import org.apache.spark.scheduler.SparkListenerApplicationStart;
+import org.apache.spark.scheduler.SparkListenerBlockManagerAdded;
+import org.apache.spark.scheduler.SparkListenerBlockManagerRemoved;
+import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate;
+import org.apache.spark.scheduler.SparkListenerExecutorAdded;
+import org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate;
+import org.apache.spark.scheduler.SparkListenerExecutorRemoved;
+import org.apache.spark.scheduler.SparkListenerJobEnd;
+import org.apache.spark.scheduler.SparkListenerJobStart;
+import org.apache.spark.scheduler.SparkListenerStageCompleted;
+import org.apache.spark.scheduler.SparkListenerStageSubmitted;
+import org.apache.spark.scheduler.SparkListenerTaskEnd;
+import org.apache.spark.scheduler.SparkListenerTaskGettingResult;
+import org.apache.spark.scheduler.SparkListenerTaskStart;
+import org.apache.spark.scheduler.SparkListenerUnpersistRDD;
+
+/**
+ * Java clients should extend this class instead of implementing
+ * SparkListener directly. This is to prevent Java clients
+ * from breaking when new events are added to the SparkListener
+ * trait.
+ *
+ * This is a concrete class instead of abstract to enforce that
+ * new events get added to both the SparkListener and this adapter
+ * in lockstep.
+ */
+public class JavaSparkListener implements SparkListener {
+
+  @Override
+  public void onStageCompleted(SparkListenerStageCompleted stageCompleted) { }
+
+  @Override
+  public void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted) { }
+
+  @Override
+  public void onTaskStart(SparkListenerTaskStart taskStart) { }
+
+  @Override
+  public void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult) { }
+
+  @Override
+  public void onTaskEnd(SparkListenerTaskEnd taskEnd) { }
+
+  @Override
+  public void onJobStart(SparkListenerJobStart jobStart) { }
+
+  @Override
+  public void onJobEnd(SparkListenerJobEnd jobEnd) { }
+
+  @Override
+  public void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate) { }
+
+  @Override
+  public void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded) { }
+
+  @Override
+  public void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved) { }
+
+  @Override
+  public void onUnpersistRDD(SparkListenerUnpersistRDD unpersistRDD) { }
+
+  @Override
+  public void onApplicationStart(SparkListenerApplicationStart applicationStart) { }
+
+  @Override
+  public void onApplicationEnd(SparkListenerApplicationEnd applicationEnd) { }
+
+  @Override
+  public void onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate) { }
+
+  @Override
+  public void onExecutorAdded(SparkListenerExecutorAdded executorAdded) { }
+
+  @Override
+  public void onExecutorRemoved(SparkListenerExecutorRemoved executorRemoved) { }
+}
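
As a usage sketch (not part of this commit): a Java client extends JavaSparkListener and overrides only the callbacks it needs; everything else falls through to the adapter's empty implementations, so new SparkListener events cannot break compilation. The class name and job below are illustrative assumptions.

import java.util.Arrays;

import org.apache.spark.JavaSparkListener;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.scheduler.SparkListenerTaskEnd;

public class TaskEndLogger extends JavaSparkListener {
  // Override just one event; the other callbacks stay as no-ops from the adapter.
  @Override
  public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
    System.out.println("Task finished in stage " + taskEnd.stageId());
  }

  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("listener-demo").setMaster("local[2]");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    // Listener registration goes through the underlying Scala SparkContext.
    jsc.sc().addSparkListener(new TaskEndLogger());
    jsc.parallelize(Arrays.asList(1, 2, 3)).count(); // triggers task-end events
    jsc.stop();
  }
}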

core/src/main/java/org/apache/spark/TaskContext.java

Lines changed: 22 additions & 2 deletions

@@ -62,7 +62,7 @@ static void unset() {
    */
   public abstract boolean isInterrupted();
 
-  /** @deprecated: use isRunningLocally() */
+  /** @deprecated use {@link #isRunningLocally()} */
   @Deprecated
   public abstract boolean runningLocally();
 
@@ -87,19 +87,39 @@ static void unset() {
    * is for HadoopRDD to register a callback to close the input stream.
    * Will be called in any situation - success, failure, or cancellation.
    *
-   * @deprecated: use addTaskCompletionListener
+   * @deprecated use {@link #addTaskCompletionListener(scala.Function1)}
    *
    * @param f Callback function.
    */
   @Deprecated
   public abstract void addOnCompleteCallback(final Function0<Unit> f);
 
+  /**
+   * The ID of the stage that this task belongs to.
+   */
   public abstract int stageId();
 
+  /**
+   * The ID of the RDD partition that is computed by this task.
+   */
   public abstract int partitionId();
 
+  /**
+   * How many times this task has been attempted. The first task attempt will be assigned
+   * attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
+   */
+  public abstract int attemptNumber();
+
+  /** @deprecated use {@link #taskAttemptId()}; it was renamed to avoid ambiguity. */
+  @Deprecated
   public abstract long attemptId();
 
+  /**
+   * An ID that is unique to this task attempt (within the same SparkContext, no two task attempts
+   * will share the same attempt ID). This is roughly equivalent to Hadoop's TaskAttemptID.
+   */
+  public abstract long taskAttemptId();
+
   /** ::DeveloperApi:: */
   @DeveloperApi
   public abstract TaskMetrics taskMetrics();
core/src/main/resources/org/apache/spark/ui/static/webui.css

Lines changed: 4 additions & 3 deletions

@@ -19,6 +19,7 @@
   height: 50px;
   font-size: 15px;
   margin-bottom: 15px;
+  min-width: 1200px
 }
 
 .navbar .navbar-inner {
@@ -39,12 +40,12 @@
 
 .navbar .nav > li a {
   height: 30px;
-  line-height: 30px;
+  line-height: 2;
 }
 
 .navbar-text {
   height: 50px;
-  line-height: 50px;
+  line-height: 3.3;
 }
 
 table.sortable thead {
@@ -170,7 +171,7 @@ span.additional-metric-title {
 }
 
 .version {
-  line-height: 30px;
+  line-height: 2.5;
   vertical-align: bottom;
   font-size: 12px;
   padding: 0;
core/src/main/scala/org/apache/spark/CacheManager.scala

Lines changed: 5 additions & 1 deletion

@@ -44,7 +44,11 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
     blockManager.get(key) match {
       case Some(blockResult) =>
         // Partition is already materialized, so just return its values
-        context.taskMetrics.inputMetrics = Some(blockResult.inputMetrics)
+        val inputMetrics = blockResult.inputMetrics
+        val existingMetrics = context.taskMetrics
+          .getInputMetricsForReadMethod(inputMetrics.readMethod)
+        existingMetrics.addBytesRead(inputMetrics.bytesRead)
+
        new InterruptibleIterator(context, blockResult.data.asInstanceOf[Iterator[T]])
 
       case None =>