
Commit f3954e0

Add identifier tags in comments to work around scaladocs bug
The bug is that annotations and comments cannot both be displayed on the summary page. The workaround is to add a special pattern at the start of each comment, which we grep for as we post-process the DOM tree in JavaScript. For example, the comment of a class annotated with @DeveloperAPI must begin with ":: DeveloperAPI ::".
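
To make the pattern concrete, here is a minimal sketch of what an annotated class looks like after this change. The class and method names (InternalThing, doWork) are hypothetical and used only for illustration; the sketch assumes the org.apache.spark.annotations.DeveloperAPI annotation referenced throughout this diff is on the compile classpath.

import org.apache.spark.annotations.DeveloperAPI

/**
 * :: DeveloperAPI ::
 * A hypothetical developer-facing class, shown only to illustrate how the tag on the
 * first scaladoc line pairs with the annotation below it. The JavaScript post-processing
 * step greps the generated pages for this ":: ... ::" token and surfaces the API level
 * that scaladoc otherwise drops from the summary page.
 */
@DeveloperAPI
class InternalThing {
  def doWork(): Unit = ()
}

Keeping the marker textual, rather than relying on the annotation alone, is what lets the scaladoc summary page still convey the API-stability level even though the annotation itself cannot be rendered there.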
1 parent: 99192ef

38 files changed (+109, −25 lines)

core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@ import org.apache.spark.annotations.DeveloperAPI
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}

 /**
+ * :: DeveloperAPI ::
  * A set of functions used to aggregate data.
  *
  * @param createCombiner function to create the initial value of the aggregation.

core/src/main/scala/org/apache/spark/Dependency.scala

Lines changed: 5 additions & 0 deletions
@@ -22,13 +22,15 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer

 /**
+ * :: DeveloperAPI ::
  * Base class for dependencies.
  */
 @DeveloperAPI
 abstract class Dependency[T](val rdd: RDD[T]) extends Serializable


 /**
+ * :: DeveloperAPI ::
  * Base class for dependencies where each partition of the parent RDD is used by at most one
  * partition of the child RDD. Narrow dependencies allow for pipelined execution.
  */
@@ -44,6 +46,7 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {


 /**
+ * :: DeveloperAPI ::
  * Represents a dependency on the output of a shuffle stage.
  * @param rdd the parent RDD
  * @param partitioner partitioner used to partition the shuffle output
@@ -63,6 +66,7 @@ class ShuffleDependency[K, V](


 /**
+ * :: DeveloperAPI ::
  * Represents a one-to-one dependency between partitions of the parent and child RDDs.
  */
 @DeveloperAPI
@@ -72,6 +76,7 @@ class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {


 /**
+ * :: DeveloperAPI ::
  * Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
  * @param rdd the parent RDD
  * @param inStart the start of the range in the parent RDD

core/src/main/scala/org/apache/spark/FutureAction.scala

Lines changed: 3 additions & 0 deletions
@@ -26,6 +26,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}

 /**
+ * :: Experimental ::
  * A future for the result of an action to support cancellation. This is an extension of the
  * Scala Future interface to support cancellation.
  */
@@ -86,6 +87,7 @@ trait FutureAction[T] extends Future[T] {


 /**
+ * :: Experimental ::
  * A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
  * count, collect, reduce.
  */
@@ -151,6 +153,7 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:


 /**
+ * :: Experimental ::
  * A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
  * takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
  * action thread if it is being blocked by a job.

core/src/main/scala/org/apache/spark/Logging.scala

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@ import org.slf4j.impl.StaticLoggerBinder
 import org.apache.spark.annotations.DeveloperAPI

 /**
+ * :: DeveloperAPI ::
  * Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
  * logging messages at different levels using methods that only evaluate parameters lazily if the
  * log level is enabled.

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 5 additions & 0 deletions
@@ -49,6 +49,7 @@ import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}

 /**
+ * :: DeveloperAPI ::
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
@@ -65,6 +66,7 @@ class SparkContext(config: SparkConf) extends Logging {
 private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()

 /**
+ * :: DeveloperAPI ::
  * Alternative constructor for setting preferred locations where Spark will create executors.
  *
  * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Ca
@@ -716,6 +718,7 @@ class SparkContext(config: SparkConf) extends Logging {
 }

 /**
+ * :: DeveloperAPI ::
  * Register a listener to receive up-calls from events that happen during execution.
  */
 @DeveloperAPI
@@ -1028,6 +1031,7 @@ class SparkContext(config: SparkConf) extends Logging {
 }

 /**
+ * :: DeveloperAPI ::
  * Run a job that can return approximate results.
  */
 @DeveloperAPI
@@ -1046,6 +1050,7 @@ class SparkContext(config: SparkConf) extends Logging {
 }

 /**
+ *
  * Submit a job for execution and return a FutureJob holding the result.
  */
 @Experimental

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 1 addition & 0 deletions
@@ -36,6 +36,7 @@ import org.apache.spark.storage._
 import org.apache.spark.util.{AkkaUtils, Utils}

 /**
+ * :: DeveloperAPI ::
  * Holds all the runtime environment objects for a running Spark instance (either master or worker),
  * including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
  * Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these

core/src/main/scala/org/apache/spark/TaskContext.scala

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@ import org.apache.spark.annotations.DeveloperAPI
 import org.apache.spark.executor.TaskMetrics

 /**
+ * :: DeveloperAPI ::
  * Contextual information about a task which can be read or mutated during execution.
  */
 @DeveloperAPI

core/src/main/scala/org/apache/spark/TaskEndReason.scala

Lines changed: 4 additions & 0 deletions
@@ -22,6 +22,7 @@ import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.storage.BlockManagerId

 /**
+ * :: DeveloperAPI ::
  * Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
  * tasks several times for "ephemeral" failures, and only report back failures that require some
  * old stages to be resubmitted, such as shuffle map fetch failures.
@@ -52,6 +53,7 @@ case class ExceptionFailure(
   extends TaskEndReason

 /**
+ * :: DeveloperAPI ::
  * The task finished successfully, but the result was lost from the executor's block manager before
  * it was fetched.
  */
@@ -62,13 +64,15 @@ case object TaskResultLost extends TaskEndReason
 case object TaskKilled extends TaskEndReason

 /**
+ * :: DeveloperAPI ::
  * The task failed because the executor that it was running on was lost. This may happen because
  * the task crashed the JVM.
  */
 @DeveloperAPI
 case object ExecutorLostFailure extends TaskEndReason

 /**
+ * :: DeveloperAPI ::
  * We don't know why the task ended -- for example, because of a ClassNotFound exception when
  * deserializing the task result.
  */

core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala

Lines changed: 2 additions & 1 deletion
@@ -22,7 +22,8 @@ import org.apache.spark.SparkConf
 import org.apache.spark.annotations.DeveloperAPI

 /**
- * An interface for all the broadcast implementations in Spark (to allow
+ * :: DeveloperAPI ::
+ * An interface for all the broadcast implementations in Spark (to allow
  * multiple broadcast implementations). SparkContext uses a user-specified
  * BroadcastFactory implementation to instantiate a particular broadcast for the
  * entire Spark job.

core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala

Lines changed: 3 additions & 0 deletions
@@ -21,6 +21,7 @@ import org.apache.spark.annotations.DeveloperAPI
 import org.apache.spark.storage.{BlockId, BlockStatus}

 /**
+ * :: DeveloperAPI ::
  * Metrics tracked during the execution of a task.
  */
 @DeveloperAPI
@@ -88,6 +89,7 @@ private[spark] object TaskMetrics {


 /**
+ * :: DeveloperAPI ::
  * Metrics pertaining to shuffle data read in a given task.
  */
 @DeveloperAPI
@@ -126,6 +128,7 @@ class ShuffleReadMetrics extends Serializable {
 }

 /**
+ * :: DeveloperAPI ::
  * Metrics pertaining to shuffle data written in a given task.
  */
 @DeveloperAPI
