
Commit a01c076

Merge pull request #1 from andrewor14/annotations

Replace all <span>...</span>s with annotations

2 parents 037755c + c1bcb41


47 files changed: +349 -114 lines (only a subset of the changed files is reproduced below)
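
The Scala diffs below import org.apache.spark.annotations.DeveloperApi and org.apache.spark.annotations.Experimental, but only the new AlphaComponent.java is reproduced at the end of this view. As a rough sketch, assuming the other annotation definitions mirror AlphaComponent, DeveloperApi would look roughly like this (an illustration, not the committed file):

package org.apache.spark.annotations;

import java.lang.annotation.*;

// Sketch only: assumed to mirror AlphaComponent.java from this commit.
// SOURCE retention means the marker is dropped by the compiler and is
// visible only to source-processing documentation tools.
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface DeveloperApi {}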

core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 3 additions & 1 deletion
@@ -17,16 +17,18 @@
 
 package org.apache.spark
 
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * A set of functions used to aggregate data.
  *
  * @param createCombiner function to create the initial value of the aggregation.
  * @param mergeValue function to merge a new value into the aggregation result.
  * @param mergeCombiners function to merge outputs from multiple mergeValue function.
  */
+@DeveloperApi
 case class Aggregator[K, V, C] (
     createCombiner: V => C,
     mergeValue: (C, V) => C,

core/src/main/scala/org/apache/spark/Dependency.scala

Lines changed: 11 additions & 5 deletions
@@ -17,21 +17,24 @@
 
 package org.apache.spark
 
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Base class for dependencies.
  */
+@DeveloperApi
 abstract class Dependency[T](val rdd: RDD[T]) extends Serializable
 
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Base class for dependencies where each partition of the parent RDD is used by at most one
  * partition of the child RDD. Narrow dependencies allow for pipelined execution.
  */
+@DeveloperApi
 abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
   /**
    * Get the parent partitions for a child partition.
@@ -43,14 +46,15 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
 
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Represents a dependency on the output of a shuffle stage.
  * @param rdd the parent RDD
  * @param partitioner partitioner used to partition the shuffle output
  * @param serializer [[org.apache.spark.serializer.Serializer Serializer]] to use. If set to null,
  *        the default serializer, as specified by `spark.serializer` config option, will
  *        be used.
  */
+@DeveloperApi
 class ShuffleDependency[K, V](
     @transient rdd: RDD[_ <: Product2[K, V]],
     val partitioner: Partitioner,
@@ -62,22 +66,24 @@ class ShuffleDependency[K, V](
 
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Represents a one-to-one dependency between partitions of the parent and child RDDs.
  */
+@DeveloperApi
 class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
   override def getParents(partitionId: Int) = List(partitionId)
 }
 
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
  * @param rdd the parent RDD
  * @param inStart the start of the range in the parent RDD
  * @param outStart the start of the range in the child RDD
  * @param length the length of the range
  */
+@DeveloperApi
 class RangeDependency[T](rdd: RDD[T], inStart: Int, outStart: Int, length: Int)
   extends NarrowDependency[T](rdd) {
 
core/src/main/scala/org/apache/spark/FutureAction.scala

Lines changed: 7 additions & 3 deletions
@@ -21,14 +21,16 @@ import scala.concurrent._
 import scala.concurrent.duration.Duration
 import scala.util.Try
 
+import org.apache.spark.annotations.Experimental
 import org.apache.spark.rdd.RDD
 import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}
 
 /**
- * <span class="experimental badge">Experimental</span>
+ * :: Experimental ::
  * A future for the result of an action to support cancellation. This is an extension of the
  * Scala Future interface to support cancellation.
  */
+@Experimental
 trait FutureAction[T] extends Future[T] {
   // Note that we redefine methods of the Future trait here explicitly so we can specify a different
   // documentation (with reference to the word "action").
@@ -85,10 +87,11 @@ trait FutureAction[T] extends Future[T] {
 
 
 /**
- * <span class="experimental badge">Experimental</span>
+ * :: Experimental ::
  * A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
  * count, collect, reduce.
  */
+@Experimental
 class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc: => T)
   extends FutureAction[T] {
 
@@ -150,11 +153,12 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:
 
 
 /**
- * <span class="experimental badge">Experimental</span>
+ * :: Experimental ::
  * A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
  * takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
  * action thread if it is being blocked by a job.
  */
+@Experimental
 class ComplexFutureAction[T] extends FutureAction[T] {
 
   // Pointer to the thread that is executing the action. It is set when the action is run.

core/src/main/scala/org/apache/spark/Logging.scala

Lines changed: 4 additions & 1 deletion
@@ -21,15 +21,18 @@ import org.apache.log4j.{LogManager, PropertyConfigurator}
 import org.slf4j.{Logger, LoggerFactory}
 import org.slf4j.impl.StaticLoggerBinder
 
+import org.apache.spark.annotations.DeveloperApi
+
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
  * logging messages at different levels using methods that only evaluate parameters lazily if the
  * log level is enabled.
  *
  * NOTE: DO NOT USE this class outside of Spark. It is intended as an internal utility.
  * This will likely be changed or removed in future releases.
  */
+@DeveloperApi
 trait Logging {
   // Make the log field transient so that objects with Logging can
   // be serialized and used on another machine

core/src/main/scala/org/apache/spark/SerializableWritable.scala

Lines changed: 3 additions & 1 deletion
@@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.ObjectWritable
 import org.apache.hadoop.io.Writable
 
-/** <span class="developer badge">Developer API</span> */
+import org.apache.spark.annotations.DeveloperApi
+
+@DeveloperApi
 class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
   def value = t
   override def toString = t.toString

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 12 additions & 6 deletions
@@ -35,6 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
 
+import org.apache.spark.annotations.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -48,28 +49,31 @@ import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}
 
 /**
+ * :: DeveloperApi ::
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
  * @param config a Spark Config object describing the application configuration. Any settings in
  *   this config overrides the default configs as well as system properties.
  */
-class SparkContext(config: SparkConf)
-  extends Logging {
+
+@DeveloperApi
+class SparkContext(config: SparkConf) extends Logging {
 
   // This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
   // etc) too. This is typically generated from InputFormatInfo.computePreferredLocations. It
   // contains a map from hostname to a list of input format splits on the host.
   private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Alternative constructor for setting preferred locations where Spark will create executors.
    *
   * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Ca
   * be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
   * from a list of input files or InputFormats for the application.
   */
+  @DeveloperApi
   def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
     this(config)
     this.preferredNodeLocationData = preferredNodeLocationData
@@ -714,9 +718,10 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Register a listener to receive up-calls from events that happen during execution.
    */
+  @DeveloperApi
   def addSparkListener(listener: SparkListener) {
     listenerBus.addListener(listener)
   }
@@ -1026,9 +1031,10 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Run a job that can return approximate results.
    */
+  @DeveloperApi
   def runApproximateJob[T, U, R](
       rdd: RDD[T],
       func: (TaskContext, Iterator[T]) => U,
@@ -1044,9 +1050,9 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="experimental badge">Experimental</span>
    * Submit a job for execution and return a FutureJob holding the result.
    */
+  @Experimental
   def submitJob[T, U, R](
       rdd: RDD[T],
       processPartition: Iterator[T] => U,

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 3 additions & 1 deletion
@@ -25,6 +25,7 @@ import scala.util.Properties
 import akka.actor._
 import com.google.common.collect.MapMaker
 
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.api.python.PythonWorkerFactory
 import org.apache.spark.broadcast.BroadcastManager
 import org.apache.spark.metrics.MetricsSystem
@@ -35,13 +36,14 @@ import org.apache.spark.storage._
 import org.apache.spark.util.{AkkaUtils, Utils}
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Holds all the runtime environment objects for a running Spark instance (either master or worker),
  * including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
  * Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these
  * objects needs to have the right SparkEnv set. You can get the current environment with
  * SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set.
  */
+@DeveloperApi
 class SparkEnv (
     val executorId: String,
     val actorSystem: ActorSystem,

core/src/main/scala/org/apache/spark/TaskContext.scala

Lines changed: 3 additions & 1 deletion
@@ -19,12 +19,14 @@ package org.apache.spark
 
 import scala.collection.mutable.ArrayBuffer
 
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.executor.TaskMetrics
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Contextual information about a task which can be read or mutated during execution.
  */
+@DeveloperApi
 class TaskContext(
     val stageId: Int,
     val partitionId: Int,

core/src/main/scala/org/apache/spark/TaskEndReason.scala

Lines changed: 14 additions & 10 deletions
@@ -17,33 +17,34 @@
 
 package org.apache.spark
 
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.storage.BlockManagerId
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
  * tasks several times for "ephemeral" failures, and only report back failures that require some
  * old stages to be resubmitted, such as shuffle map fetch failures.
  */
-
+@DeveloperApi
 sealed trait TaskEndReason
 
-/** <span class="developer badge">Developer API</span> */
+@DeveloperApi
 case object Success extends TaskEndReason
 
-/** <span class="developer badge">Developer API</span> */
+@DeveloperApi
 case object Resubmitted extends TaskEndReason // Task was finished earlier but we've now lost it
 
-/** <span class="developer badge">Developer API</span> */
+@DeveloperApi
 case class FetchFailed(
     bmAddress: BlockManagerId,
     shuffleId: Int,
     mapId: Int,
     reduceId: Int)
   extends TaskEndReason
 
-/** <span class="developer badge">Developer API</span> */
+@DeveloperApi
 case class ExceptionFailure(
     className: String,
     description: String,
@@ -52,25 +53,28 @@ case class ExceptionFailure(
   extends TaskEndReason
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
 * The task finished successfully, but the result was lost from the executor's block manager before
 * it was fetched.
  */
+@DeveloperApi
 case object TaskResultLost extends TaskEndReason
 
-/** <span class="developer badge">Developer API</span> */
+@DeveloperApi
 case object TaskKilled extends TaskEndReason
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
  * The task failed because the executor that it was running on was lost. This may happen because
  * the task crashed the JVM.
  */
+@DeveloperApi
 case object ExecutorLostFailure extends TaskEndReason
 
 /**
- * <span class="developer badge">Developer API</span>
+ * :: DeveloperApi ::
 * We don't know why the task ended -- for example, because of a ClassNotFound exception when
 * deserializing the task result.
  */
+@DeveloperApi
 case object UnknownReason extends TaskEndReason
org/apache/spark/annotations/AlphaComponent.java (new file)

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.annotations;
+
+import java.lang.annotation.*;
+
+@Retention(RetentionPolicy.SOURCE)
+@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
+    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
+public @interface AlphaComponent {}
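
AlphaComponent uses RetentionPolicy.SOURCE, so the annotation is discarded at compile time and adds no runtime dependency; the plain-text ":: DeveloperApi ::" / ":: Experimental ::" markers added to the doc comments above presumably let the generated documentation keep rendering a badge where the old <span> HTML used to appear. A hypothetical usage of AlphaComponent itself, by analogy with those files (example class only, not part of this commit):

/**
 * :: AlphaComponent ::
 * A component whose API may still change between minor releases.
 */
@AlphaComponent
public class HypotheticalNewComponent {
  // Hypothetical example class illustrating how the annotation is applied.
}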
