
Commit c1bcb41

DeveloperAPI -> DeveloperApi
1 parent 0d48908 commit c1bcb41

37 files changed, +170 -170 lines changed

core/src/main/scala/org/apache/spark/Aggregator.scala  (+3 -3)

@@ -17,18 +17,18 @@
 
 package org.apache.spark
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * A set of functions used to aggregate data.
  *
  * @param createCombiner function to create the initial value of the aggregation.
  * @param mergeValue function to merge a new value into the aggregation result.
  * @param mergeCombiners function to merge outputs from multiple mergeValue function.
  */
-@DeveloperAPI
+@DeveloperApi
 case class Aggregator[K, V, C] (
     createCombiner: V => C,
     mergeValue: (C, V) => C,

core/src/main/scala/org/apache/spark/Dependency.scala  (+11 -11)

@@ -17,24 +17,24 @@
 
 package org.apache.spark
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Base class for dependencies.
  */
-@DeveloperAPI
+@DeveloperApi
 abstract class Dependency[T](val rdd: RDD[T]) extends Serializable
 
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Base class for dependencies where each partition of the parent RDD is used by at most one
  * partition of the child RDD. Narrow dependencies allow for pipelined execution.
  */
-@DeveloperAPI
+@DeveloperApi
 abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
   /**
    * Get the parent partitions for a child partition.
@@ -46,15 +46,15 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
 
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Represents a dependency on the output of a shuffle stage.
  * @param rdd the parent RDD
  * @param partitioner partitioner used to partition the shuffle output
  * @param serializer [[org.apache.spark.serializer.Serializer Serializer]] to use. If set to null,
  *                   the default serializer, as specified by `spark.serializer` config option, will
  *                   be used.
  */
-@DeveloperAPI
+@DeveloperApi
 class ShuffleDependency[K, V](
     @transient rdd: RDD[_ <: Product2[K, V]],
     val partitioner: Partitioner,
@@ -66,24 +66,24 @@ class ShuffleDependency[K, V](
 
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Represents a one-to-one dependency between partitions of the parent and child RDDs.
  */
-@DeveloperAPI
+@DeveloperApi
 class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
   override def getParents(partitionId: Int) = List(partitionId)
 }
 
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
  * @param rdd the parent RDD
  * @param inStart the start of the range in the parent RDD
  * @param outStart the start of the range in the child RDD
  * @param length the length of the range
  */
-@DeveloperAPI
+@DeveloperApi
 class RangeDependency[T](rdd: RDD[T], inStart: Int, outStart: Int, length: Int)
   extends NarrowDependency[T](rdd) {
 

core/src/main/scala/org/apache/spark/Logging.scala  (+3 -3)

@@ -21,18 +21,18 @@ import org.apache.log4j.{LogManager, PropertyConfigurator}
 import org.slf4j.{Logger, LoggerFactory}
 import org.slf4j.impl.StaticLoggerBinder
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
  * logging messages at different levels using methods that only evaluate parameters lazily if the
  * log level is enabled.
  *
  * NOTE: DO NOT USE this class outside of Spark. It is intended as an internal utility.
  * This will likely be changed or removed in future releases.
  */
-@DeveloperAPI
+@DeveloperApi
 trait Logging {
   // Make the log field transient so that objects with Logging can
   // be serialized and used on another machine

core/src/main/scala/org/apache/spark/SerializableWritable.scala  (+2 -2)

@@ -23,9 +23,9 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.io.ObjectWritable
 import org.apache.hadoop.io.Writable
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 
-@DeveloperAPI
+@DeveloperApi
 class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
   def value = t
   override def toString = t.toString

core/src/main/scala/org/apache/spark/SparkContext.scala  (+9 -9)

@@ -35,7 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
 
-import org.apache.spark.annotations.{DeveloperAPI, Experimental}
+import org.apache.spark.annotations.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -49,15 +49,15 @@ import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
  * @param config a Spark Config object describing the application configuration. Any settings in
  *   this config overrides the default configs as well as system properties.
  */
 
-@DeveloperAPI
+@DeveloperApi
 class SparkContext(config: SparkConf) extends Logging {
 
   // This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
@@ -66,14 +66,14 @@ class SparkContext(config: SparkConf) extends Logging {
   private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Alternative constructor for setting preferred locations where Spark will create executors.
    *
    * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Ca
    * be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
    * from a list of input files or InputFormats for the application.
   */
-  @DeveloperAPI
+  @DeveloperApi
   def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
     this(config)
     this.preferredNodeLocationData = preferredNodeLocationData
@@ -718,10 +718,10 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Register a listener to receive up-calls from events that happen during execution.
   */
-  @DeveloperAPI
+  @DeveloperApi
   def addSparkListener(listener: SparkListener) {
     listenerBus.addListener(listener)
   }
@@ -1031,10 +1031,10 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Run a job that can return approximate results.
   */
-  @DeveloperAPI
+  @DeveloperApi
   def runApproximateJob[T, U, R](
       rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,

core/src/main/scala/org/apache/spark/SparkEnv.scala  (+3 -3)

@@ -25,7 +25,7 @@ import scala.util.Properties
 import akka.actor._
 import com.google.common.collect.MapMaker
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.api.python.PythonWorkerFactory
 import org.apache.spark.broadcast.BroadcastManager
 import org.apache.spark.metrics.MetricsSystem
@@ -36,14 +36,14 @@ import org.apache.spark.storage._
 import org.apache.spark.util.{AkkaUtils, Utils}
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Holds all the runtime environment objects for a running Spark instance (either master or worker),
  * including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
  * Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these
  * objects needs to have the right SparkEnv set. You can get the current environment with
 * SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set.
  */
-@DeveloperAPI
+@DeveloperApi
 class SparkEnv (
     val executorId: String,
     val actorSystem: ActorSystem,

core/src/main/scala/org/apache/spark/TaskContext.scala  (+3 -3)

@@ -19,14 +19,14 @@ package org.apache.spark
 
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.executor.TaskMetrics
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Contextual information about a task which can be read or mutated during execution.
  */
-@DeveloperAPI
+@DeveloperApi
 class TaskContext(
   val stageId: Int,
   val partitionId: Int,

core/src/main/scala/org/apache/spark/TaskEndReason.scala  (+14 -14)

@@ -17,34 +17,34 @@
 
 package org.apache.spark
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.executor.TaskMetrics
 import org.apache.spark.storage.BlockManagerId
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
  * tasks several times for "ephemeral" failures, and only report back failures that require some
  * old stages to be resubmitted, such as shuffle map fetch failures.
  */
-@DeveloperAPI
+@DeveloperApi
 sealed trait TaskEndReason
 
-@DeveloperAPI
+@DeveloperApi
 case object Success extends TaskEndReason
 
-@DeveloperAPI
+@DeveloperApi
 case object Resubmitted extends TaskEndReason // Task was finished earlier but we've now lost it
 
-@DeveloperAPI
+@DeveloperApi
 case class FetchFailed(
     bmAddress: BlockManagerId,
     shuffleId: Int,
     mapId: Int,
     reduceId: Int)
   extends TaskEndReason
 
-@DeveloperAPI
+@DeveloperApi
 case class ExceptionFailure(
     className: String,
     description: String,
@@ -53,28 +53,28 @@ case class ExceptionFailure(
   extends TaskEndReason
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * The task finished successfully, but the result was lost from the executor's block manager before
  * it was fetched.
  */
-@DeveloperAPI
+@DeveloperApi
 case object TaskResultLost extends TaskEndReason
 
-@DeveloperAPI
+@DeveloperApi
 case object TaskKilled extends TaskEndReason
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * The task failed because the executor that it was running on was lost. This may happen because
  * the task crashed the JVM.
  */
-@DeveloperAPI
+@DeveloperApi
 case object ExecutorLostFailure extends TaskEndReason
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * We don't know why the task ended -- for example, because of a ClassNotFound exception when
  * deserializing the task result.
  */
-@DeveloperAPI
+@DeveloperApi
 case object UnknownReason extends TaskEndReason

core/src/main/scala/org/apache/spark/annotations/DeveloperAPI.java renamed to core/src/main/scala/org/apache/spark/annotations/DeveloperApi.java  (+1 -1)

@@ -22,4 +22,4 @@
 @Retention(RetentionPolicy.SOURCE)
 @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
     ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
-public @interface DeveloperAPI {}
+public @interface DeveloperApi {}
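
For context, the renamed annotation pieced together from this hunk would look roughly as follows. The diff only shows lines 22-25, so the license header is omitted and the package declaration and java.lang.annotation imports are inferred from the file path and the Scala imports above; treat this as a sketch, not the exact file contents.

package org.apache.spark.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Marker annotation retained only in source; the commit merely renames the type
// from DeveloperAPI to DeveloperApi, leaving retention policy and targets unchanged.
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface DeveloperApi {}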

core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala  (+3 -3)

@@ -19,16 +19,16 @@ package org.apache.spark.broadcast
 
 import org.apache.spark.SecurityManager
 import org.apache.spark.SparkConf
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * An interface for all the broadcast implementations in Spark (to allow
  * multiple broadcast implementations). SparkContext uses a user-specified
  * BroadcastFactory implementation to instantiate a particular broadcast for the
  * entire Spark job.
  */
-@DeveloperAPI
+@DeveloperApi
 trait BroadcastFactory {
   def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager): Unit
   def newBroadcast[T](value: T, isLocal: Boolean, id: Long): Broadcast[T]

core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala  (+7 -7)

@@ -17,14 +17,14 @@
 
 package org.apache.spark.executor
 
-import org.apache.spark.annotations.DeveloperAPI
+import org.apache.spark.annotations.DeveloperApi
 import org.apache.spark.storage.{BlockId, BlockStatus}
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Metrics tracked during the execution of a task.
  */
-@DeveloperAPI
+@DeveloperApi
 class TaskMetrics extends Serializable {
   /**
    * Host's name the task runs on
@@ -89,10 +89,10 @@ private[spark] object TaskMetrics {
 
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Metrics pertaining to shuffle data read in a given task.
  */
-@DeveloperAPI
+@DeveloperApi
 class ShuffleReadMetrics extends Serializable {
   /**
    * Absolute time when this task finished reading shuffle data
@@ -128,10 +128,10 @@ class ShuffleReadMetrics {
 }
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Metrics pertaining to shuffle data written in a given task.
  */
-@DeveloperAPI
+@DeveloperApi
 class ShuffleWriteMetrics extends Serializable {
   /**
    * Number of bytes written for the shuffle by this task

0 commit comments

Comments
 (0)