@@ -35,7 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
 
-import org.apache.spark.annotations.{DeveloperAPI, Experimental}
+import org.apache.spark.annotations.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -49,15 +49,15 @@ import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}
 
 /**
- * :: DeveloperAPI ::
+ * :: DeveloperApi ::
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
  * @param config a Spark Config object describing the application configuration. Any settings in
  *   this config overrides the default configs as well as system properties.
  */
 
-@DeveloperAPI
+@DeveloperApi
 class SparkContext(config: SparkConf) extends Logging {
 
   // This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
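Note: for context on the primary constructor documented in the hunk above, here is a minimal usage sketch; the application name and master URL are placeholder values, not taken from this diff.

    import org.apache.spark.{SparkConf, SparkContext}

    // Build the application configuration; settings here override defaults and system properties.
    val conf = new SparkConf()
      .setAppName("annotation-demo")   // placeholder application name
      .setMaster("local[2]")           // placeholder master URL

    // The class annotated @DeveloperApi above remains the normal entry point for Spark functionality.
    val sc = new SparkContext(conf)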
@@ -66,14 +66,14 @@ class SparkContext(config: SparkConf) extends Logging {
   private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Alternative constructor for setting preferred locations where Spark will create executors.
    *
    * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Can
    * be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
    * from a list of input files or InputFormats for the application.
    */
-  @DeveloperAPI
+  @DeveloperApi
   def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
     this(config)
     this.preferredNodeLocationData = preferredNodeLocationData
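Note: a hedged sketch of the alternative constructor changed above. The preferred-location map is left empty purely for illustration; as the scaladoc says, it would normally be generated from the job's input files or InputFormats via InputFormatInfo.computePreferredLocations.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.scheduler.SplitInfo

    val conf = new SparkConf().setAppName("locality-demo")  // placeholder name

    // Normally derived from the application's InputFormats; empty map shown only for illustration.
    val preferredLocations: Map[String, Set[SplitInfo]] = Map.empty

    // YARN mode can use this map to choose nodes on which to launch containers.
    val sc = new SparkContext(conf, preferredLocations)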
@@ -718,10 +718,10 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Register a listener to receive up-calls from events that happen during execution.
    */
-  @DeveloperAPI
+  @DeveloperApi
   def addSparkListener(listener: SparkListener) {
     listenerBus.addListener(listener)
   }
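Note: a small sketch of the listener hook annotated above, assuming the SparkListener trait from org.apache.spark.scheduler with its no-op default callbacks; only the job-end callback is overridden here.

    import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}

    // Receives up-calls from events that happen during execution.
    val listener = new SparkListener {
      override def onJobEnd(jobEnd: SparkListenerJobEnd) {
        println(s"Job ${jobEnd.jobId} finished with result: ${jobEnd.jobResult}")
      }
    }

    sc.addSparkListener(listener)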
@@ -1031,10 +1031,10 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /**
-   * :: DeveloperAPI ::
+   * :: DeveloperApi ::
    * Run a job that can return approximate results.
    */
-  @DeveloperAPI
+  @DeveloperApi
   def runApproximateJob[T, U, R](
       rdd: RDD[T],
       func: (TaskContext, Iterator[T]) => U,
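Note: rather than calling runApproximateJob directly, user code typically reaches this path through approximate RDD actions such as countApprox, which return a PartialResult. A hedged usage sketch (not a claim about this exact commit):

    val data = sc.parallelize(1 to 1000000)

    // Ask for a count within 2 seconds at 90% confidence; returns a PartialResult[BoundedDouble].
    val approx = data.countApprox(timeout = 2000L, confidence = 0.90)

    println(approx.initialValue)    // current estimate with low/high bounds
    println(approx.getFinalValue()) // blocks until the final result is available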