Commit 3e63d98

NirmalReddy authored and pwendell committed
Spark 1095: Adding explicit return types to all public methods

Excluded methods whose return types are self-evident and the cases discussed on the mailing list.

Author: NirmalReddy <nirmal_reddy2000@yahoo.com>
Author: NirmalReddy <nirmal.reddy@imaginea.com>

Closes #168 from NirmalReddy/Spark-1095 and squashes the following commits:

ac54b29 [NirmalReddy] import misplaced
8c5ff3e [NirmalReddy] Changed syntax of unit returning methods
02d0778 [NirmalReddy] fixed explicit types in all the other packages
1c17773 [NirmalReddy] fixed explicit types in core package
1 parent be6d96c commit 3e63d98

File tree: 25 files changed, +97 -57 lines
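To make the convention concrete, here is a minimal sketch of the pattern this commit applies; the `Scheduler` and `Job` names are hypothetical and not taken from the diffs below. Public members get an explicitly declared result type instead of whatever the compiler infers, and Unit-returning methods are written as `def f(): Unit = { ... }` rather than `def f() = ...`.

    // Hypothetical example of the convention; not code from this commit.
    object ReturnTypeSketch {
      final case class Job(id: Int)

      class Scheduler {
        private var jobs = List.empty[Job]

        // Before: `def submit(id: Int) = { ...; jobs }` would expose the inferred
        // type (List[Job]) as the public API, and a change to the body could
        // silently change that type. After: the contract is written down.
        def submit(id: Int): Seq[Job] = {
          jobs = Job(id) :: jobs
          jobs
        }

        // Unit-returning methods state `: Unit` explicitly instead of relying on
        // inference over the body.
        def clear(): Unit = {
          jobs = Nil
        }
      }

      def main(args: Array[String]): Unit = {
        val s = new Scheduler
        println(s.submit(1)) // List(Job(1))
        s.clear()
      }
    }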

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 19 additions & 12 deletions
@@ -35,6 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary

+import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
 import org.apache.spark.rdd._

@@ -230,7 +231,7 @@ class SparkContext(
   postEnvironmentUpdate()

   /** A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. */
-  val hadoopConfiguration = {
+  val hadoopConfiguration: Configuration = {
     val env = SparkEnv.get
     val hadoopConf = SparkHadoopUtil.get.newConfiguration()
     // Explicitly check for S3 environment variables

@@ -630,7 +631,7 @@ class SparkContext(
   * standard mutable collections. So you can use this with mutable Map, Set, etc.
   */
  def accumulableCollection[R <% Growable[T] with TraversableOnce[T] with Serializable, T]
-      (initialValue: R) = {
+      (initialValue: R): Accumulable[R, T] = {
    val param = new GrowableAccumulableParam[R,T]
    new Accumulable(initialValue, param)
  }

@@ -640,7 +641,7 @@ class SparkContext(
   * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
   * The variable will be sent to each cluster only once.
   */
-  def broadcast[T](value: T) = env.broadcastManager.newBroadcast[T](value, isLocal)
+  def broadcast[T](value: T): Broadcast[T] = env.broadcastManager.newBroadcast[T](value, isLocal)

  /**
   * Add a file to be downloaded with this Spark job on every node.

@@ -1126,7 +1127,7 @@ object SparkContext extends Logging {
  implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]) = new AsyncRDDActions(rdd)

  implicit def rddToSequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable: ClassTag](
-      rdd: RDD[(K, V)]) =
+      rdd: RDD[(K, V)]) =
    new SequenceFileRDDFunctions(rdd)

  implicit def rddToOrderedRDDFunctions[K <% Ordered[K]: ClassTag, V: ClassTag](

@@ -1163,27 +1164,33 @@
  }

  // Helper objects for converting common types to Writable
-  private def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) = {
+  private def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
+      : WritableConverter[T] = {
    val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
    new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
  }

-  implicit def intWritableConverter() = simpleWritableConverter[Int, IntWritable](_.get)
+  implicit def intWritableConverter(): WritableConverter[Int] =
+    simpleWritableConverter[Int, IntWritable](_.get)

-  implicit def longWritableConverter() = simpleWritableConverter[Long, LongWritable](_.get)
+  implicit def longWritableConverter(): WritableConverter[Long] =
+    simpleWritableConverter[Long, LongWritable](_.get)

-  implicit def doubleWritableConverter() = simpleWritableConverter[Double, DoubleWritable](_.get)
+  implicit def doubleWritableConverter(): WritableConverter[Double] =
+    simpleWritableConverter[Double, DoubleWritable](_.get)

-  implicit def floatWritableConverter() = simpleWritableConverter[Float, FloatWritable](_.get)
+  implicit def floatWritableConverter(): WritableConverter[Float] =
+    simpleWritableConverter[Float, FloatWritable](_.get)

-  implicit def booleanWritableConverter() =
+  implicit def booleanWritableConverter(): WritableConverter[Boolean] =
    simpleWritableConverter[Boolean, BooleanWritable](_.get)

-  implicit def bytesWritableConverter() = {
+  implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
    simpleWritableConverter[Array[Byte], BytesWritable](_.getBytes)
  }

-  implicit def stringWritableConverter() = simpleWritableConverter[String, Text](_.toString)
+  implicit def stringWritableConverter(): WritableConverter[String] =
+    simpleWritableConverter[String, Text](_.toString)

  implicit def writableWritableConverter[T <: Writable]() =
    new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
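The WritableConverter changes above reflect a general Scala guideline: a public implicit should declare its result type, because the inferred type is what implicit search sees and it can shift when the body is refactored. A standalone sketch of the idea, using a hypothetical Converter type rather than Spark's classes:

    // Hypothetical converter type standing in for Spark's WritableConverter.
    class Converter[T](val convert: String => T)

    object Converters {
      // Annotating the result type pins down the implicit's public signature,
      // independent of how the right-hand side is written.
      implicit val intConverter: Converter[Int] = new Converter[Int](_.toInt)
      implicit val booleanConverter: Converter[Boolean] = new Converter[Boolean](_.toBoolean)
    }

    object ConverterDemo {
      import Converters._

      // Resolved through the implicit Converter[T] in scope.
      def parse[T](s: String)(implicit c: Converter[T]): T = c.convert(s)

      def main(args: Array[String]): Unit = {
        println(parse[Int]("42"))       // 42
        println(parse[Boolean]("true")) // true
      }
    }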

core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala

Lines changed: 11 additions & 4 deletions
@@ -391,19 +391,24 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
  /**
   * Save this RDD as a text file, using string representations of elements.
   */
-  def saveAsTextFile(path: String) = rdd.saveAsTextFile(path)
+  def saveAsTextFile(path: String): Unit = {
+    rdd.saveAsTextFile(path)
+  }


  /**
   * Save this RDD as a compressed text file, using string representations of elements.
   */
-  def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]) =
+  def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit = {
    rdd.saveAsTextFile(path, codec)
+  }

  /**
   * Save this RDD as a SequenceFile of serialized objects.
   */
-  def saveAsObjectFile(path: String) = rdd.saveAsObjectFile(path)
+  def saveAsObjectFile(path: String): Unit = {
+    rdd.saveAsObjectFile(path)
+  }

  /**
   * Creates tuples of the elements in this RDD by applying `f`.

@@ -420,7 +425,9 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   * executed on this RDD. It is strongly recommended that this RDD is persisted in
   * memory, otherwise saving it on a file will require recomputation.
   */
-  def checkpoint() = rdd.checkpoint()
+  def checkpoint(): Unit = {
+    rdd.checkpoint()
+  }

  /**
   * Return whether this RDD has been checkpointed or not

core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala

Lines changed: 1 addition & 1 deletion
@@ -463,7 +463,7 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork
    sc.setCheckpointDir(dir)
  }

-  def getCheckpointDir = JavaUtils.optionToOptional(sc.getCheckpointDir)
+  def getCheckpointDir: Optional[String] = JavaUtils.optionToOptional(sc.getCheckpointDir)

  protected def checkpointFile[T](path: String): JavaRDD[T] = {
    implicit val ctag: ClassTag[T] = fakeClassTag
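For context on the `Optional[String]` annotation above: the Java API hands back Guava's Optional rather than Scala's Option. The following is an illustration only, assuming Guava is on the classpath; Spark's JavaUtils.optionToOptional presumably does something equivalent.

    import com.google.common.base.Optional

    // Illustration only: convert a Scala Option to a Guava Optional for Java callers.
    object OptionalSketch {
      def toOptional[A](opt: Option[A]): Optional[A] = opt match {
        case Some(v) => Optional.of(v)
        case None    => Optional.absent[A]()
      }

      def main(args: Array[String]): Unit = {
        println(toOptional(Some("/tmp/checkpoints"))) // Optional.of(/tmp/checkpoints)
        println(toOptional(None: Option[String]))     // Optional.absent()
      }
    }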

core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala

Lines changed: 1 addition & 1 deletion
@@ -112,5 +112,5 @@ private[spark] class ClientArguments(args: Array[String]) {
 }

 object ClientArguments {
-  def isValidJarUrl(s: String) = s.matches("(.+):(.+)jar")
+  def isValidJarUrl(s: String): Boolean = s.matches("(.+):(.+)jar")
 }

core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ import scala.collection.JavaConversions._
  * Contains util methods to interact with Hadoop from Spark.
  */
 class SparkHadoopUtil {
-  val conf = newConfiguration()
+  val conf: Configuration = newConfiguration()
   UserGroupInformation.setConfiguration(conf)

   def runAsUser(user: String)(func: () => Unit) {

core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala

Lines changed: 2 additions & 1 deletion
@@ -20,6 +20,7 @@ package org.apache.spark.deploy.master
 import scala.collection.JavaConversions._

 import akka.serialization.Serialization
+import org.apache.curator.framework.CuratorFramework
 import org.apache.zookeeper.CreateMode

 import org.apache.spark.{Logging, SparkConf}

@@ -29,7 +30,7 @@ class ZooKeeperPersistenceEngine(serialization: Serialization, conf: SparkConf)
   with Logging
 {
   val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
-  val zk = SparkCuratorUtil.newClient(conf)
+  val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

   SparkCuratorUtil.mkdir(zk, WORKING_DIR)

core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ class ConsoleSink(val property: Properties, val registry: MetricRegistry,
     case None => CONSOLE_DEFAULT_PERIOD
   }

-  val pollUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
+  val pollUnit: TimeUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
     case Some(s) => TimeUnit.valueOf(s.toUpperCase())
     case None => TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT)
   }

core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ class CsvSink(val property: Properties, val registry: MetricRegistry,
     case None => CSV_DEFAULT_PERIOD
   }

-  val pollUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
+  val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
     case Some(s) => TimeUnit.valueOf(s.toUpperCase())
     case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
   }

core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ class GraphiteSink(val property: Properties, val registry: MetricRegistry,
   val GRAPHITE_KEY_UNIT = "unit"
   val GRAPHITE_KEY_PREFIX = "prefix"

-  def propertyToOption(prop: String) = Option(property.getProperty(prop))
+  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

   if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
     throw new Exception("Graphite sink requires 'host' property.")

@@ -57,7 +57,7 @@ class GraphiteSink(val property: Properties, val registry: MetricRegistry,
     case None => GRAPHITE_DEFAULT_PERIOD
   }

-  val pollUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
+  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
     case Some(s) => TimeUnit.valueOf(s.toUpperCase())
     case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
   }
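The pollUnit pattern repeated in ConsoleSink, CsvSink, and GraphiteSink above is simply "parse an optional property into a TimeUnit, with a default". A standalone sketch of that pattern (names are illustrative, not Spark's):

    import java.util.Properties
    import java.util.concurrent.TimeUnit

    // Standalone sketch of the sinks' polling-unit logic shown above.
    object PollUnitSketch {
      def main(args: Array[String]): Unit = {
        val props = new Properties()
        props.setProperty("unit", "minutes")

        // Explicitly typed, like the diffs above: read the optional key,
        // parse it via TimeUnit.valueOf, fall back to a default otherwise.
        val pollUnit: TimeUnit = Option(props.getProperty("unit")) match {
          case Some(s) => TimeUnit.valueOf(s.toUpperCase) // "MINUTES" -> TimeUnit.MINUTES
          case None    => TimeUnit.SECONDS
        }
        println(pollUnit) // MINUTES
      }
    }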

core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[_ <: Product2[K, _]]], part:
     array
   }

-  override val partitioner = Some(part)
+  override val partitioner: Some[Partitioner] = Some(part)

   override def compute(s: Partition, context: TaskContext): Iterator[(K, CoGroupCombiner)] = {
     val sparkConf = SparkEnv.get.conf
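A side note on the annotation chosen above: `Some[Partitioner]` is narrower than the `Option[Partitioner]` declared by the parent class, which is legal because a val override may refine its type covariantly. A tiny sketch with hypothetical stand-ins for RDD and Partitioner:

    // Hypothetical stand-ins; shows that a val override may narrow its type.
    trait Part
    class HashPart extends Part

    abstract class Dataset {
      val partitioner: Option[Part]
    }

    class CoGroupedDataset(part: Part) extends Dataset {
      // Some[Part] is a subtype of Option[Part], so this override is allowed.
      override val partitioner: Some[Part] = Some(part)
    }

    object OverrideSketch {
      def main(args: Array[String]): Unit = {
        val ds: Dataset = new CoGroupedDataset(new HashPart)
        println(ds.partitioner.isDefined) // true
      }
    }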

core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala

Lines changed: 1 addition & 1 deletion
@@ -171,7 +171,7 @@ class HadoopRDD[K, V](
     array
   }

-  override def compute(theSplit: Partition, context: TaskContext) = {
+  override def compute(theSplit: Partition, context: TaskContext): InterruptibleIterator[(K, V)] = {
     val iter = new NextIterator[(K, V)] {

       val split = theSplit.asInstanceOf[HadoopPartition]

core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ class JdbcRDD[T: ClassTag](
 }

 object JdbcRDD {
-  def resultSetToObjectArray(rs: ResultSet) = {
+  def resultSetToObjectArray(rs: ResultSet): Array[Object] = {
     Array.tabulate[Object](rs.getMetaData.getColumnCount)(i => rs.getObject(i + 1))
   }
 }

core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ class NewHadoopRDD[K, V](
     result
   }

-  override def compute(theSplit: Partition, context: TaskContext) = {
+  override def compute(theSplit: Partition, context: TaskContext): InterruptibleIterator[(K, V)] = {
     val iter = new Iterator[(K, V)] {
       val split = theSplit.asInstanceOf[NewHadoopPartition]
       logInfo("Input split: " + split.serializableHadoopSplit)

core/src/main/scala/org/apache/spark/rdd/RDD.scala

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ abstract class RDD[T: ClassTag](
   @transient var name: String = null

   /** Assign a name to this RDD */
-  def setName(_name: String) = {
+  def setName(_name: String): RDD[T] = {
     name = _name
     this
   }
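Because setName now advertises RDD[T], it reads naturally in a fluent chain. A usage sketch only, assuming a local SparkContext from the Spark version of this commit:

    import org.apache.spark.{SparkConf, SparkContext}

    // Usage sketch: setName returns the RDD itself, so it chains with other calls.
    object SetNameSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("setName-sketch"))
        val nums = sc.parallelize(1 to 10).setName("numbers").cache()
        println(nums.name)    // numbers
        println(nums.count()) // 10
        sc.stop()
      }
    }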

core/src/main/scala/org/apache/spark/storage/StorageLevel.scala

Lines changed: 4 additions & 3 deletions
@@ -126,15 +126,16 @@ object StorageLevel {
   val MEMORY_AND_DISK_SER_2 = new StorageLevel(true, true, false, 2)

   /** Create a new StorageLevel object */
-  def apply(useDisk: Boolean, useMemory: Boolean, deserialized: Boolean, replication: Int = 1) =
+  def apply(useDisk: Boolean, useMemory: Boolean, deserialized: Boolean,
+      replication: Int = 1): StorageLevel =
     getCachedStorageLevel(new StorageLevel(useDisk, useMemory, deserialized, replication))

   /** Create a new StorageLevel object from its integer representation */
-  def apply(flags: Int, replication: Int) =
+  def apply(flags: Int, replication: Int): StorageLevel =
     getCachedStorageLevel(new StorageLevel(flags, replication))

   /** Read StorageLevel object from ObjectInput stream */
-  def apply(in: ObjectInput) = {
+  def apply(in: ObjectInput): StorageLevel = {
     val obj = new StorageLevel()
     obj.readExternal(in)
     getCachedStorageLevel(obj)
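A usage sketch for the first factory above, assuming the StorageLevel API of this Spark version (disk, memory, deserialized, replication flags); note that apply routes through getCachedStorageLevel, so it hands back a cached level rather than a fresh instance each time.

    import org.apache.spark.storage.StorageLevel

    // Usage sketch only; assumes the StorageLevel factory shown in the diff above.
    object StorageLevelSketch {
      def main(args: Array[String]): Unit = {
        // Serialized, in memory and on disk, replicated to 2 nodes --
        // the same flags as the predefined MEMORY_AND_DISK_SER_2.
        val level: StorageLevel = StorageLevel(useDisk = true, useMemory = true,
          deserialized = false, replication = 2)
        println(level)
      }
    }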

core/src/main/scala/org/apache/spark/util/Distribution.scala

Lines changed: 5 additions & 2 deletions
@@ -19,6 +19,8 @@ package org.apache.spark.util

 import java.io.PrintStream

+import scala.collection.immutable.IndexedSeq
+
 /**
  * Util for getting some stats from a small sample of numeric values, with some handy
  * summary functions.

@@ -40,15 +42,16 @@ class Distribution(val data: Array[Double], val startIdx: Int, val endIdx: Int)
   * given from 0 to 1
   * @param probabilities
   */
-  def getQuantiles(probabilities: Traversable[Double] = defaultProbabilities) = {
+  def getQuantiles(probabilities: Traversable[Double] = defaultProbabilities)
+      : IndexedSeq[Double] = {
     probabilities.toIndexedSeq.map{p:Double => data(closestIndex(p))}
   }

   private def closestIndex(p: Double) = {
     math.min((p * length).toInt + startIdx, endIdx - 1)
   }

-  def showQuantiles(out: PrintStream = System.out) = {
+  def showQuantiles(out: PrintStream = System.out): Unit = {
     out.println("min\t25%\t50%\t75%\tmax")
     getQuantiles(defaultProbabilities).foreach{q => out.print(q + "\t")}
     out.println
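The quantile lookup above boils down to an index computation over a sorted sample (Spark's Distribution presumably expects its slice in sorted order). A self-contained sketch of the same idea, independent of Spark's class:

    // Self-contained sketch of the quantile-by-index idea used above.
    object QuantileSketch {
      def quantiles(sample: Array[Double], probabilities: Seq[Double]): IndexedSeq[Double] = {
        val sorted = sample.sorted
        val length = sorted.length
        // Same shape as closestIndex above: clamp p * length into the valid range.
        def closestIndex(p: Double): Int = math.min((p * length).toInt, length - 1)
        probabilities.toIndexedSeq.map(p => sorted(closestIndex(p)))
      }

      def main(args: Array[String]): Unit = {
        val data = Array(5.0, 1.0, 3.0, 2.0, 4.0)
        // min, quartiles, max -- mirroring the header printed by showQuantiles.
        println(quantiles(data, Seq(0.0, 0.25, 0.5, 0.75, 1.0)))
        // Vector(1.0, 2.0, 3.0, 4.0, 5.0)
      }
    }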

extras/spark-ganglia-lgpl/src/main/scala/org/apache/spark/metrics/sink/GangliaSink.scala

Lines changed: 7 additions & 5 deletions
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit
 import com.codahale.metrics.MetricRegistry
 import com.codahale.metrics.ganglia.GangliaReporter
 import info.ganglia.gmetric4j.gmetric.GMetric
+import info.ganglia.gmetric4j.gmetric.GMetric.UDPAddressingMode

 import org.apache.spark.SecurityManager
 import org.apache.spark.metrics.MetricsSystem

@@ -33,10 +34,10 @@ class GangliaSink(val property: Properties, val registry: MetricRegistry,
   val GANGLIA_DEFAULT_PERIOD = 10

   val GANGLIA_KEY_UNIT = "unit"
-  val GANGLIA_DEFAULT_UNIT = TimeUnit.SECONDS
+  val GANGLIA_DEFAULT_UNIT: TimeUnit = TimeUnit.SECONDS

   val GANGLIA_KEY_MODE = "mode"
-  val GANGLIA_DEFAULT_MODE = GMetric.UDPAddressingMode.MULTICAST
+  val GANGLIA_DEFAULT_MODE: UDPAddressingMode = GMetric.UDPAddressingMode.MULTICAST

   // TTL for multicast messages. If listeners are X hops away in network, must be at least X.
   val GANGLIA_KEY_TTL = "ttl"

@@ -45,7 +46,7 @@ class GangliaSink(val property: Properties, val registry: MetricRegistry,
   val GANGLIA_KEY_HOST = "host"
   val GANGLIA_KEY_PORT = "port"

-  def propertyToOption(prop: String) = Option(property.getProperty(prop))
+  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

   if (!propertyToOption(GANGLIA_KEY_HOST).isDefined) {
     throw new Exception("Ganglia sink requires 'host' property.")

@@ -58,11 +59,12 @@ class GangliaSink(val property: Properties, val registry: MetricRegistry,
   val host = propertyToOption(GANGLIA_KEY_HOST).get
   val port = propertyToOption(GANGLIA_KEY_PORT).get.toInt
   val ttl = propertyToOption(GANGLIA_KEY_TTL).map(_.toInt).getOrElse(GANGLIA_DEFAULT_TTL)
-  val mode = propertyToOption(GANGLIA_KEY_MODE)
+  val mode: UDPAddressingMode = propertyToOption(GANGLIA_KEY_MODE)
     .map(u => GMetric.UDPAddressingMode.valueOf(u.toUpperCase)).getOrElse(GANGLIA_DEFAULT_MODE)
   val pollPeriod = propertyToOption(GANGLIA_KEY_PERIOD).map(_.toInt)
     .getOrElse(GANGLIA_DEFAULT_PERIOD)
-  val pollUnit = propertyToOption(GANGLIA_KEY_UNIT).map(u => TimeUnit.valueOf(u.toUpperCase))
+  val pollUnit: TimeUnit = propertyToOption(GANGLIA_KEY_UNIT)
+    .map(u => TimeUnit.valueOf(u.toUpperCase))
     .getOrElse(GANGLIA_DEFAULT_UNIT)

   MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

graphx/src/main/scala/org/apache/spark/graphx/Graph.scala

Lines changed: 2 additions & 1 deletion
@@ -419,5 +419,6 @@ object Graph {
   * All the convenience operations are defined in the [[GraphOps]] class which may be
   * shared across multiple graph implementations.
   */
-  implicit def graphToGraphOps[VD: ClassTag, ED: ClassTag](g: Graph[VD, ED]) = g.ops
+  implicit def graphToGraphOps[VD: ClassTag, ED: ClassTag]
+      (g: Graph[VD, ED]): GraphOps[VD, ED] = g.ops
 } // end of Graph object

graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ class GraphImpl[VD: ClassTag, ED: ClassTag] protected (
   override def mapReduceTriplets[A: ClassTag](
       mapFunc: EdgeTriplet[VD, ED] => Iterator[(VertexId, A)],
       reduceFunc: (A, A) => A,
-      activeSetOpt: Option[(VertexRDD[_], EdgeDirection)] = None) = {
+      activeSetOpt: Option[(VertexRDD[_], EdgeDirection)] = None): VertexRDD[A] = {

     ClosureCleaner.clean(mapFunc)
     ClosureCleaner.clean(reduceFunc)

graphx/src/main/scala/org/apache/spark/graphx/lib/Analytics.scala

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ import org.apache.spark.graphx.PartitionStrategy._
  */
 object Analytics extends Logging {

-  def main(args: Array[String]) = {
+  def main(args: Array[String]): Unit = {
     val host = args(0)
     val taskType = args(1)
     val fname = args(2)

streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala

Lines changed: 2 additions & 2 deletions
@@ -431,7 +431,7 @@ class StreamingContext private[streaming] (
   * Stop the execution of the streams.
   * @param stopSparkContext Stop the associated SparkContext or not
   */
-  def stop(stopSparkContext: Boolean = true) = synchronized {
+  def stop(stopSparkContext: Boolean = true): Unit = synchronized {
     scheduler.stop()
     logInfo("StreamingContext stopped successfully")
     waiter.notifyStop()

@@ -489,7 +489,7 @@ object StreamingContext extends Logging {
   * Find the JAR from which a given class was loaded, to make it easy for users to pass
   * their JARs to StreamingContext.
   */
-  def jarOfClass(cls: Class[_]) = SparkContext.jarOfClass(cls)
+  def jarOfClass(cls: Class[_]): Seq[String] = SparkContext.jarOfClass(cls)

   private[streaming] def createNewSparkContext(conf: SparkConf): SparkContext = {
     // Set the default cleaner delay to an hour if not already set.
