
Commit 007762b: Remove dead scaladoc links
1 parent b8ff8cb commit 007762b

8 files changed, +31 -32 lines

bagel/src/main/scala/org/apache/spark/bagel/Bagel.scala

Lines changed: 8 additions & 8 deletions
@@ -27,7 +27,7 @@ object Bagel extends Logging {
 
   /**
    * Runs a Bagel program.
-   * @param sc [[org.apache.spark.SparkContext]] to use for the program.
+   * @param sc org.apache.spark.SparkContext to use for the program.
    * @param vertices vertices of the graph represented as an RDD of (Key, Vertex) pairs. Often the
    *        Key will be the vertex id.
    * @param messages initial set of messages represented as an RDD of (Key, Message) pairs. Often
@@ -38,12 +38,12 @@ object Bagel extends Logging {
    * @param aggregator [[org.apache.spark.bagel.Aggregator]] performs a reduce across all vertices
    *        after each superstep and provides the result to each vertex in the next
    *        superstep.
-   * @param partitioner [[org.apache.spark.Partitioner]] partitions values by key
+   * @param partitioner org.apache.spark.Partitioner partitions values by key
    * @param numPartitions number of partitions across which to split the graph.
    *        Default is the default parallelism of the SparkContext
-   * @param storageLevel [[org.apache.spark.storage.StorageLevel]] to use for caching of
+   * @param storageLevel org.apache.spark.storage.StorageLevel to use for caching of
    *        intermediate RDDs in each superstep. Defaults to caching in memory.
-   * @param compute function that takes a Vertex, optional set of (possibly combined) messages to
+   *@param compute function that takes a Vertex, optional set of (possibly combined) messages to
    *        the Vertex, optional Aggregator and the current superstep,
    *        and returns a set of (Vertex, outgoing Messages) pairs
    * @tparam K key
@@ -131,7 +131,7 @@ object Bagel extends Logging {
 
   /**
    * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]], default
-   * [[org.apache.spark.HashPartitioner]] and default storage level
+   * org.apache.spark.HashPartitioner and default storage level
    */
   def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest](
     sc: SparkContext,
@@ -146,7 +146,7 @@ object Bagel extends Logging {
 
   /**
    * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]] and the
-   * default [[org.apache.spark.HashPartitioner]]
+   * default org.apache.spark.HashPartitioner
    */
   def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest](
     sc: SparkContext,
@@ -166,7 +166,7 @@ object Bagel extends Logging {
 
   /**
    * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]],
-   * default [[org.apache.spark.HashPartitioner]],
+   * default org.apache.spark.HashPartitioner,
    * [[org.apache.spark.bagel.DefaultCombiner]] and the default storage level
    */
   def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest](
@@ -180,7 +180,7 @@ object Bagel extends Logging {
 
   /**
    * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]],
-   * the default [[org.apache.spark.HashPartitioner]]
+   * the default org.apache.spark.HashPartitioner
    * and [[org.apache.spark.bagel.DefaultCombiner]]
   */
   def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest](
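The scaladoc touched above describes the contract of the `compute` argument: it receives a Vertex, an optional set of (possibly combined) incoming messages, an optional Aggregator and the current superstep, and returns the updated Vertex plus its outgoing Messages. A minimal, self-contained sketch of that superstep contract follows; the PRVertex and PRMessage classes, the damping factor and the superstep limit are hypothetical placeholders, not part of the Bagel API.

```scala
// Hypothetical PageRank-style types standing in for user subclasses of
// org.apache.spark.bagel.Vertex and Message[K].
case class PRVertex(rank: Double, outEdges: Seq[String], active: Boolean)
case class PRMessage(targetId: String, rankShare: Double)

// One superstep: fold incoming messages into the vertex state and emit
// messages along the outgoing edges, as the @param compute doc describes.
def compute(self: PRVertex,
            msgs: Option[Seq[PRMessage]],
            superstep: Int): (PRVertex, Seq[PRMessage]) = {
  val incoming = msgs.getOrElse(Seq.empty).map(_.rankShare).sum
  val newRank  = if (superstep == 0) self.rank else 0.15 + 0.85 * incoming
  val outgoing =
    if (superstep < 10 && self.outEdges.nonEmpty)
      self.outEdges.map(dest => PRMessage(dest, newRank / self.outEdges.size))
    else Seq.empty
  (PRVertex(newRank, self.outEdges, active = outgoing.nonEmpty), outgoing)
}
```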

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 1 addition & 1 deletion
@@ -351,7 +351,7 @@ class SparkContext(
   * using the older MapReduce API (`org.apache.hadoop.mapred`).
   *
   * @param conf JobConf for setting up the dataset
-  * @param inputFormatClass Class of the [[InputFormat]]
+  * @param inputFormatClass Class of the InputFormat
   * @param keyClass Class of the keys
   * @param valueClass Class of the values
   * @param minSplits Minimum number of Hadoop Splits to generate.
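For context, a hedged sketch of calling hadoopRDD with the older `org.apache.hadoop.mapred` API; the master, app name and input path are placeholders, and TextInputFormat is used only as a familiar example of the inputFormatClass parameter.

```scala
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileInputFormat, JobConf, TextInputFormat}
import org.apache.spark.SparkContext

val sc = new SparkContext("local", "hadoopRDD sketch")        // placeholder master/app name

val jobConf = new JobConf()
FileInputFormat.setInputPaths(jobConf, "hdfs:///tmp/input")   // hypothetical path

// keyClass/valueClass match what TextInputFormat produces; minSplits is a hint.
val lines = sc.hadoopRDD(jobConf, classOf[TextInputFormat],
                         classOf[LongWritable], classOf[Text], minSplits = 2)
              .map { case (_, line) => line.toString }
```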

core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala

Lines changed: 2 additions & 2 deletions
@@ -23,9 +23,9 @@ import scala.util.control.{ControlThrowable, NonFatal}
 import com.typesafe.config.Config
 
 /**
- * An [[akka.actor.ActorSystem]] which refuses to shut down in the event of a fatal exception.
+ * An akka.actor.ActorSystem which refuses to shut down in the event of a fatal exception
  * This is necessary as Spark Executors are allowed to recover from fatal exceptions
- * (see [[org.apache.spark.executor.Executor]]).
+ * (see org.apache.spark.executor.Executor)
  */
 object IndestructibleActorSystem {
   def apply(name: String, config: Config): ActorSystem =

core/src/main/scala/org/apache/spark/util/StatCounter.scala

Lines changed: 1 addition & 2 deletions
@@ -20,8 +20,7 @@ package org.apache.spark.util
 /**
  * A class for tracking the statistics of a set of numbers (count, mean and variance) in a
  * numerically robust way. Includes support for merging two StatCounters. Based on
- * [[http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
- * Welford and Chan's algorithms for running variance]].
+ * [[http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance Welford and Chan's algorithms for running variance]].
  *
  * @constructor Initialize the StatCounter with the given values.
  */
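To illustrate the merge support the scaladoc mentions, a small hedged sketch; the sample values are arbitrary.

```scala
import org.apache.spark.util.StatCounter

val left  = StatCounter(Seq(1.0, 2.0, 3.0))
val right = StatCounter(Seq(10.0, 20.0))

// merge() combines count, mean and variance using the Welford/Chan-style update,
// so the result matches computing the statistics over all five values at once.
val combined = left.merge(right)
println(combined.count)     // 5
println(combined.mean)      // 7.2
println(combined.variance)
```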

core/src/main/scala/org/apache/spark/util/Vector.scala

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ object Vector {
 
   /**
    * Creates this [[org.apache.spark.util.Vector]] of given length containing random numbers
-   * between 0.0 and 1.0. Optional [[scala.util.Random]] number generator can be provided.
+   * between 0.0 and 1.0. Optional scala.util.Random number generator can be provided.
    */
   def random(length: Int, random: Random = new XORShiftRandom()) =
     Vector(length, _ => random.nextDouble())
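Usage follows directly from the signature shown in the diff; a brief sketch, relying only on the default generator.

```scala
import org.apache.spark.util.Vector

// Draw a length-5 vector of uniform values between 0.0 and 1.0 using the default
// XORShiftRandom generator; per the scaladoc above, a custom random number
// generator can optionally be passed as the second argument.
val v = Vector.random(5)
println(v.elements.mkString(", "))
```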

streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala

Lines changed: 8 additions & 8 deletions
@@ -127,7 +127,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
   /**
    * Return a new DStream by applying `groupByKey` on each RDD of `this` DStream.
    * Therefore, the values for each key in `this` DStream's RDDs are grouped into a
-   * single sequence to generate the RDDs of the new DStream. [[org.apache.spark.Partitioner]]
+   * single sequence to generate the RDDs of the new DStream. org.apache.spark.Partitioner
    * is used to control the partitioning of each RDD.
    */
   def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] =
@@ -151,7 +151,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
 
   /**
    * Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
-   * merged using the supplied reduce function. [[org.apache.spark.Partitioner]] is used to control
+   * merged using the supplied reduce function. org.apache.spark.Partitioner is used to control
    * thepartitioning of each RDD.
    */
   def reduceByKey(func: JFunction2[V, V, V], partitioner: Partitioner): JavaPairDStream[K, V] = {
@@ -161,7 +161,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
   /**
    * Combine elements of each key in DStream's RDDs using custom function. This is similar to the
    * combineByKey for RDDs. Please refer to combineByKey in
-   * [[org.apache.spark.rdd.PairRDDFunctions]] for more information.
+   * org.apache.spark.rdd.PairRDDFunctions for more information.
    */
   def combineByKey[C](createCombiner: JFunction[V, C],
     mergeValue: JFunction2[C, V, C],
@@ -176,7 +176,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
   /**
    * Combine elements of each key in DStream's RDDs using custom function. This is similar to the
    * combineByKey for RDDs. Please refer to combineByKey in
-   * [[org.apache.spark.rdd.PairRDDFunctions]] for more information.
+   * org.apache.spark.rdd.PairRDDFunctions for more information.
    */
   def combineByKey[C](createCombiner: JFunction[V, C],
     mergeValue: JFunction2[C, V, C],
@@ -479,7 +479,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
   /**
    * Return a new "state" DStream where the state for each key is updated by applying
    * the given function on the previous state of the key and the new values of the key.
-   * [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    * @param updateFunc State update function. If `this` function returns None, then
    *        corresponding state key-value pair will be eliminated.
    * @param partitioner Partitioner for controlling the partitioning of each RDD in the new
@@ -579,7 +579,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
 
   /**
    * Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
-   * The supplied [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    */
   def join[W](
     other: JavaPairDStream[K, W],
@@ -619,7 +619,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
 
   /**
    * Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
-   * The supplied [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    */
   def leftOuterJoin[W](
     other: JavaPairDStream[K, W],
@@ -660,7 +660,7 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
 
   /**
    * Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
-   * `other` DStream. The supplied [[org.apache.spark.Partitioner]] is used to control
+   * `other` DStream. The supplied org.apache.spark.Partitioner is used to control
    * the partitioning of each RDD.
    */
   def rightOuterJoin[W](
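These Java wrappers delegate to the same operations on the underlying DStream[(K, V)]. A hedged Scala-side sketch of the join-with-Partitioner behaviour described above; the hosts, ports, batch interval and partition count are placeholders.

```scala
import org.apache.spark.HashPartitioner
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.StreamingContext._  // pair-DStream operations

val ssc = new StreamingContext("local[2]", "join sketch", Seconds(2))

// Two keyed streams; in a real job these would carry meaningful values.
val clicks = ssc.socketTextStream("localhost", 9999).map(line => (line, 1))
val views  = ssc.socketTextStream("localhost", 9998).map(line => (line, 1))

// The supplied Partitioner controls the partitioning of each joined RDD.
val joined = clicks.join(views, new HashPartitioner(8))
joined.print()

ssc.start()
ssc.awaitTermination()
```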

streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala

Lines changed: 2 additions & 2 deletions
@@ -406,7 +406,7 @@ class JavaStreamingContext(val ssc: StreamingContext) {
    * JavaPairDStream in the list of JavaDStreams, convert it to a JavaDStream using
    * [[org.apache.spark.streaming.api.java.JavaPairDStream]].toJavaDStream().
    * In the transform function, convert the JavaRDD corresponding to that JavaDStream to
-   * a JavaPairRDD using [[org.apache.spark.api.java.JavaPairRDD]].fromJavaRDD().
+   * a JavaPairRDD using org.apache.spark.api.java.JavaPairRDD.fromJavaRDD().
    */
   def transform[T](
     dstreams: JList[JavaDStream[_]],
@@ -429,7 +429,7 @@ class JavaStreamingContext(val ssc: StreamingContext) {
    * JavaPairDStream in the list of JavaDStreams, convert it to a JavaDStream using
    * [[org.apache.spark.streaming.api.java.JavaPairDStream]].toJavaDStream().
    * In the transform function, convert the JavaRDD corresponding to that JavaDStream to
-   * a JavaPairRDD using [[org.apache.spark.api.java.JavaPairRDD]].fromJavaRDD().
+   * a JavaPairRDD using org.apache.spark.api.java.JavaPairRDD.fromJavaRDD().
    */
   def transform[K, V](
     dstreams: JList[JavaDStream[_]],

streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala

Lines changed: 8 additions & 8 deletions
@@ -65,7 +65,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying `groupByKey` on each RDD. The supplied
-   * [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    */
   def groupByKey(partitioner: Partitioner): DStream[(K, Seq[V])] = {
     val createCombiner = (v: V) => ArrayBuffer[V](v)
@@ -95,7 +95,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
-   * merged using the supplied reduce function. [[org.apache.spark.Partitioner]] is used to control
+   * merged using the supplied reduce function. org.apache.spark.Partitioner is used to control
    * the partitioning of each RDD.
    */
   def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): DStream[(K, V)] = {
@@ -376,7 +376,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
   /**
    * Return a new "state" DStream where the state for each key is updated by applying
    * the given function on the previous state of the key and the new values of the key.
-   * [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    * @param updateFunc State update function. If `this` function returns None, then
    *        corresponding state key-value pair will be eliminated.
    * @param partitioner Partitioner for controlling the partitioning of each RDD in the new
@@ -396,7 +396,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
   /**
    * Return a new "state" DStream where the state for each key is updated by applying
    * the given function on the previous state of the key and the new values of each key.
-   * [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * org.apache.spark.Partitioner is used to control the partitioning of each RDD.
    * @param updateFunc State update function. If `this` function returns None, then
    *        corresponding state key-value pair will be eliminated. Note, that
    *        this function may generate a different a tuple with a different key
@@ -453,7 +453,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
-   * The supplied [[org.apache.spark.Partitioner]] is used to partition the generated RDDs.
+   * The supplied org.apache.spark.Partitioner is used to partition the generated RDDs.
    */
   def cogroup[W: ClassTag](
     other: DStream[(K, W)],
@@ -483,7 +483,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
-   * The supplied [[org.apache.spark.Partitioner]] is used to control the partitioning of each RDD.
+   * The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
   */
   def join[W: ClassTag](
     other: DStream[(K, W)],
@@ -518,7 +518,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
-   * `other` DStream. The supplied [[org.apache.spark.Partitioner]] is used to control
+   * `other` DStream. The supplied org.apache.spark.Partitioner is used to control
    * the partitioning of each RDD.
    */
   def leftOuterJoin[W: ClassTag](
@@ -554,7 +554,7 @@ class PairDStreamFunctions[K: ClassTag, V: ClassTag](self: DStream[(K,V)])
 
   /**
    * Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
-   * `other` DStream. The supplied [[org.apache.spark.Partitioner]] is used to control
+   * `other` DStream. The supplied org.apache.spark.Partitioner is used to control
    * the partitioning of each RDD.
    */
   def rightOuterJoin[W: ClassTag](
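A hedged Scala sketch of the partitioner-taking overloads touched above, combining reduceByKey and updateStateByKey; the host, port, batch interval, partition count and checkpoint directory are placeholders.

```scala
import org.apache.spark.HashPartitioner
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.StreamingContext._  // brings the pair operations into scope

val ssc = new StreamingContext("local[2]", "stateful wordcount sketch", Seconds(1))
ssc.checkpoint("/tmp/streaming-checkpoint")  // required by updateStateByKey; placeholder path

val partitioner = new HashPartitioner(4)
val pairs = ssc.socketTextStream("localhost", 9999)
               .flatMap(_.split(" "))
               .map(word => (word, 1))

// reduceByKey with an explicit Partitioner controlling each generated RDD.
val perBatch = pairs.reduceByKey(_ + _, partitioner)

// updateStateByKey with the same Partitioner; returning None would drop the key.
val runningTotals = pairs.updateStateByKey(
  (values: Seq[Int], state: Option[Int]) => Some(values.sum + state.getOrElse(0)),
  partitioner)

perBatch.print()
runningTotals.print()
ssc.start()
ssc.awaitTermination()
```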
