
Commit 24d74c9

review commit
1 parent 763a5e4 commit 24d74c9

File tree

1 file changed: +5 additions, -5 deletions


core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala

Lines changed: 5 additions & 5 deletions
@@ -18,7 +18,7 @@
 package org.apache.spark.api.java
 
 import java.util.{Comparator, List => JList, Iterator => JIterator}
-import java.lang.{Iterable => JIterable}
+import java.lang.{Iterable => JIterable, Long => JLong}
 
 import scala.collection.JavaConversions._
 import scala.reflect.ClassTag
@@ -268,8 +268,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
    * won't trigger a spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
    */
-  def zipWithUniqueId[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithUniqueId(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, JLong]]
   }
 
   /**
@@ -279,8 +279,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
    * This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
    * This method needs to trigger a spark job when this RDD contains more than one partitions.
    */
-  def zipWithIndex[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithIndex(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, JLong]]
   }
 
   // Actions (launch a job to return a value to the user program)
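Why the change: the removed signatures, def zipWithUniqueId[Long]() and def zipWithIndex[Long](), declare a type parameter that happens to be named Long. In Scala that introduces a fresh, unbound type parameter shadowing the real Long type, so the index type in the returned JavaPairRDD was never actually java.lang.Long. The fix drops the spurious type parameter and returns JavaPairRDD[T, JLong], where JLong is the java.lang.Long alias added to the imports.

A minimal usage sketch of the fixed API against this patched JavaRDDLike; the local SparkContext setup and the variable names below are illustrative, not part of the commit:

    import java.lang.{Long => JLong}
    import java.util.Arrays
    import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}

    // Illustrative local context; any existing JavaSparkContext works the same way.
    val jsc = new JavaSparkContext("local", "zip-with-index-example")
    val words: JavaRDD[String] = jsc.parallelize(Arrays.asList("a", "b", "c"))

    // With this commit the index type is java.lang.Long, so callers get a real
    // boxed Long instead of an erased, unbound type parameter.
    val indexed: JavaPairRDD[String, JLong] = words.zipWithIndex()
    val unique: JavaPairRDD[String, JLong] = words.zipWithUniqueId()

    jsc.stop()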
