package org.apache.spark.api.java

import java.util.{Comparator, List => JList, Iterator => JIterator}
-import java.lang.{Iterable => JIterable}
+import java.lang.{Iterable => JIterable, Long => JLong}

import scala.collection.JavaConversions._
import scala.reflect.ClassTag
@@ -268,8 +268,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   * 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
   * won't trigger a spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
   */
-  def zipWithUniqueId[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithUniqueId(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithUniqueId()).asInstanceOf[JavaPairRDD[T, JLong]]
  }

  /**
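
Note on the hunk above: the old signature declared a spurious type parameter named Long, which shadowed the real Long type, so Java callers did not get a usable java.lang.Long value type. With the JLong alias the method now returns JavaPairRDD<T, java.lang.Long>. A minimal Java sketch of calling the fixed method (the local master and app name here are illustrative, not from the patch):

import java.util.Arrays;
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class ZipWithUniqueIdSketch {
  public static void main(String[] args) {
    JavaSparkContext sc = new JavaSparkContext("local", "zipWithUniqueId-sketch");
    // Two partitions: ids come from interleaved sequences k, n+k, 2*n+k, ...,
    // so they may have gaps, but no Spark job is triggered.
    JavaRDD<String> letters = sc.parallelize(Arrays.asList("a", "b", "c", "d"), 2);
    // The value type is now java.lang.Long, so plain Java generics work.
    JavaPairRDD<String, Long> withIds = letters.zipWithUniqueId();
    for (Tuple2<String, Long> t : withIds.collect()) {
      System.out.println(t._1() + " -> " + t._2());
    }
    sc.stop();
  }
}
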
@@ -279,8 +279,8 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
   * This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
   * This method needs to trigger a spark job when this RDD contains more than one partitions.
   */
-  def zipWithIndex[Long](): JavaPairRDD[T, Long] = {
-    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, Long]]
+  def zipWithIndex(): JavaPairRDD[T, JLong] = {
+    JavaPairRDD.fromRDD(rdd.zipWithIndex()).asInstanceOf[JavaPairRDD[T, JLong]]
  }

  // Actions (launch a job to return a value to the user program)
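
The second hunk gets the same treatment. Unlike zipWithUniqueId, zipWithIndex assigns contiguous 0-based indices in RDD order, which requires a Spark job when the RDD has more than one partition. A hedged continuation of the sketch above, reusing the assumed sc and letters:

// Contiguous, ordered indices instead of possibly-gapped unique ids.
// This launches a Spark job because letters has two partitions.
JavaPairRDD<String, Long> indexed = letters.zipWithIndex();
// Expected pairs: (a,0), (b,1), (c,2), (d,3)
for (Tuple2<String, Long> t : indexed.collect()) {
  System.out.println(t._2() + ": " + t._1());
}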