@@ -851,8 +851,10 @@ class SparkContext(
       partitions: Seq[Int],
       allowLocal: Boolean,
       resultHandler: (Int, U) => Unit) {
-    val outIndex = partitions.toSet.diff(rdd.partitions.map(_.index).toSet)
-    require(outIndex.isEmpty, "Partition index out of bounds: " + outIndex.mkString(","))
+    // TODO: All RDDs have continuous index space. How to ensure this?
+    partitions.foreach { p =>
+      require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
+    }
     val callSite = getCallSite
     val cleanedFunc = clean(func)
     logInfo("Starting job: " + callSite)
@@ -956,8 +958,9 @@ class SparkContext(
       resultHandler: (Int, U) => Unit,
       resultFunc: => R): SimpleFutureAction[R] =
   {
-    val outIndex = partitions.toSet.diff(rdd.partitions.map(_.index).toSet)
-    require(outIndex.isEmpty, "Partition index out of bounds: " + outIndex.mkString(","))
+    partitions.foreach { p =>
+      require(p >= 0 && p < rdd.partitions.size, s"Invalid partition requested: $p")
+    }
     val cleanF = clean(processPartition)
     val callSite = getCallSite
     val waiter = dagScheduler.submitJob(
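Both hunks make the same change: the old code built a set of the RDD's actual partition indices and reported any requested indices outside it, while the new code does a per-element bounds check against `rdd.partitions.size`, which is cheaper but relies on the indices being contiguous and zero-based (hence the TODO in the first hunk). A minimal standalone sketch contrasting the two checks — the object name, helper names, and the four-partition setup are illustrative, not part of the patch:

```scala
// Standalone sketch (not the Spark source) contrasting the two validations.
object PartitionCheckSketch {
  // Old approach: set difference against the actual partition indices,
  // so it works even if the index space had gaps.
  def checkBySetDiff(partitions: Seq[Int], validIndices: Seq[Int]): Unit = {
    val outIndex = partitions.toSet.diff(validIndices.toSet)
    require(outIndex.isEmpty, "Partition index out of bounds: " + outIndex.mkString(","))
  }

  // New approach: a simple range check per requested index. Assumes a
  // contiguous, zero-based index space (the TODO in the diff), and avoids
  // building two sets on every job submission.
  def checkByBounds(partitions: Seq[Int], numPartitions: Int): Unit = {
    partitions.foreach { p =>
      require(p >= 0 && p < numPartitions, s"Invalid partition requested: $p")
    }
  }

  def main(args: Array[String]): Unit = {
    val valid = 0 until 4                 // pretend the RDD has 4 partitions
    checkBySetDiff(Seq(0, 2), valid)      // passes
    checkByBounds(Seq(0, 2), valid.size)  // passes
    // Both reject an out-of-range index such as 7:
    try checkByBounds(Seq(7), valid.size)
    catch { case e: IllegalArgumentException => println(e.getMessage) }
  }
}
```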