
Commit 1d16afb

Replace JavaConversions implicits with JavaConverters and add scalastyle check
1 parent 7bc9a8c commit 1d16afb
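
The change swaps Scala's implicit JavaConversions for the explicit JavaConverters decorators throughout the code base, so every crossing between Java and Scala collections is spelled out with .asScala or .asJava, and the accompanying scalastyle check is intended to keep JavaConversions from creeping back in. For context, a minimal sketch of the two styles; the values below are illustrative and not part of this commit:

    // Old style: with scala.collection.JavaConversions._ in scope, a java.util.List
    // is silently usable wherever a Scala collection is expected; the conversion is
    // invisible at the call site.

    // New style: scala.collection.JavaConverters only adds .asScala / .asJava
    // decorators, so each wrapping is explicit and easy to grep for.
    import scala.collection.JavaConverters._

    val javaList: java.util.List[String] = java.util.Arrays.asList("a", "b")
    val asScalaSeq: Seq[String] = javaList.asScala               // Java -> Scala, explicit
    val backToJava: java.util.List[String] = asScalaSeq.asJava   // Scala -> Java, explicit

Both directions wrap rather than copy, so the converted view shares the underlying collection.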

171 files changed: 863 additions and 880 deletions
core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java

Lines changed: 2 additions & 2 deletions

@@ -24,7 +24,7 @@
 
 import scala.Option;
 import scala.Product2;
-import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
 import scala.collection.immutable.Map;
 import scala.reflect.ClassTag;
 import scala.reflect.ClassTag$;
@@ -160,7 +160,7 @@ public long getPeakMemoryUsedBytes() {
    */
  @VisibleForTesting
  public void write(Iterator<Product2<K, V>> records) throws IOException {
-    write(JavaConversions.asScalaIterator(records));
+    write(JavaConverters.asScalaIteratorConverter(records).asScala());
  }

  @Override
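
From Java there are no implicit decorators to import, so the converter object is invoked directly and .asScala() is called on the returned wrapper. The same explicit form also compiles from Scala; a small sketch with an illustrative iterator, not taken from this file:

    import scala.collection.JavaConverters

    val javaIt: java.util.Iterator[Int] = java.util.Arrays.asList(1, 2, 3).iterator()
    // Explicit converter call; equivalent to javaIt.asScala with the decorator import in scope.
    val scalaIt: Iterator[Int] = JavaConverters.asScalaIteratorConverter(javaIt).asScala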

core/src/main/scala/org/apache/spark/MapOutputTracker.scala

Lines changed: 2 additions & 2 deletions

@@ -21,8 +21,8 @@ import java.io._
 import java.util.concurrent.ConcurrentHashMap
 import java.util.zip.{GZIPInputStream, GZIPOutputStream}
 
+import scala.collection.JavaConverters._
 import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
-import scala.collection.JavaConversions._
 import scala.reflect.ClassTag
 
 import org.apache.spark.rpc.{RpcEndpointRef, RpcEnv, RpcCallContext, RpcEndpoint}
@@ -398,7 +398,7 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf)
  */
 private[spark] class MapOutputTrackerWorker(conf: SparkConf) extends MapOutputTracker(conf) {
   protected val mapStatuses: Map[Int, Array[MapStatus]] =
-    new ConcurrentHashMap[Int, Array[MapStatus]]
+    new ConcurrentHashMap[Int, Array[MapStatus]]().asScala
 }
 
 private[spark] object MapOutputTracker extends Logging {
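
Here the worker's mapStatuses map keeps its ConcurrentHashMap backing, but the Scala Map interface is now obtained through an explicit .asScala wrapper rather than an implicit conversion. The wrapper delegates to the same underlying Java map, so writes are visible through both views. A minimal sketch with illustrative types:

    import java.util.concurrent.ConcurrentHashMap
    import scala.collection.JavaConverters._

    val backing = new ConcurrentHashMap[Int, String]()
    val view: scala.collection.concurrent.Map[Int, String] = backing.asScala
    view.put(1, "a")                  // goes through to the Java map
    assert(backing.get(1) == "a")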

core/src/main/scala/org/apache/spark/SSLOptions.scala

Lines changed: 6 additions & 5 deletions

@@ -17,9 +17,11 @@
 
 package org.apache.spark
 
-import java.io.{File, FileInputStream}
-import java.security.{KeyStore, NoSuchAlgorithmException}
-import javax.net.ssl.{KeyManager, KeyManagerFactory, SSLContext, TrustManager, TrustManagerFactory}
+import java.io.File
+import java.security.NoSuchAlgorithmException
+import javax.net.ssl.SSLContext
+
+import scala.collection.JavaConverters._
 
 import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
 import org.eclipse.jetty.util.ssl.SslContextFactory
@@ -79,7 +81,6 @@ private[spark] case class SSLOptions(
   * object. It can be used then to compose the ultimate Akka configuration.
   */
  def createAkkaConfig: Option[Config] = {
-    import scala.collection.JavaConversions._
    if (enabled) {
      Some(ConfigFactory.empty()
        .withValue("akka.remote.netty.tcp.security.key-store",
@@ -97,7 +98,7 @@
        .withValue("akka.remote.netty.tcp.security.protocol",
          ConfigValueFactory.fromAnyRef(protocol.getOrElse("")))
        .withValue("akka.remote.netty.tcp.security.enabled-algorithms",
-          ConfigValueFactory.fromIterable(supportedAlgorithms.toSeq))
+          ConfigValueFactory.fromIterable(supportedAlgorithms.asJava))
        .withValue("akka.remote.netty.tcp.enable-ssl",
          ConfigValueFactory.fromAnyRef(true)))
    } else {
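
Typesafe Config's ConfigValueFactory.fromIterable expects a java.lang.Iterable, which the old code satisfied via the implicit conversion after a .toSeq; with JavaConverters the Scala collection is handed over with an explicit .asJava. A minimal sketch with an illustrative algorithm set:

    import scala.collection.JavaConverters._
    import com.typesafe.config.ConfigValueFactory

    val algorithms = Set("TLS_RSA_WITH_AES_128_CBC_SHA")
    // .asJava yields a java.util.Set, which is the java.lang.Iterable that fromIterable requires.
    val value = ConfigValueFactory.fromIterable(algorithms.asJava)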

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 2 additions & 2 deletions

@@ -26,8 +26,8 @@ import java.util.{Arrays, Properties, UUID}
 import java.util.concurrent.atomic.{AtomicReference, AtomicBoolean, AtomicInteger}
 import java.util.UUID.randomUUID
 
+import scala.collection.JavaConverters._
 import scala.collection.{Map, Set}
-import scala.collection.JavaConversions._
 import scala.collection.generic.Growable
 import scala.collection.mutable.HashMap
 import scala.reflect.{ClassTag, classTag}
@@ -1533,7 +1533,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
  def getAllPools: Seq[Schedulable] = {
    assertNotStopped()
    // TODO(xiajunluan): We should take nested pools into account
-    taskScheduler.rootPool.schedulableQueue.toSeq
+    taskScheduler.rootPool.schedulableQueue.asScala.toSeq
  }
 
  /**
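
schedulableQueue is a Java concurrent collection (as the .asScala call implies), so materializing it as a Scala Seq now goes through an explicit view before .toSeq. A minimal sketch with an illustrative queue of pool names:

    import java.util.concurrent.ConcurrentLinkedQueue
    import scala.collection.JavaConverters._

    val queue = new ConcurrentLinkedQueue[String]()
    queue.add("default-pool")
    // .asScala is a live Iterable view over the Java queue; .toSeq copies it out.
    val pools: Seq[String] = queue.asScala.toSeq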

core/src/main/scala/org/apache/spark/TestUtils.scala

Lines changed: 5 additions & 4 deletions

@@ -19,11 +19,12 @@ package org.apache.spark
 
 import java.io.{ByteArrayInputStream, File, FileInputStream, FileOutputStream}
 import java.net.{URI, URL}
+import java.nio.charset.StandardCharsets
+import java.util.Arrays
 import java.util.jar.{JarEntry, JarOutputStream}
 
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
 
-import com.google.common.base.Charsets.UTF_8
 import com.google.common.io.{ByteStreams, Files}
 import javax.tools.{JavaFileObject, SimpleJavaFileObject, ToolProvider}
 
@@ -71,7 +72,7 @@ private[spark] object TestUtils {
    files.foreach { case (k, v) =>
      val entry = new JarEntry(k)
      jarStream.putNextEntry(entry)
-      ByteStreams.copy(new ByteArrayInputStream(v.getBytes(UTF_8)), jarStream)
+      ByteStreams.copy(new ByteArrayInputStream(v.getBytes(StandardCharsets.UTF_8)), jarStream)
    }
    jarStream.close()
    jarFile.toURI.toURL
@@ -125,7 +126,7 @@ private[spark] object TestUtils {
    } else {
      Seq()
    }
-    compiler.getTask(null, null, null, options, null, Seq(sourceFile)).call()
+    compiler.getTask(null, null, null, options.asJava, null, Arrays.asList(sourceFile)).call()
 
    val fileName = className + ".class"
    val result = new File(fileName)
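
Two independent cleanups land here: Guava's Charsets.UTF_8 is replaced by the JDK's StandardCharsets.UTF_8, and the javax.tools compiler API, which takes java.lang.Iterable arguments, now receives an explicit options.asJava and Arrays.asList(sourceFile) instead of relying on implicit conversions. A minimal sketch of both conversions, with illustrative values:

    import java.nio.charset.StandardCharsets
    import scala.collection.JavaConverters._

    // JDK charset constant instead of com.google.common.base.Charsets.UTF_8.
    val bytes = "package test".getBytes(StandardCharsets.UTF_8)

    // A Scala Seq converted explicitly where a java.lang.Iterable[String] is expected.
    val options = Seq("-classpath", ".")
    val javaOptions: java.lang.Iterable[String] = options.asJava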

core/src/main/scala/org/apache/spark/api/java/JavaHadoopRDD.scala

Lines changed: 2 additions & 2 deletions

@@ -17,7 +17,7 @@
 
 package org.apache.spark.api.java
 
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
 import scala.reflect.ClassTag
 
 import org.apache.hadoop.mapred.InputSplit
@@ -37,7 +37,7 @@ class JavaHadoopRDD[K, V](rdd: HadoopRDD[K, V])
  def mapPartitionsWithInputSplit[R](
      f: JFunction2[InputSplit, java.util.Iterator[(K, V)], java.util.Iterator[R]],
      preservesPartitioning: Boolean = false): JavaRDD[R] = {
-    new JavaRDD(rdd.mapPartitionsWithInputSplit((a, b) => f.call(a, asJavaIterator(b)),
+    new JavaRDD(rdd.mapPartitionsWithInputSplit((a, b) => f.call(a, b.asJava).asScala,
      preservesPartitioning)(fakeClassTag))(fakeClassTag)
  }
 }
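
At the Java API boundary the conversion now runs in both directions explicitly: the Scala iterator is wrapped with .asJava before being handed to the user's function, and the java.util.Iterator the function returns is wrapped back with .asScala. A minimal sketch of that round trip, using a hypothetical adapter rather than the actual RDD plumbing:

    import scala.collection.JavaConverters._

    // Hand a Java view of a Scala iterator to Java-facing code, then view the result
    // as a Scala iterator again; both wrappers are lazy and share the underlying data.
    def adapt[A, B](input: Iterator[A])
                   (f: java.util.Iterator[A] => java.util.Iterator[B]): Iterator[B] =
      f(input.asJava).asScala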

core/src/main/scala/org/apache/spark/api/java/JavaNewHadoopRDD.scala

Lines changed: 2 additions & 2 deletions

@@ -17,7 +17,7 @@
 
 package org.apache.spark.api.java
 
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
 import scala.reflect.ClassTag
 
 import org.apache.hadoop.mapreduce.InputSplit
@@ -37,7 +37,7 @@ class JavaNewHadoopRDD[K, V](rdd: NewHadoopRDD[K, V])
  def mapPartitionsWithInputSplit[R](
      f: JFunction2[InputSplit, java.util.Iterator[(K, V)], java.util.Iterator[R]],
      preservesPartitioning: Boolean = false): JavaRDD[R] = {
-    new JavaRDD(rdd.mapPartitionsWithInputSplit((a, b) => f.call(a, asJavaIterator(b)),
+    new JavaRDD(rdd.mapPartitionsWithInputSplit((a, b) => f.call(a, b.asJava).asScala,
      preservesPartitioning)(fakeClassTag))(fakeClassTag)
  }
 }

core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala

Lines changed: 8 additions & 11 deletions

@@ -20,7 +20,7 @@ package org.apache.spark.api.java
 import java.util.{Comparator, List => JList, Map => JMap}
 import java.lang.{Iterable => JIterable}
 
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
 import scala.language.implicitConversions
 import scala.reflect.ClassTag
 
@@ -142,7 +142,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
  def sampleByKey(withReplacement: Boolean,
      fractions: JMap[K, Double],
      seed: Long): JavaPairRDD[K, V] =
-    new JavaPairRDD[K, V](rdd.sampleByKey(withReplacement, fractions, seed))
+    new JavaPairRDD[K, V](rdd.sampleByKey(withReplacement, fractions.asScala, seed))
 
  /**
   * Return a subset of this RDD sampled by key (via stratified sampling).
@@ -173,7 +173,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
  def sampleByKeyExact(withReplacement: Boolean,
      fractions: JMap[K, Double],
      seed: Long): JavaPairRDD[K, V] =
-    new JavaPairRDD[K, V](rdd.sampleByKeyExact(withReplacement, fractions, seed))
+    new JavaPairRDD[K, V](rdd.sampleByKeyExact(withReplacement, fractions.asScala, seed))
 
  /**
   * ::Experimental::
@@ -768,7 +768,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
   * Return the list of values in the RDD for key `key`. This operation is done efficiently if the
   * RDD has a known partitioner by only searching the partition that the key maps to.
   */
-  def lookup(key: K): JList[V] = seqAsJavaList(rdd.lookup(key))
+  def lookup(key: K): JList[V] = rdd.lookup(key).asJava
 
  /** Output the RDD to any Hadoop-supported file system. */
  def saveAsHadoopFile[F <: OutputFormat[_, _]](
@@ -987,30 +987,27 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
 object JavaPairRDD {
  private[spark]
  def groupByResultToJava[K: ClassTag, T](rdd: RDD[(K, Iterable[T])]): RDD[(K, JIterable[T])] = {
-    rddToPairRDDFunctions(rdd).mapValues(asJavaIterable)
+    rddToPairRDDFunctions(rdd).mapValues(_.asJava)
  }
 
  private[spark]
  def cogroupResultToJava[K: ClassTag, V, W](
      rdd: RDD[(K, (Iterable[V], Iterable[W]))]): RDD[(K, (JIterable[V], JIterable[W]))] = {
-    rddToPairRDDFunctions(rdd).mapValues(x => (asJavaIterable(x._1), asJavaIterable(x._2)))
+    rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava))
  }
 
  private[spark]
  def cogroupResult2ToJava[K: ClassTag, V, W1, W2](
      rdd: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))])
    : RDD[(K, (JIterable[V], JIterable[W1], JIterable[W2]))] = {
-    rddToPairRDDFunctions(rdd)
-      .mapValues(x => (asJavaIterable(x._1), asJavaIterable(x._2), asJavaIterable(x._3)))
+    rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava, x._3.asJava))
  }
 
  private[spark]
  def cogroupResult3ToJava[K: ClassTag, V, W1, W2, W3](
      rdd: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))])
    : RDD[(K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3]))] = {
-    rddToPairRDDFunctions(rdd)
-      .mapValues(x =>
-        (asJavaIterable(x._1), asJavaIterable(x._2), asJavaIterable(x._3), asJavaIterable(x._4)))
+    rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava, x._3.asJava, x._4.asJava))
  }
 
  def fromRDD[K: ClassTag, V: ClassTag](rdd: RDD[(K, V)]): JavaPairRDD[K, V] = {
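
The helpers in object JavaPairRDD convert each grouped value from a Scala Iterable to a java.lang.Iterable, now per element with an explicit _.asJava instead of the implicit asJavaIterable. A minimal sketch of the same per-value conversion on an illustrative plain Map rather than an RDD:

    import scala.collection.JavaConverters._

    val grouped: Map[String, Iterable[Int]] = Map("a" -> Seq(1, 2), "b" -> Seq(3))
    // Each value is wrapped as a java.lang.Iterable; the map structure is untouched.
    val forJava: Map[String, java.lang.Iterable[Int]] =
      grouped.map { case (k, v) => k -> v.asJava }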
