Skip to content

Commit 274b238

Browse files
committed
[SPARK-2627] [PySpark] minor indentation changes
1 parent 983d963 commit 274b238

File tree

1 file changed: +9 additions, −7 deletions

python/pyspark/tests.py

Lines changed: 9 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -594,7 +594,8 @@ def test_oldhadoop(self):
594594
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
595595
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
596596
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
597-
"mapred.output.dir": basepath + "/olddataset/"}
597+
"mapred.output.dir": basepath + "/olddataset/"
598+
}
598599
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
599600
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
600601
old_dataset = sorted(self.sc.hadoopRDD(
@@ -624,11 +625,13 @@ def test_newhadoop(self):
624625
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
625626
self.assertEqual(result, array_data)
626627

627-
conf = {"mapreduce.outputformat.class":
628+
conf = {
629+
"mapreduce.outputformat.class":
628630
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
629-
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
630-
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
631-
"mapred.output.dir": basepath + "/newdataset/"}
631+
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
632+
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
633+
"mapred.output.dir": basepath + "/newdataset/"
634+
}
632635
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
633636
conf,
634637
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
@@ -1012,8 +1015,7 @@ class NumPyTests(PySparkTestCase):
10121015
"""General PySpark tests that depend on numpy """
10131016

10141017
def test_statcounter_array(self):
1015-
x = self.sc.parallelize(
1016-
[np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
1018+
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
10171019
s = x.stats()
10181020
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
10191021
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())

Comments: 0 commit comments