@@ -134,6 +134,7 @@ class MaxHeapQ(object):
 
     """
     An implementation of MaxHeap.
+
     >>> import pyspark.rdd
     >>> heap = pyspark.rdd.MaxHeapQ(5)
     >>> [heap.insert(i) for i in range(10)]
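Note on the hunk above: MaxHeapQ is a fixed-capacity max-heap, so feeding ten values into a capacity-5 heap keeps only the five smallest (it appears to back takeOrdered internally). A minimal sketch of the same idea on top of the standard heapq module; BoundedMaxHeap and its method names are illustrative, not pyspark's API:

import heapq

class BoundedMaxHeap(object):
    """Sketch of the idea behind pyspark.rdd.MaxHeapQ: keep the
    `maxsize` smallest items seen so far, evicting the largest first."""

    def __init__(self, maxsize):
        self.maxsize = maxsize
        self._heap = []  # negated values, so heapq's min-heap acts as a max-heap

    def insert(self, value):
        if len(self._heap) < self.maxsize:
            heapq.heappush(self._heap, -value)
        elif -value > self._heap[0]:  # new value is smaller than the current max
            heapq.heapreplace(self._heap, -value)

    def getElements(self):
        return sorted(-v for v in self._heap)

# BoundedMaxHeap(5) fed range(10) ends up holding [0, 1, 2, 3, 4].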
@@ -381,6 +382,7 @@ def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
     def getNumPartitions(self):
         """
         Returns the number of partitions in RDD
+
         >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
         >>> rdd.getNumPartitions()
         2
@@ -570,6 +572,7 @@ def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
         """
         Sorts this RDD, which is assumed to consist of (key, value) pairs.
         # noqa
+
         >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
         >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
         [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
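Note on the sortByKey hunk: the `keyfunc` parameter transforms each key before comparison, which the doctest does not exercise. A usage sketch, assuming a live SparkContext bound to `sc` as in the doctests:

# Case-insensitive sort via keyfunc; assumes an active SparkContext `sc`.
tmp = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
sc.parallelize(tmp).sortByKey(True, 2, keyfunc=lambda k: k.lower()).collect()
# [('a', 3), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1)]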
@@ -1205,6 +1208,7 @@ def collectAsMap(self):
     def keys(self):
         """
         Return an RDD with the keys of each tuple.
+
         >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
         >>> m.collect()
         [1, 3]
@@ -1214,6 +1218,7 @@ def keys(self):
     def values(self):
         """
         Return an RDD with the values of each tuple.
+
         >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
         >>> m.collect()
         [2, 4]
@@ -1638,6 +1643,7 @@ def repartition(self, numPartitions):
         Internally, this uses a shuffle to redistribute data.
         If you are decreasing the number of partitions in this RDD, consider
         using `coalesce`, which can avoid performing a shuffle.
+
         >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
         >>> sorted(rdd.glom().collect())
         [[1], [2, 3], [4, 5], [6, 7]]
@@ -1652,6 +1658,7 @@ def repartition(self, numPartitions):
     def coalesce(self, numPartitions, shuffle=False):
         """
         Return a new RDD that is reduced into `numPartitions` partitions.
+
         >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
         [[1], [2, 3], [4, 5]]
         >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
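Note on the two hunks above: repartition(n) always shuffles, while coalesce(n) with the default shuffle=False only merges existing partitions and therefore cannot increase the partition count. A quick sketch, again assuming a live SparkContext `sc`:

rdd = sc.parallelize(range(100), 10)
rdd.coalesce(2).getNumPartitions()                 # 2: partitions merged, no shuffle
rdd.coalesce(20).getNumPartitions()                # 10: cannot grow without a shuffle
rdd.coalesce(20, shuffle=True).getNumPartitions()  # 20: shuffle allows growing
rdd.repartition(20).getNumPartitions()             # 20: repartition always shuffles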
@@ -1690,6 +1697,7 @@ def name(self):
     def setName(self, name):
         """
         Assign a name to this RDD.
+
         >>> rdd1 = sc.parallelize([1,2])
         >>> rdd1.setName('RDD1')
         >>> rdd1.name()
@@ -1749,6 +1757,7 @@ class PipelinedRDD(RDD):
 
     """
     Pipelined maps:
+
     >>> rdd = sc.parallelize([1, 2, 3, 4])
     >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
     [4, 8, 12, 16]
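Note on the PipelinedRDD hunk: the doctest works because successive narrow transformations are fused into a single Python function applied once per partition, rather than materializing an intermediate RDD between the two maps. A sketch of the composition trick, simplified from what PipelinedRDD does; the names here are illustrative:

# Each transformation is a function of (split_index, iterator) -> iterator.
# Pipelining composes the new function over the previous one, so a chain of
# maps becomes one pass over each partition.
def fuse(prev_func, next_func):
    def pipelined_func(split, iterator):
        return next_func(split, prev_func(split, iterator))
    return pipelined_func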