[SPARK-2871] [PySpark] Add missing API #1791
File: python/pyspark/context.py
@@ -153,6 +153,7 @@ def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize,

        # Create the Java SparkContext through Py4J
        self._jsc = self._initialize_context(self._conf._jconf)
        self._conf._readonly = True

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
@@ -260,6 +261,22 @@ def defaultMinPartitions(self):
        """
        return self._jsc.sc().defaultMinPartitions()

    @property
    def isLocal(self):
        """
        Whether the context runs locally.
        """
        return self._jsc.isLocal()

    @property
    def conf(self):
        """
        The L{SparkConf} object.

        Configuration cannot be changed after initialization.
        """
        return self._conf
Review comment: I agree with Josh here, you need to clone the conf before returning it.
Reply: I will return a read-only copy of it.
    def stop(self):
        """
        Shut down the SparkContext.
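A minimal sketch of the read-only-copy idea from the `conf` thread above, written as a free function rather than the property itself; the `_readonly` attribute echoes the flag set in `_do_init` in this diff, and the copy-via-`setAll` approach is an assumption, not the code that was merged:

```python
from pyspark.conf import SparkConf


def read_only_conf(sc):
    """
    Return a detached copy of the SparkConf behind an active SparkContext.

    The (key, value) pairs are copied into a fresh SparkConf instead of
    handing out the live object, so callers cannot accidentally mutate the
    configuration the running context was created with.
    """
    conf = SparkConf(loadDefaults=False)   # empty conf; the JVM is already up via sc
    conf.setAll(sc._conf.getAll())         # copy every (key, value) pair
    conf._readonly = True                  # hypothetical flag, mirrors _do_init above
    return conf
```

Because the copy is detached, anything set on it afterwards never reaches the running context, which is the guard-against-misuse behavior the reviewers are asking for.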
@@ -733,6 +750,13 @@ def sparkUser(self):
        """
        return self._jsc.sc().sparkUser()

    @property
    def startTime(self):
        """
        Return the start time of the context, in milliseconds.
Review comment: This …
Reply: I saw it in the Java API docs, so I added it here.
Review comment: The primary use of this, outside of SparkContext, seems to be printing the context's uptime. So, why not add an uptime property instead?
Reply: Changing it to uptime would not improve anything; or should it just be removed?
        """
        return self._jsc.startTime()
    def cancelJobGroup(self, groupId):
        """
        Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
@@ -772,6 +796,13 @@ def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
        it = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, javaPartitions, allowLocal)
        return list(mappedRDD._collect_iterator_through_file(it))

    # TODO
    # def runApproximateJob(self, rdd, func, evaluator, timeout):
    #     """
    #     :: DeveloperApi ::
    #     Run a job that can return approximate results.
    #     """


def _test():
    import atexit
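Since runJob only appears here as diff context, a short usage sketch may help; the app name and the data are illustrative only:

```python
from pyspark import SparkContext

sc = SparkContext("local", "runJob-example")
rdd = sc.parallelize(range(6), 3)   # partitions: [0, 1], [2, 3], [4, 5]

# Run the summing function on the first and last partitions only.
sums = sc.runJob(rdd, lambda iterator: [sum(iterator)], partitions=[0, 2])
print(sums)   # [1, 9]
```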
File: python/pyspark/rdd.py
@@ -17,22 +17,24 @@

from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from collections import namedtuple
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
import array
import bisect
import math
from collections import defaultdict, namedtuple
from itertools import chain, ifilter, imap
from random import Random
from math import sqrt, log
from bisect import bisect_right
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread

from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
    BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
@@ -1741,6 +1743,13 @@ def batch_as(rdd, batchSize):
                                   other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)

    # TODO
    # def zipPartitions(self, other, f, preservesPartitioning=False):
    #     """
    #     Zip this RDD's partitions with one (or more) RDD(s) and return a
    #     new RDD by applying a function to the zipped partitions.
    #     """

    def zipWithIndex(self):
        """
        Zips this RDD with its element indices.
Review comment: The Scala documentation is much more descriptive about what this method does:

    /**
     * Zips this RDD with its element indices. The ordering is first based on the partition index
     * and then the ordering of items within each partition. So the first item in the first
     * partition gets index 0, and the last item in the last partition receives the largest index.
     * This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
     * This method needs to trigger a spark job when this RDD contains more than one partitions.
     */
    def zipWithIndex(): RDD[(T, Long)] = new ZippedWithIndexRDD(this)

The Python documentation should explain these subtleties, too.
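One way the Python docstring could pick up those subtleties (a sketch, not the wording that was eventually committed; the doctest assumes the usual ordering of `parallelize` in local mode):

```python
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in the
    first partition gets index 0, and the last item in the last
    partition receives the largest index.

    This method needs to trigger a Spark job when this RDD contains
    more than one partition.

    >>> sc.parallelize(["a", "b", "c", "d"], 2).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
```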
@@ -1850,10 +1859,28 @@ def _defaultReducePartitions(self):
        else:
            return self.getNumPartitions()

    # TODO: `lookup` is disabled because we can't make direct comparisons based
    # on the key; we need to compare the hash of the key to the hash of the
    # keys in the pairs. This could be an expensive operation, since those
    # hashes aren't retained.

    # TODO
    # def countApproxDistinctByKey(self, timeout, confidence=0.95):
    #     """
    #     :: Experimental ::
    #     Return approximate number of distinct values for each key in this RDD.
    #     """

    # TODO
    # def countByKeyApprox(self, timeout, confidence=0.95):
    #     """
    #     :: Experimental ::
    #     Approximate version of countByKey that can return a partial result
    #     if it does not finish within a timeout.
    #     """
    #
    # def countByValueApprox(self, timeout, confidence=0.95):
    #     """
    #     :: Experimental ::
    #     Approximate version of countByValue().
    #     """
    #     return self.map(lambda x: (x, None)).countByKeyApprox(timeout, confidence)

Review comment (on the lookup TODO above): If you'd like, you can implement lookup() the same way as in Scala; it's not too bad.

    def _is_pickled(self):
        """ Return whether this RDD is serialized by Pickle or not. """
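A rough sketch of the lookup() the reviewer mentions, following the simple approach of filtering by key and collecting the matching values; it ignores the partitioner-based fast path in the Scala version and is not part of this PR:

```python
def lookup_values(rdd, key):
    """
    Return the list of values in a pair RDD whose key equals ``key``.

    A naive, full-scan version: every partition is filtered, so unlike
    the Scala implementation there is no shortcut for RDDs that carry a
    partitioner and could touch a single partition instead.
    """
    return rdd.filter(lambda kv: kv[0] == key).map(lambda kv: kv[1]).collect()

# Example:
#   pairs = sc.parallelize([("a", 1), ("b", 2), ("a", 3)])
#   lookup_values(pairs, "a")   # [1, 3]
```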
Review comment: This needs a docstring. Also, the Scala equivalent of this clones the SparkConf, because it cannot be changed at runtime. We might want to do the same thing here (to guard against misuse). I'm not sure how clone() interacts with Py4J objects; do we need to implement a custom clone method for objects that have Py4J objects as fields, one that calls those objects' JVM clone methods?
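As a sketch of that second idea, here is one possible custom clone for a hypothetical Python wrapper whose only Py4J field is the Java SparkConf; whether this pattern is safe for every Py4J-backed object is exactly the open question in the comment above:

```python
class ConfWrapper(object):
    """Stand-in for a Python object that holds a Java SparkConf via Py4J."""

    def __init__(self, jconf):
        self._jconf = jconf   # a Py4J JavaObject wrapping org.apache.spark.SparkConf

    def clone(self):
        # A plain Python copy would only duplicate the proxy, so both copies
        # would still point at the same JVM object. Clone on the JVM side
        # instead (Scala's SparkConf is cloneable), then wrap the new Java
        # object in a fresh Python wrapper.
        return ConfWrapper(self._jconf.clone())
```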