Commit cb51e6a

add BinaryClassificationEvaluator in PySpark

1 parent 3539cb7 commit cb51e6a

File tree: 6 files changed (+174, -2 lines)


python/pyspark/ml/evaluation.py

Lines changed: 106 additions & 0 deletions

@@ -0,0 +1,106 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyspark.ml.wrapper import JavaEvaluator
+from pyspark.ml.param import Param, Params
+from pyspark.ml.param.shared import HasLabelCol, HasRawPredictionCol
+from pyspark.ml.util import keyword_only
+
+__all__ = ['BinaryClassificationEvaluator']
+
+
+class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol):
+    """
+    Evaluator for binary classification, which expects two input
+    columns: rawPrediction and label.
+
+    >>> from pyspark.mllib.linalg import Vectors
+    >>> scoreAndLabels = sc.parallelize([
+    ...    (0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
+    >>> rawPredictionAndLabels = scoreAndLabels.map(
+    ...    lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]))
+    >>> dataset = rawPredictionAndLabels.toDF(["raw", "label"])
+    >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
+    >>> evaluator.evaluate(dataset)
+    0.70...
+    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
+    0.83...
+    """
+
+    _java_class = "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator"
+
+    # a placeholder to make it appear in the generated doc
+    metricName = Param(Params._dummy(), "metricName",
+                       "metric name in evaluation (areaUnderROC|areaUnderPR)")
+
+    @keyword_only
+    def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
+                 metricName="areaUnderROC"):
+        """
+        __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
+                 metricName="areaUnderROC")
+        """
+        super(BinaryClassificationEvaluator, self).__init__()
+        #: param for metric name in evaluation (areaUnderROC|areaUnderPR)
+        self.metricName = Param(self, "metricName",
+                                "metric name in evaluation (areaUnderROC|areaUnderPR)")
+        self._setDefault(rawPredictionCol="rawPrediction", labelCol="label",
+                         metricName="areaUnderROC")
+        kwargs = self.__init__._input_kwargs
+        self._set(**kwargs)
+
+    def setMetricName(self, value):
+        """
+        Sets the value of :py:attr:`metricName`.
+        """
+        self.paramMap[self.metricName] = value
+        return self
+
+    def getMetricName(self):
+        """
+        Gets the value of metricName or its default value.
+        """
+        return self.getOrDefault(self.metricName)
+
+    @keyword_only
+    def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
+                  metricName="areaUnderROC"):
+        """
+        setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
+                  metricName="areaUnderROC")
+        Sets params for binary classification evaluator.
+        """
+        kwargs = self.setParams._input_kwargs
+        return self._set(**kwargs)
+
+
+if __name__ == "__main__":
+    import doctest
+    from pyspark.context import SparkContext
+    from pyspark.sql import SQLContext
+    globs = globals().copy()
+    # The small batch size here ensures that we see multiple batches,
+    # even in these small test examples:
+    sc = SparkContext("local[2]", "ml.feature tests")
+    sqlContext = SQLContext(sc)
+    globs['sc'] = sc
+    globs['sqlContext'] = sqlContext
+    (failure_count, test_count) = doctest.testmod(
+        globs=globs, optionflags=doctest.ELLIPSIS)
+    sc.stop()
+    if failure_count:
+        exit(-1)
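Editor's note on the doctest values: areaUnderROC equals the probability that a randomly chosen positive outranks a randomly chosen negative, counting ties as one half, which should agree with the trapezoidal ROC area MLlib computes over these grouped scores. A quick pure-Python sanity check of the 0.70... result on the same seven points (illustrative only, not part of the commit):

scoreAndLabels = [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0),
                  (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)]
pos = [s for s, label in scoreAndLabels if label == 1.0]
neg = [s for s, label in scoreAndLabels if label == 0.0]
# Score each positive/negative pair: 1 for a win, 0.5 for a tie.
wins = sum(1.0 if p > n else 0.5 if p == n else 0.0
           for p in pos for n in neg)
print(wins / (len(pos) * len(neg)))  # 0.70833..., matching the 0.70... doctest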

python/pyspark/ml/param/_shared_params_code_gen.py

Lines changed: 1 addition & 0 deletions

@@ -93,6 +93,7 @@ def get$Name(self):
         ("featuresCol", "features column name", "'features'"),
         ("labelCol", "label column name", "'label'"),
         ("predictionCol", "prediction column name", "'prediction'"),
+        ("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
         ("inputCol", "input column name", None),
         ("outputCol", "output column name", None),
         ("numFeatures", "number of features", None)]

python/pyspark/ml/param/shared.py

Lines changed: 29 additions & 0 deletions

@@ -165,6 +165,35 @@ def getPredictionCol(self):
         return self.getOrDefault(self.predictionCol)
 
 
+class HasRawPredictionCol(Params):
+    """
+    Mixin for param rawPredictionCol: raw prediction column name.
+    """
+
+    # a placeholder to make it appear in the generated doc
+    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction column name")
+
+    def __init__(self):
+        super(HasRawPredictionCol, self).__init__()
+        #: param for raw prediction column name
+        self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction column name")
+        if 'rawPrediction' is not None:
+            self._setDefault(rawPredictionCol='rawPrediction')
+
+    def setRawPredictionCol(self, value):
+        """
+        Sets the value of :py:attr:`rawPredictionCol`.
+        """
+        self.paramMap[self.rawPredictionCol] = value
+        return self
+
+    def getRawPredictionCol(self):
+        """
+        Gets the value of rawPredictionCol or its default value.
+        """
+        return self.getOrDefault(self.rawPredictionCol)
+
+
 class HasInputCol(Params):
     """
     Mixin for param inputCol: input column name.
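The generated mixin is consumed through plain multiple inheritance: any Params subclass that mixes it in picks up the param, its 'rawPrediction' default, and the accessors, which is exactly how BinaryClassificationEvaluator uses it above. A toy illustration with a hypothetical class (not part of the commit):

from pyspark.ml.param.shared import HasRawPredictionCol

class MyScorer(HasRawPredictionCol):
    """Hypothetical class that only carries the mixed-in param."""

scorer = MyScorer()
scorer.getRawPredictionCol()          # 'rawPrediction' (the generated default)
scorer.setRawPredictionCol("scores")  # setters return self for chaining
scorer.getRawPredictionCol()          # 'scores'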

python/pyspark/ml/pipeline.py

Lines changed: 21 additions & 1 deletion

@@ -22,7 +22,7 @@
 from pyspark.mllib.common import inherit_doc
 
 
-__all__ = ['Estimator', 'Transformer', 'Pipeline', 'PipelineModel']
+__all__ = ['Estimator', 'Transformer', 'Pipeline', 'PipelineModel', 'Evaluator']
 
 
 @inherit_doc
@@ -168,3 +168,23 @@ def transform(self, dataset, params={}):
         for t in self.transformers:
             dataset = t.transform(dataset, paramMap)
         return dataset
+
+
+class Evaluator(object):
+    """
+    Base class for evaluators that compute metrics from predictions.
+    """
+
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def evaluate(self, dataset, params={}):
+        """
+        Evaluates the output.
+        :param dataset: a dataset that contains labels/observations
+                        and predictions
+        :param params: an optional param map that overrides embedded
+                       params
+        :return: metric
+        """
+        raise NotImplementedError()
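Since evaluate is the only abstract method, a pure-Python evaluator with no JVM counterpart only needs to subclass this. A hypothetical sketch (not part of the commit), assuming a DataFrame with prediction and label columns:

from pyspark.ml.pipeline import Evaluator

class AccuracyEvaluator(Evaluator):
    """Hypothetical: fraction of rows where prediction equals label."""

    def evaluate(self, dataset, params={}):
        # Ignores the optional param map for brevity.
        correct = dataset.filter(dataset.prediction == dataset.label).count()
        return correct / float(dataset.count())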

python/pyspark/ml/wrapper.py

Lines changed: 16 additions & 1 deletion

@@ -20,7 +20,7 @@
 from pyspark import SparkContext
 from pyspark.sql import DataFrame
 from pyspark.ml.param import Params
-from pyspark.ml.pipeline import Estimator, Transformer
+from pyspark.ml.pipeline import Estimator, Transformer, Evaluator
 from pyspark.mllib.common import inherit_doc
 
 
@@ -147,3 +147,18 @@ def __init__(self, java_model):
 
     def _java_obj(self):
        return self._java_model
+
+
+@inherit_doc
+class JavaEvaluator(Evaluator, JavaWrapper):
+    """
+    Base class for :py:class:`Evaluator`s that wrap Java/Scala
+    implementations.
+    """
+
+    __metaclass__ = ABCMeta
+
+    def evaluate(self, dataset, params={}):
+        java_obj = self._java_obj()
+        self._transfer_params_to_java(params, java_obj)
+        return java_obj.evaluate(dataset._jdf, self._empty_java_param_map())
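This makes wrapping further JVM evaluators mostly declarative: subclass JavaEvaluator, point _java_class at the Scala implementation, and mix in the shared params; the evaluate method above then handles param transfer and the Py4J call through dataset._jdf. A hypothetical follow-on wrapper, assuming a matching Scala class existed:

from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol
from pyspark.ml.wrapper import JavaEvaluator

class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol):
    """Hypothetical wrapper; would require a Scala-side
    org.apache.spark.ml.evaluation.RegressionEvaluator."""

    _java_class = "org.apache.spark.ml.evaluation.RegressionEvaluator"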

python/run-tests

Lines changed: 1 addition & 0 deletions

@@ -100,6 +100,7 @@ function run_ml_tests() {
     run_test "pyspark/ml/classification.py"
     run_test "pyspark/ml/tuning.py"
     run_test "pyspark/ml/tests.py"
+    run_test "pyspark/ml/evaluation.py"
 }
 
 function run_streaming_tests() {
