@@ -31,11 +31,10 @@ class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPrediction
31
31
columns: rawPrediction and label.
32
32
33
33
>>> from pyspark.mllib.linalg import Vectors
34
- >>> scoreAndLabels = sc.parallelize([
35
- ... (0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
36
- >>> rawPredictionAndLabels = scoreAndLabels.map(
37
- ... lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]))
38
- >>> dataset = rawPredictionAndLabels.toDF(["raw", "label"])
34
+ >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
35
+ ... [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
36
+ >>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
37
+ ...
39
38
>>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
40
39
>>> evaluator.evaluate(dataset)
41
40
0.70...
@@ -97,7 +96,7 @@ def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
97
96
globs = globals().copy()
98
97
# The small batch size here ensures that we see multiple batches,
99
98
# even in these small test examples:
100
- sc = SparkContext("local[2]", "ml.feature tests")
99
+ sc = SparkContext("local[2]", "ml.evaluation tests")
101
100
sqlContext = SQLContext(sc)
102
101
globs['sc'] = sc
103
102
globs['sqlContext'] = sqlContext
0 commit comments