/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ml.feature

import scala.util.Random

import org.scalatest.FunSuite

import org.apache.spark.SparkException
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}

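/**
 * Tests for [[Bucketizer]]: bucket assignment with and without infinite outer splits, and the
 * binary-search helper used to locate a feature's bucket.
 */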
class BucketizerSuite extends FunSuite with MLlibTestSparkContext {

  @transient private var sqlContext: SQLContext = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    sqlContext = new SQLContext(sc)
  }

  test("Bucket continuous features, without -inf,inf") {
    // Check a set of valid feature values.
    val splits = Array(-0.5, 0.0, 0.5)
    val validData = Array(-0.5, -0.3, 0.0, 0.2)
    val expectedBuckets = Array(0.0, 0.0, 1.0, 1.0)
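    // Bucket i covers [splits(i), splits(i + 1)), so bucket 0 is [-0.5, 0.0) and
    // bucket 1 is [0.0, 0.5).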
    val dataFrame: DataFrame =
      sqlContext.createDataFrame(validData.zip(expectedBuckets)).toDF("feature", "expected")

    val bucketizer: Bucketizer = new Bucketizer()
      .setInputCol("feature")
      .setOutputCol("result")
      .setSplits(splits)

    bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
      case Row(x: Double, y: Double) =>
        assert(x === y,
          s"The feature value is not correct after bucketing. Expected $y but found $x")
    }

    // Check for exceptions when using a set of invalid feature values.
    val invalidData1: Array[Double] = Array(-0.9) ++ validData
    val invalidData2 = Array(0.5) ++ validData
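    // -0.9 lies below the lowest split, and 0.5 equals the last split; without a
    // Double.PositiveInfinity upper bound the last bucket [0.0, 0.5) excludes it.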
    val badDF1 = sqlContext.createDataFrame(invalidData1.zipWithIndex).toDF("feature", "idx")
    intercept[RuntimeException] {
      bucketizer.transform(badDF1).collect()
      println("Invalid feature value -0.9 was not caught as an invalid feature!")
    }
    val badDF2 = sqlContext.createDataFrame(invalidData2.zipWithIndex).toDF("feature", "idx")
    intercept[RuntimeException] {
      bucketizer.transform(badDF2).collect()
      println("Invalid feature value 0.5 was not caught as an invalid feature!")
    }
  }

  test("Bucket continuous features, with -inf,inf") {
    val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
    val validData = Array(-0.9, -0.5, -0.3, 0.0, 0.2, 0.5, 0.9)
    val expectedBuckets = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0)
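    // With -inf and +inf as the outer splits, every value is in range: values below -0.5 map to
    // bucket 0, and values at or above 0.5 map to bucket 3.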
    val dataFrame: DataFrame =
      sqlContext.createDataFrame(validData.zip(expectedBuckets)).toDF("feature", "expected")

    val bucketizer: Bucketizer = new Bucketizer()
      .setInputCol("feature")
      .setOutputCol("result")
      .setSplits(splits)

    bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
      case Row(x: Double, y: Double) =>
        assert(x === y,
          s"The feature value is not correct after bucketing. Expected $y but found $x")
    }
  }

  test("Binary search correctness on hand-picked examples") {
    import BucketizerSuite.checkBinarySearch
    // length 3, with -inf
    checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0))
    // length 4
    checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0))
    // length 5
    checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0, 1.5))
    // length 3, with inf
    checkBinarySearch(Array(0.0, 1.0, Double.PositiveInfinity))
    // length 3, with -inf and inf
    checkBinarySearch(Array(Double.NegativeInfinity, 1.0, Double.PositiveInfinity))
    // length 4, with -inf and inf
    checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0, Double.PositiveInfinity))
  }

  test("Binary search correctness in contrast with linear search, on random data") {
    val data = Array.fill(100)(Random.nextDouble())
    val splits: Array[Double] = Double.NegativeInfinity +:
      Array.fill(10)(Random.nextDouble()).sorted :+ Double.PositiveInfinity
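    // The binary search must assign every random point to the same bucket as the brute-force
    // linear search below.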
    val bsResult = Vectors.dense(data.map(x => Bucketizer.binarySearchForBuckets(splits, x)))
    val lsResult = Vectors.dense(data.map(x => BucketizerSuite.linearSearchForBuckets(splits, x)))
    assert(bsResult ~== lsResult absTol 1e-5)
  }
}

private object BucketizerSuite extends FunSuite {
  /** Brute force search for buckets. Bucket i is defined by the range [split(i), split(i+1)). */
  def linearSearchForBuckets(splits: Array[Double], feature: Double): Double = {
    require(feature >= splits.head)
    var i = 0
    while (i < splits.length - 1) {
      if (feature < splits(i + 1)) return i
      i += 1
    }
    throw new RuntimeException(
      s"linearSearchForBuckets failed to find bucket for feature value $feature")
  }

  /** Check all values in splits, plus values between all splits. */
  def checkBinarySearch(splits: Array[Double]): Unit = {
    def testFeature(feature: Double, expectedBucket: Double): Unit = {
      assert(Bucketizer.binarySearchForBuckets(splits, feature) === expectedBucket,
        s"Expected feature value $feature to be in bucket $expectedBucket with splits:" +
        s" ${splits.mkString(", ")}")
    }
    var i = 0
    while (i < splits.length - 1) {
      testFeature(splits(i), i) // Split i should fall in bucket i.
      testFeature((splits(i) + splits(i + 1)) / 2, i) // Value between splits i,i+1 should be in i.
      i += 1
    }
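    // When the last split is +inf, +inf itself should fall in the last bucket.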
    if (splits.last === Double.PositiveInfinity) {
      testFeature(Double.PositiveInfinity, splits.length - 2)
    }
  }
}