|
| 1 | +/* |
| 2 | + * Licensed to the Apache Software Foundation (ASF) under one or more |
| 3 | + * contributor license agreements. See the NOTICE file distributed with |
| 4 | + * this work for additional information regarding copyright ownership. |
| 5 | + * The ASF licenses this file to You under the Apache License, Version 2.0 |
| 6 | + * (the "License"); you may not use this file except in compliance with |
| 7 | + * the License. You may obtain a copy of the License at |
| 8 | + * |
| 9 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | + * |
| 11 | + * Unless required by applicable law or agreed to in writing, software |
| 12 | + * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | + * See the License for the specific language governing permissions and |
| 15 | + * limitations under the License. |
| 16 | + */ |
| 17 | + |
| 18 | +package org.apache.spark.examples.ml |
| 19 | + |
| 20 | +import org.apache.spark.{SparkConf, SparkContext} |
| 21 | +import org.apache.spark.SparkContext._ |
| 22 | +import org.apache.spark.ml.classification.{Classifier, ClassifierParams, ClassificationModel} |
| 23 | +import org.apache.spark.ml.param.{Params, IntParam, ParamMap} |
| 24 | +import org.apache.spark.mllib.linalg.{BLAS, Vector, Vectors, VectorUDT} |
| 25 | +import org.apache.spark.mllib.regression.LabeledPoint |
| 26 | +import org.apache.spark.sql.{DataType, SchemaRDD, Row, SQLContext} |
| 27 | + |
/**
 * A simple example demonstrating how to write your own learning algorithm using Estimator,
 * Transformer, and other abstractions.
 * This mimics [[org.apache.spark.ml.classification.LogisticRegression]].
 * Run with
 * {{{
 * bin/run-example ml.DeveloperApiExample
 * }}}
 */
object DeveloperApiExample {

  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("DeveloperApiExample")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext._

    // Prepare training data.
    // We use LabeledPoint, which is a case class.  Spark SQL can convert RDDs of case classes
    // into SchemaRDDs, where it uses the case class metadata to infer the schema.
    // NOTE: Use the local `sc` explicitly rather than the `sparkContext` member that
    // `import sqlContext._` happens to bring into scope.
    val training = sc.parallelize(Seq(
      LabeledPoint(1.0, Vectors.dense(0.0, 1.1, 0.1)),
      LabeledPoint(0.0, Vectors.dense(2.0, 1.0, -1.0)),
      LabeledPoint(0.0, Vectors.dense(2.0, 1.3, 1.0)),
      LabeledPoint(1.0, Vectors.dense(0.0, 1.2, -0.5))))

    // Create a LogisticRegression instance.  This instance is an Estimator.
    val lr = new MyLogisticRegression()
    // Print out the parameters, documentation, and any default values.
    println("MyLogisticRegression parameters:\n" + lr.explainParams() + "\n")

    // We may set parameters using setter methods.
    lr.setMaxIter(10)

    // Learn a LogisticRegression model.  This uses the parameters stored in lr.
    val model = lr.fit(training)

    // Prepare test data.
    val test = sc.parallelize(Seq(
      LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
      LabeledPoint(0.0, Vectors.dense(3.0, 2.0, -0.1)),
      LabeledPoint(1.0, Vectors.dense(0.0, 2.2, -1.5))))

    // Make predictions on test data.
    val sumPredictions: Double = model.transform(test)
      .select('features, 'label, 'prediction)
      .collect()
      .map { case Row(features: Vector, label: Double, prediction: Double) =>
        prediction
      }.sum
    // All model weights are zero, so every raw prediction is (0, 0) and predict() picks label 0.
    assert(sumPredictions == 0.0,
      "MyLogisticRegression predicted something other than 0, even though all weights are 0!")

    // Shut down the SparkContext cleanly before exiting.
    sc.stop()
  }
}
| 82 | + |
/**
 * Example of defining a parameter trait for a user-defined type of [[Classifier]].
 *
 * NOTE: This is private since it is an example.  In practice, you may not want it to be private.
 */
private trait MyLogisticRegressionParams extends ClassifierParams {

  /** Param shared by the estimator and the model: maximum number of training iterations. */
  val maxIter: IntParam = new IntParam(this, "maxIter", "max number of iterations")

  /** Returns the currently set value of [[maxIter]]. */
  def getMaxIter: Int = this.get(maxIter)
}
| 94 | + |
/**
 * Example of defining a type of [[Classifier]].
 *
 * NOTE: This is private since it is an example.  In practice, you may not want it to be private.
 */
private class MyLogisticRegression
  extends Classifier[Vector, MyLogisticRegression, MyLogisticRegressionModel]
  with MyLogisticRegressionParams {

  setMaxIter(100) // Default value for maxIter.

  /** Sets the maximum number of training iterations. */
  def setMaxIter(value: Int): this.type = set(maxIter, value)

  override def fit(dataset: SchemaRDD, paramMap: ParamMap): MyLogisticRegressionModel = {
    // Validate the input schema up front so bad column types fail before any computation.
    transformSchema(dataset.schema, paramMap, logging = true)

    // Pull (label, features) pairs out of the dataset using the shared helper method.
    val labeledPoints = extractLabeledPoints(dataset, paramMap)

    // Merge the embedded parameters with the caller-supplied map; caller values win.
    val combinedParams = this.paramMap ++ paramMap

    // "Training": size an all-zero weight vector from the first example's feature count.
    val numFeatures = labeledPoints.take(1)(0).features.size
    val weights = Vectors.zeros(numFeatures) // Learning would happen here.

    // Build the model to return.
    val model = new MyLogisticRegressionModel(this, combinedParams, weights)

    // An Estimator stores the parameters for the Model it produces; copy any relevant
    // parameter values over to the new model.
    Params.inheritValues(combinedParams, this, model)

    // Return the learned model.
    model
  }

  /**
   * Returns the SQL DataType corresponding to the FeaturesType type parameter.
   * This is used by [[ClassifierParams.validateAndTransformSchema()]] to check the input data.
   */
  override protected def featuresDataType: DataType = new VectorUDT
}
| 141 | + |
/**
 * Example of defining a type of [[ClassificationModel]].
 *
 * NOTE: This is private since it is an example.  In practice, you may not want it to be private.
 */
private class MyLogisticRegressionModel(
    override val parent: MyLogisticRegression,
    override val fittingParamMap: ParamMap,
    val weights: Vector)
  extends ClassificationModel[Vector, MyLogisticRegressionModel]
  with MyLogisticRegressionParams {

  // This uses the default implementation of transform(), which reads column "features" and outputs
  // columns "prediction" and "rawPrediction."

  // This uses the default implementation of predict(), which chooses the label corresponding to
  // the maximum value returned by [[predictRaw()]].

  /**
   * Raw prediction for each possible label.
   * The meaning of a "raw" prediction may vary between algorithms, but it intuitively gives
   * a measure of confidence in each possible label (where larger = more confident).
   * This internal method is used to implement [[transform()]] and output [[rawPredictionCol]].
   *
   * @return vector where element i is the raw prediction for label i.
   *         This raw prediction may be any real number, where a larger value indicates greater
   *         confidence for that label.
   */
  override protected def predictRaw(features: Vector): Vector = {
    // Linear score: dot product of the example's features with the learned weights.
    val score = BLAS.dot(features, weights)
    // Binary classification: return a length-2 vector where index i is class i's raw score.
    Vectors.dense(-score, score)
  }

  /** Number of classes the label can take.  2 indicates binary classification. */
  override val numClasses: Int = 2

  /**
   * Create a copy of the model.
   * The copy is shallow, except for the embedded paramMap, which gets a deep copy.
   *
   * This is used for the default implementation of [[transform()]].
   */
  override protected def copy(): MyLogisticRegressionModel = {
    val copied = new MyLogisticRegressionModel(parent, fittingParamMap, weights)
    Params.inheritValues(this.paramMap, this, copied)
    copied
  }

  /**
   * Returns the SQL DataType corresponding to the FeaturesType type parameter.
   * This is used by [[ClassifierParams.validateAndTransformSchema()]] to check the input data.
   */
  override protected def featuresDataType: DataType = new VectorUDT
}
0 commit comments