[SPARK-13986][CORE][MLLIB] Remove DeveloperApi-annotations for non-publics #11797

Closed · wants to merge 2 commits
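
This PR removes the @DeveloperApi annotation, together with the matching ":: DeveloperApi ::" Scaladoc marker and import where present, from declarations that are already private[spark] or private[ml]. The annotation flags a public but unstable, developer-facing API, so it carries no meaning on non-public members. A representative before/after, taken from the first hunk below:

    // Before: a "developer API" annotation on a class that is not visible
    // outside Spark in the first place.
    @DeveloperApi
    private[spark] case class JobFailed(exception: Exception) extends JobResult

    // After: the redundant annotation is gone; visibility is unchanged.
    private[spark] case class JobFailed(exception: Exception) extends JobResult
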
@@ -29,5 +29,4 @@ sealed trait JobResult
 @DeveloperApi
 case object JobSucceeded extends JobResult
 
-@DeveloperApi
 private[spark] case class JobFailed(exception: Exception) extends JobResult

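Since JobResult is a sealed trait with exactly these two cases, code inside Spark can match on it exhaustively. A minimal, hypothetical consumer sketch (not part of this diff):

    def describe(result: JobResult): String = result match {
      case JobSucceeded         => "job completed successfully"
      case JobFailed(exception) => s"job failed: ${exception.getMessage}"
    }
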
@@ -19,17 +19,13 @@ package org.apache.spark.util.collection
 
 import scala.reflect.ClassTag
 
-import org.apache.spark.annotation.DeveloperApi
-
 /**
- * :: DeveloperApi ::
  * A fast hash map implementation for nullable keys. This hash map supports insertions and updates,
  * but not deletions. This map is about 5X faster than java.util.HashMap, while using much less
  * space overhead.
  *
  * Under the hood, it uses our OpenHashSet implementation.
  */
-@DeveloperApi
 private[spark]
 class OpenHashMap[K : ClassTag, @specialized(Long, Int, Double) V: ClassTag](
     initialCapacity: Int)

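For context, a short usage sketch of the map described above. Because the class is explicitly private[spark], this only compiles inside Spark itself; changeValue is assumed to be the map's usual upsert helper:

    val counts = new OpenHashMap[String, Int](initialCapacity = 64)
    counts("spark") = 1                    // insert a value for a key
    counts.changeValue("spark", 1, _ + 1)  // upsert: 1 if absent, else increment
    assert(counts("spark") == 2)           // lookups work; deletions are unsupported
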
@@ -22,15 +22,12 @@ import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
 
 
 /**
- * :: DeveloperApi ::
- *
  * Single-label regression
 *
 * @tparam FeaturesType Type of input features. E.g., [[org.apache.spark.mllib.linalg.Vector]]
 * @tparam Learner Concrete Estimator type
 * @tparam M Concrete Model type
 */
-@DeveloperApi
 private[spark] abstract class Regressor[
     FeaturesType,
     Learner <: Regressor[FeaturesType, Learner, M],

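The recursive bounds on Learner and M are the usual F-bounded pattern: they let a fit-style method return the concrete model type rather than a base class. A self-contained illustration with hypothetical names (not Spark API):

    abstract class MyModel[F, M <: MyModel[F, M]]

    abstract class MyRegressor[F, L <: MyRegressor[F, L, M], M <: MyModel[F, M]] {
      def fit(data: Seq[F]): M  // callers get the concrete model type back
    }
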
@@ -171,7 +171,6 @@ private[spark] class NodeIdCache(
   }
 }
 
-@DeveloperApi
 private[spark] object NodeIdCache {
   /**
    * Initialize the node Id cache with initial node Id values.

@@ -17,17 +17,14 @@
 
 package org.apache.spark.ml.tuning
 
-import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.ml.Estimator
 import org.apache.spark.ml.evaluation.Evaluator
 import org.apache.spark.ml.param.{Param, ParamMap, Params}
 import org.apache.spark.sql.types.StructType
 
 /**
- * :: DeveloperApi ::
 * Common params for [[TrainValidationSplitParams]] and [[CrossValidatorParams]].
 */
-@DeveloperApi
 private[ml] trait ValidatorParams extends Params {
 
   /**

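A trait like this typically declares the shared params once so both validators inherit them. A hedged sketch of the likely shape; the exact members are not shown in this diff:

    private[ml] trait ValidatorParams extends Params {
      // param for the estimator whose hyperparameters are being selected
      val estimator: Param[Estimator[_]] =
        new Param(this, "estimator", "estimator for selection")
      def getEstimator: Estimator[_] = $(estimator)

      // param for the evaluator used to rank the candidate models
      val evaluator: Param[Evaluator] =
        new Param(this, "evaluator", "evaluator for selection")
      def getEvaluator: Evaluator = $(evaluator)
    }
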
@@ -156,7 +156,6 @@ sealed trait Matrix extends Serializable {
   def numActives: Int
 }
 
-@DeveloperApi
 private[spark] class MatrixUDT extends UserDefinedType[Matrix] {
 
   override def sqlType: StructType = {

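A UserDefinedType maps an arbitrary class onto a Catalyst schema. A hedged, self-contained skeleton with a hypothetical Point class; the exact method signatures vary across Spark versions, and UserDefinedType itself is internal in newer releases:

    import org.apache.spark.sql.catalyst.InternalRow
    import org.apache.spark.sql.types._

    case class Point(x: Double, y: Double)

    class PointUDT extends UserDefinedType[Point] {
      // Catalyst schema used to store the value
      override def sqlType: StructType = StructType(Seq(
        StructField("x", DoubleType, nullable = false),
        StructField("y", DoubleType, nullable = false)))

      override def serialize(obj: Point): Any = InternalRow(obj.x, obj.y)

      override def deserialize(datum: Any): Point = datum match {
        case row: InternalRow => Point(row.getDouble(0), row.getDouble(1))
      }

      override def userClass: Class[Point] = classOf[Point]
    }
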
@@ -173,7 +173,6 @@ private[spark] class NodeIdCache(
   }
 }
 
-@DeveloperApi
 private[spark] object NodeIdCache {
   /**
    * Initialize the node Id cache with initial node Id values.

@@ -79,7 +79,6 @@ private[spark] object InformationGainStats {
 }
 
 /**
- * :: DeveloperApi ::
  * Impurity statistics for each split
  * @param gain information gain value
  * @param impurity current node impurity
@@ -89,7 +88,6 @@ private[spark] object InformationGainStats {
 * @param valid whether the current split satisfies minimum info gain or
 *              minimum number of instances per node
 */
-@DeveloperApi
 private[spark] class ImpurityStats(
     val gain: Double,
     val impurity: Double,

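The gain recorded here is the standard impurity reduction of a split: the parent node's impurity minus the weighted average of the child impurities. A small helper showing the arithmetic (illustrative only, not code from this diff):

    // gain = impurity(parent) - wL * impurity(left) - wR * impurity(right),
    // where wL and wR are the fractions of instances going left and right.
    def informationGain(parentImpurity: Double,
                        leftCount: Long, leftImpurity: Double,
                        rightCount: Long, rightImpurity: Double): Double = {
      val total = (leftCount + rightCount).toDouble
      parentImpurity -
        (leftCount / total) * leftImpurity -
        (rightCount / total) * rightImpurity
    }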