noise models: Add getters for all model parameters #1175

Merged (4 commits) on Apr 22, 2022
Changes from all commits
151 changes: 116 additions & 35 deletions gtsam/linear/LossFunctions.h
@@ -54,23 +54,31 @@ namespace noiseModel {
// clang-format on
namespace mEstimator {

//---------------------------------------------------------------------------------------

/**
* Pure virtual class for all robust error function classes.
*
* It provides the machinery for block vs scalar reweighting strategies, in
* addition to defining the interface of derived classes.
*/
class GTSAM_EXPORT Base {
public:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;

protected:
/// Strategy for reweighting \sa ReweightScheme
ReweightScheme reweight_;

public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}

/// Returns the reweight scheme, as explained in ReweightScheme
ReweightScheme reweightScheme() const { return reweight_; }

/**
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
@@ -80,16 +88,20 @@ class GTSAM_EXPORT Base {
* error vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*
* This returns \rho(x) in \ref mEstimator
*/
virtual double loss(double distance) const { return 0; }

/**
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*
* This returns w(x) in \ref mEstimator
*/
virtual double weight(double distance) const = 0;
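
For intuition, here is a minimal sketch (not part of the diff) of how these two methods are typically exercised. It relies only on the Create(), loss() and weight() members declared in this header; the Huber threshold 1.345 is just a common tuning choice.

```cpp
#include <gtsam/linear/LossFunctions.h>

#include <iostream>

int main() {
  using namespace gtsam::noiseModel::mEstimator;
  // Huber kernel with threshold k = 1.345.
  auto huber = Huber::Create(1.345);
  for (double x : {0.5, 2.0, 10.0}) {
    // loss(x) is the total penalty rho(x); weight(x) is the IRLS weight
    // w(x) = phi(x)/x used to rescale residuals at each iteration.
    std::cout << "x = " << x << "  rho(x) = " << huber->loss(x)
              << "  w(x) = " << huber->weight(x) << std::endl;
  }
  return 0;
}
```

For |x| below the threshold the weight stays at 1 (plain least squares); beyond it the weight decays as k/|x|, which is what limits the influence of outliers.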

@@ -124,7 +136,15 @@ class GTSAM_EXPORT Base {
}
};

/** "Null" robust loss function, equivalent to a Gaussian pdf noise model, or
* plain least-squares (non-robust).
*
* This model has no additional parameters.
*
* - Loss \rho(x) = 0.5 x²
* - Derivative \phi(x) = x
* - Weight w(x) = \phi(x)/x = 1
*/
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@@ -146,7 +166,14 @@ class GTSAM_EXPORT Null : public Base {
}
};

/** Implementation of the "Fair" robust error model (Zhang97ivc)
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (|x|/c - log(1+|x|/c))
* - Derivative \phi(x) = x/(1+|x|/c)
* - Weight w(x) = \phi(x)/x = 1/(1+|x|/c)
*/
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
@@ -160,6 +187,7 @@ class GTSAM_EXPORT Fair : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }

private:
/** Serialization function */
@@ -171,7 +199,14 @@ class GTSAM_EXPORT Fair : public Base {
}
};

/** The "Huber" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 x² if |x|<k, 0.5 k² + k|x-k| otherwise
* - Derivative \phi(x) = x if |x|<k, k sgn(x) otherwise
* - Weight w(x) = \phi(x)/x = 1 if |x|<k, k/|x| otherwise
*/
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
@@ -185,6 +220,7 @@ class GTSAM_EXPORT Huber : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }

private:
/** Serialization function */
@@ -196,12 +232,19 @@ class GTSAM_EXPORT Huber : public Base {
}
};
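
In practice these kernels are rarely used on their own; they are wrapped together with a Gaussian base model into a robust noise model that factors then consume. A rough sketch, assuming GTSAM's noiseModel::Robust::Create and noiseModel::Isotropic::Sigma from NoiseModel.h (both outside this diff):

```cpp
#include <gtsam/linear/NoiseModel.h>  // also pulls in LossFunctions.h

#include <iostream>

int main() {
  using namespace gtsam::noiseModel;
  // 3-dimensional isotropic Gaussian with sigma = 0.1 on each component.
  auto gaussian = Isotropic::Sigma(3, 0.1);
  // Robustify it with a Huber kernel (k = 1.345).
  auto robust = Robust::Create(mEstimator::Huber::Create(1.345), gaussian);
  // `robust` can now be passed to a factor wherever `gaussian` would be used.
  std::cout << "dimension = " << robust->dim() << std::endl;
  return 0;
}
```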

/** Implementation of the "Cauchy" robust error model (Lee2013IROS).
* Contributed by:
* Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
* Information Technology, Karlsruhe, Germany.
* oberlaender@fzi.de
* Thanks Jan!
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 k² log(1+x²/k²)
* - Derivative \phi(x) = (k²x)/(x²+k²)
* - Weight w(x) = \phi(x)/x = k²/(x²+k²)
*/
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
@@ -215,6 +258,7 @@ class GTSAM_EXPORT Cauchy : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }

private:
/** Serialization function */
@@ -227,7 +271,14 @@ class GTSAM_EXPORT Cauchy : public Base {
}
};
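
The documented derivative and weight expressions can be sanity-checked numerically. The sketch below (again not part of the diff) compares a central-difference derivative of loss() with the φ(x) formula documented above for the Cauchy kernel; the three printed values should agree up to finite-difference error.

```cpp
#include <gtsam/linear/LossFunctions.h>

#include <iostream>

int main() {
  using namespace gtsam::noiseModel::mEstimator;
  const double k = 0.1;
  auto cauchy = Cauchy::Create(k);
  const double x = 0.25, h = 1e-6;
  // Central-difference approximation of d rho / dx.
  const double numeric = (cauchy->loss(x + h) - cauchy->loss(x - h)) / (2 * h);
  // Documented derivative phi(x) = k^2 x / (x^2 + k^2).
  const double analytic = k * k * x / (x * x + k * k);
  std::cout << "numeric  d rho/dx = " << numeric << std::endl;
  std::cout << "analytic phi(x)   = " << analytic << std::endl;
  std::cout << "weight(x) * x     = " << cauchy->weight(x) * x << std::endl;
  return 0;
}
```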

/** Implementation of the "Tukey" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|<c, c²/6 otherwise
* - Derivative \phi(x) = x(1-x²/c²)² if |x|<c, 0 otherwise
* - Weight w(x) = \phi(x)/x = (1-x²/c²)² if |x|<c, 0 otherwise
*/
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
@@ -241,6 +292,7 @@ class GTSAM_EXPORT Tukey : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }

private:
/** Serialization function */
@@ -252,7 +304,14 @@ class GTSAM_EXPORT Tukey : public Base {
}
};

/** Implementation of the "Welsch" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = -0.5 c² (exp(-x²/c²) - 1)
* - Derivative \phi(x) = x exp(-x²/c²)
* - Weight w(x) = \phi(x)/x = exp(-x²/c²)
*/
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
@@ -266,6 +325,7 @@ class GTSAM_EXPORT Welsch : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }

private:
/** Serialization function */
@@ -278,12 +338,16 @@ class GTSAM_EXPORT Welsch : public Base {
}
};

/** Implementation of the "Geman-McClure" robust error model (Zhang97ivc).
*
* Note that Geman-McClure weight function uses the parameter c == 1.0,
* but here it's allowed to use different values, so we actually have
* the generalized Geman-McClure from (Agarwal15phd).
*
* - Loss \rho(x) = 0.5 (c²x²)/(c²+x²)
* - Derivative \phi(x) = xc⁴/(c²+x²)²
* - Weight w(x) = \phi(x)/x = c⁴/(c²+x²)²
*/
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
@@ -295,6 +359,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }

protected:
double c_;
@@ -309,11 +374,18 @@ class GTSAM_EXPORT GemanMcClure : public Base {
}
};

/** DCS implements the Dynamic Covariance Scaling robust error model
* from the paper Robust Map Optimization (Agarwal13icra).
*
* Under the special condition of the parameter c == 1.0 and not
* forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
*
* This model has a scalar parameter "c" (with "units" of squared error).
*
* - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x")
* - Derivative \phi(x) = 2c²x/(x²+c)²
* - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise
*/
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
@@ -325,6 +397,7 @@ class GTSAM_EXPORT DCS : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }

protected:
double c_;
@@ -339,12 +412,19 @@ class GTSAM_EXPORT DCS : public Base {
}
};

/** L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
* width 2*k, centered at the origin. The resulting penalty within the dead
* zone is always zero, and grows quadratically outside the dead zone. In this
* sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
* robust to outliers. This penalty can be used to create barrier functions in
* a general way.
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0 if |x|<k, 0.5(k-|x|)² otherwise
* - Derivative \phi(x) = 0 if |x|<k, (-k+x) if x>k, (k+x) if x<-k
* - Weight w(x) = \phi(x)/x = 0 if |x|<k, (-k+x)/x if x>k, (k+x)/x if x<-k
*/
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
@@ -358,6 +438,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }

private:
/** Serialization function */
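
Finally, a short usage sketch of the getters this PR adds (constants are arbitrary):

```cpp
#include <gtsam/linear/LossFunctions.h>

#include <iostream>

int main() {
  using namespace gtsam::noiseModel::mEstimator;
  auto tukey = Tukey::Create(4.685);    // parameter c
  auto gm = GemanMcClure::Create(1.0);  // parameter c
  // The new modelParameter() getters expose the tuning constant after
  // construction, so callers no longer have to track it separately.
  std::cout << "Tukey c         = " << tukey->modelParameter() << std::endl;
  std::cout << "Geman-McClure c = " << gm->modelParameter() << std::endl;
  // Base also gained reweightScheme() for the reweighting strategy.
  std::cout << "Block reweighting: "
            << (tukey->reweightScheme() == Base::Block) << std::endl;
  return 0;
}
```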