diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp index c18a9e9cee..44076ab38b 100644 --- a/examples/Pose3Localization.cpp +++ b/examples/Pose3Localization.cpp @@ -43,9 +43,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } @@ -74,10 +74,8 @@ int main(const int argc, const char* argv[]) { // Calculate and print marginal covariances for all variables Marginals marginals(*graph, result); - for (const auto key_value : result) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - std::cout << marginals.marginalCovariance(key_value.key) << endl; + for (const auto& key_pose : result.extract()) { + std::cout << marginals.marginalCovariance(key_pose.first) << endl; } return 0; } diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp index 9aa697f140..7da83d211b 100644 --- a/examples/Pose3SLAMExample_changeKeys.cpp +++ b/examples/Pose3SLAMExample_changeKeys.cpp @@ -55,14 +55,14 @@ int main(const int argc, const char *argv[]) { std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl; // Additional: rewrite input with simplified keys 0,1,... Values simpleInitial; - for(const auto key_value: *initial) { + for (const auto k : initial->keys()) { Key key; - if(add) - key = key_value.key + firstKey; + if (add) + key = k + firstKey; else - key = key_value.key - firstKey; + key = k - firstKey; - simpleInitial.insert(key, initial->at(key_value.key)); + simpleInitial.insert(key, initial->at(k)); } NonlinearFactorGraph simpleGraph; for(const boost::shared_ptr& factor: *graph) { diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp index 3679643072..7ae2978ce5 100644 --- a/examples/Pose3SLAMExample_g2o.cpp +++ b/examples/Pose3SLAMExample_g2o.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp index 992750fed8..03db9fc77a 100644 --- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp +++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp index 384f290a19..31693c5ff5 100644 --- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp +++ 
b/examples/Pose3SLAMExample_initializePose3Gradient.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp index 69975b620d..3fffc31d07 100644 --- a/examples/SolverComparer.cpp +++ b/examples/SolverComparer.cpp @@ -559,12 +559,12 @@ void runPerturb() // Perturb values VectorValues noise; - for(const Values::KeyValuePair key_val: initial) + for(const auto& key_dim: initial.dims()) { - Vector noisev(key_val.value.dim()); + Vector noisev(key_dim.second); for(Vector::Index i = 0; i < noisev.size(); ++i) noisev(i) = normal(rng); - noise.insert(key_val.key, noisev); + noise.insert(key_dim.first, noisev); } Values perturbed = initial.retract(noise); diff --git a/gtsam/base/SymmetricBlockMatrix.cpp b/gtsam/base/SymmetricBlockMatrix.cpp index 7f41da137b..d37bbf7d14 100644 --- a/gtsam/base/SymmetricBlockMatrix.cpp +++ b/gtsam/base/SymmetricBlockMatrix.cpp @@ -24,6 +24,12 @@ namespace gtsam { +/* ************************************************************************* */ +SymmetricBlockMatrix::SymmetricBlockMatrix() : blockStart_(0) { + variableColOffsets_.push_back(0); + assertInvariants(); +} + /* ************************************************************************* */ SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf( const SymmetricBlockMatrix& other) { @@ -61,6 +67,18 @@ Matrix SymmetricBlockMatrix::block(DenseIndex I, DenseIndex J) const { } } +/* ************************************************************************* */ +void SymmetricBlockMatrix::negate() { + full().triangularView() *= -1.0; +} + +/* ************************************************************************* */ +void SymmetricBlockMatrix::invertInPlace() { + const auto identity = Matrix::Identity(rows(), rows()); + full().triangularView() = + selfadjointView().llt().solve(identity).triangularView(); +} + /* ************************************************************************* */ void SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) { gttic(VerticalBlockMatrix_choleskyPartial); diff --git a/gtsam/base/SymmetricBlockMatrix.h b/gtsam/base/SymmetricBlockMatrix.h index 302a1ec347..e8afe6e386 100644 --- a/gtsam/base/SymmetricBlockMatrix.h +++ b/gtsam/base/SymmetricBlockMatrix.h @@ -63,12 +63,7 @@ namespace gtsam { public: /// Construct from an empty matrix (asserts that the matrix is empty) - SymmetricBlockMatrix() : - blockStart_(0) - { - variableColOffsets_.push_back(0); - assertInvariants(); - } + SymmetricBlockMatrix(); /// Construct from a container of the sizes of each block. template @@ -265,19 +260,10 @@ namespace gtsam { } /// Negate the entire active matrix. - void negate() { - full().triangularView() *= -1.0; - } + void negate(); /// Invert the entire active matrix in place. 
- void invertInPlace() { - const auto identity = Matrix::Identity(rows(), rows()); - full().triangularView() = - selfadjointView() - .llt() - .solve(identity) - .triangularView(); - } + void invertInPlace(); /// @} diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index d8b6daca80..21b44ada57 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -45,7 +45,7 @@ typedef std::vector > Point2Vector; /// multiply with scalar inline Point2 operator*(double s, const Point2& p) { - return p * s; + return Point2(s * p.x(), s * p.y()); } /* diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index 2652fc0730..6a8c61560a 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -158,6 +158,12 @@ bool Pose3::equals(const Pose3& pose, double tol) const { return R_.equals(pose.R_, tol) && traits::Equals(t_, pose.t_, tol); } +/* ************************************************************************* */ +Pose3 Pose3::interpolateRt(const Pose3& T, double t) const { + return Pose3(interpolate(R_, T.R_, t), + interpolate(t_, T.t_, t)); +} + /* ************************************************************************* */ /** Modified from Murray94book version (which assumes w and v normalized?) */ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) { diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 80741e1c3a..678df8376f 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -129,10 +129,7 @@ class GTSAM_EXPORT Pose3: public LieGroup { * @param T End point of interpolation. * @param t A value in [0, 1]. */ - Pose3 interpolateRt(const Pose3& T, double t) const { - return Pose3(interpolate(R_, T.R_, t), - interpolate(t_, T.t_, t)); - } + Pose3 interpolateRt(const Pose3& T, double t) const; /// @} /// @name Lie Group diff --git a/gtsam/geometry/Unit3.cpp b/gtsam/geometry/Unit3.cpp index 6f70d840e3..17169a5f56 100644 --- a/gtsam/geometry/Unit3.cpp +++ b/gtsam/geometry/Unit3.cpp @@ -32,6 +32,14 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +Unit3::Unit3(const Vector3& p) : p_(p.normalized()) {} + +Unit3::Unit3(double x, double y, double z) : p_(x, y, z) { p_.normalize(); } + +Unit3::Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) { + p_.normalize(); +} /* ************************************************************************* */ Unit3 Unit3::FromPoint3(const Point3& point, OptionalJacobian<2, 3> H) { // 3*3 Derivative of representation with respect to point is 3*3: diff --git a/gtsam/geometry/Unit3.h b/gtsam/geometry/Unit3.h index ebd5c63c94..2989159a35 100644 --- a/gtsam/geometry/Unit3.h +++ b/gtsam/geometry/Unit3.h @@ -67,21 +67,14 @@ class GTSAM_EXPORT Unit3 { } /// Construct from point - explicit Unit3(const Vector3& p) : - p_(p.normalized()) { - } + explicit Unit3(const Vector3& p); /// Construct from x,y,z - Unit3(double x, double y, double z) : - p_(x, y, z) { - p_.normalize(); - } + Unit3(double x, double y, double z); /// Construct from 2D point in plane at focal length f /// Unit3(p,1) can be viewed as normalized homogeneous coordinates of 2D point - explicit Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) { - p_.normalize(); - } + explicit Unit3(const Point2& p, double f); /// Copy constructor Unit3(const Unit3& u) { diff --git a/gtsam/hybrid/HybridValues.h b/gtsam/hybrid/HybridValues.h index 005e3534b4..94365db75a 100644 --- a/gtsam/hybrid/HybridValues.h +++ b/gtsam/hybrid/HybridValues.h @@ -102,7 +102,7 @@ 
class GTSAM_EXPORT HybridValues { /// Check whether a variable with key \c j exists in values. bool existsNonlinear(Key j) { - return (nonlinear_.find(j) != nonlinear_.end()); + return nonlinear_.exists(j); }; /// Check whether a variable with key \c j exists. diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 068a2031c6..425e93415f 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -129,10 +129,7 @@ TEST(HybridBayesNet, evaluateHybrid) { TEST(HybridBayesNet, Choose) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; @@ -163,10 +160,7 @@ TEST(HybridBayesNet, Choose) { TEST(HybridBayesNet, OptimizeAssignment) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp index 7307c4a687..64ded7cc34 100644 --- a/gtsam/linear/LossFunctions.cpp +++ b/gtsam/linear/LossFunctions.cpp @@ -40,6 +40,10 @@ Vector Base::weight(const Vector& error) const { return w; } +Vector Base::sqrtWeight(const Vector &error) const { + return weight(error).cwiseSqrt(); +} + // The following three functions re-weight block matrices and a vector // according to their weight implementation diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index d9cfc1f3c3..ee05cb9871 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -115,9 +115,7 @@ class GTSAM_EXPORT Base { Vector weight(const Vector &error) const; /** square root version of the weight function */ - Vector sqrtWeight(const Vector &error) const { - return weight(error).cwiseSqrt(); - } + Vector sqrtWeight(const Vector &error) const; /** reweight block matrices and a vector according to their weight * implementation */ diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 7108a7660f..8c6b5fd555 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -239,6 +239,8 @@ void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const whitenInPlace(b); } +Matrix Gaussian::information() const { return R().transpose() * R(); } + /* ************************************************************************* */ // Diagonal /* ************************************************************************* */ @@ -284,6 +286,11 @@ Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) { full: return Diagonal::shared_ptr(new Diagonal(sigmas)); } +/* ************************************************************************* */ +Diagonal::shared_ptr Diagonal::Precisions(const Vector& precisions, + bool smart) { + return Variances(precisions.array().inverse(), smart); +} /* ************************************************************************* */ void Diagonal::print(const string& name) const { gtsam::print(sigmas_, name + "diagonal sigmas "); @@ -294,22 +301,18 @@ Vector Diagonal::whiten(const Vector& v) const { return v.cwiseProduct(invsigmas_); } -/* ************************************************************************* */ Vector Diagonal::unwhiten(const 
Vector& v) const { return v.cwiseProduct(sigmas_); } -/* ************************************************************************* */ Matrix Diagonal::Whiten(const Matrix& H) const { return vector_scale(invsigmas(), H); } -/* ************************************************************************* */ void Diagonal::WhitenInPlace(Matrix& H) const { vector_scale_inplace(invsigmas(), H); } -/* ************************************************************************* */ void Diagonal::WhitenInPlace(Eigen::Block H) const { H = invsigmas().asDiagonal() * H; } @@ -377,6 +380,32 @@ Vector Constrained::whiten(const Vector& v) const { return c; } +/* ************************************************************************* */ +Constrained::shared_ptr Constrained::MixedSigmas(const Vector& sigmas) { + return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas); +} + +Constrained::shared_ptr Constrained::MixedSigmas(double m, + const Vector& sigmas) { + return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas); +} + +Constrained::shared_ptr Constrained::MixedVariances(const Vector& mu, + const Vector& variances) { + return shared_ptr(new Constrained(mu, variances.cwiseSqrt())); +} +Constrained::shared_ptr Constrained::MixedVariances(const Vector& variances) { + return shared_ptr(new Constrained(variances.cwiseSqrt())); +} + +Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& mu, + const Vector& precisions) { + return MixedVariances(mu, precisions.array().inverse()); +} +Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& precisions) { + return MixedVariances(precisions.array().inverse()); +} + /* ************************************************************************* */ double Constrained::squaredMahalanobisDistance(const Vector& v) const { Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements @@ -623,6 +652,11 @@ void Unit::print(const std::string& name) const { cout << name << "unit (" << dim_ << ") " << endl; } +/* ************************************************************************* */ +double Unit::squaredMahalanobisDistance(const Vector& v) const { + return v.dot(v); +} + /* ************************************************************************* */ // Robust /* ************************************************************************* */ @@ -663,6 +697,13 @@ void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{ robust_->reweight(A1,A2,A3,b); } +Vector Robust::unweightedWhiten(const Vector& v) const { + return noise_->unweightedWhiten(v); +} +double Robust::weight(const Vector& v) const { + return robust_->weight(v.norm()); +} + Robust::shared_ptr Robust::Create( const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){ return shared_ptr(new Robust(robust,noise)); diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 7bcf808e50..447121848e 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -255,7 +255,7 @@ namespace gtsam { virtual Matrix R() const { return thisR();} /// Compute information matrix - virtual Matrix information() const { return R().transpose() * R(); } + virtual Matrix information() const; /// Compute covariance matrix virtual Matrix covariance() const; @@ -319,9 +319,7 @@ namespace gtsam { * A diagonal noise model created by specifying a Vector of precisions, i.e. * i.e. 
the diagonal of the information matrix, i.e., weights */ - static shared_ptr Precisions(const Vector& precisions, bool smart = true) { - return Variances(precisions.array().inverse(), smart); - } + static shared_ptr Precisions(const Vector& precisions, bool smart = true); void print(const std::string& name) const override; Vector sigmas() const override { return sigmas_; } @@ -426,39 +424,27 @@ namespace gtsam { * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedSigmas(const Vector& sigmas) { - return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas); - } + static shared_ptr MixedSigmas(const Vector& sigmas); /** * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedSigmas(double m, const Vector& sigmas) { - return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas); - } + static shared_ptr MixedSigmas(double m, const Vector& sigmas); /** * A diagonal noise model created by specifying a Vector of * standard devations, some of which might be zero */ - static shared_ptr MixedVariances(const Vector& mu, const Vector& variances) { - return shared_ptr(new Constrained(mu, variances.cwiseSqrt())); - } - static shared_ptr MixedVariances(const Vector& variances) { - return shared_ptr(new Constrained(variances.cwiseSqrt())); - } + static shared_ptr MixedVariances(const Vector& mu, const Vector& variances); + static shared_ptr MixedVariances(const Vector& variances); /** * A diagonal noise model created by specifying a Vector of * precisions, some of which might be inf */ - static shared_ptr MixedPrecisions(const Vector& mu, const Vector& precisions) { - return MixedVariances(mu, precisions.array().inverse()); - } - static shared_ptr MixedPrecisions(const Vector& precisions) { - return MixedVariances(precisions.array().inverse()); - } + static shared_ptr MixedPrecisions(const Vector& mu, const Vector& precisions); + static shared_ptr MixedPrecisions(const Vector& precisions); /** * The squaredMahalanobisDistance function for a constrained noisemodel, @@ -616,7 +602,7 @@ namespace gtsam { bool isUnit() const override { return true; } void print(const std::string& name) const override; - double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); } + double squaredMahalanobisDistance(const Vector& v) const override; Vector whiten(const Vector& v) const override { return v; } Vector unwhiten(const Vector& v) const override { return v; } Matrix Whiten(const Matrix& H) const override { return H; } @@ -710,12 +696,8 @@ namespace gtsam { void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override; void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override; - Vector unweightedWhiten(const Vector& v) const override { - return noise_->unweightedWhiten(v); - } - double weight(const Vector& v) const override { - return robust_->weight(v.norm()); - } + Vector unweightedWhiten(const Vector& v) const override; + double weight(const Vector& v) const override; static shared_ptr Create( const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise); diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index a7af7d8d8c..1414121597 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -136,6 +136,17 @@ void GaussianFactorGraphSystem::rightPrecondition(const Vector &x, preconditioner_.transposeSolve(x, y); } 
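Note: the NoiseModel and GaussianFactorGraphSystem hunks above only move member-function bodies out of the headers into the corresponding .cpp files; the public API and behaviour are unchanged. A minimal sketch of two of the noise-model factories whose definitions moved, with made-up precision/sigma values (assumes the usual gtsam namespace):

    #include <gtsam/linear/NoiseModel.h>
    using namespace gtsam;

    // Diagonal model specified by precisions (inverse variances)
    auto diag = noiseModel::Diagonal::Precisions(Vector3(1.0, 1.0, 100.0));
    // Mixed constrained model: a zero sigma marks a hard constraint
    auto mixed = noiseModel::Constrained::MixedSigmas(Vector3(0.1, 0.1, 0.0));
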
+/**********************************************************************************/ +void GaussianFactorGraphSystem::scal(const double alpha, Vector &x) const { + x *= alpha; +} +double GaussianFactorGraphSystem::dot(const Vector &x, const Vector &y) const { + return x.dot(y); +} +void GaussianFactorGraphSystem::axpy(const double alpha, const Vector &x, + Vector &y) const { + y += alpha * x; +} /**********************************************************************************/ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering, const map & dimensions) { diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index 198b77ec8d..cb90ae5200 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -102,15 +102,9 @@ class GTSAM_EXPORT GaussianFactorGraphSystem { void multiply(const Vector &x, Vector& y) const; void leftPrecondition(const Vector &x, Vector &y) const; void rightPrecondition(const Vector &x, Vector &y) const; - inline void scal(const double alpha, Vector &x) const { - x *= alpha; - } - inline double dot(const Vector &x, const Vector &y) const { - return x.dot(y); - } - inline void axpy(const double alpha, const Vector &x, Vector &y) const { - y += alpha * x; - } + void scal(const double alpha, Vector &x) const; + double dot(const Vector &x, const Vector &y) const; + void axpy(const double alpha, const Vector &x, Vector &y) const; void getb(Vector &b) const; }; diff --git a/gtsam/nonlinear/ISAM2-impl.h b/gtsam/nonlinear/ISAM2-impl.h index eb2285b281..7b76305820 100644 --- a/gtsam/nonlinear/ISAM2-impl.h +++ b/gtsam/nonlinear/ISAM2-impl.h @@ -436,36 +436,6 @@ struct GTSAM_EXPORT UpdateImpl { } } - /** - * Apply expmap to the given values, but only for indices appearing in - * \c mask. Values are expmapped in-place. - * \param mask Mask on linear indices, only \c true entries are expmapped - */ - static void ExpmapMasked(const VectorValues& delta, const KeySet& mask, - Values* theta) { - gttic(ExpmapMasked); - assert(theta->size() == delta.size()); - Values::iterator key_value; - VectorValues::const_iterator key_delta; -#ifdef GTSAM_USE_TBB - for (key_value = theta->begin(); key_value != theta->end(); ++key_value) { - key_delta = delta.find(key_value->key); -#else - for (key_value = theta->begin(), key_delta = delta.begin(); - key_value != theta->end(); ++key_value, ++key_delta) { - assert(key_value->key == key_delta->first); -#endif - Key var = key_value->key; - assert(static_cast(delta[var].size()) == key_value->value.dim()); - assert(delta[var].allFinite()); - if (mask.exists(var)) { - Value* retracted = key_value->value.retract_(delta[var]); - key_value->value = *retracted; - retracted->deallocate_(); - } - } - } - // Linearize new factors void linearizeNewFactors(const NonlinearFactorGraph& newFactors, const Values& theta, size_t numNonlinearFactors, diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index f56b23777c..1c15469ccb 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -454,7 +454,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, update.findFluid(roots_, relinKeys, &result.markedKeys, result.details()); // 6. Update linearization point for marked variables: // \Theta_{J}:=\Theta_{J}+\Delta_{J}. 
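Note: the deleted UpdateImpl::ExpmapMasked helper is superseded by the Values::retractMasked member added later in this diff; ISAM2 now calls it directly, as the next hunk shows. A minimal usage sketch with a single hypothetical Pose3 variable under key 0:

    Values theta;
    theta.insert(0, Pose3());          // linearization point
    VectorValues delta;
    delta.insert(0, Vector6::Zero());  // linear update for key 0
    KeySet mask;
    mask.insert(0);                    // only keys in the mask are retracted
    theta.retractMasked(delta, mask);  // updates theta in place
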
- UpdateImpl::ExpmapMasked(delta_, relinKeys, &theta_); + theta_.retractMasked(delta_, relinKeys); } result.variablesRelinearized = result.markedKeys.size(); } diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp index dfa54f26f0..84c15c0042 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.cpp +++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp @@ -285,8 +285,8 @@ static Scatter scatterFromValues(const Values& values) { scatter.reserve(values.size()); // use "natural" ordering with keys taken from the initial values - for (const auto key_value : values) { - scatter.add(key_value.key, key_value.value.dim()); + for (const auto& key_dim : values.dims()) { + scatter.add(key_dim.first, key_dim.second); } return scatter; diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index 598963761d..a354e0139f 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -25,10 +25,8 @@ #pragma once #include - #include - -#include // Only so Eclipse finds class definition +#include namespace gtsam { @@ -95,13 +93,13 @@ namespace gtsam { std::map Values::extract(const std::function& filterFcn) const { std::map result; - for (const auto& key_value : *this) { + for (const auto& key_value : values_) { // Check if key matches - if (filterFcn(key_value.key)) { + if (filterFcn(key_value.first)) { // Check if type matches (typically does as symbols matched with types) if (auto t = - dynamic_cast*>(&key_value.value)) - result[key_value.key] = t->value(); + dynamic_cast*>(key_value.second)) + result[key_value.first] = t->value(); } } return result; @@ -109,6 +107,8 @@ namespace gtsam { /* ************************************************************************* */ #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include + template class Values::Filtered { public: diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp index adadc99c06..c7321c904d 100644 --- a/gtsam/nonlinear/Values.cpp +++ b/gtsam/nonlinear/Values.cpp @@ -25,8 +25,6 @@ #include #include -#include - #include #include #include @@ -52,15 +50,15 @@ namespace gtsam { /* ************************************************************************* */ Values::Values(const Values& other, const VectorValues& delta) { - for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) { - VectorValues::const_iterator it = delta.find(key_value->key); - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument + for (const auto& key_value : other.values_) { + VectorValues::const_iterator it = delta.find(key_value.first); + Key key = key_value.first; // Non-const duplicate to deal with non-const insert argument if (it != delta.end()) { const Vector& v = it->second; - Value* retractedValue(key_value->value.retract_(v)); // Retract + Value* retractedValue(key_value.second->retract_(v)); // Retract values_.insert(key, retractedValue); // Add retracted result directly to result values } else { - values_.insert(key, key_value->value.clone_()); // Add original version to result values + values_.insert(key, key_value.second->clone_()); // Add original version to result values } } } @@ -69,9 +67,9 @@ namespace gtsam { void Values::print(const string& str, const KeyFormatter& keyFormatter) const { cout << str << (str.empty() ? 
"" : "\n"); cout << "Values with " << size() << " values:\n"; - for(const_iterator key_value = begin(); key_value != end(); ++key_value) { - cout << "Value " << keyFormatter(key_value->key) << ": "; - key_value->value.print(""); + for (const auto& key_value : values_) { + cout << "Value " << keyFormatter(key_value.first) << ": "; + key_value.second->print(""); cout << "\n"; } } @@ -80,12 +78,12 @@ namespace gtsam { bool Values::equals(const Values& other, double tol) const { if (this->size() != other.size()) return false; - for (const_iterator it1 = this->begin(), it2 = other.begin(); - it1 != this->end(); ++it1, ++it2) { - const Value& value1 = it1->value; - const Value& value2 = it2->value; - if (typeid(value1) != typeid(value2) || it1->key != it2->key - || !value1.equals_(value2, tol)) { + for (auto it1 = values_.begin(), it2 = other.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + const Value* value1 = it1->second; + const Value* value2 = it2->second; + if (typeid(*value1) != typeid(*value2) || it1->first != it2->first + || !value1->equals_(*value2, tol)) { return false; } } @@ -102,17 +100,44 @@ namespace gtsam { return Values(*this, delta); } + /* ************************************************************************* */ + void Values::retractMasked(const VectorValues& delta, const KeySet& mask) { + gttic(retractMasked); + assert(this->size() == delta.size()); + auto key_value = values_.begin(); + VectorValues::const_iterator key_delta; +#ifdef GTSAM_USE_TBB + for (; key_value != values_.end(); ++key_value) { + key_delta = delta.find(key_value->first); +#else + for (key_delta = delta.begin(); key_value != values_.end(); + ++key_value, ++key_delta) { + assert(key_value->first == key_delta->first); +#endif + Key var = key_value->first; + assert(static_cast(delta[var].size()) == key_value->second->dim()); + assert(delta[var].allFinite()); + if (mask.exists(var)) { + Value* retracted = key_value->second->retract_(delta[var]); + // TODO(dellaert): can we use std::move here? + *(key_value->second) = *retracted; + retracted->deallocate_(); + } + } + } + /* ************************************************************************* */ VectorValues Values::localCoordinates(const Values& cp) const { if(this->size() != cp.size()) throw DynamicValuesMismatched(); VectorValues result; - for(const_iterator it1=this->begin(), it2=cp.begin(); it1!=this->end(); ++it1, ++it2) { - if(it1->key != it2->key) + for (auto it1 = values_.begin(), it2 = cp.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + if(it1->first != it2->first) throw DynamicValuesMismatched(); // If keys do not match // Will throw a dynamic_cast exception if types do not match // NOTE: this is separate from localCoordinates(cp, ordering, result) due to at() vs. 
insert - result.insert(it1->key, it1->value.localCoordinates_(it2->value)); + result.insert(it1->first, it1->second->localCoordinates_(*it2->second)); } return result; } @@ -130,24 +155,26 @@ namespace gtsam { /* ************************************************************************* */ void Values::insert(Key j, const Value& val) { - std::pair insertResult = tryInsert(j, val); + auto insertResult = values_.insert(j, val.clone_()); if(!insertResult.second) throw ValuesKeyAlreadyExists(j); } /* ************************************************************************* */ - void Values::insert(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument - insert(key, key_value->value); + void Values::insert(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + insert(key_value->first, *(key_value->second)); } } /* ************************************************************************* */ +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 std::pair Values::tryInsert(Key j, const Value& value) { std::pair result = values_.insert(j, value.clone_()); return std::make_pair(boost::make_transform_iterator(result.first, &make_deref_pair), result.second); } +#endif /* ************************************************************************* */ void Values::update(Key j, const Value& val) { @@ -165,9 +192,10 @@ namespace gtsam { } /* ************************************************************************* */ - void Values::update(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - this->update(key_value->key, key_value->value); + void Values::update(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->update(key_value->first, *(key_value->second)); } } @@ -183,10 +211,10 @@ namespace gtsam { } /* ************************************************************************ */ - void Values::insert_or_assign(const Values& values) { - for (const_iterator key_value = values.begin(); key_value != values.end(); - ++key_value) { - this->insert_or_assign(key_value->key, key_value->value); + void Values::insert_or_assign(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->insert_or_assign(key_value->first, *(key_value->second)); } } @@ -202,8 +230,16 @@ namespace gtsam { KeyVector Values::keys() const { KeyVector result; result.reserve(size()); - for(const_iterator key_value = begin(); key_value != end(); ++key_value) - result.push_back(key_value->key); + for(const auto& key_value: values_) + result.push_back(key_value.first); + return result; + } + + /* ************************************************************************* */ + KeySet Values::keySet() const { + KeySet result; + for(const auto& key_value: values_) + result.insert(key_value.first); return result; } @@ -217,8 +253,17 @@ namespace gtsam { /* ************************************************************************* */ size_t Values::dim() const { size_t result = 0; - for(const auto key_value: *this) { - result += key_value.value.dim(); + for (const auto key_value : values_) { + result += key_value->second->dim(); + } + return result; + } + + /* ************************************************************************* */ + std::map 
Values::dims() const { + std::map result; + for (const auto key_value : values_) { + result.emplace(key_value->first, key_value->second->dim()); } return result; } @@ -226,8 +271,8 @@ namespace gtsam { /* ************************************************************************* */ VectorValues Values::zeroVectors() const { VectorValues result; - for(const auto key_value: *this) - result.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for (const auto key_value : values_) + result.insert(key_value->first, Vector::Zero(key_value->second->dim())); return result; } diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 79ddb02674..161df2cba1 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -28,10 +28,10 @@ #include #include #include -#include #include #include #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include #include #endif @@ -81,10 +81,6 @@ namespace gtsam { // The member to store the values, see just above KeyValueMap values_; - // Types obtained by iterating - typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; - typedef KeyValueMap::iterator::value_type KeyValuePtrPair; - public: /// A shared_ptr to this class @@ -110,22 +106,6 @@ namespace gtsam { ConstKeyValuePair(const KeyValuePair& kv) : key(kv.key), value(kv.value) {} }; - /// Mutable forward iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::iterator> iterator; - - /// Const forward iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_iterator> const_iterator; - - /// Mutable reverse iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::reverse_iterator> reverse_iterator; - - /// Const reverse iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; - typedef KeyValuePair value_type; /** Default constructor creates an empty Values class */ @@ -191,47 +171,24 @@ namespace gtsam { template boost::optional exists(Key j) const; - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } - - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. */ - iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. 
*/ - const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); } - /** The number of variables in this config */ size_t size() const { return values_.size(); } /** whether the config is empty */ bool empty() const { return values_.empty(); } - const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } - const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } - iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } - iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } - const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } - const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } - reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } - reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } - /// @name Manifold Operations /// @{ /** Add a delta config to current config and returns a new config */ Values retract(const VectorValues& delta) const; + /** + * Retract, but only for Keys appearing in \c mask. In-place. + * \param mask Mask on Keys where to apply retract. + */ + void retractMasked(const VectorValues& delta, const KeySet& mask); + /** Get a delta config about a linearization point c0 (*this) */ VectorValues localCoordinates(const Values& cp) const; @@ -252,12 +209,6 @@ namespace gtsam { /// version for double void insertDouble(Key j, double c) { insert(j,c); } - /** insert that mimics the STL map insert - if the value already exists, the map is not modified - * and an iterator to the existing value is returned, along with 'false'. If the value did not - * exist, it is inserted and an iterator pointing to the new element, along with 'true', is - * returned. */ - std::pair tryInsert(Key j, const Value& value); - /** single element change of existing element */ void update(Key j, const Value& val); @@ -288,11 +239,16 @@ namespace gtsam { void erase(Key j); /** - * Returns a set of keys in the config + * Returns a vector of keys in the config. * Note: by construction, the list is ordered */ KeyVector keys() const; + /** + * Returns a set of keys in the config. 
+ */ + KeySet keySet() const; + /** Replace all keys and variables */ Values& operator=(const Values& rhs); @@ -305,6 +261,9 @@ namespace gtsam { /** Compute the total dimensionality of all values (\f$ O(n) \f$) */ size_t dim() const; + /** Return all dimensions in a map (\f$ O(n log n) \f$) */ + std::map dims() const; + /** Return a VectorValues of zero vectors for each variable in this Values */ VectorValues zeroVectors() const; @@ -312,8 +271,8 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto key_value : *this) { - if (dynamic_cast*>(&key_value.value)) + for (const auto key_value : values_) { + if (dynamic_cast*>(key_value.second)) ++i; } return i; @@ -343,6 +302,67 @@ namespace gtsam { extract(const std::function& filterFcn = &_truePredicate) const; #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 + // Types obtained by iterating + typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; + typedef KeyValueMap::iterator::value_type KeyValuePtrPair; + + /// Mutable forward iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::iterator> iterator; + + /// Const forward iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_iterator> const_iterator; + + /// Mutable reverse iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::reverse_iterator> reverse_iterator; + + /// Const reverse iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; + + /** insert that mimics the STL map insert - if the value already exists, the map is not modified + * and an iterator to the existing value is returned, along with 'false'. If the value did not + * exist, it is inserted and an iterator pointing to the new element, along with 'true', is + * returned. */ + std::pair tryInsert(Key j, const Value& value); + + static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { + return ConstKeyValuePair(key_value.first, *key_value.second); } + + static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { + return KeyValuePair(key_value.first, *key_value.second); } + + const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } + const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } + iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } + iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } + const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } + const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } + reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } + reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. */ + iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. 
*/ + const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); } + + /** Find the element greater than or equal to the specified key. */ + iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } + + /** Find the element greater than or equal to the specified key. */ + const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); } + + /** Find the lowest-ordered element greater than the specified key. */ + iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } + + /** Find the lowest-ordered element greater than the specified key. */ + const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); } + /** A filtered view of a Values, returned from Values::filter. */ template class Filtered; @@ -395,12 +415,6 @@ namespace gtsam { ar & BOOST_SERIALIZATION_NVP(values_); } - static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { - return ConstKeyValuePair(key_value.first, *key_value.second); } - - static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { - return KeyValuePair(key_value.first, *key_value.second); } - }; /* ************************************************************************* */ diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h index 75e5a5135c..a055f3060f 100644 --- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h +++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h @@ -128,9 +128,10 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { noiseModelCache.resize(0); // for each of the variables, add a prior damped.reserve(damped.size() + values.size()); - for (const auto key_value : values) { - const Key key = key_value.key; - const size_t dim = key_value.value.dim(); + std::map dims = values.dims(); + for (const auto& key_dim : dims) { + const Key& key = key_dim.first; + const size_t& dim = key_dim.second; const CachedModel* item = getCachedModel(dim); damped += boost::make_shared(key, item->A, item->b, item->model); } diff --git a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp index 22ae4d73e6..9bd9ace98d 100644 --- a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp +++ b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp @@ -350,8 +350,8 @@ TEST(TestLinearContainerFactor, Rekey) { // For extra fun lets try linearizing this LCF gtsam::Values linearization_pt_rekeyed; - for (auto key_val : linearization_pt) { - linearization_pt_rekeyed.insert(key_map.at(key_val.key), key_val.value); + for (auto key : linearization_pt.keys()) { + linearization_pt_rekeyed.insert(key_map.at(key), linearization_pt.at(key)); } // Check independent values since we don't want to unnecessarily sort diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index 85dd2f4b3c..644b8c84f1 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -195,6 +195,7 @@ TEST(Values, basic_functions) values.insert(6, M1); values.insert(8, M2); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 // find EXPECT_LONGS_EQUAL(4, values.find(4)->key); EXPECT_LONGS_EQUAL(4, values_c.find(4)->key); @@ -210,7 +211,7 @@ TEST(Values, 
basic_functions) EXPECT_LONGS_EQUAL(6, values_c.upper_bound(4)->key); EXPECT_LONGS_EQUAL(4, values.upper_bound(3)->key); EXPECT_LONGS_EQUAL(4, values_c.upper_bound(3)->key); - +#endif } /* ************************************************************************* */ @@ -220,8 +221,8 @@ TEST(Values, retract_full) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta {{key1, Vector3(1.0, 1.1, 1.2)}, - {key2, Vector3(1.3, 1.4, 1.5)}}; + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(2.0, 3.1, 4.2)); @@ -238,7 +239,7 @@ TEST(Values, retract_partial) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta {{key2, Vector3(1.3, 1.4, 1.5)}}; + const VectorValues delta{{key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(1.0, 2.0, 3.0)); @@ -248,6 +249,24 @@ TEST(Values, retract_partial) CHECK(assert_equal(expected, Values(config0, delta))); } +/* ************************************************************************* */ +TEST(Values, retract_masked) +{ + Values config0; + config0.insert(key1, Vector3(1.0, 2.0, 3.0)); + config0.insert(key2, Vector3(5.0, 6.0, 7.0)); + + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; + + Values expected; + expected.insert(key1, Vector3(1.0, 2.0, 3.0)); + expected.insert(key2, Vector3(6.3, 7.4, 8.5)); + + config0.retractMasked(delta, {key2}); + CHECK(assert_equal(expected, config0)); +} + /* ************************************************************************* */ TEST(Values, equals) { diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h index f9b99e47e3..79e3fe813e 100644 --- a/gtsam/slam/InitializePose.h +++ b/gtsam/slam/InitializePose.h @@ -62,10 +62,10 @@ static Values computePoses(const Values& initialRot, // Upgrade rotations to full poses Values initialPose; - for (const auto key_value : initialRot) { - Key key = key_value.key; - const auto& rot = initialRot.at(key); - Pose initializedPose = Pose(rot, origin); + for (const auto& key_rot : initialRot.extract()) { + const Key& key = key_rot.first; + const auto& rot = key_rot.second; + const Pose initializedPose(rot, origin); initialPose.insert(key, initializedPose); } @@ -82,14 +82,14 @@ static Values computePoses(const Values& initialRot, params.setVerbosity("TERMINATION"); } GaussNewtonOptimizer optimizer(*posegraph, initialPose, params); - Values GNresult = optimizer.optimize(); + const Values GNresult = optimizer.optimize(); // put into Values structure Values estimate; - for (const auto key_value : GNresult) { - Key key = key_value.key; + for (const auto& key_pose : GNresult.extract()) { + const Key& key = key_pose.first; if (key != kAnchorKey) { - const Pose& pose = GNresult.at(key); + const Pose& pose = key_pose.second; estimate.insert(key, pose); } } diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 6bb1b0f360..e8ec9181c0 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -122,12 +122,12 @@ Values InitializePose3::computeOrientationsGradient( gttic(InitializePose3_computeOrientationsGradient); // this works on the inverse rotations, according to Tron&Vidal,2011 - Values inverseRot; - inverseRot.insert(initialize::kAnchorKey, Rot3()); - for(const auto key_value: givenGuess) { - Key key = key_value.key; - const Pose3& pose = givenGuess.at(key); 
- inverseRot.insert(key, pose.rotation().inverse()); + std::map inverseRot; + inverseRot.emplace(initialize::kAnchorKey, Rot3()); + for(const auto& key_pose: givenGuess.extract()) { + const Key& key = key_pose.first; + const Pose3& pose = key_pose.second; + inverseRot.emplace(key, pose.rotation().inverse()); } // Create the map of edges incident on each node @@ -138,10 +138,8 @@ Values InitializePose3::computeOrientationsGradient( // calculate max node degree & allocate gradient size_t maxNodeDeg = 0; - VectorValues grad; - for(const auto key_value: inverseRot) { - Key key = key_value.key; - grad.insert(key,Z_3x1); + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; size_t currNodeDeg = (adjEdgesMap.at(key)).size(); if(currNodeDeg > maxNodeDeg) maxNodeDeg = currNodeDeg; @@ -162,28 +160,29 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // compute the gradient at each node maxGrad = 0; - for (const auto key_value : inverseRot) { - Key key = key_value.key; + VectorValues grad; + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; + const Rot3& Ri = key_R.second; Vector gradKey = Z_3x1; // collect the gradient for each edge incident on key for (const size_t& factorId : adjEdgesMap.at(key)) { - Rot3 Rij = factorId2RotMap.at(factorId); - Rot3 Ri = inverseRot.at(key); + const Rot3& Rij = factorId2RotMap.at(factorId); auto factor = pose3Graph.at(factorId); const auto& keys = factor->keys(); if (key == keys[0]) { Key key1 = keys[1]; - Rot3 Rj = inverseRot.at(key1); + const Rot3& Rj = inverseRot.at(key1); gradKey = gradKey + gradientTron(Ri, Rij * Rj, a, b); } else if (key == keys[1]) { Key key0 = keys[0]; - Rot3 Rj = inverseRot.at(key0); + const Rot3& Rj = inverseRot.at(key0); gradKey = gradKey + gradientTron(Ri, Rij.between(Rj), a, b); } else { cout << "Error in gradient computation" << endl; } } // end of i-th gradient computation - grad.at(key) = stepsize * gradKey; + grad.insert(key, stepsize * gradKey); double normGradKey = (gradKey).norm(); if(normGradKey>maxGrad) @@ -192,8 +191,12 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // update estimates - inverseRot = inverseRot.retract(grad); - + for (auto& key_R : inverseRot) { + const Key& key = key_R.first; + Rot3& Ri = key_R.second; + Ri = Ri.retract(grad.at(key)); + } + ////////////////////////////////////////////////////////////////////////// // check stopping condition if (it>20 && maxGrad < 5e-3) @@ -201,12 +204,13 @@ Values InitializePose3::computeOrientationsGradient( } // enf of gradient iterations // Return correct rotations - const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior + const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior Values estimateRot; - for(const auto key_value: inverseRot) { - Key key = key_value.key; + for (const auto key_R : inverseRot) { + const Key& key = key_R.first; + const Rot3& Ri = key_R.second; if (key != initialize::kAnchorKey) { - const Rot3& R = inverseRot.at(key); + const Rot3& R = inverseRot.at(key); if(setRefFrame) estimateRot.insert(key, Rref.compose(R.inverse())); else diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index db79fcd3c0..9cb12aefba 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -586,9 +586,9 @@ void 
save2D(const NonlinearFactorGraph &graph, const Values &config, fstream stream(filename.c_str(), fstream::out); // save poses - for (const auto key_value : config) { - const Pose2 &pose = key_value.value.cast(); - stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() + for (const auto &key_pose : config.extract()) { + const Pose2 &pose = key_pose.second; + stream << "VERTEX2 " << key_pose.first << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } @@ -635,45 +635,33 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, auto index = [](gtsam::Key key) { return Symbol(key).index(); }; // save 2D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose2 &pose = p->value(); - stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + for (const auto &pair : estimate.extract()) { + const Pose2 &pose = pair.second; + stream << "VERTEX_SE2 " << index(pair.first) << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose3 &pose = p->value(); + for (const auto &pair : estimate.extract()) { + const Pose3 &pose = pair.second; const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " + stream << "VERTEX_SE3:QUAT " << index(pair.first) << " " << t.x() << " " << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w() << endl; } // save 2D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point2 &point = p->value(); - stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + for (const auto &pair : estimate.extract()) { + const Point2 &point = pair.second; + stream << "VERTEX_XY " << index(pair.first) << " " << point.x() << " " << point.y() << endl; } // save 3D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point3 &point = p->value(); - stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + for (const auto &pair : estimate.extract()) { + const Point3 &point = pair.second; + stream << "VERTEX_TRACKXYZ " << index(pair.first) << " " << point.x() << " " << point.y() << " " << point.z() << endl; } diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp index 0b74f06d7b..5d429796d1 100644 --- a/gtsam/slam/tests/testLago.cpp +++ b/gtsam/slam/tests/testLago.cpp @@ -283,9 +283,10 @@ TEST( Lago, largeGraphNoisy_orientations ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-5)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + EXPECT(assert_equal(pose, actual.at(k), 1e-5)); } } @@ -309,9 +310,10 @@ TEST( Lago, largeGraphNoisy ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-2)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + 
diff --git a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
index 1494d784b3..52a45b6d0f 100644
--- a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
+++ b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
@@ -308,12 +308,12 @@ int main(int argc, char** argv) {
   // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds
   cout << "After 15.0 seconds, each version contains to the following keys:" << endl;
   cout << " Concurrent Filter Keys: " << endl;
-  for(const auto key_value: concurrentFilter.getLinearizationPoint()) {
-    cout << setprecision(5) << " Key: " << key_value.key << endl;
+  for(const auto key: concurrentFilter.getLinearizationPoint().keys()) {
+    cout << setprecision(5) << " Key: " << key << endl;
   }
   cout << " Concurrent Smoother Keys: " << endl;
-  for(const auto key_value: concurrentSmoother.getLinearizationPoint()) {
-    cout << setprecision(5) << " Key: " << key_value.key << endl;
+  for(const auto key: concurrentSmoother.getLinearizationPoint().keys()) {
+    cout << setprecision(5) << " Key: " << key << endl;
   }
   cout << " Fixed-Lag Smoother Keys: " << endl;
   for(const auto& key_timestamp: fixedlagSmoother.timestamps()) {
diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
index 18c664934d..f5280ceff4 100644
--- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
+++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
@@ -59,8 +59,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for (const auto key_value : newTheta) {
-    ordering_.push_back(key_value.key);
+  for (const auto key : newTheta.keys()) {
+    ordering_.push_back(key);
   }
   // Augment Delta
   delta_.insert(newTheta.zeroVectors());
@@ -161,8 +161,8 @@ void BatchFixedLagSmoother::eraseKeys(const KeyVector& keys) {
     factorIndex_.erase(key);
     // Erase the key from the set of linearized keys
-    if (linearKeys_.exists(key)) {
-      linearKeys_.erase(key);
+    if (linearValues_.exists(key)) {
+      linearValues_.erase(key);
     }
   }
@@ -186,8 +186,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
   // Create output result structure
   Result result;
-  result.nonlinearVariables = theta_.size() - linearKeys_.size();
-  result.linearVariables = linearKeys_.size();
+  result.nonlinearVariables = theta_.size() - linearValues_.size();
+  result.linearVariables = linearValues_.size();
   // Set optimization parameters
   double lambda = parameters_.lambdaInitial;
@@ -265,10 +265,10 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
         // Reset the deltas to zeros
         delta_.setZero();
         // Put the linearization points and deltas back for specific variables
-        if (enforceConsistency_ && (linearKeys_.size() > 0)) {
-          theta_.update(linearKeys_);
-          for(const auto key_value: linearKeys_) {
-            delta_.at(key_value.key) = newDelta.at(key_value.key);
+        if (enforceConsistency_ && (linearValues_.size() > 0)) {
+          theta_.update(linearValues_);
+          for(const auto key: linearValues_.keys()) {
+            delta_.at(key) = newDelta.at(key);
           }
         }
         // Decrease lambda for next time
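The fixed-lag smoother hunks above switch from dereferencing key/value pairs to Values::keys() whenever only the keys are needed, for example when appending new variables to the elimination ordering. A small illustrative sketch of that pattern (variable names are made up for the example):

    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Ordering.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam/nonlinear/Values.h>

    int main() {
      using namespace gtsam;
      Values newTheta;
      newTheta.insert(Symbol('x', 0), Pose2(0.0, 0.0, 0.0));
      newTheta.insert(Symbol('x', 1), Pose2(1.0, 0.0, 0.0));

      // Append the new variable keys to the end of an ordering; the values
      // themselves are not needed here, so keys() is enough.
      Ordering ordering;
      for (const auto key : newTheta.keys()) {
        ordering.push_back(key);
      }
      return 0;
    }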
diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h
index 94cf130cfa..79bd22f9da 100644
--- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h
+++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h
@@ -136,8 +136,8 @@ class GTSAM_UNSTABLE_EXPORT BatchFixedLagSmoother : public FixedLagSmoother {
   /** The current linearization point **/
   Values theta_;
-  /** The set of keys involved in current linear factors. These keys should not be relinearized. **/
-  Values linearKeys_;
+  /** The set of values involved in current linear factors. **/
+  Values linearValues_;
   /** The current ordering */
   Ordering ordering_;
diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
index 83d0ab719f..a31a759205 100644
--- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
@@ -139,8 +139,8 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for(const auto key_value: newTheta) {
-    ordering_.push_back(key_value.key);
+  for (const auto key : newTheta.keys()) {
+    ordering_.push_back(key);
   }
   // Augment Delta
   delta_.insert(newTheta.zeroVectors());
@@ -221,10 +221,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
   if(debug) { PrintNonlinearFactorGraph(smootherSummarization_, "ConcurrentBatchFilter::synchronize ", "Updated Smoother Summarization:", DefaultKeyFormatter); }
   // Find the set of new separator keys
-  KeySet newSeparatorKeys;
-  for(const auto key_value: separatorValues_) {
-    newSeparatorKeys.insert(key_value.key);
-  }
+  const KeySet newSeparatorKeys = separatorValues_.keySet();
   if(debug) { PrintKeys(newSeparatorKeys, "ConcurrentBatchFilter::synchronize ", "Current Separator Keys:"); }
@@ -236,9 +233,9 @@
     graph.push_back(smootherShortcut_);
     Values values;
     values.insert(smootherSummarizationValues);
-    for(const auto key_value: separatorValues_) {
-      if(!values.exists(key_value.key)) {
-        values.insert(key_value.key, key_value.value);
+    for(const auto key: newSeparatorKeys) {
+      if(!values.exists(key)) {
+        values.insert(key, separatorValues_.at(key));
       }
     }
     // Calculate the summarized factor on just the new separator keys
@@ -471,8 +468,8 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
       // Put the linearization points and deltas back for specific variables
       if(linearValues.size() > 0) {
         theta.update(linearValues);
-        for(const auto key_value: linearValues) {
-          delta.at(key_value.key) = newDelta.at(key_value.key);
+        for(const auto key: linearValues.keys()) {
+          delta.at(key) = newDelta.at(key);
         }
       }
@@ -574,9 +571,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {
   // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove
   KeySet newSeparatorKeys = removedFactors.keys();
-  for(const auto key_value: separatorValues_) {
-    newSeparatorKeys.insert(key_value.key);
-  }
+  newSeparatorKeys.merge(separatorValues_.keySet());
   for(Key key: keysToMove) {
     newSeparatorKeys.erase(key);
   }
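Where a whole KeySet is wanted, the filter code above now calls Values::keySet() once instead of inserting keys in a loop, and unions further keys in with KeySet::merge. A rough sketch of the same idea (keys are illustrative):

    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Key.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam/nonlinear/Values.h>

    int main() {
      using namespace gtsam;
      Values separatorValues;
      separatorValues.insert(Symbol('x', 5), Pose2(0.0, 0.0, 0.0));

      // One call replaces the old insert-in-a-loop pattern.
      KeySet newSeparatorKeys = separatorValues.keySet();

      // Union in keys coming from elsewhere (here just one extra key).
      KeySet affectedKeys;
      affectedKeys.insert(Symbol('x', 6).key());
      newSeparatorKeys.merge(affectedKeys);
      return 0;
    }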
diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
index 75d491bde7..94a7d4c220 100644
--- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
@@ -61,8 +61,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for(const auto key_value: newTheta) {
-    ordering_.push_back(key_value.key);
+  for(const auto key: newTheta.keys()) {
+    ordering_.push_back(key);
   }
   // Augment Delta
@@ -135,26 +135,30 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
   removeFactors(filterSummarizationSlots_);
   // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
-  for(const auto key_value: smootherValues) {
-    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
-    if(iter_inserted.second) {
-      // If the insert succeeded
-      ordering_.push_back(key_value.key);
-      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
+  for(const auto key: smootherValues.keys()) {
+    if(!theta_.exists(key)) {
+      // If this a new key for theta_, also add to ordering and delta.
+      const auto& value = smootherValues.at(key);
+      delta_.insert(key, Vector::Zero(value.dim()));
+      theta_.insert(key, value);
+      ordering_.push_back(key);
     } else {
-      // If the element already existed in theta_
-      iter_inserted.first->value = key_value.value;
+      // If the key already existed in theta_, just update.
+      const auto& value = smootherValues.at(key);
+      theta_.update(key, value);
     }
   }
-  for(const auto key_value: separatorValues) {
-    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
-    if(iter_inserted.second) {
-      // If the insert succeeded
-      ordering_.push_back(key_value.key);
-      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
+  for(const auto key: separatorValues.keys()) {
+    if(!theta_.exists(key)) {
+      // If this a new key for theta_, also add to ordering and delta.
+      const auto& value = separatorValues.at(key);
+      delta_.insert(key, Vector::Zero(value.dim()));
+      theta_.insert(key, value);
+      ordering_.push_back(key);
     } else {
-      // If the element already existed in theta_
-      iter_inserted.first->value = key_value.value;
+      // If the key already existed in theta_, just update.
+      const auto& value = separatorValues.at(key);
+      theta_.update(key, value);
     }
   }
@@ -322,8 +326,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
       // Put the linearization points and deltas back for specific variables
       if(separatorValues_.size() > 0) {
         theta_.update(separatorValues_);
-        for(const auto key_value: separatorValues_) {
-          delta_.at(key_value.key) = newDelta.at(key_value.key);
+        for(const auto key: separatorValues_.keys()) {
+          delta_.at(key) = newDelta.at(key);
         }
       }
@@ -371,10 +375,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() {
   }
   // Get the set of separator keys
-  gtsam::KeySet separatorKeys;
-  for(const auto key_value: separatorValues_) {
-    separatorKeys.insert(key_value.key);
-  }
+  const KeySet separatorKeys = separatorValues_.keySet();
   // Calculate the marginal factors on the separator
   smootherSummarization_ = internal::calculateMarginalFactors(graph, theta_, separatorKeys, parameters_.getEliminationFunction());
diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
index b9cf66a976..947c02cc02 100644
--- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
@@ -69,14 +69,14 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No
     int group = 1;
     // Set all existing variables to Group1
     if(isam2_.getLinearizationPoint().size() > 0) {
-      for(const auto key_value: isam2_.getLinearizationPoint()) {
-        orderingConstraints->operator[](key_value.key) = group;
+      for(const auto key: isam2_.getLinearizationPoint().keys()) {
+        orderingConstraints->operator[](key) = group;
       }
       ++group;
     }
     // Assign new variables to the root
-    for(const auto key_value: newTheta) {
-      orderingConstraints->operator[](key_value.key) = group;
+    for(const auto key: newTheta.keys()) {
+      orderingConstraints->operator[](key) = group;
     }
     // Set marginalizable variables to Group0
     for(Key key: *keysToMove){
@@ -201,8 +201,8 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth
   // Force iSAM2 not to relinearize anything during this iteration
   FastList<Key> noRelinKeys;
-  for(const auto key_value: isam2_.getLinearizationPoint()) {
-    noRelinKeys.push_back(key_value.key);
+  for(const auto key: isam2_.getLinearizationPoint().keys()) {
+    noRelinKeys.push_back(key);
   }
   // Calculate the summarized factor on just the new separator keys
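The synchronize() changes above drop Values::tryInsert in favour of an explicit exists() check followed by insert() or update(), adding the matching zero delta only for genuinely new variables. A compact sketch of that logic outside the smoother classes (all names and values here are illustrative):

    #include <gtsam/base/Vector.h>
    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam/linear/VectorValues.h>
    #include <gtsam/nonlinear/Values.h>

    int main() {
      using namespace gtsam;
      Values theta;        // current linearization point
      VectorValues delta;  // matching deltas
      theta.insert(Symbol('x', 0), Pose2(0.0, 0.0, 0.0));
      delta.insert(Symbol('x', 0), Vector::Zero(3));

      Values incoming;     // new values, possibly overlapping with theta
      incoming.insert(Symbol('x', 0), Pose2(0.1, 0.0, 0.0));
      incoming.insert(Symbol('x', 1), Pose2(1.0, 0.0, 0.0));

      for (const auto key : incoming.keys()) {
        const auto& value = incoming.at(key);
        if (!theta.exists(key)) {
          // Genuinely new variable: add it plus a zero delta of the right dimension.
          delta.insert(key, Vector::Zero(value.dim()));
          theta.insert(key, value);
        } else {
          // Already present: just move the linearization point.
          theta.update(key, value);
        }
      }
      return 0;
    }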
diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
index 3886d0e422..b82b080482 100644
--- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
@@ -66,9 +66,9 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
     // Also, mark the separator keys as fixed linearization points
     FastMap<Key, int> constrainedKeys;
     FastList<Key> noRelinKeys;
-    for(const auto key_value: separatorValues_) {
-      constrainedKeys[key_value.key] = 1;
-      noRelinKeys.push_back(key_value.key);
+    for (const auto key : separatorValues_.keys()) {
+      constrainedKeys[key] = 1;
+      noRelinKeys.push_back(key);
     }
     // Use iSAM2 to perform an update
@@ -82,14 +82,14 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
     Values values(newTheta);
     // Unfortunately, we must be careful here, as some of the smoother values
     // and/or separator values may have been added previously
-    for(const auto key_value: smootherValues_) {
-      if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
-        values.insert(key_value.key, smootherValues_.at(key_value.key));
+    for(const auto key: smootherValues_.keys()) {
+      if(!isam2_.getLinearizationPoint().exists(key)) {
+        values.insert(key, smootherValues_.at(key));
       }
     }
-    for(const auto key_value: separatorValues_) {
-      if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
-        values.insert(key_value.key, separatorValues_.at(key_value.key));
+    for(const auto key: separatorValues_.keys()) {
+      if(!isam2_.getLinearizationPoint().exists(key)) {
+        values.insert(key, separatorValues_.at(key));
       }
     }
@@ -245,10 +245,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() {
   }
   // Get the set of separator keys
-  KeySet separatorKeys;
-  for(const auto key_value: separatorValues_) {
-    separatorKeys.insert(key_value.key);
-  }
+  const KeySet separatorKeys = separatorValues_.keySet();
   // Calculate the marginal factors on the separator
   smootherSummarization_ = internal::calculateMarginalFactors(graph, isam2_.getLinearizationPoint(), separatorKeys,
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
index 61db051673..15038c23fc 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
@@ -64,8 +64,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
   std::set<Key> KeysToKeep;
-  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
-    KeysToKeep.insert(key_value.key);
+  for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph
+    KeysToKeep.insert(key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
     KeysToKeep.erase(key);
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
index b5fb614285..a2733d509c 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
@@ -560,8 +560,8 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
   GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);
   KeySet eliminateKeys = linearFactors->keys();
-  for(const auto key_value: filterSeparatorValues) {
-    eliminateKeys.erase(key_value.key);
+  for(const auto key: filterSeparatorValues.keys()) {
+    eliminateKeys.erase(key);
   }
   KeyVector variables(eliminateKeys.begin(), eliminateKeys.end());
   GaussianFactorGraph result = *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second;
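The CalculateMarginals test helpers above now build their keep-set from Values::keys() before erasing the keys to be marginalized. A trimmed-down sketch of that bookkeeping (keys are illustrative):

    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam/nonlinear/Values.h>
    #include <set>
    #include <vector>

    int main() {
      using namespace gtsam;
      Values linPoint;
      linPoint.insert(Symbol('x', 0), Pose2(0.0, 0.0, 0.0));
      linPoint.insert(Symbol('x', 1), Pose2(1.0, 0.0, 0.0));
      linPoint.insert(Symbol('x', 2), Pose2(2.0, 0.0, 0.0));

      // Start from every key in the linearization point...
      std::set<Key> keysToKeep;
      for (const auto key : linPoint.keys()) {
        keysToKeep.insert(key);
      }
      // ...then drop the ones that will be marginalized out.
      const std::vector<Key> keysToMarginalize{Symbol('x', 0).key()};
      for (const Key key : keysToMarginalize) {
        keysToKeep.erase(key);
      }
      return 0;
    }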
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
index 08d71a4202..7c6a082781 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
@@ -80,8 +80,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
   std::set<Key> KeysToKeep;
-  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
-    KeysToKeep.insert(key_value.key);
+  for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph
+    KeysToKeep.insert(key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
     KeysToKeep.erase(key);
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
index ccb5a104e3..0e01112eb5 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
@@ -512,8 +512,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 )
   //  Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const auto key_value: filterSeparatorValues) {
-    actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
+  for(const auto key: filterSeparatorValues.keys()) {
+    actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
 }
@@ -580,8 +580,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 )
   //  GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
   KeySet allkeys = LinFactorGraph->keys();
-  for(const auto key_value: filterSeparatorValues) {
-    allkeys.erase(key_value.key);
+  for(const auto key: filterSeparatorValues.keys()) {
+    allkeys.erase(key);
   }
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
index 53c3d16103..a33fcc4811 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
@@ -513,8 +513,8 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 )
   //  Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const auto key_value: filterSeparatorValues) {
-    actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
+  for(const auto key: filterSeparatorValues.keys()) {
+    actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
 }
@@ -582,8 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 )
   //  GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
   KeySet allkeys = LinFactorGraph->keys();
-  for(const auto key_value: filterSeparatorValues)
-    allkeys.erase(key_value.key);
+  for (const auto key : filterSeparatorValues.keys()) allkeys.erase(key);
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);
diff --git a/gtsam_unstable/timing/timeDSFvariants.cpp b/gtsam_unstable/timing/timeDSFvariants.cpp
index 3da01bfafa..35ae7d4d5e 100644
--- a/gtsam_unstable/timing/timeDSFvariants.cpp
+++ b/gtsam_unstable/timing/timeDSFvariants.cpp
@@ -40,8 +40,7 @@ int main(int argc, char* argv[]) {
   os << "images,points,matches,Base,Map,BTree" << endl;
   // loop over number of images
-  vector<size_t> ms;
-  ms += 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000;
+  vector<size_t> ms {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000};
   for(size_t m: ms) {
     // We use volatile here to make these appear to the optimizing compiler as if their values are only known at run-time.
diff --git a/timing/timeCameraExpression.cpp b/timing/timeCameraExpression.cpp
index 07eeb1bcba..5092949992 100644
--- a/timing/timeCameraExpression.cpp
+++ b/timing/timeCameraExpression.cpp
@@ -100,7 +100,6 @@ int main() {
   // Oct 3, 2014, Macbook Air
   // 9.0 musecs/call
   typedef PinholeCamera<Cal3_S2> Camera;
-  typedef Expression<Camera> Camera_;
   NonlinearFactor::shared_ptr g3 = boost::make_shared<ExpressionFactor<Point2> >(model, z, Point2_(myProject, x, p));
diff --git a/timing/timeSchurFactors.cpp b/timing/timeSchurFactors.cpp
index 370486c5f7..95296e80e5 100644
--- a/timing/timeSchurFactors.cpp
+++ b/timing/timeSchurFactors.cpp
@@ -101,7 +101,7 @@ void timeAll(size_t m, size_t N) {
   { // Raw memory Version
     FastVector < Key > keys;
     for (size_t i = 0; i < m; i++)
-      keys += i;
+      keys.push_back(i);
     Vector x = xvalues.vector(keys);
     double* xdata = x.data();
@@ -144,7 +144,7 @@ int main(void) {
   //  ms += 20, 30, 40, 50;
   //  ms += 20,30,40,50,60,70,80,90,100;
   for (size_t m = 2; m <= 50; m += 2)
-    ms += m;
+    ms.push_back(m);
   //for (size_t m=10;m<=100;m+=10) ms += m;
   // loop over number of images
   for(size_t m: ms)
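Finally, the timing changes replace the boost::assign operator+= idiom with brace initialization and push_back, removing the extra dependency. A tiny equivalent snippet:

    #include <cstddef>
    #include <vector>

    int main() {
      // Brace initialization replaces the boost::assign idiom "ms += 10, 20, ...".
      std::vector<size_t> ms{10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000};

      // Plain push_back replaces "ms += m;" inside loops.
      for (size_t m = 2; m <= 50; m += 2) ms.push_back(m);
      return 0;
    }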