Skip to content

Commit

Permalink
Merge pull request #1073 from borglab/release/4.2a4
Browse files Browse the repository at this point in the history
  • Loading branch information
dellaert authored Jan 28, 2022
2 parents 76c7419 + 48488a9 commit d6edcea
Show file tree
Hide file tree
Showing 98 changed files with 3,000 additions and 1,571 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/python.sh
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,6 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
make -j2 install

cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix=
$PYTHON -m pip install --user .
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover -v
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ endif()
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a3")
set (GTSAM_PRERELEASE_VERSION "a4")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")

if (${GTSAM_VERSION_PATCH} EQUAL 0)
Expand Down
2 changes: 1 addition & 1 deletion doc/Doxyfile.in
Original file line number Diff line number Diff line change
Expand Up @@ -1188,7 +1188,7 @@ USE_MATHJAX = YES
# MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment.

MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest

# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
Expand Down
35 changes: 27 additions & 8 deletions doc/gtsam.lyx
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#LyX 2.2 created this file. For more info see http://www.lyx.org/
\lyxformat 508
#LyX 2.3 created this file. For more info see http://www.lyx.org/
\lyxformat 544
\begin_document
\begin_header
\save_transient_properties true
Expand Down Expand Up @@ -62,6 +62,8 @@
\font_osf false
\font_sf_scale 100 100
\font_tt_scale 100 100
\use_microtype false
\use_dash_ligatures true
\graphics default
\default_output_format default
\output_sync 0
Expand Down Expand Up @@ -91,6 +93,7 @@
\suppress_date false
\justification true
\use_refstyle 0
\use_minted 0
\index Index
\shortcut idx
\color #008000
Expand All @@ -105,7 +108,10 @@
\tocdepth 3
\paragraph_separation indent
\paragraph_indentation default
\quotes_language english
\is_math_indent 0
\math_numbering_side default
\quotes_style english
\dynamic_quotes 0
\papercolumns 1
\papersides 1
\paperpagestyle default
Expand Down Expand Up @@ -168,6 +174,7 @@ Factor graphs
\begin_inset CommandInset citation
LatexCommand citep
key "Koller09book"
literal "true"

\end_inset

Expand Down Expand Up @@ -270,13 +277,15 @@ Let us start with a one-page primer on factor graphs, which in no way replaces
\begin_inset CommandInset citation
LatexCommand citet
key "Kschischang01it"
literal "true"

\end_inset

and
\begin_inset CommandInset citation
LatexCommand citet
key "Loeliger04spm"
literal "true"

\end_inset

Expand Down Expand Up @@ -1321,6 +1330,7 @@ r in a pre-existing map, or indeed the presence of absence of ceiling lights
\begin_inset CommandInset citation
LatexCommand citet
key "Dellaert99b"
literal "true"

\end_inset

Expand Down Expand Up @@ -1542,6 +1552,7 @@ which is done on line 12.
\begin_inset CommandInset citation
LatexCommand citealt
key "Dellaert06ijrr"
literal "true"

\end_inset

Expand Down Expand Up @@ -1936,8 +1947,8 @@ reference "fig:CompareMarginals"

\end_inset

, where I show the marginals on position as covariance ellipses that contain
68.26% of all probability mass.
, where I show the marginals on position as 5-sigma covariance ellipses
that contain 99.9996% of all probability mass.
For the odometry marginals, it is immediately apparent from the figure
that (1) the uncertainty on pose keeps growing, and (2) the uncertainty
on angular odometry translates into increasing uncertainty on y.
Expand Down Expand Up @@ -1992,6 +2003,7 @@ PoseSLAM
\begin_inset CommandInset citation
LatexCommand citep
key "DurrantWhyte06ram"
literal "true"

\end_inset

Expand Down Expand Up @@ -2190,9 +2202,9 @@ reference "fig:example"
\end_inset

, along with covariance ellipses shown in green.
These covariance ellipses in 2D indicate the marginal over position, over
all possible orientations, and show the area which contain 68.26% of the
probability mass (in 1D this would correspond to one standard deviation).
These 5-sigma covariance ellipses in 2D indicate the marginal over position,
over all possible orientations, and show the area which contain 99.9996%
of the probability mass.
The graph shows in a clear manner that the uncertainty on pose
\begin_inset Formula $x_{5}$
\end_inset
Expand Down Expand Up @@ -3076,6 +3088,7 @@ reference "fig:Victoria-1"
\begin_inset CommandInset citation
LatexCommand citep
key "Kaess09ras"
literal "true"

\end_inset

Expand All @@ -3088,6 +3101,7 @@ key "Kaess09ras"
\begin_inset CommandInset citation
LatexCommand citep
key "Kaess08tro"
literal "true"

\end_inset

Expand Down Expand Up @@ -3355,6 +3369,7 @@ iSAM
\begin_inset CommandInset citation
LatexCommand citet
key "Kaess08tro,Kaess12ijrr"
literal "true"

\end_inset

Expand Down Expand Up @@ -3606,6 +3621,7 @@ subgraph preconditioning
\begin_inset CommandInset citation
LatexCommand citet
key "Dellaert10iros,Jian11iccv"
literal "true"

\end_inset

Expand Down Expand Up @@ -3638,6 +3654,7 @@ Visual Odometry
\begin_inset CommandInset citation
LatexCommand citet
key "Nister04cvpr2"
literal "true"

\end_inset

Expand All @@ -3661,6 +3678,7 @@ Visual SLAM
\begin_inset CommandInset citation
LatexCommand citet
key "Davison03iccv"
literal "true"

\end_inset

Expand Down Expand Up @@ -3711,6 +3729,7 @@ Filtering
\begin_inset CommandInset citation
LatexCommand citep
key "Smith87b"
literal "true"

\end_inset

Expand Down
Binary file modified doc/gtsam.pdf
Binary file not shown.
8 changes: 4 additions & 4 deletions doc/math.lyx
Original file line number Diff line number Diff line change
Expand Up @@ -2668,7 +2668,7 @@ reference "eq:pushforward"
\begin{eqnarray*}
\varphi(a)e^{\yhat} & = & \varphi(ae^{\xhat})\\
a^{-1}e^{\yhat} & = & \left(ae^{\xhat}\right)^{-1}\\
e^{\yhat} & = & -ae^{\xhat}a^{-1}\\
e^{\yhat} & = & ae^{-\xhat}a^{-1}\\
\yhat & = & -\Ad a\xhat
\end{eqnarray*}

Expand Down Expand Up @@ -3003,8 +3003,8 @@ between
\begin_inset Formula
\begin{align}
\varphi(g,h)e^{\yhat} & =\varphi(ge^{\xhat},h)\nonumber \\
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=-e^{\xhat}g^{-1}h\nonumber \\
e^{\yhat} & =-\left(h^{-1}g\right)e^{\xhat}\left(h^{-1}g\right)^{-1}=-\exp\Ad{\left(h^{-1}g\right)}\xhat\nonumber \\
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=e^{-\xhat}g^{-1}h\nonumber \\
e^{\yhat} & =\left(h^{-1}g\right)e^{-\xhat}\left(h^{-1}g\right)^{-1}=\exp\Ad{\left(h^{-1}g\right)}(-\xhat)\nonumber \\
\yhat & =-\Ad{\left(h^{-1}g\right)}\xhat=-\Ad{\varphi\left(h,g\right)}\xhat\label{eq:Dbetween1}
\end{align}

Expand Down Expand Up @@ -6674,7 +6674,7 @@ One representation of a line is through 2 vectors
\begin_inset Formula $d$
\end_inset

points from the orgin to the closest point on the line.
points from the origin to the closest point on the line.
\end_layout

\begin_layout Standard
Expand Down
Binary file modified doc/math.pdf
Binary file not shown.
9 changes: 4 additions & 5 deletions examples/DiscreteBayesNetExample.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,9 @@ int main(int argc, char **argv) {
// Create solver and eliminate
Ordering ordering;
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);

// solve
auto mpe = chordal->optimize();
auto mpe = fg.optimize();
GTSAM_PRINT(mpe);

// We can also build a Bayes tree (directed junction tree).
Expand All @@ -69,14 +68,14 @@ int main(int argc, char **argv) {
fg.add(Dyspnea, "0 1");

// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
auto mpe2 = chordal2->optimize();
auto mpe2 = fg.optimize();
GTSAM_PRINT(mpe2);

// We can also sample from it
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
auto sample = chordal2->sample();
auto sample = chordal->sample();
GTSAM_PRINT(sample);
}
return 0;
Expand Down
8 changes: 4 additions & 4 deletions examples/DiscreteBayesNet_FG.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ int main(int argc, char **argv) {
}

// "Most Probable Explanation", i.e., configuration with largest value
auto mpe = graph.eliminateSequential()->optimize();
auto mpe = graph.optimize();
cout << "\nMost Probable Explanation (MPE):" << endl;
print(mpe);

Expand All @@ -96,8 +96,7 @@ int main(int argc, char **argv) {
graph.add(Cloudy, "1 0");

// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
auto mpe_with_evidence = chordal->optimize();
auto mpe_with_evidence = graph.optimize();

cout << "\nMPE given C=0:" << endl;
print(mpe_with_evidence);
Expand All @@ -110,7 +109,8 @@ int main(int argc, char **argv) {
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
<< endl;

// We can also sample from it
// We can also sample from the eliminated graph
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
auto sample = chordal->sample();
Expand Down
8 changes: 4 additions & 4 deletions examples/HMMExample.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,16 +59,16 @@ int main(int argc, char **argv) {
// Convert to factor graph
DiscreteFactorGraph factorGraph(hmm);

// Do max-product
auto mpe = factorGraph.optimize();
GTSAM_PRINT(mpe);

// Create solver and eliminate
// This will create a DAG ordered with arrow of time reversed
DiscreteBayesNet::shared_ptr chordal =
factorGraph.eliminateSequential(ordering);
chordal->print("Eliminated");

// solve
auto mpe = chordal->optimize();
GTSAM_PRINT(mpe);

// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t k = 0; k < 10; k++) {
Expand Down
5 changes: 2 additions & 3 deletions examples/UGM_chain.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,9 +68,8 @@ int main(int argc, char** argv) {
<< graph.size() << " factors (Unary+Edge).";

// "Decoding", i.e., configuration with largest value
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
auto optimalDecoding = chordal->optimize();
// Uses max-product.
auto optimalDecoding = graph.optimize();
optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n");

// "Inference" Computing marginals for each node
Expand Down
5 changes: 2 additions & 3 deletions examples/UGM_small.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,8 @@ int main(int argc, char** argv) {
}

// "Decoding", i.e., configuration with largest value (MPE)
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
auto optimalDecoding = chordal->optimize();
// Uses max-product
auto optimalDecoding = graph.optimize();
GTSAM_PRINT(optimalDecoding);

// "Inference" Computing marginals
Expand Down
13 changes: 13 additions & 0 deletions gtsam/base/utilities.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#include <gtsam/base/utilities.h>

namespace gtsam {

/// Destructor: put the original stream buffer back on std::cout so that
/// output captured by this object stops being redirected.
RedirectCout::~RedirectCout() {
  std::cout.rdbuf(coutBuffer_);
}

/// Return everything written to std::cout since construction.
std::string RedirectCout::str() const {
  return ssBuffer_.str();
}

}  // namespace gtsam
12 changes: 6 additions & 6 deletions gtsam/base/utilities.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
#pragma once

#include <string>
#include <iostream>
#include <sstream>

namespace gtsam {
/**
* For Python __str__().
Expand All @@ -12,14 +16,10 @@ struct RedirectCout {
RedirectCout() : ssBuffer_(), coutBuffer_(std::cout.rdbuf(ssBuffer_.rdbuf())) {}

/// return the string
std::string str() const {
return ssBuffer_.str();
}
std::string str() const;

/// destructor -- redirect stdout buffer to its original buffer
~RedirectCout() {
std::cout.rdbuf(coutBuffer_);
}
~RedirectCout();

private:
std::stringstream ssBuffer_;
Expand Down
Loading

0 comments on commit d6edcea

Please sign in to comment.