Merge branch 'develop' of https://github.com/borglab/gtsam into dsf-gtsfm-to-gtsam-port

release/4.3a0
senselessdev1 2022-09-26 10:31:59 -04:00
commit 8007271f4b
135 changed files with 5666 additions and 3087 deletions


@ -43,46 +43,68 @@ if [ -z ${PYTHON_VERSION+x} ]; then
 exit 127
 fi
-PYTHON="python${PYTHON_VERSION}"
+export PYTHON="python${PYTHON_VERSION}"
-if [[ $(uname) == "Darwin" ]]; then
+function install_dependencies()
+{
+  if [[ $(uname) == "Darwin" ]]; then
     brew install wget
-else
+  else
     # Install a system package required by our library
     sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
-fi
+  fi
-PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
+  export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
-[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
+  [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
+  $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
+}
+function build()
+{
+  mkdir $GITHUB_WORKSPACE/build
+  cd $GITHUB_WORKSPACE/build
+  BUILD_PYBIND="ON"
+  cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
+    -DGTSAM_BUILD_TESTS=OFF \
+    -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
+    -DGTSAM_USE_QUATERNIONS=OFF \
+    -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
+    -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+    -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
+    -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
+    -DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
+    -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
+    -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
+    -DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
+    -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
-BUILD_PYBIND="ON"
+  # Set to 2 cores so that Actions does not error out during resource provisioning.
+  make -j2 install
-sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
+  cd $GITHUB_WORKSPACE/build/python
+  $PYTHON -m pip install --user .
+}
-mkdir $GITHUB_WORKSPACE/build
-cd $GITHUB_WORKSPACE/build
+function test()
+{
+  cd $GITHUB_WORKSPACE/python/gtsam/tests
+  $PYTHON -m unittest discover -v
+}
-cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-  -DGTSAM_BUILD_TESTS=OFF \
-  -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-  -DGTSAM_USE_QUATERNIONS=OFF \
-  -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-  -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-  -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-  -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-  -DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
-  -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-  -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-  -DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
-  -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
-# Set to 2 cores so that Actions does not error out during resource provisioning.
-make -j2 install
-cd $GITHUB_WORKSPACE/build/python
-$PYTHON -m pip install --user .
-cd $GITHUB_WORKSPACE/python/gtsam/tests
-$PYTHON -m unittest discover -v
+# select between installing dependencies, build, or test
+case $1 in
+  -d)
+    install_dependencies
+    ;;
+  -b)
+    build
+    ;;
+  -t)
+    test
+    ;;
+esac


@ -20,26 +20,26 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
ubuntu-20.04-gcc-7,
ubuntu-20.04-gcc-9,
ubuntu-20.04-clang-9,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-7
os: ubuntu-20.04
compiler: gcc
version: "5"
version: "7"
- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-9
os: ubuntu-20.04
compiler: gcc
version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"
@ -60,9 +60,9 @@ jobs:
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib


@ -19,34 +19,34 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
ubuntu-20.04-gcc-7,
ubuntu-20.04-gcc-9,
ubuntu-20.04-clang-9,
macOS-11-xcode-13.4.1,
ubuntu-18.04-gcc-5-tbb,
ubuntu-20.04-gcc-7-tbb,
]
build_type: [Debug, Release]
python_version: [3]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-7
os: ubuntu-20.04
compiler: gcc
version: "5"
version: "7"
- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-9
os: ubuntu-20.04
compiler: gcc
version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"
# NOTE temporarily added this as it is a required check.
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"
build_type: Debug
@ -57,10 +57,10 @@ jobs:
compiler: xcode
version: "13.4.1"
- name: ubuntu-18.04-gcc-5-tbb
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-7-tbb
os: ubuntu-20.04
compiler: gcc
version: "5"
version: "7"
flag: tbb
steps:
@ -79,9 +79,9 @@ jobs:
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
@ -117,11 +117,12 @@ jobs:
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 6
- name: Build (Linux)
if: runner.os == 'Linux'
- name: Install Dependencies
run: |
bash .github/scripts/python.sh
- name: Build (macOS)
if: runner.os == 'macOS'
bash .github/scripts/python.sh -d
- name: Build
run: |
bash .github/scripts/python.sh
bash .github/scripts/python.sh -b
- name: Test
run: |
bash .github/scripts/python.sh -t


@ -32,31 +32,31 @@ jobs:
include:
- name: ubuntu-gcc-deprecated
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: deprecated
- name: ubuntu-gcc-quaternions
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: quaternions
- name: ubuntu-gcc-tbb
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: tbb
- name: ubuntu-cayleymap
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: cayley
- name: ubuntu-system-libs
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: system-libs
@ -74,9 +74,9 @@ jobs:
gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib


@ -64,6 +64,29 @@ GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionali
We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details.
## Citation
If you are using GTSAM for academic work, please use the following citation:
```
@software{gtsam,
author = {Frank Dellaert and Richard Roberts and Varun Agrawal and Alex Cunningham and Chris Beall and Duy-Nguyen Ta and Fan Jiang and lucacarlone and nikai and Jose Luis Blanco-Claraco and Stephen Williams and ydjian and John Lambert and Andy Melim and Zhaoyang Lv and Akshay Krishnan and Jing Dong and Gerry Chen and Krunal Chande and balderdash-devil and DiffDecisionTrees and Sungtae An and mpaluri and Ellon Paiva Mendes and Mike Bosse and Akash Patel and Ayush Baid and Paul Furgale and matthewbroadwaynavenio and roderick-koehle},
title = {borglab/gtsam},
month = may,
year = 2022,
publisher = {Zenodo},
version = {4.2a7},
doi = {10.5281/zenodo.5794541},
url = {https://doi.org/10.5281/zenodo.5794541}
}
```
You can also get the latest citation available from Zenodo below:
[![DOI](https://zenodo.org/badge/86362856.svg)](https://doi.org/10.5281/zenodo.5794541)
Specific formats are available in the bottom-right corner of the Zenodo page.
## The Preintegrated IMU Factor
GTSAM includes a state-of-the-art IMU handling scheme based on
@ -73,7 +96,7 @@ GTSAM includes a state of the art IMU handling scheme based on
Our implementation improves on this using integration on the manifold, as detailed in
- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, _"Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors"_, Int. Conf. on Robotics and Automation (ICRA), 2014. [[link]](https://ieeexplore.ieee.org/abstract/document/6907483)
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf)
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, _"IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation"_, Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf)
If you are using the factor in academic work, please cite the publications above.

File diff suppressed because it is too large


@ -190,7 +190,7 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
endif()
if (NOT MSVC)
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
if(GTSAM_BUILD_WITH_MARCH_NATIVE AND (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64"))
# Add as a public flag so all dependent projects also use it, as required
# by Eigen to avoid crashes due to SIMD vectorization:


@ -1,7 +1,10 @@
###############################################################################
# Option for using system Eigen or GTSAM-bundled Eigen
option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
# Default: Use system's Eigen if found automatically:
find_package(Eigen3 QUIET)
set(USE_SYSTEM_EIGEN_INITIAL_VALUE ${Eigen3_FOUND})
option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" ${USE_SYSTEM_EIGEN_INITIAL_VALUE})
unset(USE_SYSTEM_EIGEN_INITIAL_VALUE)
if(NOT GTSAM_USE_SYSTEM_EIGEN)
# This option only makes sense if using the embedded copy of Eigen, it is
@ -11,7 +14,7 @@ endif()
# Switch for using system Eigen or GTSAM-bundled Eigen
if(GTSAM_USE_SYSTEM_EIGEN)
find_package(Eigen3 REQUIRED)
find_package(Eigen3 REQUIRED) # need to find again as REQUIRED
# Use generic Eigen include paths e.g. <Eigen/Core>
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")


@ -33,6 +33,7 @@ print_build_options_for_target(gtsam)
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
print_config("Using Boost version" "${Boost_VERSION}")
if(GTSAM_USE_TBB)
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.


@ -1,26 +1,72 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for Varun Agrawal at 2021-09-27 17:39:09 -0400
%% Saved with string encoding Unicode (UTF-8)
@article{Lupton12tro,
author = {Lupton, Todd and Sukkarieh, Salah},
date-added = {2021-09-27 17:38:56 -0400},
date-modified = {2021-09-27 17:39:09 -0400},
doi = {10.1109/TRO.2011.2170332},
journal = {IEEE Transactions on Robotics},
number = {1},
pages = {61-76},
title = {Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions},
volume = {28},
year = {2012},
Bdsk-Url-1 = {https://doi.org/10.1109/TRO.2011.2170332}}
@inproceedings{Forster15rss,
author = {Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza},
booktitle = {Robotics: Science and Systems},
date-added = {2021-09-26 20:44:41 -0400},
date-modified = {2021-09-26 20:45:03 -0400},
title = {IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation},
year = {2015}}
@article{Iserles00an,
title = {Lie-group methods},
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and
N{\o}rsett, Syvert P and Zanna, Antonella},
journal = {Acta Numerica 2000},
volume = {9},
pages = {215--365},
year = {2000},
publisher = {Cambridge Univ Press}
}
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and N{\o}rsett, Syvert P and Zanna, Antonella},
journal = {Acta Numerica 2000},
pages = {215--365},
publisher = {Cambridge Univ Press},
title = {Lie-group methods},
volume = {9},
year = {2000}}
@book{Murray94book,
title = {A mathematical introduction to robotic manipulation},
author = {Murray, Richard M and Li, Zexiang and Sastry, S
Shankar and Sastry, S Shankara},
year = {1994},
publisher = {CRC press}
}
author = {Murray, Richard M and Li, Zexiang and Sastry, S Shankar and Sastry, S Shankara},
publisher = {CRC press},
title = {A mathematical introduction to robotic manipulation},
year = {1994}}
@book{Spivak65book,
title = {Calculus on manifolds},
author = {Spivak, Michael},
volume = {1},
year = {1965},
publisher = {WA Benjamin New York}
}
author = {Spivak, Michael},
publisher = {WA Benjamin New York},
title = {Calculus on manifolds},
volume = {1},
year = {1965}}
@phdthesis{Nikolic16thesis,
title={Characterisation, calibration, and design of visual-inertial sensor systems for robot navigation},
author={Nikolic, Janosch},
year={2016},
school={ETH Zurich}
}
@book{Simon06book,
title={Optimal state estimation: Kalman, H infinity, and nonlinear approaches},
author={Simon, Dan},
year={2006},
publisher={John Wiley \& Sons}
}
@inproceedings{Trawny05report_IndirectKF,
title={Indirect Kalman Filter for 3D Attitude Estimation},
author={Nikolas Trawny and Stergios I. Roumeliotis},
year={2005}
}


@ -60,13 +60,14 @@ namespace po = boost::program_options;
po::variables_map parseOptions(int argc, char* argv[]) {
po::options_description desc;
desc.add_options()("help,h", "produce help message")(
"data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
"path to the CSV file with the IMU data")(
"output_filename",
po::value<string>()->default_value("imuFactorExampleResults.csv"),
"path to the result file to use")("use_isam", po::bool_switch(),
"use ISAM as the optimizer");
desc.add_options()("help,h", "produce help message") // help message
("data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
"path to the CSV file with the IMU data") // path to the data file
("output_filename",
po::value<string>()->default_value("imuFactorExampleResults.csv"),
"path to the result file to use") // filename to save results to
("use_isam", po::bool_switch(),
"use ISAM as the optimizer"); // flag for ISAM optimizer
po::variables_map vm;
po::store(po::parse_command_line(argc, argv, desc), vm);
@ -106,7 +107,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
I_3x3 * 1e-8; // error committed in integrating position from velocities
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
Matrix66 bias_acc_omega_int =
Matrix66 bias_acc_omega_init =
I_6x6 * 1e-5; // error in the bias used for preintegration
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
@ -122,7 +123,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
// PreintegrationCombinedMeasurements params:
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
p->biasAccOmegaInt = bias_acc_omega_int;
p->biasAccOmegaInt = bias_acc_omega_init;
return p;
}
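For orientation, here is a minimal sketch of how the `imuParams()` helper above is typically consumed; the bias, readings, and rate below are placeholder assumptions, not values from the example data:

```cpp
#include <gtsam/navigation/CombinedImuFactor.h>

using namespace gtsam;

void preintegrationSketch() {
  auto params = imuParams();        // shared Params helper shown above
  imuBias::ConstantBias priorBias;  // assume a zero initial bias estimate
  PreintegratedCombinedMeasurements pim(params, priorBias);

  // Accumulate one placeholder IMU reading at an assumed 200 Hz rate.
  const Vector3 measuredAcc(0.0, 0.0, -9.81);
  const Vector3 measuredOmega(0.0, 0.0, 0.0);
  const double dt = 0.005;
  pim.integrateMeasurement(measuredAcc, measuredOmega, dt);
}
```

Between keyframes, repeated `integrateMeasurement` calls build up the preintegrated quantity that a `CombinedImuFactor` then consumes.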


@ -94,7 +94,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
I_3x3 * 1e-8; // error committed in integrating position from velocities
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
Matrix66 bias_acc_omega_int =
Matrix66 bias_acc_omega_init =
I_6x6 * 1e-5; // error in the bias used for preintegration
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
@ -110,7 +110,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
// PreintegrationCombinedMeasurements params:
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
p->biasAccOmegaInt = bias_acc_omega_int;
p->biasAccOmegaInt = bias_acc_omega_init;
return p;
}


@ -33,7 +33,6 @@ The following examples illustrate some concepts from Georgia Tech's research pap
## 2D Pose SLAM
* **LocalizationExample.cpp**: modeling robot motion
* **LocalizationExample2.cpp**: example with GPS like measurements
* **Pose2SLAMExample**: A 2D Pose SLAM example using the predefined typedefs in gtsam/slam/pose2SLAM.h
* **Pose2SLAMExample_advanced**: same, but uses an Optimizer object
* **Pose2SLAMwSPCG**: solve a simple 3 by 3 grid of Pose2 SLAM problem by using easy SPCG interface


@ -95,7 +95,7 @@ template<class Class>
struct MultiplicativeGroupTraits {
typedef group_tag structure_category;
typedef multiplicative_group_tag group_flavor;
static Class Identity() { return Class::identity(); }
static Class Identity() { return Class::Identity(); }
static Class Compose(const Class &g, const Class & h) { return g * h;}
static Class Between(const Class &g, const Class & h) { return g.inverse() * h;}
static Class Inverse(const Class &g) { return g.inverse();}
@ -111,7 +111,7 @@ template<class Class>
struct AdditiveGroupTraits {
typedef group_tag structure_category;
typedef additive_group_tag group_flavor;
static Class Identity() { return Class::identity(); }
static Class Identity() { return Class::Identity(); }
static Class Compose(const Class &g, const Class & h) { return g + h;}
static Class Between(const Class &g, const Class & h) { return h - g;}
static Class Inverse(const Class &g) { return -g;}
@ -147,7 +147,7 @@ public:
DirectProduct(const G& g, const H& h):std::pair<G,H>(g,h) {}
// identity
static DirectProduct identity() { return DirectProduct(); }
static DirectProduct Identity() { return DirectProduct(); }
DirectProduct operator*(const DirectProduct& other) const {
return DirectProduct(traits<G>::Compose(this->first, other.first),
@ -181,7 +181,7 @@ public:
DirectSum(const G& g, const H& h):std::pair<G,H>(g,h) {}
// identity
static DirectSum identity() { return DirectSum(); }
static DirectSum Identity() { return DirectSum(); }
DirectSum operator+(const DirectSum& other) const {
return DirectSum(g()+other.g(), h()+other.h());
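To make the renamed convention concrete, here is a minimal sketch (not part of the diff) of a hypothetical user type that the traits above can wrap; generic code now calls the capitalized `Class::Identity()` where it previously called `Class::identity()`:

```cpp
// A hypothetical multiplicative group: scalings of the real line.
struct Scaling {
  double s;
  explicit Scaling(double scale = 1.0) : s(scale) {}
  static Scaling Identity() { return Scaling(); }  // capitalized, per this change
  Scaling operator*(const Scaling& other) const { return Scaling(s * other.s); }
  Scaling inverse() const { return Scaling(1.0 / s); }
};
```

With these members, `MultiplicativeGroupTraits<Scaling>` as defined above compiles unchanged.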


@ -177,7 +177,7 @@ struct LieGroupTraits: GetDimensionImpl<Class, Class::dimension> {
/// @name Group
/// @{
typedef multiplicative_group_tag group_flavor;
static Class Identity() { return Class::identity();}
static Class Identity() { return Class::Identity();}
/// @}
/// @name Manifold


@ -48,7 +48,7 @@ public:
/// @name Group
/// @{
typedef multiplicative_group_tag group_flavor;
static ProductLieGroup identity() {return ProductLieGroup();}
static ProductLieGroup Identity() {return ProductLieGroup();}
ProductLieGroup operator*(const ProductLieGroup& other) const {
return ProductLieGroup(traits<G>::Compose(this->first,other.first),


@ -60,6 +60,7 @@ GTSAM_MAKE_VECTOR_DEFS(9)
GTSAM_MAKE_VECTOR_DEFS(10)
GTSAM_MAKE_VECTOR_DEFS(11)
GTSAM_MAKE_VECTOR_DEFS(12)
GTSAM_MAKE_VECTOR_DEFS(15)
typedef Eigen::VectorBlock<Vector> SubVector;
typedef Eigen::VectorBlock<const Vector> ConstSubVector;


@ -169,7 +169,7 @@ struct HasVectorSpacePrereqs {
Vector v;
BOOST_CONCEPT_USAGE(HasVectorSpacePrereqs) {
p = Class::identity(); // identity
p = Class::Identity(); // identity
q = p + p; // addition
q = p - p; // subtraction
v = p.vector(); // conversion to vector
@ -192,7 +192,7 @@ struct VectorSpaceTraits: VectorSpaceImpl<Class, Class::dimension> {
/// @name Group
/// @{
typedef additive_group_tag group_flavor;
static Class Identity() { return Class::identity();}
static Class Identity() { return Class::Identity();}
/// @}
/// @name Manifold


@ -29,7 +29,7 @@ class Symmetric: private Eigen::PermutationMatrix<N> {
Eigen::PermutationMatrix<N>(P) {
}
public:
static Symmetric identity() { return Symmetric(); }
static Symmetric Identity() { return Symmetric(); }
Symmetric() {
Eigen::PermutationMatrix<N>::setIdentity();
}


@ -221,6 +221,6 @@ void PrintForest(const FOREST& forest, std::string str,
PrintForestVisitorPre visitor(keyFormatter);
DepthFirstForest(forest, str, visitor);
}
}
} // namespace treeTraversal
}
} // namespace gtsam


@ -189,9 +189,9 @@ class ParameterMatrix {
* NOTE: The size at compile time is unknown, so this identity is
* zero-length and thus not valid.
*/
inline static ParameterMatrix identity() {
inline static ParameterMatrix Identity() {
// throw std::runtime_error(
// "ParameterMatrix::identity(): Don't use this function");
// "ParameterMatrix::Identity(): Don't use this function");
return ParameterMatrix(0);
}


@ -137,7 +137,7 @@ class FitBasis {
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
const std::map<double, double>& sequence,
const gtsam::noiseModel::Base* model, size_t N);
This::Parameters parameters() const;
gtsam::This::Parameters parameters() const;
};
} // namespace gtsam


@ -133,7 +133,7 @@ TEST(ParameterMatrix, VectorSpace) {
// vector
EXPECT(assert_equal(Vector::Ones(M * N), params.vector()));
// identity
EXPECT(assert_equal(ParameterMatrix<M>::identity(),
EXPECT(assert_equal(ParameterMatrix<M>::Identity(),
ParameterMatrix<M>(Matrix::Zero(M, 0))));
}


@ -48,4 +48,25 @@ namespace gtsam {
return keys & key2;
}
void DiscreteKeys::print(const std::string& s,
const KeyFormatter& keyFormatter) const {
std::cout << s << std::endl;  // use the provided prefix and formatter
for (auto&& dkey : *this) {
std::cout << keyFormatter(dkey.first) << " " << dkey.second << std::endl;
}
}
bool DiscreteKeys::equals(const DiscreteKeys& other, double tol) const {
if (this->size() != other.size()) {
return false;
}
for (size_t i = 0; i < this->size(); i++) {
if (this->at(i).first != other.at(i).first ||
this->at(i).second != other.at(i).second) {
return false;
}
}
return true;
}
}
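A short usage sketch for the new `print`/`equals` members (the keys here are hypothetical):

```cpp
#include <cassert>
#include <gtsam/discrete/DiscreteKey.h>

using namespace gtsam;

void discreteKeysSketch() {
  // Two discrete variables: key 0 with 2 states, key 1 with 3 states.
  DiscreteKeys keys = DiscreteKey(0, 2) & DiscreteKey(1, 3);
  keys.print("cardinalities:");  // prints each key and its cardinality
  assert(keys.equals(keys));     // a container trivially equals itself
}
```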


@ -21,6 +21,7 @@
#include <gtsam/global_includes.h>
#include <gtsam/inference/Key.h>
#include <boost/serialization/vector.hpp>
#include <map>
#include <string>
#include <vector>
@ -69,8 +70,30 @@ namespace gtsam {
push_back(key);
return *this;
}
/// Print the keys and cardinalities.
void print(const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
/// Check equality to another DiscreteKeys object.
bool equals(const DiscreteKeys& other, double tol = 0) const;
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& boost::serialization::make_nvp(
"DiscreteKeys",
boost::serialization::base_object<std::vector<DiscreteKey>>(*this));
}
}; // DiscreteKeys
/// Create a list from two keys
GTSAM_EXPORT DiscreteKeys operator&(const DiscreteKey& key1, const DiscreteKey& key2);
}
// traits
template <>
struct traits<DiscreteKeys> : public Testable<DiscreteKeys> {};
} // namespace gtsam


@ -219,6 +219,16 @@ class DiscreteBayesTree {
};
#include <gtsam/discrete/DiscreteLookupDAG.h>
class DiscreteLookupTable : gtsam::DiscreteConditional{
DiscreteLookupTable(size_t nFrontals, const gtsam::DiscreteKeys& keys,
const gtsam::DecisionTreeFactor::ADT& potentials);
void print(
const std::string& s = "Discrete Lookup Table: ",
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;
size_t argmax(const gtsam::DiscreteValues& parentsValues) const;
};
class DiscreteLookupDAG {
DiscreteLookupDAG();
void push_back(const gtsam::DiscreteLookupTable* table);


@ -16,14 +16,29 @@
* @author Duy-Nguyen Ta
*/
#include <gtsam/base/Testable.h>
#include <gtsam/discrete/DiscreteFactor.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/serializationTestHelpers.h>
#include <gtsam/discrete/DiscreteFactor.h>
#include <boost/assign/std/map.hpp>
using namespace boost::assign;
using namespace std;
using namespace gtsam;
using namespace gtsam::serializationTestHelpers;
/* ************************************************************************* */
TEST(DiscreteKeys, Serialization) {
DiscreteKeys keys;
keys & DiscreteKey(0, 2);
keys & DiscreteKey(1, 3);
keys & DiscreteKey(2, 4);
EXPECT(equalsObj<DiscreteKeys>(keys));
EXPECT(equalsXML<DiscreteKeys>(keys));
EXPECT(equalsBinary<DiscreteKeys>(keys));
}
/* ************************************************************************* */
int main() {
@ -31,4 +46,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */


@ -38,7 +38,7 @@ public:
/// Default constructor yields identity
Cyclic():i_(0) {
}
static Cyclic identity() { return Cyclic();}
static Cyclic Identity() { return Cyclic();}
/// Cast to size_t
operator size_t() const {


@ -213,7 +213,7 @@ public:
}
/// for Canonical
static PinholeCamera identity() {
static PinholeCamera Identity() {
return PinholeCamera(); // assumes that the default constructor is valid
}


@ -412,7 +412,7 @@ public:
}
/// for Canonical
static PinholePose identity() {
static PinholePose Identity() {
return PinholePose(); // assumes that the default constructor is valid
}


@ -119,7 +119,7 @@ public:
/// @{
/// identity for group operation
inline static Pose2 identity() { return Pose2(); }
inline static Pose2 Identity() { return Pose2(); }
/// inverse
GTSAM_EXPORT Pose2 inverse() const;


@ -103,7 +103,7 @@ public:
/// @{
/// identity for group operation
static Pose3 identity() {
static Pose3 Identity() {
return Pose3();
}


@ -107,8 +107,8 @@ namespace gtsam {
/// @name Group
/// @{
/** identity */
inline static Rot2 identity() { return Rot2(); }
/** Identity */
inline static Rot2 Identity() { return Rot2(); }
/** The inverse rotation - negative angle */
Rot2 inverse() const { return Rot2(c_, -s_);}


@ -297,7 +297,7 @@ class GTSAM_EXPORT Rot3 : public LieGroup<Rot3, 3> {
/// @{
/// identity rotation for group operation
inline static Rot3 identity() {
inline static Rot3 Identity() {
return Rot3();
}


@ -178,13 +178,13 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
/// SO<N> identity for N >= 2
template <int N_ = N, typename = IsFixed<N_>>
static SO identity() {
static SO Identity() {
return SO();
}
/// SO<N> identity for N == Eigen::Dynamic
template <int N_ = N, typename = IsDynamic<N_>>
static SO identity(size_t n = 0) {
static SO Identity(size_t n = 0) {
return SO(n);
}


@ -134,7 +134,7 @@ void Similarity2::print(const std::string& s) const {
<< std::endl;
}
Similarity2 Similarity2::identity() { return Similarity2(); }
Similarity2 Similarity2::Identity() { return Similarity2(); }
Similarity2 Similarity2::operator*(const Similarity2& S) const {
return Similarity2(R_ * S.R_, ((1.0 / S.s_) * t_) + R_ * S.t_, s_ * S.s_);


@ -83,7 +83,7 @@ class GTSAM_EXPORT Similarity2 : public LieGroup<Similarity2, 4> {
/// @{
/// Return an identity transform
static Similarity2 identity();
static Similarity2 Identity();
/// Composition
Similarity2 operator*(const Similarity2& S) const;


@ -122,7 +122,7 @@ void Similarity3::print(const std::string& s) const {
std::cout << "t: " << translation().transpose() << " s: " << scale() << std::endl;
}
Similarity3 Similarity3::identity() {
Similarity3 Similarity3::Identity() {
return Similarity3();
}
Similarity3 Similarity3::operator*(const Similarity3& S) const {


@ -84,7 +84,7 @@ class GTSAM_EXPORT Similarity3 : public LieGroup<Similarity3, 7> {
/// @{
/// Return an identity transform
static Similarity3 identity();
static Similarity3 Identity();
/// Composition
Similarity3 operator*(const Similarity3& S) const;


@ -88,7 +88,7 @@ class GTSAM_EXPORT SphericalCamera {
/// Default constructor
SphericalCamera()
: pose_(Pose3::identity()), emptyCal_(boost::make_shared<EmptyCal>()) {}
: pose_(Pose3()), emptyCal_(boost::make_shared<EmptyCal>()) {}
/// Constructor with pose
explicit SphericalCamera(const Pose3& pose)
@ -198,9 +198,9 @@ class GTSAM_EXPORT SphericalCamera {
}
/// for Canonical
static SphericalCamera identity() {
static SphericalCamera Identity() {
return SphericalCamera(
Pose3::identity()); // assumes that the default constructor is valid
Pose3::Identity()); // assumes that the default constructor is valid
}
/// for Linear Triangulation


@ -71,7 +71,7 @@ public:
/// @{
/// identity
inline static StereoPoint2 identity() {
inline static StereoPoint2 Identity() {
return StereoPoint2();
}


@ -16,7 +16,7 @@ class Point2 {
bool equals(const gtsam::Point2& point, double tol) const;
// Group
static gtsam::Point2 identity();
static gtsam::Point2 Identity();
// Standard Interface
double x() const;
@ -73,7 +73,7 @@ class StereoPoint2 {
bool equals(const gtsam::StereoPoint2& point, double tol) const;
// Group
static gtsam::StereoPoint2 identity();
static gtsam::StereoPoint2 Identity();
gtsam::StereoPoint2 inverse() const;
gtsam::StereoPoint2 compose(const gtsam::StereoPoint2& p2) const;
gtsam::StereoPoint2 between(const gtsam::StereoPoint2& p2) const;
@ -115,7 +115,7 @@ class Point3 {
bool equals(const gtsam::Point3& p, double tol) const;
// Group
static gtsam::Point3 identity();
static gtsam::Point3 Identity();
// Standard Interface
Vector vector() const;
@ -149,7 +149,7 @@ class Rot2 {
bool equals(const gtsam::Rot2& rot, double tol) const;
// Group
static gtsam::Rot2 identity();
static gtsam::Rot2 Identity();
gtsam::Rot2 inverse();
gtsam::Rot2 compose(const gtsam::Rot2& p2) const;
gtsam::Rot2 between(const gtsam::Rot2& p2) const;
@ -198,7 +198,7 @@ class SO3 {
bool equals(const gtsam::SO3& other, double tol) const;
// Group
static gtsam::SO3 identity();
static gtsam::SO3 Identity();
gtsam::SO3 inverse() const;
gtsam::SO3 between(const gtsam::SO3& R) const;
gtsam::SO3 compose(const gtsam::SO3& R) const;
@ -228,7 +228,7 @@ class SO4 {
bool equals(const gtsam::SO4& other, double tol) const;
// Group
static gtsam::SO4 identity();
static gtsam::SO4 Identity();
gtsam::SO4 inverse() const;
gtsam::SO4 between(const gtsam::SO4& Q) const;
gtsam::SO4 compose(const gtsam::SO4& Q) const;
@ -258,7 +258,7 @@ class SOn {
bool equals(const gtsam::SOn& other, double tol) const;
// Group
static gtsam::SOn identity();
static gtsam::SOn Identity();
gtsam::SOn inverse() const;
gtsam::SOn between(const gtsam::SOn& Q) const;
gtsam::SOn compose(const gtsam::SOn& Q) const;
@ -322,7 +322,7 @@ class Rot3 {
bool equals(const gtsam::Rot3& rot, double tol) const;
// Group
static gtsam::Rot3 identity();
static gtsam::Rot3 Identity();
gtsam::Rot3 inverse() const;
gtsam::Rot3 compose(const gtsam::Rot3& p2) const;
gtsam::Rot3 between(const gtsam::Rot3& p2) const;
@ -380,7 +380,7 @@ class Pose2 {
bool equals(const gtsam::Pose2& pose, double tol) const;
// Group
static gtsam::Pose2 identity();
static gtsam::Pose2 Identity();
gtsam::Pose2 inverse() const;
gtsam::Pose2 compose(const gtsam::Pose2& p2) const;
gtsam::Pose2 between(const gtsam::Pose2& p2) const;
@ -444,7 +444,7 @@ class Pose3 {
bool equals(const gtsam::Pose3& pose, double tol) const;
// Group
static gtsam::Pose3 identity();
static gtsam::Pose3 Identity();
gtsam::Pose3 inverse() const;
gtsam::Pose3 inverse(Eigen::Ref<Eigen::MatrixXd> H) const;
gtsam::Pose3 compose(const gtsam::Pose3& pose) const;
@ -1126,10 +1126,10 @@ class TriangulationResult {
Status status;
TriangulationResult(const gtsam::Point3& p);
const gtsam::Point3& get() const;
static TriangulationResult Degenerate();
static TriangulationResult Outlier();
static TriangulationResult FarPoint();
static TriangulationResult BehindCamera();
static gtsam::TriangulationResult Degenerate();
static gtsam::TriangulationResult Outlier();
static gtsam::TriangulationResult FarPoint();
static gtsam::TriangulationResult BehindCamera();
bool valid() const;
bool degenerate() const;
bool outlier() const;


@ -902,7 +902,7 @@ TEST(Pose2 , TransformCovariance3) {
/* ************************************************************************* */
TEST(Pose2, Print) {
Pose2 pose(Rot2::identity(), Point2(1, 2));
Pose2 pose(Rot2::Identity(), Point2(1, 2));
// Generate the expected output
string s = "Planar Pose";


@ -1133,7 +1133,7 @@ Pose3 testing_interpolate(const Pose3& t1, const Pose3& t2, double gamma) { retu
TEST(Pose3, interpolateJacobians) {
{
Pose3 X = Pose3::identity();
Pose3 X = Pose3::Identity();
Pose3 Y(Rot3::Rz(M_PI_2), Point3(1, 0, 0));
double t = 0.5;
Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0.5, -0.207107, 0)); // note: different from test above: this is full Pose3 interpolation
@ -1147,10 +1147,10 @@ TEST(Pose3, interpolateJacobians) {
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
}
{
Pose3 X = Pose3::identity();
Pose3 Y(Rot3::identity(), Point3(1, 0, 0));
Pose3 X = Pose3::Identity();
Pose3 Y(Rot3::Identity(), Point3(1, 0, 0));
double t = 0.3;
Pose3 expectedPoseInterp(Rot3::identity(), Point3(0.3, 0, 0));
Pose3 expectedPoseInterp(Rot3::Identity(), Point3(0.3, 0, 0));
Matrix actualJacobianX, actualJacobianY;
EXPECT(assert_equal(expectedPoseInterp, interpolate(X, Y, t, actualJacobianX, actualJacobianY), 1e-5));
@ -1161,7 +1161,7 @@ TEST(Pose3, interpolateJacobians) {
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
}
{
Pose3 X = Pose3::identity();
Pose3 X = Pose3::Identity();
Pose3 Y(Rot3::Rz(M_PI_2), Point3(0, 0, 0));
double t = 0.5;
Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0, 0, 0));
@ -1204,7 +1204,7 @@ TEST(Pose3, Create) {
/* ************************************************************************* */
TEST(Pose3, Print) {
Pose3 pose(Rot3::identity(), Point3(1, 2, 3));
Pose3 pose(Rot3::Identity(), Point3(1, 2, 3));
// Generate the expected output
std::string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 1 2 3\n";


@ -33,8 +33,6 @@ static const Point2 P(0.2, 0.7);
static const Rot2 R = Rot2::fromAngle(0.3);
static const double s = 4;
const double degree = M_PI / 180;
//******************************************************************************
TEST(Similarity2, Concepts) {
BOOST_CONCEPT_ASSERT((IsGroup<Similarity2>));


@ -203,11 +203,11 @@ TEST(Similarity3, ExpLogMap) {
Vector7 zeros;
zeros << 0, 0, 0, 0, 0, 0, 0;
Vector7 logIdentity = Similarity3::Logmap(Similarity3::identity());
Vector7 logIdentity = Similarity3::Logmap(Similarity3::Identity());
EXPECT(assert_equal(zeros, logIdentity));
Similarity3 expZero = Similarity3::Expmap(zeros);
Similarity3 ident = Similarity3::identity();
Similarity3 ident = Similarity3::Identity();
EXPECT(assert_equal(expZero, ident));
// Compare to matrix exponential, using expm in Lie.h
@ -391,7 +391,7 @@ TEST(Similarity3, Optimization) {
NonlinearFactorGraph graph;
graph.addPrior(key, prior, model);
// Create initial estimate with identity transform
// Create initial estimate with Identity transform
Values initial;
initial.insert<Similarity3>(key, Similarity3());


@ -66,7 +66,7 @@ class KeySet {
void serialize() const;
};
// Actually a vector<Key>
// Actually a vector<Key>, needed for Matlab
class KeyVector {
KeyVector();
KeyVector(const gtsam::KeyVector& other);


@ -119,11 +119,36 @@ void GaussianMixture::print(const std::string &s,
"", [&](Key k) { return formatter(k); },
[&](const GaussianConditional::shared_ptr &gf) -> std::string {
RedirectCout rd;
if (!gf->empty())
if (gf && !gf->empty())
gf->print("", formatter);
else
return {"nullptr"};
return rd.str();
});
}
/* *******************************************************************************/
void GaussianMixture::prune(const DecisionTreeFactor &decisionTree) {
// Functional which loops over all assignments and creates a set of
// GaussianConditionals
auto pruner = [&decisionTree](
const Assignment<Key> &choices,
const GaussianConditional::shared_ptr &conditional)
-> GaussianConditional::shared_ptr {
// typecast so we can use this to get probability value
DiscreteValues values(choices);
if (decisionTree(values) == 0.0) {
// empty aka null pointer
boost::shared_ptr<GaussianConditional> null;
return null;
} else {
return conditional;
}
};
auto pruned_conditionals = conditionals_.apply(pruner);
conditionals_.root_ = pruned_conditionals.root_;
}
} // namespace gtsam
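The pruning above hinges on `DecisionTree::apply` with an assignment-aware functor. A toy sketch of that mechanism on a scalar tree (the values and the zeroing rule are invented for illustration):

```cpp
#include <gtsam/discrete/DecisionTree-inl.h>
#include <gtsam/discrete/DiscreteKey.h>

using namespace gtsam;

void applySketch() {
  // A tree over one binary variable (key 0) with leaf values 1.0 and 2.0.
  DecisionTree<Key, double> tree(DiscreteKey(0, 2), 1.0, 2.0);

  // Zero the leaf reached when variable 0 is assigned 1; keep the rest.
  auto pruner = [](const Assignment<Key>& choices, const double& value) {
    return choices.at(0) == 1 ? 0.0 : value;
  };
  DecisionTree<Key, double> pruned = tree.apply(pruner);
}
```

`GaussianMixture::prune` does the same thing, except the leaves hold `GaussianConditional` pointers and pruned branches become null.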


@ -21,6 +21,7 @@
#include <gtsam/discrete/DecisionTree-inl.h>
#include <gtsam/discrete/DecisionTree.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/inference/Conditional.h>
@ -30,11 +31,14 @@ namespace gtsam {
/**
* @brief A conditional of gaussian mixtures indexed by discrete variables, as
* part of a Bayes Network.
* part of a Bayes Network. This is the result of the elimination of a
* continuous variable in a hybrid scheme, such that the remaining variables are
* discrete+continuous.
*
* Represents the conditional density P(X | M, Z) where X is a continuous random
* variable, M is the selection of discrete variables corresponding to a subset
* of the Gaussian variables and Z is parent of this node
* Represents the conditional density P(X | M, Z) where X is the set of
* continuous random variables, M is the selection of discrete variables
* corresponding to a subset of the Gaussian variables, and Z is the parent of
* this node.
*
* The probability P(x|y,z,...) is proportional to
* \f$ \sum_i k_i \exp - \frac{1}{2} |R_i x - (d_i - S_i y - T_i z - ...)|^2 \f$
@ -118,7 +122,7 @@ class GTSAM_EXPORT GaussianMixture
/// Test equality with base HybridFactor
bool equals(const HybridFactor &lf, double tol = 1e-9) const override;
/* print utility */
/// Print utility
void print(
const std::string &s = "GaussianMixture\n",
const KeyFormatter &formatter = DefaultKeyFormatter) const override;
@ -128,6 +132,15 @@ class GTSAM_EXPORT GaussianMixture
/// Getter for the underlying Conditionals DecisionTree
const Conditionals &conditionals();
/**
* @brief Prune the decision tree of Gaussian factors as per the discrete
* `decisionTree`.
*
* @param decisionTree A pruned decision tree of discrete keys where the
* leaves are probabilities.
*/
void prune(const DecisionTreeFactor &decisionTree);
/**
* @brief Merge the Gaussian Factor Graphs in `this` and `sum` while
* maintaining the decision tree structure.


@ -51,7 +51,7 @@ GaussianMixtureFactor GaussianMixtureFactor::FromFactors(
void GaussianMixtureFactor::print(const std::string &s,
const KeyFormatter &formatter) const {
HybridFactor::print(s, formatter);
std::cout << "]{\n";
std::cout << "{\n";
factors_.print(
"", [&](Key k) { return formatter(k); },
[&](const GaussianFactor::shared_ptr &gf) -> std::string {


@ -22,7 +22,7 @@
#include <gtsam/discrete/DecisionTree.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/linear/GaussianFactor.h>
namespace gtsam {
@ -108,7 +108,7 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor {
bool equals(const HybridFactor &lf, double tol = 1e-9) const override;
void print(
const std::string &s = "HybridFactor\n",
const std::string &s = "GaussianMixtureFactor\n",
const KeyFormatter &formatter = DefaultKeyFormatter) const override;
/// @}


@ -10,7 +10,166 @@
* @file HybridBayesNet.cpp
* @brief A Bayes net of Gaussian conditionals indexed by discrete keys.
* @author Fan Jiang
* @author Varun Agrawal
* @author Shangjie Xue
* @date January 2022
*/
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/hybrid/HybridBayesNet.h>
#include <gtsam/hybrid/HybridValues.h>
namespace gtsam {
/* ************************************************************************* */
/// Return the DiscreteKey vector as a set.
static std::set<DiscreteKey> DiscreteKeysAsSet(const DiscreteKeys &dkeys) {
std::set<DiscreteKey> s;
s.insert(dkeys.begin(), dkeys.end());
return s;
}
/* ************************************************************************* */
HybridBayesNet HybridBayesNet::prune(
const DecisionTreeFactor::shared_ptr &discreteFactor) const {
/* To prune, we visit every leaf in the GaussianMixture.
* For each leaf, we use its discrete assignment to look up the probability in
* the discrete decision tree; if it is 0.0, we set the leaf to a nullptr.
*
* We can later check the GaussianMixture for just nullptrs.
*/
HybridBayesNet prunedBayesNetFragment;
// Functional which loops over all assignments and creates a set of
// GaussianConditionals
auto pruner = [&](const Assignment<Key> &choices,
const GaussianConditional::shared_ptr &conditional)
-> GaussianConditional::shared_ptr {
// typecast so we can use this to get probability value
DiscreteValues values(choices);
if ((*discreteFactor)(values) == 0.0) {
// empty aka null pointer
boost::shared_ptr<GaussianConditional> null;
return null;
} else {
return conditional;
}
};
// Go through all the conditionals in the
// Bayes Net and prune them as per discreteFactor.
for (size_t i = 0; i < this->size(); i++) {
HybridConditional::shared_ptr conditional = this->at(i);
GaussianMixture::shared_ptr gaussianMixture =
boost::dynamic_pointer_cast<GaussianMixture>(conditional->inner());
if (gaussianMixture) {
// We may have mixtures with fewer discrete keys than discreteFactor, so we
// skip those since the label assignment does not exist.
auto gmKeySet = DiscreteKeysAsSet(gaussianMixture->discreteKeys());
auto dfKeySet = DiscreteKeysAsSet(discreteFactor->discreteKeys());
if (gmKeySet != dfKeySet) {
// Add the gaussianMixture which doesn't have to be pruned.
prunedBayesNetFragment.push_back(
boost::make_shared<HybridConditional>(gaussianMixture));
continue;
}
// Run the pruning to get a new, pruned tree
GaussianMixture::Conditionals prunedTree =
gaussianMixture->conditionals().apply(pruner);
DiscreteKeys discreteKeys = gaussianMixture->discreteKeys();
// reverse keys to get a natural ordering
std::reverse(discreteKeys.begin(), discreteKeys.end());
// Convert from boost::iterator_range to KeyVector
// so we can pass it to constructor.
KeyVector frontals(gaussianMixture->frontals().begin(),
gaussianMixture->frontals().end()),
parents(gaussianMixture->parents().begin(),
gaussianMixture->parents().end());
// Create the new gaussian mixture and add it to the bayes net.
auto prunedGaussianMixture = boost::make_shared<GaussianMixture>(
frontals, parents, discreteKeys, prunedTree);
// Type-erase and add to the pruned Bayes Net fragment.
prunedBayesNetFragment.push_back(
boost::make_shared<HybridConditional>(prunedGaussianMixture));
} else {
// Add the non-GaussianMixture conditional
prunedBayesNetFragment.push_back(conditional);
}
}
return prunedBayesNetFragment;
}
/* ************************************************************************* */
GaussianMixture::shared_ptr HybridBayesNet::atMixture(size_t i) const {
return factors_.at(i)->asMixture();
}
/* ************************************************************************* */
GaussianConditional::shared_ptr HybridBayesNet::atGaussian(size_t i) const {
return factors_.at(i)->asGaussian();
}
/* ************************************************************************* */
DiscreteConditional::shared_ptr HybridBayesNet::atDiscrete(size_t i) const {
return factors_.at(i)->asDiscreteConditional();
}
/* ************************************************************************* */
GaussianBayesNet HybridBayesNet::choose(
const DiscreteValues &assignment) const {
GaussianBayesNet gbn;
for (size_t idx = 0; idx < size(); idx++) {
if (factors_.at(idx)->isHybrid()) {
// If factor is hybrid, select based on assignment.
GaussianMixture gm = *this->atMixture(idx);
gbn.push_back(gm(assignment));
} else if (factors_.at(idx)->isContinuous()) {
// If continuous only, add gaussian conditional.
gbn.push_back((this->atGaussian(idx)));
} else if (factors_.at(idx)->isDiscrete()) {
// If factor at `idx` is discrete-only, we simply continue.
continue;
}
}
return gbn;
}
/* *******************************************************************************/
HybridValues HybridBayesNet::optimize() const {
// Solve for the MPE
DiscreteBayesNet discrete_bn;
for (auto &conditional : factors_) {
if (conditional->isDiscrete()) {
discrete_bn.push_back(conditional->asDiscreteConditional());
}
}
DiscreteValues mpe = DiscreteFactorGraph(discrete_bn).optimize();
// Given the MPE, compute the optimal continuous values.
GaussianBayesNet gbn = this->choose(mpe);
return HybridValues(mpe, gbn.optimize());
}
/* *******************************************************************************/
VectorValues HybridBayesNet::optimize(const DiscreteValues &assignment) const {
GaussianBayesNet gbn = this->choose(assignment);
return gbn.optimize();
}
} // namespace gtsam
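A short usage sketch of the two solve paths added above; the Bayes net and the discrete key are assumed to come from elsewhere (e.g. elimination of a hybrid factor graph):

```cpp
#include <gtsam/hybrid/HybridBayesNet.h>
#include <gtsam/hybrid/HybridValues.h>

using namespace gtsam;

void solveSketch(const HybridBayesNet& hbn, Key someDiscreteKey) {
  // Full hybrid solve: discrete MPE first, then the selected Gaussian net.
  HybridValues solution = hbn.optimize();

  // Or pin the discrete assignment and solve only the continuous part.
  DiscreteValues assignment;
  assignment[someDiscreteKey] = 0;  // pick mode 0 for this variable
  VectorValues delta = hbn.optimize(assignment);
}
```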


@ -17,8 +17,12 @@
#pragma once
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/global_includes.h>
#include <gtsam/hybrid/HybridConditional.h>
#include <gtsam/hybrid/HybridValues.h>
#include <gtsam/inference/BayesNet.h>
#include <gtsam/linear/GaussianBayesNet.h>
namespace gtsam {
@ -34,8 +38,94 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet<HybridConditional> {
using shared_ptr = boost::shared_ptr<HybridBayesNet>;
using sharedConditional = boost::shared_ptr<ConditionalType>;
/// @name Standard Constructors
/// @{
/** Construct an empty Bayes net */
HybridBayesNet() = default;
/// @}
/// @name Testable
/// @{
/** Check equality */
bool equals(const This &bn, double tol = 1e-9) const {
return Base::equals(bn, tol);
}
/// print graph
void print(
const std::string &s = "",
const KeyFormatter &formatter = DefaultKeyFormatter) const override {
Base::print(s, formatter);
}
/// @}
/// @name Standard Interface
/// @{
/// Add HybridConditional to Bayes Net
using Base::add;
/// Add a discrete conditional to the Bayes Net.
void add(const DiscreteKey &key, const std::string &table) {
push_back(
HybridConditional(boost::make_shared<DiscreteConditional>(key, table)));
}
/// Get a specific Gaussian mixture by index `i`.
GaussianMixture::shared_ptr atMixture(size_t i) const;
/// Get a specific Gaussian conditional by index `i`.
GaussianConditional::shared_ptr atGaussian(size_t i) const;
/// Get a specific discrete conditional by index `i`.
DiscreteConditional::shared_ptr atDiscrete(size_t i) const;
/**
* @brief Get the Gaussian Bayes Net which corresponds to a specific discrete
* value assignment.
*
* @param assignment The discrete value assignment for the discrete keys.
* @return GaussianBayesNet
*/
GaussianBayesNet choose(const DiscreteValues &assignment) const;
/**
* @brief Solve the HybridBayesNet by first computing the MPE of all the
* discrete variables and then optimizing the continuous variables based on
* the MPE assignment.
*
* @return HybridValues
*/
HybridValues optimize() const;
/**
* @brief Given the discrete assignment, return the optimized estimate for the
* selected Gaussian BayesNet.
*
* @param assignment An assignment of discrete values.
* @return VectorValues
*/
VectorValues optimize(const DiscreteValues &assignment) const;
/// Prune the Hybrid Bayes Net given the discrete decision tree.
HybridBayesNet prune(
const DecisionTreeFactor::shared_ptr &discreteFactor) const;
/// @}
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// traits
template <>
struct traits<HybridBayesNet> : public Testable<HybridBayesNet> {};
} // namespace gtsam


@ -18,10 +18,13 @@
*/
#include <gtsam/base/treeTraversal-inst.h>
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/hybrid/HybridBayesNet.h>
#include <gtsam/hybrid/HybridBayesTree.h>
#include <gtsam/inference/BayesTree-inst.h>
#include <gtsam/inference/BayesTreeCliqueBase-inst.h>
#include <gtsam/linear/GaussianJunctionTree.h>
namespace gtsam {
@ -35,4 +38,110 @@ bool HybridBayesTree::equals(const This& other, double tol) const {
return Base::equals(other, tol);
}
/* ************************************************************************* */
HybridValues HybridBayesTree::optimize() const {
DiscreteBayesNet dbn;
DiscreteValues mpe;
auto root = roots_.at(0);
// Access the clique and get the underlying hybrid conditional
HybridConditional::shared_ptr root_conditional = root->conditional();
// The root should be discrete-only, in which case we compute the MPE
if (root_conditional->isDiscrete()) {
dbn.push_back(root_conditional->asDiscreteConditional());
mpe = DiscreteFactorGraph(dbn).optimize();
} else {
throw std::runtime_error(
"HybridBayesTree root is not discrete-only. Please check elimination "
"ordering or use continuous factor graph.");
}
VectorValues values = optimize(mpe);
return HybridValues(mpe, values);
}
/* ************************************************************************* */
/**
* @brief Helper class for Depth First Forest traversal on the HybridBayesTree.
*
* When traversing the tree, the pre-order visitor will receive an instance of
* this class with the parent clique data.
*/
struct HybridAssignmentData {
const DiscreteValues assignment_;
GaussianBayesTree::sharedNode parentClique_;
// The gaussian bayes tree that will be recursively created.
GaussianBayesTree* gaussianbayesTree_;
/**
* @brief Construct a new Hybrid Assignment Data object.
*
* @param assignment The MPE assignment for the optimal Gaussian cliques.
* @param parentClique The clique from the parent node of the current node.
* @param gbt The Gaussian Bayes Tree being generated during tree traversal.
*/
HybridAssignmentData(const DiscreteValues& assignment,
const GaussianBayesTree::sharedNode& parentClique,
GaussianBayesTree* gbt)
: assignment_(assignment),
parentClique_(parentClique),
gaussianbayesTree_(gbt) {}
/**
* @brief A function used during tree traversal that operates on each node
* before visiting the node's children.
*
* @param node The current node being visited.
* @param parentData The HybridAssignmentData from the parent node.
* @return HybridAssignmentData
*/
static HybridAssignmentData AssignmentPreOrderVisitor(
const HybridBayesTree::sharedNode& node,
HybridAssignmentData& parentData) {
// Extract the gaussian conditional from the Hybrid clique
HybridConditional::shared_ptr hybrid_conditional = node->conditional();
GaussianConditional::shared_ptr conditional;
if (hybrid_conditional->isHybrid()) {
conditional = (*hybrid_conditional->asMixture())(parentData.assignment_);
} else if (hybrid_conditional->isContinuous()) {
conditional = hybrid_conditional->asGaussian();
} else {
// Discrete-only conditional, so we set it to an empty Gaussian conditional
conditional = boost::make_shared<GaussianConditional>();
}
// Create the GaussianClique for the current node
auto clique = boost::make_shared<GaussianBayesTree::Node>(conditional);
// Add the current clique to the GaussianBayesTree.
parentData.gaussianbayesTree_->addClique(clique, parentData.parentClique_);
// Create new HybridAssignmentData where the current node is the parent
// This will be passed down to the children nodes
HybridAssignmentData data(parentData.assignment_, clique,
parentData.gaussianbayesTree_);
return data;
}
};
/* ************************************************************************* */
VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const {
GaussianBayesTree gbt;
HybridAssignmentData rootData(assignment, 0, &gbt);
{
treeTraversal::no_op visitorPost;
// Limits OpenMP threads since we're mixing TBB and OpenMP
TbbOpenMPMixedScope threadLimiter;
treeTraversal::DepthFirstForestParallel(
*this, rootData, HybridAssignmentData::AssignmentPreOrderVisitor,
visitorPost);
}
VectorValues result = gbt.optimize();
// Return the optimized bayes net result.
return result;
}
} // namespace gtsam
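The Bayes-tree counterpart mirrors the net version, but drives the clique visitor defined above; a minimal sketch with the inputs assumed given:

```cpp
#include <gtsam/hybrid/HybridBayesTree.h>

using namespace gtsam;

VectorValues treeSolveSketch(const HybridBayesTree& tree,
                             const DiscreteValues& mpe) {
  // Selects one GaussianConditional per clique via the assignment,
  // assembles a GaussianBayesTree, and back-substitutes for the delta.
  return tree.optimize(mpe);
}
```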


@ -70,13 +70,46 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree<HybridBayesTreeClique> {
/** Check equality */
bool equals(const This& other, double tol = 1e-9) const;
/**
* @brief Optimize the hybrid Bayes tree by computing the MPE for the current
* set of discrete variables and using it to compute the best continuous
* update delta.
*
* @return HybridValues
*/
HybridValues optimize() const;
/**
* @brief Recursively optimize the BayesTree to produce a vector solution.
*
* @param assignment The discrete values assignment to select the Gaussian
* mixtures.
* @return VectorValues
*/
VectorValues optimize(const DiscreteValues& assignment) const;
/// @}
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// traits
template <>
struct traits<HybridBayesTree> : public Testable<HybridBayesTree> {};
/**
* @brief Class for Hybrid Bayes tree orphan subtrees.
*
* This does special stuff for the hybrid case
* This object stores parent keys in our base type factor so that
* eliminating those parent keys will pull this subtree into the
* elimination.
* This does special stuff for the hybrid case.
*
* @tparam CLIQUE
*/


@ -69,7 +69,7 @@ class GTSAM_EXPORT HybridConditional
BaseConditional; ///< Typedef to our conditional base class
protected:
// Type-erased pointer to the inner type
/// Type-erased pointer to the inner type
boost::shared_ptr<Factor> inner_;
public:
@ -127,8 +127,7 @@ class GTSAM_EXPORT HybridConditional
* @param gaussianMixture Gaussian Mixture Conditional used to create the
* HybridConditional.
*/
HybridConditional(
boost::shared_ptr<GaussianMixture> gaussianMixture);
HybridConditional(boost::shared_ptr<GaussianMixture> gaussianMixture);
/**
* @brief Return HybridConditional as a GaussianMixture
@ -140,6 +139,17 @@ class GTSAM_EXPORT HybridConditional
return boost::static_pointer_cast<GaussianMixture>(inner_);
}
/**
* @brief Return HybridConditional as a GaussianConditional
*
* @return GaussianConditional::shared_ptr
*/
GaussianConditional::shared_ptr asGaussian() {
if (!isContinuous())
throw std::invalid_argument("Not a continuous conditional");
return boost::static_pointer_cast<GaussianConditional>(inner_);
}
/**
* @brief Return conditional as a DiscreteConditional
*
@ -168,10 +178,19 @@ class GTSAM_EXPORT HybridConditional
/// Get the type-erased pointer to the inner type
boost::shared_ptr<Factor> inner() { return inner_; }
}; // DiscreteConditional
private:
/** Serialization function */
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseFactor);
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseConditional);
}
}; // HybridConditional
// traits
template <>
struct traits<HybridConditional> : public Testable<DiscreteConditional> {};
struct traits<HybridConditional> : public Testable<HybridConditional> {};
} // namespace gtsam
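A small sketch of the type-erasure accessors, including the new `asGaussian()`; the conditional is assumed to come from a solved hybrid graph:

```cpp
#include <gtsam/hybrid/HybridConditional.h>

using namespace gtsam;

void accessSketch(const HybridConditional::shared_ptr& c) {
  if (c->isContinuous()) {
    // New in this change; throws std::invalid_argument on the wrong type.
    GaussianConditional::shared_ptr gc = c->asGaussian();
    gc->print("gaussian: ");
  } else if (c->isDiscrete()) {
    c->asDiscreteConditional()->print("discrete: ");
  } else {
    c->asMixture()->print("mixture: ");
  }
}
```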


@ -52,7 +52,6 @@ DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1,
HybridFactor::HybridFactor(const KeyVector &keys)
: Base(keys),
isContinuous_(true),
nrContinuous_(keys.size()),
continuousKeys_(keys) {}
/* ************************************************************************ */
@ -62,7 +61,6 @@ HybridFactor::HybridFactor(const KeyVector &continuousKeys,
isDiscrete_((continuousKeys.size() == 0) && (discreteKeys.size() != 0)),
isContinuous_((continuousKeys.size() != 0) && (discreteKeys.size() == 0)),
isHybrid_((continuousKeys.size() != 0) && (discreteKeys.size() != 0)),
nrContinuous_(continuousKeys.size()),
discreteKeys_(discreteKeys),
continuousKeys_(continuousKeys) {}
@ -78,7 +76,8 @@ bool HybridFactor::equals(const HybridFactor &lf, double tol) const {
const This *e = dynamic_cast<const This *>(&lf);
return e != nullptr && Base::equals(*e, tol) &&
isDiscrete_ == e->isDiscrete_ && isContinuous_ == e->isContinuous_ &&
isHybrid_ == e->isHybrid_ && nrContinuous_ == e->nrContinuous_;
isHybrid_ == e->isHybrid_ && continuousKeys_ == e->continuousKeys_ &&
discreteKeys_ == e->discreteKeys_;
}
/* ************************************************************************ */
@ -88,17 +87,23 @@ void HybridFactor::print(const std::string &s,
if (isContinuous_) std::cout << "Continuous ";
if (isDiscrete_) std::cout << "Discrete ";
if (isHybrid_) std::cout << "Hybrid ";
for (size_t c=0; c<continuousKeys_.size(); c++) {
std::cout << "[";
for (size_t c = 0; c < continuousKeys_.size(); c++) {
std::cout << formatter(continuousKeys_.at(c));
if (c < continuousKeys_.size() - 1) {
std::cout << " ";
} else {
std::cout << "; ";
std::cout << (discreteKeys_.size() > 0 ? "; " : "");
}
}
for(auto && discreteKey: discreteKeys_) {
std::cout << formatter(discreteKey.first) << " ";
for (size_t d = 0; d < discreteKeys_.size(); d++) {
std::cout << formatter(discreteKeys_.at(d).first);
if (d < discreteKeys_.size() - 1) {
std::cout << " ";
}
}
std::cout << "]";
}
} // namespace gtsam

View File

@ -47,11 +47,9 @@ class GTSAM_EXPORT HybridFactor : public Factor {
bool isContinuous_ = false;
bool isHybrid_ = false;
size_t nrContinuous_ = 0;
protected:
// Set of DiscreteKeys for this factor.
DiscreteKeys discreteKeys_;
/// Record continuous keys for book-keeping
KeyVector continuousKeys_;
@ -120,12 +118,28 @@ class GTSAM_EXPORT HybridFactor : public Factor {
bool isHybrid() const { return isHybrid_; }
/// Return the number of continuous variables in this factor.
size_t nrContinuous() const { return nrContinuous_; }
size_t nrContinuous() const { return continuousKeys_.size(); }
/// Return vector of discrete keys.
DiscreteKeys discreteKeys() const { return discreteKeys_; }
/// Return the discrete keys for this factor.
const DiscreteKeys &discreteKeys() const { return discreteKeys_; }
/// Return only the continuous keys for this factor.
const KeyVector &continuousKeys() const { return continuousKeys_; }
/// @}
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(isDiscrete_);
ar &BOOST_SERIALIZATION_NVP(isContinuous_);
ar &BOOST_SERIALIZATION_NVP(isHybrid_);
ar &BOOST_SERIALIZATION_NVP(discreteKeys_);
ar &BOOST_SERIALIZATION_NVP(continuousKeys_);
}
};
// HybridFactor
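A small sketch of the book-keeping accessors after the removal of nrContinuous_, assuming the symbol_shorthand aliases X and M (keys illustrative):

KeyVector continuousKeys{X(1), X(2)};
DiscreteKeys discreteKeys{{M(1), 2}};
HybridFactor f(continuousKeys, discreteKeys);

assert(f.isHybrid());                  // both key types present
assert(f.nrContinuous() == 2);         // now derived from continuousKeys_
assert(f.discreteKeys().size() == 1);  // returned by const reference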

View File

@ -0,0 +1,140 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridFactorGraph.h
* @brief Hybrid factor graph base class that uses type erasure
* @author Varun Agrawal
* @date May 28, 2022
*/
#pragma once
#include <gtsam/discrete/DiscreteFactor.h>
#include <gtsam/hybrid/HybridDiscreteFactor.h>
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/inference/Ordering.h>
#include <boost/format.hpp>
namespace gtsam {
using SharedFactor = boost::shared_ptr<Factor>;
/**
* Hybrid Factor Graph
* -----------------------
* This is the base hybrid factor graph.
* Everything inside needs to be a hybrid factor or a hybrid conditional.
*/
class HybridFactorGraph : public FactorGraph<HybridFactor> {
public:
using Base = FactorGraph<HybridFactor>;
using This = HybridFactorGraph; ///< this class
using shared_ptr = boost::shared_ptr<This>; ///< shared_ptr to This
using Values = gtsam::Values; ///< backwards compatibility
using Indices = KeyVector;  ///< vector of variable keys
protected:
/// Check if FACTOR type is derived from DiscreteFactor.
template <typename FACTOR>
using IsDiscrete = typename std::enable_if<
std::is_base_of<DiscreteFactor, FACTOR>::value>::type;
/// Check if FACTOR type is derived from HybridFactor.
template <typename FACTOR>
using IsHybrid = typename std::enable_if<
std::is_base_of<HybridFactor, FACTOR>::value>::type;
public:
/// @name Constructors
/// @{
/// Default constructor
HybridFactorGraph() = default;
/**
* Implicit copy/downcast constructor to override explicit template container
* constructor. In BayesTree this is used for:
* `cachedSeparatorMarginal_.reset(*separatorMarginal)`
* */
template <class DERIVEDFACTOR>
HybridFactorGraph(const FactorGraph<DERIVEDFACTOR>& graph) : Base(graph) {}
/// @}
// Allow use of selected FactorGraph methods:
using Base::empty;
using Base::reserve;
using Base::size;
using Base::operator[];
using Base::add;
using Base::push_back;
using Base::resize;
/**
* Add a discrete factor *pointer* to the internal discrete graph
* @param discreteFactor - boost::shared_ptr to the factor to add
*/
template <typename FACTOR>
IsDiscrete<FACTOR> push_discrete(
const boost::shared_ptr<FACTOR>& discreteFactor) {
Base::push_back(boost::make_shared<HybridDiscreteFactor>(discreteFactor));
}
/**
* Add a discrete-continuous (Hybrid) factor *pointer* to the graph
* @param hybridFactor - boost::shared_ptr to the factor to add
*/
template <typename FACTOR>
IsHybrid<FACTOR> push_hybrid(const boost::shared_ptr<FACTOR>& hybridFactor) {
Base::push_back(hybridFactor);
}
/// emplace_shared is deleted; use emplace_discrete or emplace_hybrid instead.
template <class FACTOR, class... Args>
void emplace_shared(Args&&... args) = delete;
/// Construct a factor and add (shared pointer to it) to factor graph.
template <class FACTOR, class... Args>
IsDiscrete<FACTOR> emplace_discrete(Args&&... args) {
auto factor = boost::allocate_shared<FACTOR>(
Eigen::aligned_allocator<FACTOR>(), std::forward<Args>(args)...);
push_discrete(factor);
}
/// Construct a factor and add (shared pointer to it) to factor graph.
template <class FACTOR, class... Args>
IsHybrid<FACTOR> emplace_hybrid(Args&&... args) {
auto factor = boost::allocate_shared<FACTOR>(
Eigen::aligned_allocator<FACTOR>(), std::forward<Args>(args)...);
push_hybrid(factor);
}
/**
* @brief Add a single factor shared pointer to the hybrid factor graph.
* Dynamically handles the factor type and assigns it to the correct
* underlying container.
*
* @param sharedFactor The factor to add to this factor graph.
*/
void push_back(const SharedFactor& sharedFactor) {
if (auto p = boost::dynamic_pointer_cast<DiscreteFactor>(sharedFactor)) {
push_discrete(p);
}
if (auto p = boost::dynamic_pointer_cast<HybridFactor>(sharedFactor)) {
push_hybrid(p);
}
}
};
} // namespace gtsam
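A sketch of the type-tagged insertion helpers, assuming DecisionTreeFactor's single-key table constructor (the spec strings are illustrative):

HybridFactorGraph graph;

// Discrete factors get wrapped in HybridDiscreteFactor on the way in:
DiscreteKey m1(M(1), 2);
graph.emplace_discrete<DecisionTreeFactor>(m1, "0.4 0.6");

// The generic push_back dispatches on the dynamic type of the pointer:
SharedFactor f = boost::make_shared<DecisionTreeFactor>(m1, "0.3 0.7");
graph.push_back(f);  // routed to push_discrete via dynamic_pointer_cast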

View File

@ -37,6 +37,8 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor {
using This = HybridGaussianFactor;
using shared_ptr = boost::shared_ptr<This>;
HybridGaussianFactor() = default;
// Explicit conversion from a shared ptr of GF
explicit HybridGaussianFactor(GaussianFactor::shared_ptr other);
@ -52,7 +54,7 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor {
/// GTSAM print utility.
void print(
const std::string &s = "HybridFactor\n",
const std::string &s = "HybridGaussianFactor\n",
const KeyFormatter &formatter = DefaultKeyFormatter) const override;
/// @}

View File

@ -98,6 +98,12 @@ GaussianMixtureFactor::Sum sumFrontals(
} else if (f->isContinuous()) {
deferredFactors.push_back(
boost::dynamic_pointer_cast<HybridGaussianFactor>(f)->inner());
} else if (f->isDiscrete()) {
// Don't do anything for discrete-only factors
// since we want to eliminate continuous values only.
continue;
} else {
// We need to handle the case where the object is actually a
// BayesTreeOrphanWrapper!
@ -106,8 +112,8 @@ GaussianMixtureFactor::Sum sumFrontals(
if (!orphan) {
auto &fr = *f;
throw std::invalid_argument(
std::string("factor is discrete in continuous elimination") +
typeid(fr).name());
std::string("factor is discrete in continuous elimination ") +
demangle(typeid(fr).name()));
}
}
}
@ -129,9 +135,9 @@ continuousElimination(const HybridGaussianFactorGraph &factors,
for (auto &fp : factors) {
if (auto ptr = boost::dynamic_pointer_cast<HybridGaussianFactor>(fp)) {
gfg.push_back(ptr->inner());
} else if (auto p =
boost::static_pointer_cast<HybridConditional>(fp)->inner()) {
gfg.push_back(boost::static_pointer_cast<GaussianConditional>(p));
} else if (auto ptr = boost::static_pointer_cast<HybridConditional>(fp)) {
gfg.push_back(
boost::static_pointer_cast<GaussianConditional>(ptr->inner()));
} else {
// It is an orphan wrapped conditional
}
@ -147,18 +153,20 @@ std::pair<HybridConditional::shared_ptr, HybridFactor::shared_ptr>
discreteElimination(const HybridGaussianFactorGraph &factors,
const Ordering &frontalKeys) {
DiscreteFactorGraph dfg;
for (auto &fp : factors) {
if (auto ptr = boost::dynamic_pointer_cast<HybridDiscreteFactor>(fp)) {
dfg.push_back(ptr->inner());
} else if (auto p =
boost::static_pointer_cast<HybridConditional>(fp)->inner()) {
dfg.push_back(boost::static_pointer_cast<DiscreteConditional>(p));
for (auto &factor : factors) {
if (auto p = boost::dynamic_pointer_cast<HybridDiscreteFactor>(factor)) {
dfg.push_back(p->inner());
} else if (auto p = boost::static_pointer_cast<HybridConditional>(factor)) {
auto discrete_conditional =
boost::static_pointer_cast<DiscreteConditional>(p->inner());
dfg.push_back(discrete_conditional);
} else {
// It is an orphan wrapper
}
}
auto result = EliminateDiscrete(dfg, frontalKeys);
auto result = EliminateForMPE(dfg, frontalKeys);
return {boost::make_shared<HybridConditional>(result.first),
boost::make_shared<HybridDiscreteFactor>(result.second)};
@ -178,6 +186,19 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
// sum out frontals, this is the factor on the separator
GaussianMixtureFactor::Sum sum = sumFrontals(factors);
// If a tree leaf contains nullptr,
// convert that leaf to an empty GaussianFactorGraph.
// Needed since the DecisionTree will otherwise create
// a GFG with a single (null) factor.
auto emptyGaussian = [](const GaussianFactorGraph &gfg) {
bool hasNull =
std::any_of(gfg.begin(), gfg.end(),
[](const GaussianFactor::shared_ptr &ptr) { return !ptr; });
return hasNull ? GaussianFactorGraph() : gfg;
};
sum = GaussianMixtureFactor::Sum(sum, emptyGaussian);
using EliminationPair = GaussianFactorGraph::EliminationResult;
KeyVector keysOfEliminated; // Not the ordering
@ -189,7 +210,10 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
if (graph.empty()) {
return {nullptr, nullptr};
}
auto result = EliminatePreferCholesky(graph, frontalKeys);
std::pair<boost::shared_ptr<GaussianConditional>,
boost::shared_ptr<GaussianFactor>>
result = EliminatePreferCholesky(graph, frontalKeys);
if (keysOfEliminated.empty()) {
keysOfEliminated =
result.first->keys(); // Initialize the keysOfEliminated to be the
@ -222,6 +246,7 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
return exp(-factor->error(empty_values));
};
DecisionTree<Key, double> fdt(separatorFactors, factorError);
auto discreteFactor =
boost::make_shared<DecisionTreeFactor>(discreteSeparator, fdt);
@ -229,14 +254,27 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
boost::make_shared<HybridDiscreteFactor>(discreteFactor)};
} else {
// Create a resulting DCGaussianMixture on the separator.
// Create a resulting GaussianMixtureFactor on the separator.
auto factor = boost::make_shared<GaussianMixtureFactor>(
KeyVector(continuousSeparator.begin(), continuousSeparator.end()),
discreteSeparator, separatorFactors);
return {boost::make_shared<HybridConditional>(conditional), factor};
}
}
/* ************************************************************************ */
/* ************************************************************************
* Function to eliminate variables **under the following assumptions**:
* 1. When the ordering is fully continuous, and the graph only contains
* continuous and hybrid factors
* 2. When the ordering is fully discrete, and the graph only contains discrete
* factors
*
* Any usage outside of this is considered incorrect.
*
* \warning This function is not meant to be used with arbitrary hybrid factor
* graphs. For example, if there exists continuous parents, and one tries to
* eliminate a discrete variable (as specified in the ordering), the result will
* be INCORRECT and there will be NO error raised.
*/
std::pair<HybridConditional::shared_ptr, HybridFactor::shared_ptr> //
EliminateHybrid(const HybridGaussianFactorGraph &factors,
const Ordering &frontalKeys) {
@ -366,4 +404,41 @@ void HybridGaussianFactorGraph::add(DecisionTreeFactor::shared_ptr factor) {
FactorGraph::add(boost::make_shared<HybridDiscreteFactor>(factor));
}
/* ************************************************************************ */
const KeySet HybridGaussianFactorGraph::getDiscreteKeys() const {
KeySet discrete_keys;
for (auto &factor : factors_) {
for (const DiscreteKey &k : factor->discreteKeys()) {
discrete_keys.insert(k.first);
}
}
return discrete_keys;
}
/* ************************************************************************ */
const KeySet HybridGaussianFactorGraph::getContinuousKeys() const {
KeySet keys;
for (auto &factor : factors_) {
for (const Key &key : factor->continuousKeys()) {
keys.insert(key);
}
}
return keys;
}
/* ************************************************************************ */
const Ordering HybridGaussianFactorGraph::getHybridOrdering() const {
KeySet discrete_keys = getDiscreteKeys();
const VariableIndex index(factors_);
Ordering ordering = Ordering::ColamdConstrainedLast(
index, KeyVector(discrete_keys.begin(), discrete_keys.end()), true);
return ordering;
}
} // namespace gtsam
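A sketch of how these helpers compose (graph contents elided): the constrained ordering puts every discrete key last, so all continuous variables are eliminated first, as EliminateHybrid requires.

HybridGaussianFactorGraph hgfg;
// ... add Gaussian, mixture, and discrete factors ...

const Ordering ordering = hgfg.getHybridOrdering();
HybridBayesNet::shared_ptr bayesNet = hgfg.eliminateSequential(ordering);
HybridValues solution = bayesNet->optimize();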

View File

@ -19,9 +19,12 @@
#pragma once
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/hybrid/HybridFactorGraph.h>
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/inference/EliminateableFactorGraph.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/linear/GaussianFactor.h>
namespace gtsam {
@ -53,10 +56,9 @@ struct EliminationTraits<HybridGaussianFactorGraph> {
typedef HybridBayesNet
BayesNetType; ///< Type of Bayes net from sequential elimination
typedef HybridEliminationTree
EliminationTreeType; ///< Type of elimination tree
typedef HybridBayesTree BayesTreeType; ///< Type of Bayes tree
typedef HybridJunctionTree
JunctionTreeType; ///< Type of Junction tree
EliminationTreeType; ///< Type of elimination tree
typedef HybridBayesTree BayesTreeType; ///< Type of Bayes tree
typedef HybridJunctionTree JunctionTreeType; ///< Type of Junction tree
/// The default dense elimination function
static std::pair<boost::shared_ptr<ConditionalType>,
boost::shared_ptr<FactorType> >
@ -72,10 +74,16 @@ struct EliminationTraits<HybridGaussianFactorGraph> {
* Everything inside needs to be a hybrid factor or a hybrid conditional.
*/
class GTSAM_EXPORT HybridGaussianFactorGraph
: public FactorGraph<HybridFactor>,
: public HybridFactorGraph,
public EliminateableFactorGraph<HybridGaussianFactorGraph> {
protected:
/// Check if FACTOR type is derived from GaussianFactor.
template <typename FACTOR>
using IsGaussian = typename std::enable_if<
std::is_base_of<GaussianFactor, FACTOR>::value>::type;
public:
using Base = FactorGraph<HybridFactor>;
using Base = HybridFactorGraph;
using This = HybridGaussianFactorGraph; ///< this class
using BaseEliminateable =
EliminateableFactorGraph<This>; ///< for elimination
@ -100,7 +108,13 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
/// @}
using FactorGraph::add;
using Base::empty;
using Base::reserve;
using Base::size;
using Base::operator[];
using Base::add;
using Base::push_back;
using Base::resize;
/// Add a Jacobian factor to the factor graph.
void add(JacobianFactor&& factor);
@ -113,6 +127,53 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
/// Add a DecisionTreeFactor as a shared ptr.
void add(boost::shared_ptr<DecisionTreeFactor> factor);
/**
* Add a gaussian factor *pointer* to the internal gaussian factor graph
* @param gaussianFactor - boost::shared_ptr to the factor to add
*/
template <typename FACTOR>
IsGaussian<FACTOR> push_gaussian(
const boost::shared_ptr<FACTOR>& gaussianFactor) {
Base::push_back(boost::make_shared<HybridGaussianFactor>(gaussianFactor));
}
/// Construct a factor and add (shared pointer to it) to factor graph.
template <class FACTOR, class... Args>
IsGaussian<FACTOR> emplace_gaussian(Args&&... args) {
auto factor = boost::allocate_shared<FACTOR>(
Eigen::aligned_allocator<FACTOR>(), std::forward<Args>(args)...);
push_gaussian(factor);
}
/**
* @brief Add a single factor shared pointer to the hybrid factor graph.
* Dynamically handles the factor type and assigns it to the correct
* underlying container.
*
* @param sharedFactor The factor to add to this factor graph.
*/
void push_back(const SharedFactor& sharedFactor) {
if (auto p = boost::dynamic_pointer_cast<GaussianFactor>(sharedFactor)) {
push_gaussian(p);
} else {
Base::push_back(sharedFactor);
}
}
/// Get all the discrete keys in the factor graph.
const KeySet getDiscreteKeys() const;
/// Get all the continuous keys in the factor graph.
const KeySet getContinuousKeys() const;
/**
* @brief Return a Colamd constrained ordering where the discrete keys are
* eliminated after the continuous keys.
*
* @return const Ordering
*/
const Ordering getHybridOrdering() const;
};
} // namespace gtsam
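A sketch of the Gaussian-specific helpers added above (keys and matrices illustrative):

HybridGaussianFactorGraph hgfg;

// Construct in place; the JacobianFactor is wrapped in a HybridGaussianFactor:
hgfg.emplace_gaussian<JacobianFactor>(X(0), I_3x3, Z_3x1);

// push_back dispatches on the dynamic type:
GaussianFactor::shared_ptr jf =
    boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1);
hgfg.push_back(jf);  // routed to push_gaussian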

View File

@ -41,6 +41,7 @@ HybridGaussianISAM::HybridGaussianISAM(const HybridBayesTree& bayesTree)
void HybridGaussianISAM::updateInternal(
const HybridGaussianFactorGraph& newFactors,
HybridBayesTree::Cliques* orphans,
const boost::optional<Ordering>& ordering,
const HybridBayesTree::Eliminate& function) {
// Remove the contaminated part of the Bayes tree
BayesNetType bn;
@ -74,16 +75,21 @@ void HybridGaussianISAM::updateInternal(
std::copy(allDiscrete.begin(), allDiscrete.end(),
std::back_inserter(newKeysDiscreteLast));
// KeyVector new
// Get an ordering where the new keys are eliminated last
const VariableIndex index(factors);
const Ordering ordering = Ordering::ColamdConstrainedLast(
index, KeyVector(newKeysDiscreteLast.begin(), newKeysDiscreteLast.end()),
true);
Ordering elimination_ordering;
if (ordering) {
elimination_ordering = *ordering;
} else {
elimination_ordering = Ordering::ColamdConstrainedLast(
index,
KeyVector(newKeysDiscreteLast.begin(), newKeysDiscreteLast.end()),
true);
}
// eliminate all factors (top, added, orphans) into a new Bayes tree
auto bayesTree = factors.eliminateMultifrontal(ordering, function, index);
HybridBayesTree::shared_ptr bayesTree =
factors.eliminateMultifrontal(elimination_ordering, function, index);
// Re-add into Bayes tree data structures
this->roots_.insert(this->roots_.end(), bayesTree->roots().begin(),
@ -93,9 +99,61 @@ void HybridGaussianISAM::updateInternal(
/* ************************************************************************* */
void HybridGaussianISAM::update(const HybridGaussianFactorGraph& newFactors,
const boost::optional<Ordering>& ordering,
const HybridBayesTree::Eliminate& function) {
Cliques orphans;
this->updateInternal(newFactors, &orphans, function);
this->updateInternal(newFactors, &orphans, ordering, function);
}
/* ************************************************************************* */
/**
* @brief Check if `b` is a subset of `a`.
* Arguments are passed by value since they need to be sorted.
*
* @param a KeyVector
* @param b KeyVector
* @return True if the keys of b are a subset of a, else false.
*/
bool IsSubset(KeyVector a, KeyVector b) {
std::sort(a.begin(), a.end());
std::sort(b.begin(), b.end());
return std::includes(a.begin(), a.end(), b.begin(), b.end());
}
/* ************************************************************************* */
void HybridGaussianISAM::prune(const Key& root, const size_t maxNrLeaves) {
auto decisionTree = boost::dynamic_pointer_cast<DecisionTreeFactor>(
this->clique(root)->conditional()->inner());
DecisionTreeFactor prunedDiscreteFactor = decisionTree->prune(maxNrLeaves);
decisionTree->root_ = prunedDiscreteFactor.root_;
std::vector<gtsam::Key> prunedKeys;
for (auto&& clique : nodes()) {
// The cliques can be repeated for each frontal so we record it in
// prunedKeys and check if we have already pruned a particular clique.
if (std::find(prunedKeys.begin(), prunedKeys.end(), clique.first) !=
prunedKeys.end()) {
continue;
}
// Add all the keys of the current clique to be pruned to prunedKeys
for (auto&& key : clique.second->conditional()->frontals()) {
prunedKeys.push_back(key);
}
// Convert parents() to a KeyVector for comparison
KeyVector parents;
for (auto&& parent : clique.second->conditional()->parents()) {
parents.push_back(parent);
}
if (IsSubset(parents, decisionTree->keys())) {
auto gaussianMixture = boost::dynamic_pointer_cast<GaussianMixture>(
clique.second->conditional()->inner());
gaussianMixture->prune(prunedDiscreteFactor);
}
}
}
} // namespace gtsam
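A sketch of the extended update/prune API, assuming two incremental factor graphs and an explicit ordering (all names illustrative):

HybridGaussianISAM isam;

// Default: a constrained COLAMD ordering with the new discrete keys last.
isam.update(graph1);

// Or supply the elimination ordering explicitly:
isam.update(graph2, myOrdering);

// Prune the discrete decision tree rooted at M(3) to at most 10 leaves,
// along with the Gaussian mixtures that depend on its keys.
isam.prune(M(3), 10);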

View File

@ -48,6 +48,7 @@ class GTSAM_EXPORT HybridGaussianISAM : public ISAM<HybridBayesTree> {
void updateInternal(
const HybridGaussianFactorGraph& newFactors,
HybridBayesTree::Cliques* orphans,
const boost::optional<Ordering>& ordering = boost::none,
const HybridBayesTree::Eliminate& function =
HybridBayesTree::EliminationTraitsType::DefaultEliminate);
@ -59,8 +60,17 @@ class GTSAM_EXPORT HybridGaussianISAM : public ISAM<HybridBayesTree> {
* @param function Elimination function.
*/
void update(const HybridGaussianFactorGraph& newFactors,
const boost::optional<Ordering>& ordering = boost::none,
const HybridBayesTree::Eliminate& function =
HybridBayesTree::EliminationTraitsType::DefaultEliminate);
/**
* @brief Prune the discrete decision tree rooted at the given key, and the
* Gaussian mixtures that depend on it, to at most maxNumberLeaves leaves.
*
* @param root The root key in the discrete conditional decision tree.
* @param maxNumberLeaves The maximum number of leaves to keep.
*/
void prune(const Key& root, const size_t maxNumberLeaves);
};
/// traits

View File

@ -31,9 +31,7 @@ template class EliminatableClusterTree<HybridBayesTree,
template class JunctionTree<HybridBayesTree, HybridGaussianFactorGraph>;
struct HybridConstructorTraversalData {
typedef
typename JunctionTree<HybridBayesTree, HybridGaussianFactorGraph>::Node
Node;
typedef HybridJunctionTree::Node Node;
typedef
typename JunctionTree<HybridBayesTree,
HybridGaussianFactorGraph>::sharedNode sharedNode;
@ -62,6 +60,7 @@ struct HybridConstructorTraversalData {
data.junctionTreeNode = boost::make_shared<Node>(node->key, node->factors);
parentData.junctionTreeNode->addChild(data.junctionTreeNode);
// Add all the discrete keys in the hybrid factors to the current data
for (HybridFactor::shared_ptr& f : node->factors) {
for (auto& k : f->discreteKeys()) {
data.discreteKeys.insert(k.first);
@ -72,8 +71,8 @@ struct HybridConstructorTraversalData {
}
// Post-order visitor function
static void ConstructorTraversalVisitorPostAlg2(
const boost::shared_ptr<HybridEliminationTree::Node>& ETreeNode,
static void ConstructorTraversalVisitorPost(
const boost::shared_ptr<HybridEliminationTree::Node>& node,
const HybridConstructorTraversalData& data) {
// In this post-order visitor, we combine the symbolic elimination results
// from the elimination tree children and symbolically eliminate the current
@ -86,15 +85,15 @@ struct HybridConstructorTraversalData {
// Do symbolic elimination for this node
SymbolicFactors symbolicFactors;
symbolicFactors.reserve(ETreeNode->factors.size() +
symbolicFactors.reserve(node->factors.size() +
data.childSymbolicFactors.size());
// Add ETree node factors
symbolicFactors += ETreeNode->factors;
symbolicFactors += node->factors;
// Add symbolic factors passed up from children
symbolicFactors += data.childSymbolicFactors;
Ordering keyAsOrdering;
keyAsOrdering.push_back(ETreeNode->key);
keyAsOrdering.push_back(node->key);
SymbolicConditional::shared_ptr conditional;
SymbolicFactor::shared_ptr separatorFactor;
boost::tie(conditional, separatorFactor) =
@ -105,19 +104,19 @@ struct HybridConstructorTraversalData {
data.parentData->childSymbolicFactors.push_back(separatorFactor);
data.parentData->discreteKeys.merge(data.discreteKeys);
sharedNode node = data.junctionTreeNode;
sharedNode jt_node = data.junctionTreeNode;
const FastVector<SymbolicConditional::shared_ptr>& childConditionals =
data.childSymbolicConditionals;
node->problemSize_ = (int)(conditional->size() * symbolicFactors.size());
jt_node->problemSize_ = (int)(conditional->size() * symbolicFactors.size());
// Merge our children if they are in our clique - if our conditional has
// exactly one fewer parent than our child's conditional.
const size_t nrParents = conditional->nrParents();
const size_t nrChildren = node->nrChildren();
const size_t nrChildren = jt_node->nrChildren();
assert(childConditionals.size() == nrChildren);
// decide which children to merge, as index into children
std::vector<size_t> nrChildrenFrontals = node->nrFrontalsOfChildren();
std::vector<size_t> nrChildrenFrontals = jt_node->nrFrontalsOfChildren();
std::vector<bool> merge(nrChildren, false);
size_t nrFrontals = 1;
for (size_t i = 0; i < nrChildren; i++) {
@ -137,7 +136,7 @@ struct HybridConstructorTraversalData {
}
// now really merge
node->mergeChildren(merge);
jt_node->mergeChildren(merge);
}
};
@ -161,7 +160,7 @@ HybridJunctionTree::HybridJunctionTree(
// the junction tree roots
treeTraversal::DepthFirstForest(eliminationTree, rootData,
Data::ConstructorTraversalVisitorPre,
Data::ConstructorTraversalVisitorPostAlg2);
Data::ConstructorTraversalVisitorPost);
// Assign roots from the dummy node
this->addChildrenAsRoots(rootData.junctionTreeNode);

View File

@ -0,0 +1,41 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridNonlinearFactor.cpp
* @date May 28, 2022
* @author Varun Agrawal
*/
#include <gtsam/hybrid/HybridNonlinearFactor.h>
#include <boost/make_shared.hpp>
namespace gtsam {
/* ************************************************************************* */
HybridNonlinearFactor::HybridNonlinearFactor(
const NonlinearFactor::shared_ptr &other)
: Base(other->keys()), inner_(other) {}
/* ************************************************************************* */
bool HybridNonlinearFactor::equals(const HybridFactor &lf, double tol) const {
return Base::equals(lf, tol);
}
/* ************************************************************************* */
void HybridNonlinearFactor::print(const std::string &s,
const KeyFormatter &formatter) const {
HybridFactor::print(s, formatter);
inner_->print("\n", formatter);
};
} // namespace gtsam

View File

@ -0,0 +1,62 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridNonlinearFactor.h
* @date May 28, 2022
* @author Varun Agrawal
*/
#pragma once
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
namespace gtsam {
/**
* A HybridNonlinearFactor is a layer over NonlinearFactor so that we do not
* have a diamond inheritance.
*/
class HybridNonlinearFactor : public HybridFactor {
private:
NonlinearFactor::shared_ptr inner_;
public:
using Base = HybridFactor;
using This = HybridNonlinearFactor;
using shared_ptr = boost::shared_ptr<This>;
// Explicit conversion from a shared ptr of NonlinearFactor
explicit HybridNonlinearFactor(const NonlinearFactor::shared_ptr &other);
/// @name Testable
/// @{
/// Check equality.
virtual bool equals(const HybridFactor &lf, double tol) const override;
/// GTSAM print utility.
void print(
const std::string &s = "HybridNonlinearFactor\n",
const KeyFormatter &formatter = DefaultKeyFormatter) const override;
/// @}
NonlinearFactor::shared_ptr inner() const { return inner_; }
/// Linearize to a HybridGaussianFactor at the linearization point `c`.
boost::shared_ptr<HybridGaussianFactor> linearize(const Values &c) const {
return boost::make_shared<HybridGaussianFactor>(inner_->linearize(c));
}
};
} // namespace gtsam
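A minimal sketch of wrapping and linearizing, assuming a BetweenFactor<double> as the inner factor (keys, measurement, and sigma illustrative):

auto between = boost::make_shared<BetweenFactor<double>>(
    X(1), X(2), 1.0, noiseModel::Isotropic::Sigma(1, 0.1));
HybridNonlinearFactor wrapped(between);

Values values;
values.insert<double>(X(1), 0.0);
values.insert<double>(X(2), 1.0);

// Produces a HybridGaussianFactor wrapping the linearized factor.
auto hgf = wrapped.linearize(values);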

View File

@ -0,0 +1,100 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridNonlinearFactorGraph.cpp
* @brief Nonlinear hybrid factor graph that uses type erasure
* @author Varun Agrawal
* @date May 28, 2022
*/
#include <gtsam/hybrid/HybridNonlinearFactorGraph.h>
namespace gtsam {
/* ************************************************************************* */
void HybridNonlinearFactorGraph::add(
boost::shared_ptr<NonlinearFactor> factor) {
FactorGraph::add(boost::make_shared<HybridNonlinearFactor>(factor));
}
/* ************************************************************************* */
void HybridNonlinearFactorGraph::add(
boost::shared_ptr<DiscreteFactor> factor) {
FactorGraph::add(boost::make_shared<HybridDiscreteFactor>(factor));
}
/* ************************************************************************* */
void HybridNonlinearFactorGraph::print(const std::string& s,
const KeyFormatter& keyFormatter) const {
// Base::print(str, keyFormatter);
std::cout << (s.empty() ? "" : s + " ") << std::endl;
std::cout << "size: " << size() << std::endl;
for (size_t i = 0; i < factors_.size(); i++) {
std::stringstream ss;
ss << "factor " << i << ": ";
if (factors_[i]) {
factors_[i]->print(ss.str(), keyFormatter);
std::cout << std::endl;
}
}
}
/* ************************************************************************* */
HybridGaussianFactorGraph HybridNonlinearFactorGraph::linearize(
const Values& continuousValues) const {
// create an empty linear FG
HybridGaussianFactorGraph linearFG;
linearFG.reserve(size());
// linearize all hybrid factors
for (auto&& factor : factors_) {
// First check if it is a valid factor
if (factor) {
// Check if the factor is a hybrid factor.
// It can be either a nonlinear MixtureFactor or a linear
// GaussianMixtureFactor.
if (factor->isHybrid()) {
// Check if it is a nonlinear mixture factor
if (auto nlmf = boost::dynamic_pointer_cast<MixtureFactor>(factor)) {
linearFG.push_back(nlmf->linearize(continuousValues));
} else {
linearFG.push_back(factor);
}
// Now check if the factor is a continuous only factor.
} else if (factor->isContinuous()) {
// In this case, we check if factor->inner() is nonlinear since
// HybridFactors wrap over continuous factors.
auto nlhf = boost::dynamic_pointer_cast<HybridNonlinearFactor>(factor);
if (auto nlf =
boost::dynamic_pointer_cast<NonlinearFactor>(nlhf->inner())) {
auto hgf = boost::make_shared<HybridGaussianFactor>(
nlf->linearize(continuousValues));
linearFG.push_back(hgf);
} else {
linearFG.push_back(factor);
}
// Finally, if nothing else matched, the factor is discrete-only, which
// doesn't need linearization.
} else {
linearFG.push_back(factor);
}
} else {
linearFG.push_back(GaussianFactor::shared_ptr());
}
}
return linearFG;
}
} // namespace gtsam
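From the caller's side, the dispatch above reduces to a single call (graph contents elided):

HybridNonlinearFactorGraph nfg;
// ... push_nonlinear, push_discrete, and MixtureFactor components ...

Values linearizationPoint;
// ... insert continuous values for all nonlinear keys ...

// Nonlinear factors and MixtureFactors are linearized; discrete and
// already-linear factors pass through unchanged.
HybridGaussianFactorGraph lfg = nfg.linearize(linearizationPoint);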

View File

@ -0,0 +1,137 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridNonlinearFactorGraph.h
* @brief Nonlinear hybrid factor graph that uses type erasure
* @author Varun Agrawal
* @date May 28, 2022
*/
#pragma once
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/hybrid/HybridFactorGraph.h>
#include <gtsam/hybrid/HybridGaussianFactorGraph.h>
#include <gtsam/hybrid/HybridNonlinearFactor.h>
#include <gtsam/hybrid/MixtureFactor.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <boost/format.hpp>
namespace gtsam {
/**
* Nonlinear Hybrid Factor Graph
* -----------------------
* This is the non-linear version of a hybrid factor graph.
* Everything inside needs to be a hybrid factor or a hybrid conditional.
*/
class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph {
protected:
/// Check if FACTOR type is derived from NonlinearFactor.
template <typename FACTOR>
using IsNonlinear = typename std::enable_if<
std::is_base_of<NonlinearFactor, FACTOR>::value>::type;
public:
using Base = HybridFactorGraph;
using This = HybridNonlinearFactorGraph; ///< this class
using shared_ptr = boost::shared_ptr<This>; ///< shared_ptr to This
using Values = gtsam::Values; ///< backwards compatibility
using Indices = KeyVector;  ///< vector of variable keys
/// @name Constructors
/// @{
HybridNonlinearFactorGraph() = default;
/**
* Implicit copy/downcast constructor to override explicit template container
* constructor. In BayesTree this is used for:
* `cachedSeparatorMarginal_.reset(*separatorMarginal)`
* */
template <class DERIVEDFACTOR>
HybridNonlinearFactorGraph(const FactorGraph<DERIVEDFACTOR>& graph)
: Base(graph) {}
/// @}
// Allow use of selected FactorGraph methods:
using Base::empty;
using Base::reserve;
using Base::size;
using Base::operator[];
using Base::add;
using Base::resize;
/**
* Add a nonlinear factor *pointer* to the internal nonlinear factor graph
* @param nonlinearFactor - boost::shared_ptr to the factor to add
*/
template <typename FACTOR>
IsNonlinear<FACTOR> push_nonlinear(
const boost::shared_ptr<FACTOR>& nonlinearFactor) {
Base::push_back(boost::make_shared<HybridNonlinearFactor>(nonlinearFactor));
}
/// Construct a factor and add (shared pointer to it) to factor graph.
template <class FACTOR, class... Args>
IsNonlinear<FACTOR> emplace_nonlinear(Args&&... args) {
auto factor = boost::allocate_shared<FACTOR>(
Eigen::aligned_allocator<FACTOR>(), std::forward<Args>(args)...);
push_nonlinear(factor);
}
/**
* @brief Add a single factor shared pointer to the hybrid factor graph.
* Dynamically handles the factor type and assigns it to the correct
* underlying container.
*
* @tparam FACTOR The factor type template
* @param sharedFactor The factor to add to this factor graph.
*/
template <typename FACTOR>
void push_back(const boost::shared_ptr<FACTOR>& sharedFactor) {
if (auto p = boost::dynamic_pointer_cast<NonlinearFactor>(sharedFactor)) {
push_nonlinear(p);
} else {
Base::push_back(sharedFactor);
}
}
/// Add a nonlinear factor as a shared ptr.
void add(boost::shared_ptr<NonlinearFactor> factor);
/// Add a discrete factor as a shared ptr.
void add(boost::shared_ptr<DiscreteFactor> factor);
/// Print the factor graph.
void print(
const std::string& s = "HybridNonlinearFactorGraph",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
/**
* @brief Linearize all the continuous factors in the
* HybridNonlinearFactorGraph.
*
* @param continuousValues Values to use as the linearization point.
* @return HybridGaussianFactorGraph
*/
HybridGaussianFactorGraph linearize(const Values& continuousValues) const;
};
template <>
struct traits<HybridNonlinearFactorGraph>
: public Testable<HybridNonlinearFactorGraph> {};
} // namespace gtsam

gtsam/hybrid/HybridValues.h (new file, 145 lines)
View File

@ -0,0 +1,145 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HybridValues.h
* @date Jul 28, 2022
* @author Shangjie Xue
*/
#pragma once
#include <gtsam/discrete/Assignment.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam/discrete/DiscreteValues.h>
#include <gtsam/inference/Key.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/Values.h>
#include <map>
#include <string>
#include <vector>
namespace gtsam {
/**
* HybridValues represents a collection of DiscreteValues and VectorValues.
* It is typically used to store the variables of a HybridGaussianFactorGraph.
* Optimizing a HybridGaussianBayesNet returns this class.
*/
class GTSAM_EXPORT HybridValues {
private:
// DiscreteValues stores the discrete components of the HybridValues.
DiscreteValues discrete_;
// VectorValues stores the continuous components of the HybridValues.
VectorValues continuous_;
public:
/// @name Standard Constructors
/// @{
/// Default constructor creates an empty HybridValues.
HybridValues() = default;
/// Construct from DiscreteValues and VectorValues.
HybridValues(const DiscreteValues& dv, const VectorValues& cv)
: discrete_(dv), continuous_(cv){};
/// @}
/// @name Testable
/// @{
/// print required by Testable for unit testing
void print(const std::string& s = "HybridValues",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const {
std::cout << s << ": \n";
discrete_.print(" Discrete", keyFormatter); // print discrete components
continuous_.print(" Continuous",
keyFormatter); // print continuous components
};
/// equals required by Testable for unit testing
bool equals(const HybridValues& other, double tol = 1e-9) const {
return discrete_.equals(other.discrete_, tol) &&
continuous_.equals(other.continuous_, tol);
}
/// @}
/// @name Interface
/// @{
/// Return the discrete MPE assignment
DiscreteValues discrete() const { return discrete_; }
/// Return the delta update for the continuous vectors
VectorValues continuous() const { return continuous_; }
/// Check whether a variable with key \c j exists in DiscreteValue.
bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); };
/// Check whether a variable with key \c j exists in VectorValue.
bool existsVector(Key j) { return continuous_.exists(j); };
/// Check whether a variable with key \c j exists.
bool exists(Key j) { return existsDiscrete(j) || existsVector(j); };
/** Insert a discrete \c value with key \c j. Replaces the existing value if
* the key \c j is already used.
* @param value The vector to be inserted.
* @param j The index with which the value will be associated. */
void insert(Key j, int value) { discrete_[j] = value; };
/** Insert a vector \c value with key \c j. Throws an invalid_argument
* exception if the key \c j is already used.
* @param value The vector to be inserted.
* @param j The index with which the value will be associated. */
void insert(Key j, const Vector& value) { continuous_.insert(j, value); }
// TODO(Shangjie): add update() and insert_or_assign(), similar to Values.h
/**
* Read/write access to the discrete value with key \c j, throws
* std::out_of_range if \c j does not exist.
*/
size_t& atDiscrete(Key j) { return discrete_.at(j); };
/**
* Read/write access to the vector value with key \c j, throws
* std::out_of_range if \c j does not exist.
*/
Vector& at(Key j) { return continuous_.at(j); };
/// @name Wrapper support
/// @{
/**
* @brief Output as a html table.
*
* @param keyFormatter function that formats keys.
* @return string html output.
*/
std::string html(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const {
std::stringstream ss;
ss << this->discrete_.html(keyFormatter);
ss << this->continuous_.html(keyFormatter);
return ss.str();
};
/// @}
};
// traits
template <>
struct traits<HybridValues> : public Testable<HybridValues> {};
} // namespace gtsam
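A usage sketch of the mixed container (keys illustrative):

HybridValues values;
values.insert(M(1), 1);             // discrete assignment
values.insert(X(1), Vector1(0.5));  // continuous vector

size_t mode = values.atDiscrete(M(1));  // read/write access
Vector x1 = values.at(X(1));
values.print("hybrid solution");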

View File

@ -0,0 +1,256 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file MixtureFactor.h
* @brief Nonlinear Mixture factor of continuous and discrete.
* @author Kevin Doherty, kdoherty@mit.edu
* @author Varun Agrawal
* @date December 2021
*/
#pragma once
#include <gtsam/discrete/DiscreteValues.h>
#include <gtsam/hybrid/GaussianMixtureFactor.h>
#include <gtsam/hybrid/HybridNonlinearFactor.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/nonlinear/Symbol.h>
#include <algorithm>
#include <boost/format.hpp>
#include <cmath>
#include <limits>
#include <vector>
namespace gtsam {
/**
* @brief Implementation of a discrete conditional mixture factor.
*
* Implements a joint discrete-continuous factor where the discrete variable
* serves to "select" a mixture component corresponding to a NonlinearFactor
* type of measurement.
*
* This class stores all factors as HybridFactors which can then be typecast to
* one of (NonlinearFactor, GaussianFactor) which can then be checked to perform
* the correct operation.
*/
class MixtureFactor : public HybridFactor {
public:
using Base = HybridFactor;
using This = MixtureFactor;
using shared_ptr = boost::shared_ptr<MixtureFactor>;
using sharedFactor = boost::shared_ptr<NonlinearFactor>;
/**
* @brief typedef for DecisionTree which has Keys as node labels and
* NonlinearFactor as leaf nodes.
*/
using Factors = DecisionTree<Key, sharedFactor>;
private:
/// Decision tree of Gaussian factors indexed by discrete keys.
Factors factors_;
bool normalized_;
public:
MixtureFactor() = default;
/**
* @brief Construct from Decision tree.
*
* @param keys Vector of keys for continuous factors.
* @param discreteKeys Vector of discrete keys.
* @param factors Decision tree of shared factor pointers.
* @param normalized Flag indicating if the factor error is already
* normalized.
*/
MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys,
const Factors& factors, bool normalized = false)
: Base(keys, discreteKeys), factors_(factors), normalized_(normalized) {}
/**
* @brief Convenience constructor that generates the underlying factor
* decision tree for us.
*
* Here it is important that the vector of factors has the correct number of
* elements based on the number of discrete keys and the cardinality of the
* keys, so that the decision tree is constructed appropriately.
*
* @tparam FACTOR The type of the factor shared pointers being passed in. Will
* be typecast to NonlinearFactor shared pointers.
* @param keys Vector of keys for continuous factors.
* @param discreteKeys Vector of discrete keys.
* @param factors Vector of shared pointers to factors.
* @param normalized Flag indicating if the factor error is already
* normalized.
*/
template <typename FACTOR>
MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys,
const std::vector<boost::shared_ptr<FACTOR>>& factors,
bool normalized = false)
: Base(keys, discreteKeys), normalized_(normalized) {
std::vector<NonlinearFactor::shared_ptr> nonlinear_factors;
KeySet continuous_keys_set(keys.begin(), keys.end());
KeySet factor_keys_set;
for (auto&& f : factors) {
// Insert all factor continuous keys in the continuous keys set.
std::copy(f->keys().begin(), f->keys().end(),
std::inserter(factor_keys_set, factor_keys_set.end()));
nonlinear_factors.push_back(
boost::dynamic_pointer_cast<NonlinearFactor>(f));
}
factors_ = Factors(discreteKeys, nonlinear_factors);
if (continuous_keys_set != factor_keys_set) {
throw std::runtime_error(
"The specified continuous keys and the keys in the factors don't "
"match!");
}
}
~MixtureFactor() = default;
/**
* @brief Compute error of factor given both continuous and discrete values.
*
* @param continuousVals The continuous Values.
* @param discreteVals The discrete Values.
* @return double The error of this factor.
*/
double error(const Values& continuousVals,
const DiscreteValues& discreteVals) const {
// Retrieve the factor corresponding to the assignment in discreteVals.
auto factor = factors_(discreteVals);
// Compute the error for the selected factor
const double factorError = factor->error(continuousVals);
if (normalized_) return factorError;
return factorError +
this->nonlinearFactorLogNormalizingConstant(factor, continuousVals);
}
size_t dim() const {
// TODO(Varun)
throw std::runtime_error("MixtureFactor::dim not implemented.");
}
/// Testable
/// @{
/// print to stdout
void print(
const std::string& s = "MixtureFactor",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << (s.empty() ? "" : s + " ");
Base::print("", keyFormatter);
std::cout << "\nMixtureFactor\n";
auto valueFormatter = [](const sharedFactor& v) {
if (v) {
return (boost::format("Nonlinear factor on %d keys") % v->size()).str();
} else {
return std::string("nullptr");
}
};
factors_.print("", keyFormatter, valueFormatter);
}
/// Check equality
bool equals(const HybridFactor& other, double tol = 1e-9) const override {
// We attempt a dynamic cast from HybridFactor to MixtureFactor. If it
// fails, return false.
if (!dynamic_cast<const MixtureFactor*>(&other)) return false;
// If the cast is successful, we'll properly construct a MixtureFactor
// object from `other`
const MixtureFactor& f(static_cast<const MixtureFactor&>(other));
// Ensure that this MixtureFactor and `f` have the same `factors_`.
auto compare = [tol](const sharedFactor& a, const sharedFactor& b) {
return traits<NonlinearFactor>::Equals(*a, *b, tol);
};
if (!factors_.equals(f.factors_, compare)) return false;
// If everything above passes, and the keys_, discreteKeys_ and normalized_
// member variables are identical, return true.
return (std::equal(keys_.begin(), keys_.end(), f.keys().begin()) &&
(discreteKeys_ == f.discreteKeys_) &&
(normalized_ == f.normalized_));
}
/// @}
/// Linearize specific nonlinear factors based on the assignment in
/// discreteValues.
GaussianFactor::shared_ptr linearize(
const Values& continuousVals, const DiscreteValues& discreteVals) const {
auto factor = factors_(discreteVals);
return factor->linearize(continuousVals);
}
/// Linearize all the continuous factors to get a GaussianMixtureFactor.
boost::shared_ptr<GaussianMixtureFactor> linearize(
const Values& continuousVals) const {
// functional to linearize each factor in the decision tree
auto linearizeDT = [continuousVals](const sharedFactor& factor) {
return factor->linearize(continuousVals);
};
DecisionTree<Key, GaussianFactor::shared_ptr> linearized_factors(
factors_, linearizeDT);
return boost::make_shared<GaussianMixtureFactor>(
continuousKeys_, discreteKeys_, linearized_factors);
}
/**
* If the component factors are not already normalized, we want to compute
* their normalizing constants so that the resulting joint distribution is
* appropriately computed. Remember, this is the _negative_ normalizing
* constant for the measurement likelihood (since we are minimizing the
* _negative_ log-likelihood).
*/
double nonlinearFactorLogNormalizingConstant(const sharedFactor& factor,
const Values& values) const {
// Information matrix (inverse covariance matrix) for the factor.
Matrix infoMat;
// If this is a NoiseModelFactor, we'll use its noiseModel to compute the
// information matrix; otherwise noiseModelFactor will be nullptr.
if (auto noiseModelFactor =
boost::dynamic_pointer_cast<NoiseModelFactor>(factor)) {
// If dynamic cast to NoiseModelFactor succeeded, see if the noise model
// is Gaussian
auto noiseModel = noiseModelFactor->noiseModel();
auto gaussianNoiseModel =
boost::dynamic_pointer_cast<noiseModel::Gaussian>(noiseModel);
if (gaussianNoiseModel) {
// If the noise model is Gaussian, retrieve the information matrix
infoMat = gaussianNoiseModel->information();
} else {
// If the factor is not a Gaussian factor, we'll linearize it to get
// something with a normalized noise model
// TODO(kevin): does this make sense to do? I think maybe not in
// general? Should we just yell at the user?
auto gaussianFactor = factor->linearize(values);
infoMat = gaussianFactor->information();
}
}
// Compute the (negative) log of the normalizing constant
return -(factor->dim() * log(2.0 * M_PI) / 2.0) -
(log(infoMat.determinant()) / 2.0);
}
};
} // namespace gtsam
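A sketch of the convenience constructor and both linearize paths, using two BetweenFactor<double> components (keys, sigma, and measurements illustrative):

auto noise = noiseModel::Isotropic::Sigma(1, 1.0);
auto still = boost::make_shared<BetweenFactor<double>>(X(1), X(2), 0.0, noise);
auto moving = boost::make_shared<BetweenFactor<double>>(X(1), X(2), 1.0, noise);

// One component per assignment of the binary mode key M(1).
std::vector<boost::shared_ptr<BetweenFactor<double>>> components{still, moving};
MixtureFactor mixture({X(1), X(2)}, {{M(1), 2}}, components);

Values continuous;
continuous.insert<double>(X(1), 0.0);
continuous.insert<double>(X(2), 1.0);

DiscreteValues mode;
mode[M(1)] = 1;  // select the "moving" component
double err = mixture.error(continuous, mode);

// Or linearize every component at once into a GaussianMixtureFactor:
auto gmf = mixture.linearize(continuous);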

View File

@ -4,6 +4,22 @@
namespace gtsam {
#include <gtsam/hybrid/HybridValues.h>
class HybridValues {
gtsam::DiscreteValues discrete() const;
gtsam::VectorValues continuous() const;
HybridValues();
HybridValues(const gtsam::DiscreteValues &dv, const gtsam::VectorValues &cv);
void print(string s = "HybridValues",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
bool equals(const gtsam::HybridValues& other, double tol) const;
void insert(gtsam::Key j, int value);
void insert(gtsam::Key j, const gtsam::Vector& value);
size_t& atDiscrete(gtsam::Key j);
gtsam::Vector& at(gtsam::Key j);
};
#include <gtsam/hybrid/HybridFactor.h>
virtual class HybridFactor {
void print(string s = "HybridFactor\n",
@ -23,7 +39,17 @@ virtual class HybridConditional {
bool equals(const gtsam::HybridConditional& other, double tol = 1e-9) const;
size_t nrFrontals() const;
size_t nrParents() const;
Factor* inner();
gtsam::Factor* inner();
};
#include <gtsam/hybrid/HybridDiscreteFactor.h>
virtual class HybridDiscreteFactor {
HybridDiscreteFactor(gtsam::DecisionTreeFactor dtf);
void print(string s = "HybridDiscreteFactor\n",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
bool equals(const gtsam::HybridDiscreteFactor& other, double tol = 1e-9) const;
gtsam::Factor* inner();
};
#include <gtsam/hybrid/GaussianMixtureFactor.h>
@ -73,6 +99,8 @@ class HybridBayesTree {
bool empty() const;
const HybridBayesTreeClique* operator[](size_t j) const;
gtsam::HybridValues optimize() const;
string dot(const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
};
@ -84,6 +112,7 @@ class HybridBayesNet {
size_t size() const;
gtsam::KeySet keys() const;
const gtsam::HybridConditional* at(size_t i) const;
gtsam::HybridValues optimize() const;
void print(string s = "HybridBayesNet\n",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
@ -115,6 +144,7 @@ class HybridGaussianFactorGraph {
void add(gtsam::JacobianFactor* factor);
bool empty() const;
void remove(size_t i);
size_t size() const;
gtsam::KeySet keys() const;
const gtsam::HybridFactor* at(size_t i) const;
@ -142,4 +172,50 @@ class HybridGaussianFactorGraph {
const gtsam::DotWriter& writer = gtsam::DotWriter()) const;
};
#include <gtsam/hybrid/HybridNonlinearFactorGraph.h>
class HybridNonlinearFactorGraph {
HybridNonlinearFactorGraph();
HybridNonlinearFactorGraph(const gtsam::HybridNonlinearFactorGraph& graph);
void push_back(gtsam::HybridFactor* factor);
void push_back(gtsam::NonlinearFactor* factor);
void push_back(gtsam::HybridDiscreteFactor* factor);
void add(gtsam::NonlinearFactor* factor);
void add(gtsam::DiscreteFactor* factor);
gtsam::HybridGaussianFactorGraph linearize(const gtsam::Values& continuousValues) const;
bool empty() const;
void remove(size_t i);
size_t size() const;
gtsam::KeySet keys() const;
const gtsam::HybridFactor* at(size_t i) const;
void print(string s = "HybridNonlinearFactorGraph\n",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
};
#include <gtsam/hybrid/MixtureFactor.h>
class MixtureFactor : gtsam::HybridFactor {
MixtureFactor(const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys,
const gtsam::DecisionTree<gtsam::Key, gtsam::NonlinearFactor*>& factors, bool normalized = false);
template <FACTOR = {gtsam::NonlinearFactor}>
MixtureFactor(const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys,
const std::vector<FACTOR*>& factors,
bool normalized = false);
double error(const gtsam::Values& continuousVals,
const gtsam::DiscreteValues& discreteVals) const;
double nonlinearFactorLogNormalizingConstant(const gtsam::NonlinearFactor* factor,
const gtsam::Values& values) const;
GaussianMixtureFactor* linearize(
const gtsam::Values& continuousVals) const;
void print(string s = "MixtureFactor\n",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
};
} // namespace gtsam

View File

@ -18,20 +18,39 @@
#include <gtsam/base/Matrix.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteDistribution.h>
#include <gtsam/hybrid/GaussianMixtureFactor.h>
#include <gtsam/hybrid/HybridGaussianFactorGraph.h>
#include <gtsam/hybrid/HybridNonlinearFactorGraph.h>
#include <gtsam/hybrid/MixtureFactor.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <vector>
#pragma once
using gtsam::symbol_shorthand::C;
using gtsam::symbol_shorthand::X;
namespace gtsam {
using symbol_shorthand::C;
using symbol_shorthand::M;
using symbol_shorthand::X;
/**
* @brief Create a switching system chain. A switching system is a continuous
* system which depends on a discrete mode at each time step of the chain.
*
* @param n The number of chain elements.
 * @param keyFunc The function used to generate the continuous keys.
 * @param dKeyFunc The function used to generate the discrete keys.
* @return HybridGaussianFactorGraph::shared_ptr
*/
inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain(
size_t n, std::function<Key(int)> keyFunc = X,
std::function<Key(int)> dKeyFunc = C) {
std::function<Key(int)> dKeyFunc = M) {
HybridGaussianFactorGraph hfg;
hfg.add(JacobianFactor(keyFunc(1), I_3x3, Z_3x1));
@ -54,9 +73,19 @@ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain(
return boost::make_shared<HybridGaussianFactorGraph>(std::move(hfg));
}
/**
* @brief Return the ordering as a binary tree such that all parent nodes are
* above their children.
*
* This will result in a nested dissection Bayes tree after elimination.
*
* @param input The original ordering.
* @return std::pair<KeyVector, std::vector<int>>
*/
inline std::pair<KeyVector, std::vector<int>> makeBinaryOrdering(
std::vector<Key> &input) {
KeyVector new_order;
std::vector<int> levels(input.size());
std::function<void(std::vector<Key>::iterator, std::vector<Key>::iterator,
int)>
@ -79,8 +108,101 @@ inline std::pair<KeyVector, std::vector<int>> makeBinaryOrdering(
bsg(input.begin(), input.end(), 0);
std::reverse(new_order.begin(), new_order.end());
// std::reverse(levels.begin(), levels.end());
return {new_order, levels};
}
/* ************************************************************************* */
using MotionModel = BetweenFactor<double>;
// using MotionMixture = MixtureFactor<MotionModel>;
// Test fixture with switching network.
struct Switching {
size_t K;
DiscreteKeys modes;
HybridNonlinearFactorGraph nonlinearFactorGraph;
HybridGaussianFactorGraph linearizedFactorGraph;
Values linearizationPoint;
/// Create with given number of time steps.
Switching(size_t K, double between_sigma = 1.0, double prior_sigma = 0.1)
: K(K) {
// Create DiscreteKeys for binary K modes, modes[0] will not be used.
for (size_t k = 0; k <= K; k++) {
modes.emplace_back(M(k), 2);
}
// Create hybrid factor graph.
// Add a prior on X(1).
auto prior = boost::make_shared<PriorFactor<double>>(
X(1), 0, noiseModel::Isotropic::Sigma(1, prior_sigma));
nonlinearFactorGraph.push_nonlinear(prior);
// Add "motion models".
for (size_t k = 1; k < K; k++) {
KeyVector keys = {X(k), X(k + 1)};
auto motion_models = motionModels(k, between_sigma);
std::vector<NonlinearFactor::shared_ptr> components;
for (auto &&f : motion_models) {
components.push_back(boost::dynamic_pointer_cast<NonlinearFactor>(f));
}
nonlinearFactorGraph.emplace_hybrid<MixtureFactor>(
keys, DiscreteKeys{modes[k]}, components);
}
// Add measurement factors
auto measurement_noise = noiseModel::Isotropic::Sigma(1, prior_sigma);
for (size_t k = 2; k <= K; k++) {
nonlinearFactorGraph.emplace_nonlinear<PriorFactor<double>>(
X(k), 1.0 * (k - 1), measurement_noise);
}
// Add "mode chain"
addModeChain(&nonlinearFactorGraph);
// Create the linearization point.
for (size_t k = 1; k <= K; k++) {
linearizationPoint.insert<double>(X(k), static_cast<double>(k));
}
linearizedFactorGraph = nonlinearFactorGraph.linearize(linearizationPoint);
}
// Create motion models for a given time step
static std::vector<MotionModel::shared_ptr> motionModels(size_t k,
double sigma = 1.0) {
auto noise_model = noiseModel::Isotropic::Sigma(1, sigma);
auto still =
boost::make_shared<MotionModel>(X(k), X(k + 1), 0.0, noise_model),
moving =
boost::make_shared<MotionModel>(X(k), X(k + 1), 1.0, noise_model);
return {still, moving};
}
// Add "mode chain" to HybridNonlinearFactorGraph
void addModeChain(HybridNonlinearFactorGraph *fg) {
auto prior = boost::make_shared<DiscreteDistribution>(modes[1], "1/1");
fg->push_discrete(prior);
for (size_t k = 1; k < K - 1; k++) {
auto parents = {modes[k]};
auto conditional = boost::make_shared<DiscreteConditional>(
modes[k + 1], parents, "1/2 3/2");
fg->push_discrete(conditional);
}
}
// Add "mode chain" to HybridGaussianFactorGraph
void addModeChain(HybridGaussianFactorGraph *fg) {
auto prior = boost::make_shared<DiscreteDistribution>(modes[1], "1/1");
fg->push_discrete(prior);
for (size_t k = 1; k < K - 1; k++) {
auto parents = {modes[k]};
auto conditional = boost::make_shared<DiscreteConditional>(
modes[k + 1], parents, "1/2 3/2");
fg->push_discrete(conditional);
}
}
};
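// A minimal usage sketch of the fixture (mirrors the tests below): create a
// 3-step switching system, then eliminate its pre-linearized factor graph.
//
//   Switching s(3);
//   Ordering ordering = s.linearizedFactorGraph.getHybridOrdering();
//   HybridBayesNet::shared_ptr posterior =
//       s.linearizedFactorGraph.eliminateSequential(ordering);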
} // namespace gtsam

View File

@ -17,6 +17,7 @@
#include <CppUnitLite/Test.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam/discrete/DiscreteValues.h>
@ -30,6 +31,7 @@
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/hybrid/HybridGaussianFactorGraph.h>
#include <gtsam/hybrid/HybridGaussianISAM.h>
#include <gtsam/hybrid/HybridValues.h>
#include <gtsam/inference/BayesNet.h>
#include <gtsam/inference/DotWriter.h>
#include <gtsam/inference/Key.h>
@ -52,32 +54,36 @@ using namespace boost::assign;
using namespace std;
using namespace gtsam;
using gtsam::symbol_shorthand::C;
using gtsam::symbol_shorthand::D;
using gtsam::symbol_shorthand::M;
using gtsam::symbol_shorthand::X;
using gtsam::symbol_shorthand::Y;
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, creation) {
HybridConditional test;
TEST(HybridGaussianFactorGraph, Creation) {
HybridConditional conditional;
HybridGaussianFactorGraph hfg;
hfg.add(HybridGaussianFactor(JacobianFactor(0, I_3x3, Z_3x1)));
hfg.add(HybridGaussianFactor(JacobianFactor(X(0), I_3x3, Z_3x1)));
GaussianMixture clgc(
{X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{C(0), 2}),
GaussianMixture::Conditionals(
C(0),
boost::make_shared<GaussianConditional>(X(0), Z_3x1, I_3x3, X(1),
I_3x3),
boost::make_shared<GaussianConditional>(X(0), Vector3::Ones(), I_3x3,
X(1), I_3x3)));
GTSAM_PRINT(clgc);
// Define a Gaussian mixture conditional P(x0 | x1, m0) and add it to the
// factor graph
GaussianMixture gm({X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{M(0), 2}),
GaussianMixture::Conditionals(
M(0),
boost::make_shared<GaussianConditional>(
X(0), Z_3x1, I_3x3, X(1), I_3x3),
boost::make_shared<GaussianConditional>(
X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)));
hfg.add(gm);
EXPECT_LONGS_EQUAL(2, hfg.size());
}
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, eliminate) {
TEST(HybridGaussianFactorGraph, EliminateSequential) {
// Test elimination of a single variable.
HybridGaussianFactorGraph hfg;
hfg.add(HybridGaussianFactor(JacobianFactor(0, I_3x3, Z_3x1)));
@ -88,13 +94,15 @@ TEST(HybridGaussianFactorGraph, eliminate) {
}
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, eliminateMultifrontal) {
TEST(HybridGaussianFactorGraph, EliminateMultifrontal) {
// Test multifrontal elimination
HybridGaussianFactorGraph hfg;
DiscreteKey c(C(1), 2);
DiscreteKey m(M(1), 2);
// Add priors on x0 and m1
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8})));
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8})));
Ordering ordering;
ordering.push_back(X(0));
@ -108,117 +116,111 @@ TEST(HybridGaussianFactorGraph, eliminateMultifrontal) {
TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) {
HybridGaussianFactorGraph hfg;
DiscreteKey c1(C(1), 2);
DiscreteKey m1(M(1), 2);
// Add prior on x0
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
// Add factor between x0 and x1
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1));
// Add a Gaussian mixture factor ϕ(x1, m1)
DecisionTree<Key, GaussianFactor::shared_ptr> dt(
C(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
M(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt));
hfg.add(GaussianMixtureFactor({X(1)}, {m1}, dt));
auto result =
hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {C(1)}));
hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {M(1)}));
auto dc = result->at(2)->asDiscreteConditional();
DiscreteValues dv;
dv[C(1)] = 0;
EXPECT_DOUBLES_EQUAL(0.6225, dc->operator()(dv), 1e-3);
dv[M(1)] = 0;
EXPECT_DOUBLES_EQUAL(1, dc->operator()(dv), 1e-3);
}
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) {
HybridGaussianFactorGraph hfg;
DiscreteKey c1(C(1), 2);
DiscreteKey m1(M(1), 2);
// Add prior on x0
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
// Add factor between x0 and x1
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1));
DecisionTree<Key, GaussianFactor::shared_ptr> dt(
C(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones()));
std::vector<GaussianFactor::shared_ptr> factors = {
boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones())};
hfg.add(GaussianMixtureFactor({X(1)}, {m1}, factors));
hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt));
// hfg.add(GaussianMixtureFactor({X(0)}, {c1}, dt));
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c1, {2, 8})));
hfg.add(HybridDiscreteFactor(
DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4")));
// hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(2), 2}, {C(3), 2}}, "1
// 2 3 4"))); hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(3), 2},
// {C(1), 2}}, "1 2 2 1")));
// Discrete probability table for m1
hfg.add(DecisionTreeFactor(m1, {2, 8}));
// Joint discrete probability table for m1, m2
hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4"));
auto result = hfg.eliminateSequential(
Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)}));
HybridBayesNet::shared_ptr result = hfg.eliminateSequential(
Ordering::ColamdConstrainedLast(hfg, {M(1), M(2)}));
GTSAM_PRINT(*result);
// There are 4 variables (2 continuous + 2 discrete) in the Bayes net.
EXPECT_LONGS_EQUAL(4, result->size());
}
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) {
HybridGaussianFactorGraph hfg;
DiscreteKey c1(C(1), 2);
DiscreteKey m1(M(1), 2);
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1));
// DecisionTree<Key, GaussianFactor::shared_ptr> dt(
// C(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
// boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones()));
// hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt));
hfg.add(GaussianMixtureFactor::FromFactors(
{X(1)}, {{C(1), 2}},
{X(1)}, {{M(1), 2}},
{boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones())}));
// hfg.add(GaussianMixtureFactor({X(0)}, {c1}, dt));
hfg.add(DecisionTreeFactor(c1, {2, 8}));
hfg.add(DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4"));
// hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(2), 2}, {C(3), 2}}, "1
// 2 3 4"))); hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(3), 2},
// {C(1), 2}}, "1 2 2 1")));
hfg.add(DecisionTreeFactor(m1, {2, 8}));
hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4"));
auto result = hfg.eliminateMultifrontal(
Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)}));
HybridBayesTree::shared_ptr result =
hfg.eliminateMultifrontal(hfg.getHybridOrdering());
GTSAM_PRINT(*result);
GTSAM_PRINT(*result->marginalFactor(C(2)));
// The Bayes tree should have 3 cliques
EXPECT_LONGS_EQUAL(3, result->size());
// GTSAM_PRINT(*result);
// GTSAM_PRINT(*result->marginalFactor(M(2)));
}
/* ************************************************************************* */
TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) {
HybridGaussianFactorGraph hfg;
DiscreteKey c(C(1), 2);
DiscreteKey m(M(1), 2);
// Prior on x0
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
// Factor between x0-x1
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1));
// Decision tree with different modes on x1
DecisionTree<Key, GaussianFactor::shared_ptr> dt(
C(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
M(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(1)}, {c}, dt));
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8})));
// hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1
// 2 3 4")));
// Hybrid factor P(x1 | m1)
hfg.add(GaussianMixtureFactor({X(1)}, {m}, dt));
// Prior factor on m1
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8})));
auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {C(1)});
// Get a constrained ordering keeping m1 last
auto ordering_full = hfg.getHybridOrdering();
// Returns a Hybrid Bayes tree with distribution P(x0|x1) P(x1|m1) P(m1)
HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full);
GTSAM_PRINT(*hbt);
/*
Explanation: the Junction tree will need to re-eliminate to get to the marginal
on X(1), which is not possible because it involves eliminating discrete before
continuous. The solution to this, however, is in Murphy02. TLDR is that this
is 1. expensive and 2. inexact. Nevertheless it is doable, and I believe that
we should do this.
*/
EXPECT_LONGS_EQUAL(3, hbt->size());
}
/* ************************************************************************* */
@ -233,66 +235,72 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) {
hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1));
{
// DecisionTree<Key, GaussianFactor::shared_ptr> dt(
// C(0), boost::make_shared<JacobianFactor>(X(0), I_3x3, Z_3x1),
// boost::make_shared<JacobianFactor>(X(0), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor::FromFactors(
{X(0)}, {{C(0), 2}},
{X(0)}, {{M(0), 2}},
{boost::make_shared<JacobianFactor>(X(0), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(0), I_3x3, Vector3::Ones())}));
DecisionTree<Key, GaussianFactor::shared_ptr> dt1(
C(1), boost::make_shared<JacobianFactor>(X(2), I_3x3, Z_3x1),
M(1), boost::make_shared<JacobianFactor>(X(2), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(2), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(2)}, {{C(1), 2}}, dt1));
hfg.add(GaussianMixtureFactor({X(2)}, {{M(1), 2}}, dt1));
}
// hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8})));
hfg.add(HybridDiscreteFactor(
DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4")));
DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")));
hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1));
hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1));
{
DecisionTree<Key, GaussianFactor::shared_ptr> dt(
C(3), boost::make_shared<JacobianFactor>(X(3), I_3x3, Z_3x1),
M(3), boost::make_shared<JacobianFactor>(X(3), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(3), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(3)}, {{C(3), 2}}, dt));
hfg.add(GaussianMixtureFactor({X(3)}, {{M(3), 2}}, dt));
DecisionTree<Key, GaussianFactor::shared_ptr> dt1(
C(2), boost::make_shared<JacobianFactor>(X(5), I_3x3, Z_3x1),
M(2), boost::make_shared<JacobianFactor>(X(5), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(5), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(5)}, {{C(2), 2}}, dt1));
hfg.add(GaussianMixtureFactor({X(5)}, {{M(2), 2}}, dt1));
}
auto ordering_full =
Ordering::ColamdConstrainedLast(hfg, {C(0), C(1), C(2), C(3)});
GTSAM_PRINT(ordering_full);
Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), M(2), M(3)});
HybridBayesTree::shared_ptr hbt;
HybridGaussianFactorGraph::shared_ptr remaining;
std::tie(hbt, remaining) = hfg.eliminatePartialMultifrontal(ordering_full);
GTSAM_PRINT(*hbt);
// 9 cliques in the Bayes tree and 0 remaining variables to eliminate.
EXPECT_LONGS_EQUAL(9, hbt->size());
EXPECT_LONGS_EQUAL(0, remaining->size());
GTSAM_PRINT(*remaining);
hbt->dot(std::cout);
/*
(Fan) Explanation: the Junction tree will need to re-eliminate to get to the
marginal on X(1), which is not possible because it involves eliminating
discrete before continuous. The solution to this, however, is in Murphy02.
TLDR is that this is 1. expensive and 2. inexact. Nevertheless it is doable,
and I believe that we should do this.
*/
}
void dotPrint(const HybridGaussianFactorGraph::shared_ptr &hfg,
const HybridBayesTree::shared_ptr &hbt,
const Ordering &ordering) {
DotWriter dw;
dw.positionHints['c'] = 2;
dw.positionHints['x'] = 1;
std::cout << hfg->dot(DefaultKeyFormatter, dw);
std::cout << "\n";
hbt->dot(std::cout);
std::cout << "\n";
std::cout << hfg->eliminateSequential(ordering)->dot(DefaultKeyFormatter, dw);
}
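// Usage sketch (hypothetical call site): emit the factor graph, its Bayes
// tree, and the sequentially eliminated Bayes net as Graphviz DOT on stdout,
// e.g. to pipe the test output through `dot -Tpdf`.
//
//   dotPrint(hfg, hbt, ordering_full);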
/* ************************************************************************* */
// TODO(fan): make a graph like Varun's paper one
TEST(HybridGaussianFactorGraph, Switching) {
@ -303,13 +311,13 @@ TEST(HybridGaussianFactorGraph, Switching) {
// X(3), X(7)
// X(2), X(8)
// X(1), X(4), X(6), X(9)
// C(5) will be the center, C(1-4), C(6-8)
// C(3), C(7)
// C(1), C(4), C(2), C(6), C(8)
// M(5) will be the center, M(1-4), M(6-8)
// M(3), M(7)
// M(1), M(4), M(2), M(6), M(8)
// auto ordering_full =
// Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7),
// X(5),
// C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)});
// M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)});
KeyVector ordering;
{
@ -326,79 +334,32 @@ TEST(HybridGaussianFactorGraph, Switching) {
for (auto &l : lvls) {
l = -l;
}
std::copy(lvls.begin(), lvls.end(),
std::ostream_iterator<int>(std::cout, ","));
std::cout << "\n";
}
{
std::vector<int> naturalC(N - 1);
std::iota(naturalC.begin(), naturalC.end(), 1);
std::vector<Key> ordC;
std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC),
[](int x) { return C(x); });
[](int x) { return M(x); });
KeyVector ndC;
std::vector<int> lvls;
// std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering));
std::tie(ndC, lvls) = makeBinaryOrdering(ordC);
std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering));
std::copy(lvls.begin(), lvls.end(),
std::ostream_iterator<int>(std::cout, ","));
}
auto ordering_full = Ordering(ordering);
// auto ordering_full =
// Ordering();
// for (int i = 1; i <= 9; i++) {
// ordering_full.push_back(X(i));
// }
// for (int i = 1; i < 9; i++) {
// ordering_full.push_back(C(i));
// }
// auto ordering_full =
// Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7),
// X(5),
// C(1), C(2), C(3), C(4), C(5), C(6), C(7), C(8)});
// GTSAM_PRINT(*hfg);
GTSAM_PRINT(ordering_full);
// GTSAM_PRINT(ordering_full);
HybridBayesTree::shared_ptr hbt;
HybridGaussianFactorGraph::shared_ptr remaining;
std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full);
// GTSAM_PRINT(*hbt);
// GTSAM_PRINT(*remaining);
{
DotWriter dw;
dw.positionHints['c'] = 2;
dw.positionHints['x'] = 1;
std::cout << hfg->dot(DefaultKeyFormatter, dw);
std::cout << "\n";
hbt->dot(std::cout);
}
{
DotWriter dw;
// dw.positionHints['c'] = 2;
// dw.positionHints['x'] = 1;
std::cout << "\n";
std::cout << hfg->eliminateSequential(ordering_full)
->dot(DefaultKeyFormatter, dw);
}
hbt->marginalFactor(C(11))->print("HBT: ");
// 12 cliques in the Bayes tree and 0 remaining variables to eliminate.
EXPECT_LONGS_EQUAL(12, hbt->size());
EXPECT_LONGS_EQUAL(0, remaining->size());
}
/* ************************************************************************* */
@ -411,13 +372,13 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) {
// X(3), X(7)
// X(2), X(8)
// X(1), X(4), X(6), X(9)
// C(5) will be the center, C(1-4), C(6-8)
// C(3), C(7)
// C(1), C(4), C(2), C(6), C(8)
// M(5) will be the center, M(1-4), M(6-8)
// M(3), M(7)
// M(1), M(4), M(2), M(6), M(8)
// auto ordering_full =
// Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7),
// X(5),
// C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)});
// M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)});
KeyVector ordering;
{
@ -434,67 +395,37 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) {
for (auto &l : lvls) {
l = -l;
}
std::copy(lvls.begin(), lvls.end(),
std::ostream_iterator<int>(std::cout, ","));
std::cout << "\n";
}
{
std::vector<int> naturalC(N - 1);
std::iota(naturalC.begin(), naturalC.end(), 1);
std::vector<Key> ordC;
std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC),
[](int x) { return C(x); });
[](int x) { return M(x); });
KeyVector ndC;
std::vector<int> lvls;
// std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering));
std::tie(ndC, lvls) = makeBinaryOrdering(ordC);
std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering));
std::copy(lvls.begin(), lvls.end(),
std::ostream_iterator<int>(std::cout, ","));
}
auto ordering_full = Ordering(ordering);
// GTSAM_PRINT(*hfg);
GTSAM_PRINT(ordering_full);
HybridBayesTree::shared_ptr hbt;
HybridGaussianFactorGraph::shared_ptr remaining;
std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full);
// GTSAM_PRINT(*hbt);
// GTSAM_PRINT(*remaining);
{
DotWriter dw;
dw.positionHints['c'] = 2;
dw.positionHints['x'] = 1;
std::cout << hfg->dot(DefaultKeyFormatter, dw);
std::cout << "\n";
hbt->dot(std::cout);
}
{
DotWriter dw;
// dw.positionHints['c'] = 2;
// dw.positionHints['x'] = 1;
std::cout << "\n";
std::cout << hfg->eliminateSequential(ordering_full)
->dot(DefaultKeyFormatter, dw);
}
auto new_fg = makeSwitchingChain(12);
auto isam = HybridGaussianISAM(*hbt);
{
HybridGaussianFactorGraph factorGraph;
factorGraph.push_back(new_fg->at(new_fg->size() - 2));
factorGraph.push_back(new_fg->at(new_fg->size() - 1));
isam.update(factorGraph);
std::cout << isam.dot();
isam.marginalFactor(C(11))->print();
}
// Run an ISAM update.
HybridGaussianFactorGraph factorGraph;
factorGraph.push_back(new_fg->at(new_fg->size() - 2));
factorGraph.push_back(new_fg->at(new_fg->size() - 1));
isam.update(factorGraph);
// ISAM should have 12 cliques after the last update
EXPECT_LONGS_EQUAL(12, isam.size());
}
/* ************************************************************************* */
@ -517,23 +448,8 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) {
ordX.emplace_back(Y(i));
}
// {
// KeyVector ndX;
// std::vector<int> lvls;
// std::tie(ndX, lvls) = makeBinaryOrdering(naturalX);
// std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering));
// std::copy(lvls.begin(), lvls.end(),
// std::ostream_iterator<int>(std::cout, ","));
// std::cout << "\n";
// for (size_t i = 0; i < N; i++) {
// ordX.emplace_back(X(ndX[i]));
// ordX.emplace_back(Y(ndX[i]));
// }
// }
for (size_t i = 1; i <= N - 1; i++) {
ordX.emplace_back(C(i));
ordX.emplace_back(M(i));
}
for (size_t i = 1; i <= N - 1; i++) {
ordX.emplace_back(D(i));
@ -545,8 +461,8 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) {
dw.positionHints['c'] = 0;
dw.positionHints['d'] = 3;
dw.positionHints['y'] = 2;
std::cout << hfg->dot(DefaultKeyFormatter, dw);
std::cout << "\n";
// std::cout << hfg->dot(DefaultKeyFormatter, dw);
// std::cout << "\n";
}
{
@ -555,10 +471,10 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) {
// dw.positionHints['c'] = 0;
// dw.positionHints['d'] = 3;
dw.positionHints['x'] = 1;
std::cout << "\n";
// std::cout << "\n";
// std::cout << hfg->eliminateSequential(Ordering(ordX))
// ->dot(DefaultKeyFormatter, dw);
hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout);
// hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout);
}
Ordering ordering_partial;
@ -566,25 +482,45 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) {
ordering_partial.emplace_back(X(i));
ordering_partial.emplace_back(Y(i));
}
{
HybridBayesNet::shared_ptr hbn;
HybridGaussianFactorGraph::shared_ptr remaining;
std::tie(hbn, remaining) =
hfg->eliminatePartialSequential(ordering_partial);
HybridBayesNet::shared_ptr hbn;
HybridGaussianFactorGraph::shared_ptr remaining;
std::tie(hbn, remaining) = hfg->eliminatePartialSequential(ordering_partial);
// remaining->print();
{
DotWriter dw;
dw.positionHints['x'] = 1;
dw.positionHints['c'] = 0;
dw.positionHints['d'] = 3;
dw.positionHints['y'] = 2;
std::cout << remaining->dot(DefaultKeyFormatter, dw);
std::cout << "\n";
}
EXPECT_LONGS_EQUAL(14, hbn->size());
EXPECT_LONGS_EQUAL(11, remaining->size());
{
DotWriter dw;
dw.positionHints['x'] = 1;
dw.positionHints['c'] = 0;
dw.positionHints['d'] = 3;
dw.positionHints['y'] = 2;
// std::cout << remaining->dot(DefaultKeyFormatter, dw);
// std::cout << "\n";
}
}
TEST(HybridGaussianFactorGraph, optimize) {
HybridGaussianFactorGraph hfg;
DiscreteKey c1(C(1), 2);
hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1));
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1));
DecisionTree<Key, GaussianFactor::shared_ptr> dt(
C(1), boost::make_shared<JacobianFactor>(X(1), I_3x3, Z_3x1),
boost::make_shared<JacobianFactor>(X(1), I_3x3, Vector3::Ones()));
hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt));
auto result =
hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {C(1)}));
HybridValues hv = result->optimize();
EXPECT(assert_equal(hv.atDiscrete(C(1)), int(0)));
}
/* ************************************************************************* */
int main() {
TestResult tr;

View File

@ -74,7 +74,7 @@ TEST(GaussianMixtureFactor, Sum) {
// Check that number of keys is 3
EXPECT_LONGS_EQUAL(3, mixtureFactorA.keys().size());
// Check that number of discrete keys is 1 // TODO(Frank): should not exist?
// Check that number of discrete keys is 1
EXPECT_LONGS_EQUAL(1, mixtureFactorA.discreteKeys().size());
// Create sum of two mixture factors: it will be a decision tree now on both
@ -104,7 +104,7 @@ TEST(GaussianMixtureFactor, Printing) {
GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors);
std::string expected =
R"(Hybrid x1 x2; 1 ]{
R"(Hybrid [x1 x2; 1]{
Choice(1)
0 Leaf :
A[x1] = [

View File

@ -0,0 +1,189 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridBayesNet.cpp
* @brief Unit tests for HybridBayesNet
* @author Varun Agrawal
* @author Fan Jiang
* @author Frank Dellaert
* @date December 2021
*/
#include <gtsam/base/serializationTestHelpers.h>
#include <gtsam/hybrid/HybridBayesNet.h>
#include <gtsam/hybrid/HybridBayesTree.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include "Switching.h"
// Include for test suite
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
using namespace gtsam::serializationTestHelpers;
using noiseModel::Isotropic;
using symbol_shorthand::M;
using symbol_shorthand::X;
static const DiscreteKey Asia(0, 2);
/* ****************************************************************************/
// Test creation
TEST(HybridBayesNet, Creation) {
HybridBayesNet bayesNet;
bayesNet.add(Asia, "99/1");
DiscreteConditional expected(Asia, "99/1");
CHECK(bayesNet.atDiscrete(0));
auto& df = *bayesNet.atDiscrete(0);
EXPECT(df.equals(expected));
}
/* ****************************************************************************/
// Test choosing an assignment of conditionals
TEST(HybridBayesNet, Choose) {
Switching s(4);
Ordering ordering;
for (auto&& kvp : s.linearizationPoint) {
ordering += kvp.key;
}
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
s.linearizedFactorGraph.eliminatePartialSequential(ordering);
DiscreteValues assignment;
assignment[M(1)] = 1;
assignment[M(2)] = 1;
assignment[M(3)] = 0;
GaussianBayesNet gbn = hybridBayesNet->choose(assignment);
EXPECT_LONGS_EQUAL(4, gbn.size());
EXPECT(assert_equal(*(*boost::dynamic_pointer_cast<GaussianMixture>(
hybridBayesNet->atMixture(0)))(assignment),
*gbn.at(0)));
EXPECT(assert_equal(*(*boost::dynamic_pointer_cast<GaussianMixture>(
hybridBayesNet->atMixture(1)))(assignment),
*gbn.at(1)));
EXPECT(assert_equal(*(*boost::dynamic_pointer_cast<GaussianMixture>(
hybridBayesNet->atMixture(2)))(assignment),
*gbn.at(2)));
EXPECT(assert_equal(*(*boost::dynamic_pointer_cast<GaussianMixture>(
hybridBayesNet->atMixture(3)))(assignment),
*gbn.at(3)));
}
/* ****************************************************************************/
// Test Bayes net optimize with a given discrete assignment
TEST(HybridBayesNet, OptimizeAssignment) {
Switching s(4);
Ordering ordering;
for (auto&& kvp : s.linearizationPoint) {
ordering += kvp.key;
}
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
s.linearizedFactorGraph.eliminatePartialSequential(ordering);
DiscreteValues assignment;
assignment[M(1)] = 1;
assignment[M(2)] = 1;
assignment[M(3)] = 1;
VectorValues delta = hybridBayesNet->optimize(assignment);
// The linearization point has the same value as the key index,
// e.g. X(1) = 1, X(2) = 2,
// but the factors specify X(k) = k-1, so delta should be -1.
VectorValues expected_delta;
expected_delta.insert(make_pair(X(1), -Vector1::Ones()));
expected_delta.insert(make_pair(X(2), -Vector1::Ones()));
expected_delta.insert(make_pair(X(3), -Vector1::Ones()));
expected_delta.insert(make_pair(X(4), -Vector1::Ones()));
EXPECT(assert_equal(expected_delta, delta));
}
/* ****************************************************************************/
// Test Bayes net optimize
TEST(HybridBayesNet, Optimize) {
Switching s(4);
Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering();
HybridBayesNet::shared_ptr hybridBayesNet =
s.linearizedFactorGraph.eliminateSequential(hybridOrdering);
HybridValues delta = hybridBayesNet->optimize();
DiscreteValues expectedAssignment;
expectedAssignment[M(1)] = 1;
expectedAssignment[M(2)] = 0;
expectedAssignment[M(3)] = 1;
EXPECT(assert_equal(expectedAssignment, delta.discrete()));
VectorValues expectedValues;
expectedValues.insert(X(1), -0.999904 * Vector1::Ones());
expectedValues.insert(X(2), -0.99029 * Vector1::Ones());
expectedValues.insert(X(3), -1.00971 * Vector1::Ones());
expectedValues.insert(X(4), -1.0001 * Vector1::Ones());
EXPECT(assert_equal(expectedValues, delta.continuous(), 1e-5));
}
/* ****************************************************************************/
// Test Bayes net multifrontal optimize
TEST(HybridBayesNet, OptimizeMultifrontal) {
Switching s(4);
Ordering hybridOrdering = s.linearizedFactorGraph.getHybridOrdering();
HybridBayesTree::shared_ptr hybridBayesTree =
s.linearizedFactorGraph.eliminateMultifrontal(hybridOrdering);
HybridValues delta = hybridBayesTree->optimize();
VectorValues expectedValues;
expectedValues.insert(X(1), -0.999904 * Vector1::Ones());
expectedValues.insert(X(2), -0.99029 * Vector1::Ones());
expectedValues.insert(X(3), -1.00971 * Vector1::Ones());
expectedValues.insert(X(4), -1.0001 * Vector1::Ones());
EXPECT(assert_equal(expectedValues, delta.continuous(), 1e-5));
}
/* ****************************************************************************/
// Test HybridBayesNet serialization.
TEST(HybridBayesNet, Serialization) {
Switching s(4);
Ordering ordering = s.linearizedFactorGraph.getHybridOrdering();
HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential(ordering));
EXPECT(equalsObj<HybridBayesNet>(hbn));
EXPECT(equalsXML<HybridBayesNet>(hbn));
EXPECT(equalsBinary<HybridBayesNet>(hbn));
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -0,0 +1,166 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridBayesTree.cpp
* @brief Unit tests for HybridBayesTree
* @author Varun Agrawal
* @date August 2022
*/
#include <gtsam/base/serializationTestHelpers.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/hybrid/HybridBayesTree.h>
#include <gtsam/hybrid/HybridGaussianISAM.h>
#include "Switching.h"
// Include for test suite
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
using noiseModel::Isotropic;
using symbol_shorthand::M;
using symbol_shorthand::X;
/* ****************************************************************************/
// Test for optimizing a HybridBayesTree with a given assignment.
TEST(HybridBayesTree, OptimizeAssignment) {
Switching s(4);
HybridGaussianISAM isam;
HybridGaussianFactorGraph graph1;
// Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4
for (size_t i = 1; i < 4; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
// Add the Gaussian factors, 1 prior on X(1),
// 3 measurements on X(2), X(3), X(4)
graph1.push_back(s.linearizedFactorGraph.at(0));
for (size_t i = 4; i <= 6; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
// Add the discrete factors
for (size_t i = 7; i <= 9; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
isam.update(graph1);
DiscreteValues assignment;
assignment[M(1)] = 1;
assignment[M(2)] = 1;
assignment[M(3)] = 1;
VectorValues delta = isam.optimize(assignment);
// The linearization point has the same value as the key index,
// e.g. X(1) = 1, X(2) = 2,
// but the factors specify X(k) = k-1, so delta should be -1.
VectorValues expected_delta;
expected_delta.insert(make_pair(X(1), -Vector1::Ones()));
expected_delta.insert(make_pair(X(2), -Vector1::Ones()));
expected_delta.insert(make_pair(X(3), -Vector1::Ones()));
expected_delta.insert(make_pair(X(4), -Vector1::Ones()));
EXPECT(assert_equal(expected_delta, delta));
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= s.K; k++) ordering += X(k);
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
s.linearizedFactorGraph.eliminatePartialSequential(ordering);
GaussianBayesNet gbn = hybridBayesNet->choose(assignment);
VectorValues expected = gbn.optimize();
EXPECT(assert_equal(expected, delta));
}
/* ****************************************************************************/
// Test for optimizing a HybridBayesTree.
TEST(HybridBayesTree, Optimize) {
Switching s(4);
HybridGaussianISAM isam;
HybridGaussianFactorGraph graph1;
// Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4
for (size_t i = 1; i < 4; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
// Add the Gaussian factors, 1 prior on X(1),
// 3 measurements on X(2), X(3), X(4)
graph1.push_back(s.linearizedFactorGraph.at(0));
for (size_t i = 4; i <= 6; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
// Add the discrete factors
for (size_t i = 7; i <= 9; i++) {
graph1.push_back(s.linearizedFactorGraph.at(i));
}
isam.update(graph1);
HybridValues delta = isam.optimize();
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= s.K; k++) ordering += X(k);
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
s.linearizedFactorGraph.eliminatePartialSequential(ordering);
DiscreteFactorGraph dfg;
for (auto&& f : *remainingFactorGraph) {
auto factor = dynamic_pointer_cast<HybridDiscreteFactor>(f);
dfg.push_back(
boost::dynamic_pointer_cast<DecisionTreeFactor>(factor->inner()));
}
DiscreteValues expectedMPE = dfg.optimize();
VectorValues expectedValues = hybridBayesNet->optimize(expectedMPE);
EXPECT(assert_equal(expectedMPE, delta.discrete()));
EXPECT(assert_equal(expectedValues, delta.continuous()));
}
/* ****************************************************************************/
// Test HybridBayesTree serialization.
TEST(HybridBayesTree, Serialization) {
Switching s(4);
Ordering ordering = s.linearizedFactorGraph.getHybridOrdering();
HybridBayesTree hbt =
*(s.linearizedFactorGraph.eliminateMultifrontal(ordering));
using namespace gtsam::serializationTestHelpers;
EXPECT(equalsObj<HybridBayesTree>(hbt));
EXPECT(equalsXML<HybridBayesTree>(hbt));
EXPECT(equalsBinary<HybridBayesTree>(hbt));
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -0,0 +1,45 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridFactorGraph.cpp
* @brief Unit tests for HybridFactorGraph
* @author Varun Agrawal
* @date May 2021
*/
#include <CppUnitLite/Test.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/utilities.h>
#include <gtsam/hybrid/HybridFactorGraph.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/PriorFactor.h>
using namespace std;
using namespace gtsam;
using noiseModel::Isotropic;
using symbol_shorthand::L;
using symbol_shorthand::M;
using symbol_shorthand::X;
/* ****************************************************************************
* Test that a HybridFactorGraph can be default constructed.
*/
TEST(HybridFactorGraph, Constructor) {
// Initialize the hybrid factor graph
HybridFactorGraph fg;
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -0,0 +1,563 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridIncremental.cpp
* @brief Unit tests for incremental inference
* @author Fan Jiang, Varun Agrawal, Frank Dellaert
* @date Jan 2021
*/
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteDistribution.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/hybrid/HybridConditional.h>
#include <gtsam/hybrid/HybridGaussianISAM.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/sam/BearingRangeFactor.h>
#include <numeric>
#include "Switching.h"
// Include for test suite
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
using noiseModel::Isotropic;
using symbol_shorthand::L;
using symbol_shorthand::M;
using symbol_shorthand::W;
using symbol_shorthand::X;
using symbol_shorthand::Y;
using symbol_shorthand::Z;
/* ****************************************************************************/
// Test if we can perform elimination incrementally.
TEST(HybridGaussianElimination, IncrementalElimination) {
Switching switching(3);
HybridGaussianISAM isam;
HybridGaussianFactorGraph graph1;
// Create initial factor graph
// * * *
// | | |
// X1 -*- X2 -*- X3
// \*-M1-*/
graph1.push_back(switching.linearizedFactorGraph.at(0)); // P(X1)
graph1.push_back(switching.linearizedFactorGraph.at(1)); // P(X1, X2 | M1)
graph1.push_back(switching.linearizedFactorGraph.at(2)); // P(X2, X3 | M2)
graph1.push_back(switching.linearizedFactorGraph.at(5)); // P(M1)
// Run update step
isam.update(graph1);
// Check that after update we have 3 hybrid Bayes tree nodes:
// P(X1 | X2, M1), P(X2, X3 | M1, M2), and P(M1, M2)
EXPECT_LONGS_EQUAL(3, isam.size());
EXPECT(isam[X(1)]->conditional()->frontals() == KeyVector{X(1)});
EXPECT(isam[X(1)]->conditional()->parents() == KeyVector({X(2), M(1)}));
EXPECT(isam[X(2)]->conditional()->frontals() == KeyVector({X(2), X(3)}));
EXPECT(isam[X(2)]->conditional()->parents() == KeyVector({M(1), M(2)}));
/********************************************************/
// New factor graph for incremental update.
HybridGaussianFactorGraph graph2;
graph2.push_back(switching.linearizedFactorGraph.at(3)); // P(X2)
graph2.push_back(switching.linearizedFactorGraph.at(4)); // P(X3)
graph2.push_back(switching.linearizedFactorGraph.at(6)); // P(M2 | M1)
isam.update(graph2);
// Check that after the second update the Bayes tree still has 3 nodes,
// with the clique on X(2), X(3) re-eliminated as:
// P(X2, X3 | M1, M2)
EXPECT_LONGS_EQUAL(3, isam.size());
EXPECT(isam[X(3)]->conditional()->frontals() == KeyVector({X(2), X(3)}));
EXPECT(isam[X(3)]->conditional()->parents() == KeyVector({M(1), M(2)}));
}
/* ****************************************************************************/
// Test if we can do inference incrementally
TEST(HybridGaussianElimination, IncrementalInference) {
Switching switching(3);
HybridGaussianISAM isam;
HybridGaussianFactorGraph graph1;
// Create initial factor graph
// * * *
// | | |
// X1 -*- X2 -*- X3
// | |
// *-M1 - * - M2
graph1.push_back(switching.linearizedFactorGraph.at(0)); // P(X1)
graph1.push_back(switching.linearizedFactorGraph.at(1)); // P(X1, X2 | M1)
graph1.push_back(switching.linearizedFactorGraph.at(3)); // P(X2)
graph1.push_back(switching.linearizedFactorGraph.at(5)); // P(M1)
// Run update step
isam.update(graph1);
auto discreteConditional_m1 =
isam[M(1)]->conditional()->asDiscreteConditional();
EXPECT(discreteConditional_m1->keys() == KeyVector({M(1)}));
/********************************************************/
// New factor graph for incremental update.
HybridGaussianFactorGraph graph2;
graph2.push_back(switching.linearizedFactorGraph.at(2)); // P(X2, X3 | M2)
graph2.push_back(switching.linearizedFactorGraph.at(4)); // P(X3)
graph2.push_back(switching.linearizedFactorGraph.at(6)); // P(M2 | M1)
isam.update(graph2);
/********************************************************/
// Run batch elimination so we can compare results.
Ordering ordering;
ordering += X(1);
ordering += X(2);
ordering += X(3);
// Now we calculate the actual factors using full elimination
HybridBayesTree::shared_ptr expectedHybridBayesTree;
HybridGaussianFactorGraph::shared_ptr expectedRemainingGraph;
std::tie(expectedHybridBayesTree, expectedRemainingGraph) =
switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering);
// The densities on X(1) should be the same
auto x1_conditional =
dynamic_pointer_cast<GaussianMixture>(isam[X(1)]->conditional()->inner());
auto actual_x1_conditional = dynamic_pointer_cast<GaussianMixture>(
(*expectedHybridBayesTree)[X(1)]->conditional()->inner());
EXPECT(assert_equal(*x1_conditional, *actual_x1_conditional));
// The densities on X(2) should be the same
auto x2_conditional =
dynamic_pointer_cast<GaussianMixture>(isam[X(2)]->conditional()->inner());
auto actual_x2_conditional = dynamic_pointer_cast<GaussianMixture>(
(*expectedHybridBayesTree)[X(2)]->conditional()->inner());
EXPECT(assert_equal(*x2_conditional, *actual_x2_conditional));
// The densities on X(3) should be the same
auto x3_conditional =
dynamic_pointer_cast<GaussianMixture>(isam[X(3)]->conditional()->inner());
auto actual_x3_conditional = dynamic_pointer_cast<GaussianMixture>(
(*expectedHybridBayesTree)[X(3)]->conditional()->inner());
EXPECT(assert_equal(*x3_conditional, *actual_x3_conditional));
// We only perform manual continuous elimination for 0,0.
// The other discrete probabilities on M(2) are calculated the same way
Ordering discrete_ordering;
discrete_ordering += M(1);
discrete_ordering += M(2);
HybridBayesTree::shared_ptr discreteBayesTree =
expectedRemainingGraph->eliminateMultifrontal(discrete_ordering);
DiscreteValues m00;
m00[M(1)] = 0, m00[M(2)] = 0;
DiscreteConditional decisionTree =
*(*discreteBayesTree)[M(2)]->conditional()->asDiscreteConditional();
double m00_prob = decisionTree(m00);
auto discreteConditional = isam[M(2)]->conditional()->asDiscreteConditional();
// Test if the probability values are as expected with regression tests.
DiscreteValues assignment;
EXPECT(assert_equal(m00_prob, 0.0619233, 1e-5));
assignment[M(1)] = 0;
assignment[M(2)] = 0;
EXPECT(assert_equal(m00_prob, (*discreteConditional)(assignment), 1e-5));
assignment[M(1)] = 1;
assignment[M(2)] = 0;
EXPECT(assert_equal(0.183743, (*discreteConditional)(assignment), 1e-5));
assignment[M(1)] = 0;
assignment[M(2)] = 1;
EXPECT(assert_equal(0.204159, (*discreteConditional)(assignment), 1e-5));
assignment[M(1)] = 1;
assignment[M(2)] = 1;
EXPECT(assert_equal(0.2, (*discreteConditional)(assignment), 1e-5));
// Check if the clique conditional generated from incremental elimination
// matches that of batch elimination.
auto expectedChordal = expectedRemainingGraph->eliminateMultifrontal();
auto expectedConditional = dynamic_pointer_cast<DecisionTreeFactor>(
(*expectedChordal)[M(2)]->conditional()->inner());
auto actualConditional = dynamic_pointer_cast<DecisionTreeFactor>(
isam[M(2)]->conditional()->inner());
EXPECT(assert_equal(*actualConditional, *expectedConditional, 1e-6));
}
/* ****************************************************************************/
// Test if we can do approximate inference
TEST(HybridGaussianElimination, Approx_inference) {
Switching switching(4);
HybridGaussianISAM incrementalHybrid;
HybridGaussianFactorGraph graph1;
// Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4
for (size_t i = 1; i < 4; i++) {
graph1.push_back(switching.linearizedFactorGraph.at(i));
}
// Add the Gaussian factors, 1 prior on X(1),
// 3 measurements on X(2), X(3), X(4)
graph1.push_back(switching.linearizedFactorGraph.at(0));
for (size_t i = 4; i <= 7; i++) {
graph1.push_back(switching.linearizedFactorGraph.at(i));
}
// Create ordering.
Ordering ordering;
for (size_t j = 1; j <= 4; j++) {
ordering += X(j);
}
// Now we calculate the actual factors using full elimination
HybridBayesTree::shared_ptr unprunedHybridBayesTree;
HybridGaussianFactorGraph::shared_ptr unprunedRemainingGraph;
std::tie(unprunedHybridBayesTree, unprunedRemainingGraph) =
switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering);
size_t maxNrLeaves = 5;
incrementalHybrid.update(graph1);
incrementalHybrid.prune(M(3), maxNrLeaves);
/*
unpruned factor is:
Choice(m3)
0 Choice(m2)
0 0 Choice(m1)
0 0 0 Leaf 0.11267528
0 0 1 Leaf 0.18576102
0 1 Choice(m1)
0 1 0 Leaf 0.18754662
0 1 1 Leaf 0.30623871
1 Choice(m2)
1 0 Choice(m1)
1 0 0 Leaf 0.18576102
1 0 1 Leaf 0.30622428
1 1 Choice(m1)
1 1 0 Leaf 0.30623871
1 1 1 Leaf 0.5
pruned factors is:
Choice(m3)
0 Choice(m2)
0 0 Leaf 0
0 1 Choice(m1)
0 1 0 Leaf 0.18754662
0 1 1 Leaf 0.30623871
1 Choice(m2)
1 0 Choice(m1)
1 0 0 Leaf 0
1 0 1 Leaf 0.30622428
1 1 Choice(m1)
1 1 0 Leaf 0.30623871
1 1 1 Leaf 0.5
*/
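// Worked reading of the trees above (pruning rule as we understand it: keep
// the maxNrLeaves largest probabilities and zero out the rest): with
// maxNrLeaves = 5, the three smallest leaves (0.11267528 and the two
// 0.18576102 entries) are zeroed, while 0.18754662, 0.30622428, the two
// 0.30623871 entries, and 0.5 survive.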
auto discreteConditional_m1 = *dynamic_pointer_cast<DiscreteConditional>(
incrementalHybrid[M(1)]->conditional()->inner());
EXPECT(discreteConditional_m1.keys() == KeyVector({M(1), M(2), M(3)}));
// Count the number of leaves whose value is greater than 0.
auto count = [](const double &value, int count) {
return value > 0 ? count + 1 : count;
};
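// Note: DecisionTree::fold threads the accumulator through every leaf, so
// folding `count` over the conditional tallies the leaves that remain
// non-zero after pruning.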
// Check that the number of leaves after pruning is 5.
EXPECT_LONGS_EQUAL(5, discreteConditional_m1.fold(count, 0));
// Check that the hybrid nodes of the Bayes tree match those of the
// pre-pruning Bayes tree, at the same positions.
auto &unprunedLastDensity = *dynamic_pointer_cast<GaussianMixture>(
unprunedHybridBayesTree->clique(X(4))->conditional()->inner());
auto &lastDensity = *dynamic_pointer_cast<GaussianMixture>(
incrementalHybrid[X(4)]->conditional()->inner());
std::vector<std::pair<DiscreteValues, double>> assignments =
discreteConditional_m1.enumerate();
// Loop over all assignments and check the pruned components
for (auto &&av : assignments) {
const DiscreteValues &assignment = av.first;
const double value = av.second;
if (value == 0.0) {
EXPECT(lastDensity(assignment) == nullptr);
} else {
CHECK(lastDensity(assignment));
EXPECT(assert_equal(*unprunedLastDensity(assignment),
*lastDensity(assignment)));
}
}
}
/* ****************************************************************************/
// Test approximate inference with an additional pruning step.
TEST(HybridGaussianElimination, Incremental_approximate) {
Switching switching(5);
HybridGaussianISAM incrementalHybrid;
HybridGaussianFactorGraph graph1;
/***** Run Round 1 *****/
// Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4
for (size_t i = 1; i < 4; i++) {
graph1.push_back(switching.linearizedFactorGraph.at(i));
}
// Add the Gaussian factors, 1 prior on X(1),
// 3 measurements on X(2), X(3), X(4)
graph1.push_back(switching.linearizedFactorGraph.at(0));
for (size_t i = 5; i <= 7; i++) {
graph1.push_back(switching.linearizedFactorGraph.at(i));
}
// Run update with pruning
size_t maxComponents = 5;
incrementalHybrid.update(graph1);
incrementalHybrid.prune(M(3), maxComponents);
// Check that we have a Bayes tree with 4 hybrid nodes,
// each with 2, 4, 5 (pruned), and 5 (pruned) leaves respectively.
EXPECT_LONGS_EQUAL(4, incrementalHybrid.size());
EXPECT_LONGS_EQUAL(
2, incrementalHybrid[X(1)]->conditional()->asMixture()->nrComponents());
EXPECT_LONGS_EQUAL(
4, incrementalHybrid[X(2)]->conditional()->asMixture()->nrComponents());
EXPECT_LONGS_EQUAL(
5, incrementalHybrid[X(3)]->conditional()->asMixture()->nrComponents());
EXPECT_LONGS_EQUAL(
5, incrementalHybrid[X(4)]->conditional()->asMixture()->nrComponents());
/***** Run Round 2 *****/
HybridGaussianFactorGraph graph2;
graph2.push_back(switching.linearizedFactorGraph.at(4));
graph2.push_back(switching.linearizedFactorGraph.at(8));
// Run update with pruning a second time.
incrementalHybrid.update(graph2);
incrementalHybrid.prune(M(4), maxComponents);
// Check that we have a Bayes tree with pruned hybrid nodes,
// with 5 (pruned) leaves.
CHECK_EQUAL(5, incrementalHybrid.size());
EXPECT_LONGS_EQUAL(
5, incrementalHybrid[X(4)]->conditional()->asMixture()->nrComponents());
EXPECT_LONGS_EQUAL(
5, incrementalHybrid[X(5)]->conditional()->asMixture()->nrComponents());
}
/* ************************************************************************/
// A GTSAM-only test for running inference on a single-legged robot.
// The leg links are represented by the chain X-Y-Z-W, where X is the base and
// W is the foot.
// We use BetweenFactor<Pose2> as constraints between each of the poses.
TEST(HybridGaussianISAM, NonTrivial) {
/*************** Run Round 1 ***************/
HybridNonlinearFactorGraph fg;
// Add a prior on pose x1 at the origin.
// A prior factor consists of a mean and
// a noise model (covariance matrix)
Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
auto priorNoise = noiseModel::Diagonal::Sigmas(
Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
fg.emplace_nonlinear<PriorFactor<Pose2>>(X(0), prior, priorNoise);
// create a noise model for the landmark measurements
auto poseNoise = noiseModel::Isotropic::Sigma(3, 0.1);
// We model a robot's single leg as X - Y - Z - W
// where X is the base link and W is the foot link.
// Add connecting poses similar to PoseFactors in GTD
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(0), Y(0), Pose2(0, 1.0, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Y(0), Z(0), Pose2(0, 1.0, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Z(0), W(0), Pose2(0, 1.0, 0),
poseNoise);
// Create initial estimate
Values initial;
initial.insert(X(0), Pose2(0.0, 0.0, 0.0));
initial.insert(Y(0), Pose2(0.0, 1.0, 0.0));
initial.insert(Z(0), Pose2(0.0, 2.0, 0.0));
initial.insert(W(0), Pose2(0.0, 3.0, 0.0));
HybridGaussianFactorGraph gfg = fg.linearize(initial);
fg = HybridNonlinearFactorGraph();
HybridGaussianISAM inc;
// Update without pruning
// The result is a hybrid Bayes tree with no discrete variables
// (equivalent to a GaussianBayesNet).
// Factorization is:
// `P(X | measurements) = P(W0|Z0) P(Z0|Y0) P(Y0|X0) P(X0)`
inc.update(gfg);
/*************** Run Round 2 ***************/
using PlanarMotionModel = BetweenFactor<Pose2>;
// Add odometry factor with discrete modes.
Pose2 odometry(1.0, 0.0, 0.0);
KeyVector contKeys = {W(0), W(1)};
auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0);
auto still = boost::make_shared<PlanarMotionModel>(W(0), W(1), Pose2(0, 0, 0),
noise_model),
moving = boost::make_shared<PlanarMotionModel>(W(0), W(1), odometry,
noise_model);
std::vector<PlanarMotionModel::shared_ptr> components = {moving, still};
auto mixtureFactor = boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components);
fg.push_back(mixtureFactor);
// Add equivalent of ImuFactor
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(0), X(1), Pose2(1.0, 0.0, 0),
poseNoise);
// PoseFactors-like at k=1
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(1), Y(1), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Y(1), Z(1), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Z(1), W(1), Pose2(-1, 1, 0),
poseNoise);
initial.insert(X(1), Pose2(1.0, 0.0, 0.0));
initial.insert(Y(1), Pose2(1.0, 1.0, 0.0));
initial.insert(Z(1), Pose2(1.0, 2.0, 0.0));
// The leg link did not move so we set the expected pose accordingly.
initial.insert(W(1), Pose2(0.0, 3.0, 0.0));
gfg = fg.linearize(initial);
fg = HybridNonlinearFactorGraph();
// Update without pruning
// The result is a hybrid Bayes tree with 1 discrete variable M(1).
// P(X | measurements) = P(W0|Z0, W1, M1) P(Z0|Y0, W1, M1) P(Y0|X0, W1, M1)
// P(X0 | X1, W1, M1) P(W1|Z1, X1, M1) P(Z1|Y1, X1, M1)
// P(Y1 | X1, M1)P(X1 | M1)P(M1)
// The MHS tree is a 1 level tree for time indices (1,) with 2 leaves.
inc.update(gfg);
/*************** Run Round 3 ***************/
// Add odometry factor with discrete modes.
contKeys = {W(1), W(2)};
still = boost::make_shared<PlanarMotionModel>(W(1), W(2), Pose2(0, 0, 0),
noise_model);
moving =
boost::make_shared<PlanarMotionModel>(W(1), W(2), odometry, noise_model);
components = {moving, still};
mixtureFactor = boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(2), 2)}, components);
fg.push_back(mixtureFactor);
// Add equivalent of ImuFactor
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(1), X(2), Pose2(1.0, 0.0, 0),
poseNoise);
// PoseFactors-like at k=1
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(2), Y(2), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Y(2), Z(2), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Z(2), W(2), Pose2(-2, 1, 0),
poseNoise);
initial.insert(X(2), Pose2(2.0, 0.0, 0.0));
initial.insert(Y(2), Pose2(2.0, 1.0, 0.0));
initial.insert(Z(2), Pose2(2.0, 2.0, 0.0));
initial.insert(W(2), Pose2(0.0, 3.0, 0.0));
gfg = fg.linearize(initial);
fg = HybridNonlinearFactorGraph();
// Now we prune!
// P(X | measurements) = P(W0|Z0, W1, M1) P(Z0|Y0, W1, M1) P(Y0|X0, W1, M1)
// P(X0 | X1, W1, M1) P(W1|W2, Z1, X1, M1, M2)
// P(Z1| W2, Y1, X1, M1, M2) P(Y1 | W2, X1, M1, M2)
// P(X1 | W2, X2, M1, M2) P(W2|Z2, X2, M1, M2)
// P(Z2|Y2, X2, M1, M2) P(Y2 | X2, M1, M2)
// P(X2 | M1, M2) P(M1, M2)
// The MHS at this point should be a 2 level tree on (1, 2).
// 1 has 2 choices, and 2 has 4 choices.
inc.update(gfg);
inc.prune(M(2), 2);
/*************** Run Round 4 ***************/
// Add odometry factor with discrete modes.
contKeys = {W(2), W(3)};
still = boost::make_shared<PlanarMotionModel>(W(2), W(3), Pose2(0, 0, 0),
noise_model);
moving =
boost::make_shared<PlanarMotionModel>(W(2), W(3), odometry, noise_model);
components = {moving, still};
mixtureFactor = boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(3), 2)}, components);
fg.push_back(mixtureFactor);
// Add equivalent of ImuFactor
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(2), X(3), Pose2(1.0, 0.0, 0),
poseNoise);
// PoseFactors-like at k=3
fg.emplace_nonlinear<BetweenFactor<Pose2>>(X(3), Y(3), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Y(3), Z(3), Pose2(0, 1, 0),
poseNoise);
fg.emplace_nonlinear<BetweenFactor<Pose2>>(Z(3), W(3), Pose2(-3, 1, 0),
poseNoise);
initial.insert(X(3), Pose2(3.0, 0.0, 0.0));
initial.insert(Y(3), Pose2(3.0, 1.0, 0.0));
initial.insert(Z(3), Pose2(3.0, 2.0, 0.0));
initial.insert(W(3), Pose2(0.0, 3.0, 0.0));
gfg = fg.linearize(initial);
fg = HybridNonlinearFactorGraph();
// Keep pruning!
inc.update(gfg);
inc.prune(M(3), 3);
// The final discrete graph should not be empty since we have eliminated
// all continuous variables.
auto discreteTree = inc[M(3)]->conditional()->asDiscreteConditional();
EXPECT_LONGS_EQUAL(3, discreteTree->size());
// Test if the optimal discrete mode assignment is (1, 1, 1).
DiscreteFactorGraph discreteGraph;
discreteGraph.push_back(discreteTree);
DiscreteValues optimal_assignment = discreteGraph.optimize();
DiscreteValues expected_assignment;
expected_assignment[M(1)] = 1;
expected_assignment[M(2)] = 1;
expected_assignment[M(3)] = 1;
EXPECT(assert_equal(expected_assignment, optimal_assignment));
// Test if pruning worked correctly by checking that we only have 3 leaves in
// the last node.
auto lastConditional = inc[X(3)]->conditional()->asMixture();
EXPECT_LONGS_EQUAL(3, lastConditional->nrComponents());
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}

View File

@ -0,0 +1,737 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridNonlinearFactorGraph.cpp
* @brief Unit tests for HybridNonlinearFactorGraph
* @author Varun Agrawal
* @author Fan Jiang
* @author Frank Dellaert
* @date December 2021
*/
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/utilities.h>
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteDistribution.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/hybrid/HybridEliminationTree.h>
#include <gtsam/hybrid/HybridFactor.h>
#include <gtsam/hybrid/HybridNonlinearFactorGraph.h>
#include <gtsam/hybrid/MixtureFactor.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/sam/BearingRangeFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <numeric>
#include "Switching.h"
// Include for test suite
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
using noiseModel::Isotropic;
using symbol_shorthand::L;
using symbol_shorthand::M;
using symbol_shorthand::X;
/* ****************************************************************************
 * Test that Gaussian factors from a linearized factor graph are appended to
 * the existing Gaussian factor graph in the hybrid factor graph.
*/
TEST(HybridFactorGraph, GaussianFactorGraph) {
HybridNonlinearFactorGraph fg;
// Add a simple prior factor to the nonlinear factor graph
fg.emplace_nonlinear<PriorFactor<double>>(X(0), 0, Isotropic::Sigma(1, 0.1));
// Linearization point
Values linearizationPoint;
linearizationPoint.insert<double>(X(0), 0);
HybridGaussianFactorGraph ghfg = fg.linearize(linearizationPoint);
// Add a factor to the GaussianFactorGraph
ghfg.add(JacobianFactor(X(0), I_1x1, Vector1(5)));
EXPECT_LONGS_EQUAL(2, ghfg.size());
}
/***************************************************************************
* Test equality for Hybrid Nonlinear Factor Graph.
*/
TEST(HybridNonlinearFactorGraph, Equals) {
HybridNonlinearFactorGraph graph1;
HybridNonlinearFactorGraph graph2;
// Test empty factor graphs
EXPECT(assert_equal(graph1, graph2));
auto f0 = boost::make_shared<PriorFactor<Pose2>>(
1, Pose2(), noiseModel::Isotropic::Sigma(3, 0.001));
graph1.push_back(f0);
graph2.push_back(f0);
auto f1 = boost::make_shared<BetweenFactor<Pose2>>(
1, 2, Pose2(), noiseModel::Isotropic::Sigma(3, 0.1));
graph1.push_back(f1);
graph2.push_back(f1);
// Test non-empty graphs
EXPECT(assert_equal(graph1, graph2));
}
/***************************************************************************
* Test that the resize method works correctly for a HybridNonlinearFactorGraph.
*/
TEST(HybridNonlinearFactorGraph, Resize) {
HybridNonlinearFactorGraph fg;
auto nonlinearFactor = boost::make_shared<BetweenFactor<double>>();
fg.push_back(nonlinearFactor);
auto discreteFactor = boost::make_shared<DecisionTreeFactor>();
fg.push_back(discreteFactor);
auto dcFactor = boost::make_shared<MixtureFactor>();
fg.push_back(dcFactor);
EXPECT_LONGS_EQUAL(fg.size(), 3);
fg.resize(0);
EXPECT_LONGS_EQUAL(fg.size(), 0);
}
/***************************************************************************
* Test that the resize method works correctly for a
* HybridGaussianFactorGraph.
*/
TEST(HybridGaussianFactorGraph, Resize) {
HybridNonlinearFactorGraph nhfg;
auto nonlinearFactor = boost::make_shared<BetweenFactor<double>>(
X(0), X(1), 0.0, Isotropic::Sigma(1, 0.1));
nhfg.push_back(nonlinearFactor);
auto discreteFactor = boost::make_shared<DecisionTreeFactor>();
nhfg.push_back(discreteFactor);
KeyVector contKeys = {X(0), X(1)};
auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0);
auto still = boost::make_shared<MotionModel>(X(0), X(1), 0.0, noise_model),
moving = boost::make_shared<MotionModel>(X(0), X(1), 1.0, noise_model);
std::vector<MotionModel::shared_ptr> components = {still, moving};
auto dcFactor = boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components);
nhfg.push_back(dcFactor);
Values linearizationPoint;
linearizationPoint.insert<double>(X(0), 0);
linearizationPoint.insert<double>(X(1), 1);
// Generate `HybridGaussianFactorGraph` by linearizing
HybridGaussianFactorGraph gfg = nhfg.linearize(linearizationPoint);
EXPECT_LONGS_EQUAL(gfg.size(), 3);
gfg.resize(0);
EXPECT_LONGS_EQUAL(gfg.size(), 0);
}
/***************************************************************************
* Test that the MixtureFactor reports correctly if the number of continuous
 * keys provided does not match the keys in the factors.
*/
TEST(HybridGaussianFactorGraph, MixtureFactor) {
auto nonlinearFactor = boost::make_shared<BetweenFactor<double>>(
X(0), X(1), 0.0, Isotropic::Sigma(1, 0.1));
auto discreteFactor = boost::make_shared<DecisionTreeFactor>();
auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0);
auto still = boost::make_shared<MotionModel>(X(0), X(1), 0.0, noise_model),
moving = boost::make_shared<MotionModel>(X(0), X(1), 1.0, noise_model);
std::vector<MotionModel::shared_ptr> components = {still, moving};
  // Check for exception when the number of continuous keys is under-specified.
KeyVector contKeys = {X(0)};
THROWS_EXCEPTION(boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components));
  // Check for exception when the number of continuous keys is too large.
contKeys = {X(0), X(1), X(2)};
THROWS_EXCEPTION(boost::make_shared<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components));
}
/*****************************************************************************
 * Test that push_back on an HFG makes the correct distinction.
*/
TEST(HybridFactorGraph, PushBack) {
HybridNonlinearFactorGraph fg;
auto nonlinearFactor = boost::make_shared<BetweenFactor<double>>();
fg.push_back(nonlinearFactor);
EXPECT_LONGS_EQUAL(fg.size(), 1);
fg = HybridNonlinearFactorGraph();
auto discreteFactor = boost::make_shared<DecisionTreeFactor>();
fg.push_back(discreteFactor);
EXPECT_LONGS_EQUAL(fg.size(), 1);
fg = HybridNonlinearFactorGraph();
auto dcFactor = boost::make_shared<MixtureFactor>();
fg.push_back(dcFactor);
EXPECT_LONGS_EQUAL(fg.size(), 1);
// Now do the same with HybridGaussianFactorGraph
HybridGaussianFactorGraph ghfg;
auto gaussianFactor = boost::make_shared<JacobianFactor>();
ghfg.push_back(gaussianFactor);
EXPECT_LONGS_EQUAL(ghfg.size(), 1);
ghfg = HybridGaussianFactorGraph();
ghfg.push_back(discreteFactor);
EXPECT_LONGS_EQUAL(ghfg.size(), 1);
ghfg = HybridGaussianFactorGraph();
ghfg.push_back(dcFactor);
HybridGaussianFactorGraph hgfg2;
hgfg2.push_back(ghfg.begin(), ghfg.end());
EXPECT_LONGS_EQUAL(ghfg.size(), 1);
HybridNonlinearFactorGraph hnfg;
NonlinearFactorGraph factors;
auto noise = noiseModel::Isotropic::Sigma(3, 1.0);
factors.emplace_shared<PriorFactor<Pose2>>(0, Pose2(0, 0, 0), noise);
factors.emplace_shared<PriorFactor<Pose2>>(1, Pose2(1, 0, 0), noise);
factors.emplace_shared<PriorFactor<Pose2>>(2, Pose2(2, 0, 0), noise);
// TODO(Varun) This does not currently work. It should work once HybridFactor
// becomes a base class of NonlinearFactor.
// hnfg.push_back(factors.begin(), factors.end());
// EXPECT_LONGS_EQUAL(3, hnfg.size());
}
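Until HybridFactor becomes a base class of NonlinearFactor, a possible workaround for the commented-out ranged push_back is to push the factors one at a time. This is only a sketch, reusing the `factors` graph built above and assuming push_back accepts a plain NonlinearFactor::shared_ptr (as it does for the derived factor types earlier in this test):
// Hypothetical workaround: dispatch one factor at a time.
HybridNonlinearFactorGraph hnfg2;
for (const auto& factor : factors) {
  hnfg2.push_back(factor);
}
// EXPECT_LONGS_EQUAL(3, hnfg2.size());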
/****************************************************************************
* Test construction of switching-like hybrid factor graph.
*/
TEST(HybridFactorGraph, Switching) {
Switching self(3);
EXPECT_LONGS_EQUAL(7, self.nonlinearFactorGraph.size());
EXPECT_LONGS_EQUAL(7, self.linearizedFactorGraph.size());
}
/****************************************************************************
* Test linearization on a switching-like hybrid factor graph.
*/
TEST(HybridFactorGraph, Linearization) {
Switching self(3);
// Linearize here:
HybridGaussianFactorGraph actualLinearized =
self.nonlinearFactorGraph.linearize(self.linearizationPoint);
EXPECT_LONGS_EQUAL(7, actualLinearized.size());
}
/****************************************************************************
* Test elimination tree construction
*/
TEST(HybridFactorGraph, EliminationTree) {
Switching self(3);
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= self.K; k++) ordering += X(k);
// Create elimination tree.
HybridEliminationTree etree(self.linearizedFactorGraph, ordering);
EXPECT_LONGS_EQUAL(1, etree.roots().size())
}
/****************************************************************************
 * Test elimination function by eliminating x1 in the *-x1-*-x2 graph.
*/
TEST(HybridGaussianElimination, Eliminate_x1) {
Switching self(3);
  // Gather factors on x1: a simple Gaussian prior and a mixture factor.
HybridGaussianFactorGraph factors;
  // Add Gaussian prior
factors.push_back(self.linearizedFactorGraph[0]);
// Add first hybrid factor
factors.push_back(self.linearizedFactorGraph[1]);
// Eliminate x1
Ordering ordering;
ordering += X(1);
auto result = EliminateHybrid(factors, ordering);
CHECK(result.first);
EXPECT_LONGS_EQUAL(1, result.first->nrFrontals());
CHECK(result.second);
// Has two keys, x2 and m1
EXPECT_LONGS_EQUAL(2, result.second->size());
}
/****************************************************************************
* Test elimination function by eliminating x2 in x1-*-x2-*-x3 chain.
* m1/ \m2
*/
TEST(HybridGaussianElimination, Eliminate_x2) {
Switching self(3);
  // Gather factors on x2: two mixture factors (with x1 and x3, respectively).
HybridGaussianFactorGraph factors;
factors.push_back(self.linearizedFactorGraph[1]); // involves m1
factors.push_back(self.linearizedFactorGraph[2]); // involves m2
// Eliminate x2
Ordering ordering;
ordering += X(2);
std::pair<HybridConditional::shared_ptr, HybridFactor::shared_ptr> result =
EliminateHybrid(factors, ordering);
CHECK(result.first);
EXPECT_LONGS_EQUAL(1, result.first->nrFrontals());
CHECK(result.second);
// Note: separator keys should include m1, m2.
EXPECT_LONGS_EQUAL(4, result.second->size());
}
/****************************************************************************
 * Helper method to generate Gaussian factor graphs with a specific mode.
*/
GaussianFactorGraph::shared_ptr batchGFG(double between,
Values linearizationPoint) {
NonlinearFactorGraph graph;
graph.addPrior<double>(X(1), 0, Isotropic::Sigma(1, 0.1));
auto between_x1_x2 = boost::make_shared<MotionModel>(
X(1), X(2), between, Isotropic::Sigma(1, 1.0));
graph.push_back(between_x1_x2);
return graph.linearize(linearizationPoint);
}
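A hypothetical usage sketch: eliminating the mode-specific Gaussian factor graph yields the Gaussian Bayes net expected for that mode, which can then be compared against the corresponding component of the hybrid elimination. The linearization point below is an assumed example:
// Expected result for mode 0 (between = 0.0):
Values linearizationPoint;
linearizationPoint.insert<double>(X(1), 0);
linearizationPoint.insert<double>(X(2), 1);
GaussianBayesNet::shared_ptr expectedBayesNet =
    batchGFG(0.0, linearizationPoint)->eliminateSequential();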
/****************************************************************************
 * Test elimination function by eliminating x1 and x2 in the graph.
*/
TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) {
Switching self(2, 1.0, 0.1);
auto factors = self.linearizedFactorGraph;
  // Eliminate x1 and x2
Ordering ordering;
ordering += X(1);
ordering += X(2);
HybridConditional::shared_ptr hybridConditionalMixture;
HybridFactor::shared_ptr factorOnModes;
std::tie(hybridConditionalMixture, factorOnModes) =
EliminateHybrid(factors, ordering);
auto gaussianConditionalMixture =
dynamic_pointer_cast<GaussianMixture>(hybridConditionalMixture->inner());
CHECK(gaussianConditionalMixture);
// Frontals = [x1, x2]
EXPECT_LONGS_EQUAL(2, gaussianConditionalMixture->nrFrontals());
// 1 parent, which is the mode
EXPECT_LONGS_EQUAL(1, gaussianConditionalMixture->nrParents());
// This is now a HybridDiscreteFactor
auto hybridDiscreteFactor =
dynamic_pointer_cast<HybridDiscreteFactor>(factorOnModes);
// Access the type-erased inner object and convert to DecisionTreeFactor
auto discreteFactor =
dynamic_pointer_cast<DecisionTreeFactor>(hybridDiscreteFactor->inner());
CHECK(discreteFactor);
EXPECT_LONGS_EQUAL(1, discreteFactor->discreteKeys().size());
EXPECT(discreteFactor->root_->isLeaf() == false);
// TODO(Varun) Test emplace_discrete
}
/****************************************************************************
* Test partial elimination
*/
TEST(HybridFactorGraph, Partial_Elimination) {
Switching self(3);
auto linearizedFactorGraph = self.linearizedFactorGraph;
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= self.K; k++) ordering += X(k);
// Eliminate partially.
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
linearizedFactorGraph.eliminatePartialSequential(ordering);
CHECK(hybridBayesNet);
EXPECT_LONGS_EQUAL(3, hybridBayesNet->size());
EXPECT(hybridBayesNet->at(0)->frontals() == KeyVector{X(1)});
EXPECT(hybridBayesNet->at(0)->parents() == KeyVector({X(2), M(1)}));
EXPECT(hybridBayesNet->at(1)->frontals() == KeyVector{X(2)});
EXPECT(hybridBayesNet->at(1)->parents() == KeyVector({X(3), M(1), M(2)}));
EXPECT(hybridBayesNet->at(2)->frontals() == KeyVector{X(3)});
EXPECT(hybridBayesNet->at(2)->parents() == KeyVector({M(1), M(2)}));
CHECK(remainingFactorGraph);
EXPECT_LONGS_EQUAL(3, remainingFactorGraph->size());
EXPECT(remainingFactorGraph->at(0)->keys() == KeyVector({M(1)}));
EXPECT(remainingFactorGraph->at(1)->keys() == KeyVector({M(2), M(1)}));
EXPECT(remainingFactorGraph->at(2)->keys() == KeyVector({M(1), M(2)}));
}
/****************************************************************************
* Test full elimination
*/
TEST(HybridFactorGraph, Full_Elimination) {
Switching self(3);
auto linearizedFactorGraph = self.linearizedFactorGraph;
// We first do a partial elimination
HybridBayesNet::shared_ptr hybridBayesNet_partial;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph_partial;
DiscreteBayesNet discreteBayesNet;
{
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= self.K; k++) ordering += X(k);
// Eliminate partially.
std::tie(hybridBayesNet_partial, remainingFactorGraph_partial) =
linearizedFactorGraph.eliminatePartialSequential(ordering);
DiscreteFactorGraph discrete_fg;
// TODO(Varun) Make this a function of HybridGaussianFactorGraph?
for (HybridFactor::shared_ptr& factor : (*remainingFactorGraph_partial)) {
auto df = dynamic_pointer_cast<HybridDiscreteFactor>(factor);
discrete_fg.push_back(df->inner());
}
ordering.clear();
for (size_t k = 1; k < self.K; k++) ordering += M(k);
discreteBayesNet =
*discrete_fg.eliminateSequential(ordering, EliminateForMPE);
}
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= self.K; k++) ordering += X(k);
for (size_t k = 1; k < self.K; k++) ordering += M(k);
  // Eliminate fully (all continuous and discrete variables).
HybridBayesNet::shared_ptr hybridBayesNet =
linearizedFactorGraph.eliminateSequential(ordering);
CHECK(hybridBayesNet);
EXPECT_LONGS_EQUAL(5, hybridBayesNet->size());
// p(x1 | x2, m1)
EXPECT(hybridBayesNet->at(0)->frontals() == KeyVector{X(1)});
EXPECT(hybridBayesNet->at(0)->parents() == KeyVector({X(2), M(1)}));
// p(x2 | x3, m1, m2)
EXPECT(hybridBayesNet->at(1)->frontals() == KeyVector{X(2)});
EXPECT(hybridBayesNet->at(1)->parents() == KeyVector({X(3), M(1), M(2)}));
// p(x3 | m1, m2)
EXPECT(hybridBayesNet->at(2)->frontals() == KeyVector{X(3)});
EXPECT(hybridBayesNet->at(2)->parents() == KeyVector({M(1), M(2)}));
// P(m1 | m2)
EXPECT(hybridBayesNet->at(3)->frontals() == KeyVector{M(1)});
EXPECT(hybridBayesNet->at(3)->parents() == KeyVector({M(2)}));
EXPECT(
dynamic_pointer_cast<DiscreteConditional>(hybridBayesNet->at(3)->inner())
->equals(*discreteBayesNet.at(0)));
// P(m2)
EXPECT(hybridBayesNet->at(4)->frontals() == KeyVector{M(2)});
EXPECT_LONGS_EQUAL(0, hybridBayesNet->at(4)->nrParents());
EXPECT(
dynamic_pointer_cast<DiscreteConditional>(hybridBayesNet->at(4)->inner())
->equals(*discreteBayesNet.at(1)));
}
/****************************************************************************
* Test printing
*/
TEST(HybridFactorGraph, Printing) {
Switching self(3);
auto linearizedFactorGraph = self.linearizedFactorGraph;
// Create ordering.
Ordering ordering;
for (size_t k = 1; k <= self.K; k++) ordering += X(k);
// Eliminate partially.
HybridBayesNet::shared_ptr hybridBayesNet;
HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
std::tie(hybridBayesNet, remainingFactorGraph) =
linearizedFactorGraph.eliminatePartialSequential(ordering);
string expected_hybridFactorGraph = R"(
size: 7
factor 0: Continuous [x1]
A[x1] = [
10
]
b = [ -10 ]
No noise model
factor 1: Hybrid [x1 x2; m1]{
Choice(m1)
0 Leaf :
A[x1] = [
-1
]
A[x2] = [
1
]
b = [ -1 ]
No noise model
1 Leaf :
A[x1] = [
-1
]
A[x2] = [
1
]
b = [ -0 ]
No noise model
}
factor 2: Hybrid [x2 x3; m2]{
Choice(m2)
0 Leaf :
A[x2] = [
-1
]
A[x3] = [
1
]
b = [ -1 ]
No noise model
1 Leaf :
A[x2] = [
-1
]
A[x3] = [
1
]
b = [ -0 ]
No noise model
}
factor 3: Continuous [x2]
A[x2] = [
10
]
b = [ -10 ]
No noise model
factor 4: Continuous [x3]
A[x3] = [
10
]
b = [ -10 ]
No noise model
factor 5: Discrete [m1]
P( m1 ):
Leaf 0.5
factor 6: Discrete [m2 m1]
P( m2 | m1 ):
Choice(m2)
0 Choice(m1)
0 0 Leaf 0.33333333
0 1 Leaf 0.6
1 Choice(m1)
1 0 Leaf 0.66666667
1 1 Leaf 0.4
)";
EXPECT(assert_print_equal(expected_hybridFactorGraph, linearizedFactorGraph));
// Expected output for hybridBayesNet.
string expected_hybridBayesNet = R"(
size: 3
factor 0: Hybrid P( x1 | x2 m1)
Discrete Keys = (m1, 2),
Choice(m1)
0 Leaf p(x1 | x2)
R = [ 10.0499 ]
S[x2] = [ -0.0995037 ]
d = [ -9.85087 ]
No noise model
1 Leaf p(x1 | x2)
R = [ 10.0499 ]
S[x2] = [ -0.0995037 ]
d = [ -9.95037 ]
No noise model
factor 1: Hybrid P( x2 | x3 m1 m2)
Discrete Keys = (m1, 2), (m2, 2),
Choice(m2)
0 Choice(m1)
0 0 Leaf p(x2 | x3)
R = [ 10.099 ]
S[x3] = [ -0.0990196 ]
d = [ -9.99901 ]
No noise model
0 1 Leaf p(x2 | x3)
R = [ 10.099 ]
S[x3] = [ -0.0990196 ]
d = [ -9.90098 ]
No noise model
1 Choice(m1)
1 0 Leaf p(x2 | x3)
R = [ 10.099 ]
S[x3] = [ -0.0990196 ]
d = [ -10.098 ]
No noise model
1 1 Leaf p(x2 | x3)
R = [ 10.099 ]
S[x3] = [ -0.0990196 ]
d = [ -10 ]
No noise model
factor 2: Hybrid P( x3 | m1 m2)
Discrete Keys = (m1, 2), (m2, 2),
Choice(m2)
0 Choice(m1)
0 0 Leaf p(x3)
R = [ 10.0494 ]
d = [ -10.1489 ]
No noise model
0 1 Leaf p(x3)
R = [ 10.0494 ]
d = [ -10.1479 ]
No noise model
1 Choice(m1)
1 0 Leaf p(x3)
R = [ 10.0494 ]
d = [ -10.0504 ]
No noise model
1 1 Leaf p(x3)
R = [ 10.0494 ]
d = [ -10.0494 ]
No noise model
)";
EXPECT(assert_print_equal(expected_hybridBayesNet, *hybridBayesNet));
}
/****************************************************************************
* Simple PlanarSLAM example test with 2 poses and 2 landmarks (each pose
 * connects to 1 landmark) to expose an issue with default decision tree creation
* in hybrid elimination. The hybrid factor is between the poses X0 and X1.
* The issue arises if we eliminate a landmark variable first since it is not
* connected to a HybridFactor.
*/
TEST(HybridFactorGraph, DefaultDecisionTree) {
HybridNonlinearFactorGraph fg;
// Add a prior on pose x1 at the origin.
// A prior factor consists of a mean and a noise model (covariance matrix)
Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
auto priorNoise = noiseModel::Diagonal::Sigmas(
Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
fg.emplace_nonlinear<PriorFactor<Pose2>>(X(0), prior, priorNoise);
using PlanarMotionModel = BetweenFactor<Pose2>;
// Add odometry factor
Pose2 odometry(2.0, 0.0, 0.0);
KeyVector contKeys = {X(0), X(1)};
auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0);
auto still = boost::make_shared<PlanarMotionModel>(X(0), X(1), Pose2(0, 0, 0),
noise_model),
moving = boost::make_shared<PlanarMotionModel>(X(0), X(1), odometry,
noise_model);
std::vector<PlanarMotionModel::shared_ptr> motion_models = {still, moving};
fg.emplace_hybrid<MixtureFactor>(
contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, motion_models);
  // Add Range-Bearing measurements from X0 to L0 and from X1 to L1.
// create a noise model for the landmark measurements
auto measurementNoise = noiseModel::Diagonal::Sigmas(
Vector2(0.1, 0.2)); // 0.1 rad std on bearing, 20cm on range
// create the measurement values - indices are (pose id, landmark id)
Rot2 bearing11 = Rot2::fromDegrees(45), bearing22 = Rot2::fromDegrees(90);
double range11 = std::sqrt(4.0 + 4.0), range22 = 2.0;
// Add Bearing-Range factors
fg.emplace_nonlinear<BearingRangeFactor<Pose2, Point2>>(
X(0), L(0), bearing11, range11, measurementNoise);
fg.emplace_nonlinear<BearingRangeFactor<Pose2, Point2>>(
X(1), L(1), bearing22, range22, measurementNoise);
// Create (deliberately inaccurate) initial estimate
Values initialEstimate;
initialEstimate.insert(X(0), Pose2(0.5, 0.0, 0.2));
initialEstimate.insert(X(1), Pose2(2.3, 0.1, -0.2));
initialEstimate.insert(L(0), Point2(1.8, 2.1));
initialEstimate.insert(L(1), Point2(4.1, 1.8));
// We want to eliminate variables not connected to DCFactors first.
Ordering ordering;
ordering += L(0);
ordering += L(1);
ordering += X(0);
ordering += X(1);
HybridGaussianFactorGraph linearized = fg.linearize(initialEstimate);
gtsam::HybridBayesNet::shared_ptr hybridBayesNet;
gtsam::HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
// This should NOT fail
std::tie(hybridBayesNet, remainingFactorGraph) =
linearized.eliminatePartialSequential(ordering);
EXPECT_LONGS_EQUAL(4, hybridBayesNet->size());
EXPECT_LONGS_EQUAL(1, remainingFactorGraph->size());
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -0,0 +1,58 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testHybridValues.cpp
* @date Jul 28, 2022
* @author Shangjie Xue
*/
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/discrete/Assignment.h>
#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam/discrete/DiscreteValues.h>
#include <gtsam/hybrid/HybridValues.h>
#include <gtsam/inference/Key.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/Values.h>
// Include for test suite
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
TEST(HybridValues, basics) {
HybridValues values;
values.insert(99, Vector2(2, 3));
values.insert(100, 3);
EXPECT(assert_equal(values.at(99), Vector2(2, 3)));
EXPECT(assert_equal(values.atDiscrete(100), int(3)));
values.print();
HybridValues values2;
values2.insert(100, 3);
values2.insert(99, Vector2(2, 3));
EXPECT(assert_equal(values2, values));
values2.insert(98, Vector2(2, 3));
EXPECT(!assert_equal(values2, values));
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -116,7 +116,7 @@ class FactorGraph {
using HasDerivedValueType = typename std::enable_if<
std::is_base_of<FactorType, typename T::value_type>::value>::type;
/// Check if T has a value_type derived from FactorType.
/// Check if T has a pointer type derived from FactorType.
template <typename T>
using HasDerivedElementType = typename std::enable_if<std::is_base_of<
FactorType, typename T::value_type::element_type>::value>::type;
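Both traits are enable_if aliases used as SFINAE'd return types (void when the condition holds). A sketch of the overload pattern they support; the signatures here are illustrative, not the exact gtsam declarations:
// Enabled when CONTAINER holds factors by value (factors are copied):
template <typename CONTAINER>
HasDerivedValueType<CONTAINER> push_back(const CONTAINER& factors);
// Enabled when CONTAINER holds shared_ptrs to factors (not copied):
template <typename CONTAINER>
HasDerivedElementType<CONTAINER> push_back(const CONTAINER& factors);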

View File

@ -33,7 +33,7 @@ struct ConstructorTraversalData {
typedef typename JunctionTree<BAYESTREE, GRAPH>::sharedNode sharedNode;
ConstructorTraversalData* const parentData;
sharedNode myJTNode;
sharedNode junctionTreeNode;
FastVector<SymbolicConditional::shared_ptr> childSymbolicConditionals;
FastVector<SymbolicFactor::shared_ptr> childSymbolicFactors;
@ -53,8 +53,9 @@ struct ConstructorTraversalData {
// a traversal data structure with its own JT node, and create a child
// pointer in its parent.
ConstructorTraversalData myData = ConstructorTraversalData(&parentData);
myData.myJTNode = boost::make_shared<Node>(node->key, node->factors);
parentData.myJTNode->addChild(myData.myJTNode);
myData.junctionTreeNode =
boost::make_shared<Node>(node->key, node->factors);
parentData.junctionTreeNode->addChild(myData.junctionTreeNode);
return myData;
}
@ -91,7 +92,7 @@ struct ConstructorTraversalData {
myData.parentData->childSymbolicConditionals.push_back(myConditional);
myData.parentData->childSymbolicFactors.push_back(mySeparatorFactor);
sharedNode node = myData.myJTNode;
sharedNode node = myData.junctionTreeNode;
const FastVector<SymbolicConditional::shared_ptr>& childConditionals =
myData.childSymbolicConditionals;
node->problemSize_ = (int) (myConditional->size() * symbolicFactors.size());
@ -138,14 +139,14 @@ JunctionTree<BAYESTREE, GRAPH>::JunctionTree(
typedef typename EliminationTree<ETREE_BAYESNET, ETREE_GRAPH>::Node ETreeNode;
typedef ConstructorTraversalData<BAYESTREE, GRAPH, ETreeNode> Data;
Data rootData(0);
rootData.myJTNode = boost::make_shared<typename Base::Node>(); // Make a dummy node to gather
// the junction tree roots
// Make a dummy node to gather the junction tree roots
rootData.junctionTreeNode = boost::make_shared<typename Base::Node>();
treeTraversal::DepthFirstForest(eliminationTree, rootData,
Data::ConstructorTraversalVisitorPre,
Data::ConstructorTraversalVisitorPostAlg2);
// Assign roots from the dummy node
this->addChildrenAsRoots(rootData.myJTNode);
this->addChildrenAsRoots(rootData.junctionTreeNode);
// Transfer remaining factors from elimination tree
Base::remainingFactors_ = eliminationTree.remainingFactors();

View File

@ -10,7 +10,7 @@
* -------------------------------------------------------------------------- */
/**
* @file SymbolicISAM.h
* @file GaussianISAM.h
* @date July 29, 2013
* @author Frank Dellaert
* @author Richard Roberts

View File

@ -198,6 +198,33 @@ TEST (Serialization, gaussian_factor_graph) {
EXPECT(equalsBinary(graph));
}
/* ****************************************************************************/
TEST(Serialization, gaussian_bayes_net) {
// Create an arbitrary Bayes Net
GaussianBayesNet gbn;
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
0, Vector2(1.0, 2.0), (Matrix2() << 3.0, 4.0, 0.0, 6.0).finished(), 3,
(Matrix2() << 7.0, 8.0, 9.0, 10.0).finished(), 4,
(Matrix2() << 11.0, 12.0, 13.0, 14.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
1, Vector2(15.0, 16.0), (Matrix2() << 17.0, 18.0, 0.0, 20.0).finished(),
2, (Matrix2() << 21.0, 22.0, 23.0, 24.0).finished(), 4,
(Matrix2() << 25.0, 26.0, 27.0, 28.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
2, Vector2(29.0, 30.0), (Matrix2() << 31.0, 32.0, 0.0, 34.0).finished(),
3, (Matrix2() << 35.0, 36.0, 37.0, 38.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
3, Vector2(39.0, 40.0), (Matrix2() << 41.0, 42.0, 0.0, 44.0).finished(),
4, (Matrix2() << 45.0, 46.0, 47.0, 48.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
4, Vector2(49.0, 50.0), (Matrix2() << 51.0, 52.0, 0.0, 54.0).finished()));
std::string serialized = serialize(gbn);
GaussianBayesNet actual;
deserialize(serialized, actual);
EXPECT(assert_equal(gbn, actual));
}
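The same round trip can also be exercised with the serialization test helpers used elsewhere in this file; a sketch, assuming the equalsObj/equalsXML helpers from serializationTestHelpers.h are available alongside equalsBinary:
EXPECT(equalsObj(gbn));     // in-memory text archive round trip
EXPECT(equalsXML(gbn));     // XML archive round trip
EXPECT(equalsBinary(gbn));  // binary archive, as used for graphs above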
/* ************************************************************************* */
TEST (Serialization, gaussian_bayes_tree) {
const Key x1=1, x2=2, x3=3, x4=4;

View File

@ -93,9 +93,14 @@ void PreintegratedCombinedMeasurements::resetIntegration() {
//------------------------------------------------------------------------------
void PreintegratedCombinedMeasurements::integrateMeasurement(
const Vector3& measuredAcc, const Vector3& measuredOmega, double dt) {
if (dt <= 0) {
throw std::runtime_error(
"PreintegratedCombinedMeasurements::integrateMeasurement: dt <=0");
}
// Update preintegrated measurements.
Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx)
Matrix93 B, C;
Matrix9 A; // Jacobian wrt preintegrated measurements without bias (df/dx)
  Matrix93 B, C;  // Jacobians of the state wrt accel bias and omega bias, respectively.
PreintegrationType::update(measuredAcc, measuredOmega, dt, &A, &B, &C);
// Update preintegrated measurements covariance: as in [2] we consider a first
@ -105,47 +110,78 @@ void PreintegratedCombinedMeasurements::integrateMeasurement(
// and preintegrated measurements
// Single Jacobians to propagate covariance
// TODO(frank): should we not also account for bias on position?
Matrix3 theta_H_biasOmega = -C.topRows<3>();
Matrix3 vel_H_biasAcc = -B.bottomRows<3>();
Matrix3 theta_H_biasOmega = C.topRows<3>();
Matrix3 pos_H_biasAcc = B.middleRows<3>(3);
Matrix3 vel_H_biasAcc = B.bottomRows<3>();
Matrix3 theta_H_biasOmegaInit = -theta_H_biasOmega;
Matrix3 pos_H_biasAccInit = -pos_H_biasAcc;
Matrix3 vel_H_biasAccInit = -vel_H_biasAcc;
// overall Jacobian wrt preintegrated measurements (df/dx)
Eigen::Matrix<double, 15, 15> F;
F.setZero();
F.block<9, 9>(0, 0) = A;
F.block<3, 3>(0, 12) = theta_H_biasOmega;
F.block<3, 3>(3, 9) = pos_H_biasAcc;
F.block<3, 3>(6, 9) = vel_H_biasAcc;
F.block<6, 6>(9, 9) = I_6x6;
// Update the uncertainty on the state (matrix F in [4]).
preintMeasCov_ = F * preintMeasCov_ * F.transpose();
// propagate uncertainty
// TODO(frank): use noiseModel routine so we can have arbitrary noise models.
const Matrix3& aCov = p().accelerometerCovariance;
const Matrix3& wCov = p().gyroscopeCovariance;
const Matrix3& iCov = p().integrationCovariance;
const Matrix6& bInitCov = p().biasAccOmegaInt;
// first order uncertainty propagation
// Optimized matrix multiplication (1/dt) * G * measurementCovariance *
// G.transpose()
// Optimized matrix mult: (1/dt) * G * measurementCovariance * G.transpose()
Eigen::Matrix<double, 15, 15> G_measCov_Gt;
G_measCov_Gt.setZero(15, 15);
const Matrix3& bInitCov11 = bInitCov.block<3, 3>(0, 0) / dt;
const Matrix3& bInitCov12 = bInitCov.block<3, 3>(0, 3) / dt;
const Matrix3& bInitCov21 = bInitCov.block<3, 3>(3, 0) / dt;
const Matrix3& bInitCov22 = bInitCov.block<3, 3>(3, 3) / dt;
// BLOCK DIAGONAL TERMS
D_t_t(&G_measCov_Gt) = dt * iCov;
D_v_v(&G_measCov_Gt) = (1 / dt) * vel_H_biasAcc
* (aCov + p().biasAccOmegaInt.block<3, 3>(0, 0))
* (vel_H_biasAcc.transpose());
D_R_R(&G_measCov_Gt) = (1 / dt) * theta_H_biasOmega
* (wCov + p().biasAccOmegaInt.block<3, 3>(3, 3))
* (theta_H_biasOmega.transpose());
D_R_R(&G_measCov_Gt) =
(theta_H_biasOmega * (wCov / dt) * theta_H_biasOmega.transpose()) //
+
(theta_H_biasOmegaInit * bInitCov22 * theta_H_biasOmegaInit.transpose());
D_t_t(&G_measCov_Gt) =
(pos_H_biasAcc * (aCov / dt) * pos_H_biasAcc.transpose()) //
+ (pos_H_biasAccInit * bInitCov11 * pos_H_biasAccInit.transpose()) //
+ (dt * iCov);
D_v_v(&G_measCov_Gt) =
(vel_H_biasAcc * (aCov / dt) * vel_H_biasAcc.transpose()) //
+ (vel_H_biasAccInit * bInitCov11 * vel_H_biasAccInit.transpose());
D_a_a(&G_measCov_Gt) = dt * p().biasAccCovariance;
D_g_g(&G_measCov_Gt) = dt * p().biasOmegaCovariance;
// OFF BLOCK DIAGONAL TERMS
Matrix3 temp = vel_H_biasAcc * p().biasAccOmegaInt.block<3, 3>(3, 0)
* theta_H_biasOmega.transpose();
D_v_R(&G_measCov_Gt) = temp;
D_R_v(&G_measCov_Gt) = temp.transpose();
preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G_measCov_Gt;
D_R_t(&G_measCov_Gt) =
theta_H_biasOmegaInit * bInitCov21 * pos_H_biasAccInit.transpose();
D_R_v(&G_measCov_Gt) =
theta_H_biasOmegaInit * bInitCov21 * vel_H_biasAccInit.transpose();
D_t_R(&G_measCov_Gt) =
pos_H_biasAccInit * bInitCov12 * theta_H_biasOmegaInit.transpose();
D_t_v(&G_measCov_Gt) =
(pos_H_biasAcc * (aCov / dt) * vel_H_biasAcc.transpose()) +
(pos_H_biasAccInit * bInitCov11 * vel_H_biasAccInit.transpose());
D_v_R(&G_measCov_Gt) =
vel_H_biasAccInit * bInitCov12 * theta_H_biasOmegaInit.transpose();
D_v_t(&G_measCov_Gt) =
(vel_H_biasAcc * (aCov / dt) * pos_H_biasAcc.transpose()) +
(vel_H_biasAccInit * bInitCov11 * pos_H_biasAccInit.transpose());
preintMeasCov_.noalias() += G_measCov_Gt;
}
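In summary, the update above is a first-order covariance propagation. As a sketch (with $P$ = preintMeasCov_, $\Sigma_a$/$\Sigma_\omega$ the accelerometer/gyroscope white-noise covariances, $\Sigma_{init}$ = biasAccOmegaInt, and $\Delta t$ = dt):
$$ P \leftarrow F\,P\,F^\top + G_{meas}, \qquad F = \begin{bmatrix} A & \partial f/\partial b \\ 0 & I_6 \end{bmatrix} $$
where $\partial f/\partial b$ collects theta_H_biasOmega, pos_H_biasAcc, and vel_H_biasAcc, and $G_{meas}$ (G_measCov_Gt in the code) sums $\Sigma_a/\Delta t$ and $\Sigma_\omega/\Delta t$ mapped through those Jacobians, the integration noise $\Delta t\,\Sigma_{int}$, the bias random walks $\Delta t\,\Sigma_{ba}$ and $\Delta t\,\Sigma_{b\omega}$, and the initial bias uncertainty $\Sigma_{init}/\Delta t$ mapped through the *Init Jacobians.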
//------------------------------------------------------------------------------
@ -253,6 +289,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) {
os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose();
return os;
}
}
/// namespace gtsam
} // namespace gtsam

View File

@ -51,6 +51,7 @@ typedef ManifoldPreintegration PreintegrationType;
* TRO, 28(1):61-76, 2012.
* [3] L. Carlone, S. Williams, R. Roberts, "Preintegrated IMU factor:
* Computation of the Jacobian Matrices", Tech. Report, 2013.
* Available in this repo as "PreintegratedIMUJacobians.pdf".
* [4] C. Forster, L. Carlone, F. Dellaert, D. Scaramuzza, IMU Preintegration on
* Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation,
* Robotics: Science and Systems (RSS), 2015.
@ -61,7 +62,7 @@ typedef ManifoldPreintegration PreintegrationType;
struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams {
Matrix3 biasAccCovariance; ///< continuous-time "Covariance" describing accelerometer bias random walk
Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk
Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration
Matrix6 biasAccOmegaInt; ///< covariance of bias used as initial estimate.
/// Default constructor makes uninitialized params struct.
/// Used for serialization.
@ -92,11 +93,11 @@ struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams {
void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; }
void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; }
void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; }
void setBiasAccOmegaInit(const Matrix6& cov) { biasAccOmegaInt=cov; }
const Matrix3& getBiasAccCovariance() const { return biasAccCovariance; }
const Matrix3& getBiasOmegaCovariance() const { return biasOmegaCovariance; }
const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; }
const Matrix6& getBiasAccOmegaInit() const { return biasAccOmegaInt; }
private:
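A brief usage sketch of the renamed setter; the numeric values are illustrative placeholders only:
auto params = PreintegrationCombinedParams::MakeSharedU(9.81);
params->setBiasAccCovariance(1e-4 * I_3x3);    // accel bias random walk
params->setBiasOmegaCovariance(1e-6 * I_3x3);  // gyro bias random walk
params->setBiasAccOmegaInit(1e-3 * I_6x6);     // initial bias uncertainty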

View File

@ -105,7 +105,7 @@ public:
/// @{
/** identity for group operation */
static ConstantBias identity() {
static ConstantBias Identity() {
return ConstantBias();
}
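Call sites update accordingly; a one-line sketch:
imuBias::ConstantBias zeroBias = imuBias::ConstantBias::Identity();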

View File

@ -59,7 +59,7 @@ void PreintegratedImuMeasurements::integrateMeasurement(
// Update preintegrated measurements (also get Jacobian)
Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx)
Matrix93 B, C;
  Matrix93 B, C;  // Jacobians of the state wrt accel bias and omega bias, respectively.
PreintegrationType::update(measuredAcc, measuredOmega, dt, &A, &B, &C);
// first order covariance propagation:
@ -73,11 +73,13 @@ void PreintegratedImuMeasurements::integrateMeasurement(
const Matrix3& iCov = p().integrationCovariance;
  // (1/dt) converts continuous-time noise to discrete-time noise
// Update the uncertainty on the state (matrix A in [4]).
preintMeasCov_ = A * preintMeasCov_ * A.transpose();
// These 2 updates account for uncertainty on the IMU measurement (matrix B in [4]).
preintMeasCov_.noalias() += B * (aCov / dt) * B.transpose();
preintMeasCov_.noalias() += C * (wCov / dt) * C.transpose();
// NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt), with Gi << Z_3x3, I_3x3, Z_3x3
// NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt), with Gi << Z_3x3, I_3x3, Z_3x3 (9x3 matrix)
preintMeasCov_.block<3, 3>(3, 3).noalias() += iCov * dt;
}
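As a sketch of the propagation spelled out in the comments above (with $P$ = preintMeasCov_):
$$ P \leftarrow A\,P\,A^\top + \tfrac{1}{\Delta t}\,B\,\Sigma_a\,B^\top + \tfrac{1}{\Delta t}\,C\,\Sigma_\omega\,C^\top $$
plus $\Delta t\,\Sigma_{int}$ added to the 3x3 position block.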

View File

@ -53,6 +53,7 @@ typedef ManifoldPreintegration PreintegrationType;
* TRO, 28(1):61-76, 2012.
* [3] L. Carlone, S. Williams, R. Roberts, "Preintegrated IMU factor:
* Computation of the Jacobian Matrices", Tech. Report, 2013.
* Available in this repo as "PreintegratedIMUJacobians.pdf".
* [4] C. Forster, L. Carlone, F. Dellaert, D. Scaramuzza, "IMU Preintegration on
* Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation",
* Robotics: Science and Systems (RSS), 2015.

View File

@ -157,9 +157,9 @@ Vector9 PreintegrationBase::computeError(const NavState& state_i,
state_j.localCoordinates(predictedState_j, H2 ? &D_error_state_j : 0,
H1 || H3 ? &D_error_predict : 0);
if (H1) *H1 << D_error_predict* D_predict_state_i;
if (H1) *H1 << D_error_predict * D_predict_state_i;
if (H2) *H2 << D_error_state_j;
if (H3) *H3 << D_error_predict* D_predict_bias_i;
if (H3) *H3 << D_error_predict * D_predict_bias_i;
return error;
}

View File

@ -15,8 +15,8 @@
* @author Frank Dellaert
*/
#include <gtsam/navigation/ScenarioRunner.h>
#include <gtsam/base/timing.h>
#include <gtsam/navigation/ScenarioRunner.h>
#include <boost/assign.hpp>
#include <cmath>
@ -105,4 +105,62 @@ Matrix6 ScenarioRunner::estimateNoiseCovariance(size_t N) const {
return Q / (N - 1);
}
PreintegratedCombinedMeasurements CombinedScenarioRunner::integrate(
double T, const Bias& estimatedBias, bool corrupted) const {
gttic_(integrate);
PreintegratedCombinedMeasurements pim(p_, estimatedBias);
const double dt = imuSampleTime();
const size_t nrSteps = T / dt;
double t = 0;
for (size_t k = 0; k < nrSteps; k++, t += dt) {
Vector3 measuredOmega =
corrupted ? measuredAngularVelocity(t) : actualAngularVelocity(t);
Vector3 measuredAcc =
corrupted ? measuredSpecificForce(t) : actualSpecificForce(t);
pim.integrateMeasurement(measuredAcc, measuredOmega, dt);
}
return pim;
}
NavState CombinedScenarioRunner::predict(
const PreintegratedCombinedMeasurements& pim,
const Bias& estimatedBias) const {
const NavState state_i(scenario().pose(0), scenario().velocity_n(0));
return pim.predict(state_i, estimatedBias);
}
Eigen::Matrix<double, 15, 15> CombinedScenarioRunner::estimateCovariance(
double T, size_t N, const Bias& estimatedBias) const {
gttic_(estimateCovariance);
  // Get the prediction from ground-truth measurements
NavState prediction = predict(integrate(T));
  // Sample!
Matrix samples(15, N);
Vector15 sum = Vector15::Zero();
for (size_t i = 0; i < N; i++) {
auto pim = integrate(T, estimatedBias, true);
NavState sampled = predict(pim);
Vector15 xi = Vector15::Zero();
xi << sampled.localCoordinates(prediction),
(estimatedBias_.vector() - estimatedBias.vector());
samples.col(i) = xi;
sum += xi;
}
// Compute MC covariance
Vector15 sampleMean = sum / N;
Eigen::Matrix<double, 15, 15> Q;
Q.setZero();
for (size_t i = 0; i < N; i++) {
Vector15 xi = samples.col(i) - sampleMean;
Q += xi * xi.transpose();
}
return Q / (N - 1);
}
} // namespace gtsam

View File

@ -16,9 +16,10 @@
*/
#pragma once
#include <gtsam/linear/Sampler.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/navigation/Scenario.h>
#include <gtsam/linear/Sampler.h>
namespace gtsam {
@ -66,10 +67,10 @@ class GTSAM_EXPORT ScenarioRunner {
// also, uses g=10 for easy debugging
const Vector3& gravity_n() const { return p_->n_gravity; }
const Scenario& scenario() const { return scenario_; }
// A gyro simply measures angular velocity in body frame
Vector3 actualAngularVelocity(double t) const {
return scenario_.omega_b(t);
}
Vector3 actualAngularVelocity(double t) const { return scenario_.omega_b(t); }
// An accelerometer measures acceleration in body, but not gravity
Vector3 actualSpecificForce(double t) const {
@ -106,4 +107,39 @@ class GTSAM_EXPORT ScenarioRunner {
Matrix6 estimateNoiseCovariance(size_t N = 1000) const;
};
/*
 * Simple class to test navigation scenarios with PreintegratedCombinedMeasurements.
 * Takes a trajectory scenario as input and can generate IMU measurements.
*/
class GTSAM_EXPORT CombinedScenarioRunner : public ScenarioRunner {
public:
typedef boost::shared_ptr<PreintegrationCombinedParams> SharedParams;
private:
const SharedParams p_;
const Bias estimatedBias_;
public:
CombinedScenarioRunner(const Scenario& scenario, const SharedParams& p,
double imuSampleTime = 1.0 / 100.0,
const Bias& bias = Bias())
: ScenarioRunner(scenario, static_cast<ScenarioRunner::SharedParams>(p),
imuSampleTime, bias),
p_(p),
estimatedBias_(bias) {}
/// Integrate measurements for T seconds into a PIM
PreintegratedCombinedMeasurements integrate(
double T, const Bias& estimatedBias = Bias(),
bool corrupted = false) const;
  /// Predict the final NavState given a PIM
NavState predict(const PreintegratedCombinedMeasurements& pim,
const Bias& estimatedBias = Bias()) const;
  /// Compute a Monte Carlo estimate of the prediction covariance using N samples
Eigen::Matrix<double, 15, 15> estimateCovariance(
double T, size_t N = 1000, const Bias& estimatedBias = Bias()) const;
};
} // namespace gtsam
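A usage sketch mirroring the Accelerating test later in this diff; scenario and params are assumed to be in scope:
CombinedScenarioRunner runner(scenario, params, 0.01);          // 100 Hz IMU
PreintegratedCombinedMeasurements pim = runner.integrate(3.0);  // 3 s of data
NavState predicted = runner.predict(pim);                       // predicted end state
auto estimatedCov = runner.estimateCovariance(3.0, 100);        // Monte Carlo estimate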

View File

@ -17,7 +17,7 @@ class ConstantBias {
bool equals(const gtsam::imuBias::ConstantBias& expected, double tol) const;
// Group
static gtsam::imuBias::ConstantBias identity();
static gtsam::imuBias::ConstantBias Identity();
// Operator Overloads
gtsam::imuBias::ConstantBias operator-() const;
@ -165,11 +165,11 @@ virtual class PreintegrationCombinedParams : gtsam::PreintegrationParams {
void setBiasAccCovariance(Matrix cov);
void setBiasOmegaCovariance(Matrix cov);
void setBiasAccOmegaInt(Matrix cov);
void setBiasAccOmegaInit(Matrix cov);
Matrix getBiasAccCovariance() const ;
Matrix getBiasOmegaCovariance() const ;
Matrix getBiasAccOmegaInt() const;
Matrix getBiasAccOmegaInit() const;
};

View File

@ -16,18 +16,19 @@
* @author Frank Dellaert
* @author Richard Roberts
* @author Stephen Williams
* @author Varun Agrawal
*/
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/inference/Symbol.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/numericalDerivative.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/navigation/ScenarioRunner.h>
#include <gtsam/nonlinear/Values.h>
#include <list>
@ -40,12 +41,15 @@ static boost::shared_ptr<PreintegratedCombinedMeasurements::Params> Params() {
p->gyroscopeCovariance = kGyroSigma * kGyroSigma * I_3x3;
p->accelerometerCovariance = kAccelSigma * kAccelSigma * I_3x3;
p->integrationCovariance = 0.0001 * I_3x3;
p->biasAccCovariance = Z_3x3;
p->biasOmegaCovariance = Z_3x3;
p->biasAccOmegaInt = Z_6x6;
return p;
}
}
} // namespace testing
/* ************************************************************************* */
TEST( CombinedImuFactor, PreintegratedMeasurements ) {
TEST(CombinedImuFactor, PreintegratedMeasurements ) {
// Linearization point
Bias bias(Vector3(0, 0, 0), Vector3(0, 0, 0)); ///< Current estimate of acceleration and angular rate biases
@ -71,8 +75,9 @@ TEST( CombinedImuFactor, PreintegratedMeasurements ) {
DOUBLES_EQUAL(expected1.deltaTij(), actual1.deltaTij(), tol);
}
/* ************************************************************************* */
TEST( CombinedImuFactor, ErrorWithBiases ) {
TEST(CombinedImuFactor, ErrorWithBiases ) {
Bias bias(Vector3(0.2, 0, 0), Vector3(0, 0, 0.3)); // Biases (acc, rot)
Bias bias2(Vector3(0.2, 0.2, 0), Vector3(1, 0, 0.3)); // Biases (acc, rot)
Pose3 x1(Rot3::Expmap(Vector3(0, 0, M_PI / 4.0)), Point3(5.0, 1.0, -50.0));
@ -203,6 +208,114 @@ TEST(CombinedImuFactor, PredictRotation) {
EXPECT(assert_equal(expectedPose, actual.pose(), tol));
}
/* ************************************************************************* */
// Test the covariance to check that all the Jacobians are accounted for.
TEST(CombinedImuFactor, CheckCovariance) {
auto params = PreintegrationCombinedParams::MakeSharedU(9.81);
params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3);
params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3);
params->setIntegrationCovariance(pow(0.0, 2) * I_3x3);
params->setOmegaCoriolis(Vector3::Zero());
imuBias::ConstantBias currentBias;
PreintegratedCombinedMeasurements actual(params, currentBias);
// Measurements
Vector3 measuredAcc(0.1577, -0.8251, 9.6111);
Vector3 measuredOmega(-0.0210, 0.0311, 0.0145);
double deltaT = 0.01;
actual.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
Eigen::Matrix<double, 15, 15> expected;
expected << 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01;
// regression
EXPECT(assert_equal(expected, actual.preintMeasCov()));
}
// Test that the covariance values for the ImuFactor and the CombinedImuFactor
// (top-left 9x9) are the same
TEST(CombinedImuFactor, SameCovariance) {
// IMU measurements and time delta
Vector3 accMeas(0.1577, -0.8251, 9.6111);
Vector3 omegaMeas(-0.0210, 0.0311, 0.0145);
double deltaT = 0.01;
// Assume zero bias
imuBias::ConstantBias currentBias;
// Define params for ImuFactor
auto params = PreintegrationParams::MakeSharedU();
params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3);
params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3);
params->setIntegrationCovariance(pow(0, 2) * I_3x3);
params->setOmegaCoriolis(Vector3::Zero());
// The IMU preintegration object for ImuFactor
PreintegratedImuMeasurements pim(params, currentBias);
pim.integrateMeasurement(accMeas, omegaMeas, deltaT);
// Define params for CombinedImuFactor
auto combined_params = PreintegrationCombinedParams::MakeSharedU();
combined_params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3);
combined_params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3);
  // Set integration covariance explicitly to zero
combined_params->setIntegrationCovariance(Z_3x3);
combined_params->setOmegaCoriolis(Z_3x1);
// Set bias initial covariance explicitly to zero
combined_params->setBiasAccOmegaInit(Z_6x6);
// The IMU preintegration object for CombinedImuFactor
PreintegratedCombinedMeasurements cpim(combined_params, currentBias);
cpim.integrateMeasurement(accMeas, omegaMeas, deltaT);
  // Assert that the top-left 9x9 block of the combined covariance matches
EXPECT(assert_equal(pim.preintMeasCov(),
cpim.preintMeasCov().block(0, 0, 9, 9)));
}
/* ************************************************************************* */
TEST(CombinedImuFactor, Accelerating) {
const double a = 0.2, v = 50;
  // Set up body pointing towards the y axis, starting at (10, 20, 0) with
  // velocity going in X. The body itself has its Z axis pointing down.
const Rot3 nRb(Point3(0, 1, 0), Point3(1, 0, 0), Point3(0, 0, -1));
const Point3 initial_position(10, 20, 0);
const Vector3 initial_velocity(v, 0, 0);
const AcceleratingScenario scenario(nRb, initial_position, initial_velocity,
Vector3(a, 0, 0));
const double T = 3.0; // seconds
CombinedScenarioRunner runner(scenario, testing::Params(), T / 10);
PreintegratedCombinedMeasurements pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
auto estimatedCov = runner.estimateCovariance(T, 100);
Eigen::Matrix<double, 15, 15> expected = pim.preintMeasCov();
EXPECT(assert_equal(estimatedCov, expected, 0.1));
}
/* ************************************************************************* */
int main() {
TestResult tr;

View File

@ -19,8 +19,6 @@
#include <gtsam/nonlinear/NonlinearFactor.h>
using namespace gtsam;
namespace gtsam {
using JacobianVector = std::vector<Matrix>;

View File

@ -42,7 +42,7 @@ static double Chi2inv(const double alpha, const size_t dofs) {
/* ************************************************************************* */
template<class GncParameters>
class GTSAM_EXPORT GncOptimizer {
class GncOptimizer {
public:
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
typedef typename GncParameters::OptimizerType BaseOptimizer;

Some files were not shown because too many files have changed in this diff.