Merge branch 'develop' into hybrid/hybrid-factor-graph
commit
f124cccab0
|
@ -43,46 +43,68 @@ if [ -z ${PYTHON_VERSION+x} ]; then
|
|||
exit 127
|
||||
fi
|
||||
|
||||
PYTHON="python${PYTHON_VERSION}"
|
||||
export PYTHON="python${PYTHON_VERSION}"
|
||||
|
||||
if [[ $(uname) == "Darwin" ]]; then
|
||||
function install_dependencies()
|
||||
{
|
||||
if [[ $(uname) == "Darwin" ]]; then
|
||||
brew install wget
|
||||
else
|
||||
else
|
||||
# Install a system package required by our library
|
||||
sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
|
||||
fi
|
||||
fi
|
||||
|
||||
PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
|
||||
export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
|
||||
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
|
||||
$PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
|
||||
}
|
||||
|
||||
function build()
|
||||
{
|
||||
mkdir $GITHUB_WORKSPACE/build
|
||||
cd $GITHUB_WORKSPACE/build
|
||||
|
||||
BUILD_PYBIND="ON"
|
||||
|
||||
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DGTSAM_BUILD_TESTS=OFF \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_USE_QUATERNIONS=OFF \
|
||||
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
|
||||
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
|
||||
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
|
||||
|
||||
|
||||
BUILD_PYBIND="ON"
|
||||
# Set to 2 cores so that Actions does not error out during resource provisioning.
|
||||
make -j2 install
|
||||
|
||||
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
|
||||
cd $GITHUB_WORKSPACE/build/python
|
||||
$PYTHON -m pip install --user .
|
||||
}
|
||||
|
||||
mkdir $GITHUB_WORKSPACE/build
|
||||
cd $GITHUB_WORKSPACE/build
|
||||
function test()
|
||||
{
|
||||
cd $GITHUB_WORKSPACE/python/gtsam/tests
|
||||
$PYTHON -m unittest discover -v
|
||||
}
|
||||
|
||||
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DGTSAM_BUILD_TESTS=OFF \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_USE_QUATERNIONS=OFF \
|
||||
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
|
||||
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
|
||||
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
|
||||
|
||||
|
||||
# Set to 2 cores so that Actions does not error out during resource provisioning.
|
||||
make -j2 install
|
||||
|
||||
cd $GITHUB_WORKSPACE/build/python
|
||||
$PYTHON -m pip install --user .
|
||||
cd $GITHUB_WORKSPACE/python/gtsam/tests
|
||||
$PYTHON -m unittest discover -v
|
||||
# select between build or test
|
||||
case $1 in
|
||||
-d)
|
||||
install_dependencies
|
||||
;;
|
||||
-b)
|
||||
build
|
||||
;;
|
||||
-t)
|
||||
test
|
||||
;;
|
||||
esac
|
||||
|
|
|
@ -19,16 +19,16 @@ jobs:
|
|||
# Github Actions requires a single row to be added to the build matrix.
|
||||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
macOS-10.15-xcode-11.3.1,
|
||||
macos-11-xcode-13.4.1,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
build_unstable: [ON]
|
||||
include:
|
||||
- name: macOS-10.15-xcode-11.3.1
|
||||
os: macOS-10.15
|
||||
- name: macos-11-xcode-13.4.1
|
||||
os: macos-11
|
||||
compiler: xcode
|
||||
version: "11.3.1"
|
||||
version: "13.4.1"
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
@ -43,7 +43,7 @@ jobs:
|
|||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
else
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
sudo xcode-select -switch /Applications/Xcode.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
fi
|
||||
|
|
|
@ -22,7 +22,7 @@ jobs:
|
|||
ubuntu-18.04-gcc-5,
|
||||
ubuntu-18.04-gcc-9,
|
||||
ubuntu-18.04-clang-9,
|
||||
macOS-10.15-xcode-11.3.1,
|
||||
macOS-11-xcode-13.4.1,
|
||||
ubuntu-18.04-gcc-5-tbb,
|
||||
]
|
||||
|
||||
|
@ -52,10 +52,10 @@ jobs:
|
|||
build_type: Debug
|
||||
python_version: "3"
|
||||
|
||||
- name: macOS-10.15-xcode-11.3.1
|
||||
os: macOS-10.15
|
||||
- name: macOS-11-xcode-13.4.1
|
||||
os: macOS-11
|
||||
compiler: xcode
|
||||
version: "11.3.1"
|
||||
version: "13.4.1"
|
||||
|
||||
- name: ubuntu-18.04-gcc-5-tbb
|
||||
os: ubuntu-18.04
|
||||
|
@ -103,7 +103,7 @@ jobs:
|
|||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
else
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
sudo xcode-select -switch /Applications/Xcode.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
fi
|
||||
|
@ -112,11 +112,17 @@ jobs:
|
|||
run: |
|
||||
echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
|
||||
echo "GTSAM Uses TBB"
|
||||
- name: Build (Linux)
|
||||
- name: Set Swap Space
|
||||
if: runner.os == 'Linux'
|
||||
uses: pierotofy/set-swap-space@master
|
||||
with:
|
||||
swap-size-gb: 6
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
bash .github/scripts/python.sh
|
||||
- name: Build (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
bash .github/scripts/python.sh -d
|
||||
- name: Build
|
||||
run: |
|
||||
bash .github/scripts/python.sh
|
||||
bash .github/scripts/python.sh -b
|
||||
- name: Test
|
||||
run: |
|
||||
bash .github/scripts/python.sh -t
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -33,6 +33,7 @@ print_build_options_for_target(gtsam)
|
|||
|
||||
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
|
||||
print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
|
||||
print_config("Using Boost version" "${Boost_VERSION}")
|
||||
|
||||
if(GTSAM_USE_TBB)
|
||||
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
|
||||
|
|
|
@ -0,0 +1,159 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file TriangulationLOSTExample.cpp
|
||||
* @author Akshay Krishnan
|
||||
* @brief This example runs triangulation several times using 3 different
|
||||
* approaches: LOST, DLT, and DLT with optimization. It reports the covariance
|
||||
* and the runtime for each approach.
|
||||
*
|
||||
* @date 2022-07-10
|
||||
*/
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Rot3.h>
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static std::mt19937 rng(42);
|
||||
|
||||
void PrintCovarianceStats(const Matrix& mat, const std::string& method) {
|
||||
Matrix centered = mat.rowwise() - mat.colwise().mean();
|
||||
Matrix cov = (centered.adjoint() * centered) / double(mat.rows() - 1);
|
||||
std::cout << method << " covariance: " << std::endl;
|
||||
std::cout << cov << std::endl;
|
||||
std::cout << "Trace sqrt: " << sqrt(cov.trace()) << std::endl << std::endl;
|
||||
}
|
||||
|
||||
void PrintDuration(const std::chrono::nanoseconds dur, double num_samples,
|
||||
const std::string& method) {
|
||||
double nanoseconds = dur.count() / num_samples;
|
||||
std::cout << "Time taken by " << method << ": " << nanoseconds * 1e-3
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
void GetLargeCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
|
||||
std::vector<Pose3>* poses, Point3* point,
|
||||
Point2Vector* measurements) {
|
||||
const double minXY = -10, maxXY = 10;
|
||||
const double minZ = -20, maxZ = 0;
|
||||
const int nrCameras = 500;
|
||||
cameras->reserve(nrCameras);
|
||||
poses->reserve(nrCameras);
|
||||
measurements->reserve(nrCameras);
|
||||
*point = Point3(0.0, 0.0, 10.0);
|
||||
|
||||
std::uniform_real_distribution<double> rand_xy(minXY, maxXY);
|
||||
std::uniform_real_distribution<double> rand_z(minZ, maxZ);
|
||||
Cal3_S2 identityK;
|
||||
|
||||
for (int i = 0; i < nrCameras; ++i) {
|
||||
Point3 wti(rand_xy(rng), rand_xy(rng), rand_z(rng));
|
||||
Pose3 wTi(Rot3(), wti);
|
||||
|
||||
poses->push_back(wTi);
|
||||
cameras->emplace_back(wTi, identityK);
|
||||
measurements->push_back(cameras->back().project(*point));
|
||||
}
|
||||
}
|
||||
|
||||
void GetSmallCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
|
||||
std::vector<Pose3>* poses, Point3* point,
|
||||
Point2Vector* measurements) {
|
||||
Pose3 pose1;
|
||||
Pose3 pose2(Rot3(), Point3(5., 0., -5.));
|
||||
Cal3_S2 identityK;
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, identityK);
|
||||
PinholeCamera<Cal3_S2> camera2(pose2, identityK);
|
||||
|
||||
*point = Point3(0, 0, 1);
|
||||
cameras->push_back(camera1);
|
||||
cameras->push_back(camera2);
|
||||
*poses = {pose1, pose2};
|
||||
*measurements = {camera1.project(*point), camera2.project(*point)};
|
||||
}
|
||||
|
||||
Point2Vector AddNoiseToMeasurements(const Point2Vector& measurements,
|
||||
const double measurementSigma) {
|
||||
std::normal_distribution<double> normal(0.0, measurementSigma);
|
||||
|
||||
Point2Vector noisyMeasurements;
|
||||
noisyMeasurements.reserve(measurements.size());
|
||||
for (const auto& p : measurements) {
|
||||
noisyMeasurements.emplace_back(p.x() + normal(rng), p.y() + normal(rng));
|
||||
}
|
||||
return noisyMeasurements;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main(int argc, char* argv[]) {
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
std::vector<Pose3> poses;
|
||||
Point3 landmark;
|
||||
Point2Vector measurements;
|
||||
GetLargeCamerasDataset(&cameras, &poses, &landmark, &measurements);
|
||||
// GetSmallCamerasDataset(&cameras, &poses, &landmark, &measurements);
|
||||
const double measurementSigma = 1e-2;
|
||||
SharedNoiseModel measurementNoise =
|
||||
noiseModel::Isotropic::Sigma(2, measurementSigma);
|
||||
|
||||
const long int nrTrials = 1000;
|
||||
Matrix errorsDLT = Matrix::Zero(nrTrials, 3);
|
||||
Matrix errorsLOST = Matrix::Zero(nrTrials, 3);
|
||||
Matrix errorsDLTOpt = Matrix::Zero(nrTrials, 3);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
boost::shared_ptr<Cal3_S2> calib = boost::make_shared<Cal3_S2>();
|
||||
std::chrono::nanoseconds durationDLT;
|
||||
std::chrono::nanoseconds durationDLTOpt;
|
||||
std::chrono::nanoseconds durationLOST;
|
||||
|
||||
for (int i = 0; i < nrTrials; i++) {
|
||||
Point2Vector noisyMeasurements =
|
||||
AddNoiseToMeasurements(measurements, measurementSigma);
|
||||
|
||||
auto lostStart = std::chrono::high_resolution_clock::now();
|
||||
boost::optional<Point3> estimateLOST = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, false, measurementNoise, true);
|
||||
durationLOST += std::chrono::high_resolution_clock::now() - lostStart;
|
||||
|
||||
auto dltStart = std::chrono::high_resolution_clock::now();
|
||||
boost::optional<Point3> estimateDLT = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, false, measurementNoise, false);
|
||||
durationDLT += std::chrono::high_resolution_clock::now() - dltStart;
|
||||
|
||||
auto dltOptStart = std::chrono::high_resolution_clock::now();
|
||||
boost::optional<Point3> estimateDLTOpt = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, true, measurementNoise, false);
|
||||
durationDLTOpt += std::chrono::high_resolution_clock::now() - dltOptStart;
|
||||
|
||||
errorsLOST.row(i) = *estimateLOST - landmark;
|
||||
errorsDLT.row(i) = *estimateDLT - landmark;
|
||||
errorsDLTOpt.row(i) = *estimateDLTOpt - landmark;
|
||||
}
|
||||
PrintCovarianceStats(errorsLOST, "LOST");
|
||||
PrintCovarianceStats(errorsDLT, "DLT");
|
||||
PrintCovarianceStats(errorsDLTOpt, "DLT_OPT");
|
||||
|
||||
PrintDuration(durationLOST, nrTrials, "LOST");
|
||||
PrintDuration(durationDLT, nrTrials, "DLT");
|
||||
PrintDuration(durationDLTOpt, nrTrials, "DLT_OPT");
|
||||
}
|
|
@ -113,6 +113,7 @@ private:
|
|||
template<class Archive>
|
||||
void load(Archive& ar, const unsigned int /*version*/)
|
||||
{
|
||||
this->clear();
|
||||
// Load into STL container and then fill our map
|
||||
FastVector<std::pair<KEY, VALUE> > map;
|
||||
ar & BOOST_SERIALIZATION_NVP(map);
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
#include <gtsam/inference/Key.h>
|
||||
|
||||
#include <gtsam/base/ConcurrentMap.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/MatrixSerialization.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
|
@ -106,6 +107,39 @@ TEST (Serialization, matrix_vector) {
|
|||
EXPECT(equalityBinary<Matrix>((Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST (Serialization, ConcurrentMap) {
|
||||
|
||||
ConcurrentMap<int, std::string> map;
|
||||
|
||||
map.insert(make_pair(1, "apple"));
|
||||
map.insert(make_pair(2, "banana"));
|
||||
|
||||
std::string binaryPath = "saved_map.dat";
|
||||
try {
|
||||
std::ofstream outputStream(binaryPath);
|
||||
boost::archive::binary_oarchive outputArchive(outputStream);
|
||||
outputArchive << map;
|
||||
} catch(...) {
|
||||
EXPECT(false);
|
||||
}
|
||||
|
||||
// Verify that the existing map contents are replaced by the archive data
|
||||
ConcurrentMap<int, std::string> mapFromDisk;
|
||||
mapFromDisk.insert(make_pair(3, "clam"));
|
||||
EXPECT(mapFromDisk.exists(3));
|
||||
try {
|
||||
std::ifstream ifs(binaryPath);
|
||||
boost::archive::binary_iarchive inputArchive(ifs);
|
||||
inputArchive >> mapFromDisk;
|
||||
} catch(...) {
|
||||
EXPECT(false);
|
||||
}
|
||||
EXPECT(mapFromDisk.exists(1));
|
||||
EXPECT(mapFromDisk.exists(2));
|
||||
EXPECT(!mapFromDisk.exists(3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
|
||||
|
|
|
@ -36,8 +36,6 @@
|
|||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
#include <boost/function.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
|
@ -134,7 +132,7 @@ class GTSAM_EXPORT Chebyshev2 : public Basis<Chebyshev2> {
|
|||
* Create matrix of values at Chebyshev points given vector-valued function.
|
||||
*/
|
||||
template <size_t M>
|
||||
static Matrix matrix(boost::function<Eigen::Matrix<double, M, 1>(double)> f,
|
||||
static Matrix matrix(std::function<Eigen::Matrix<double, M, 1>(double)> f,
|
||||
size_t N, double a = -1, double b = 1) {
|
||||
Matrix Xmat(M, N);
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
|
|
|
@ -137,7 +137,7 @@ class FitBasis {
|
|||
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
This::Parameters parameters() const;
|
||||
gtsam::This::Parameters parameters() const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -118,7 +118,7 @@ TEST(Chebyshev2, InterpolateVector) {
|
|||
EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));
|
||||
|
||||
// Check derivative
|
||||
boost::function<Vector2(ParameterMatrix<2>)> f = boost::bind(
|
||||
std::function<Vector2(ParameterMatrix<2>)> f = boost::bind(
|
||||
&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, boost::none);
|
||||
Matrix numericalH =
|
||||
numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/discrete/Assignment.h>
|
||||
|
||||
#include <boost/function.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
#include <gtsam/base/VectorSerialization.h>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <numeric>
|
||||
|
||||
|
@ -33,6 +34,7 @@ namespace gtsam {
|
|||
/// As of GTSAM 4, in order to make GTSAM more lean,
|
||||
/// it is now possible to just typedef Point3 to Vector3
|
||||
typedef Vector3 Point3;
|
||||
typedef std::vector<Point3, Eigen::aligned_allocator<Point3> > Point3Vector;
|
||||
|
||||
// Convenience typedef
|
||||
using Point3Pair = std::pair<Point3, Point3>;
|
||||
|
|
|
@ -489,6 +489,11 @@ boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
|
|||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
Pose3 Pose3::slerp(double t, const Pose3& other, OptionalJacobian<6, 6> Hx, OptionalJacobian<6, 6> Hy) const {
|
||||
return interpolate(*this, other, t, Hx, Hy);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::ostream &operator<<(std::ostream &os, const Pose3& pose) {
|
||||
// Both Rot3 and Point3 have ostream definitions so we use them.
|
||||
|
|
|
@ -379,6 +379,14 @@ public:
|
|||
return std::make_pair(0, 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Spherical Linear interpolation between *this and other
|
||||
* @param s a value between 0 and 1.5
|
||||
* @param other final point of interpolation geodesic on manifold
|
||||
*/
|
||||
Pose3 slerp(double t, const Pose3& other, OptionalJacobian<6, 6> Hx = boost::none,
|
||||
OptionalJacobian<6, 6> Hy = boost::none) const;
|
||||
|
||||
/// Output stream operator
|
||||
GTSAM_EXPORT
|
||||
friend std::ostream &operator<<(std::ostream &os, const Pose3& p);
|
||||
|
|
|
@ -446,24 +446,43 @@ class Pose3 {
|
|||
// Group
|
||||
static gtsam::Pose3 identity();
|
||||
gtsam::Pose3 inverse() const;
|
||||
gtsam::Pose3 inverse(Eigen::Ref<Eigen::MatrixXd> H) const;
|
||||
gtsam::Pose3 compose(const gtsam::Pose3& pose) const;
|
||||
gtsam::Pose3 compose(const gtsam::Pose3& pose,
|
||||
Eigen::Ref<Eigen::MatrixXd> H1,
|
||||
Eigen::Ref<Eigen::MatrixXd> H2) const;
|
||||
gtsam::Pose3 between(const gtsam::Pose3& pose) const;
|
||||
gtsam::Pose3 between(const gtsam::Pose3& pose,
|
||||
Eigen::Ref<Eigen::MatrixXd> H1,
|
||||
Eigen::Ref<Eigen::MatrixXd> H2) const;
|
||||
gtsam::Pose3 slerp(double t, const gtsam::Pose3& pose) const;
|
||||
gtsam::Pose3 slerp(double t, const gtsam::Pose3& pose,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hx,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hy) const;
|
||||
|
||||
// Operator Overloads
|
||||
gtsam::Pose3 operator*(const gtsam::Pose3& pose) const;
|
||||
|
||||
// Manifold
|
||||
gtsam::Pose3 retract(Vector v) const;
|
||||
gtsam::Pose3 retract(Vector v, Eigen::Ref<Eigen::MatrixXd> Hxi) const;
|
||||
Vector localCoordinates(const gtsam::Pose3& pose) const;
|
||||
Vector localCoordinates(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Hxi) const;
|
||||
|
||||
// Lie Group
|
||||
static gtsam::Pose3 Expmap(Vector v);
|
||||
static gtsam::Pose3 Expmap(Vector v, Eigen::Ref<Eigen::MatrixXd> Hxi);
|
||||
static Vector Logmap(const gtsam::Pose3& pose);
|
||||
static Vector Logmap(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Hpose);
|
||||
gtsam::Pose3 expmap(Vector v);
|
||||
Vector logmap(const gtsam::Pose3& pose);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(Vector xi) const;
|
||||
Vector Adjoint(Vector xi, Eigen::Ref<Eigen::MatrixXd> H_this,
|
||||
Eigen::Ref<Eigen::MatrixXd> H_xib) const;
|
||||
Vector AdjointTranspose(Vector xi) const;
|
||||
Vector AdjointTranspose(Vector xi, Eigen::Ref<Eigen::MatrixXd> H_this,
|
||||
Eigen::Ref<Eigen::MatrixXd> H_x) const;
|
||||
static Matrix adjointMap(Vector xi);
|
||||
static Vector adjoint(Vector xi, Vector y);
|
||||
static Matrix adjointMap_(Vector xi);
|
||||
|
@ -476,7 +495,11 @@ class Pose3 {
|
|||
|
||||
// Group Action on Point3
|
||||
gtsam::Point3 transformFrom(const gtsam::Point3& point) const;
|
||||
gtsam::Point3 transformFrom(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hpoint) const;
|
||||
gtsam::Point3 transformTo(const gtsam::Point3& point) const;
|
||||
gtsam::Point3 transformTo(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hpoint) const;
|
||||
|
||||
// Matrix versions
|
||||
Matrix transformFrom(const Matrix& points) const;
|
||||
|
@ -484,15 +507,25 @@ class Pose3 {
|
|||
|
||||
// Standard Interface
|
||||
gtsam::Rot3 rotation() const;
|
||||
gtsam::Rot3 rotation(Eigen::Ref<Eigen::MatrixXd> Hself) const;
|
||||
gtsam::Point3 translation() const;
|
||||
gtsam::Point3 translation(Eigen::Ref<Eigen::MatrixXd> Hself) const;
|
||||
double x() const;
|
||||
double y() const;
|
||||
double z() const;
|
||||
Matrix matrix() const;
|
||||
gtsam::Pose3 transformPoseFrom(const gtsam::Pose3& pose) const;
|
||||
gtsam::Pose3 transformPoseFrom(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> HaTb) const;
|
||||
gtsam::Pose3 transformPoseTo(const gtsam::Pose3& pose) const;
|
||||
gtsam::Pose3 transformPoseTo(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> HwTb) const;
|
||||
double range(const gtsam::Point3& point);
|
||||
double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hpoint);
|
||||
double range(const gtsam::Pose3& pose);
|
||||
double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Hself,
|
||||
Eigen::Ref<Eigen::MatrixXd> Hpose);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
@ -1093,10 +1126,10 @@ class TriangulationResult {
|
|||
Status status;
|
||||
TriangulationResult(const gtsam::Point3& p);
|
||||
const gtsam::Point3& get() const;
|
||||
static TriangulationResult Degenerate();
|
||||
static TriangulationResult Outlier();
|
||||
static TriangulationResult FarPoint();
|
||||
static TriangulationResult BehindCamera();
|
||||
static gtsam::TriangulationResult Degenerate();
|
||||
static gtsam::TriangulationResult Outlier();
|
||||
static gtsam::TriangulationResult FarPoint();
|
||||
static gtsam::TriangulationResult BehindCamera();
|
||||
bool valid() const;
|
||||
bool degenerate() const;
|
||||
bool outlier() const;
|
||||
|
@ -1129,7 +1162,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
|
|||
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
double rank_tol, bool optimize,
|
||||
const gtsam::SharedNoiseModel& model = nullptr);
|
||||
const gtsam::SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false);
|
||||
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
|
||||
gtsam::Cal3_S2* sharedCal,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
|
@ -1151,7 +1185,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
|
|||
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3DS2& cameras,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
double rank_tol, bool optimize,
|
||||
const gtsam::SharedNoiseModel& model = nullptr);
|
||||
const gtsam::SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false);
|
||||
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
|
||||
gtsam::Cal3DS2* sharedCal,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
|
@ -1173,7 +1208,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
|
|||
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
double rank_tol, bool optimize,
|
||||
const gtsam::SharedNoiseModel& model = nullptr);
|
||||
const gtsam::SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false);
|
||||
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
|
||||
gtsam::Cal3Bundler* sharedCal,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
|
@ -1195,7 +1231,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
|
|||
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
double rank_tol, bool optimize,
|
||||
const gtsam::SharedNoiseModel& model = nullptr);
|
||||
const gtsam::SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false);
|
||||
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
|
||||
gtsam::Cal3Fisheye* sharedCal,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
|
@ -1217,7 +1254,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
|
|||
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
double rank_tol, bool optimize,
|
||||
const gtsam::SharedNoiseModel& model = nullptr);
|
||||
const gtsam::SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false);
|
||||
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
|
||||
gtsam::Cal3Unified* sharedCal,
|
||||
const gtsam::Point2Vector& measurements,
|
||||
|
@ -1246,5 +1284,11 @@ class BearingRange {
|
|||
|
||||
typedef gtsam::BearingRange<gtsam::Pose2, gtsam::Point2, gtsam::Rot2, double>
|
||||
BearingRange2D;
|
||||
typedef gtsam::BearingRange<gtsam::Pose2, gtsam::Pose2, gtsam::Rot2, double>
|
||||
BearingRangePose2;
|
||||
typedef gtsam::BearingRange<gtsam::Pose3, gtsam::Point3, gtsam::Unit3, double>
|
||||
BearingRange3D;
|
||||
typedef gtsam::BearingRange<gtsam::Pose3, gtsam::Pose3, gtsam::Unit3, double>
|
||||
BearingRangePose3;
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
|
||||
#include <boost/function.hpp>
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace gtsam;
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
* @date July 30th, 2013
|
||||
* @author Chris Beall (cbeall3)
|
||||
* @author Luca Carlone
|
||||
* @author Akshay Krishnan
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
@ -38,24 +39,24 @@ using namespace boost::assign;
|
|||
|
||||
// Some common constants
|
||||
|
||||
static const boost::shared_ptr<Cal3_S2> sharedCal = //
|
||||
static const boost::shared_ptr<Cal3_S2> kSharedCal = //
|
||||
boost::make_shared<Cal3_S2>(1500, 1200, 0.1, 640, 480);
|
||||
|
||||
// Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
static const Rot3 upright = Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2);
|
||||
static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
|
||||
static const Pose3 kPose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
|
||||
static const PinholeCamera<Cal3_S2> kCamera1(kPose1, *kSharedCal);
|
||||
|
||||
// create second camera 1 meter to the right of first camera
|
||||
static const Pose3 pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0));
|
||||
PinholeCamera<Cal3_S2> camera2(pose2, *sharedCal);
|
||||
static const Pose3 kPose2 = kPose1 * Pose3(Rot3(), Point3(1, 0, 0));
|
||||
static const PinholeCamera<Cal3_S2> kCamera2(kPose2, *kSharedCal);
|
||||
|
||||
// landmark ~5 meters infront of camera
|
||||
static const Point3 landmark(5, 0.5, 1.2);
|
||||
static const Point3 kLandmark(5, 0.5, 1.2);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
static const Point2 kZ1 = kCamera1.project(kLandmark);
|
||||
static const Point2 kZ2 = kCamera2.project(kLandmark);
|
||||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation
|
||||
|
@ -63,22 +64,22 @@ TEST(triangulation, twoPoses) {
|
|||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += z1, z2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += kZ1, kZ2;
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
// 1. Test simple DLT, perfect in no noise situation
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
|
||||
|
||||
// 2. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -86,16 +87,79 @@ TEST(triangulation, twoPoses) {
|
|||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
optimize = false;
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4));
|
||||
|
||||
// 4. Now with optimization on
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
|
||||
}
|
||||
|
||||
TEST(triangulation, twoCamerasUsingLOST) {
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
cameras.push_back(kCamera1);
|
||||
cameras.push_back(kCamera2);
|
||||
|
||||
Point2Vector measurements = {kZ1, kZ2};
|
||||
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4);
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
// 1. Test simple triangulation, perfect in no noise situation
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<PinholeCamera<Cal3_S2>>(cameras, measurements, rank_tol,
|
||||
/*optimize=*/false,
|
||||
measurementNoise,
|
||||
/*useLOST=*/true);
|
||||
EXPECT(assert_equal(kLandmark, *actual1, 1e-12));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements[0] += Point2(0.1, 0.5);
|
||||
measurements[1] += Point2(-0.2, 0.3);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<PinholeCamera<Cal3_S2>>(
|
||||
cameras, measurements, rank_tol,
|
||||
/*optimize=*/false, measurementNoise,
|
||||
/*use_lost_triangulation=*/true);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual2, 1e-4));
|
||||
}
|
||||
|
||||
TEST(triangulation, twoCamerasLOSTvsDLT) {
|
||||
// LOST has been shown to preform better when the point is much closer to one
|
||||
// camera compared to another. This unit test checks this configuration.
|
||||
const Cal3_S2 identityK;
|
||||
const Pose3 pose1;
|
||||
const Pose3 pose2(Rot3(), Point3(5., 0., -5.));
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
cameras.emplace_back(pose1, identityK);
|
||||
cameras.emplace_back(pose2, identityK);
|
||||
|
||||
Point3 landmark(0, 0, 1);
|
||||
Point2 x1_noisy = cameras[0].project(landmark) + Point2(0.00817, 0.00977);
|
||||
Point2 x2_noisy = cameras[1].project(landmark) + Point2(-0.00610, 0.01969);
|
||||
Point2Vector measurements = {x1_noisy, x2_noisy};
|
||||
|
||||
const double rank_tol = 1e-9;
|
||||
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2);
|
||||
|
||||
boost::optional<Point3> estimateLOST = //
|
||||
triangulatePoint3<PinholeCamera<Cal3_S2>>(cameras, measurements, rank_tol,
|
||||
/*optimize=*/false,
|
||||
measurementNoise,
|
||||
/*useLOST=*/true);
|
||||
|
||||
// These values are from a MATLAB implementation.
|
||||
EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), *estimateLOST, 1e-3));
|
||||
|
||||
boost::optional<Point3> estimateDLT =
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements, rank_tol, false);
|
||||
|
||||
// The LOST estimate should have a smaller error.
|
||||
EXPECT((landmark - *estimateLOST).norm() <= (landmark - *estimateDLT).norm());
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation with Cal3DS2 calibration.
|
||||
TEST(triangulation, twoPosesCal3DS2) {
|
||||
|
@ -103,18 +167,18 @@ TEST(triangulation, twoPosesCal3DS2) {
|
|||
boost::make_shared<Cal3DS2>(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001,
|
||||
-0.0003);
|
||||
|
||||
PinholeCamera<Cal3DS2> camera1Distorted(pose1, *sharedDistortedCal);
|
||||
PinholeCamera<Cal3DS2> camera1Distorted(kPose1, *sharedDistortedCal);
|
||||
|
||||
PinholeCamera<Cal3DS2> camera2Distorted(pose2, *sharedDistortedCal);
|
||||
PinholeCamera<Cal3DS2> camera2Distorted(kPose2, *sharedDistortedCal);
|
||||
|
||||
// 0. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1Distorted = camera1Distorted.project(landmark);
|
||||
Point2 z2Distorted = camera2Distorted.project(landmark);
|
||||
Point2 z1Distorted = camera1Distorted.project(kLandmark);
|
||||
Point2 z2Distorted = camera2Distorted.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += z1Distorted, z2Distorted;
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
@ -124,14 +188,14 @@ TEST(triangulation, twoPosesCal3DS2) {
|
|||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
|
||||
rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
|
||||
|
||||
// 2. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
|
||||
rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -160,18 +224,18 @@ TEST(triangulation, twoPosesFisheye) {
|
|||
boost::make_shared<Calibration>(1500, 1200, .1, 640, 480, -.3, 0.1,
|
||||
0.0001, -0.0003);
|
||||
|
||||
PinholeCamera<Calibration> camera1Distorted(pose1, *sharedDistortedCal);
|
||||
PinholeCamera<Calibration> camera1Distorted(kPose1, *sharedDistortedCal);
|
||||
|
||||
PinholeCamera<Calibration> camera2Distorted(pose2, *sharedDistortedCal);
|
||||
PinholeCamera<Calibration> camera2Distorted(kPose2, *sharedDistortedCal);
|
||||
|
||||
// 0. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1Distorted = camera1Distorted.project(landmark);
|
||||
Point2 z2Distorted = camera2Distorted.project(landmark);
|
||||
Point2 z1Distorted = camera1Distorted.project(kLandmark);
|
||||
Point2 z2Distorted = camera2Distorted.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += z1Distorted, z2Distorted;
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
@ -181,14 +245,14 @@ TEST(triangulation, twoPosesFisheye) {
|
|||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
|
||||
rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
|
||||
|
||||
// 2. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
|
||||
rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -213,17 +277,17 @@ TEST(triangulation, twoPosesFisheye) {
|
|||
TEST(triangulation, twoPosesBundler) {
|
||||
boost::shared_ptr<Cal3Bundler> bundlerCal = //
|
||||
boost::make_shared<Cal3Bundler>(1500, 0.1, 0.2, 640, 480);
|
||||
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
|
||||
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);
|
||||
PinholeCamera<Cal3Bundler> camera1(kPose1, *bundlerCal);
|
||||
PinholeCamera<Cal3Bundler> camera2(kPose2, *bundlerCal);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
Point2 z1 = camera1.project(kLandmark);
|
||||
Point2 z2 = camera2.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += z1, z2;
|
||||
|
||||
bool optimize = true;
|
||||
|
@ -232,7 +296,7 @@ TEST(triangulation, twoPosesBundler) {
|
|||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-7));
|
||||
|
||||
// Add some noise and try again
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
|
@ -249,12 +313,12 @@ TEST(triangulation, fourPoses) {
|
|||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += z1, z2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += kZ1, kZ2;
|
||||
|
||||
boost::optional<Point3> actual =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -262,37 +326,37 @@ TEST(triangulation, fourPoses) {
|
|||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
|
||||
Point2 z3 = camera3.project(landmark);
|
||||
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
|
||||
Point2 z3 = camera3.project(kLandmark);
|
||||
|
||||
poses += pose3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
|
||||
Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
|
||||
PinholeCamera<Cal3_S2> camera4(pose4, *sharedCal);
|
||||
PinholeCamera<Cal3_S2> camera4(pose4, *kSharedCal);
|
||||
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(camera4.project(landmark), CheiralityException);
|
||||
CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException);
|
||||
|
||||
poses += pose4;
|
||||
measurements += Point2(400, 400);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
|
||||
TriangulationCheiralityException);
|
||||
#endif
|
||||
}
|
||||
|
@ -300,33 +364,33 @@ TEST(triangulation, fourPoses) {
|
|||
//******************************************************************************
|
||||
TEST(triangulation, threePoses_robustNoiseModel) {
|
||||
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
|
||||
Point2 z3 = camera3.project(landmark);
|
||||
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
|
||||
Point2 z3 = camera3.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
poses += pose1, pose2, pose3;
|
||||
measurements += z1, z2, z3;
|
||||
poses += kPose1, kPose2, pose3;
|
||||
measurements += kZ1, kZ2, z3;
|
||||
|
||||
// noise free, so should give exactly the landmark
|
||||
boost::optional<Point3> actual =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
|
||||
// Add outlier
|
||||
measurements.at(0) += Point2(100, 120); // very large pixel noise!
|
||||
|
||||
// now estimate does not match landmark
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
// DLT is surprisingly robust, but still off (actual error is around 0.26m):
|
||||
EXPECT( (landmark - *actual2).norm() >= 0.2);
|
||||
EXPECT( (landmark - *actual2).norm() <= 0.5);
|
||||
EXPECT( (kLandmark - *actual2).norm() >= 0.2);
|
||||
EXPECT( (kLandmark - *actual2).norm() <= 0.5);
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> actual3 =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
|
||||
// result from nonlinear (but non-robust optimization) is close to DLT and still off
|
||||
EXPECT(assert_equal(*actual2, *actual3, 0.1));
|
||||
|
||||
|
@ -334,27 +398,27 @@ TEST(triangulation, threePoses_robustNoiseModel) {
|
|||
auto model = noiseModel::Robust::Create(
|
||||
noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2));
|
||||
boost::optional<Point3> actual4 = triangulatePoint3<Cal3_S2>(
|
||||
poses, sharedCal, measurements, 1e-9, true, model);
|
||||
poses, kSharedCal, measurements, 1e-9, true, model);
|
||||
// using the Huber loss we now have a quite small error!! nice!
|
||||
EXPECT(assert_equal(landmark, *actual4, 0.05));
|
||||
EXPECT(assert_equal(kLandmark, *actual4, 0.05));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, fourPoses_robustNoiseModel) {
|
||||
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
|
||||
Point2 z3 = camera3.project(landmark);
|
||||
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
|
||||
Point2 z3 = camera3.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
poses += pose1, pose1, pose2, pose3; // 2 measurements from pose 1
|
||||
measurements += z1, z1, z2, z3;
|
||||
poses += kPose1, kPose1, kPose2, pose3; // 2 measurements from pose 1
|
||||
measurements += kZ1, kZ1, kZ2, z3;
|
||||
|
||||
// noise free, so should give exactly the landmark
|
||||
boost::optional<Point3> actual =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
|
||||
// Add outlier
|
||||
measurements.at(0) += Point2(100, 120); // very large pixel noise!
|
||||
|
@ -365,14 +429,14 @@ TEST(triangulation, fourPoses_robustNoiseModel) {
|
|||
|
||||
// now estimate does not match landmark
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
|
||||
// DLT is surprisingly robust, but still off (actual error is around 0.17m):
|
||||
EXPECT( (landmark - *actual2).norm() >= 0.1);
|
||||
EXPECT( (landmark - *actual2).norm() <= 0.5);
|
||||
EXPECT( (kLandmark - *actual2).norm() >= 0.1);
|
||||
EXPECT( (kLandmark - *actual2).norm() <= 0.5);
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> actual3 =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
|
||||
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
|
||||
// result from nonlinear (but non-robust optimization) is close to DLT and still off
|
||||
EXPECT(assert_equal(*actual2, *actual3, 0.1));
|
||||
|
||||
|
@ -380,24 +444,24 @@ TEST(triangulation, fourPoses_robustNoiseModel) {
|
|||
auto model = noiseModel::Robust::Create(
|
||||
noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2));
|
||||
boost::optional<Point3> actual4 = triangulatePoint3<Cal3_S2>(
|
||||
poses, sharedCal, measurements, 1e-9, true, model);
|
||||
poses, kSharedCal, measurements, 1e-9, true, model);
|
||||
// using the Huber loss we now have a quite small error!! nice!
|
||||
EXPECT(assert_equal(landmark, *actual4, 0.05));
|
||||
EXPECT(assert_equal(kLandmark, *actual4, 0.05));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, fourPoses_distinct_Ks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
|
||||
PinholeCamera<Cal3_S2> camera1(kPose1, K1);
|
||||
|
||||
// create second camera 1 meter to the right of first camera
|
||||
Cal3_S2 K2(1600, 1300, 0, 650, 440);
|
||||
PinholeCamera<Cal3_S2> camera2(pose2, K2);
|
||||
PinholeCamera<Cal3_S2> camera2(kPose2, K2);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
Point2 z1 = camera1.project(kLandmark);
|
||||
Point2 z2 = camera2.project(kLandmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
@ -407,7 +471,7 @@ TEST(triangulation, fourPoses_distinct_Ks) {
|
|||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -416,25 +480,25 @@ TEST(triangulation, fourPoses_distinct_Ks) {
|
|||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Cal3_S2 K3(700, 500, 0, 640, 480);
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, K3);
|
||||
Point2 z3 = camera3.project(landmark);
|
||||
Point2 z3 = camera3.project(kLandmark);
|
||||
|
||||
cameras += camera3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
|
||||
Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
|
||||
|
@ -442,7 +506,7 @@ TEST(triangulation, fourPoses_distinct_Ks) {
|
|||
PinholeCamera<Cal3_S2> camera4(pose4, K4);
|
||||
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(camera4.project(landmark), CheiralityException);
|
||||
CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException);
|
||||
|
||||
cameras += camera4;
|
||||
measurements += Point2(400, 400);
|
||||
|
@ -455,15 +519,15 @@ TEST(triangulation, fourPoses_distinct_Ks) {
|
|||
TEST(triangulation, fourPoses_distinct_Ks_distortion) {
|
||||
Cal3DS2 K1(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3DS2> camera1(pose1, K1);
|
||||
PinholeCamera<Cal3DS2> camera1(kPose1, K1);
|
||||
|
||||
// create second camera 1 meter to the right of first camera
|
||||
Cal3DS2 K2(1600, 1300, 0, 650, 440, -.2, 0.05, 0.0002, -0.0001);
|
||||
PinholeCamera<Cal3DS2> camera2(pose2, K2);
|
||||
PinholeCamera<Cal3DS2> camera2(kPose2, K2);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
Point2 z1 = camera1.project(kLandmark);
|
||||
Point2 z2 = camera2.project(kLandmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3DS2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
@ -473,22 +537,22 @@ TEST(triangulation, fourPoses_distinct_Ks_distortion) {
|
|||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3DS2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, outliersAndFarLandmarks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
|
||||
PinholeCamera<Cal3_S2> camera1(kPose1, K1);
|
||||
|
||||
// create second camera 1 meter to the right of first camera
|
||||
Cal3_S2 K2(1600, 1300, 0, 650, 440);
|
||||
PinholeCamera<Cal3_S2> camera2(pose2, K2);
|
||||
PinholeCamera<Cal3_S2> camera2(kPose2, K2);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
Point2 z1 = camera1.project(kLandmark);
|
||||
Point2 z2 = camera2.project(kLandmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
@ -501,7 +565,7 @@ TEST(triangulation, outliersAndFarLandmarks) {
|
|||
1.0, false, landmarkDistanceThreshold); // all default except
|
||||
// landmarkDistanceThreshold
|
||||
TriangulationResult actual = triangulateSafe(cameras, measurements, params);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
|
||||
EXPECT(actual.valid());
|
||||
|
||||
landmarkDistanceThreshold = 4; // landmark is farther than that
|
||||
|
@ -513,10 +577,10 @@ TEST(triangulation, outliersAndFarLandmarks) {
|
|||
|
||||
// 3. Add a slightly rotated third camera above with a wrong measurement
|
||||
// (OUTLIER)
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Cal3_S2 K3(700, 500, 0, 640, 480);
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, K3);
|
||||
Point2 z3 = camera3.project(landmark);
|
||||
Point2 z3 = camera3.project(kLandmark);
|
||||
|
||||
cameras += camera3;
|
||||
measurements += z3 + Point2(10, -10);
|
||||
|
@ -539,18 +603,18 @@ TEST(triangulation, outliersAndFarLandmarks) {
|
|||
//******************************************************************************
|
||||
TEST(triangulation, twoIdenticalPoses) {
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
|
||||
PinholeCamera<Cal3_S2> camera1(kPose1, *kSharedCal);
|
||||
|
||||
// 1. Project two landmarks into two cameras and triangulate
|
||||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z1 = camera1.project(kLandmark);
|
||||
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose1;
|
||||
poses += kPose1, kPose1;
|
||||
measurements += z1, z1;
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
|
@ -565,7 +629,7 @@ TEST(triangulation, onePose) {
|
|||
poses += Pose3();
|
||||
measurements += Point2(0, 0);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
|
@ -681,12 +745,12 @@ TEST(triangulation, twoPoses_sphericalCamera) {
|
|||
std::vector<Unit3> measurements;
|
||||
|
||||
// Project landmark into two cameras and triangulate
|
||||
SphericalCamera cam1(pose1);
|
||||
SphericalCamera cam2(pose2);
|
||||
Unit3 u1 = cam1.project(landmark);
|
||||
Unit3 u2 = cam2.project(landmark);
|
||||
SphericalCamera cam1(kPose1);
|
||||
SphericalCamera cam2(kPose2);
|
||||
Unit3 u1 = cam1.project(kLandmark);
|
||||
Unit3 u2 = cam2.project(kLandmark);
|
||||
|
||||
poses += pose1, pose2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
|
@ -698,25 +762,25 @@ TEST(triangulation, twoPoses_sphericalCamera) {
|
|||
// 1. Test linear triangulation via DLT
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, point, 1e-7));
|
||||
|
||||
// 2. Test nonlinear triangulation
|
||||
point = triangulateNonlinear<SphericalCamera>(cameras, measurements, point);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, point, 1e-7));
|
||||
|
||||
// 3. Test simple DLT, now within triangulatePoint3
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
|
||||
|
||||
// 4. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
|
||||
|
||||
// 5. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
|
@ -761,7 +825,7 @@ TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) {
|
|||
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2,
|
||||
1e-7)); // behind and to the right of PoseB
|
||||
|
||||
poses += pose1, pose2;
|
||||
poses += kPose1, kPose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
* @brief Functions for triangulation
|
||||
* @date July 31, 2013
|
||||
* @author Chris Beall
|
||||
* @author Akshay Krishnan
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
|
@ -24,9 +25,9 @@
|
|||
namespace gtsam {
|
||||
|
||||
Vector4 triangulateHomogeneousDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
|
||||
projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol) {
|
||||
|
||||
// number of cameras
|
||||
size_t m = projection_matrices.size();
|
||||
|
||||
|
@ -39,6 +40,11 @@ Vector4 triangulateHomogeneousDLT(
|
|||
const Point2& p = measurements.at(i);
|
||||
|
||||
// build system of equations
|
||||
// [A_1; A_2; A_3] x = [b_1; b_2; b_3]
|
||||
// [b_3 * A_1 - b_1 * A_3] x = 0
|
||||
// [b_3 * A_2 - b_2 * A_3] x = 0
|
||||
// A' x = 0
|
||||
// A' 2x4 = [b_3 * A_1 - b_1 * A_3; b_3 * A_2 - b_2 * A_3]
|
||||
A.row(row) = p.x() * projection.row(2) - projection.row(0);
|
||||
A.row(row + 1) = p.y() * projection.row(2) - projection.row(1);
|
||||
}
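For readers following the comment block above: with pixel measurement (x_i, y_i) and 3x4 projection matrix P_i whose rows are P_i^(1), P_i^(2), P_i^(3), the two rows appended per camera are the standard homogeneous DLT constraints. A compact restatement (notation ours, not taken from the source) is

\[
\big(x_i\,P_i^{(3)} - P_i^{(1)}\big)\,X = 0, \qquad
\big(y_i\,P_i^{(3)} - P_i^{(2)}\big)\,X = 0, \qquad X \in \mathbb{R}^4 \text{ homogeneous},
\]

and stacking the 2m rows gives A X = 0, which is solved just below via DLT(A, rank_tol) with a rank check.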
|
||||
|
@ -47,16 +53,15 @@ Vector4 triangulateHomogeneousDLT(
|
|||
Vector v;
|
||||
boost::tie(rank, error, v) = DLT(A, rank_tol);
|
||||
|
||||
if (rank < 3)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
if (rank < 3) throw(TriangulationUnderconstrainedException());
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
Vector4 triangulateHomogeneousDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
|
||||
projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// number of cameras
|
||||
size_t m = projection_matrices.size();
|
||||
|
||||
|
@ -66,7 +71,9 @@ Vector4 triangulateHomogeneousDLT(
|
|||
for (size_t i = 0; i < m; i++) {
|
||||
size_t row = i * 2;
|
||||
const Matrix34& projection = projection_matrices.at(i);
|
||||
const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
|
||||
const Point3& p =
|
||||
measurements.at(i)
|
||||
.point3(); // to get access to x,y,z of the bearing vector
|
||||
|
||||
// build system of equations
|
||||
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
|
||||
|
@ -77,34 +84,67 @@ Vector4 triangulateHomogeneousDLT(
|
|||
Vector v;
|
||||
boost::tie(rank, error, v) = DLT(A, rank_tol);
|
||||
|
||||
if (rank < 3)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
if (rank < 3) throw(TriangulationUnderconstrainedException());
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol) {
|
||||
Point3 triangulateLOST(const std::vector<Pose3>& poses,
|
||||
const Point3Vector& calibratedMeasurements,
|
||||
const SharedIsotropic& measurementNoise) {
|
||||
size_t m = calibratedMeasurements.size();
|
||||
assert(m == poses.size());
|
||||
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Construct the system matrices.
|
||||
Matrix A = Matrix::Zero(m * 2, 3);
|
||||
Matrix b = Matrix::Zero(m * 2, 1);
|
||||
|
||||
for (size_t i = 0; i < m; i++) {
|
||||
const Pose3& wTi = poses[i];
|
||||
// TODO(akshay-krishnan): are there better ways to select j?
|
||||
const int j = (i + 1) % m;
|
||||
const Pose3& wTj = poses[j];
|
||||
|
||||
const Point3 d_ij = wTj.translation() - wTi.translation();
|
||||
|
||||
const Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]);
|
||||
const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]);
|
||||
|
||||
// Note: Setting q_i = 1.0 gives same results as DLT.
|
||||
const double q_i = wZi.cross(wZj).norm() /
|
||||
(measurementNoise->sigma() * d_ij.cross(wZj).norm());
|
||||
|
||||
const Matrix23 coefficientMat =
|
||||
q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) *
|
||||
wTi.rotation().matrix().transpose();
|
||||
|
||||
A.block<2, 3>(2 * i, 0) << coefficientMat;
|
||||
b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation();
|
||||
}
|
||||
return A.colPivHouseholderQr().solve(b);
|
||||
}
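To summarize the construction just above in equations (our paraphrase of the code, with sigma the isotropic measurement sigma, t_i and R_i the translation and rotation of pose i, and z_i the calibrated bearing):

\[
q_i = \frac{\lVert\, {}^{w}z_i \times {}^{w}z_j \,\rVert}{\sigma\,\lVert\, d_{ij} \times {}^{w}z_j \,\rVert},
\qquad d_{ij} = t_j - t_i, \qquad {}^{w}z_i = R_i\, z_i,
\]
\[
A_i = q_i\,[\,z_i\,]_{\times}^{(1:2,\;:)}\,R_i^{\top}, \qquad b_i = A_i\, t_i,
\]

and the triangulated point is the least-squares solution of the stacked system A X = b, computed with the column-pivoting QR above.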
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
|
||||
projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol) {
|
||||
Vector4 v =
|
||||
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
|
||||
projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// Contrary to the previous triangulateDLT, this version takes Unit3 inputs
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
Vector4 v =
|
||||
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
///
|
||||
/**
|
||||
* Optimize for triangulation
|
||||
* @param graph nonlinear factors for projection
|
||||
|
@ -132,4 +172,4 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
|
|||
return result.at<Point3>(landmarkKey);
|
||||
}
|
||||
|
||||
} // \namespace gtsam
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
* @date July 31, 2013
|
||||
* @author Chris Beall
|
||||
* @author Luca Carlone
|
||||
* @author Akshay Krishnan
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
@ -94,6 +95,20 @@ GTSAM_EXPORT Point3 triangulateDLT(
|
|||
const std::vector<Unit3>& measurements,
|
||||
double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* @brief Triangulation using the LOST (Linear Optimal Sine Triangulation)
|
||||
* algorithm proposed in https://arxiv.org/pdf/2205.12197.pdf by Sebastien Henry
|
||||
* and John Christian.
|
||||
* @param poses camera poses in world frame
|
||||
* @param calibratedMeasurements measurements in homogeneous coordinates in each
|
||||
* camera pose
|
||||
* @param measurementNoise isotropic noise model for the measurements
|
||||
* @return triangulated point in world coordinates
|
||||
*/
|
||||
GTSAM_EXPORT Point3 triangulateLOST(const std::vector<Pose3>& poses,
|
||||
const Point3Vector& calibratedMeasurements,
|
||||
const SharedIsotropic& measurementNoise);
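A minimal usage sketch for the declaration above (the poses and homogeneous measurements are invented for illustration; the same code path is also reachable through the useLOST flag added to triangulatePoint3 later in this header):

// Sketch: triangulate one landmark with LOST from two calibrated views.
// Measurements are assumed to already be homogeneous image-plane points,
// e.g. produced by calibrateMeasurementsShared below.
#include <gtsam/geometry/triangulation.h>
#include <vector>

gtsam::Point3 exampleLOST() {
  using namespace gtsam;
  std::vector<Pose3> poses = {Pose3(), Pose3(Rot3(), Point3(1, 0, 0))};
  Point3Vector calibrated = {Point3(0.1, 0.0, 1.0), Point3(-0.1, 0.0, 1.0)};
  SharedIsotropic noise = noiseModel::Isotropic::Sigma(2, 1e-3);
  return triangulateLOST(poses, calibrated, noise);
}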
|
||||
|
||||
/**
|
||||
* Create a factor graph with projection factors from poses and one calibration
|
||||
* @param poses Camera poses
|
||||
|
@ -108,17 +123,16 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
|
|||
const std::vector<Pose3>& poses, boost::shared_ptr<CALIBRATION> sharedCal,
|
||||
const Point2Vector& measurements, Key landmarkKey,
|
||||
const Point3& initialEstimate,
|
||||
const SharedNoiseModel& model = nullptr) {
|
||||
const SharedNoiseModel& model = noiseModel::Unit::Create(2)) {
|
||||
Values values;
|
||||
values.insert(landmarkKey, initialEstimate); // Initial landmark value
|
||||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit2(noiseModel::Unit::Create(2));
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
const Pose3& pose_i = poses[i];
|
||||
typedef PinholePose<CALIBRATION> Camera;
|
||||
Camera camera_i(pose_i, sharedCal);
|
||||
graph.emplace_shared<TriangulationFactor<Camera> > //
|
||||
(camera_i, measurements[i], model? model : unit2, landmarkKey);
|
||||
(camera_i, measurements[i], model, landmarkKey);
|
||||
}
|
||||
return std::make_pair(graph, values);
|
||||
}
|
||||
|
@ -302,10 +316,10 @@ template <class CAMERA>
|
|||
typename CAMERA::MeasurementVector undistortMeasurements(
|
||||
const CameraSet<CAMERA>& cameras,
|
||||
const typename CAMERA::MeasurementVector& measurements) {
|
||||
const size_t num_meas = cameras.size();
|
||||
assert(num_meas == measurements.size());
|
||||
typename CAMERA::MeasurementVector undistortedMeasurements(num_meas);
|
||||
for (size_t ii = 0; ii < num_meas; ++ii) {
|
||||
const size_t nrMeasurements = measurements.size();
|
||||
assert(nrMeasurements == cameras.size());
|
||||
typename CAMERA::MeasurementVector undistortedMeasurements(nrMeasurements);
|
||||
for (size_t ii = 0; ii < nrMeasurements; ++ii) {
|
||||
// Calibrate with cal and uncalibrate with pinhole version of cal so that
|
||||
// measurements are undistorted.
|
||||
undistortedMeasurements[ii] =
|
||||
|
@ -331,6 +345,65 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
|
|||
return measurements;
|
||||
}
|
||||
|
||||
/** Convert pixel measurements in image to homogeneous measurements in the image
|
||||
* plane using shared camera intrinsics.
|
||||
*
|
||||
* @tparam CALIBRATION Calibration type to use.
|
||||
* @param cal Calibration with which measurements were taken.
|
||||
* @param measurements Vector of measurements to undistort.
|
||||
* @return homogeneous measurements in image plane
|
||||
*/
|
||||
template <class CALIBRATION>
|
||||
inline Point3Vector calibrateMeasurementsShared(
|
||||
const CALIBRATION& cal, const Point2Vector& measurements) {
|
||||
Point3Vector calibratedMeasurements;
|
||||
// Calibrate with cal and uncalibrate with pinhole version of cal so that
|
||||
// measurements are undistorted.
|
||||
std::transform(measurements.begin(), measurements.end(),
|
||||
std::back_inserter(calibratedMeasurements),
|
||||
[&cal](const Point2& measurement) {
|
||||
Point3 p;
|
||||
p << cal.calibrate(measurement), 1.0;
|
||||
return p;
|
||||
});
|
||||
return calibratedMeasurements;
|
||||
}
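A small sketch of how the helper above is meant to be used with a shared calibration (the calibration values and pixel coordinates are placeholders; the resulting homogeneous points are what triangulateLOST consumes):

// Sketch: convert pixel detections to homogeneous image-plane coordinates.
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/triangulation.h>

gtsam::Point3Vector examplePixelToHomogeneous() {
  using namespace gtsam;
  Cal3_S2 K(500.0, 500.0, 0.0, 320.0, 240.0);  // fx, fy, skew, px, py
  Point2Vector pixels = {Point2(330.0, 250.0), Point2(310.0, 235.0)};
  // Each returned entry is (u, v, 1) with (u, v) = K.calibrate(pixel).
  return calibrateMeasurementsShared<Cal3_S2>(K, pixels);
}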
|
||||
|
||||
/** Convert pixel measurements in image to homogeneous measurements in the image
|
||||
* plane using camera intrinsics of each measurement.
|
||||
*
|
||||
* @tparam CAMERA Camera type to use.
|
||||
* @param cameras Cameras corresponding to each measurement.
|
||||
* @param measurements Vector of measurements to undistort.
|
||||
* @return homogeneous measurements in image plane
|
||||
*/
|
||||
template <class CAMERA>
|
||||
inline Point3Vector calibrateMeasurements(
|
||||
const CameraSet<CAMERA>& cameras,
|
||||
const typename CAMERA::MeasurementVector& measurements) {
|
||||
const size_t nrMeasurements = measurements.size();
|
||||
assert(nrMeasurements == cameras.size());
|
||||
Point3Vector calibratedMeasurements(nrMeasurements);
|
||||
for (size_t ii = 0; ii < nrMeasurements; ++ii) {
|
||||
calibratedMeasurements[ii]
|
||||
<< cameras[ii].calibration().calibrate(measurements[ii]),
|
||||
1.0;
|
||||
}
|
||||
return calibratedMeasurements;
|
||||
}
|
||||
|
||||
/** Specialize for SphericalCamera to do nothing. */
|
||||
template <class CAMERA = SphericalCamera>
|
||||
inline Point3Vector calibrateMeasurements(
|
||||
const CameraSet<SphericalCamera>& cameras,
|
||||
const SphericalCamera::MeasurementVector& measurements) {
|
||||
Point3Vector calibratedMeasurements(measurements.size());
|
||||
for (size_t ii = 0; ii < measurements.size(); ++ii) {
|
||||
calibratedMeasurements[ii] << measurements[ii].point3();
|
||||
}
|
||||
return calibratedMeasurements;
|
||||
}
|
||||
|
||||
/**
|
||||
* Function to triangulate 3D landmark point from an arbitrary number
|
||||
* of poses (at least 2) using the DLT. The function checks that the
|
||||
|
@ -341,41 +414,55 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
|
|||
* @param measurements A vector of camera measurements
|
||||
* @param rank_tol rank tolerance, default 1e-9
|
||||
* @param optimize Flag to turn on nonlinear refinement of triangulation
|
||||
* @param useLOST whether to use the LOST algorithm instead of DLT
|
||||
* @return Returns a Point3
|
||||
*/
|
||||
template<class CALIBRATION>
|
||||
template <class CALIBRATION>
|
||||
Point3 triangulatePoint3(const std::vector<Pose3>& poses,
|
||||
boost::shared_ptr<CALIBRATION> sharedCal,
|
||||
const Point2Vector& measurements, double rank_tol = 1e-9,
|
||||
bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr) {
|
||||
|
||||
boost::shared_ptr<CALIBRATION> sharedCal,
|
||||
const Point2Vector& measurements,
|
||||
double rank_tol = 1e-9, bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false) {
|
||||
assert(poses.size() == measurements.size());
|
||||
if (poses.size() < 2)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
|
||||
|
||||
// Undistort the measurements, leaving only the pinhole elements in effect.
|
||||
auto undistortedMeasurements =
|
||||
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
|
||||
if (poses.size() < 2) throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// Triangulate linearly
|
||||
Point3 point =
|
||||
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
|
||||
Point3 point;
|
||||
if (useLOST) {
|
||||
// Reduce input noise model to an isotropic noise model using the mean of
|
||||
// the diagonal.
|
||||
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
|
||||
SharedIsotropic measurementNoise =
|
||||
noiseModel::Isotropic::Sigma(2, measurementSigma);
|
||||
// calibrate the measurements to obtain homogeneous coordinates in image
|
||||
// plane.
|
||||
auto calibratedMeasurements =
|
||||
calibrateMeasurementsShared<CALIBRATION>(*sharedCal, measurements);
|
||||
|
||||
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
|
||||
} else {
|
||||
// construct projection matrices from poses & calibration
|
||||
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
|
||||
|
||||
// Undistort the measurements, leaving only the pinhole elements in effect.
|
||||
auto undistortedMeasurements =
|
||||
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
|
||||
|
||||
point =
|
||||
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
|
||||
}
|
||||
|
||||
// Then refine using non-linear optimization
|
||||
if (optimize)
|
||||
point = triangulateNonlinear<CALIBRATION> //
|
||||
point = triangulateNonlinear<CALIBRATION> //
|
||||
(poses, sharedCal, measurements, point, model);
|
||||
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
// verify that the triangulated point lies in front of all cameras
|
||||
for(const Pose3& pose: poses) {
|
||||
for (const Pose3& pose : poses) {
|
||||
const Point3& p_local = pose.transformTo(point);
|
||||
if (p_local.z() <= 0)
|
||||
throw(TriangulationCheiralityException());
|
||||
if (p_local.z() <= 0) throw(TriangulationCheiralityException());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -392,41 +479,63 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
|
|||
* @param measurements A vector of camera measurements
|
||||
* @param rank_tol rank tolerance, default 1e-9
|
||||
* @param optimize Flag to turn on nonlinear refinement of triangulation
|
||||
* @param useLOST whether to use the LOST algorithm instead of
|
||||
* DLT
|
||||
* @return Returns a Point3
|
||||
*/
|
||||
template<class CAMERA>
|
||||
Point3 triangulatePoint3(
|
||||
const CameraSet<CAMERA>& cameras,
|
||||
const typename CAMERA::MeasurementVector& measurements, double rank_tol = 1e-9,
|
||||
bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr) {
|
||||
|
||||
template <class CAMERA>
|
||||
Point3 triangulatePoint3(const CameraSet<CAMERA>& cameras,
|
||||
const typename CAMERA::MeasurementVector& measurements,
|
||||
double rank_tol = 1e-9, bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false) {
|
||||
size_t m = cameras.size();
|
||||
assert(measurements.size() == m);
|
||||
|
||||
if (m < 2)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
if (m < 2) throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
// Triangulate linearly
|
||||
Point3 point;
|
||||
if (useLOST) {
|
||||
// Reduce input noise model to an isotropic noise model using the mean of
|
||||
// the diagonal.
|
||||
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
|
||||
SharedIsotropic measurementNoise =
|
||||
noiseModel::Isotropic::Sigma(2, measurementSigma);
|
||||
|
||||
// Undistort the measurements, leaving only the pinhole elements in effect.
|
||||
auto undistortedMeasurements =
|
||||
undistortMeasurements<CAMERA>(cameras, measurements);
|
||||
// construct poses from cameras.
|
||||
std::vector<Pose3> poses;
|
||||
poses.reserve(cameras.size());
|
||||
for (const auto& camera : cameras) poses.push_back(camera.pose());
|
||||
|
||||
Point3 point =
|
||||
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
|
||||
// calibrate the measurements to obtain homogeneous coordinates in image
|
||||
// plane.
|
||||
auto calibratedMeasurements =
|
||||
calibrateMeasurements<CAMERA>(cameras, measurements);
|
||||
|
||||
// Then refine using non-linear optimization
|
||||
if (optimize)
|
||||
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
|
||||
} else {
|
||||
// construct projection matrices from poses & calibration
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
|
||||
// Undistort the measurements, leaving only the pinhole elements in effect.
|
||||
auto undistortedMeasurements =
|
||||
undistortMeasurements<CAMERA>(cameras, measurements);
|
||||
|
||||
point =
|
||||
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
|
||||
}
|
||||
|
||||
// Then refine using non-linear optimization
|
||||
if (optimize) {
|
||||
point = triangulateNonlinear<CAMERA>(cameras, measurements, point, model);
|
||||
}
|
||||
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
// verify that the triangulated point lies in front of all cameras
|
||||
for(const CAMERA& camera: cameras) {
|
||||
for (const CAMERA& camera : cameras) {
|
||||
const Point3& p_local = camera.pose().transformTo(point);
|
||||
if (p_local.z() <= 0)
|
||||
throw(TriangulationCheiralityException());
|
||||
if (p_local.z() <= 0) throw(TriangulationCheiralityException());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -434,14 +543,14 @@ Point3 triangulatePoint3(
|
|||
}
|
||||
|
||||
/// Pinhole-specific version
|
||||
template<class CALIBRATION>
|
||||
Point3 triangulatePoint3(
|
||||
const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
|
||||
const Point2Vector& measurements, double rank_tol = 1e-9,
|
||||
bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr) {
|
||||
return triangulatePoint3<PinholeCamera<CALIBRATION> > //
|
||||
(cameras, measurements, rank_tol, optimize, model);
|
||||
template <class CALIBRATION>
|
||||
Point3 triangulatePoint3(const CameraSet<PinholeCamera<CALIBRATION>>& cameras,
|
||||
const Point2Vector& measurements,
|
||||
double rank_tol = 1e-9, bool optimize = false,
|
||||
const SharedNoiseModel& model = nullptr,
|
||||
const bool useLOST = false) {
|
||||
return triangulatePoint3<PinholeCamera<CALIBRATION>> //
|
||||
(cameras, measurements, rank_tol, optimize, model, useLOST);
|
||||
}
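Tying the overloads above together, a hypothetical call that switches the linear initialization to LOST while leaving the rest of the pipeline unchanged might look like this (camera poses, calibration, and the landmark are invented fixtures):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>

gtsam::Point3 exampleUseLOST() {
  using namespace gtsam;
  Cal3_S2 K(1500.0, 1200.0, 0.0, 640.0, 480.0);
  CameraSet<PinholeCamera<Cal3_S2>> cameras;
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(0, 0, 0)), K));
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(1, 0, 0)), K));

  Point3 landmark(0.5, 0.2, 5.0);  // in front of both cameras (z > 0)
  Point2Vector measurements;
  for (size_t i = 0; i < cameras.size(); i++)
    measurements.push_back(cameras[i].project(landmark));

  // The last argument selects LOST instead of the default DLT initialization.
  return triangulatePoint3<Cal3_S2>(cameras, measurements, /*rank_tol=*/1e-9,
                                    /*optimize=*/false, /*model=*/nullptr,
                                    /*useLOST=*/true);
}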
|
||||
|
||||
struct GTSAM_EXPORT TriangulationParameters {
|
||||
|
|
|
@ -11,4 +11,7 @@
|
|||
|
||||
@}
|
||||
|
||||
*/
|
||||
\defgroup SLAM Useful SLAM components
|
||||
@{ @}
|
||||
|
||||
*/
|
||||
|
|
|
@ -66,27 +66,6 @@ class KeySet {
|
|||
void serialize() const;
|
||||
};
|
||||
|
||||
// Actually a vector<Key>
|
||||
class KeyVector {
|
||||
KeyVector();
|
||||
KeyVector(const gtsam::KeyVector& other);
|
||||
|
||||
// Note: no print function
|
||||
|
||||
// common STL methods
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
void clear();
|
||||
|
||||
// structure specific methods
|
||||
size_t at(size_t i) const;
|
||||
size_t front() const;
|
||||
size_t back() const;
|
||||
void push_back(size_t key) const;
|
||||
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
// Actually a FastMap<Key,int>
|
||||
class KeyGroupMap {
|
||||
KeyGroupMap();
|
||||
|
|
|
@ -697,6 +697,12 @@ namespace gtsam {
|
|||
return robust_->loss(std::sqrt(squared_distance));
|
||||
}
|
||||
|
||||
// NOTE: This override is needed because the base version's whiten() would apply the robust reweighting,
|
||||
// which is incorrect!
|
||||
double squaredMahalanobisDistance(const Vector& v) const override {
|
||||
return noise_->squaredMahalanobisDistance(v);
|
||||
}
|
||||
|
||||
// These are really robust iterated re-weighting support functions
|
||||
virtual void WhitenSystem(Vector& b) const;
|
||||
void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
|
||||
|
|
|
@ -279,7 +279,6 @@ virtual class GaussianFactor {
|
|||
virtual class JacobianFactor : gtsam::GaussianFactor {
|
||||
//Constructors
|
||||
JacobianFactor();
|
||||
JacobianFactor(const gtsam::GaussianFactor& factor);
|
||||
JacobianFactor(Vector b_in);
|
||||
JacobianFactor(size_t i1, Matrix A1, Vector b,
|
||||
const gtsam::noiseModel::Diagonal* model);
|
||||
|
@ -295,6 +294,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
|
|||
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
|
||||
const gtsam::Ordering& ordering,
|
||||
const gtsam::VariableSlots& p_variableSlots);
|
||||
JacobianFactor(const gtsam::GaussianFactor& factor);
|
||||
|
||||
//Testable
|
||||
void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
|
||||
|
|
|
@ -661,12 +661,13 @@ TEST(NoiseModel, robustNoiseDCS)
|
|||
TEST(NoiseModel, robustNoiseL2WithDeadZone)
|
||||
{
|
||||
double dead_zone_size = 1.0;
|
||||
SharedNoiseModel robust = noiseModel::Robust::Create(
|
||||
auto robust = noiseModel::Robust::Create(
|
||||
noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size),
|
||||
Unit::Create(3));
|
||||
|
||||
for (int i = 0; i < 5; i++) {
|
||||
Vector3 error = Vector3(i, 0, 0);
|
||||
Vector error = Vector3(i, 0, 0);
|
||||
robust->WhitenSystem(error);
|
||||
DOUBLES_EQUAL(std::fmax(0, i - dead_zone_size) * i,
|
||||
robust->squaredMahalanobisDistance(error), 1e-8);
|
||||
}
|
||||
|
|
|
@ -123,7 +123,7 @@ public:
|
|||
* it is received from the IMU) so as to avoid costly integration at time of
|
||||
* factor construction.
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType {
|
||||
|
||||
|
@ -252,7 +252,7 @@ public:
|
|||
* the correlation between the bias uncertainty and the preintegrated
|
||||
* measurements uncertainty.
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT CombinedImuFactor: public NoiseModelFactor6<Pose3, Vector3, Pose3,
|
||||
Vector3, imuBias::ConstantBias, imuBias::ConstantBias> {
|
||||
|
|
|
@ -66,7 +66,7 @@ typedef ManifoldPreintegration PreintegrationType;
|
|||
* as soon as it is received from the IMU) so as to avoid costly integration
|
||||
* at time of factor construction.
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT PreintegratedImuMeasurements: public PreintegrationType {
|
||||
|
||||
|
@ -165,7 +165,7 @@ public:
|
|||
* (which are usually slowly varying quantities), which is up to the caller.
|
||||
* See also CombinedImuFactor for a class that does this for you.
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT ImuFactor: public NoiseModelFactor5<Pose3, Vector3, Pose3, Vector3,
|
||||
imuBias::ConstantBias> {
|
||||
|
@ -256,7 +256,7 @@ public:
|
|||
|
||||
/**
|
||||
* ImuFactor2 is a ternary factor that uses NavStates rather than Pose/Velocity.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT ImuFactor2 : public NoiseModelFactor3<NavState, NavState, imuBias::ConstantBias> {
|
||||
private:
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using JacobianVector = std::vector<Matrix>;
|
||||
|
|
|
@ -42,7 +42,7 @@ static double Chi2inv(const double alpha, const size_t dofs) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
template<class GncParameters>
|
||||
class GTSAM_EXPORT GncOptimizer {
|
||||
class GncOptimizer {
|
||||
public:
|
||||
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
|
||||
typedef typename GncParameters::OptimizerType BaseOptimizer;
|
||||
|
@ -207,9 +207,11 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
std::cout << "GNC Optimizer stopped because all measurements are already known to be inliers or outliers"
|
||||
<< std::endl;
|
||||
}
|
||||
if (params_.verbosity >= GncParameters::Verbosity::MU) {
|
||||
std::cout << "mu: " << mu << std::endl;
|
||||
}
|
||||
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
|
||||
result.print("result\n");
|
||||
std::cout << "mu: " << mu << std::endl;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@ -218,12 +220,16 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
for (iter = 0; iter < params_.maxIterations; iter++) {
|
||||
|
||||
// display info
|
||||
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
|
||||
if (params_.verbosity >= GncParameters::Verbosity::MU) {
|
||||
std::cout << "iter: " << iter << std::endl;
|
||||
result.print("result\n");
|
||||
std::cout << "mu: " << mu << std::endl;
|
||||
}
|
||||
if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
|
||||
std::cout << "weights: " << weights_ << std::endl;
|
||||
}
|
||||
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
|
||||
result.print("result\n");
|
||||
}
|
||||
// weights update
|
||||
weights_ = calculateWeights(result, mu);
|
||||
|
||||
|
@ -255,10 +261,12 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
|
||||
std::cout << "final iterations: " << iter << std::endl;
|
||||
std::cout << "final mu: " << mu << std::endl;
|
||||
std::cout << "final weights: " << weights_ << std::endl;
|
||||
std::cout << "previous cost: " << prev_cost << std::endl;
|
||||
std::cout << "current cost: " << cost << std::endl;
|
||||
}
|
||||
if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
|
||||
std::cout << "final weights: " << weights_ << std::endl;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -293,6 +301,11 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k]) ) : mu_init;
|
||||
}
|
||||
}
|
||||
if (mu_init >= 0 && mu_init < 1e-6){
|
||||
mu_init = 1e-6; // if mu ~ 0 (but positive), that means we have measurements with large errors,
|
||||
// i.e., rk > barcSq_[k] and rk very large, hence we threshold to 1e-6 to avoid mu = 0
|
||||
}
|
||||
|
||||
return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1; // if mu <= 0 or mu = inf, return -1,
|
||||
// which leads to termination of the main gnc loop. In this case, all residuals are already below the threshold
|
||||
// and there is no need to robustify (TLS = least squares)
|
||||
|
@ -340,8 +353,10 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
bool checkCostConvergence(const double cost, const double prev_cost) const {
|
||||
bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
|
||||
< params_.relativeCostTol;
|
||||
if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
|
||||
std::cout << "checkCostConvergence = true " << std::endl;
|
||||
if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY){
|
||||
std::cout << "checkCostConvergence = true (prev. cost = " << prev_cost
|
||||
<< ", curr. cost = " << cost << ")" << std::endl;
|
||||
}
|
||||
return costConverged;
|
||||
}
|
||||
|
||||
|
@ -436,18 +451,16 @@ class GTSAM_EXPORT GncOptimizer {
|
|||
return weights;
|
||||
}
|
||||
case GncLossType::TLS: { // use eq (14) in GNC paper
|
||||
double upperbound = (mu + 1) / mu * barcSq_.maxCoeff();
|
||||
double lowerbound = mu / (mu + 1) * barcSq_.minCoeff();
|
||||
for (size_t k : unknownWeights) {
|
||||
if (nfg_[k]) {
|
||||
double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
|
||||
if (u2_k >= upperbound) {
|
||||
double upperbound = (mu + 1) / mu * barcSq_[k];
|
||||
double lowerbound = mu / (mu + 1) * barcSq_[k];
|
||||
weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - mu;
|
||||
if (u2_k >= upperbound || weights[k] < 0) {
|
||||
weights[k] = 0;
|
||||
} else if (u2_k <= lowerbound) {
|
||||
} else if (u2_k <= lowerbound || weights[k] > 1) {
|
||||
weights[k] = 1;
|
||||
} else {
|
||||
weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k)
|
||||
- mu;
|
||||
}
|
||||
}
|
||||
}
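For reference, the per-factor TLS update implemented above is eq. (14) of the GNC paper, restated here with r_k^2 the whitened squared residual (u2_k in the code) and \bar{c}_k^2 = barcSq_[k]:

\[
w_k =
\begin{cases}
  0, & r_k^2 \ge \tfrac{\mu+1}{\mu}\,\bar c_k^{\,2},\\[4pt]
  \sqrt{\dfrac{\bar c_k^{\,2}\,\mu(\mu+1)}{r_k^2}} - \mu, & \text{otherwise},\\[8pt]
  1, & r_k^2 \le \tfrac{\mu}{\mu+1}\,\bar c_k^{\,2},
\end{cases}
\]

with the middle branch clamped to [0, 1], exactly as the bound checks in the code enforce.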
|
||||
|
|
|
@ -39,7 +39,7 @@ enum GncLossType {
|
|||
};
|
||||
|
||||
template<class BaseOptimizerParameters>
|
||||
class GTSAM_EXPORT GncParams {
|
||||
class GncParams {
|
||||
public:
|
||||
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
|
||||
typedef typename BaseOptimizerParameters::OptimizerType OptimizerType;
|
||||
|
@ -48,6 +48,8 @@ class GTSAM_EXPORT GncParams {
|
|||
enum Verbosity {
|
||||
SILENT = 0,
|
||||
SUMMARY,
|
||||
MU,
|
||||
WEIGHTS,
|
||||
VALUES
|
||||
};
|
||||
|
||||
|
@ -70,8 +72,14 @@ class GTSAM_EXPORT GncParams {
|
|||
double relativeCostTol = 1e-5; ///< If relative cost change is below this threshold, stop iterating
|
||||
double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
|
||||
Verbosity verbosity = SILENT; ///< Verbosity level
|
||||
std::vector<size_t> knownInliers = std::vector<size_t>(); ///< Slots in the factor graph corresponding to measurements that we know are inliers
|
||||
std::vector<size_t> knownOutliers = std::vector<size_t>(); ///< Slots in the factor graph corresponding to measurements that we know are outliers
|
||||
|
||||
//TODO(Varun) replace IndexVector with vector<size_t> once pybind11/stl.h is globally enabled.
|
||||
/// Use IndexVector for inliers and outliers since it is fast and easily wrapped
|
||||
using IndexVector = FastVector<uint64_t>;
|
||||
///< Slots in the factor graph corresponding to measurements that we know are inliers
|
||||
IndexVector knownInliers = IndexVector();
|
||||
///< Slots in the factor graph corresponding to measurements that we know are outliers
|
||||
IndexVector knownOutliers = IndexVector();
|
||||
|
||||
/// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
|
||||
void setLossType(const GncLossType type) {
|
||||
|
@ -112,7 +120,7 @@ class GTSAM_EXPORT GncParams {
|
|||
* This functionality is commonly used in SLAM when one may assume the odometry is outlier free, and
|
||||
* only apply GNC to prune outliers from the loop closures.
|
||||
* */
|
||||
void setKnownInliers(const std::vector<size_t>& knownIn) {
|
||||
void setKnownInliers(const IndexVector& knownIn) {
|
||||
for (size_t i = 0; i < knownIn.size(); i++){
|
||||
knownInliers.push_back(knownIn[i]);
|
||||
}
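A hedged sketch of the intended call pattern with the new IndexVector type (the graph, the initial values, and the convention that the first nrOdometry slots are odometry factors are all assumptions of this example, not part of the API):

#include <gtsam/nonlinear/GncOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

// Sketch: treat the first nrOdometry factor slots as known inliers so GNC
// only applies outlier rejection to the remaining (e.g. loop-closure) factors.
gtsam::Values robustSolve(const gtsam::NonlinearFactorGraph& graph,
                          const gtsam::Values& initial, size_t nrOdometry) {
  using namespace gtsam;
  GncParams<LevenbergMarquardtParams> params;
  GncParams<LevenbergMarquardtParams>::IndexVector knownIn;
  for (size_t k = 0; k < nrOdometry; k++) knownIn.push_back(k);
  params.setKnownInliers(knownIn);
  GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(graph, initial, params);
  return gnc.optimize();
}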
|
||||
|
@ -123,7 +131,7 @@ class GTSAM_EXPORT GncParams {
|
|||
* corresponds to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg,
|
||||
* and you provide knownOut = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15].
|
||||
* */
|
||||
void setKnownOutliers(const std::vector<size_t>& knownOut) {
|
||||
void setKnownOutliers(const IndexVector& knownOut) {
|
||||
for (size_t i = 0; i < knownOut.size(); i++){
|
||||
knownOutliers.push_back(knownOut[i]);
|
||||
}
|
||||
|
@ -161,7 +169,7 @@ class GTSAM_EXPORT GncParams {
|
|||
std::cout << "knownInliers: " << knownInliers[i] << "\n";
|
||||
for (size_t i = 0; i < knownOutliers.size(); i++)
|
||||
std::cout << "knownOutliers: " << knownOutliers[i] << "\n";
|
||||
baseOptimizerParams.print(str);
|
||||
baseOptimizerParams.print("Base optimizer params: ");
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -167,8 +167,9 @@ boost::shared_ptr<GaussianFactor> NoiseModelFactor::linearize(
|
|||
return GaussianFactor::shared_ptr(
|
||||
new JacobianFactor(terms, b,
|
||||
boost::static_pointer_cast<Constrained>(noiseModel_)->unit()));
|
||||
else
|
||||
else {
|
||||
return GaussianFactor::shared_ptr(new JacobianFactor(terms, b));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* A class for a soft prior on any Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
class PriorFactor: public NoiseModelFactor1<VALUE> {
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
//*************************************************************************
|
||||
// Custom Factor wrapping
|
||||
//*************************************************************************
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
#include <gtsam/nonlinear/CustomFactor.h>
|
||||
virtual class CustomFactor : gtsam::NoiseModelFactor {
|
||||
/*
|
||||
* Note CustomFactor will not be wrapped for MATLAB, as there is no supporting
|
||||
* machinery there. This is achieved by adding `gtsam::CustomFactor` to the
|
||||
* ignore list in `matlab/CMakeLists.txt`.
|
||||
*/
|
||||
CustomFactor();
|
||||
/*
|
||||
* Example:
|
||||
* ```
|
||||
* def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
|
||||
* <calculated error>
|
||||
* if not H is None:
|
||||
* <calculate the Jacobian>
|
||||
* H[0] = J1 # 2-d numpy array for a Jacobian block
|
||||
* H[1] = J2
|
||||
* ...
|
||||
* return error # 1-d numpy array
|
||||
*
|
||||
* cf = CustomFactor(noise_model, keys, error_func)
|
||||
* ```
|
||||
*/
|
||||
CustomFactor(const gtsam::SharedNoiseModel& noiseModel,
|
||||
const gtsam::KeyVector& keys,
|
||||
const gtsam::CustomErrorFunction& errorFunction);
|
||||
|
||||
void print(string s = "",
|
||||
gtsam::KeyFormatter keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
};
|
||||
|
||||
}
|
|
@ -99,11 +99,11 @@ class NonlinearFactorGraph {
|
|||
string dot(
|
||||
const gtsam::Values& values,
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
|
||||
const GraphvizFormatting& writer = GraphvizFormatting());
|
||||
const gtsam::GraphvizFormatting& writer = gtsam::GraphvizFormatting());
|
||||
void saveGraph(
|
||||
const string& s, const gtsam::Values& values,
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
|
||||
const GraphvizFormatting& writer = GraphvizFormatting()) const;
|
||||
const gtsam::GraphvizFormatting& writer = gtsam::GraphvizFormatting()) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
@ -135,37 +135,6 @@ virtual class NoiseModelFactor : gtsam::NonlinearFactor {
|
|||
Vector whitenedError(const gtsam::Values& x) const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/CustomFactor.h>
|
||||
virtual class CustomFactor : gtsam::NoiseModelFactor {
|
||||
/*
|
||||
* Note CustomFactor will not be wrapped for MATLAB, as there is no supporting
|
||||
* machinery there. This is achieved by adding `gtsam::CustomFactor` to the
|
||||
* ignore list in `matlab/CMakeLists.txt`.
|
||||
*/
|
||||
CustomFactor();
|
||||
/*
|
||||
* Example:
|
||||
* ```
|
||||
* def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
|
||||
* <calculated error>
|
||||
* if not H is None:
|
||||
* <calculate the Jacobian>
|
||||
* H[0] = J1 # 2-d numpy array for a Jacobian block
|
||||
* H[1] = J2
|
||||
* ...
|
||||
* return error # 1-d numpy array
|
||||
*
|
||||
* cf = CustomFactor(noise_model, keys, error_func)
|
||||
* ```
|
||||
*/
|
||||
CustomFactor(const gtsam::SharedNoiseModel& noiseModel,
|
||||
const gtsam::KeyVector& keys,
|
||||
const gtsam::CustomErrorFunction& errorFunction);
|
||||
|
||||
void print(string s = "",
|
||||
gtsam::KeyFormatter keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
class Values {
|
||||
Values();
|
||||
|
@ -544,12 +513,34 @@ virtual class DoglegParams : gtsam::NonlinearOptimizerParams {
|
|||
};
|
||||
|
||||
#include <gtsam/nonlinear/GncParams.h>
|
||||
enum GncLossType {
|
||||
GM /*Geman McClure*/,
|
||||
TLS /*Truncated least squares*/
|
||||
};
|
||||
|
||||
template<PARAMS>
|
||||
virtual class GncParams {
|
||||
GncParams(const PARAMS& baseOptimizerParams);
|
||||
GncParams();
|
||||
void setVerbosityGNC(const This::Verbosity value);
|
||||
void print(const string& str) const;
|
||||
BaseOptimizerParameters baseOptimizerParams;
|
||||
gtsam::GncLossType lossType;
|
||||
size_t maxIterations;
|
||||
double muStep;
|
||||
double relativeCostTol;
|
||||
double weightsTol;
|
||||
Verbosity verbosity;
|
||||
gtsam::KeyVector knownInliers;
|
||||
gtsam::KeyVector knownOutliers;
|
||||
|
||||
void setLossType(const gtsam::GncLossType type);
|
||||
void setMaxIterations(const size_t maxIter);
|
||||
void setMuStep(const double step);
|
||||
void setRelativeCostTol(double value);
|
||||
void setWeightsTol(double value);
|
||||
void setVerbosityGNC(const gtsam::This::Verbosity value);
|
||||
void setKnownInliers(const gtsam::KeyVector& knownIn);
|
||||
void setKnownOutliers(const gtsam::KeyVector& knownOut);
|
||||
void print(const string& str = "GncParams: ") const;
|
||||
|
||||
enum Verbosity {
|
||||
SILENT,
|
||||
|
@ -597,6 +588,11 @@ virtual class GncOptimizer {
|
|||
GncOptimizer(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& initialValues,
|
||||
const PARAMS& params);
|
||||
void setInlierCostThresholds(const double inth);
|
||||
const Vector& getInlierCostThresholds();
|
||||
void setInlierCostThresholdsAtProbability(const double alpha);
|
||||
void setWeights(const Vector w);
|
||||
const Vector& getWeights();
|
||||
gtsam::Values optimize();
|
||||
};
|
||||
|
||||
|
@ -705,7 +701,7 @@ class ISAM2Result {
|
|||
/** Getters and Setters for all properties */
|
||||
size_t getVariablesRelinearized() const;
|
||||
size_t getVariablesReeliminated() const;
|
||||
FactorIndices getNewFactorsIndices() const;
|
||||
gtsam::FactorIndices getNewFactorsIndices() const;
|
||||
size_t getCliques() const;
|
||||
double getErrorBefore() const;
|
||||
double getErrorAfter() const;
|
||||
|
@ -873,7 +869,7 @@ template <T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2,
|
|||
gtsam::PinholeCamera<gtsam::Cal3Unified>,
|
||||
gtsam::imuBias::ConstantBias}>
|
||||
virtual class NonlinearEquality2 : gtsam::NoiseModelFactor {
|
||||
NonlinearEquality2(Key key1, Key key2, double mu = 1e4);
|
||||
NonlinearEquality2(gtsam::Key key1, gtsam::Key key2, double mu = 1e4);
|
||||
gtsam::Vector evaluateError(const T& x1, const T& x2);
|
||||
};
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* Binary factor for a bearing/range measurement
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <typename A1, typename A2,
|
||||
typename B = typename Bearing<A1, A2>::result_type,
|
||||
|
@ -79,9 +79,8 @@ class BearingRangeFactor
|
|||
{
|
||||
std::vector<Matrix> Hs(2);
|
||||
const auto &keys = Factor::keys();
|
||||
const Vector error = unwhitenedError(
|
||||
{{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}},
|
||||
Hs);
|
||||
const Vector error = this->unwhitenedError(
|
||||
{{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, Hs);
|
||||
if (H1) *H1 = Hs[0];
|
||||
if (H2) *H2 = Hs[1];
|
||||
return error;
|
||||
|
|
|
@ -108,5 +108,11 @@ typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2,
|
|||
typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Pose2, gtsam::Rot2,
|
||||
double>
|
||||
BearingRangeFactorPose2;
|
||||
typedef gtsam::BearingRangeFactor<gtsam::Pose3, gtsam::Point3, gtsam::Unit3,
|
||||
double>
|
||||
BearingRangeFactor3D;
|
||||
typedef gtsam::BearingRangeFactor<gtsam::Pose3, gtsam::Pose3, gtsam::Unit3,
|
||||
double>
|
||||
BearingRangeFactorPose3;
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -26,7 +26,7 @@ namespace gtsam {
|
|||
* A class for downdating an existing factor from a graph. The AntiFactor returns the same
|
||||
* linearized Hessian matrices of the original factor, but with the opposite sign. This effectively
|
||||
* cancels out any effects of the original factor during optimization.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class AntiFactor: public NonlinearFactor {
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class for a measurement predicted by "between(config[key1],config[key2])"
|
||||
* @tparam VALUE the Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
class BetweenFactor: public NoiseModelFactor2<VALUE, VALUE> {
|
||||
|
|
|
@ -27,7 +27,7 @@ namespace gtsam {
|
|||
* greater/less than a fixed threshold. The function
|
||||
* will need to have its value function implemented to return
|
||||
* a scalar for comparison.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
struct BoundingConstraint1: public NoiseModelFactor1<VALUE> {
|
||||
|
|
|
@ -25,7 +25,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* Binary factor between two Pose3 variables induced by an EssentialMatrix measurement
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2<Pose3, Pose3> {
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ namespace gtsam {
|
|||
/**
|
||||
* Non-linear factor for a constraint derived from a 2D measurement.
|
||||
* The calibration is unknown here compared to GenericProjectionFactor
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class CAMERA, class LANDMARK>
|
||||
class GeneralSFMFactor: public NoiseModelFactor2<CAMERA, LANDMARK> {
|
||||
|
|
|
@ -33,7 +33,7 @@ namespace gtsam {
|
|||
* Non-linear factor for a constraint derived from a 2D measurement.
|
||||
* The calibration is known here.
|
||||
* The main building block for visual SLAM.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <class POSE = Pose3, class LANDMARK = Point3,
|
||||
class CALIBRATION = Cal3_S2>
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating conditionally
|
||||
|
@ -39,7 +39,7 @@ namespace gtsam {
|
|||
* The factor only constrains poses (variable dimension is 6).
|
||||
* This factor requires that values contains the involved poses (Pose3).
|
||||
* If the calibration should be optimized, as well, use SmartProjectionFactor instead!
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <class CALIBRATION>
|
||||
class SmartProjectionPoseFactor
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating
|
||||
|
@ -46,7 +46,7 @@ namespace gtsam {
|
|||
* calibration (i.e., are from the same camera), use SmartProjectionPoseFactor
|
||||
* instead! If the calibration should be optimized, as well, use
|
||||
* SmartProjectionFactor instead!
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <class CAMERA>
|
||||
class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
|
||||
|
|
|
@ -25,7 +25,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* A Generic Stereo Factor
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class POSE, class LANDMARK>
|
||||
class GenericStereoFactor: public NoiseModelFactor2<POSE, LANDMARK> {
|
||||
|
|
|
@ -27,7 +27,7 @@ namespace gtsam {
|
|||
/**
|
||||
* Non-linear factor for a constraint derived from a 2D measurement.
|
||||
* The calibration and pose are assumed known.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class CAMERA>
|
||||
class TriangulationFactor: public NoiseModelFactor1<Point3> {
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
set(ignore_test "testNestedDissection.cpp")
|
||||
gtsamAddTestsGlob(partition "test*.cpp" "${ignore_test}" "gtsam_unstable;gtsam;metis-gtsam")
|
||||
gtsamAddTestsGlob(partition "test*.cpp" "${ignore_test}" "gtsam_unstable;gtsam;metis-gtsam-if")
|
||||
|
|
|
@ -28,7 +28,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class for a measurement predicted by "between(config[key1],config[key2])"
|
||||
* @tparam VALUE the Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
class BetweenFactorEM: public NonlinearFactor {
|
||||
|
|
|
@ -25,7 +25,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class to model GPS measurements, including a bias term which models
|
||||
* common-mode errors and that can be partially corrected if other sensors are used
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class BiasedGPSFactor: public NoiseModelFactor2<Pose3, Point3> {
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ namespace gtsam {
|
|||
/**
|
||||
* Non-linear factor for a constraint derived from a 2D measurement. The calibration is known here.
|
||||
* i.e., this is the main building block for visual SLAM.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class POSE, class LANDMARK, class CALIBRATION = Cal3_S2>
|
||||
class MultiProjectionFactor: public NoiseModelFactor {
|
||||
|
|
|
@ -26,7 +26,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class for a measurement predicted by "between(config[key1],config[key2])"
|
||||
* @tparam POSE the Pose type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class POSE>
|
||||
class PoseBetweenFactor: public NoiseModelFactor2<POSE, POSE> {
|
||||
|
|
|
@ -23,7 +23,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* A class for a soft prior on any Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class POSE>
|
||||
class PosePriorFactor: public NoiseModelFactor1<POSE> {
|
||||
|
|
|
@ -18,7 +18,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* A class for a measurement between a pose and a point.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<typename POSE = Pose3, typename POINT = Point3>
|
||||
class PoseToPointFactor : public NoiseModelFactor2<POSE, POINT> {
|
||||
|
@ -61,6 +61,12 @@ class PoseToPointFactor : public NoiseModelFactor2<POSE, POINT> {
|
|||
traits<POINT>::Equals(this->measured_, e->measured_, tol);
|
||||
}
|
||||
|
||||
/// @return a deep copy of this factor
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
|
||||
}
|
||||
|
||||
/** implement functions needed to derive from Factor */
|
||||
|
||||
/** vector of errors
|
||||
|
|
|
@ -28,7 +28,7 @@ namespace gtsam {
|
|||
/**
|
||||
* Non-linear factor for a constraint derived from a 2D measurement. The calibration is known here.
|
||||
* i.e., this is the main building block for visual SLAM.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class POSE, class LANDMARK, class CALIBRATION = Cal3_S2>
|
||||
class ProjectionFactorPPP: public NoiseModelFactor3<POSE, POSE, LANDMARK> {
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace gtsam {
|
|||
/**
|
||||
* Non-linear factor for a constraint derived from a 2D measurement. This factor
|
||||
* estimates the body pose, body-camera transform, 3D landmark, and calibration.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <class POSE, class LANDMARK, class CALIBRATION = Cal3_S2>
|
||||
class GTSAM_UNSTABLE_EXPORT ProjectionFactorPPPC
|
||||
|
|
|
@ -38,7 +38,7 @@ namespace gtsam {
|
|||
* define alpha = (t_p - t_A) / (t_B - t_A); we then use the pose
|
||||
* interpolated between A and B by alpha to project the corresponding
|
||||
* landmark to Point2.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
|
||||
class GTSAM_UNSTABLE_EXPORT ProjectionFactorRollingShutter
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert,
|
||||
|
@ -39,7 +39,7 @@ namespace gtsam {
|
|||
* shutter model of the camera with given readout time. The factor requires that
|
||||
* values contain (for each pixel observation) two consecutive camera poses from
|
||||
* which the pixel observation pose can be interpolated.
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template <class CAMERA>
|
||||
class GTSAM_UNSTABLE_EXPORT SmartProjectionPoseFactorRollingShutter
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace gtsam {
|
|||
|
||||
/**
|
||||
* Smart factor for range SLAM
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class SmartRangeFactor: public NoiseModelFactor {
|
||||
protected:
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert,
|
||||
|
@ -38,7 +38,7 @@ namespace gtsam {
|
|||
* calibration or the same calibration can be shared by multiple cameras. This
|
||||
* factor requires that values contain the involved poses and extrinsics (both
|
||||
* are Pose3 variables).
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
|
||||
: public SmartStereoProjectionFactor {
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert,
|
||||
|
@ -41,7 +41,7 @@ namespace gtsam {
|
|||
* has its own calibration.
|
||||
* The factor only constrains poses (variable dimension is 6).
|
||||
* This factor requires that values contains the involved poses (Pose3).
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionPoseFactor
|
||||
: public SmartStereoProjectionFactor {
|
||||
|
|
|
@ -29,7 +29,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class for a measurement predicted by "between(config[key1],config[key2])"
|
||||
* @tparam VALUE the Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
class TransformBtwRobotsUnaryFactor: public NonlinearFactor { // TODO why not NoiseModelFactor1 ?
|
||||
|
|
|
@ -31,7 +31,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A class for a measurement predicted by "between(config[key1],config[key2])"
|
||||
* @tparam VALUE the Value type
|
||||
* @addtogroup SLAM
|
||||
* @ingroup SLAM
|
||||
*/
|
||||
template<class VALUE>
|
||||
class TransformBtwRobotsUnaryFactorEM: public NonlinearFactor {
|
||||
|
|
|
@ -73,6 +73,7 @@ set(interface_files
|
|||
${GTSAM_SOURCE_DIR}/gtsam/geometry/geometry.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/linear/linear.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/nonlinear/custom.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/symbolic/symbolic.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/sam/sam.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/slam/slam.i
|
||||
|
@ -98,7 +99,6 @@ endif(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX)
|
|||
|
||||
# Record the root dir for gtsam - needed during external builds, e.g., ROS
|
||||
set(GTSAM_SOURCE_ROOT_DIR ${GTSAM_SOURCE_DIR})
|
||||
message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]")
|
||||
|
||||
# Tests message(STATUS "Installing Matlab Toolbox")
|
||||
install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "*.m;*.fig")
|
||||
|
|
|
@ -61,6 +61,7 @@ set(interface_headers
|
|||
${PROJECT_SOURCE_DIR}/gtsam/geometry/geometry.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/linear/linear.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/nonlinear/custom.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/symbolic/symbolic.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/sam/sam.i
|
||||
${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i
|
||||
|
@ -104,7 +105,8 @@ copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
|
|||
# Hack to get python test and util files copied every time they are modified
|
||||
file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py")
|
||||
foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
|
||||
configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY)
|
||||
get_filename_component(test_file_name ${test_file} NAME)
|
||||
configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file_name}" COPYONLY)
|
||||
endforeach()
|
||||
file(GLOB GTSAM_PYTHON_UTIL_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/utils/*.py")
|
||||
foreach(util_file ${GTSAM_PYTHON_UTIL_FILES})
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
/* Please refer to:
|
||||
* https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
|
||||
* These are required to save one copy operation on Python calls.
|
||||
*
|
||||
* NOTES
|
||||
* =================
|
||||
*
|
||||
* `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11
|
||||
* automatic STL binding, such that the raw objects can be accessed in Python.
|
||||
* Without this they will be automatically converted to a Python object, and all
|
||||
* mutations on Python side will not be reflected on C++.
|
||||
*/
|
|
@ -9,4 +9,4 @@
|
|||
* automatic STL binding, such that the raw objects can be accessed in Python.
|
||||
* Without this they will be automatically converted to a Python object, and all
|
||||
* mutations on Python side will not be reflected on C++.
|
||||
*/
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
/* Please refer to:
|
||||
* https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
|
||||
* These are required to save one copy operation on Python calls.
|
||||
*
|
||||
* NOTES
|
||||
* =================
|
||||
*
|
||||
* `py::bind_vector` and similar machinery gives the std container a Python-like
|
||||
* interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
|
||||
* with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
|
||||
* and saves one copy operation.
|
||||
*/
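To make the two notes above concrete, the opaque-container pattern they describe generally looks like the following generic pybind11 sketch (the module name and vector type are placeholders, not GTSAM's actual specialization files):

#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <vector>

// Mark the container opaque so pybind11 does not copy-convert it to a Python list.
PYBIND11_MAKE_OPAQUE(std::vector<double>);

PYBIND11_MODULE(example_module, m) {
  // bind_vector then gives it a list-like Python interface; mutations made
  // from Python are reflected in the underlying C++ object.
  pybind11::bind_vector<std::vector<double>>(m, "DoubleVector");
}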
|
|
@ -9,4 +9,4 @@
 * interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
 * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
 * and saves one copy operation.
 */
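The same pattern extends to the other `py::bind_*` helpers the note alludes to; the sketch below uses `py::bind_map` with a made-up map type rather than one of the containers GTSAM actually specializes.

// Minimal sketch, assuming plain pybind11; illustrative types only.
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <map>
#include <string>

PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);

PYBIND11_MODULE(example_bind_map, m) {
  // bind_map gives the C++ map a dict-like Python interface (indexing, len(),
  // iteration, membership tests) without the <pybind11/stl.h> copy, so writes
  // made from Python land directly in the underlying C++ map.
  pybind11::bind_map<std::map<std::string, double>>(m, "StringDoubleMap");
}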
@ -11,9 +11,8 @@ Author: Varun Agrawal
"""
import unittest

import numpy as np

import gtsam
import numpy as np
from gtsam.utils.test_case import GtsamTestCase


@ -21,6 +20,7 @@ class TestNonlinearEquality2Factor(GtsamTestCase):
    """
    Test various instantiations of NonlinearEquality2.
    """

    def test_point3(self):
        """Test for Point3 version."""
        factor = gtsam.NonlinearEquality2Point3(0, 1)
@ -30,5 +30,23 @@ class TestNonlinearEquality2Factor(GtsamTestCase):
        np.testing.assert_allclose(error, np.zeros(3))


class TestJacobianFactor(GtsamTestCase):
    """Test JacobianFactor"""

    def test_gaussian_factor_graph(self):
        """Test construction from GaussianFactorGraph."""
        gfg = gtsam.GaussianFactorGraph()
        jf = gtsam.JacobianFactor(gfg)
        self.assertIsInstance(jf, gtsam.JacobianFactor)

        nfg = gtsam.NonlinearFactorGraph()
        nfg.push_back(gtsam.PriorFactorDouble(1, 0.0, gtsam.noiseModel.Isotropic.Sigma(1, 1.0)))
        values = gtsam.Values()
        values.insert(1, 0.0)
        gfg = nfg.linearize(values)
        jf = gtsam.JacobianFactor(gfg)
        self.assertIsInstance(jf, gtsam.JacobianFactor)


if __name__ == "__main__":
    unittest.main()
@ -15,10 +15,12 @@ from __future__ import print_function
import unittest

import gtsam
from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters,
                   GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer,
                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph,
                   Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values)
from gtsam import (
    DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer,
    GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer,
    LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
    PriorFactorPoint2, Values
)
from gtsam.utils.test_case import GtsamTestCase

KEY1 = 1

@ -27,7 +29,6 @@ KEY2 = 2

class TestScenario(GtsamTestCase):
    """Do trivial test with three optimizer variants."""

    def setUp(self):
        """Set up the optimization problem and ordering"""
        # create graph
@ -83,16 +84,83 @@ class TestScenario(GtsamTestCase):
        actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    def test_gnc_params(self):
        base_params = LevenbergMarquardtParams()
        # Test base params
        for base_max_iters in (50, 100):
            base_params.setMaxIterations(base_max_iters)
            params = GncLMParams(base_params)
            self.assertEqual(params.baseOptimizerParams.getMaxIterations(), base_max_iters)

        # Test printing
        params_str = str(params)
        for s in (
            "lossType",
            "maxIterations",
            "muStep",
            "relativeCostTol",
            "weightsTol",
            "verbosity",
        ):
            self.assertTrue(s in params_str)

        # Test each parameter
        for loss_type in (GncLossType.TLS, GncLossType.GM):
            params.setLossType(loss_type)  # Default is TLS
            self.assertEqual(params.lossType, loss_type)
        for max_iter in (1, 10, 100):
            params.setMaxIterations(max_iter)
            self.assertEqual(params.maxIterations, max_iter)
        for mu_step in (1.1, 1.2, 1.5):
            params.setMuStep(mu_step)
            self.assertEqual(params.muStep, mu_step)
        for rel_cost_tol in (1e-5, 1e-6, 1e-7):
            params.setRelativeCostTol(rel_cost_tol)
            self.assertEqual(params.relativeCostTol, rel_cost_tol)
        for weights_tol in (1e-4, 1e-3, 1e-2):
            params.setWeightsTol(weights_tol)
            self.assertEqual(params.weightsTol, weights_tol)
        for i in (0, 1, 2):
            verb = GncLMParams.Verbosity(i)
            params.setVerbosityGNC(verb)
            self.assertEqual(params.verbosity, verb)
        for inl in ([], [10], [0, 100]):
            params.setKnownInliers(inl)
            self.assertEqual(params.knownInliers, inl)
        params.knownInliers = []
        for out in ([], [1], [0, 10]):
            params.setKnownOutliers(out)
            self.assertEqual(params.knownOutliers, out)
        params.knownOutliers = []

        # Test optimizer params
        optimizer = GncLMOptimizer(self.fg, self.initial_values, params)
        for ict_factor in (0.9, 1.1):
            new_ict = ict_factor * optimizer.getInlierCostThresholds()
            optimizer.setInlierCostThresholds(new_ict)
            self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict)
        for w_factor in (0.8, 0.9):
            new_weights = w_factor * optimizer.getWeights()
            optimizer.setWeights(new_weights)
            self.assertAlmostEqual(optimizer.getWeights(), new_weights)
        optimizer.setInlierCostThresholdsAtProbability(0.9)
        w1 = optimizer.getInlierCostThresholds()
        optimizer.setInlierCostThresholdsAtProbability(0.8)
        w2 = optimizer.getInlierCostThresholds()
        self.assertLess(w2, w1)

    def test_iteration_hook(self):
        # set up iteration hook to track some testable values
        iteration_count = 0
        final_error = 0
        final_values = None

        def iteration_hook(iter, error_before, error_after):
            nonlocal iteration_count, final_error, final_values
            iteration_count = iter
            final_error = error_after
            final_values = optimizer.values()

        # optimize
        params = LevenbergMarquardtParams.CeresDefaults()
        params.setOrdering(self.ordering)
@ -104,5 +172,6 @@ class TestScenario(GtsamTestCase):
        self.assertEqual(self.fg.error(actual), final_error)
        self.assertEqual(optimizer.iterations(), iteration_count)


if __name__ == "__main__":
    unittest.main()
@ -19,6 +19,62 @@ from gtsam import Point3, Pose3, Rot3, Point3Pairs
from gtsam.utils.test_case import GtsamTestCase


def numerical_derivative_pose(pose, method, delta=1e-5):
    jacobian = np.zeros((6, 6))
    for idx in range(6):
        xplus = np.zeros(6)
        xplus[idx] = delta
        xminus = np.zeros(6)
        xminus[idx] = -delta
        pose_plus = pose.retract(xplus).__getattribute__(method)()
        pose_minus = pose.retract(xminus).__getattribute__(method)()
        jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta)
    return jacobian


def numerical_derivative_2_poses(pose, other_pose, method, delta=1e-5, inputs=()):
    jacobian = np.zeros((6, 6))
    other_jacobian = np.zeros((6, 6))
    for idx in range(6):
        xplus = np.zeros(6)
        xplus[idx] = delta
        xminus = np.zeros(6)
        xminus[idx] = -delta

        pose_plus = pose.retract(xplus).__getattribute__(method)(*inputs, other_pose)
        pose_minus = pose.retract(xminus).__getattribute__(method)(*inputs, other_pose)
        jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta)

        other_pose_plus = pose.__getattribute__(method)(*inputs, other_pose.retract(xplus))
        other_pose_minus = pose.__getattribute__(method)(*inputs, other_pose.retract(xminus))
        other_jacobian[:, idx] = other_pose_minus.localCoordinates(other_pose_plus) / (2 * delta)
    return jacobian, other_jacobian


def numerical_derivative_pose_point(pose, point, method, delta=1e-5):
    jacobian = np.zeros((3, 6))
    point_jacobian = np.zeros((3, 3))
    for idx in range(6):
        xplus = np.zeros(6)
        xplus[idx] = delta
        xminus = np.zeros(6)
        xminus[idx] = -delta

        point_plus = pose.retract(xplus).__getattribute__(method)(point)
        point_minus = pose.retract(xminus).__getattribute__(method)(point)
        jacobian[:, idx] = (point_plus - point_minus) / (2 * delta)

        if idx < 3:
            xplus = np.zeros(3)
            xplus[idx] = delta
            xminus = np.zeros(3)
            xminus[idx] = -delta
            point_plus = pose.__getattribute__(method)(point + xplus)
            point_minus = pose.__getattribute__(method)(point + xminus)
            point_jacobian[:, idx] = (point_plus - point_minus) / (2 * delta)
    return jacobian, point_jacobian


class TestPose3(GtsamTestCase):
    """Test selected Pose3 methods."""

@ -30,6 +86,47 @@ class TestPose3(GtsamTestCase):
        actual = T2.between(T3)
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # test jacobians
        jacobian = np.zeros((6, 6), order='F')
        jacobian_other = np.zeros((6, 6), order='F')
        T2.between(T3, jacobian, jacobian_other)
        jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(T2, T3, 'between')
        self.gtsamAssertEquals(jacobian, jacobian_numerical)
        self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other)

    def test_inverse(self):
"""Test between method."""
        pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0))
        expected = Pose3(Rot3.Rodrigues(0, 0, math.pi/2), Point3(4, -2, 0))
        actual = pose.inverse()
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # test jacobians
        jacobian = np.zeros((6, 6), order='F')
        pose.inverse(jacobian)
        jacobian_numerical = numerical_derivative_pose(pose, 'inverse')
        self.gtsamAssertEquals(jacobian, jacobian_numerical)

    def test_slerp(self):
        """Test slerp method."""
        pose0 = gtsam.Pose3()
        pose1 = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0))
        actual_alpha_0 = pose0.slerp(0, pose1)
        self.gtsamAssertEquals(actual_alpha_0, pose0)
        actual_alpha_1 = pose0.slerp(1, pose1)
        self.gtsamAssertEquals(actual_alpha_1, pose1)
        actual_alpha_half = pose0.slerp(0.5, pose1)
        expected_alpha_half = Pose3(Rot3.Rodrigues(0, 0, -math.pi/4), Point3(0.17157288, 2.41421356, 0))
        self.gtsamAssertEquals(actual_alpha_half, expected_alpha_half, tol=1e-6)

        # test jacobians
        jacobian = np.zeros((6, 6), order='F')
        jacobian_other = np.zeros((6, 6), order='F')
        pose0.slerp(0.5, pose1, jacobian, jacobian_other)
        jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(pose0, pose1, 'slerp', inputs=[0.5])
        self.gtsamAssertEquals(jacobian, jacobian_numerical)
        self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other)

    def test_transformTo(self):
        """Test transformTo method."""
        pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0))
@ -37,6 +134,15 @@
        expected = Point3(2, 1, 10)
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # test jacobians
        point = Point3(3, 2, 10)
        jacobian_pose = np.zeros((3, 6), order='F')
        jacobian_point = np.zeros((3, 3), order='F')
        pose.transformTo(point, jacobian_pose, jacobian_point)
        jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformTo')
        self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose)
        self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point)

        # multi-point version
        points = np.stack([Point3(3, 2, 10), Point3(3, 2, 10)]).T
        actual_array = pose.transformTo(points)
@ -51,6 +157,15 @@
        expected = Point3(3, 2, 10)
        self.gtsamAssertEquals(actual, expected, 1e-6)

        # test jacobians
        point = Point3(3, 2, 10)
        jacobian_pose = np.zeros((3, 6), order='F')
        jacobian_point = np.zeros((3, 3), order='F')
        pose.transformFrom(point, jacobian_pose, jacobian_point)
        jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformFrom')
        self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose)
        self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point)

        # multi-point version
        points = np.stack([Point3(2, 1, 10), Point3(2, 1, 10)]).T
        actual_array = pose.transformFrom(points)
@ -122,4 +237,4 @@


if __name__ == "__main__":
    unittest.main()
@ -50,6 +50,34 @@ class TestSam(GtsamTestCase):
        self.assertEqual(range_measurement, measurement.range())
        self.gtsamAssertEquals(bearing_measurement, measurement.bearing())

    def test_BearingRangeFactor3D(self):
        """
        Test that `measured` works as expected for BearingRangeFactor3D.
        """
        bearing_measurement = gtsam.Unit3()
        range_measurement = 10.0
        factor = gtsam.BearingRangeFactor3D(
            1, 2, bearing_measurement, range_measurement,
            gtsam.noiseModel.Isotropic.Sigma(3, 1))
        measurement = factor.measured()

        self.assertEqual(range_measurement, measurement.range())
        self.gtsamAssertEquals(bearing_measurement, measurement.bearing())

    def test_BearingRangeFactorPose3(self):
        """
        Test that `measured` works as expected for BearingRangeFactorPose3.
        """
        range_measurement = 10.0
        bearing_measurement = gtsam.Unit3()
        factor = gtsam.BearingRangeFactorPose3(
            1, 2, bearing_measurement, range_measurement,
            gtsam.noiseModel.Isotropic.Sigma(3, 1))
        measurement = factor.measured()

        self.assertEqual(range_measurement, measurement.range())
        self.gtsamAssertEquals(bearing_measurement, measurement.bearing())


if __name__ == "__main__":
    unittest.main()
@ -567,7 +567,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

@ -644,7 +644,7 @@ TEST(GncOptimizer, barcsq) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

@ -691,7 +691,7 @@ TEST(GncOptimizer, setInlierCostThresholds) {
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams>::IndexVector knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);
@ -763,7 +763,7 @@ TEST(GncOptimizer, optimizeSmallPoseGraph) {
  // GNC
  // Note: in difficult instances, we set the odometry measurements to be
  // inliers, but this problem is simple enough to succeed even without that
  // assumption std::vector<size_t> knownInliers;
  // assumption GncParams<GaussNewtonParams>::IndexVector knownInliers;
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
                                                        gncParams);
@ -784,12 +784,12 @@ TEST(GncOptimizer, knownInliersAndOutliers) {
  // nonconvexity with known inliers and known outliers (check early stopping
  // when all measurements are known to be inliers or outliers)
  {
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(0);
    knownInliers.push_back(1);
    knownInliers.push_back(2);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@ -813,11 +813,11 @@ TEST(GncOptimizer, knownInliersAndOutliers) {

  // nonconvexity with known inliers and known outliers
  {
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(2);
    knownInliers.push_back(0);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@ -841,7 +841,7 @@ TEST(GncOptimizer, knownInliersAndOutliers) {

  // only known outliers
  {
    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);

    GncParams<GaussNewtonParams> gncParams;

@ -916,11 +916,11 @@ TEST(GncOptimizer, setWeights) {
  // initialize weights and also set known inliers/outliers
  {
    GncParams<GaussNewtonParams> gncParams;
    std::vector<size_t> knownInliers;
    GncParams<GaussNewtonParams>::IndexVector knownInliers;
    knownInliers.push_back(2);
    knownInliers.push_back(0);

    std::vector<size_t> knownOutliers;
    GncParams<GaussNewtonParams>::IndexVector knownOutliers;
    knownOutliers.push_back(3);
    gncParams.setKnownInliers(knownInliers);
    gncParams.setKnownOutliers(knownOutliers);
@ -0,0 +1,53 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testRobust.cpp
 * @brief Unit tests for Robust loss functions
 * @author Fan Jiang
 * @author Yetong Zhang
 * @date Apr 7, 2022
 **/

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>

using namespace gtsam;

TEST(RobustNoise, loss) {
  // Keys.
  gtsam::Key x1_key = 1;
  gtsam::Key x2_key = 2;

  auto gm = noiseModel::mEstimator::GemanMcClure::Create(1.0);
  auto noise = noiseModel::Robust::Create(gm, noiseModel::Unit::Create(1));

  auto factor = PriorFactor<double>(x1_key, 0.0, noise);
  auto between_factor = BetweenFactor<double>(x1_key, x2_key, 0.0, noise);

  Values values;
  values.insert(x1_key, 10.0);
  values.insert(x2_key, 0.0);

  EXPECT_DOUBLES_EQUAL(0.49505, factor.error(values), 1e-5);
  EXPECT_DOUBLES_EQUAL(0.49505, between_factor.error(values), 1e-5);
  EXPECT_DOUBLES_EQUAL(0.49505, gm->loss(10.0), 1e-5);
}

int main() {
  TestResult tr;

  return TestRegistry::runAllTests(tr);
}
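The 0.49505 expectation in the test above can be reproduced by hand, assuming the Geman-McClure loss has the common form rho(r) = 0.5 * c^2 * r^2 / (c^2 + r^2); the standalone check below is only an illustration of that arithmetic, not part of the test file.

// Illustrative sanity check, assuming rho(r) = 0.5 * c^2 * r^2 / (c^2 + r^2).
#include <cassert>
#include <cmath>

int main() {
  const double c = 1.0;   // matches GemanMcClure::Create(1.0)
  const double r = 10.0;  // residual: the prior is 0.0 and x1 is set to 10.0
  const double rho = 0.5 * (c * c) * (r * r) / (c * c + r * r);  // = 50/101
  assert(std::fabs(rho - 0.49505) < 1e-5);
  return 0;
}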
@ -242,6 +242,13 @@ function(wrap_library_internal interfaceHeader moduleName linkLibraries extraInc
  find_package(PythonInterp ${WRAP_PYTHON_VERSION} EXACT)
  find_package(PythonLibs ${WRAP_PYTHON_VERSION} EXACT)

  # Set the path separator for PYTHONPATH
  if(UNIX)
    set(GTWRAP_PATH_SEPARATOR ":")
  else()
    set(GTWRAP_PATH_SEPARATOR ";")
  endif()

  add_custom_command(
    OUTPUT ${generated_cpp_file}
    DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets}