new iterative.h/cpp compilation unit
parent d9fd502656
commit 863ee58c0f
@@ -101,7 +101,7 @@ testBinaryBayesNet_LDADD      = libgtsam.la
 # Gaussian inference
 headers += GaussianFactorSet.h
-sources += Errors.cpp VectorConfig.cpp GaussianFactor.cpp GaussianFactorGraph.cpp GaussianConditional.cpp GaussianBayesNet.cpp
+sources += Errors.cpp VectorConfig.cpp GaussianFactor.cpp GaussianFactorGraph.cpp GaussianConditional.cpp GaussianBayesNet.cpp iterative.cpp
 check_PROGRAMS += testVectorConfig testGaussianFactor testGaussianFactorGraph testGaussianConditional testGaussianBayesNet testIterative
 testVectorConfig_SOURCES        = testVectorConfig.cpp
 testVectorConfig_LDADD          = libgtsam.la
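The hunk above only covers the sources and check_PROGRAMS lists; the per-test build rules fall outside it. If testIterative is wired up the same way as the existing tests (an assumption, since those lines are not shown here), the corresponding Makefile.am entries would look like:

testIterative_SOURCES           = testIterative.cpp
testIterative_LDADD             = libgtsam.la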
@@ -0,0 +1,109 @@
+/*
+ * iterative.cpp
+ * @brief Iterative methods, implementation
+ * @author Frank Dellaert
+ * Created on: Dec 28, 2009
+ */
+
+#include "GaussianFactorGraph.h"
+#include "iterative.h"
+
+using namespace std;
+
+namespace gtsam {
+
+	/* ************************************************************************* */
+
+	/**
+	 * gradient of objective function 0.5*|Ax-b|^2 at x = A'*(Ax-b)
+	 */
+	Vector gradient(const System& Ab, const Vector& x) {
+		const Matrix& A = Ab.first;
+		const Vector& b = Ab.second;
+		return A ^ (A * x - b);
+	}
+
+	/**
+	 * Apply operator A
+	 */
+	Vector operator*(const System& Ab, const Vector& x) {
+		const Matrix& A = Ab.first;
+		return A * x;
+	}
+
+	/**
+	 * Apply operator A^T
+	 */
+	Vector operator^(const System& Ab, const Vector& x) {
+		const Matrix& A = Ab.first;
+		return A ^ x;
+	}
+
+	/* ************************************************************************* */
+	// Method of conjugate gradients (CG) template
+	// "System" class S needs gradient(S,v), e=S*v, v=S^e
+	// "Vector" class V needs dot(v,v), -v, v+v, s*v
+	// "Vector" class E needs dot(v,v)
+	template<class S, class V, class E>
+	V CGD(const S& Ab, V x, double threshold = 1e-9) {
+
+		// Start with g0 = A'*(A*x0-b), d0 = - g0,
+		// i.e., first step is in direction of negative gradient
+		V g = gradient(Ab, x);
+		V d = -g;
+		double prev_dotg = dot(g, g);
+
+		// loop at most n times
+		size_t n = x.size();
+		for (size_t k = 1; k <= n; k++) {
+
+			// calculate optimal step-size
+			E Ad = Ab * d;
+			double alpha = -dot(d, g) / dot(Ad, Ad);
+
+			// do step in new search direction
+			x = x + alpha * d;
+			if (k == n) break;
+
+			// update gradient
+			g = g + alpha * (Ab ^ Ad);
+
+			// check for convergence
+			double dotg = dot(g, g);
+			if (dotg < threshold) break;
+
+			// calculate new search direction
+			double beta = dotg / prev_dotg;
+			prev_dotg = dotg;
+			d = -g + beta * d;
+		}
+		return x;
+	}
+
+	/* ************************************************************************* */
+	Vector conjugateGradientDescent(const System& Ab, const Vector& x,
+			double threshold) {
+		return CGD<System, Vector, Vector> (Ab, x, threshold);
+	}
+
+	/* ************************************************************************* */
+	Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
+			const Vector& x, double threshold) {
+		System Ab = make_pair(A, b);
+		return CGD<System, Vector, Vector> (Ab, x, threshold);
+	}
+
+	/* ************************************************************************* */
+	VectorConfig gradient(const GaussianFactorGraph& fg, const VectorConfig& x) {
+		return fg.gradient(x);
+	}
+
+	/* ************************************************************************* */
+	VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg,
+			const VectorConfig& x, double threshold) {
+		return CGD<GaussianFactorGraph, VectorConfig, Errors> (fg, x, threshold);
+	}
+
+/* ************************************************************************* */
+
+} // namespace gtsam
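For readers checking the CGD template against the textbook method: this is conjugate gradients applied to the normal equations (CGLS/CGNR). Writing \(g_k = A^\top(Ax_k - b)\) for the gradient and \(d_k\) for the search direction, the updates in the loop above are

\[
\alpha_k = -\frac{d_k^\top g_k}{\lVert A d_k\rVert^2}, \qquad
x_{k+1} = x_k + \alpha_k d_k, \qquad
g_{k+1} = g_k + \alpha_k A^\top (A d_k),
\]
\[
\beta_k = \frac{g_{k+1}^\top g_{k+1}}{g_k^\top g_k}, \qquad
d_{k+1} = -g_{k+1} + \beta_k d_k,
\]

with termination after at most \(n = \dim(x)\) iterations or once \(\lVert g_{k+1}\rVert^2\) drops below the threshold.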
@@ -0,0 +1,34 @@
+/*
+ * iterative.h
+ * @brief Iterative methods, declarations
+ * @author Frank Dellaert
+ * Created on: Dec 28, 2009
+ */
+
+#include "Matrix.h"
+namespace gtsam {
+
+	class GaussianFactorGraph;
+	class VectorConfig;
+
+	typedef std::pair<Matrix, Vector> System; // linear system Ax = b as a (Matrix, Vector) pair
+
+	/**
+	 * Method of conjugate gradients (CG), System version
+	 */
+	Vector conjugateGradientDescent(const System& Ab, const Vector& x,
+			double threshold = 1e-9);
+
+	/**
+	 * Method of conjugate gradients (CG), Matrix version
+	 */
+	Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
+			const Vector& x, double threshold = 1e-9);
+
+	/**
+	 * Method of conjugate gradients (CG), Gaussian Factor Graph version
+	 */
+	VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg,
+			const VectorConfig& x, double threshold = 1e-9);
+
+} // namespace gtsam
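A minimal calling sketch for the Matrix overload declared above (illustration only, not part of this commit; Matrix_ and Vector_ are assumed to be the construction helpers used elsewhere in this codebase):

	// Hypothetical example: solve the 2x2 least-squares problem min |Ax - b|^2
	Matrix A = Matrix_(2, 2,
			4.0, 1.0,
			1.0, 3.0);
	Vector b = Vector_(2, 1.0, 2.0);
	Vector x0 = Vector_(2, 0.0, 0.0);   // initial estimate
	// CG on the normal equations converges in at most 2 steps for this system
	Vector x = conjugateGradientDescent(A, b, x0, 1e-9);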
@@ -10,102 +10,14 @@ using namespace boost::assign;
 #include <CppUnitLite/TestHarness.h>
 
 #include "Ordering.h"
+#include "iterative.h"
 #include "smallExample.h"
 
 using namespace std;
 using namespace gtsam;
 
-/* ************************************************************************* */
-VectorConfig gradient(const GaussianFactorGraph& Ab, const VectorConfig& x) {
-	return Ab.gradient(x);
-}
-
-/* ************************************************************************* */
-typedef pair<Matrix,Vector> System;
-
-/**
- * gradient of objective function 0.5*|Ax-b|^2 at x = A'*(Ax-b)
- */
-Vector gradient(const System& Ab, const Vector& x) {
-	const Matrix& A = Ab.first;
-	const Vector& b = Ab.second;
-	return A ^ (A * x - b);
-}
-
-/**
- * Apply operator A
- */
-Vector operator*(const System& Ab, const Vector& x) {
-	const Matrix& A = Ab.first;
-	return A * x;
-}
-
-/**
- * Apply operator A^T
- */
-Vector operator^(const System& Ab, const Vector& x) {
-	const Matrix& A = Ab.first;
-	return A ^ x;
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG)
-// "System" class S needs gradient(S,v), e=S*v, v=S^e
-// "Vector" class V needs dot(v,v), -v, v+v, s*v
-// "Vector" class E needs dot(v,v)
-template <class S, class V, class E>
-V CGD(const S& Ab, V x, double threshold = 1e-9) {
-
-	// Start with g0 = A'*(A*x0-b), d0 = - g0
-	// i.e., first step is in direction of negative gradient
-	V g = gradient(Ab, x);
-	V d = -g;
-	double prev_dotg = dot(g, g);
-
-	// loop max n times
-	size_t n = x.size();
-	for (int k = 1; k <= n; k++) {
-
-		// calculate optimal step-size
-		E Ad = Ab * d;
-		double alpha = -dot(d, g) / dot(Ad, Ad);
-
-		// do step in new search direction
-		x = x + alpha * d;
-		if (k == n) break;
-
-		// update gradient
-		g = g + alpha * (Ab ^ Ad);
-
-		// check for convergence
-		double dotg = dot(g, g);
-		if (dotg < threshold) break;
-
-		// calculate new search direction
-		double beta = dotg / prev_dotg;
-		prev_dotg = dotg;
-		d = -g + beta * d;
-	}
-	return x;
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG), Matrix version
-Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
-		const Vector& x, double threshold = 1e-9) {
-	System Ab = make_pair(A, b);
-	return CGD<System, Vector, Vector> (Ab, x);
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG), Gaussian Factor Graph version
-VectorConfig conjugateGradientDescent(const GaussianFactorGraph& Ab,
-		const VectorConfig& x, double threshold = 1e-9) {
-	return CGD<GaussianFactorGraph, VectorConfig, Errors> (Ab, x);
-}
-
 /* ************************************************************************* */
-TEST( GaussianFactorGraph, gradientDescent )
+TEST( Iterative, gradientDescent )
 {
 	// Expected solution
 	Ordering ord;
@@ -136,7 +48,7 @@ TEST( GaussianFactorGraph, gradientDescent )
 
 	// Do conjugate gradient descent, System version
 	System Ab = make_pair(A,b);
-	Vector actualX2 = CGD<System,Vector,Vector>(Ab,x0);
+	Vector actualX2 = conjugateGradientDescent(Ab,x0);
 	CHECK(assert_equal(expectedX,actualX2,1e-9));
 }
 
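The call above uses the default 1e-9 threshold from iterative.h; since the overloads take an optional threshold argument, a stricter stop criterion could be requested explicitly, for example (hypothetical variant, not part of this commit):

	// same check as above, but with an explicit convergence threshold
	Vector actualX3 = conjugateGradientDescent(Ab, x0, 1e-12);
	CHECK(assert_equal(expectedX, actualX3, 1e-9));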