gtsam/gtsam_unstable/linear/iterative-inl.h


/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file iterative-inl.h
* @brief Iterative methods, template implementation
* @author Frank Dellaert
* @date Dec 28, 2009
*/
#pragma once
#include <gtsam_unstable/linear/iterative.h>
#include <gtsam/linear/ConjugateGradientSolver.h>
#include <boost/shared_ptr.hpp>

#include <algorithm>  // std::max
#include <iostream>   // std::cout

namespace gtsam {
  /* ************************************************************************* */
  // State for the conjugate gradient (CG) method.
  // S: linear system, V: step vector, E: errors
  template<class S, class V, class E>
  struct CGState {

    typedef ConjugateGradientParameters Parameters;

    const Parameters &parameters_;

    int k;                    ///< iteration count
    bool steepest;            ///< flag to indicate we are doing steepest descent
    V g, d;                   ///< gradient g and search direction d for CG
    double gamma, threshold;  ///< gamma (squared L2 norm of g) and convergence threshold
    E Ad;                     ///< A*d, kept as a member to avoid re-allocation

    /* ************************************************************************* */
    // Constructor
    CGState(const S& Ab, const V& x, const Parameters &parameters, bool steep) :
        parameters_(parameters), k(0), steepest(steep) {

      // Start with g0 = A'*(A*x0-b), d0 = -g0,
      // i.e., the first step is in the direction of the negative gradient
      g = gradient(Ab, x);
      d = g; // instead of negating the gradient, alpha will be negated

      // initialize gamma and calculate the convergence threshold
      gamma = dot(g, g);
      threshold = std::max(parameters_.epsilon_abs(),
          parameters_.epsilon() * parameters_.epsilon() * gamma);

      // Allocate and calculate A*d for the first iteration
      if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
    }
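
    // Note: this is CG on a least-squares objective f(x) = 0.5*||A*x - b||^2,
    // whose gradient is g(x) = A'*(A*x - b). Convergence is declared when the
    // squared gradient norm gamma = ||g||^2 drops below
    //   threshold = max(epsilon_abs, epsilon^2 * gamma_0),
    // i.e. an absolute floor combined with a relative decrease of ||g|| by a
    // factor epsilon from its initial value ||g_0||.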

    /* ************************************************************************* */
    // print
    void print(const V& x) {
      std::cout << "iteration = " << k << std::endl;
      gtsam::print(x, "x");
      gtsam::print(g, "g");
      std::cout << "dotg = " << gamma << std::endl;
      gtsam::print(d, "d");
      gtsam::print(Ad, "Ad");
    }

    /* ************************************************************************* */
    // take the optimal step along the current search direction
    double takeOptimalStep(V& x) {
      // TODO: can we use gamma instead of dot(d,g)? The answer is not trivial
      double alpha = -dot(d, g) / dot(Ad, Ad); // calculate optimal step-size
      axpy(alpha, d, x); // do step in new search direction, x += alpha*d
      return alpha;
    }
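
    // Why this step size is optimal: along the ray x + alpha*d, the objective
    //   f(alpha) = 0.5*||A*(x + alpha*d) - b||^2
    // has derivative f'(alpha) = dot(d,g) + alpha*dot(Ad,Ad), since
    // g = A'*(A*x - b). Setting f'(alpha) = 0 yields
    //   alpha = -dot(d,g) / dot(Ad,Ad).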

    /* ************************************************************************* */
    // take a step, return true if converged
    bool step(const S& Ab, V& x) {
      if ((++k) >= ((int) parameters_.maxIterations())) return true;

      double alpha = takeOptimalStep(x);

      // update the gradient incrementally, g += alpha*(A'*Ad),
      // or re-calculate it from scratch at reset time to limit
      // accumulated round-off error
      if (k % parameters_.reset() == 0) g = gradient(Ab, x);
      else transposeMultiplyAdd(Ab, alpha, Ad, g);

      // check for convergence
      double new_gamma = dot(g, g);

      if (parameters_.verbosity() != ConjugateGradientParameters::SILENT)
        std::cout << "iteration " << k << ": alpha = " << alpha
            << ", dotg = " << new_gamma << std::endl;

      if (new_gamma < threshold) return true;

      // calculate the new search direction
      if (steepest) d = g;
      else {
        // Fletcher-Reeves update: beta = ||g_new||^2 / ||g_old||^2
        double beta = new_gamma / gamma;
        // d = g + beta*d
        scal(beta, d);
        axpy(1.0, g, d);
      }
      gamma = new_gamma;

      // In-place recalculation Ad <- A*d to avoid re-allocating Ad
      multiplyInPlace(Ab, d, Ad);
      return false;
    }
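
    // Sign convention: textbook CG stores d_{k+1} = -g_{k+1} + beta*d_k and
    // takes a positive step. Here d keeps the un-negated gradient (see the
    // constructor) and alpha = -dot(d,g)/dot(Ad,Ad) carries the minus sign
    // instead, so x += alpha*d performs the same descent step.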

  }; // CGState Class

  /* ************************************************************************* */
  // conjugate gradient method.
  // S: linear system, V: step vector, E: errors
  template<class S, class V, class E>
  V conjugateGradients(const S& Ab, V x, const ConjugateGradientParameters &parameters, bool steepest = false) {

    CGState<S, V, E> state(Ab, x, parameters, steepest);

    if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
      std::cout << "CG: epsilon = " << parameters.epsilon()
          << ", maxIterations = " << parameters.maxIterations()
          << ", ||g0||^2 = " << state.gamma
          << ", threshold = " << state.threshold << std::endl;

    if (state.gamma < state.threshold) {
      if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
        std::cout << "||g0||^2 < threshold, exiting immediately!" << std::endl;
      return x;
    }

    // iterate until convergence, or at most maxIterations times
    while (!state.step(Ab, x)) {}
    return x;
  }
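
  /*
   * Usage sketch (illustrative only): any S, V, E types work as long as they
   * provide the free functions used above: gradient(S,V), dot, axpy, scal,
   * transposeMultiplyAdd, multiplyInPlace, and operator*(S,V). For example,
   * assuming a dense System wrapper around (A,b) in the spirit of the one
   * declared in iterative.h, a call could look like:
   *
   *   System Ab(A, b);                     // least-squares system ||A*x-b||^2
   *   ConjugateGradientParameters params;  // default CG settings
   *   Vector x = conjugateGradients<System, Vector, Vector>(Ab, x0, params);
   */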
/* ************************************************************************* */
} // namespace gtsam