Changes in progress
parent 8aa04312df
commit 6c4bd1160a
@@ -25,49 +25,39 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState GaussNewtonOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
+void GaussNewtonOptimizer::iterate() const {
 
-  const GaussNewtonState& current = dynamic_cast<const GaussNewtonState&>(*_current);
+  const NonlinearOptimizerState& current = state_;
 
   // Linearize graph
-  const Ordering& ordering = this->ordering(current.values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering);
 
   // Check whether to use QR
   bool useQR;
-  if(params_->factorization == GaussNewtonParams::LDL)
+  if(params_.factorization == GaussNewtonParams::LDL)
     useQR = false;
-  else if(params_->factorization == GaussNewtonParams::QR)
+  else if(params_.factorization == GaussNewtonParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization");
 
   // Optimize
   VectorValues::shared_ptr delta;
-  if(params_->elimination == GaussNewtonParams::MULTIFRONTAL)
+  if(params_.elimination == GaussNewtonParams::MULTIFRONTAL)
     delta = GaussianMultifrontalSolver(*linear, useQR).optimize();
-  else if(params_->elimination == GaussNewtonParams::SEQUENTIAL)
+  else if(params_.elimination == GaussNewtonParams::SEQUENTIAL)
    delta = GaussianSequentialSolver(*linear, useQR).optimize();
   else
     throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination");
 
   // Maybe show output
-  if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta");
+  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta");
 
   // Create new state with new values and new error
-  SharedState newState = boost::make_shared<GaussNewtonState>();
-  newState->values = current.values.retract(*delta, ordering);
-  newState->error = graph_->error(newState->values);
-  newState->iterations = current.iterations + 1;
-
-  return newState;
-}
-
-/* ************************************************************************* */
-NonlinearOptimizer::SharedState GaussNewtonOptimizer::initialState(const Values& initialValues) const {
-  SharedState initial = boost::make_shared<GaussNewtonState>();
-  defaultInitialState(initialValues, *initial);
-  return initial;
+  NonlinearOptimizerState newState;
+  state_.values = current.values.retract(*delta, *params_.ordering);
+  state_.error = graph_.error(state_.values);
+  ++ state_.iterations;
 }
 
 } /* namespace gtsam */
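With this change iterate() no longer threads a shared state through each call; the optimizer updates its own state_ member in place. A minimal usage sketch under the new API (the include path is an assumption modeled on the gtsam/nonlinear/ includes elsewhere in this diff; graph and initial stand in for a real problem):

    #include <gtsam/nonlinear/GaussNewtonOptimizer.h>  // assumed path

    gtsam::Values solve(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initial) {
      gtsam::GaussNewtonParams params;                  // inherits maxIterations, tolerances, verbosity
      gtsam::GaussNewtonOptimizer optimizer(graph, initial, params);  // copies graph and values
      optimizer.optimize();                             // runs the default convergence loop
      return optimizer.values();                        // final estimate, read from internal state
    }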
@@ -22,25 +22,13 @@
 
 namespace gtsam {
 
-/** Parameters for Gauss-Newton optimization, inherits from
- * NonlinearOptimizationParams.
- */
-class GaussNewtonParams : public SuccessiveLinearizationParams {
-};
-
-class GaussNewtonState : public SuccessiveLinearizationState {
-};
-
 /**
  * This class performs Gauss-Newton nonlinear optimization
  * TODO: use make_shared
  */
-class GaussNewtonOptimizer : public SuccessiveLinearizationOptimizer {
+class GaussNewtonOptimizer : public NonlinearOptimizer {
 
 public:
 
-  typedef boost::shared_ptr<GaussNewtonParams> SharedParams;
-
   /// @name Standard interface
   /// @{
 
@@ -52,10 +40,9 @@ public:
    * @param values The initial variable assignments
    * @param params The optimization parameters
    */
-  GaussNewtonOptimizer(const NonlinearFactorGraph& graph,
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const GaussNewtonParams& params = GaussNewtonParams()) :
-      SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-      params_(new GaussNewtonParams(params)) {}
+      NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters. For convenience this
@@ -65,24 +52,9 @@ public:
    * @param values The initial variable assignments
    * @param params The optimization parameters
    */
-  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
-      SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-      params_(new GaussNewtonParams()) {
-    params_->ordering = ordering; }
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  GaussNewtonOptimizer(const SharedGraph& graph,
-      const GaussNewtonParams& params = GaussNewtonParams()) :
-      SuccessiveLinearizationOptimizer(graph),
-      params_(new GaussNewtonParams(params)) {}
-
-  /** Access the parameters */
-  virtual NonlinearOptimizer::SharedParams params() const { return params_; }
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+      NonlinearOptimizer(graph), state_(graph, initialValues) {
+    params_.ordering = ordering; }
 
   /// @}
 
@@ -96,18 +68,40 @@ public:
    * containing the updated variable assignments, which may be retrieved with
    * values().
    */
-  virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const;
+  virtual void iterate() const;
 
-  /** Create an initial state with the specified variable assignment values and
-   * all other default state.
-   */
-  virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const;
+  /** Access the parameters */
+  const GaussNewtonParams& params() const { return params_; }
+
+  /** Access the last state */
+  const NonlinearOptimizerState& state() const { return state_; }
 
   /// @}
 
 protected:
 
-  const SharedParams params_;
+  GaussNewtonParams params_;
+  NonlinearOptimizerState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = graph.orderingCOLAMD(values);
+    return params;
+  }
 
 };
 
+/** Parameters for Gauss-Newton optimization, inherits from
+ * NonlinearOptimizationParams.
+ */
+class GaussNewtonParams : public SuccessiveLinearizationParams {
+};
+
 }
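ensureHasOrdering() turns the ordering into a construction-time decision instead of the lazily cached lookup the old base class provided. A sketch of the two construction paths, assuming params.ordering is a boost::optional<Ordering> as the if(!params.ordering) test above implies:

    gtsam::NonlinearFactorGraph graph;  // assume populated with factors
    gtsam::Values initial;              // assume populated with initial estimates

    // No ordering supplied: ensureHasOrdering() fills one in via COLAMD.
    gtsam::GaussNewtonOptimizer a(graph, initial);

    // Explicit ordering: the third constructor stores it in params_.ordering.
    gtsam::Ordering ordering;           // assume built for the problem's keys
    gtsam::GaussNewtonOptimizer b(graph, initial, ordering);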
@@ -22,6 +22,88 @@
 
 namespace gtsam {
 
+/**
+ * This class performs Levenberg-Marquardt nonlinear optimization
+ * TODO: use make_shared
+ */
+class LevenbergMarquardtOptimizer : public NonlinearOptimizer {
+
+public:
+
+  typedef boost::shared_ptr<LevenbergMarquardtOptimizer> shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters. For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param values The initial variable assignments
+   * @param params The optimization parameters
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
+      NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters. For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param values The initial variable assignments
+   * @param params The optimization parameters
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+      NonlinearOptimizer(graph), state_(graph, initialValues) {
+    params_.ordering = ordering; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~LevenbergMarquardtOptimizer() {}
+
+  /** Perform a single iteration, returning a new NonlinearOptimizer class
+   * containing the updated variable assignments, which may be retrieved with
+   * values().
+   */
+  virtual void iterate() const;
+
+  /** Access the parameters */
+  const GaussNewtonParams& params() const { return params_; }
+
+  /** Access the last state */
+  const NonlinearOptimizerState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  typedef boost::shared_ptr<const std::vector<size_t> > SharedDimensions;
+
+  mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it
+
+  LevenbergMarquardtParams params_;
+  LevenbergMarquardtState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = graph.orderingCOLAMD(values);
+    return params;
+  }
+};
+
 /** Parameters for Levenberg-Marquardt optimization. Note that this parameters
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms. This class also contains
@@ -61,95 +143,11 @@ public:
 /**
  * State for LevenbergMarquardtOptimizer
  */
-class LevenbergMarquardtState : public SuccessiveLinearizationState {
+class LevenbergMarquardtState : public NonlinearOptimizerState {
 public:
 
   double lambda;
 
 };
 
-/**
- * This class performs Levenberg-Marquardt nonlinear optimization
- * TODO: use make_shared
- */
-class LevenbergMarquardtOptimizer : public SuccessiveLinearizationOptimizer {
-
-public:
-
-  typedef boost::shared_ptr<LevenbergMarquardtParams> SharedParams;
-  typedef boost::shared_ptr<LevenbergMarquardtState> SharedState;
-  typedef boost::shared_ptr<LevenbergMarquardtOptimizer> shared_ptr;
-
-  /// @name Standard interface
-  /// @{
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters. For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph,
-      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-      SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-      params_(new LevenbergMarquardtParams(params)) {}
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters. For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
-      SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-      params_(new LevenbergMarquardtParams()) {
-    params_->ordering = ordering; }
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  LevenbergMarquardtOptimizer(const SharedGraph& graph,
-      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-      SuccessiveLinearizationOptimizer(graph),
-      params_(new LevenbergMarquardtParams(params)) {}
-
-  /** Access the parameters */
-  virtual NonlinearOptimizer::SharedParams params() const { return params_; }
-
-  /// @}
-
-  /// @name Advanced interface
-  /// @{
-
-  /** Virtual destructor */
-  virtual ~LevenbergMarquardtOptimizer() {}
-
-  /** Perform a single iteration, returning a new NonlinearOptimizer class
-   * containing the updated variable assignments, which may be retrieved with
-   * values().
-   */
-  virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const;
-
-  /** Create an initial state with the specified variable assignment values and
-   * all other default state.
-   */
-  virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const;
-
-  /// @}
-
-protected:
-
-  typedef boost::shared_ptr<const std::vector<size_t> > SharedDimensions;
-
-  SharedParams params_;
-  mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it
-};
-
 }
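LevenbergMarquardtState extends the base state with the damping factor lambda (note that params() and ensureHasOrdering() above still spell GaussNewtonParams, apparently carried over from the Gauss-Newton header). For orientation, a hedged sketch of the damping schedule an iterate() implementation typically applies; the factor of 10 is a common heuristic, not something this diff specifies:

    // Shrink damping after a step that reduced the error, grow it otherwise.
    double updatedLambda(double lambda, bool stepReducedError) {
      return stepReducedError ? lambda / 10.0 : lambda * 10.0;
    }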
@@ -25,54 +25,43 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const SharedState& initial) const {
+void NonlinearOptimizer::defaultOptimize() {
 
-  const SharedParams& params = this->params();
-  double currentError = initial->error;
+  const NonlinearOptimizerParams& params = this->params();
+  double currentError = this->error();
 
   // check if we're already close enough
-  if(currentError <= params->errorTol) {
-    if (params->verbosity >= NonlinearOptimizerParams::ERROR)
-      cout << "Exiting, as error = " << currentError << " < " << params->errorTol << endl;
-    return initial;
+  if(currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      cout << "Exiting, as error = " << currentError << " < " << params.errorTol << endl;
+    return;
   }
 
   // Maybe show output
-  if (params->verbosity >= NonlinearOptimizerParams::VALUES) initial->values.print("Initial values");
-  if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << initial->error << endl;
+  if (params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("Initial values");
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl;
 
   // Return if we already have too many iterations
-  if(initial->iterations >= params->maxIterations)
-    return initial;
+  if(this->iterations() >= params.maxIterations)
+    return;
 
   // Iterative loop
-  SharedState next = initial;
   do {
     // Do next iteration
-    currentError = next->error;
-    next = this->iterate(next);
+    currentError = this->error();
+    this->iterate();
 
     // Maybe show output
-    if(params->verbosity >= NonlinearOptimizerParams::VALUES) next->values.print("newValues");
-    if(params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl;
-  } while(next->iterations < params->maxIterations &&
-      !checkConvergence(params->relativeErrorTol, params->absoluteErrorTol,
-          params->errorTol, currentError, next->error, params->verbosity));
+    if(params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("newValues");
+    if(params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl;
+  } while(this->iterations() < params.maxIterations &&
+      !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+          params.errorTol, currentError, this->error(), params.verbosity));
 
   // Printing if verbose
-  if (params->verbosity >= NonlinearOptimizerParams::ERROR &&
-      next->iterations >= params->maxIterations)
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      this->iterations() >= params.maxIterations)
     cout << "Terminating because reached maximum iterations" << endl;
-
-  // Return optimizer from final iteration
-  return next;
 }
 
-/* ************************************************************************* */
-void NonlinearOptimizer::defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const {
-  initialState.values = initialValues;
-  initialState.error = graph_->error(initialValues);
-  initialState.iterations = 0;
-}
-
 /* ************************************************************************* */
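Because iterate() now advances internal state, a caller can drive the loop manually instead of relying on defaultOptimize(). A sketch reusing checkConvergence() exactly as the loop above does (the concrete optimizer and its construction are assumptions based on the headers in this diff):

    gtsam::GaussNewtonOptimizer optimizer(graph, initial);
    const gtsam::GaussNewtonParams& p = optimizer.params();
    while(optimizer.iterations() < p.maxIterations) {
      double previousError = optimizer.error();
      optimizer.iterate();
      if(gtsam::checkConvergence(p.relativeErrorTol, p.absoluteErrorTol, p.errorTol,
                                 previousError, optimizer.error(), p.verbosity))
        break;
    }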
@@ -22,67 +22,6 @@
 
 namespace gtsam {
 
-/** The common parameters for Nonlinear optimizers. Most optimizers
- * deriving from NonlinearOptimizer also subclass the parameters.
- */
-class NonlinearOptimizerParams {
-public:
-  /** See NonlinearOptimizerParams::verbosity */
-  enum Verbosity {
-    SILENT,
-    ERROR,
-    VALUES,
-    DELTA,
-    LINEAR
-  };
-
-  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
-  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
-  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
-  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
-  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)
-
-  NonlinearOptimizerParams() :
-    maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
-    errorTol(0.0), verbosity(SILENT) {}
-
-  virtual void print(const std::string& str = "") const {
-    std::cout << str << "\n";
-    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
-    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
-    std::cout << "      total error threshold: " << errorTol << "\n";
-    std::cout << "         maximum iterations: " << maxIterations << "\n";
-    std::cout << "            verbosity level: " << verbosity << std::endl;
-  }
-
-  virtual ~NonlinearOptimizerParams() {}
-};
-
-
-/**
- * Base class for a nonlinear optimization state, including the current estimate
- * of the variable values, error, and number of iterations. Optimizers derived
- * from NonlinearOptimizer usually also define a derived state class containing
- * additional state specific to the algorithm (for example, Dogleg state
- * contains the current trust region radius).
- */
-class NonlinearOptimizerState {
-public:
-
-  /** The current estimate of the variable values. */
-  Values values;
-
-  /** The factor graph error on the current values. */
-  double error;
-
-  /** The number of optimization iterations performed. */
-  unsigned int iterations;
-
-  /** Virtual destructor */
-  virtual ~NonlinearOptimizerState() {}
-};
-
-
 /**
  * This is the abstract interface for classes that can optimize for the
  * maximum-likelihood estimate of a NonlinearFactorGraph.
@@ -162,15 +101,6 @@ public:
   /** A shared pointer to this class */
   typedef boost::shared_ptr<const NonlinearOptimizer> shared_ptr;
 
-  /** A const shared_ptr to a NonlinearFactorGraph */
-  typedef boost::shared_ptr<const NonlinearFactorGraph> SharedGraph;
-
-  /** A const shared_ptr to the parameters */
-  typedef boost::shared_ptr<const NonlinearOptimizerParams> SharedParams;
-
-  /** A shared_ptr to an optimizer state */
-  typedef boost::shared_ptr<NonlinearOptimizerState> SharedState;
-
   /// @name Standard interface
   /// @{
 
@@ -183,21 +113,13 @@ public:
    * process, you may call iterate() and check_convergence() yourself, and if
    * needed modify the optimization state between iterations.
    */
-  virtual SharedState optimize(const SharedState& initial) const { return defaultOptimize(initial); }
+  virtual const Values& optimize() const { return defaultOptimize(); }
 
-  SharedState optimize(const Values& initialization) const { return optimize(initialState(initialization)); }
+  double error() const { return _state().error; }
 
-  /** Shortcut to optimize and return the resulting Values of the maximum-
-   * likelihood estimate. To access statistics and information such as the
-   * final error and number of iterations, use optimize() instead.
-   * @return The maximum-likelihood estimate.
-   */
-  virtual Values optimized(const SharedState& initial) const { return this->optimize(initial)->values; }
+  unsigned int iterations() const { return _state().iterations; }
 
-  Values optimized(const Values& initialization) const { return optimized(initialState(initialization)); }
-
-  /** Retrieve the parameters. */
-  virtual SharedParams params() const = 0;
+  const Values& values() const { return _state().values; }
 
   /// @}
 
@@ -211,31 +133,93 @@ public:
    * containing the updated variable assignments, which may be retrieved with
    * values().
    */
-  virtual SharedState iterate(const SharedState& current) const = 0;
-
-  /** Create an initial state from a variable assignment Values, with all
-   * other state values at their default initial values.
-   */
-  virtual SharedState initialState(const Values& initialValues) const = 0;
+  virtual void iterate() const = 0;
 
   /// @}
 
 protected:
 
-  const SharedGraph graph_;
+  NonlinearFactorGraph graph_;
 
   /** A default implementation of the optimization loop, which calls iterate()
    * until checkConvergence returns true.
    */
-  SharedState defaultOptimize(const SharedState& initial) const;
+  void defaultOptimize();
 
-  /** Initialize a state, using the current error and 0 iterations */
-  void defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const;
+  virtual const NonlinearOptimizerState& _state() const = 0;
 
-  /** Constructor for initial construction of base classes.
-   */
-  NonlinearOptimizer(const SharedGraph& graph) : graph_(graph) {}
+  virtual const NonlinearOptimizerParams& _params() const = 0;
+
+  /** Constructor for initial construction of base classes. */
+  NonlinearOptimizer(const NonlinearFactorGraph& graph) : graph_(graph) {}
 
 };
 
 
+/** The common parameters for Nonlinear optimizers. Most optimizers
+ * deriving from NonlinearOptimizer also subclass the parameters.
+ */
+class NonlinearOptimizerParams {
+public:
+  /** See NonlinearOptimizerParams::verbosity */
+  enum Verbosity {
+    SILENT,
+    ERROR,
+    VALUES,
+    DELTA,
+    LINEAR
+  };
+
+  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
+  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
+  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
+  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
+  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)
+
+  NonlinearOptimizerParams() :
+    maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
+    errorTol(0.0), verbosity(SILENT) {}
+
+  virtual void print(const std::string& str = "") const {
+    std::cout << str << "\n";
+    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
+    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
+    std::cout << "      total error threshold: " << errorTol << "\n";
+    std::cout << "         maximum iterations: " << maxIterations << "\n";
+    std::cout << "            verbosity level: " << verbosity << std::endl;
+  }
+
+  virtual ~NonlinearOptimizerParams() {}
+};
+
+
+/**
+ * Base class for a nonlinear optimization state, including the current estimate
+ * of the variable values, error, and number of iterations. Optimizers derived
+ * from NonlinearOptimizer usually also define a derived state class containing
+ * additional state specific to the algorithm (for example, Dogleg state
+ * contains the current trust region radius).
+ */
+class NonlinearOptimizerState {
+public:
+
+  /** The current estimate of the variable values. */
+  Values values;
+
+  /** The factor graph error on the current values. */
+  double error;
+
+  /** The number of optimization iterations performed. */
+  unsigned int iterations;
+
+  NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    values(values), error(graph.error(values)), iterations(iterations) {}
+
+  NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) :
+    values(values), error(error), iterations(iterations) {}
+
+  /** Virtual destructor */
+  virtual ~NonlinearOptimizerState() {}
+};
+
 /** Check whether the relative error decrease is less than relativeErrorTreshold,
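The two new NonlinearOptimizerState constructors replace the removed defaultInitialState(): one computes the error from the graph, the other accepts a precomputed value. A sketch reusing the names declared above (graph, initialValues, values, and previousError are placeholders):

    gtsam::NonlinearOptimizerState fresh(graph, initialValues);       // error = graph.error(initialValues), iterations = 0
    gtsam::NonlinearOptimizerState resumed(values, previousError, 3); // resume at iteration 3 with a known error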
@@ -1,41 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-/**
- * @file DirectOptimizer.cpp
- * @brief
- * @author Richard Roberts
- * @date Apr 1, 2012
- */
-
-#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
-#include <gtsam/nonlinear/NonlinearOptimizer.h>
-
-namespace gtsam {
-
-/* ************************************************************************* */
-const Ordering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const {
-
-  if(!ordering_) {
-    SharedParams params =
-        boost::dynamic_pointer_cast<const SuccessiveLinearizationParams>(this->params());
-
-    // If we're using a COLAMD ordering, compute it
-    if(params->ordering)
-      ordering_ = params->ordering;
-    else
-      ordering_ = *graph_->orderingCOLAMD(values);
-  }
-
-  return *ordering_;
-}
-
-} /* namespace gtsam */
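This removes the lazily computed, cached ordering; its job moves to the eager per-optimizer ensureHasOrdering() helpers added in the headers above. The same logic as a free-function sketch (the template form is an illustration only; the diff duplicates the helper in each optimizer):

    template <class PARAMS>
    PARAMS withDefaultOrdering(PARAMS params, const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& values) {
      if(!params.ordering)                              // boost::optional<Ordering> assumed
        params.ordering = graph.orderingCOLAMD(values);
      return params;
    }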
@@ -18,38 +18,10 @@
 
 #pragma once
 
-#include <boost/variant.hpp>
-
 #include <gtsam/nonlinear/NonlinearOptimizer.h>
 
 namespace gtsam {
 
-class SuccessiveLinearizationParams;
-class SuccessiveLinearizationState;
-
-/**
- * This is an intermediate base class for NonlinearOptimizers that use direct
- * solving, i.e. factorization. This class is here to reduce code duplication
- * for storing the ordering, having factorization and elimination parameters,
- * etc.
- */
-class SuccessiveLinearizationOptimizer : public NonlinearOptimizer {
-public:
-
-  typedef boost::shared_ptr<const SuccessiveLinearizationParams> SharedParams;
-  typedef boost::shared_ptr<const SuccessiveLinearizationOptimizer> shared_ptr;
-
-protected:
-
-  SuccessiveLinearizationOptimizer(const SharedGraph& graph) : NonlinearOptimizer(graph) {}
-
-  const Ordering& ordering(const Values& values) const;
-
-private:
-
-  mutable boost::optional<Ordering> ordering_; // Mutable because we set/compute it when needed and cache it
-};
-
 class SuccessiveLinearizationParams : public NonlinearOptimizerParams {
 public:
   /** See SuccessiveLinearizationParams::elimination */
@@ -98,7 +70,4 @@ public:
   }
 };
 
-class SuccessiveLinearizationState : public NonlinearOptimizerState {
-};
-
 } /* namespace gtsam */
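With the intermediate SuccessiveLinearizationOptimizer class gone, SuccessiveLinearizationParams survives purely as a parameter bag consumed by the concrete optimizers. A sketch of setting the solver knobs used by iterate() (enum spellings taken from GaussNewtonOptimizer.cpp at the top of this diff):

    gtsam::GaussNewtonParams params;
    params.elimination = gtsam::GaussNewtonParams::MULTIFRONTAL;  // or SEQUENTIAL
    params.factorization = gtsam::GaussNewtonParams::LDL;         // or QR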