From fdc4cc586df8cf67d24fc782e93976d517ee4529 Mon Sep 17 00:00:00 2001
From: Stephen Williams
Date: Mon, 14 May 2012 18:11:52 +0000
Subject: [PATCH] Incremental modifications to the new Nonlinear Optimizer interface.

---
 gtsam/nonlinear/DoglegOptimizer.cpp           |  41 ++---
 gtsam/nonlinear/DoglegOptimizer.h             | 158 +++++++++++---------
 gtsam/nonlinear/GaussNewtonOptimizer.h        |   6 +-
 .../nonlinear/LevenbergMarquardtOptimizer.cpp |  73 +++-----
 gtsam/nonlinear/LevenbergMarquardtOptimizer.h |  22 +--
 5 files changed, 126 insertions(+), 174 deletions(-)

diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp
index f910c9661..9d52fb7c1 100644
--- a/gtsam/nonlinear/DoglegOptimizer.cpp
+++ b/gtsam/nonlinear/DoglegOptimizer.cpp
@@ -27,19 +27,17 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
-
-  const DoglegState& current = dynamic_cast<const DoglegState&>(*_current);
+void DoglegOptimizer::iterate() const {
 
   // Linearize graph
-  const Ordering& ordering = this->ordering(current.values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
+  const Ordering& ordering = *params_.ordering;
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering);
 
   // Check whether to use QR
   bool useQR;
-  if(params_->factorization == DoglegParams::LDL)
+  if(params_.factorization == DoglegParams::LDL)
     useQR = false;
-  else if(params_->factorization == DoglegParams::QR)
+  else if(params_.factorization == DoglegParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization");
@@ -50,37 +48,26 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize
   // Do Dogleg iteration with either Multifrontal or Sequential elimination
   DoglegOptimizerImpl::IterationResult result;
 
-  if(params_->elimination == DoglegParams::MULTIFRONTAL) {
+  if(params_.elimination == DoglegParams::MULTIFRONTAL) {
     GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, current.values, ordering, current.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);
 
-  } else if(params_->elimination == DoglegParams::SEQUENTIAL) {
+  } else if(params_.elimination == DoglegParams::SEQUENTIAL) {
     GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, current.values, ordering, current.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
 
   } else {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
   }
 
   // Maybe show output
-  if(params_->verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
+  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
 
   // Create new state with new values and new error
-  SharedState newState = boost::make_shared<DoglegState>();
-  newState->values = current.values.retract(result.dx_d, ordering);
-  newState->error = result.f_error;
-  newState->iterations = current.iterations + 1;
-  newState->Delta = result.Delta;
-
-  return newState;
-}
-
-/* ************************************************************************* */
-NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const {
-  SharedState initial = boost::make_shared<DoglegState>();
-  defaultInitialState(initialValues, *initial);
-  initial->Delta = params_->deltaInitial;
-  return initial;
+  state_.values = state_.values.retract(result.dx_d, ordering);
+  state_.error = result.f_error;
+  state_.delta = result.Delta;
+  ++state_.iterations;
 }
 
 } /* namespace gtsam */
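Note on the change above: iterate() no longer builds and returns a new SharedState; it updates the optimizer's own state_ in place and returns void. A driver loop under the new interface would look roughly like the following sketch (not part of the patch; maxIterations and absoluteErrorTol are hypothetical caller-side names standing in for whatever convergence parameters are used):

    // Hypothetical driver for the in-place interface (names are illustrative)
    DoglegOptimizer optimizer(graph, initialValues);
    double previousError = optimizer.state().error;
    for(size_t i = 0; i < maxIterations; ++i) {
      optimizer.iterate();  // linearizes, solves, and updates state_.values/error/delta
      if(previousError - optimizer.state().error < absoluteErrorTol)
        break;              // error decrease below tolerance; treat as converged
      previousError = optimizer.state().error;
    }
    Values solution = optimizer.state().values;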
diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h
index badd021e1..af9c91179 100644
--- a/gtsam/nonlinear/DoglegOptimizer.h
+++ b/gtsam/nonlinear/DoglegOptimizer.h
@@ -22,6 +22,82 @@
 
 namespace gtsam {
 
+/**
+ * This class performs Dogleg nonlinear optimization
+ */
+class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
+
+public:
+
+  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph  The nonlinear factor graph to optimize
+   * @param initialValues  The initial variable assignments
+   * @param params  The optimization parameters
+   */
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const DoglegParams& params = DoglegParams()) :
+        SuccessiveLinearizationOptimizer(graph),
+        params_(ensureHasOrdering(params, graph, initialValues)),
+        state_(graph, initialValues) {}
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph  The nonlinear factor graph to optimize
+   * @param initialValues  The initial variable assignments
+   * @param ordering  The variable elimination ordering to use
+   */
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+      SuccessiveLinearizationOptimizer(graph), state_(graph, initialValues) {
+    *params_.ordering = ordering; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~DoglegOptimizer() {}
+
+  /** Perform a single iteration, updating the internal state with the new
+   * variable assignments and error, which may be retrieved with state().
+   */
+  virtual void iterate() const;
+
+  /** Access the parameters */
+  const DoglegParams& params() const { return params_; }
+
+  /** Access the last state */
+  const DoglegState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  DoglegParams params_;
+  mutable DoglegState state_; // Mutable because iterate() is declared const but updates the state
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = graph.orderingCOLAMD(values);
+    return params;
+  }
+};
+
 /** Parameters for Dogleg optimization.  Note that this parameters
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms.  This class also contains
@@ -56,88 +132,8 @@ public:
 class DoglegState : public SuccessiveLinearizationState {
 public:
 
-  double Delta;
+  double delta;
 
 };
 
-/**
- * This class performs Dogleg nonlinear optimization
- */
-class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
-
-public:
-
-  typedef boost::shared_ptr<const DoglegParams> SharedParams;
-  typedef boost::shared_ptr<const DoglegState> SharedState;
-  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
-
-  /// @name Standard interface
-  /// @{
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.  For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
-   * @param params  The optimization parameters
-   */
-  DoglegOptimizer(const NonlinearFactorGraph& graph,
-      const DoglegParams& params = DoglegParams()) :
-        SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-        params_(new DoglegParams(params)) {}
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.  For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
-   * @param params  The optimization parameters
-   */
-  DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
-      SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-      params_(new DoglegParams()) {
-    params_->ordering = ordering; }
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.
-   * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
-   * @param params  The optimization parameters
-   */
-  DoglegOptimizer(const SharedGraph& graph,
-      const DoglegParams& params = DoglegParams()) :
-        SuccessiveLinearizationOptimizer(graph),
-        params_(new DoglegParams(params)) {}
-
-  /** Access the parameters */
-  virtual NonlinearOptimizer::SharedParams params() const { return params_; }
-
-  /// @}
-
-  /// @name Advanced interface
-  /// @{
-
-  /** Virtual destructor */
-  virtual ~DoglegOptimizer() {}
-
-  /** Perform a single iteration, returning a new NonlinearOptimizer class
-   * containing the updated variable assignments, which may be retrieved with
-   * values().
-   */
-  virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const;
-
-  /** Create an initial state with the specified variable assignment values and
-   * all other default state.
-   */
-  virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const;
-
-  /// @}
-
-protected:
-
-  SharedParams params_;
-};
-
 }
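For reference, the two constructors of the new class would be used like this (a sketch only; graph and values setup happens elsewhere, and deltaInitial is taken from the removed initialState() code, which read params_->deltaInitial):

    NonlinearFactorGraph graph;  // populated by the caller
    Values initialValues;        // populated by the caller

    // Default parameters; ensureHasOrdering() computes a COLAMD ordering internally
    DoglegOptimizer optimizer1(graph, initialValues);

    // Explicit parameters, e.g. a custom initial trust-region radius
    DoglegParams params;
    params.deltaInitial = 10.0;
    DoglegOptimizer optimizer2(graph, initialValues, params);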
diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h
index 1528eb1df..6258366af 100644
--- a/gtsam/nonlinear/GaussNewtonOptimizer.h
+++ b/gtsam/nonlinear/GaussNewtonOptimizer.h
@@ -37,7 +37,7 @@ public:
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
+   * @param initialValues  The initial variable assignments
    * @param params  The optimization parameters
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
@@ -49,12 +49,12 @@ public:
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
+   * @param initialValues  The initial variable assignments
    * @param params  The optimization parameters
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
       NonlinearOptimizer(graph), state_(graph, initialValues) {
-    params_.ordering = ordering; }
+    *params_.ordering = ordering; }
 
   /// @}
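The ordering-taking constructor would typically be handed a precomputed ordering, for example one produced the same way ensureHasOrdering() produces it (a sketch; it assumes orderingCOLAMD returns a shared pointer here, which is consistent with its assignment into params.ordering elsewhere in the patch):

    NonlinearFactorGraph graph;  // populated by the caller
    Values initialValues;        // populated by the caller
    Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialValues);
    GaussNewtonOptimizer optimizer(graph, initialValues, *ordering);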
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index 0e641db29..fc25a697d 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -27,36 +27,23 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
-
-  const LevenbergMarquardtState& current = dynamic_cast<const LevenbergMarquardtState&>(*_current);
+void LevenbergMarquardtOptimizer::iterate() const {
 
   // Linearize graph
-  const Ordering& ordering = this->ordering(current.values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering);
 
   // Check whether to use QR
   bool useQR;
-  if(params_->factorization == LevenbergMarquardtParams::LDL)
+  if(params_.factorization == LevenbergMarquardtParams::LDL)
     useQR = false;
-  else if(params_->factorization == LevenbergMarquardtParams::QR)
+  else if(params_.factorization == LevenbergMarquardtParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization");
 
   // Pull out parameters we'll use
-  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity;
-  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_->lmVerbosity;
-  const double lambdaFactor = params_->lambdaFactor;
-
-  // Variables to update during try_lambda loop
-  double lambda = current.lambda;
-  double next_error = current.error;
-  Values next_values = current.values;
-
-  // Compute dimensions if we haven't done it yet
-  if(!dimensions_)
-    dimensions_.reset(new vector<size_t>(current.values.dims(ordering)));
+  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
+  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_.lmVerbosity;
 
   // Keep increasing lambda until we make progress
   while(true) {
@@ -68,10 +55,10 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
     GaussianFactorGraph dampedSystem(*linear);
     {
-      double sigma = 1.0 / sqrt(lambda);
-      dampedSystem.reserve(dampedSystem.size() + dimensions_->size());
+      double sigma = 1.0 / sqrt(state_.lambda);
+      dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
       // for each of the variables, add a prior
-      for(Index j=0; j<dimensions_->size(); ++j) {
-        size_t dim = (*dimensions_)[j];
+      for(Index j=0; j<dimensions_.size(); ++j) {
+        size_t dim = dimensions_[j];
         Matrix A = eye(dim);
         Vector b = zero(dim);
         SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
         GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
         dampedSystem.push_back(prior);
       }
     }
 
     // Try solving
     try {
       // Optimize
       VectorValues::shared_ptr delta;
-      if(params_->elimination == LevenbergMarquardtParams::MULTIFRONTAL)
+      if(params_.elimination == LevenbergMarquardtParams::MULTIFRONTAL)
         delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize();
-      else if(params_->elimination == LevenbergMarquardtParams::SEQUENTIAL)
+      else if(params_.elimination == LevenbergMarquardtParams::SEQUENTIAL)
         delta = GaussianSequentialSolver(dampedSystem, useQR).optimize();
       else
         throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
@@ -97,28 +84,28 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta");
 
       // update values
-      Values newValues = current.values.retract(*delta, ordering);
+      Values newValues = state_.values.retract(*delta, *params_.ordering);
 
       // create new optimization state with more adventurous lambda
-      double error = graph_->error(newValues);
+      double error = graph_.error(newValues);
 
       if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
 
-      if(error <= current.error) {
-        next_values.swap(newValues);
-        next_error = error;
-        lambda /= lambdaFactor;
+      if(error <= state_.error) {
+        state_.values.swap(newValues);
+        state_.error = error;
+        state_.lambda /= params_.lambdaFactor;
         break;
       } else {
         // Either we're not cautious, or the same lambda was worse than the current error.
         // The more adventurous lambda was worse too, so make lambda more conservative
         // and keep the same values.
-        if(lambda >= params_->lambdaUpperBound) {
+        if(state_.lambda >= params_.lambdaUpperBound) {
           if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
             cout << "Warning:  Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
           break;
         } else {
-          lambda *= lambdaFactor;
+          state_.lambda *= params_.lambdaFactor;
         }
       }
     } catch(const NegativeMatrixException& e) {
@@ -127,34 +114,20 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       // Either we're not cautious, or the same lambda was worse than the current error.
       // The more adventurous lambda was worse too, so make lambda more conservative
       // and keep the same values.
-      if(lambda >= params_->lambdaUpperBound) {
+      if(state_.lambda >= params_.lambdaUpperBound) {
        if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
          cout << "Warning:  Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
        break;
      } else {
-        lambda *= lambdaFactor;
+        state_.lambda *= params_.lambdaFactor;
      }
     } catch(...) {
       throw;
     }
   } // end while
 
-  // Create new state with new values and new error
-  LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared<LevenbergMarquardtState>();
-  newState->values = next_values;
-  newState->error = next_error;
-  newState->iterations = current.iterations + 1;
-  newState->lambda = lambda;
-
-  return newState;
-}
-
-/* ************************************************************************* */
-NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const {
-  SharedState initial = boost::make_shared<LevenbergMarquardtState>();
-  defaultInitialState(initialValues, *initial);
-  initial->lambda = params_->lambdaInitial;
-  return initial;
+  // Increment the iteration counter
+  ++state_.iterations;
 }
 
 } /* namespace gtsam */
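The damping block above is the standard Levenberg-Marquardt modification of the normal equations: each added prior has a unit Jacobian and noise sigma = 1/sqrt(lambda), so it contributes lambda*I to the Hessian and the solved system is (J^T J + lambda*I) dx = -J^T r. A self-contained sketch of the same idea on dense matrices (illustrative only, using Eigen rather than GTSAM types):

    #include <Eigen/Dense>

    // Solve the damped normal equations (J^T J + lambda I) dx = -J^T r
    Eigen::VectorXd lmStep(const Eigen::MatrixXd& J, const Eigen::VectorXd& r, double lambda) {
      Eigen::MatrixXd H = J.transpose() * J;  // Gauss-Newton approximation of the Hessian
      H.diagonal().array() += lambda;         // same effect as the unit-Jacobian priors above
      return H.ldlt().solve(-J.transpose() * r);
    }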
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
index 5ce5f5f1f..b5b6d21f6 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
@@ -24,7 +24,6 @@ namespace gtsam {
 
 /**
  * This class performs Levenberg-Marquardt nonlinear optimization
- * TODO: use make_shared
  */
 class LevenbergMarquardtOptimizer : public NonlinearOptimizer {
 
@@ -40,24 +39,24 @@ public:
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.  For convenience this
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
+   * @param initialValues  The initial variable assignments
    * @param params  The optimization parameters
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
+    state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.  For convenience this
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph  The nonlinear factor graph to optimize
-   * @param values  The initial variable assignments
+   * @param initialValues  The initial variable assignments
    * @param params  The optimization parameters
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-      NonlinearOptimizer(graph), state_(graph, initialValues) {
-    params_.ordering = ordering; }
+      NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) {
+    *params_.ordering = ordering; }
 
   /// @}
 
@@ -74,21 +73,18 @@ public:
   virtual void iterate() const;
 
   /** Access the parameters */
-  const GaussNewtonParams& params() const { return params_; }
+  const LevenbergMarquardtParams& params() const { return params_; }
 
   /** Access the last state */
-  const NonlinearOptimizerState& state() const { return state_; }
+  const LevenbergMarquardtState& state() const { return state_; }
 
   /// @}
 
 protected:
 
-  typedef boost::shared_ptr<std::vector<size_t> > SharedDimensions;
-
-  mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it
-
   LevenbergMarquardtParams params_;
-  LevenbergMarquardtState state_;
+  mutable LevenbergMarquardtState state_; // Mutable because iterate() is declared const but updates the state
+  std::vector<size_t> dimensions_;
 
   /** Access the parameters (base class version) */
   virtual const NonlinearOptimizerParams& _params() const { return params_; }
@@ -97,7 +93,7 @@ protected:
   /** Access the state (base class version) */
   virtual const NonlinearOptimizerState& _state() const { return state_; }
 
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
-  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+  LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
       params.ordering = graph.orderingCOLAMD(values);
     return params;
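Typical use of the class after this change (a sketch; graph and initialValues come from the caller, and the lambda-related fields shown are exactly those read by iterate() above):

    LevenbergMarquardtParams params;
    params.lambdaFactor = 10.0;      // multiplicative lambda schedule applied in iterate()
    params.lambdaUpperBound = 1e5;   // give-up threshold checked in iterate()
    LevenbergMarquardtOptimizer optimizer(graph, initialValues, params);

    optimizer.iterate();  // one damped Gauss-Newton step; updates state() in place
    double newError = optimizer.state().error;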