Incremental modifications to the new Nonlinear Optimizer interface.

release/4.3a0
Stephen Williams 2012-05-14 18:11:52 +00:00
parent 6c4bd1160a
commit fdc4cc586d
5 changed files with 126 additions and 174 deletions
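
The diffs below move the optimizers from a functional-style interface, where iterate() took a SharedState and returned a new one, to one where each optimizer owns its params_ and state_ and updates them in place. A minimal sketch of how the reworked interface is driven, inferred from these diffs; the graph/values setup, iteration cap, and convergence tolerance are illustrative assumptions, not part of this commit:

    // Hypothetical driver loop for the in-place interface introduced here.
    NonlinearFactorGraph graph;   // assumed to be populated with factors elsewhere
    Values initial;               // assumed to hold the initial estimate
    DoglegOptimizer optimizer(graph, initial);   // no Ordering given, so ensureHasOrdering falls back to COLAMD
    for(size_t i = 0; i < 100; ++i) {            // illustrative iteration cap
      const double previousError = optimizer.state().error;
      optimizer.iterate();                       // updates state_ in place; no new state object is returned
      if(std::fabs(previousError - optimizer.state().error) < 1e-5)   // illustrative tolerance
        break;
    }
    const Values& solution = optimizer.state().values;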

View File

@@ -27,19 +27,17 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
-  const DoglegState& current = dynamic_cast<const DoglegState&>(*_current);
+void DoglegOptimizer::iterate(void) const {
 
   // Linearize graph
-  const Ordering& ordering = this->ordering(current.values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
+  const Ordering& ordering = *params_.ordering;
+  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, ordering);
 
   // Check whether to use QR
   bool useQR;
-  if(params_->factorization == DoglegParams::LDL)
+  if(params_.factorization == DoglegParams::LDL)
     useQR = false;
-  else if(params_->factorization == DoglegParams::QR)
+  else if(params_.factorization == DoglegParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization");
@@ -50,37 +48,26 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize
   // Do Dogleg iteration with either Multifrontal or Sequential elimination
   DoglegOptimizerImpl::IterationResult result;
-  if(params_->elimination == DoglegParams::MULTIFRONTAL) {
+  if(params_.elimination == DoglegParams::MULTIFRONTAL) {
     GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, current.values, ordering, current.error, dlVerbose);
-  } else if(params_->elimination == DoglegParams::SEQUENTIAL) {
+    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);
+  } else if(params_.elimination == DoglegParams::SEQUENTIAL) {
     GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, current.values, ordering, current.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
   } else {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
   }
 
   // Maybe show output
-  if(params_->verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
+  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
 
   // Create new state with new values and new error
-  SharedState newState = boost::make_shared<DoglegState>();
-  newState->values = current.values.retract(result.dx_d, ordering);
-  newState->error = result.f_error;
-  newState->iterations = current.iterations + 1;
-  newState->Delta = result.Delta;
-  return newState;
-}
-
-/* ************************************************************************* */
-NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const {
-  SharedState initial = boost::make_shared<DoglegState>();
-  defaultInitialState(initialValues, *initial);
-  initial->Delta = params_->deltaInitial;
-  return initial;
+  state_.values = state_.values.retract(result.dx_d, ordering);
+  state_.error = result.f_error;
+  state_.delta = result.delta;
+  ++state_.iterations;
 }
 
 } /* namespace gtsam */
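
For context, standard dogleg behaviour, summarized only to explain the fields the new code stores in state_ (the actual rules live in DoglegOptimizerImpl::Iterate, which this commit does not change): the step dx_d is the Gauss-Newton step when it fits inside the trust region of radius delta, and otherwise a blend of the steepest-descent and Gauss-Newton directions truncated to that radius. After evaluating the new error f_error, the radius is grown or shrunk according to the gain ratio

    rho = (old error - new error) / (reduction predicted by the linearized model)

with rho near 1 enlarging the region and small or negative rho shrinking it; the updated radius comes back in result.delta and is stored for the next iteration.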

View File

@@ -22,6 +22,82 @@
 namespace gtsam {
 
+/**
+ * This class performs Dogleg nonlinear optimization
+ */
+class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
+
+public:
+
+  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters. For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const DoglegParams& params = DoglegParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters. For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph), state_(graph, initialValues) {
+    *params_.ordering = ordering; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~DoglegOptimizer() {}
+
+  /** Perform a single iteration, returning a new NonlinearOptimizer class
+   * containing the updated variable assignments, which may be retrieved with
+   * values().
+   */
+  virtual void iterate() const;
+
+  /** Access the parameters */
+  const DoglegParams& params() const { return params_; }
+
+  /** Access the last state */
+  const DoglegState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  DoglegParams params_;
+  DoglegState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = graph.orderingCOLAMD(values);
+    return params;
+  }
+};
+
 /** Parameters for Levenberg-Marquardt optimization. Note that this parameters
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms. This class also contains
@@ -56,88 +132,8 @@ public:
 class DoglegState : public SuccessiveLinearizationState {
 public:
-  double Delta;
+  double delta;
 };
 
-/**
- * This class performs Dogleg nonlinear optimization
- */
-class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
-
-public:
-
-  typedef boost::shared_ptr<DoglegParams> SharedParams;
-  typedef boost::shared_ptr<DoglegState> SharedState;
-  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
-
-  /// @name Standard interface
-  /// @{
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters. For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  DoglegOptimizer(const NonlinearFactorGraph& graph,
-      const DoglegParams& params = DoglegParams()) :
-    SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-    params_(new DoglegParams(params)) {}
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters. For convenience this
-   * version takes plain objects instead of shared pointers, but internally
-   * copies the objects.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
-    SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-    params_(new DoglegParams()) {
-    params_->ordering = ordering; }
-
-  /** Standard constructor, requires a nonlinear factor graph, initial
-   * variable assignments, and optimization parameters.
-   * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
-   * @param params The optimization parameters
-   */
-  DoglegOptimizer(const SharedGraph& graph,
-      const DoglegParams& params = DoglegParams()) :
-    SuccessiveLinearizationOptimizer(graph),
-    params_(new DoglegParams(params)) {}
-
-  /** Access the parameters */
-  virtual NonlinearOptimizer::SharedParams params() const { return params_; }
-
-  /// @}
-
-  /// @name Advanced interface
-  /// @{
-
-  /** Virtual destructor */
-  virtual ~DoglegOptimizer() {}
-
-  /** Perform a single iteration, returning a new NonlinearOptimizer class
-   * containing the updated variable assignments, which may be retrieved with
-   * values().
-   */
-  virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const;
-
-  /** Create an initial state with the specified variable assignment values and
-   * all other default state.
-   */
-  virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const;
-
-  /// @}
-
-protected:
-
-  SharedParams params_;
-};
 
 }

View File

@@ -37,7 +37,7 @@ public:
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
+   * @param initialValues The initial variable assignments
    * @param params The optimization parameters
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
@@ -49,12 +49,12 @@ public:
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
+   * @param initialValues The initial variable assignments
    * @param params The optimization parameters
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
     NonlinearOptimizer(graph), state_(graph, initialValues) {
-    params_.ordering = ordering; }
+    *params_.ordering = ordering; }
 
   /// @}

View File

@@ -27,36 +27,23 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
-  const LevenbergMarquardtState& current = dynamic_cast<const LevenbergMarquardtState&>(*_current);
+void LevenbergMarquardtOptimizer::iterate() const {
 
   // Linearize graph
-  const Ordering& ordering = this->ordering(current.values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, *params_.ordering);
 
   // Check whether to use QR
   bool useQR;
-  if(params_->factorization == LevenbergMarquardtParams::LDL)
+  if(params_.factorization == LevenbergMarquardtParams::LDL)
     useQR = false;
-  else if(params_->factorization == LevenbergMarquardtParams::QR)
+  else if(params_.factorization == LevenbergMarquardtParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization");
 
   // Pull out parameters we'll use
-  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity;
-  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_->lmVerbosity;
-  const double lambdaFactor = params_->lambdaFactor;
-
-  // Variables to update during try_lambda loop
-  double lambda = current.lambda;
-  double next_error = current.error;
-  Values next_values = current.values;
-
-  // Compute dimensions if we haven't done it yet
-  if(!dimensions_)
-    dimensions_.reset(new vector<size_t>(current.values.dims(ordering)));
+  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
+  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_.lmVerbosity;
 
   // Keep increasing lambda until we make make progress
   while(true) {
@@ -68,10 +55,10 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       GaussianFactorGraph dampedSystem(*linear);
       {
         double sigma = 1.0 / sqrt(lambda);
-        dampedSystem.reserve(dampedSystem.size() + dimensions_->size());
+        dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
         // for each of the variables, add a prior
-        for(Index j=0; j<dimensions_->size(); ++j) {
-          size_t dim = (*dimensions_)[j];
+        for(Index j=0; j<dimensions_.size(); ++j) {
+          size_t dim = (dimensions_)[j];
           Matrix A = eye(dim);
           Vector b = zero(dim);
           SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
@@ -86,9 +73,9 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       // Optimize
       VectorValues::shared_ptr delta;
-      if(params_->elimination == LevenbergMarquardtParams::MULTIFRONTAL)
+      if(params_.elimination == LevenbergMarquardtParams::MULTIFRONTAL)
         delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize();
-      else if(params_->elimination == LevenbergMarquardtParams::SEQUENTIAL)
+      else if(params_.elimination == LevenbergMarquardtParams::SEQUENTIAL)
         delta = GaussianSequentialSolver(dampedSystem, useQR).optimize();
       else
         throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
@@ -97,28 +84,28 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta");
 
       // update values
-      Values newValues = current.values.retract(*delta, ordering);
+      Values newValues = state_.values.retract(*delta, *params_.ordering);
 
       // create new optimization state with more adventurous lambda
-      double error = graph_->error(newValues);
+      double error = graph_.error(newValues);
 
       if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
 
-      if(error <= current.error) {
-        next_values.swap(newValues);
-        next_error = error;
-        lambda /= lambdaFactor;
+      if(error <= state_.error) {
+        state_.values.swap(newValues);
+        state_.error = error;
+        state_.lambda /= params_.lambdaFactor;
         break;
       } else {
         // Either we're not cautious, or the same lambda was worse than the current error.
         // The more adventurous lambda was worse too, so make lambda more conservative
         // and keep the same values.
-        if(lambda >= params_->lambdaUpperBound) {
+        if(state_.lambda >= params_.lambdaUpperBound) {
          if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
            cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
          break;
        } else {
-          lambda *= lambdaFactor;
+          state_.lambda *= params_.lambdaFactor;
        }
       }
     } catch(const NegativeMatrixException& e) {
@@ -127,34 +114,20 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
       // Either we're not cautious, or the same lambda was worse than the current error.
       // The more adventurous lambda was worse too, so make lambda more conservative
      // and keep the same values.
-      if(lambda >= params_->lambdaUpperBound) {
+      if(lambda >= params_.lambdaUpperBound) {
        if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
          cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
        break;
      } else {
-        lambda *= lambdaFactor;
+        state_.lambda *= params_.lambdaFactor;
      }
     } catch(...) {
       throw;
     }
   } // end while
 
-  // Create new state with new values and new error
-  LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared<LevenbergMarquardtState>();
-  newState->values = next_values;
-  newState->error = next_error;
-  newState->iterations = current.iterations + 1;
-  newState->lambda = lambda;
-  return newState;
-}
-
-/* ************************************************************************* */
-NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const {
-  SharedState initial = boost::make_shared<LevenbergMarquardtState>();
-  defaultInitialState(initialValues, *initial);
-  initial->lambda = params_->lambdaInitial;
-  return initial;
+  // Increment the iteration counter
+  ++state_.iterations;
 }
 
 } /* namespace gtsam */
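
A note on the damping, for context (standard Levenberg-Marquardt; the equivalence below is not specific to this change): each prior appended to dampedSystem has unit Jacobian A = eye(dim), zero right-hand side b = zero(dim), and isotropic noise sigma = 1/sqrt(lambda), so its contribution to the normal equations is A' * (1/sigma^2) * A = lambda * I. The inner loop is therefore solving the usual damped system

    (J' J + lambda I) dx = J' b

where [J | b] is the linearized system, raising lambda by lambdaFactor when a step fails to decrease the error and lowering it when the step is accepted.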

View File

@@ -24,7 +24,6 @@ namespace gtsam {
 /**
  * This class performs Levenberg-Marquardt nonlinear optimization
- * TODO: use make_shared
  */
 class LevenbergMarquardtOptimizer : public NonlinearOptimizer {
@@ -40,24 +39,24 @@ public:
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
+   * @param initialValues The initial variable assignments
    * @param params The optimization parameters
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters. For convenience this
    * version takes plain objects instead of shared pointers, but internally
    * copies the objects.
    * @param graph The nonlinear factor graph to optimize
-   * @param values The initial variable assignments
+   * @param initialValues The initial variable assignments
    * @param params The optimization parameters
   */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-    NonlinearOptimizer(graph), state_(graph, initialValues) {
-    params_.ordering = ordering; }
+    NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) {
+    *params_.ordering = ordering; }
 
   /// @}
@@ -74,21 +73,18 @@ public:
   virtual void iterate() const;
 
   /** Access the parameters */
-  const GaussNewtonParams& params() const { return params_; }
+  const LevenbergMarquardtParams& params() const { return params_; }
 
   /** Access the last state */
-  const NonlinearOptimizerState& state() const { return state_; }
+  const LevenbergMarquardtState& state() const { return state_; }
 
   /// @}
 
 protected:
 
-  typedef boost::shared_ptr<const std::vector<size_t> > SharedDimensions;
-
-  mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it
-
   LevenbergMarquardtParams params_;
   LevenbergMarquardtState state_;
+  std::vector<size_t> dimensions_;
 
   /** Access the parameters (base class version) */
   virtual const NonlinearOptimizerParams& _params() const { return params_; }
@@ -97,7 +93,7 @@ protected:
   virtual const NonlinearOptimizerState& _state() const { return state_; }
 
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
-  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+  LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
       params.ordering = graph.orderingCOLAMD(values);
     return params;