Continuing code simplification / restructuring

release/4.3a0
Richard Roberts 2012-04-05 02:45:47 +00:00
parent 48e277a095
commit c83a399bba
10 changed files with 98 additions and 114 deletions

View File

@@ -27,11 +27,13 @@ using namespace std;
 namespace gtsam {
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const {
+NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
+  const DoglegState& current = dynamic_cast<const DoglegState&>(*_current);
   // Linearize graph
-  const Ordering& ordering = *ordering(current->values);
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current->values, ordering);
+  const Ordering& ordering = this->ordering(current.values);
+  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
   // Check whether to use QR
   bool useQR;
@@ -50,24 +52,24 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize
   if(params_->elimination == DoglegParams::MULTIFRONTAL) {
     GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, *values(), ordering, error(), dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, current.values, ordering, current.error, dlVerbose);
   } else if(params_->elimination == DoglegParams::SEQUENTIAL) {
     GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, *values(), ordering, error(), dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, current.values, ordering, current.error, dlVerbose);
   } else {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
   }
+  // Maybe show output
+  if(params_->verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
   // Create new state with new values and new error
-  DoglegOptimizer::SharedState newState = boost::make_shared<DoglegState>();
-  // Update values
-  newState->values = current->values.retract(result.dx_d, ordering);
+  SharedState newState = boost::make_shared<DoglegState>();
+  newState->values = current.values.retract(result.dx_d, ordering);
   newState->error = result.f_error;
-  newState->iterations = current->iterations + 1;
+  newState->iterations = current.iterations + 1;
   newState->Delta = result.Delta;
   return newState;
@@ -76,7 +78,7 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize
 /* ************************************************************************* */
 NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const {
   SharedState initial = boost::make_shared<DoglegState>();
-  defaultInitialState(initialValues);
+  defaultInitialState(initialValues, *initial);
   initial->Delta = params_->deltaInitial;
   return initial;
 }
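For orientation, here is a minimal sketch of how the reshaped API is driven after this change: an explicit state object is threaded through iterate() instead of the optimizer mutating its own values and error. The names runDogleg, graph and initialValues are placeholders, and the sketch assumes initialState() and iterate() remain publicly callable as in the base interface.

#include <gtsam/nonlinear/DoglegOptimizer.h>

// Sketch only: each iterate() call consumes the previous state and returns a new one.
gtsam::Values runDogleg(const gtsam::NonlinearFactorGraph& graph,
                        const gtsam::Values& initialValues) {
  gtsam::DoglegOptimizer optimizer(graph);  // DoglegParams default-constructed
  gtsam::NonlinearOptimizer::SharedState state = optimizer.initialState(initialValues);
  for (size_t i = 0; i < 10; ++i)           // fixed budget for the sketch; real code checks convergence
    state = optimizer.iterate(state);       // returns a freshly allocated DoglegState
  return state->values;
}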

View File

@@ -67,7 +67,7 @@ class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
 public:
-  typedef boost::shared_ptr<const DoglegParams> SharedParams;
+  typedef boost::shared_ptr<DoglegParams> SharedParams;
   typedef boost::shared_ptr<DoglegState> SharedState;
   typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
@@ -83,8 +83,7 @@ public:
    * @param params The optimization parameters
    */
   DoglegOptimizer(const NonlinearFactorGraph& graph,
-      const DoglegParams& params = DoglegParams(),
-      const Ordering& ordering = Ordering()) :
+      const DoglegParams& params = DoglegParams()) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
     params_(new DoglegParams(params)) {}
@@ -98,7 +97,8 @@ public:
    */
   DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-    params_(new DoglegParams()) {}
+    params_(new DoglegParams()) {
+    params_->ordering = ordering; }
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.
@@ -107,8 +107,7 @@ public:
    * @param params The optimization parameters
    */
   DoglegOptimizer(const SharedGraph& graph,
-      const DoglegParams& params = DoglegParams(),
-      const SharedOrdering& ordering = SharedOrdering()) :
+      const DoglegParams& params = DoglegParams()) :
     SuccessiveLinearizationOptimizer(graph),
     params_(new DoglegParams(params)) {}
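Since the ordering argument has been dropped from the params-taking constructors, an explicit elimination ordering now travels either inside the params (via the boost::optional ordering field added to SuccessiveLinearizationParams later in this commit, which DoglegParams is assumed to inherit) or through the dedicated ordering constructor; leaving both out falls back to COLAMD. A hedged usage sketch, with graph and myOrdering as placeholders:

gtsam::DoglegParams params;
params.ordering = myOrdering;                       // optional<Ordering>: fix the ordering explicitly
gtsam::DoglegOptimizer viaParams(graph, params);

gtsam::DoglegOptimizer viaCtor(graph, myOrdering);  // equivalently, the ordering-only constructor
                                                    // stores it into params_->ordering

gtsam::DoglegOptimizer colamd(graph);               // no ordering anywhere: COLAMD is computed lazily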

View File

@@ -25,25 +25,28 @@ using namespace std;
 namespace gtsam {
 /* ************************************************************************* */
-NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const {
+NonlinearOptimizer::SharedState GaussNewtonOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
+  const GaussNewtonState& current = dynamic_cast<const GaussNewtonState&>(*_current);
   // Linearize graph
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_);
+  const Ordering& ordering = this->ordering(current.values);
+  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
   // Check whether to use QR
   bool useQR;
-  if(gnParams_->factorization == GaussNewtonParams::LDL)
+  if(params_->factorization == GaussNewtonParams::LDL)
     useQR = false;
-  else if(gnParams_->factorization == GaussNewtonParams::QR)
+  else if(params_->factorization == GaussNewtonParams::QR)
     useQR = true;
   else
     throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization");
   // Optimize
   VectorValues::shared_ptr delta;
-  if(gnParams_->elimination == GaussNewtonParams::MULTIFRONTAL)
+  if(params_->elimination == GaussNewtonParams::MULTIFRONTAL)
     delta = GaussianMultifrontalSolver(*linear, useQR).optimize();
-  else if(gnParams_->elimination == GaussNewtonParams::SEQUENTIAL)
+  else if(params_->elimination == GaussNewtonParams::SEQUENTIAL)
     delta = GaussianSequentialSolver(*linear, useQR).optimize();
   else
     throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination");
@@ -51,21 +54,19 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const {
   // Maybe show output
   if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta");
-  // Update values
-  SharedValues newValues(new Values(values_->retract(*delta, *ordering_)));
-  double newError = graph_->error(*newValues);
-  // Create new optimizer with new values and new error
-  NonlinearOptimizer::auto_ptr newOptimizer(new GaussNewtonOptimizer(
-      *this, newValues, newError));
-  return newOptimizer;
+  // Create new state with new values and new error
+  SharedState newState = boost::make_shared<GaussNewtonState>();
+  newState->values = current.values.retract(*delta, ordering);
+  newState->error = graph_->error(newState->values);
+  newState->iterations = current.iterations + 1;
+  return newState;
 }
 /* ************************************************************************* */
 NonlinearOptimizer::SharedState GaussNewtonOptimizer::initialState(const Values& initialValues) const {
   SharedState initial = boost::make_shared<GaussNewtonState>();
-  defaultInitialState(*initial);
+  defaultInitialState(initialValues, *initial);
   return initial;
 }

View File

@@ -39,7 +39,7 @@ class GaussNewtonOptimizer : public SuccessiveLinearizationOptimizer {
 public:
-  typedef boost::shared_ptr<const GaussNewtonParams> SharedParams;
+  typedef boost::shared_ptr<GaussNewtonParams> SharedParams;
   /// @name Standard interface
   /// @{
@@ -53,8 +53,7 @@ public:
    * @param params The optimization parameters
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph,
-      const GaussNewtonParams& params = GaussNewtonParams(),
-      const Ordering& ordering = Ordering()) :
+      const GaussNewtonParams& params = GaussNewtonParams()) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
     params_(new GaussNewtonParams(params)) {}
@@ -68,7 +67,8 @@ public:
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-    params_(new GaussNewtonParams()) {}
+    params_(new GaussNewtonParams()) {
+    params_->ordering = ordering; }
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.
@@ -77,8 +77,7 @@ public:
    * @param params The optimization parameters
    */
   GaussNewtonOptimizer(const SharedGraph& graph,
-      const GaussNewtonParams& params = GaussNewtonParams(),
-      const SharedOrdering& ordering = SharedOrdering()) :
+      const GaussNewtonParams& params = GaussNewtonParams()) :
     SuccessiveLinearizationOptimizer(graph),
     params_(new GaussNewtonParams(params)) {}

View File

@@ -27,29 +27,36 @@ using namespace std;
 namespace gtsam {
 /* ************************************************************************* */
-NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const {
+NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const {
+  const LevenbergMarquardtState& current = dynamic_cast<const LevenbergMarquardtState&>(*_current);
   // Linearize graph
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_);
+  const Ordering& ordering = this->ordering(current.values);
+  GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering);
   // Check whether to use QR
   bool useQR;
-  if(lmParams_->factorization == LevenbergMarquardtParams::LDL)
+  if(params_->factorization == LevenbergMarquardtParams::LDL)
    useQR = false;
-  else if(lmParams_->factorization == LevenbergMarquardtParams::QR)
+  else if(params_->factorization == LevenbergMarquardtParams::QR)
    useQR = true;
   else
    throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization");
   // Pull out parameters we'll use
   const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity;
-  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = lmParams_->lmVerbosity;
-  const double lambdaFactor = lmParams_->lambdaFactor;
+  const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_->lmVerbosity;
+  const double lambdaFactor = params_->lambdaFactor;
   // Variables to update during try_lambda loop
-  double lambda = lambda_;
-  double next_error = error();
-  SharedValues next_values = values();
+  double lambda = current.lambda;
+  double next_error = current.error;
+  Values next_values = current.values;
+  // Compute dimensions if we haven't done it yet
+  if(!dimensions_)
+    dimensions_.reset(new vector<size_t>(current.values.dims(ordering)));
   // Keep increasing lambda until we make make progress
   while(true) {
@@ -79,9 +86,9 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
     // Optimize
     VectorValues::shared_ptr delta;
-    if(lmParams_->elimination == LevenbergMarquardtParams::MULTIFRONTAL)
+    if(params_->elimination == LevenbergMarquardtParams::MULTIFRONTAL)
      delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize();
-    else if(lmParams_->elimination == LevenbergMarquardtParams::SEQUENTIAL)
+    else if(params_->elimination == LevenbergMarquardtParams::SEQUENTIAL)
      delta = GaussianSequentialSolver(dampedSystem, useQR).optimize();
     else
      throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
@@ -90,15 +97,15 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
     if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta");
     // update values
-    SharedValues newValues(new Values(values_->retract(*delta, *ordering_)));
+    Values newValues = current.values.retract(*delta, ordering);
     // create new optimization state with more adventurous lambda
-    double error = graph_->error(*newValues);
+    double error = graph_->error(newValues);
     if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
-    if( error <= error_ ) {
-      next_values = newValues;
+    if(error <= current.error) {
+      next_values.swap(newValues);
      next_error = error;
      lambda /= lambdaFactor;
      break;
@@ -106,7 +113,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
      // Either we're not cautious, or the same lambda was worse than the current error.
      // The more adventurous lambda was worse too, so make lambda more conservative
      // and keep the same values.
-      if(lambda >= lmParams_->lambdaUpperBound) {
+      if(lambda >= params_->lambdaUpperBound) {
        if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
          cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
        break;
@@ -120,7 +127,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
      // Either we're not cautious, or the same lambda was worse than the current error.
      // The more adventurous lambda was worse too, so make lambda more conservative
      // and keep the same values.
-      if(lambda >= lmParams_->lambdaUpperBound) {
+      if(lambda >= params_->lambdaUpperBound) {
        if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
          cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
        break;
@@ -136,8 +143,8 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
   LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared<LevenbergMarquardtState>();
   newState->values = next_values;
   newState->error = next_error;
-  newState->iterations = current->iterations + 1;
-  newState->Delta = lambda;
+  newState->iterations = current.iterations + 1;
+  newState->lambda = lambda;
   return newState;
 }
@@ -145,7 +152,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli
 /* ************************************************************************* */
 NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const {
   SharedState initial = boost::make_shared<LevenbergMarquardtState>();
-  defaultInitialState(*initial);
+  defaultInitialState(initialValues, *initial);
   initial->lambda = params_->lambdaInitial;
   return initial;
 }
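Stripped of the linearize/damp/solve machinery, the try_lambda loop above follows the usual damping schedule: accept the step and relax lambda when the error drops, otherwise increase lambda and retry until lambdaUpperBound. A self-contained sketch of that control flow (not code from this commit; errorAt stands in for one damped linearize-solve-retract-evaluate pass, and growing lambda by lambdaFactor on failure is an assumption consistent with the parameters shown):

// Returns the accepted error; lambda is updated in place.
double lmDampingSchedule(double currentError, double& lambda,
                         double lambdaFactor, double lambdaUpperBound,
                         double (*errorAt)(double lambda)) {
  while (true) {
    double newError = errorAt(lambda);   // one damped Gauss-Newton step at this lambda
    if (newError <= currentError) {
      lambda /= lambdaFactor;            // success: be more adventurous next iteration
      return newError;
    }
    if (lambda >= lambdaUpperBound)      // cannot decrease error: give up, keep old values
      return currentError;
    lambda *= lambdaFactor;              // failure: damp more and try again
  }
}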

View File

@@ -76,7 +76,7 @@ class LevenbergMarquardtOptimizer : public SuccessiveLinearizationOptimizer {
 public:
-  typedef boost::shared_ptr<const LevenbergMarquardtParams> SharedParams;
+  typedef boost::shared_ptr<LevenbergMarquardtParams> SharedParams;
   typedef boost::shared_ptr<LevenbergMarquardtState> SharedState;
   typedef boost::shared_ptr<LevenbergMarquardtOptimizer> shared_ptr;
@@ -92,8 +92,7 @@ public:
    * @param params The optimization parameters
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph,
-      const LevenbergMarquardtParams& params = LevenbergMarquardtParams(),
-      const Ordering& ordering = Ordering()) :
+      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
     params_(new LevenbergMarquardtParams(params)) {}
@@ -105,10 +104,10 @@ public:
    * @param values The initial variable assignments
    * @param params The optimization parameters
    */
-  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph,
-      const Ordering& ordering) :
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) :
     SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))),
-    params_(new LevenbergMarquardtParams()) {}
+    params_(new LevenbergMarquardtParams()) {
+    params_->ordering = ordering; }
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.
@@ -117,8 +116,7 @@ public:
    * @param params The optimization parameters
    */
   LevenbergMarquardtOptimizer(const SharedGraph& graph,
-      const LevenbergMarquardtParams& params = LevenbergMarquardtParams(),
-      const SharedOrdering& ordering = SharedOrdering()) :
+      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
     SuccessiveLinearizationOptimizer(graph),
     params_(new LevenbergMarquardtParams(params)) {}
@@ -151,7 +149,7 @@ protected:
   typedef boost::shared_ptr<const std::vector<size_t> > SharedDimensions;
   SharedParams params_;
-  const SharedDimensions dimensions_;
+  mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it
 };
 }

View File

@@ -24,25 +24,11 @@ using namespace std;
 namespace gtsam {
-/* ************************************************************************* */
-NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedGraph& newGraph) const {
-  shared_ptr result(this->clone());
-  result->graph_ = newGraph;
-  return result;
-}
-/* ************************************************************************* */
-NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedParams& newParams) const {
-  shared_ptr result(this->clone());
-  result->params_ = newParams;
-  return result;
-}
 /* ************************************************************************* */
 NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const SharedState& initial) const {
   const SharedParams& params = this->params();
-  double currentError = initial->error();
+  double currentError = initial->error;
   // check if we're already close enough
   if(currentError <= params->errorTol) {
@@ -52,11 +38,11 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared
   }
   // Maybe show output
-  if (params->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("Initial values");
-  if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << this->error() << endl;
+  if (params->verbosity >= NonlinearOptimizerParams::VALUES) initial->values.print("Initial values");
+  if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << initial->error << endl;
   // Return if we already have too many iterations
-  if(this->iterations() >= params->maxIterations)
+  if(initial->iterations >= params->maxIterations)
     return initial;
   // Iterative loop
@@ -67,8 +53,8 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared
     next = this->iterate(next);
     // Maybe show output
-    if (params->verbosity >= NonlinearOptimizerParams::VALUES) next->values->print("newValues");
-    if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl;
+    if(params->verbosity >= NonlinearOptimizerParams::VALUES) next->values.print("newValues");
+    if(params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl;
   } while(next->iterations < params->maxIterations &&
       !checkConvergence(params->relativeErrorTol, params->absoluteErrorTol,
          params->errorTol, currentError, next->error, params->verbosity));
@@ -83,10 +69,10 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared
 }
 /* ************************************************************************* */
-void NonlinearOptimizer::defaultInitialState(NonlinearOptimizerState& initial) const {
-  state.values = initialValues;
-  state.error = graph_->error(initialValues);
-  state.iterations = 0;
+void NonlinearOptimizer::defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const {
+  initialState.values = initialValues;
+  initialState.error = graph_->error(initialValues);
+  initialState.iterations = 0;
 }
 /* ************************************************************************* */
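defaultInitialState() now receives both the initial values and the state object to fill, so a derived optimizer's initialState() reduces to allocating its own state type, delegating the shared fields, and setting whatever it adds on top, exactly as the Dogleg and Levenberg-Marquardt versions above do. A hypothetical subclass would follow the same shape (MyOptimizer, MyState, stepSize and initialStepSize are placeholders):

// Placeholder subclass illustrating the new defaultInitialState contract.
NonlinearOptimizer::SharedState MyOptimizer::initialState(const Values& initialValues) const {
  boost::shared_ptr<MyState> initial = boost::make_shared<MyState>();
  defaultInitialState(initialValues, *initial);   // fills values, error, iterations = 0
  initial->stepSize = params_->initialStepSize;   // hypothetical optimizer-specific field
  return initial;
}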

View File

@@ -36,7 +36,7 @@ public:
     LINEAR
   };
-  int maxIterations; ///< The maximum iterations to stop iterating (default 100)
+  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
   double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
   double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
   double errorTol; ///< The maximum total error to stop iterating (default 0.0)
@@ -80,9 +80,6 @@ public:
   /** Virtual destructor */
   virtual ~NonlinearOptimizerState() {}
-  /** Clone the state (i.e. make a copy of the derived class) */
-  virtual boost::shared_ptr<NonlinearOptimizerState> clone() = 0;
 };
@@ -233,7 +230,7 @@ protected:
   SharedState defaultOptimize(const SharedState& initial) const;
   /** Initialize a state, using the current error and 0 iterations */
-  void defaultInitialState(SharedState& initial) const;
+  void defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const;
   /** Constructor for initial construction of base classes.
    */
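With clone() removed from NonlinearOptimizerState, derived states are plain data holders: they inherit values, error and iterations from the base class and only add optimizer-specific fields, as DoglegState (Delta) and LevenbergMarquardtState (lambda) do elsewhere in this commit. A hypothetical derived state would simply be:

// Placeholder derived state: no clone() override is needed any more.
class MyState : public NonlinearOptimizerState {
public:
  double stepSize;  // hypothetical extra field, analogous to Delta or lambda
};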

View File

@@ -22,20 +22,20 @@
 namespace gtsam {
 /* ************************************************************************* */
-const SuccessiveLinearizationOptimizer::SharedOrdering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const {
+const Ordering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const {
   if(!ordering_) {
     SharedParams params =
-        boost::dynamic_pointer_cast<const SuccessiveLinearizationParams>(params());
+        boost::dynamic_pointer_cast<const SuccessiveLinearizationParams>(this->params());
     // If we're using a COLAMD ordering, compute it
-    if(!params.ordering)
-      ordering_ = graph_->orderingCOLAMD(values);
+    if(params->ordering)
+      ordering_ = params->ordering;
     else
-      ordering_ = params.ordering;
+      ordering_ = *graph_->orderingCOLAMD(values);
   }
-  return ordering_;
+  return *ordering_;
 }
 } /* namespace gtsam */
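The rewritten ordering() is a lazy cache: a const accessor fills a mutable boost::optional on first use, preferring the ordering fixed in the params and falling back to COLAMD. The same idiom in isolation, with an int standing in for Ordering and computeOrdering() standing in for the params/COLAMD lookup:

#include <boost/optional.hpp>

// Lazy, cached computation inside a const method: the pattern used for ordering_
// here and, via a shared_ptr, for dimensions_ in LevenbergMarquardtOptimizer.
class OrderingCache {
public:
  const int& ordering() const {
    if (!ordering_)
      ordering_ = computeOrdering();   // computed once, reused on later calls
    return *ordering_;
  }
private:
  static int computeOrdering() { return 42; }   // stand-in for COLAMD / params lookup
  mutable boost::optional<int> ordering_;       // mutable: a cache, not logical state
};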

View File

@@ -24,6 +24,9 @@
 namespace gtsam {
+class SuccessiveLinearizationParams;
+class SuccessiveLinearizationState;
 /**
  * This is an intermediate base class for NonlinearOptimizers that use direct
  * solving, i.e. factorization. This class is here to reduce code duplication
@@ -33,23 +36,18 @@ namespace gtsam {
 class SuccessiveLinearizationOptimizer : public NonlinearOptimizer {
 public:
-  class SuccessiveLinearizationParams;
-  class SuccessiveLinearizationState;
   typedef boost::shared_ptr<const SuccessiveLinearizationParams> SharedParams;
   typedef boost::shared_ptr<const SuccessiveLinearizationOptimizer> shared_ptr;
 protected:
-  typedef boost::shared_ptr<const Ordering> SharedOrdering;
   SuccessiveLinearizationOptimizer(const SharedGraph& graph) : NonlinearOptimizer(graph) {}
-  const SharedOrdering& ordering(const Values& values) const;
+  const Ordering& ordering(const Values& values) const;
 private:
-  SharedOrdering ordering_;
+  mutable boost::optional<Ordering> ordering_; // Mutable because we set/compute it when needed and cache it
 };
 class SuccessiveLinearizationParams : public NonlinearOptimizerParams {
@@ -66,12 +64,9 @@ public:
     QR,
   };
-  /** See SuccessiveLinearizationParams::ordering */
-  typedef boost::shared_ptr<const Ordering> SharedOrdering;
   Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL)
   Factorization factorization; ///< The numerical factorization (default: LDL)
-  SharedOrdering ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty)
+  boost::optional<Ordering> ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty)
   SuccessiveLinearizationParams() :
     elimination(MULTIFRONTAL), factorization(LDL) {}