added "disableInnerIterations" and "modelFidelity" computations

release/4.3a0
Luca 2014-02-12 16:16:25 -05:00
parent b89c33977a
commit d13ef17ce8
2 changed files with 21 additions and 24 deletions

View File

@ -99,7 +99,7 @@ void LevenbergMarquardtOptimizer::iterate() {
cout << "linearizing = " << endl; cout << "linearizing = " << endl;
GaussianFactorGraph::shared_ptr linear = linearize(); GaussianFactorGraph::shared_ptr linear = linearize();
double modelMismatch = std::numeric_limits<size_t>::max(); double modelFidelity = std::numeric_limits<size_t>::max();
// Keep increasing lambda until we make progress // Keep increasing lambda until we make progress
while (true) { while (true) {
@ -156,32 +156,25 @@ void LevenbergMarquardtOptimizer::iterate() {
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
// oldCost - newCost // cost change in the original, possibly nonlinear system (old - new)
double costChange = state_.error - error; double costChange = state_.error - error;
std::cout << "costChange " << costChange << std::endl;
// newLinearizedCost (scalar) = 1/2 [f + J * step]^2 = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ] // cost change in the linearized system (old - new)
// linearizedCostChange = oldCost - newLinearizedCost = f'f/2 - 1/2 [ f'f + 2f'J * step + step' * J' * J * step] std::cout << "graph_ " << graph_.size() << std::endl;
// = -f'J * step - step' * J' * J * step / 2 = -(f' + modelResidual') * (modelResidual) std::cout << "linear " << linear->size() << std::endl;
// (with modelResidual = J * step) linear->print("linear");
// Errors modelResidualList = (*linear) * delta; // modelResidual = A * delta std::cout << "linear->error(delta) " << linear->error(delta) << std::endl;
// modelResidualList.print(""); double linearizedCostChange = state_.error - linear->error(delta);
// Vector modelResidual = concatVectors(modelResidualList); // TODO: is this an ordered list? std::cout << "linearizedCostChange " << linearizedCostChange << std::endl;
//cout << "modelResidual: " << modelResidual << endl;
// cout << "linear->jacobian().second: " << linear->jacobian().second << endl;
// cout << "linear->augmentedJacobian().second: " << linear->augmentedJacobian() << endl;
// cout << "linear->augmentedHessian().second: " << linear->augmentedHessian() << endl;
// Vector residuals = linear->jacobian().second; // TODO: optimize this computation, TODO: is there a minus sign?
// double linearizedCostChange = dot(- modelResidual, (- residuals + modelResidual / 2.0) );
//
// // Measure of mismatch between original (usually nonlinear) system and its linearized version
// modelMismatch = costChange / linearizedCostChange;
modelFidelity = costChange / linearizedCostChange;
std::cout << "modelFidelity " << modelFidelity << std::endl;
if (error <= state_.error) { if (error <= state_.error) {
state_.values.swap(newValues); state_.values.swap(newValues);
state_.error = error; state_.error = error;
decreaseLambda(modelMismatch); decreaseLambda(modelFidelity);
break; break;
} else { } else {
// Either we're not cautious, or the same lambda was worse than the current error. // Either we're not cautious, or the same lambda was worse than the current error.
@ -195,7 +188,7 @@ void LevenbergMarquardtOptimizer::iterate() {
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
cout << "increasing lambda: old error (" << state_.error << ") new error (" << error << ")" << endl; cout << "increasing lambda: old error (" << state_.error << ") new error (" << error << ")" << endl;
increaseLambda(modelMismatch); increaseLambda(modelFidelity);
} }
} }
} catch (IndeterminantLinearSystemException& e) { } catch (IndeterminantLinearSystemException& e) {
@ -210,9 +203,12 @@ void LevenbergMarquardtOptimizer::iterate() {
cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
break; break;
} else { } else {
increaseLambda(modelMismatch); increaseLambda(modelFidelity);
} }
} }
if(params_.disableInnerIterations)
break;
// Frank asks: why would we do that? // Frank asks: why would we do that?
// catch(...) { // catch(...) {
// throw; // throw;

View File

@ -49,11 +49,12 @@ public:
double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
double lambdaLowerBound; ///< The minimum lambda used in LM (default: 0) double lambdaLowerBound; ///< The minimum lambda used in LM (default: 0)
VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity
bool disableInnerIterations; ///< If true, skip the inner iterations on the linearized system (only a single trial per outer iteration is performed)
std::string logFile; ///< an optional CSV log file, with [iteration, time, error, lambda] std::string logFile; ///< an optional CSV log file, with [iteration, time, error, lambda]
LevenbergMarquardtParams() : LevenbergMarquardtParams() :
lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lambdaLowerBound(0.0), verbosityLM( lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lambdaLowerBound(0.0),
SILENT) { verbosityLM(SILENT), disableInnerIterations(false) {
} }
virtual ~LevenbergMarquardtParams() { virtual ~LevenbergMarquardtParams() {
} }