added timing instrumentation

release/4.3a0
Chris Beall 2012-02-23 04:17:17 +00:00
parent 850c9560d9
commit e7562ac07f
2 changed files with 30 additions and 0 deletions
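The tic(id, label) / toc(id, label) calls added throughout are GTSAM's timing utilities (base/timing.h): tic opens a named timing section and the matching toc closes it, accumulating elapsed time across calls. A minimal sketch of the pattern, assuming a flat label-keyed accumulator rather than GTSAM's actual hierarchical timing tree:

#include <chrono>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-ins for the tic/toc used in this commit: accumulate
// wall-clock seconds per label. The integer id is accepted but unused here.
static std::map<std::string, double> g_totals;
static std::map<std::string, std::chrono::steady_clock::time_point> g_starts;

void tic(int /*id*/, const std::string& label) {
  g_starts[label] = std::chrono::steady_clock::now();  // mark section start
}

void toc(int /*id*/, const std::string& label) {
  const auto stop = std::chrono::steady_clock::now();
  g_totals[label] += std::chrono::duration<double>(stop - g_starts[label]).count();
}

void tictoc_print() {
  for (const auto& entry : g_totals)  // dump accumulated totals per label
    std::cout << entry.first << ": " << entry.second << " s\n";
}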


@@ -158,10 +158,16 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose) {
// Compute steepest descent and Newton's method points
tic(0, "Steepest Descent");
VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
toc(0, "Steepest Descent");
tic(1, "optimize");
VectorValues dx_n = optimize(Rd);
toc(1, "optimize");
tic(2, "jfg error");
const GaussianFactorGraph jfg(Rd);
const double M_error = jfg.error(VectorValues::Zero(dx_u));
toc(2, "jfg error");
// Result to return
IterationResult result;
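The two endpoints timed above span the dogleg path: dx_u is the steepest descent point of the linear model and dx_n the Gauss-Newton point. Assuming the textbook Powell dogleg, which is what ComputeDoglegPoint evaluates in the loop below, the step for trust-region radius Delta is

\delta_d(\Delta) = \begin{cases}
\delta_n & \text{if } \|\delta_n\| \le \Delta \\
(\Delta/\|\delta_u\|)\,\delta_u & \text{if } \|\delta_u\| \ge \Delta \\
\delta_u + \tau(\delta_n - \delta_u) & \text{otherwise, with } \tau \in [0,1] \text{ chosen so } \|\delta_d\| = \Delta
\end{cases}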
@@ -169,19 +175,27 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
bool stay = true;
enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction; // Used to prevent alternating between increasing and decreasing in one iteration
while(stay) {
tic(3, "Dog leg point");
// Compute dog leg point
result.dx_d = ComputeDoglegPoint(Delta, dx_u, dx_n, verbose);
toc(3, "Dog leg point");
if(verbose) cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
tic(4, "retract");
// Compute expmapped solution
const VALUES x_d(x0.retract(result.dx_d, ordering));
toc(4, "retract");
tic(5, "decrease in f");
// Compute decrease in f
result.f_error = f.error(x_d);
toc(5, "decrease in f");
tic(6, "decrease in M");
// Compute decrease in M
const double new_M_error = jfg.error(result.dx_d);
toc(6, "decrease in M");
if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
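The f and M error drops computed in the two timed sections above feed the gain ratio that the next hunk thresholds at 0.75 and 0.25. The commit shows only the error terms, so the exact expression is assumed here to be the conventional trust-region one:

\rho = \frac{f(x_0) - f(x_d)}{M(0) - M(\delta_d)}

A value near 1 means the linear model M predicted the nonlinear cost f well, so the trust region can grow; a small or negative value means the model overpromised and Delta must shrink.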
@@ -195,6 +209,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "rho = " << rho << endl;
if(rho >= 0.75) {
tic(7, "Rho >= 0.75");
// M agrees very well with f, so try to increase Delta
const double dx_d_norm = result.dx_d.vector().norm();
const double newDelta = std::max(Delta, 3.0 * dx_d_norm); // Compute new Delta
@@ -212,12 +227,14 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
assert(false); }
Delta = newDelta; // Update Delta from new Delta
toc(7, "Rho >= 0.75");
} else if(0.75 > rho && rho >= 0.25) {
// M agrees so-so with f, keep the same Delta
stay = false;
} else if(0.25 > rho && rho >= 0.0) {
tic(8, "0.25 > Rho >= 0");
// M does not agree well with f, decrease Delta until it does
double newDelta;
if(Delta > 1e-5)
@@ -233,9 +250,11 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
assert(false); }
Delta = newDelta; // Update Delta from new Delta
toc(8, "0.25 > Rho >= 0");
}
else {
tic(9, "Rho < 0");
// f actually increased, so keep decreasing Delta until f does not increase
assert(0.0 > rho);
if(Delta > 1e-5) {
@@ -246,6 +265,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "Warning: Dog leg stopping because cannot decrease error with minimum Delta" << endl;
stay = false;
}
toc(9, "Rho < 0");
}
}
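Taken together, the timed branches implement a standard trust-region radius update. A hypothetical condensation of just the radius rule (illustration only: the elided lines hide the actual shrink factor, assumed here to be 0.5, and the stay/retry behavior in the real code also depends on the adaptation mode):

#include <algorithm>

// Sketch of the Delta update keyed on the gain ratio rho.
double adaptDelta(double Delta, double rho, double dx_d_norm) {
  if (rho >= 0.75)                              // model agrees very well with f
    return std::max(Delta, 3.0 * dx_d_norm);    // allow the region to grow
  if (rho >= 0.25)                              // so-so agreement
    return Delta;                               // keep the same Delta
  return (Delta > 1e-5) ? 0.5 * Delta : Delta;  // poor fit or f increased: shrink
}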
@@ -258,20 +278,28 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
template<class M>
VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
tic(0, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
VectorValues grad = *allocateVectorValues(Rd);
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(0, "Compute Gradient");
tic(1, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(1, "Compute R*g");
tic(2, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(2, "Compute minimizing step size");
tic(3, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(3, "Compute point");
return grad;
}
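The step size computed above is the exact minimizer of the linear model along the gradient direction: with g the gradient at zero and R the square-root information factor, the code evaluates

\alpha^* = -\frac{g^\top g}{(Rg)^\top (Rg)}, \qquad \delta_u = \alpha^* g,

so the scal call scales the gradient in place by alpha* and returns it as the steepest descent point.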


@@ -559,8 +559,10 @@ ISAM2Result ISAM2<CONDITIONAL, VALUES, GRAPH>::update(
const ISAM2DoglegParams& doglegParams =
boost::get<ISAM2DoglegParams>(params_.optimizationParams);
// Do one Dogleg iteration
tic(1, "Dogleg Iterate");
DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
toc(1, "Dogleg Iterate");
// Update Delta and linear step
doglegDelta_ = doglegResult.Delta;
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
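Because the outer tic(1, "Dogleg Iterate") in update() brackets every section instrumented inside Iterate(), a timing report shows both the total per-update cost and its breakdown. A hypothetical nesting demo using the sketch accumulator from the top of this commit (labels, not ids, key the totals there, so reusing id 1 at both levels is harmless):

tic(1, "Dogleg Iterate");    // outer section, as in ISAM2::update
tic(0, "Steepest Descent");  // inner section, as in Iterate
// ... ComputeSteepestDescentPoint(Rd) ...
toc(0, "Steepest Descent");
toc(1, "Dogleg Iterate");
tictoc_print();              // e.g. "Dogleg Iterate: 1.2 s", "Steepest Descent: 0.3 s"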