From 56300ca23c292f2e1142600fd3de8c0b7dbedc8d Mon Sep 17 00:00:00 2001
From: jingwuOUO
Date: Thu, 22 Oct 2020 18:05:09 -0400
Subject: [PATCH] Fixed unresolved errors and added detailed documentation

---
 gtsam/linear/AcceleratedPowerMethod.h    | 33 +++++++++----------
 gtsam/linear/PowerMethod.h               |  6 ++--
 .../tests/testAcceleratedPowerMethod.cpp |  2 +-
 gtsam/sfm/ShonanAveraging.cpp            | 28 ++++++++--------
 4 files changed, 33 insertions(+), 36 deletions(-)

diff --git a/gtsam/linear/AcceleratedPowerMethod.h b/gtsam/linear/AcceleratedPowerMethod.h
index 2e54775d9..a5148e1d4 100644
--- a/gtsam/linear/AcceleratedPowerMethod.h
+++ b/gtsam/linear/AcceleratedPowerMethod.h
@@ -63,16 +63,10 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
       const Operator &A, const boost::optional<Vector> initial = boost::none,
       const double initialBeta = 0.0)
       : PowerMethod<Operator>(A, initial) {
-    Vector x0;
-    // initialize ritz vector
-    x0 = initial ? initial.get() : Vector::Random(this->dim_);
-    Vector x00 = Vector::Random(this->dim_);
-    x0.normalize();
-    x00.normalize();
-
-    // initialize Ritz eigen vector and previous vector
-    previousVector_ = powerIteration(x0, x00, beta_);
-    this->ritzVector_ = powerIteration(previousVector_, x0, beta_);
+    // initialize Ritz eigen vector and previous vector
+    this->ritzVector_ = initial ? initial.get() : Vector::Random(this->dim_);
+    this->ritzVector_.normalize();
+    previousVector_ = Vector::Zero(this->dim_);
 
     // initialize beta_
     if (!initialBeta) {
@@ -80,9 +74,10 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
     } else {
       beta_ = initialBeta;
     }
-  } 
+  }
 
-  // Update the ritzVector with beta and previous two ritzVector
+  // Update the ritzVector with beta and the previous two ritzVectors, and return
+  // x_{k+1} = (A * x_k - \beta * x_{k-1}) / || A * x_k - \beta * x_{k-1} ||
   Vector powerIteration(const Vector &x1, const Vector &x0,
                         const double beta) const {
     Vector y = this->A_ * x1 - beta * x0;
@@ -90,7 +85,8 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
     return y;
   }
 
-  // Update the ritzVector with beta and previous two ritzVector
+  // Update the ritzVector with beta and the previous two ritzVectors, and return
+  // x_{k+1} = (A * x_k - \beta * x_{k-1}) / || A * x_k - \beta * x_{k-1} ||
   Vector powerIteration() const {
     Vector y = powerIteration(this->ritzVector_, previousVector_, beta_);
     previousVector_ = this->ritzVector_;
@@ -101,8 +97,8 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
   void estimateBeta() {
     // set beta
     Vector init_resid = this->ritzVector_;
-    const double up = init_resid.transpose() * this->A_ * init_resid;
-    const double down = init_resid.transpose().dot(init_resid);
+    const double up = init_resid.dot(this->A_ * init_resid);
+    const double down = init_resid.dot(init_resid);
     const double mu = up / down;
     double maxBeta = mu * mu / 4;
     size_t maxIndex;
@@ -111,11 +107,12 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
 
     Matrix R = Matrix::Zero(this->dim_, 10);
     for (size_t i = 0; i < 10; i++) {
+      Vector x0 = Vector::Random(this->dim_);
+      x0.normalize();
+      Vector x00 = Vector::Zero(this->dim_);
       for (size_t k = 0; k < betas.size(); ++k) {
         for (size_t j = 1; j < 10; j++) {
           if (j < 2) {
-            Vector x0 = this->ritzVector_;
-            Vector x00 = previousVector_;
             R.col(0) = powerIteration(x0, x00, betas[k]);
             R.col(1) = powerIteration(R.col(0), x0, betas[k]);
           } else {
@@ -123,8 +120,8 @@ class AcceleratedPowerMethod : public PowerMethod<Operator> {
           }
         }
         const Vector x = R.col(9);
-        const double up = x.transpose() * this->A_ * x;
-        const double down = x.transpose().dot(x);
+        const double up = x.dot(this->A_ * x);
+        const double down = x.dot(x);
         const double mu = up / down;
         if (mu * mu / 4 > maxBeta) {
           maxIndex = k;
diff --git a/gtsam/linear/PowerMethod.h b/gtsam/linear/PowerMethod.h
index acb583296..8a577aa92 100644
--- a/gtsam/linear/PowerMethod.h
+++ b/gtsam/linear/PowerMethod.h
@@ -61,19 +61,19 @@ class PowerMethod {
     // initialize Ritz eigen value
     ritzValue_ = 0.0;
 
-    // initialize Ritz eigen vectors
+    // initialize Ritz eigen vector
     ritzVector_ = Vector::Zero(dim_);
     ritzVector_ = powerIteration(x0);
   }
 
-  // Update the vector by dot product with A_
+  // Update the vector by the product with A_, and return A * x / || A * x ||
   Vector powerIteration(const Vector &x) const {
     Vector y = A_ * x;
     y.normalize();
     return y;
   }
 
-  // Update the vector by dot product with A_
+  // Update the vector by the product with A_, and return A * x / || A * x ||
   Vector powerIteration() const { return powerIteration(ritzVector_); }
 
   // After Perform power iteration on a single Ritz value, if the error is less
diff --git a/gtsam/linear/tests/testAcceleratedPowerMethod.cpp b/gtsam/linear/tests/testAcceleratedPowerMethod.cpp
index b72556e0a..6179d6ca6 100644
--- a/gtsam/linear/tests/testAcceleratedPowerMethod.cpp
+++ b/gtsam/linear/tests/testAcceleratedPowerMethod.cpp
@@ -96,7 +96,7 @@ TEST(AcceleratedPowerMethod, useFactorGraph) {
   AcceleratedPowerMethod<Matrix> apf(L.first, initial);
   apf.compute(50, 1e-4);
   EXPECT_DOUBLES_EQUAL(ev1, apf.eigenvalue(), 1e-8);
-  EXPECT(assert_equal(ev2, apf.eigenvector(), 3e-5));
+  EXPECT(assert_equal(ev2, apf.eigenvector(), 4e-5));
 }
 
 /* ************************************************************************* */
diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp
index 50b38f75b..8d2bf4653 100644
--- a/gtsam/sfm/ShonanAveraging.cpp
+++ b/gtsam/sfm/ShonanAveraging.cpp
@@ -515,42 +515,42 @@ static bool PowerMinimumEigenValue(
     double minEigenvalueNonnegativityTolerance = 10e-4) {
 
   // a. Compute dominant eigenpair of S using power method
-  PowerMethod<Sparse> lmOperator(A);
+  PowerMethod<Sparse> pmOperator(A);
 
-  const bool lmConverged = lmOperator.compute(
+  const bool pmConverged = pmOperator.compute(
       maxIterations, 1e-5);
 
   // Check convergence and bail out if necessary
-  if (!lmConverged) return false;
+  if (!pmConverged) return false;
 
-  const double lmEigenValue = lmOperator.eigenvalue();
+  const double pmEigenValue = pmOperator.eigenvalue();
 
-  if (lmEigenValue < 0) {
+  if (pmEigenValue < 0) {
     // The largest-magnitude eigenvalue is negative, and therefore also the
     // minimum eigenvalue, so just return this solution
-    *minEigenValue = lmEigenValue;
+    *minEigenValue = pmEigenValue;
     if (minEigenVector) {
-      *minEigenVector = lmOperator.eigenvector();
+      *minEigenVector = pmOperator.eigenvector();
       minEigenVector->normalize();  // Ensure that this is a unit vector
     }
     return true;
   }
 
-  const Sparse C = lmEigenValue * Matrix::Identity(A.rows(), A.cols()) - A;
+  const Sparse C = pmEigenValue * Matrix::Identity(A.rows(), A.cols()) - A;
   const boost::optional<Vector> initial = perturb(S.row(0));
-  AcceleratedPowerMethod<Sparse> minShiftedOperator(C, initial);
+  AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);
 
-  const bool minConverged = minShiftedOperator.compute(
-      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue);
+  const bool minConverged = apmShiftedOperator.compute(
+      maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);
 
   if (!minConverged) return false;
 
-  *minEigenValue = lmEigenValue - minShiftedOperator.eigenvalue();
+  *minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
   if (minEigenVector) {
-    *minEigenVector = minShiftedOperator.eigenvector();
+    *minEigenVector = apmShiftedOperator.eigenvector();
     minEigenVector->normalize();  // Ensure that this is a unit vector
   }
-  if (numIterations) *numIterations = minShiftedOperator.nrIterations();
+  if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
 
   return true;
 }
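-- 
For reference, below is a minimal, self-contained sketch of the two ideas this
patch documents, placed after the signature separator so it does not affect
what `git am` applies. It uses plain Eigen rather than the GTSAM classes; the
helper names (powerIterate, acceleratedIterate) and the 3x3 test matrix are
illustrative assumptions, not part of the patch. It shows the plain power step
A * x / || A * x ||, the accelerated step
x_{k+1} = (A * x_k - \beta * x_{k-1}) / || A * x_k - \beta * x_{k-1} ||, and
the spectral shift C = lambda_max * I - A that PowerMinimumEigenValue uses to
turn the minimum eigenvalue of A into the dominant eigenvalue of C.

#include <Eigen/Dense>
#include <iostream>

using Eigen::MatrixXd;
using Eigen::VectorXd;

// Plain power-iteration step: return A * x / || A * x ||.
VectorXd powerIterate(const MatrixXd &A, const VectorXd &x) {
  VectorXd y = A * x;
  y.normalize();
  return y;
}

// Accelerated step with momentum coefficient beta:
// x_{k+1} = (A * x_k - beta * x_{k-1}) / || A * x_k - beta * x_{k-1} ||.
VectorXd acceleratedIterate(const MatrixXd &A, const VectorXd &x1,
                            const VectorXd &x0, double beta) {
  VectorXd y = A * x1 - beta * x0;
  y.normalize();
  return y;
}

int main() {
  // Small symmetric matrix with eigenvalues 3 and 3 +/- sqrt(3),
  // i.e. roughly 4.73, 3.00, 1.27.
  MatrixXd A(3, 3);
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;

  // a. Dominant eigenpair by plain power iteration (what PowerMethod does).
  VectorXd x = VectorXd::Random(3).normalized();
  for (int k = 0; k < 200; ++k) x = powerIterate(A, x);
  const double lambdaMax = x.dot(A * x) / x.dot(x);  // Rayleigh quotient

  // b. Minimum eigenvalue via the shift C = lambdaMax * I - A: the dominant
  // eigenvalue of C is lambdaMax - lambdaMin, so iterate on C and shift back
  // (what PowerMinimumEigenValue does with AcceleratedPowerMethod).
  MatrixXd C = lambdaMax * MatrixXd::Identity(3, 3) - A;
  VectorXd prev = VectorXd::Zero(3);  // x_{k-1}, as in the new constructor
  VectorXd cur = VectorXd::Random(3).normalized();
  const double mu = cur.dot(C * cur) / cur.dot(cur);
  const double beta = mu * mu / 4;    // heuristic bound from estimateBeta()
  for (int k = 0; k < 200; ++k) {
    const VectorXd next = acceleratedIterate(C, cur, prev, beta);
    prev = cur;
    cur = next;
  }
  const double lambdaMin = lambdaMax - cur.dot(C * cur) / cur.dot(cur);

  std::cout << "lambda_max ~ " << lambdaMax
            << ", lambda_min ~ " << lambdaMin << std::endl;
  return 0;
}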