// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/covariance.h"

#include <algorithm>
#include <cmath>
#include <map>
#include <utility>

#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/cost_function.h"
#include "ceres/covariance_impl.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
#include "ceres/map_util.h"
#include "ceres/problem_impl.h"
#include "gtest/gtest.h"

namespace ceres {
namespace internal {

using std::make_pair;
using std::map;
using std::pair;
using std::vector;

class UnaryCostFunction: public CostFunction {
 public:
  UnaryCostFunction(const int num_residuals,
                    const int32 parameter_block_size,
                    const double* jacobian)
      : jacobian_(jacobian, jacobian + num_residuals * parameter_block_size) {
    set_num_residuals(num_residuals);
    mutable_parameter_block_sizes()->push_back(parameter_block_size);
  }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    for (int i = 0; i < num_residuals(); ++i) {
      residuals[i] = 1;
    }

    if (jacobians == NULL) {
      return true;
    }

    if (jacobians[0] != NULL) {
      copy(jacobian_.begin(), jacobian_.end(), jacobians[0]);
    }

    return true;
  }

 private:
  vector<double> jacobian_;
};

class BinaryCostFunction: public CostFunction {
 public:
  BinaryCostFunction(const int num_residuals,
                     const int32 parameter_block1_size,
                     const int32 parameter_block2_size,
                     const double* jacobian1,
                     const double* jacobian2)
      : jacobian1_(jacobian1,
                   jacobian1 + num_residuals * parameter_block1_size),
        jacobian2_(jacobian2,
                   jacobian2 + num_residuals * parameter_block2_size) {
    set_num_residuals(num_residuals);
    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
  }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    for (int i = 0; i < num_residuals(); ++i) {
      residuals[i] = 2;
    }

    if (jacobians == NULL) {
      return true;
    }

    if (jacobians[0] != NULL) {
      copy(jacobian1_.begin(), jacobian1_.end(), jacobians[0]);
    }

    if (jacobians[1] != NULL) {
      copy(jacobian2_.begin(), jacobian2_.end(), jacobians[1]);
    }

    return true;
  }

 private:
  vector<double> jacobian1_;
  vector<double> jacobian2_;
};

// x_plus_delta = delta * x;
class PolynomialParameterization : public LocalParameterization {
 public:
  virtual ~PolynomialParameterization() {}

  virtual bool Plus(const double* x,
                    const double* delta,
                    double* x_plus_delta) const {
    x_plus_delta[0] = delta[0] * x[0];
    x_plus_delta[1] = delta[0] * x[1];
    return true;
  }

  virtual bool ComputeJacobian(const double* x, double* jacobian) const {
    jacobian[0] = x[0];
    jacobian[1] = x[1];
    return true;
  }

  virtual int GlobalSize() const { return 2; }
  virtual int LocalSize() const { return 1; }
};

TEST(CovarianceImpl, ComputeCovarianceSparsity) {
  double parameters[10];

  double* block1 = parameters;
  double* block2 = block1 + 1;
  double* block3 = block2 + 2;
  double* block4 = block3 + 3;

  ProblemImpl problem;

  // Add in random order
  Vector junk_jacobian = Vector::Zero(10);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);

  // Sparsity pattern
  //
  // Note that the problem structure does not imply this sparsity
  // pattern since all the residual blocks are unary. But the
  // ComputeCovarianceSparsity function in its current incarnation
  // does not pay attention to this fact and only looks at the
  // parameter block pairs that the user provides.
  //
  //  X . . . . . X X X X
  //  . X X X X X . . . .
  //  . X X X X X . . . .
  //  . . . X X X . . . .
  //  . . . X X X . . . .
  //  . . . X X X . . . .
  //  . . . . . . X X X X
  //  . . . . . . X X X X
  //  . . . . . . X X X X
  //  . . . . . . X X X X
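  //
  // The expected_rows / expected_cols arrays below encode this pattern in
  // compressed row storage (CRS) form, which is what
  // CompressedRowSparseMatrix uses: expected_rows[r] is the offset in
  // expected_cols at which row r starts, and expected_cols lists the column
  // index of every nonzero entry, row by row.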
  int expected_rows[] = {0, 5, 10, 15, 18, 21, 24, 28, 32, 36, 40};
  int expected_cols[] = {0, 6, 7, 8, 9,
                         1, 2, 3, 4, 5,
                         1, 2, 3, 4, 5,
                         3, 4, 5,
                         3, 4, 5,
                         3, 4, 5,
                         6, 7, 8, 9,
                         6, 7, 8, 9,
                         6, 7, 8, 9,
                         6, 7, 8, 9};

  vector<pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(make_pair(block1, block1));
  covariance_blocks.push_back(make_pair(block4, block4));
  covariance_blocks.push_back(make_pair(block2, block2));
  covariance_blocks.push_back(make_pair(block3, block3));
  covariance_blocks.push_back(make_pair(block2, block3));
  covariance_blocks.push_back(make_pair(block4, block1));  // reversed

  Covariance::Options options;
  CovarianceImpl covariance_impl(options);
  EXPECT_TRUE(covariance_impl
              .ComputeCovarianceSparsity(covariance_blocks, &problem));
  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();

  EXPECT_EQ(crsm->num_rows(), 10);
  EXPECT_EQ(crsm->num_cols(), 10);
  EXPECT_EQ(crsm->num_nonzeros(), 40);

  const int* rows = crsm->rows();
  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
    EXPECT_EQ(rows[r], expected_rows[r])
        << r << " "
        << rows[r] << " "
        << expected_rows[r];
  }

  const int* cols = crsm->cols();
  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
    EXPECT_EQ(cols[c], expected_cols[c])
        << c << " "
        << cols[c] << " "
        << expected_cols[c];
  }
}

TEST(CovarianceImpl, ComputeCovarianceSparsityWithConstantParameterBlock) {
  double parameters[10];

  double* block1 = parameters;
  double* block2 = block1 + 1;
  double* block3 = block2 + 2;
  double* block4 = block3 + 3;

  ProblemImpl problem;

  // Add in random order
  Vector junk_jacobian = Vector::Zero(10);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
  problem.SetParameterBlockConstant(block3);

  // Sparsity pattern
  //
  // Note that the problem structure does not imply this sparsity
  // pattern since all the residual blocks are unary. But the
  // ComputeCovarianceSparsity function in its current incarnation
  // does not pay attention to this fact and only looks at the
  // parameter block pairs that the user provides.
  //
  //  X . . X X X X
  //  . X X . . . .
  //  . X X . . . .
  //  . . . X X X X
  //  . . . X X X X
  //  . . . X X X X
  //  . . . X X X X
  int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
  int expected_cols[] = {0, 3, 4, 5, 6,
                         1, 2,
                         1, 2,
                         3, 4, 5, 6,
                         3, 4, 5, 6,
                         3, 4, 5, 6,
                         3, 4, 5, 6};

  vector<pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(make_pair(block1, block1));
  covariance_blocks.push_back(make_pair(block4, block4));
  covariance_blocks.push_back(make_pair(block2, block2));
  covariance_blocks.push_back(make_pair(block3, block3));
  covariance_blocks.push_back(make_pair(block2, block3));
  covariance_blocks.push_back(make_pair(block4, block1));  // reversed

  Covariance::Options options;
  CovarianceImpl covariance_impl(options);
  EXPECT_TRUE(covariance_impl
              .ComputeCovarianceSparsity(covariance_blocks, &problem));
  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();

  EXPECT_EQ(crsm->num_rows(), 7);
  EXPECT_EQ(crsm->num_cols(), 7);
  EXPECT_EQ(crsm->num_nonzeros(), 25);

  const int* rows = crsm->rows();
  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
    EXPECT_EQ(rows[r], expected_rows[r])
        << r << " "
        << rows[r] << " "
        << expected_rows[r];
  }

  const int* cols = crsm->cols();
  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
    EXPECT_EQ(cols[c], expected_cols[c])
        << c << " "
        << cols[c] << " "
        << expected_cols[c];
  }
}

TEST(CovarianceImpl, ComputeCovarianceSparsityWithFreeParameterBlock) {
  double parameters[10];

  double* block1 = parameters;
  double* block2 = block1 + 1;
  double* block3 = block2 + 2;
  double* block4 = block3 + 3;

  ProblemImpl problem;

  // Add in random order
  Vector junk_jacobian = Vector::Zero(10);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
  problem.AddParameterBlock(block3, 3);
  problem.AddResidualBlock(
      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);

  // Sparsity pattern
  //
  // Note that the problem structure does not imply this sparsity
  // pattern since all the residual blocks are unary. But the
  // ComputeCovarianceSparsity function in its current incarnation
  // does not pay attention to this fact and only looks at the
  // parameter block pairs that the user provides.
  //
  //  X . . X X X X
  //  . X X . . . .
  //  . X X . . . .
  //  . . . X X X X
  //  . . . X X X X
  //  . . . X X X X
  //  . . . X X X X
  int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
  int expected_cols[] = {0, 3, 4, 5, 6,
                         1, 2,
                         1, 2,
                         3, 4, 5, 6,
                         3, 4, 5, 6,
                         3, 4, 5, 6,
                         3, 4, 5, 6};

  vector<pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(make_pair(block1, block1));
  covariance_blocks.push_back(make_pair(block4, block4));
  covariance_blocks.push_back(make_pair(block2, block2));
  covariance_blocks.push_back(make_pair(block3, block3));
  covariance_blocks.push_back(make_pair(block2, block3));
  covariance_blocks.push_back(make_pair(block4, block1));  // reversed

  Covariance::Options options;
  CovarianceImpl covariance_impl(options);
  EXPECT_TRUE(covariance_impl
              .ComputeCovarianceSparsity(covariance_blocks, &problem));
  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();

  EXPECT_EQ(crsm->num_rows(), 7);
  EXPECT_EQ(crsm->num_cols(), 7);
  EXPECT_EQ(crsm->num_nonzeros(), 25);

  const int* rows = crsm->rows();
  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
    EXPECT_EQ(rows[r], expected_rows[r])
        << r << " "
        << rows[r] << " "
        << expected_rows[r];
  }

  const int* cols = crsm->cols();
  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
    EXPECT_EQ(cols[c], expected_cols[c])
        << c << " "
        << cols[c] << " "
        << expected_cols[c];
  }
}

class CovarianceTest : public ::testing::Test {
 protected:
  typedef map<const double*, pair<int, int> > BoundsMap;

  virtual void SetUp() {
    double* x = parameters_;
    double* y = x + 2;
    double* z = y + 3;

    x[0] = 1;
    x[1] = 1;
    y[0] = 2;
    y[1] = 2;
    y[2] = 2;
    z[0] = 3;

    {
      double jacobian[] = { 1.0, 0.0, 0.0, 1.0};
      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
    }

    {
      double jacobian[] = { 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
    }

    {
      double jacobian = 5.0;
      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
                                NULL,
                                z);
    }

    {
      double jacobian1[] = { 1.0, 2.0, 3.0 };
      double jacobian2[] = { -5.0, -6.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
          NULL,
          y,
          x);
    }

    {
      double jacobian1[] = {2.0 };
      double jacobian2[] = { 3.0, -2.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
          NULL,
          z,
          x);
    }

    all_covariance_blocks_.push_back(make_pair(x, x));
    all_covariance_blocks_.push_back(make_pair(y, y));
    all_covariance_blocks_.push_back(make_pair(z, z));
    all_covariance_blocks_.push_back(make_pair(x, y));
    all_covariance_blocks_.push_back(make_pair(x, z));
    all_covariance_blocks_.push_back(make_pair(y, z));

    column_bounds_[x] = make_pair(0, 2);
    column_bounds_[y] = make_pair(2, 5);
    column_bounds_[z] = make_pair(5, 6);
  }

  // Computes covariance in ambient space.
  void ComputeAndCompareCovarianceBlocks(const Covariance::Options& options,
                                         const double* expected_covariance) {
    ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
        options,
        true,  // ambient
        expected_covariance);
  }

  // Computes covariance in tangent space.
  void ComputeAndCompareCovarianceBlocksInTangentSpace(
                                         const Covariance::Options& options,
                                         const double* expected_covariance) {
    ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
        options,
        false,  // tangent
        expected_covariance);
  }

  void ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
      const Covariance::Options& options,
      bool lift_covariance_to_ambient_space,
      const double* expected_covariance) {
    // Generate all possible combination of block pairs and check if the
    // covariance computation is correct.
    for (int i = 0; i <= 64; ++i) {
      vector<pair<const double*, const double*> > covariance_blocks;
      if (i & 1) {
        covariance_blocks.push_back(all_covariance_blocks_[0]);
      }

      if (i & 2) {
        covariance_blocks.push_back(all_covariance_blocks_[1]);
      }

      if (i & 4) {
        covariance_blocks.push_back(all_covariance_blocks_[2]);
      }

      if (i & 8) {
        covariance_blocks.push_back(all_covariance_blocks_[3]);
      }

      if (i & 16) {
        covariance_blocks.push_back(all_covariance_blocks_[4]);
      }

      if (i & 32) {
        covariance_blocks.push_back(all_covariance_blocks_[5]);
      }

      Covariance covariance(options);
      EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem_));

      for (int i = 0; i < covariance_blocks.size(); ++i) {
        const double* block1 = covariance_blocks[i].first;
        const double* block2 = covariance_blocks[i].second;

        // block1, block2
        GetCovarianceBlockAndCompare(block1,
                                     block2,
                                     lift_covariance_to_ambient_space,
                                     covariance,
                                     expected_covariance);

        // block2, block1
        GetCovarianceBlockAndCompare(block2,
                                     block1,
                                     lift_covariance_to_ambient_space,
                                     covariance,
                                     expected_covariance);
      }
    }
  }

  void GetCovarianceBlockAndCompare(const double* block1,
                                    const double* block2,
                                    bool lift_covariance_to_ambient_space,
                                    const Covariance& covariance,
                                    const double* expected_covariance) {
    const BoundsMap& column_bounds = lift_covariance_to_ambient_space ?
        column_bounds_ : local_column_bounds_;
    const int row_begin = FindOrDie(column_bounds, block1).first;
    const int row_end = FindOrDie(column_bounds, block1).second;
    const int col_begin = FindOrDie(column_bounds, block2).first;
    const int col_end = FindOrDie(column_bounds, block2).second;

    Matrix actual(row_end - row_begin, col_end - col_begin);
    if (lift_covariance_to_ambient_space) {
      EXPECT_TRUE(covariance.GetCovarianceBlock(block1,
                                                block2,
                                                actual.data()));
    } else {
      EXPECT_TRUE(covariance.GetCovarianceBlockInTangentSpace(block1,
                                                              block2,
                                                              actual.data()));
    }

    int dof = 0;  // degrees of freedom = sum of LocalSize()s
    for (BoundsMap::const_iterator iter = column_bounds.begin();
         iter != column_bounds.end(); ++iter) {
      dof = std::max(dof, iter->second.second);
    }

    ConstMatrixRef expected(expected_covariance, dof, dof);
    double diff_norm = (expected.block(row_begin,
                                       col_begin,
                                       row_end - row_begin,
                                       col_end - col_begin) - actual).norm();
    diff_norm /= (row_end - row_begin) * (col_end - col_begin);

    const double kTolerance = 1e-5;
    EXPECT_NEAR(diff_norm, 0.0, kTolerance)
        << "rows: " << row_begin << " " << row_end << "  "
        << "cols: " << col_begin << " " << col_end << "  "
        << "\n\n expected: \n " << expected.block(row_begin,
                                                  col_begin,
                                                  row_end - row_begin,
                                                  col_end - col_begin)
        << "\n\n actual: \n " << actual
        << "\n\n full expected: \n" << expected;
  }

  double parameters_[6];
  Problem problem_;
  vector<pair<const double*, const double*> > all_covariance_blocks_;
  BoundsMap column_bounds_;
  BoundsMap local_column_bounds_;
};

TEST_F(CovarianceTest, NormalBehavior) {
  // J
  //
  //   1  0  0  0  0  0
  //   0  1  0  0  0  0
  //   0  0  2  0  0  0
  //   0  0  0  2  0  0
  //   0  0  0  0  2  0
  //   0  0  0  0  0  5
  //  -5 -6  1  2  3  0
  //   3 -2  0  0  0  2

  // J'J
  //
  //   35  24 -5 -10 -15  6
  //   24  41 -6 -12 -18 -4
  //   -5  -6  5   2   3  0
  //  -10 -12  2   8   6  0
  //  -15 -18  3   6  13  0
  //    6  -4  0   0   0 29

  // inv(J'J) computed using octave.
  double expected_covariance[] = {
     7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
    -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
     1.6821e-02,   2.4758e-02,   2.4904e-01,  -1.9271e-03,  -2.8906e-03,  -6.5325e-05,  // NOLINT
     3.3643e-02,   4.9517e-02,  -1.9271e-03,   2.4615e-01,  -5.7813e-03,  -1.3065e-04,  // NOLINT
     5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
    -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

#ifdef CERES_USE_OPENMP

TEST_F(CovarianceTest, ThreadedNormalBehavior) {
  // J
  //
  //   1  0  0  0  0  0
  //   0  1  0  0  0  0
  //   0  0  2  0  0  0
  //   0  0  0  2  0  0
  //   0  0  0  0  2  0
  //   0  0  0  0  0  5
  //  -5 -6  1  2  3  0
  //   3 -2  0  0  0  2

  // J'J
  //
  //   35  24 -5 -10 -15  6
  //   24  41 -6 -12 -18 -4
  //   -5  -6  5   2   3  0
  //  -10 -12  2   8   6  0
  //  -15 -18  3   6  13  0
  //    6  -4  0   0   0 29

  // inv(J'J) computed using octave.
  double expected_covariance[] = {
     7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
    -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
     1.6821e-02,   2.4758e-02,   2.4904e-01,  -1.9271e-03,  -2.8906e-03,  -6.5325e-05,  // NOLINT
     3.3643e-02,   4.9517e-02,  -1.9271e-03,   2.4615e-01,  -5.7813e-03,  -1.3065e-04,  // NOLINT
     5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
    -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
  };

  Covariance::Options options;
  options.num_threads = 4;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

#endif  // CERES_USE_OPENMP

TEST_F(CovarianceTest, ConstantParameterBlock) {
  problem_.SetParameterBlockConstant(parameters_);

  // J
  //
  //  0  0  0  0  0  0
  //  0  0  0  0  0  0
  //  0  0  2  0  0  0
  //  0  0  0  2  0  0
  //  0  0  0  0  2  0
  //  0  0  0  0  0  5
  //  0  0  1  2  3  0
  //  0  0  0  0  0  2

  // J'J
  //
  //  0  0  0  0  0  0
  //  0  0  0  0  0  0
  //  0  0  5  2  3  0
  //  0  0  2  8  6  0
  //  0  0  3  6 13  0
  //  0  0  0  0  0 29

  // pinv(J'J) computed using octave.
  double expected_covariance[] = {
              0,            0,            0,            0,            0,            0,  // NOLINT
              0,            0,            0,            0,            0,            0,  // NOLINT
              0,            0,      0.23611,     -0.02778,     -0.04167,     -0.00000,  // NOLINT
              0,            0,     -0.02778,      0.19444,     -0.08333,     -0.00000,  // NOLINT
              0,            0,     -0.04167,     -0.08333,      0.12500,     -0.00000,  // NOLINT
              0,            0,     -0.00000,     -0.00000,     -0.00000,      0.03448   // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

TEST_F(CovarianceTest, LocalParameterization) {
  double* x = parameters_;
  double* y = x + 2;

  problem_.SetParameterization(x, new PolynomialParameterization);

  vector<int> subset;
  subset.push_back(2);
  problem_.SetParameterization(y, new SubsetParameterization(3, subset));

  // Raw Jacobian: J
  //
  //   1   0  0  0  0  0
  //   0   1  0  0  0  0
  //   0   0  2  0  0  0
  //   0   0  0  2  0  0
  //   0   0  0  0  2  0
  //   0   0  0  0  0  5
  //  -5  -6  1  2  3  0
  //   3  -2  0  0  0  2

  // Local to global jacobian: A
  //
  //  1   0   0   0
  //  1   0   0   0
  //  0   1   0   0
  //  0   0   1   0
  //  0   0   0   0
  //  0   0   0   1

  // A * inv((J*A)'*(J*A)) * A'
  // Computed using octave.
  double expected_covariance[] = {
    0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
    0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
    0.02158,   0.02158,   0.24860,  -0.00281,   0.00000,  -0.00149,
    0.04316,   0.04316,  -0.00281,   0.24439,   0.00000,  -0.00298,
    0.00000,   0.00000,   0.00000,   0.00000,   0.00000,   0.00000,
   -0.00122,  -0.00122,  -0.00149,  -0.00298,   0.00000,   0.03457
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

TEST_F(CovarianceTest, LocalParameterizationInTangentSpace) {
  double* x = parameters_;
  double* y = x + 2;
  double* z = y + 3;

  problem_.SetParameterization(x, new PolynomialParameterization);

  vector<int> subset;
  subset.push_back(2);
  problem_.SetParameterization(y, new SubsetParameterization(3, subset));

  local_column_bounds_[x] = make_pair(0, 1);
  local_column_bounds_[y] = make_pair(1, 3);
  local_column_bounds_[z] = make_pair(3, 4);

  // Raw Jacobian: J
  //
  //   1   0  0  0  0  0
  //   0   1  0  0  0  0
  //   0   0  2  0  0  0
  //   0   0  0  2  0  0
  //   0   0  0  0  2  0
  //   0   0  0  0  0  5
  //  -5  -6  1  2  3  0
  //   3  -2  0  0  0  2

  // Local to global jacobian: A
  //
  //  1   0   0   0
  //  1   0   0   0
  //  0   1   0   0
  //  0   0   1   0
  //  0   0   0   0
  //  0   0   0   1

  // inv((J*A)'*(J*A))
  // Computed using octave.
  double expected_covariance[] = {
    0.01766,   0.02158,   0.04316,   -0.00122,
    0.02158,   0.24860,  -0.00281,   -0.00149,
    0.04316,  -0.00281,   0.24439,   -0.00298,
   -0.00122,  -0.00149,  -0.00298,    0.03457  // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
}

TEST_F(CovarianceTest, LocalParameterizationInTangentSpaceWithConstantBlocks) {
  double* x = parameters_;
  double* y = x + 2;
  double* z = y + 3;

  problem_.SetParameterization(x, new PolynomialParameterization);
  problem_.SetParameterBlockConstant(x);

  vector<int> subset;
  subset.push_back(2);
  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
  problem_.SetParameterBlockConstant(y);

  local_column_bounds_[x] = make_pair(0, 1);
  local_column_bounds_[y] = make_pair(1, 3);
  local_column_bounds_[z] = make_pair(3, 4);

  // Raw Jacobian: J
  //
  //   1   0  0  0  0  0
  //   0   1  0  0  0  0
  //   0   0  2  0  0  0
  //   0   0  0  2  0  0
  //   0   0  0  0  2  0
  //   0   0  0  0  0  5
  //  -5  -6  1  2  3  0
  //   3  -2  0  0  0  2

  // Local to global jacobian: A
  //
  //  0   0   0   0
  //  0   0   0   0
  //  0   0   0   0
  //  0   0   0   0
  //  0   0   0   0
  //  0   0   0   1

  // pinv((J*A)'*(J*A))
  // Computed using octave.
  double expected_covariance[] = {
    0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.034482 // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
}

TEST_F(CovarianceTest, TruncatedRank) {
  // J
  //
  //   1  0  0  0  0  0
  //   0  1  0  0  0  0
  //   0  0  2  0  0  0
  //   0  0  0  2  0  0
  //   0  0  0  0  2  0
  //   0  0  0  0  0  5
  //  -5 -6  1  2  3  0
  //   3 -2  0  0  0  2

  // J'J
  //
  //   35  24 -5 -10 -15  6
  //   24  41 -6 -12 -18 -4
  //   -5  -6  5   2   3  0
  //  -10 -12  2   8   6  0
  //  -15 -18  3   6  13  0
  //    6  -4  0   0   0 29

  // 3.4142 is the smallest eigen value of J'J. The following matrix
  // was obtained by dropping the eigenvector corresponding to this
  // eigenvalue.
  double expected_covariance[] = {
     5.4135e-02,  -3.5121e-02,   1.7257e-04,   3.4514e-04,   5.1771e-04,  -1.6076e-02,  // NOLINT
    -3.5121e-02,   3.8667e-02,  -1.9288e-03,  -3.8576e-03,  -5.7864e-03,   1.2549e-02,  // NOLINT
     1.7257e-04,  -1.9288e-03,   2.3235e-01,  -3.5297e-02,  -5.2946e-02,  -3.3329e-04,  // NOLINT
     3.4514e-04,  -3.8576e-03,  -3.5297e-02,   1.7941e-01,  -1.0589e-01,  -6.6659e-04,  // NOLINT
     5.1771e-04,  -5.7864e-03,  -5.2946e-02,  -1.0589e-01,   9.1162e-02,  -9.9988e-04,  // NOLINT
    -1.6076e-02,   1.2549e-02,  -3.3329e-04,  -6.6659e-04,  -9.9988e-04,   3.9539e-02   // NOLINT
  };

  {
    Covariance::Options options;
    options.algorithm_type = DENSE_SVD;
    // Force dropping of the smallest eigenvector.
    options.null_space_rank = 1;
    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
  }

  {
    Covariance::Options options;
    options.algorithm_type = DENSE_SVD;
    // Force dropping of the smallest eigenvector via the ratio but
    // automatic truncation.
    options.min_reciprocal_condition_number = 0.044494;
    options.null_space_rank = -1;
    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
  }
}

TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParameters) {
  Covariance::Options options;
  Covariance covariance(options);
  double* x = parameters_;
  double* y = x + 2;
  double* z = y + 3;
  vector<const double*> parameter_blocks;
  parameter_blocks.push_back(x);
  parameter_blocks.push_back(y);
  parameter_blocks.push_back(z);
  covariance.Compute(parameter_blocks, &problem_);
  double expected_covariance[36];
  covariance.GetCovarianceMatrix(parameter_blocks, expected_covariance);

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParametersThreaded) {
  Covariance::Options options;
  options.num_threads = 4;
  Covariance covariance(options);
  double* x = parameters_;
  double* y = x + 2;
  double* z = y + 3;
  vector<const double*> parameter_blocks;
  parameter_blocks.push_back(x);
  parameter_blocks.push_back(y);
  parameter_blocks.push_back(z);
  covariance.Compute(parameter_blocks, &problem_);
  double expected_covariance[36];
  covariance.GetCovarianceMatrix(parameter_blocks, expected_covariance);

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParametersInTangentSpace) {
  Covariance::Options options;
  Covariance covariance(options);
  double* x = parameters_;
  double* y = x + 2;
  double* z = y + 3;
  problem_.SetParameterization(x, new PolynomialParameterization);
  vector<int> subset;
  subset.push_back(2);
  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
  local_column_bounds_[x] = make_pair(0, 1);
  local_column_bounds_[y] = make_pair(1, 3);
  local_column_bounds_[z] = make_pair(3, 4);
  vector<const double*> parameter_blocks;
  parameter_blocks.push_back(x);
  parameter_blocks.push_back(y);
  parameter_blocks.push_back(z);
  covariance.Compute(parameter_blocks, &problem_);
  double expected_covariance[16];
  covariance.GetCovarianceMatrixInTangentSpace(parameter_blocks,
                                               expected_covariance);

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);

  options.algorithm_type = SPARSE_QR;
  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
}

TEST_F(CovarianceTest, ComputeCovarianceFailure) {
  Covariance::Options options;
  Covariance covariance(options);
  double* x = parameters_;
  double* y = x + 2;

  vector<const double*> parameter_blocks;
  parameter_blocks.push_back(x);
  parameter_blocks.push_back(x);
  parameter_blocks.push_back(y);
  parameter_blocks.push_back(y);
  EXPECT_DEATH_IF_SUPPORTED(covariance.Compute(parameter_blocks, &problem_),
                            "Covariance::Compute called with duplicate blocks "
                            "at indices \\(0, 1\\) and \\(2, 3\\)");

  vector<pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(make_pair(x, x));
  covariance_blocks.push_back(make_pair(x, x));
  covariance_blocks.push_back(make_pair(y, y));
  covariance_blocks.push_back(make_pair(y, y));
  EXPECT_DEATH_IF_SUPPORTED(covariance.Compute(covariance_blocks, &problem_),
                            "Covariance::Compute called with duplicate blocks "
                            "at indices \\(0, 1\\) and \\(2, 3\\)");
}

class RankDeficientCovarianceTest : public CovarianceTest {
 protected:
  virtual void SetUp() {
    double* x = parameters_;
    double* y = x + 2;
    double* z = y + 3;

    {
      double jacobian[] = { 1.0, 0.0, 0.0, 1.0};
      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
    }

    {
      double jacobian[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
    }

    {
      double jacobian = 5.0;
      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
                                NULL,
                                z);
    }

    {
      double jacobian1[] = { 0.0, 0.0, 0.0 };
      double jacobian2[] = { -5.0, -6.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
          NULL,
          y,
          x);
    }

    {
      double jacobian1[] = {2.0 };
      double jacobian2[] = { 3.0, -2.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
          NULL,
          z,
          x);
    }

    all_covariance_blocks_.push_back(make_pair(x, x));
    all_covariance_blocks_.push_back(make_pair(y, y));
    all_covariance_blocks_.push_back(make_pair(z, z));
    all_covariance_blocks_.push_back(make_pair(x, y));
    all_covariance_blocks_.push_back(make_pair(x, z));
    all_covariance_blocks_.push_back(make_pair(y, z));

    column_bounds_[x] = make_pair(0, 2);
    column_bounds_[y] = make_pair(2, 5);
    column_bounds_[z] = make_pair(5, 6);
  }
};

TEST_F(RankDeficientCovarianceTest, AutomaticTruncation) {
  // J
  //
  //   1  0  0  0  0  0
  //   0  1  0  0  0  0
  //   0  0  0  0  0  0
  //   0  0  0  0  0  0
  //   0  0  0  0  0  0
  //   0  0  0  0  0  5
  //  -5 -6  0  0  0  0
  //   3 -2  0  0  0  2

  // J'J
  //
  //  35 24  0  0  0  6
  //  24 41  0  0  0 -4
  //   0  0  0  0  0  0
  //   0  0  0  0  0  0
  //   0  0  0  0  0  0
  //   6 -4  0  0  0 29

  // pinv(J'J) computed using octave.
  double expected_covariance[] = {
     0.053998,  -0.033145,   0.000000,   0.000000,   0.000000,  -0.015744,
    -0.033145,   0.045067,   0.000000,   0.000000,   0.000000,   0.013074,
     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
    -0.015744,   0.013074,   0.000000,   0.000000,   0.000000,   0.039543
  };

  Covariance::Options options;
  options.algorithm_type = DENSE_SVD;
  options.null_space_rank = -1;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

class LargeScaleCovarianceTest : public ::testing::Test {
 protected:
  virtual void SetUp() {
    num_parameter_blocks_ = 2000;
    parameter_block_size_ = 5;
    parameters_.reset(
        new double[parameter_block_size_ * num_parameter_blocks_]);

    Matrix jacobian(parameter_block_size_, parameter_block_size_);

    for (int i = 0; i < num_parameter_blocks_; ++i) {
      jacobian.setIdentity();
      jacobian *= (i + 1);

      double* block_i = parameters_.get() + i * parameter_block_size_;
      problem_.AddResidualBlock(new UnaryCostFunction(parameter_block_size_,
                                                      parameter_block_size_,
                                                      jacobian.data()),
                                NULL,
                                block_i);
      for (int j = i; j < num_parameter_blocks_; ++j) {
        double* block_j = parameters_.get() + j * parameter_block_size_;
        all_covariance_blocks_.push_back(make_pair(block_i, block_j));
      }
    }
  }

  void ComputeAndCompare(
      CovarianceAlgorithmType algorithm_type,
      SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
      int num_threads) {
    Covariance::Options options;
    options.algorithm_type = algorithm_type;
    options.sparse_linear_algebra_library_type =
        sparse_linear_algebra_library_type;
    options.num_threads = num_threads;
    Covariance covariance(options);
    EXPECT_TRUE(covariance.Compute(all_covariance_blocks_, &problem_));

    Matrix expected(parameter_block_size_, parameter_block_size_);
    Matrix actual(parameter_block_size_, parameter_block_size_);
    const double kTolerance = 1e-16;

    for (int i = 0; i < num_parameter_blocks_; ++i) {
      expected.setIdentity();
      expected /= (i + 1.0) * (i + 1.0);

      double* block_i = parameters_.get() + i * parameter_block_size_;
      covariance.GetCovarianceBlock(block_i, block_i, actual.data());
      EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
          << "block: " << i << ", " << i << "\n"
          << "expected: \n" << expected << "\n"
          << "actual: \n" << actual;

      expected.setZero();
      for (int j = i + 1; j < num_parameter_blocks_; ++j) {
        double* block_j = parameters_.get() + j * parameter_block_size_;
        covariance.GetCovarianceBlock(block_i, block_j, actual.data());
        EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
            << "block: " << i << ", " << j << "\n"
            << "expected: \n" << expected << "\n"
            << "actual: \n" << actual;
      }
    }
  }

  scoped_array<double> parameters_;
  int parameter_block_size_;
  int num_parameter_blocks_;

  Problem problem_;
  vector<pair<const double*, const double*> > all_covariance_blocks_;
};

#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)

TEST_F(LargeScaleCovarianceTest, Parallel) {
  ComputeAndCompare(SPARSE_QR, SUITE_SPARSE, 4);
}
#endif  // !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)

}  // namespace internal
}  // namespace ceres