OR-Tools 8.1
sparse.cc
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
14 #include "ortools/lp_data/sparse.h"
15 
16 #include <algorithm>
17 
18 #include "absl/strings/str_format.h"
19 #include "ortools/base/logging.h"
22 
23 namespace operations_research {
24 namespace glop {
25 
26 namespace {
27 
29 
30 template <typename Matrix>
31 EntryIndex ComputeNumEntries(const Matrix& matrix) {
32  EntryIndex num_entries(0);
33  const ColIndex num_cols(matrix.num_cols());
34  for (ColIndex col(0); col < num_cols; ++col) {
35  num_entries += matrix.column(col).num_entries();
36  }
37  return num_entries;
38 }
39 
40 // Computes the 1-norm of the matrix.
41 // The 1-norm |A| is defined as max_j sum_i |a_ij| or
42 // max_col sum_row |a(row,col)|.
43 template <typename Matrix>
44 Fractional ComputeOneNormTemplate(const Matrix& matrix) {
45  Fractional norm(0.0);
46  const ColIndex num_cols(matrix.num_cols());
47  for (ColIndex col(0); col < num_cols; ++col) {
48  Fractional column_norm(0);
49  for (const SparseColumn::Entry e : matrix.column(col)) {
50  // Compute sum_i |a_ij|.
51  column_norm += fabs(e.coefficient());
52  }
53  // Compute max_j sum_i |a_ij|
54  norm = std::max(norm, column_norm);
55  }
56  return norm;
57 }
58 
59 // Computes the oo-norm (infinity-norm) of the matrix.
60 // The oo-norm |A| is defined as max_i sum_j |a_ij| or
61 // max_row sum_col |a(row,col)|.
62 template <typename Matrix>
63 Fractional ComputeInfinityNormTemplate(const Matrix& matrix) {
64  DenseColumn row_sum(matrix.num_rows(), 0.0);
65  const ColIndex num_cols(matrix.num_cols());
66  for (ColIndex col(0); col < num_cols; ++col) {
67  for (const SparseColumn::Entry e : matrix.column(col)) {
68  // Compute sum_j |a_ij|.
69  row_sum[e.row()] += fabs(e.coefficient());
70  }
71  }
72 
73  // Compute max_i sum_j |a_ij|
74  Fractional norm = 0.0;
75  const RowIndex num_rows(matrix.num_rows());
76  for (RowIndex row(0); row < num_rows; ++row) {
77  norm = std::max(norm, row_sum[row]);
78  }
79  return norm;
80 }
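// Illustrative example for the two norm templates above (hypothetical values,
// not taken from the original source): for the 2 x 2 matrix
//   [ 1.0  -2.0 ]
//   [ 3.0   4.0 ]
// the column sums of absolute values are 4.0 and 6.0, so the 1-norm is 6.0,
// while the row sums of absolute values are 3.0 and 7.0, so the
// infinity-norm is 7.0.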
81 
82 } // namespace
83 
84 // --------------------------------------------------------
85 // SparseMatrix
86 // --------------------------------------------------------
87 SparseMatrix::SparseMatrix() : columns_(), num_rows_(0) {}
88 
89 #if (!defined(_MSC_VER) || (_MSC_VER >= 1800))
90 SparseMatrix::SparseMatrix(
91     std::initializer_list<std::initializer_list<Fractional>> init_list) {
92  ColIndex num_cols(0);
93  num_rows_ = RowIndex(init_list.size());
94  RowIndex row(0);
95  for (std::initializer_list<Fractional> init_row : init_list) {
96  num_cols = std::max(num_cols, ColIndex(init_row.size()));
97  columns_.resize(num_cols, SparseColumn());
98  ColIndex col(0);
99  for (Fractional value : init_row) {
100  if (value != 0.0) {
101  columns_[col].SetCoefficient(row, value);
102  }
103  ++col;
104  }
105  ++row;
106  }
107 }
108 #endif
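// A minimal usage sketch of the initializer_list constructor above (the
// variable name m is hypothetical): zero coefficients are simply not stored.
//
//   SparseMatrix m({{1.0, 0.0, 2.0},
//                   {0.0, 3.0, 4.0}});
//   // m.num_rows() == RowIndex(2), m.num_cols() == ColIndex(3), and only the
//   // four non-zero coefficients are kept in the columns.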
109 
110 void SparseMatrix::Clear() {
111   columns_.clear();
112  num_rows_ = RowIndex(0);
113 }
114 
115 bool SparseMatrix::IsEmpty() const {
116  return columns_.empty() || num_rows_ == 0;
117 }
118 
119 void SparseMatrix::CleanUp() {
120   const ColIndex num_cols(columns_.size());
121  for (ColIndex col(0); col < num_cols; ++col) {
122  columns_[col].CleanUp();
123  }
124 }
125 
126 bool SparseMatrix::CheckNoDuplicates() const {
127   DenseBooleanColumn boolean_column;
128  const ColIndex num_cols(columns_.size());
129  for (ColIndex col(0); col < num_cols; ++col) {
130  if (!columns_[col].CheckNoDuplicates(&boolean_column)) return false;
131  }
132  return true;
133 }
134 
135 bool SparseMatrix::IsCleanedUp() const {
136   const ColIndex num_cols(columns_.size());
137  for (ColIndex col(0); col < num_cols; ++col) {
138  if (!columns_[col].IsCleanedUp()) return false;
139  }
140  return true;
141 }
142 
143 void SparseMatrix::SetNumRows(RowIndex num_rows) { num_rows_ = num_rows; }
144 
145 ColIndex SparseMatrix::AppendEmptyColumn() {
146   const ColIndex result = columns_.size();
147  columns_.push_back(SparseColumn());
148  return result;
149 }
150 
151 void SparseMatrix::AppendUnitVector(RowIndex row, Fractional value) {
152   DCHECK_LT(row, num_rows_);
153  SparseColumn new_col;
154  new_col.SetCoefficient(row, value);
155  columns_.push_back(std::move(new_col));
156 }
157 
158 void SparseMatrix::Swap(SparseMatrix* matrix) {
159   // We do not need to swap the different mutable scratchpads we use.
160  columns_.swap(matrix->columns_);
161  std::swap(num_rows_, matrix->num_rows_);
162 }
163 
164 void SparseMatrix::PopulateFromZero(RowIndex num_rows, ColIndex num_cols) {
165  columns_.resize(num_cols, SparseColumn());
166  for (ColIndex col(0); col < num_cols; ++col) {
167  columns_[col].Clear();
168  }
169  num_rows_ = num_rows;
170 }
171 
172 void SparseMatrix::PopulateFromIdentity(ColIndex num_cols) {
173   Reset(num_cols, ColToRowIndex(num_cols));
174   for (ColIndex col(0); col < num_cols; ++col) {
175  const RowIndex row = ColToRowIndex(col);
176  columns_[col].SetCoefficient(row, Fractional(1.0));
177  }
178 }
179 
180 template <typename Matrix>
181 void SparseMatrix::PopulateFromTranspose(const Matrix& input) {
182   Reset(RowToColIndex(input.num_rows()), ColToRowIndex(input.num_cols()));
183 
184  // We do a first pass on the input matrix to resize the new columns properly.
185  StrictITIVector<RowIndex, EntryIndex> row_degree(input.num_rows(),
186  EntryIndex(0));
187  for (ColIndex col(0); col < input.num_cols(); ++col) {
188  for (const SparseColumn::Entry e : input.column(col)) {
189  ++row_degree[e.row()];
190  }
191  }
192  for (RowIndex row(0); row < input.num_rows(); ++row) {
193  columns_[RowToColIndex(row)].Reserve(row_degree[row]);
194  }
195 
196  for (ColIndex col(0); col < input.num_cols(); ++col) {
197  const RowIndex transposed_row = ColToRowIndex(col);
198  for (const SparseColumn::Entry e : input.column(col)) {
199  const ColIndex transposed_col = RowToColIndex(e.row());
200  columns_[transposed_col].SetCoefficient(transposed_row, e.coefficient());
201  }
202  }
203  DCHECK(IsCleanedUp());
204 }
205 
206 void SparseMatrix::PopulateFromSparseMatrix(const SparseMatrix& matrix) {
207   Reset(ColIndex(0), matrix.num_rows_);
208  columns_ = matrix.columns_;
209 }
210 
211 template <typename Matrix>
212 void SparseMatrix::PopulateFromPermutedMatrix(
213     const Matrix& a, const RowPermutation& row_perm,
214  const ColumnPermutation& inverse_col_perm) {
215  const ColIndex num_cols = a.num_cols();
216  Reset(num_cols, a.num_rows());
217  for (ColIndex col(0); col < num_cols; ++col) {
218  for (const auto e : a.column(inverse_col_perm[col])) {
219  columns_[col].SetCoefficient(row_perm[e.row()], e.coefficient());
220  }
221  }
223 }
224 
225 void SparseMatrix::PopulateFromLinearCombination(Fractional alpha,
226                                                  const SparseMatrix& a,
227  Fractional beta,
228  const SparseMatrix& b) {
229  DCHECK_EQ(a.num_cols(), b.num_cols());
230  DCHECK_EQ(a.num_rows(), b.num_rows());
231 
232  const ColIndex num_cols = a.num_cols();
233  Reset(num_cols, a.num_rows());
234 
235  const RowIndex num_rows = a.num_rows();
236  RandomAccessSparseColumn dense_column(num_rows);
237  for (ColIndex col(0); col < num_cols; ++col) {
238  for (const SparseColumn::Entry e : a.columns_[col]) {
239  dense_column.AddToCoefficient(e.row(), alpha * e.coefficient());
240  }
241  for (const SparseColumn::Entry e : b.columns_[col]) {
242  dense_column.AddToCoefficient(e.row(), beta * e.coefficient());
243  }
244  dense_column.PopulateSparseColumn(&columns_[col]);
245  columns_[col].CleanUp();
246  dense_column.Clear();
247  }
248 }
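// A usage sketch for PopulateFromLinearCombination above (the names a, b and
// c are hypothetical): c is overwritten with 2.0 * a - 1.0 * b, assuming a
// and b have identical dimensions.
//
//   SparseMatrix c;
//   c.PopulateFromLinearCombination(Fractional(2.0), a, Fractional(-1.0), b);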
249 
250 void SparseMatrix::PopulateFromProduct(const SparseMatrix& a,
251                                        const SparseMatrix& b) {
252  const ColIndex num_cols = b.num_cols();
253  const RowIndex num_rows = a.num_rows();
254  Reset(num_cols, num_rows);
255 
256   RandomAccessSparseColumn tmp_column(num_rows);
257   for (ColIndex col_b(0); col_b < num_cols; ++col_b) {
258  for (const SparseColumn::Entry eb : b.columns_[col_b]) {
259  if (eb.coefficient() == 0.0) {
260  continue;
261  }
262  const ColIndex col_a = RowToColIndex(eb.row());
263  for (const SparseColumn::Entry ea : a.columns_[col_a]) {
264  const Fractional value = ea.coefficient() * eb.coefficient();
265  tmp_column.AddToCoefficient(ea.row(), value);
266  }
267  }
268 
269  // Populate column col_b.
270  tmp_column.PopulateSparseColumn(&columns_[col_b]);
271  columns_[col_b].CleanUp();
272  tmp_column.Clear();
273  }
274 }
275 
276 void SparseMatrix::DeleteColumns(const DenseBooleanRow& columns_to_delete) {
277  if (columns_to_delete.empty()) return;
278  ColIndex new_index(0);
279  const ColIndex num_cols = columns_.size();
280  for (ColIndex col(0); col < num_cols; ++col) {
281  if (col >= columns_to_delete.size() || !columns_to_delete[col]) {
282  columns_[col].Swap(&(columns_[new_index]));
283  ++new_index;
284  }
285  }
286  columns_.resize(new_index);
287 }
288 
289 void SparseMatrix::DeleteRows(RowIndex new_num_rows,
290  const RowPermutation& permutation) {
291  DCHECK_EQ(num_rows_, permutation.size());
292  for (RowIndex row(0); row < num_rows_; ++row) {
293  DCHECK_LT(permutation[row], new_num_rows);
294  }
295  const ColIndex end = num_cols();
296  for (ColIndex col(0); col < end; ++col) {
297  columns_[col].ApplyPartialRowPermutation(permutation);
298  }
299  SetNumRows(new_num_rows);
300 }
301 
302 bool SparseMatrix::AppendRowsFromSparseMatrix(const SparseMatrix& matrix) {
303   const ColIndex end = num_cols();
304  if (end != matrix.num_cols()) {
305  return false;
306  }
307  const RowIndex offset = num_rows();
308  for (ColIndex col(0); col < end; ++col) {
309  const SparseColumn& source_column = matrix.columns_[col];
310  columns_[col].AppendEntriesWithOffset(source_column, offset);
311  }
312  SetNumRows(offset + matrix.num_rows());
313  return true;
314 }
315 
316 void SparseMatrix::ApplyRowPermutation(const RowPermutation& row_perm) {
317   const ColIndex num_cols(columns_.size());
318  for (ColIndex col(0); col < num_cols; ++col) {
319  columns_[col].ApplyRowPermutation(row_perm);
320  }
321 }
322 
323 Fractional SparseMatrix::LookUpValue(RowIndex row, ColIndex col) const {
324  return columns_[col].LookUpCoefficient(row);
325 }
326 
327 bool SparseMatrix::Equals(const SparseMatrix& a, Fractional tolerance) const {
328  if (num_cols() != a.num_cols() || num_rows() != a.num_rows()) {
329  return false;
330  }
331 
332  RandomAccessSparseColumn dense_column(num_rows());
333  RandomAccessSparseColumn dense_column_a(num_rows());
334  const ColIndex num_cols = a.num_cols();
335  for (ColIndex col(0); col < num_cols; ++col) {
336  // Store all entries of current matrix in a dense column.
337  for (const SparseColumn::Entry e : columns_[col]) {
338  dense_column.AddToCoefficient(e.row(), e.coefficient());
339  }
340 
341  // Check all entries of a are those stored in the dense column.
342  for (const SparseColumn::Entry e : a.columns_[col]) {
343  if (fabs(e.coefficient() - dense_column.GetCoefficient(e.row())) >
344  tolerance) {
345  return false;
346  }
347  }
348 
349  // Store all entries of matrix a in a dense column.
350  for (const SparseColumn::Entry e : a.columns_[col]) {
351  dense_column_a.AddToCoefficient(e.row(), e.coefficient());
352  }
353 
354  // Check all entries are those stored in the dense column a.
355  for (const SparseColumn::Entry e : columns_[col]) {
356  if (fabs(e.coefficient() - dense_column_a.GetCoefficient(e.row())) >
357  tolerance) {
358  return false;
359  }
360  }
361 
362  dense_column.Clear();
363  dense_column_a.Clear();
364  }
365 
366  return true;
367 }
368 
369 void SparseMatrix::ComputeMinAndMaxMagnitudes(Fractional* min_magnitude,
370                                               Fractional* max_magnitude) const {
371  RETURN_IF_NULL(min_magnitude);
372  RETURN_IF_NULL(max_magnitude);
373  *min_magnitude = kInfinity;
374  *max_magnitude = 0.0;
375  for (ColIndex col(0); col < num_cols(); ++col) {
376  for (const SparseColumn::Entry e : columns_[col]) {
377  const Fractional magnitude = fabs(e.coefficient());
378  if (magnitude != 0.0) {
379  *min_magnitude = std::min(*min_magnitude, magnitude);
380  *max_magnitude = std::max(*max_magnitude, magnitude);
381  }
382  }
383  }
384  if (*max_magnitude == 0.0) {
385  *min_magnitude = 0.0;
386  }
387 }
388 
389 EntryIndex SparseMatrix::num_entries() const {
390  return ComputeNumEntries(*this);
391 }
392 Fractional SparseMatrix::ComputeOneNorm() const {
393   return ComputeOneNormTemplate(*this);
394 }
395 Fractional SparseMatrix::ComputeInfinityNorm() const {
396   return ComputeInfinityNormTemplate(*this);
397 }
398 
399 std::string SparseMatrix::Dump() const {
400  std::string result;
401  const ColIndex num_cols(columns_.size());
402 
403  for (RowIndex row(0); row < num_rows_; ++row) {
404  result.append("{ ");
405  for (ColIndex col(0); col < num_cols; ++col) {
406  absl::StrAppendFormat(&result, "%g ", ToDouble(LookUpValue(row, col)));
407  }
408  result.append("}\n");
409  }
410  return result;
411 }
412 
413 void SparseMatrix::Reset(ColIndex num_cols, RowIndex num_rows) {
414  Clear();
415  columns_.resize(num_cols, SparseColumn());
416  num_rows_ = num_rows;
417 }
418 
419 EntryIndex MatrixView::num_entries() const { return ComputeNumEntries(*this); }
420 Fractional MatrixView::ComputeOneNorm() const {
421   return ComputeOneNormTemplate(*this);
422 }
423 Fractional MatrixView::ComputeInfinityNorm() const {
424   return ComputeInfinityNormTemplate(*this);
425 }
426 
427 // Instantiate needed templates.
428 template void SparseMatrix::PopulateFromTranspose<SparseMatrix>(
429  const SparseMatrix& input);
430 template void SparseMatrix::PopulateFromPermutedMatrix<SparseMatrix>(
431  const SparseMatrix& a, const RowPermutation& row_perm,
432  const ColumnPermutation& inverse_col_perm);
433 template void SparseMatrix::PopulateFromPermutedMatrix<CompactSparseMatrixView>(
434  const CompactSparseMatrixView& a, const RowPermutation& row_perm,
435  const ColumnPermutation& inverse_col_perm);
436 
437 void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView& input) {
438   num_cols_ = input.num_cols();
439  num_rows_ = input.num_rows();
440  const EntryIndex num_entries = input.num_entries();
441  starts_.assign(num_cols_ + 1, EntryIndex(0));
442   coefficients_.assign(num_entries, 0.0);
443   rows_.assign(num_entries, RowIndex(0));
444  EntryIndex index(0);
445  for (ColIndex col(0); col < input.num_cols(); ++col) {
446  starts_[col] = index;
447  for (const SparseColumn::Entry e : input.column(col)) {
448  coefficients_[index] = e.coefficient();
449  rows_[index] = e.row();
450  ++index;
451  }
452  }
453  starts_[input.num_cols()] = index;
454 }
455 
456 void CompactSparseMatrix::PopulateFromTranspose(
457     const CompactSparseMatrix& input) {
458  num_cols_ = RowToColIndex(input.num_rows());
459  num_rows_ = ColToRowIndex(input.num_cols());
460 
461   // Fill the starts_ vector by computing the number of entries of each row
462   // and then doing a cumulative sum. After this step starts_[col + 1] will be
463   // the actual start of column col when we are done.
464  starts_.assign(num_cols_ + 2, EntryIndex(0));
465  for (const RowIndex row : input.rows_) {
466  ++starts_[RowToColIndex(row) + 2];
467  }
468  for (ColIndex col(2); col < starts_.size(); ++col) {
469  starts_[col] += starts_[col - 1];
470  }
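  // Illustrative walk-through of the counting pass above (hypothetical sizes,
  // not from the original source): with 3 transposed columns whose entry
  // counts are {2, 0, 1}, starts_ is {0, 0, 2, 0, 1} after the counting loop
  // (counts shifted by two positions) and {0, 0, 2, 2, 3} after the cumulative
  // sum. The fill loop below then writes each entry at
  // starts_[transposed_col + 1]++, so starts_ ends up holding the final
  // column start offsets.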
473  starts_.pop_back();
474 
475  // Use starts_ to fill the matrix. Note that starts_ is modified so that at
476  // the end it has its final values.
477  for (ColIndex col(0); col < input.num_cols(); ++col) {
478  const RowIndex transposed_row = ColToRowIndex(col);
479  for (const EntryIndex i : input.Column(col)) {
480  const ColIndex transposed_col = RowToColIndex(input.EntryRow(i));
481  const EntryIndex index = starts_[transposed_col + 1]++;
482  coefficients_[index] = input.EntryCoefficient(i);
483  rows_[index] = transposed_row;
484  }
485  }
486 
487  DCHECK_EQ(starts_.front(), 0);
489 }
490 
491 void TriangularMatrix::PopulateFromTranspose(const TriangularMatrix& input) {
492   CompactSparseMatrix::PopulateFromTranspose(input);
493 
494  // This takes care of the triangular special case.
495  diagonal_coefficients_ = input.diagonal_coefficients_;
496  all_diagonal_coefficients_are_one_ = input.all_diagonal_coefficients_are_one_;
497 
498  // The elimination structure of the transpose is not the same.
499  pruned_ends_.resize(num_cols_, EntryIndex(0));
500  for (ColIndex col(0); col < num_cols_; ++col) {
501  pruned_ends_[col] = starts_[col + 1];
502  }
503 
504  // Compute first_non_identity_column_. Note that this is not necessarily the
505  // same as input.first_non_identity_column_ for an upper triangular matrix.
506  first_non_identity_column_ = 0;
507  const ColIndex end = diagonal_coefficients_.size();
508  while (first_non_identity_column_ < end &&
509  ColumnNumEntries(first_non_identity_column_) == 0 &&
510  diagonal_coefficients_[first_non_identity_column_] == 1.0) {
511  ++first_non_identity_column_;
512  }
513 }
514 
515 void CompactSparseMatrix::Reset(RowIndex num_rows) {
516   num_rows_ = num_rows;
517   num_cols_ = 0;
518   rows_.clear();
519   coefficients_.clear();
520   starts_.clear();
521  starts_.push_back(EntryIndex(0));
522 }
523 
524 void TriangularMatrix::Reset(RowIndex num_rows, ColIndex col_capacity) {
525   CompactSparseMatrix::Reset(num_rows);
526   first_non_identity_column_ = 0;
527  all_diagonal_coefficients_are_one_ = true;
528 
529  pruned_ends_.resize(col_capacity);
530  diagonal_coefficients_.resize(col_capacity);
531  starts_.resize(col_capacity + 1);
532  // Non-zero entries in the first column always have an offset of 0.
533  starts_[ColIndex(0)] = 0;
534 }
535 
536 ColIndex CompactSparseMatrix::AddDenseColumn(const DenseColumn& dense_column) {
537  return AddDenseColumnPrefix(dense_column, RowIndex(0));
538 }
539 
540 ColIndex CompactSparseMatrix::AddDenseColumnPrefix(
541     const DenseColumn& dense_column, RowIndex start) {
542  const RowIndex num_rows(dense_column.size());
543  for (RowIndex row(start); row < num_rows; ++row) {
544  if (dense_column[row] != 0.0) {
545       rows_.push_back(row);
546       coefficients_.push_back(dense_column[row]);
547     }
548   }
549   starts_.push_back(rows_.size());
550   ++num_cols_;
551  return num_cols_ - 1;
552 }
553 
554 ColIndex CompactSparseMatrix::AddDenseColumnWithNonZeros(
555     const DenseColumn& dense_column, const std::vector<RowIndex>& non_zeros) {
556  if (non_zeros.empty()) return AddDenseColumn(dense_column);
557  for (const RowIndex row : non_zeros) {
558  const Fractional value = dense_column[row];
559  if (value != 0.0) {
560       rows_.push_back(row);
561       coefficients_.push_back(value);
562     }
563   }
564   starts_.push_back(rows_.size());
565   ++num_cols_;
566  return num_cols_ - 1;
567 }
568 
569 ColIndex CompactSparseMatrix::AddAndClearColumnWithNonZeros(
570     DenseColumn* column, std::vector<RowIndex>* non_zeros) {
571  for (const RowIndex row : *non_zeros) {
572  const Fractional value = (*column)[row];
573  if (value != 0.0) {
574       rows_.push_back(row);
575       coefficients_.push_back(value);
576       (*column)[row] = 0.0;
577     }
578   }
579   non_zeros->clear();
580   starts_.push_back(rows_.size());
581   ++num_cols_;
582  return num_cols_ - 1;
583 }
584 
585 void CompactSparseMatrix::Swap(CompactSparseMatrix* other) {
586   std::swap(num_rows_, other->num_rows_);
587   std::swap(num_cols_, other->num_cols_);
588   coefficients_.swap(other->coefficients_);
589   rows_.swap(other->rows_);
590   starts_.swap(other->starts_);
591 }
592 
593 void TriangularMatrix::Swap(TriangularMatrix* other) {
594   CompactSparseMatrix::Swap(other);
595   diagonal_coefficients_.swap(other->diagonal_coefficients_);
596  std::swap(first_non_identity_column_, other->first_non_identity_column_);
597  std::swap(all_diagonal_coefficients_are_one_,
598  other->all_diagonal_coefficients_are_one_);
599 }
600 
601 EntryIndex CompactSparseMatrixView::num_entries() const {
602   return ComputeNumEntries(*this);
603 }
604 Fractional CompactSparseMatrixView::ComputeOneNorm() const {
605   return ComputeOneNormTemplate(*this);
606 }
607 Fractional CompactSparseMatrixView::ComputeInfinityNorm() const {
608   return ComputeInfinityNormTemplate(*this);
609 }
610 
611 // Internal function used to finish adding one column to a triangular matrix.
612 // This sets the diagonal coefficient to the given value, and prepares the
613 // matrix for the next column addition.
614 void TriangularMatrix::CloseCurrentColumn(Fractional diagonal_value) {
615  DCHECK_NE(diagonal_value, 0.0);
616  // The vectors diagonal_coefficients, pruned_ends, and starts_ should have all
617  // been preallocated by a call to SetTotalNumberOfColumns().
618  DCHECK_LT(num_cols_, diagonal_coefficients_.size());
619  diagonal_coefficients_[num_cols_] = diagonal_value;
620 
621  // TODO(user): This is currently not used by all matrices. It will be good
622  // to fill it only when needed.
623  DCHECK_LT(num_cols_, pruned_ends_.size());
624  pruned_ends_[num_cols_] = coefficients_.size();
625  ++num_cols_;
628  if (first_non_identity_column_ == num_cols_ - 1 && coefficients_.empty() &&
629  diagonal_value == 1.0) {
630  first_non_identity_column_ = num_cols_;
631  }
632  all_diagonal_coefficients_are_one_ =
633  all_diagonal_coefficients_are_one_ && (diagonal_value == 1.0);
634 }
635 
636 void TriangularMatrix::AddDiagonalOnlyColumn(Fractional diagonal_value) {
637   CloseCurrentColumn(diagonal_value);
638 }
639 
640 void TriangularMatrix::AddTriangularColumn(const ColumnView& column,
641                                            RowIndex diagonal_row) {
642  Fractional diagonal_value = 0.0;
643  for (const SparseColumn::Entry e : column) {
644  if (e.row() == diagonal_row) {
645  diagonal_value = e.coefficient();
646  } else {
647  DCHECK_NE(0.0, e.coefficient());
648  rows_.push_back(e.row());
649  coefficients_.push_back(e.coefficient());
650  }
651  }
652  CloseCurrentColumn(diagonal_value);
653 }
654 
655 void TriangularMatrix::AddAndNormalizeTriangularColumn(
656     const SparseColumn& column, RowIndex diagonal_row,
657  Fractional diagonal_coefficient) {
658  // TODO(user): use division by a constant using multiplication.
659  for (const SparseColumn::Entry e : column) {
660  if (e.row() != diagonal_row) {
661  if (e.coefficient() != 0.0) {
662  rows_.push_back(e.row());
663  coefficients_.push_back(e.coefficient() / diagonal_coefficient);
664  }
665  } else {
666  DCHECK_EQ(e.coefficient(), diagonal_coefficient);
667  }
668  }
669  CloseCurrentColumn(1.0);
670 }
671 
672 void TriangularMatrix::AddTriangularColumnWithGivenDiagonalEntry(
673     const SparseColumn& column, RowIndex diagonal_row,
674  Fractional diagonal_value) {
675  for (SparseColumn::Entry e : column) {
676  DCHECK_NE(e.row(), diagonal_row);
677  rows_.push_back(e.row());
678  coefficients_.push_back(e.coefficient());
679  }
680  CloseCurrentColumn(diagonal_value);
681 }
682 
683 void TriangularMatrix::PopulateFromTriangularSparseMatrix(
684     const SparseMatrix& input) {
685  Reset(input.num_rows(), input.num_cols());
686  for (ColIndex col(0); col < input.num_cols(); ++col) {
687     AddTriangularColumn(ColumnView(input.column(col)), ColToRowIndex(col));
688   }
689   DCHECK(IsLowerTriangular() || IsUpperTriangular());
690 }
691 
692 bool TriangularMatrix::IsLowerTriangular() const {
693   for (ColIndex col(0); col < num_cols_; ++col) {
694  if (diagonal_coefficients_[col] == 0.0) return false;
695  for (EntryIndex i : Column(col)) {
696  if (EntryRow(i) <= ColToRowIndex(col)) return false;
697  }
698  }
699  return true;
700 }
701 
702 bool TriangularMatrix::IsUpperTriangular() const {
703   for (ColIndex col(0); col < num_cols_; ++col) {
704  if (diagonal_coefficients_[col] == 0.0) return false;
705  for (EntryIndex i : Column(col)) {
706  if (EntryRow(i) >= ColToRowIndex(col)) return false;
707  }
708  }
709  return true;
710 }
711 
712 void TriangularMatrix::ApplyRowPermutationToNonDiagonalEntries(
713     const RowPermutation& row_perm) {
714  EntryIndex num_entries = rows_.size();
715  for (EntryIndex i(0); i < num_entries; ++i) {
716  rows_[i] = row_perm[rows_[i]];
717  }
718 }
719 
720 void TriangularMatrix::CopyColumnToSparseColumn(ColIndex col,
721                                                 SparseColumn* output) const {
722  output->Clear();
723  for (const EntryIndex i : Column(col)) {
724  output->SetCoefficient(EntryRow(i), EntryCoefficient(i));
725  }
726  output->SetCoefficient(ColToRowIndex(col), diagonal_coefficients_[col]);
727  output->CleanUp();
728 }
729 
730 void TriangularMatrix::CopyToSparseMatrix(SparseMatrix* output) const {
731   output->PopulateFromZero(num_rows_, num_cols_);
732   for (ColIndex col(0); col < num_cols_; ++col) {
733     CopyColumnToSparseColumn(col, output->mutable_column(col));
734   }
735 }
736 
737 void TriangularMatrix::LowerSolve(DenseColumn* rhs) const {
738   LowerSolveStartingAt(ColIndex(0), rhs);
739 }
740 
741 void TriangularMatrix::LowerSolveStartingAt(ColIndex start,
742                                             DenseColumn* rhs) const {
743  if (all_diagonal_coefficients_are_one_) {
744  LowerSolveStartingAtInternal<true>(start, rhs);
745  } else {
746  LowerSolveStartingAtInternal<false>(start, rhs);
747  }
748 }
749 
750 template <bool diagonal_of_ones>
751 void TriangularMatrix::LowerSolveStartingAtInternal(ColIndex start,
752  DenseColumn* rhs) const {
753  RETURN_IF_NULL(rhs);
754  const ColIndex begin = std::max(start, first_non_identity_column_);
755  const ColIndex end = diagonal_coefficients_.size();
756  for (ColIndex col(begin); col < end; ++col) {
757  const Fractional value = (*rhs)[ColToRowIndex(col)];
758  if (value == 0.0) continue;
759  const Fractional coeff =
760  diagonal_of_ones ? value : value / diagonal_coefficients_[col];
761  if (!diagonal_of_ones) {
762  (*rhs)[ColToRowIndex(col)] = coeff;
763  }
764  for (const EntryIndex i : Column(col)) {
765  (*rhs)[EntryRow(i)] -= coeff * EntryCoefficient(i);
766  }
767  }
768 }
769 
770 void TriangularMatrix::UpperSolve(DenseColumn* rhs) const {
771   if (all_diagonal_coefficients_are_one_) {
772  UpperSolveInternal<true>(rhs);
773  } else {
774  UpperSolveInternal<false>(rhs);
775  }
776 }
777 
778 template <bool diagonal_of_ones>
779 void TriangularMatrix::UpperSolveInternal(DenseColumn* rhs) const {
780  RETURN_IF_NULL(rhs);
781  const ColIndex end = first_non_identity_column_;
782  for (ColIndex col(diagonal_coefficients_.size() - 1); col >= end; --col) {
783  const Fractional value = (*rhs)[ColToRowIndex(col)];
784  if (value == 0.0) continue;
785  const Fractional coeff =
786  diagonal_of_ones ? value : value / diagonal_coefficients_[col];
787  if (!diagonal_of_ones) {
788  (*rhs)[ColToRowIndex(col)] = coeff;
789  }
790 
791  // It is faster to iterate this way (instead of i : Column(col)) because of
792  // cache locality. Note that the floating-point computations are exactly the
793  // same in both cases.
794  const EntryIndex i_end = starts_[col];
795  for (EntryIndex i(starts_[col + 1] - 1); i >= i_end; --i) {
796  (*rhs)[EntryRow(i)] -= coeff * EntryCoefficient(i);
797  }
798  }
799 }
800 
801 void TriangularMatrix::TransposeUpperSolve(DenseColumn* rhs) const {
802   if (all_diagonal_coefficients_are_one_) {
803  TransposeUpperSolveInternal<true>(rhs);
804  } else {
805  TransposeUpperSolveInternal<false>(rhs);
806  }
807 }
808 
809 template <bool diagonal_of_ones>
810 void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn* rhs) const {
811  RETURN_IF_NULL(rhs);
812  const ColIndex end = num_cols_;
813  EntryIndex i = starts_[first_non_identity_column_];
814  for (ColIndex col(first_non_identity_column_); col < end; ++col) {
815  Fractional sum = (*rhs)[ColToRowIndex(col)];
816 
817  // Note that this is a bit faster than the simpler
818  // for (const EntryIndex i : Column(col)) {
819  // EntryIndex i is explicitly not modified in outer iterations, since
820  // the last entry in column col is stored contiguously just before the
821  // first entry in column col+1.
822  const EntryIndex i_end = starts_[col + 1];
823  for (; i < i_end; ++i) {
824  sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
825  }
826  (*rhs)[ColToRowIndex(col)] =
827  diagonal_of_ones ? sum : sum / diagonal_coefficients_[col];
828  }
829 }
830 
831 void TriangularMatrix::TransposeLowerSolve(DenseColumn* rhs) const {
832   if (all_diagonal_coefficients_are_one_) {
833  TransposeLowerSolveInternal<true>(rhs);
834  } else {
835  TransposeLowerSolveInternal<false>(rhs);
836  }
837 }
838 
839 template <bool diagonal_of_ones>
840 void TriangularMatrix::TransposeLowerSolveInternal(DenseColumn* rhs) const {
841  RETURN_IF_NULL(rhs);
842  const ColIndex end = first_non_identity_column_;
843 
844   // We optimize the solve a bit by skipping the trailing 0.0 positions.
845  ColIndex col = num_cols_ - 1;
846  while (col >= end && (*rhs)[ColToRowIndex(col)] == 0.0) {
847  --col;
848  }
849 
850  EntryIndex i = starts_[col + 1] - 1;
851  for (; col >= end; --col) {
852  Fractional sum = (*rhs)[ColToRowIndex(col)];
853 
854  // Note that this is a bit faster than the simpler
855  // for (const EntryIndex i : Column(col)) {
856  // mainly because we iterate in a good direction for the cache.
857  // EntryIndex i is explicitly not modified in outer iterations, since
858  // the last entry in column col is stored contiguously just before the
859  // first entry in column col+1.
860  const EntryIndex i_end = starts_[col];
861  for (; i >= i_end; --i) {
862  sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
863  }
864  (*rhs)[ColToRowIndex(col)] =
865  diagonal_of_ones ? sum : sum / diagonal_coefficients_[col];
866  }
867 }
868 
869 void TriangularMatrix::HyperSparseSolve(DenseColumn* rhs,
870                                         RowIndexVector* non_zero_rows) const {
871  if (all_diagonal_coefficients_are_one_) {
872  HyperSparseSolveInternal<true>(rhs, non_zero_rows);
873  } else {
874  HyperSparseSolveInternal<false>(rhs, non_zero_rows);
875  }
876 }
877 
878 template <bool diagonal_of_ones>
879 void TriangularMatrix::HyperSparseSolveInternal(
880  DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
881  RETURN_IF_NULL(rhs);
882  int new_size = 0;
883  for (const RowIndex row : *non_zero_rows) {
884  if ((*rhs)[row] == 0.0) continue;
885  const ColIndex row_as_col = RowToColIndex(row);
886  const Fractional coeff =
887  diagonal_of_ones ? (*rhs)[row]
888  : (*rhs)[row] / diagonal_coefficients_[row_as_col];
889  (*rhs)[row] = coeff;
890  for (const EntryIndex i : Column(row_as_col)) {
891  (*rhs)[EntryRow(i)] -= coeff * EntryCoefficient(i);
892  }
893  (*non_zero_rows)[new_size] = row;
894  ++new_size;
895  }
896  non_zero_rows->resize(new_size);
897 }
898 
899 void TriangularMatrix::HyperSparseSolveWithReversedNonZeros(
900     DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
901  if (all_diagonal_coefficients_are_one_) {
902  HyperSparseSolveWithReversedNonZerosInternal<true>(rhs, non_zero_rows);
903  } else {
904  HyperSparseSolveWithReversedNonZerosInternal<false>(rhs, non_zero_rows);
905  }
906 }
907 
908 template <bool diagonal_of_ones>
909 void TriangularMatrix::HyperSparseSolveWithReversedNonZerosInternal(
910  DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
911  RETURN_IF_NULL(rhs);
912  int new_start = non_zero_rows->size();
913  for (const RowIndex row : Reverse(*non_zero_rows)) {
914  if ((*rhs)[row] == 0.0) continue;
915  const ColIndex row_as_col = RowToColIndex(row);
916  const Fractional coeff =
917  diagonal_of_ones ? (*rhs)[row]
918  : (*rhs)[row] / diagonal_coefficients_[row_as_col];
919  (*rhs)[row] = coeff;
920  for (const EntryIndex i : Column(row_as_col)) {
921  (*rhs)[EntryRow(i)] -= coeff * EntryCoefficient(i);
922  }
923  --new_start;
924  (*non_zero_rows)[new_start] = row;
925  }
926  non_zero_rows->erase(non_zero_rows->begin(),
927  non_zero_rows->begin() + new_start);
928 }
929 
930 void TriangularMatrix::TransposeHyperSparseSolve(
931     DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
932  if (all_diagonal_coefficients_are_one_) {
933  TransposeHyperSparseSolveInternal<true>(rhs, non_zero_rows);
934  } else {
935  TransposeHyperSparseSolveInternal<false>(rhs, non_zero_rows);
936  }
937 }
938 
939 template <bool diagonal_of_ones>
940 void TriangularMatrix::TransposeHyperSparseSolveInternal(
941  DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
942  RETURN_IF_NULL(rhs);
943  int new_size = 0;
944  for (const RowIndex row : *non_zero_rows) {
945  Fractional sum = (*rhs)[row];
946  const ColIndex row_as_col = RowToColIndex(row);
947  for (const EntryIndex i : Column(row_as_col)) {
948  sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
949  }
950  (*rhs)[row] =
951  diagonal_of_ones ? sum : sum / diagonal_coefficients_[row_as_col];
952  if (sum != 0.0) {
953  (*non_zero_rows)[new_size] = row;
954  ++new_size;
955  }
956  }
957  non_zero_rows->resize(new_size);
958 }
959 
960 void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZeros(
961     DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
962  if (all_diagonal_coefficients_are_one_) {
963  TransposeHyperSparseSolveWithReversedNonZerosInternal<true>(rhs,
964  non_zero_rows);
965  } else {
966  TransposeHyperSparseSolveWithReversedNonZerosInternal<false>(rhs,
967  non_zero_rows);
968  }
969 }
970 
971 template <bool diagonal_of_ones>
972 void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZerosInternal(
973  DenseColumn* rhs, RowIndexVector* non_zero_rows) const {
974  RETURN_IF_NULL(rhs);
975  int new_start = non_zero_rows->size();
976  for (const RowIndex row : Reverse(*non_zero_rows)) {
977  Fractional sum = (*rhs)[row];
978  const ColIndex row_as_col = RowToColIndex(row);
979 
980  // We do the loops this way so that the floating point operations are
981     // exactly the same as the ones performed by TransposeLowerSolveInternal().
982  EntryIndex i = starts_[row_as_col + 1] - 1;
983  const EntryIndex i_end = starts_[row_as_col];
984  for (; i >= i_end; --i) {
985  sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
986  }
987  (*rhs)[row] =
988  diagonal_of_ones ? sum : sum / diagonal_coefficients_[row_as_col];
989  if (sum != 0.0) {
990  --new_start;
991  (*non_zero_rows)[new_start] = row;
992  }
993  }
994  non_zero_rows->erase(non_zero_rows->begin(),
995  non_zero_rows->begin() + new_start);
996 }
997 
998 void TriangularMatrix::PermutedLowerSolve(
999     const SparseColumn& rhs, const RowPermutation& row_perm,
1000  const RowMapping& partial_inverse_row_perm, SparseColumn* lower,
1001  SparseColumn* upper) const {
1002  DCHECK(all_diagonal_coefficients_are_one_);
1003  RETURN_IF_NULL(lower);
1004  RETURN_IF_NULL(upper);
1005 
1006  initially_all_zero_scratchpad_.resize(num_rows_, 0.0);
1007  for (const SparseColumn::Entry e : rhs) {
1008  initially_all_zero_scratchpad_[e.row()] = e.coefficient();
1009  }
1010 
1011  const RowIndex end_row(partial_inverse_row_perm.size());
1012  for (RowIndex row(ColToRowIndex(first_non_identity_column_)); row < end_row;
1013  ++row) {
1014  const RowIndex permuted_row = partial_inverse_row_perm[row];
1015  const Fractional pivot = initially_all_zero_scratchpad_[permuted_row];
1016  if (pivot == 0.0) continue;
1017  for (EntryIndex i : Column(RowToColIndex(row))) {
1018  initially_all_zero_scratchpad_[EntryRow(i)] -=
1019  EntryCoefficient(i) * pivot;
1020  }
1021  }
1022 
1023  lower->Clear();
1024  const RowIndex num_rows = num_rows_;
1025  for (RowIndex row(0); row < num_rows; ++row) {
1026  if (initially_all_zero_scratchpad_[row] != 0.0) {
1027  if (row_perm[row] < 0) {
1028  lower->SetCoefficient(row, initially_all_zero_scratchpad_[row]);
1029  } else {
1030  upper->SetCoefficient(row, initially_all_zero_scratchpad_[row]);
1031  }
1032  initially_all_zero_scratchpad_[row] = 0.0;
1033  }
1034  }
1035  DCHECK(lower->CheckNoDuplicates());
1036 }
1037 
1038 void TriangularMatrix::PermutedLowerSparseSolve(const ColumnView& rhs,
1039                                                 const RowPermutation& row_perm,
1040  SparseColumn* lower_column,
1041  SparseColumn* upper_column) {
1042  DCHECK(all_diagonal_coefficients_are_one_);
1043  RETURN_IF_NULL(lower_column);
1044  RETURN_IF_NULL(upper_column);
1045 
1046  // Compute the set of rows that will be non zero in the result (lower_column,
1047  // upper_column).
1048  PermutedComputeRowsToConsider(rhs, row_perm, &lower_column_rows_,
1049  &upper_column_rows_);
1050 
1051  // Copy rhs into initially_all_zero_scratchpad_.
1052  initially_all_zero_scratchpad_.resize(num_rows_, 0.0);
1053  for (const auto e : rhs) {
1054  initially_all_zero_scratchpad_[e.row()] = e.coefficient();
1055  }
1056 
1057  // We clear lower_column first in case upper_column and lower_column point to
1058  // the same underlying SparseColumn.
1059  lower_column->Clear();
1060 
1061  // rows_to_consider_ contains the row to process in reverse order. Note in
1062  // particular that each "permuted_row" will never be touched again and so its
1063  // value is final. We copy the result in (lower_column, upper_column) and
1064  // clear initially_all_zero_scratchpad_ at the same time.
1065  upper_column->Reserve(upper_column->num_entries() +
1066  EntryIndex(upper_column_rows_.size()));
1067  for (const RowIndex permuted_row : Reverse(upper_column_rows_)) {
1068  const Fractional pivot = initially_all_zero_scratchpad_[permuted_row];
1069  if (pivot == 0.0) continue;
1070  // Note that permuted_row will not appear in the loop below so we
1071  // already know the value of the solution at this position.
1072  initially_all_zero_scratchpad_[permuted_row] = 0.0;
1073  const ColIndex row_as_col = RowToColIndex(row_perm[permuted_row]);
1074  DCHECK_GE(row_as_col, 0);
1075  upper_column->SetCoefficient(permuted_row, pivot);
1076  DCHECK_EQ(diagonal_coefficients_[row_as_col], 1.0);
1077  for (const auto e : column(row_as_col)) {
1078  initially_all_zero_scratchpad_[e.row()] -= e.coefficient() * pivot;
1079  }
1080  }
1081 
1082   // TODO(user): The size of lower is exact, so we could be slightly faster here.
1083  lower_column->Reserve(EntryIndex(lower_column_rows_.size()));
1084  for (const RowIndex permuted_row : lower_column_rows_) {
1085  const Fractional pivot = initially_all_zero_scratchpad_[permuted_row];
1086  initially_all_zero_scratchpad_[permuted_row] = 0.0;
1087  lower_column->SetCoefficient(permuted_row, pivot);
1088  }
1089  DCHECK(lower_column->CheckNoDuplicates());
1090  DCHECK(upper_column->CheckNoDuplicates());
1091 }
1092 
1093 // The goal is to find which rows of the working column we will need to look
1094 // at in PermutedLowerSparseSolve() when solving P^{-1}.L.P.x = rhs, 'P' being a
1095 // row permutation, 'L' a lower triangular matrix and 'this' being 'P^{-1}.L'.
1096 // Note that the columns of L that are identity columns (this is the case for
1097 // the ones corresponding to a kNonPivotal in P) can be skipped since they will
1098 // leave the working column unchanged.
1099 //
1100 // Let G denote the graph G = (V,E) of the column-to-row adjacency of A:
1101 // - 'V' is the set of nodes, one node i corresponds to both a row
1102 // and a column (the matrix is square).
1103 // - 'E' is the set of arcs. There is an arc from node i to node j iff the
1104 // coefficient of i-th column, j-th row of A = P^{-1}.L.P is non zero.
1105 //
1106 // Let S denote the set of nodes i such that rhs_i != 0.
1107 // Let R denote the set of all accessible nodes from S in G.
1108 // x_k is possibly non-zero iff k is in R, i.e. if k is not in R then x_k = 0
1109 // for sure, and there is no need to look at the row k during the solve.
1110 //
1111 // So, to solve P^{-1}.L.P.x = rhs, only rows corresponding to P.R have to be
1112 // considered (ignoring the ones that map to identity columns of L). A topological
1113 // sort of P.R is used to decide in which order one should iterate on them. This
1114 // will be given by upper_column_rows_ and it will be populated in reverse
1115 // order.
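// Small illustrative example (hypothetical 3x3 case, not from the original
// comment): if only rhs_0 != 0 and the column of node 0 has a non-zero
// coefficient in row 2, then S = {0}, R = {0, 2}, and only the rows P.{0, 2}
// need to be visited during the solve; node 1 is never touched and x_1
// stays at 0.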
1116 void TriangularMatrix::PermutedComputeRowsToConsider(
1117     const ColumnView& rhs, const RowPermutation& row_perm,
1118  RowIndexVector* lower_column_rows, RowIndexVector* upper_column_rows) {
1119  stored_.resize(num_rows_, false);
1120  marked_.resize(num_rows_, false);
1121  lower_column_rows->clear();
1122  upper_column_rows->clear();
1123  nodes_to_explore_.clear();
1124 
1125  for (SparseColumn::Entry e : rhs) {
1126  const ColIndex col = RowToColIndex(row_perm[e.row()]);
1127  if (col < 0) {
1128  stored_[e.row()] = true;
1129  lower_column_rows->push_back(e.row());
1130  } else {
1131  nodes_to_explore_.push_back(e.row());
1132  }
1133  }
1134 
1135  // Topological sort based on Depth-First-Search.
1136  // A few notes:
1137  // - By construction, if the matrix can be permuted into a lower triangular
1138  // form, there is no cycle. This code does nothing to test for cycles, but
1139  // there is a DCHECK() to detect them during debugging.
1140  // - This version uses sentinels (kInvalidRow) on nodes_to_explore_ to know
1141  // when a node has been explored (i.e. when the recursive dfs goes back in
1142  // the call stack). This is faster than an alternate implementation that
1143  // uses another Boolean array to detect when we go back in the
1144  // depth-first search.
1145  while (!nodes_to_explore_.empty()) {
1146  const RowIndex row = nodes_to_explore_.back();
1147 
1148  // If the depth-first search from the current node is finished (i.e. there
1149  // is a sentinel on the stack), we store the node (which is just before on
1150  // the stack). This will store the nodes in reverse topological order.
1151  if (row < 0) {
1152  nodes_to_explore_.pop_back();
1153  const RowIndex explored_row = nodes_to_explore_.back();
1154  nodes_to_explore_.pop_back();
1155  DCHECK(!stored_[explored_row]);
1156  stored_[explored_row] = true;
1157  upper_column_rows->push_back(explored_row);
1158 
1159  // Unmark and prune the nodes that are already unmarked. See the header
1160  // comment on marked_ for the algorithm description.
1161  //
1162  // Complexity note: The only difference with the "normal" DFS doing no
1163  // pruning is this extra loop here and the marked_[entry_row] = true in
1164  // the loop later in this function. On an already pruned graph, this is
1165  // probably between 1 and 2 times slower than the "normal" DFS.
1166  const ColIndex col = RowToColIndex(row_perm[explored_row]);
1167  EntryIndex i = starts_[col];
1168  EntryIndex end = pruned_ends_[col];
1169  while (i < end) {
1170  const RowIndex entry_row = EntryRow(i);
1171  if (!marked_[entry_row]) {
1172  --end;
1173 
1174  // Note that we could keep the pruned row in a separate vector and
1175  // not touch the triangular matrix. But the current solution seems
1176  // better cache-wise and memory-wise.
1177  std::swap(rows_[i], rows_[end]);
1178  std::swap(coefficients_[i], coefficients_[end]);
1179  } else {
1180  marked_[entry_row] = false;
1181  ++i;
1182  }
1183  }
1184  pruned_ends_[col] = end;
1185  continue;
1186  }
1187 
1188  // If the node is already stored, skip.
1189  if (stored_[row]) {
1190  nodes_to_explore_.pop_back();
1191  continue;
1192  }
1193 
1194  // Expand only if we are not on a kNonPivotal row.
1195  // Otherwise we can store the node right away.
1196  const ColIndex col = RowToColIndex(row_perm[row]);
1197  if (col < 0) {
1198  stored_[row] = true;
1199  lower_column_rows->push_back(row);
1200  nodes_to_explore_.pop_back();
1201  continue;
1202  }
1203 
1204  // Go one level forward in the depth-first search, and store the 'adjacent'
1205  // node on nodes_to_explore_ for further processing.
1206  nodes_to_explore_.push_back(kInvalidRow);
1207  const EntryIndex end = pruned_ends_[col];
1208  for (EntryIndex i = starts_[col]; i < end; ++i) {
1209  const RowIndex entry_row = EntryRow(i);
1210  if (!stored_[entry_row]) {
1211  nodes_to_explore_.push_back(entry_row);
1212  }
1213  marked_[entry_row] = true;
1214  }
1215 
1216  // The graph contains cycles? this is not supposed to happen.
1217  DCHECK_LE(nodes_to_explore_.size(), 2 * num_rows_.value() + rows_.size());
1218  }
1219 
1220  // Clear stored_.
1221  for (const RowIndex row : *lower_column_rows) {
1222  stored_[row] = false;
1223  }
1224  for (const RowIndex row : *upper_column_rows) {
1225  stored_[row] = false;
1226  }
1227 }
1228 
1229 void TriangularMatrix::ComputeRowsToConsiderWithDfs(
1230     RowIndexVector* non_zero_rows) const {
1231  if (non_zero_rows->empty()) return;
1232 
1233  // We don't start the DFS if the initial number of non-zeros is under the
1234   // sparsity_threshold. During the DFS, we abort it if the number of
1235   // floating-point operations gets larger than the num_ops_threshold.
1236  //
1237  // In both cases, we make sure to clear non_zero_rows so that the solving part
1238  // will use the non-hypersparse version of the code.
1239  //
1240  // TODO(user): Investigate the best thresholds.
1241  const int sparsity_threshold =
1242  static_cast<int>(0.025 * static_cast<double>(num_rows_.value()));
1243  const int num_ops_threshold =
1244  static_cast<int>(0.05 * static_cast<double>(num_rows_.value()));
1245  int num_ops = non_zero_rows->size();
1246  if (num_ops > sparsity_threshold) {
1247  non_zero_rows->clear();
1248  return;
1249  }
1250 
1251  // Initialize using the non-zero positions of the input.
1252  stored_.resize(num_rows_, false);
1253  nodes_to_explore_.clear();
1254  nodes_to_explore_.swap(*non_zero_rows);
1255 
1256  // Topological sort based on Depth-First-Search.
1257  // Same remarks as the version implemented in PermutedComputeRowsToConsider().
1258  while (!nodes_to_explore_.empty()) {
1259  const RowIndex row = nodes_to_explore_.back();
1260 
1261  // If the depth-first search from the current node is finished, we store the
1262  // node. This will store the node in reverse topological order.
1263  if (row < 0) {
1264  nodes_to_explore_.pop_back();
1265  const RowIndex explored_row = -row - 1;
1266  stored_[explored_row] = true;
1267  non_zero_rows->push_back(explored_row);
1268  continue;
1269  }
1270 
1271  // If the node is already stored, skip.
1272  if (stored_[row]) {
1273  nodes_to_explore_.pop_back();
1274  continue;
1275  }
1276 
1277  // Go one level forward in the depth-first search, and store the 'adjacent'
1278  // node on nodes_to_explore_ for further processing.
1279  //
1280  // We reverse the sign of nodes_to_explore_.back() to detect when the
1281  // DFS will be back on this node.
1282  nodes_to_explore_.back() = -row - 1;
1283  for (const EntryIndex i : Column(RowToColIndex(row))) {
1284  ++num_ops;
1285  const RowIndex entry_row = EntryRow(i);
1286  if (!stored_[entry_row]) {
1287  nodes_to_explore_.push_back(entry_row);
1288  }
1289  }
1290 
1291  // Abort if the number of operations is not negligible compared to the
1292  // number of rows. Note that this test also prevents the code from cycling
1293  // in case the matrix is actually not triangular.
1294  if (num_ops > num_ops_threshold) break;
1295  }
1296 
1297  // Clear stored_.
1298  for (const RowIndex row : *non_zero_rows) {
1299  stored_[row] = false;
1300  }
1301 
1302  // If we aborted, clear the result.
1303  if (num_ops > num_ops_threshold) non_zero_rows->clear();
1304 }
1305 
1306 void TriangularMatrix::ComputeRowsToConsiderInSortedOrder(
1307     RowIndexVector* non_zero_rows) const {
1308  static const Fractional kDefaultSparsityRatio = 0.025;
1309  static const Fractional kDefaultNumOpsRatio = 0.05;
1310  ComputeRowsToConsiderInSortedOrder(non_zero_rows, kDefaultSparsityRatio,
1311  kDefaultNumOpsRatio);
1312 }
1313 
1314 void TriangularMatrix::ComputeRowsToConsiderInSortedOrder(
1315     RowIndexVector* non_zero_rows, Fractional sparsity_ratio,
1316  Fractional num_ops_ratio) const {
1317  if (non_zero_rows->empty()) return;
1318 
1319  // TODO(user): Investigate the best thresholds.
1320  const int sparsity_threshold =
1321  static_cast<int>(0.025 * static_cast<double>(num_rows_.value()));
1322  const int num_ops_threshold =
1323  static_cast<int>(0.05 * static_cast<double>(num_rows_.value()));
1324  int num_ops = non_zero_rows->size();
1325  if (num_ops > sparsity_threshold) {
1326  non_zero_rows->clear();
1327  return;
1328  }
1329 
1330  stored_.resize(num_rows_, false);
1331  for (const RowIndex row : *non_zero_rows) stored_[row] = true;
1332  for (int i = 0; i < non_zero_rows->size(); ++i) {
1333  const RowIndex row = (*non_zero_rows)[i];
1334  for (const EntryIndex i : Column(RowToColIndex(row))) {
1335  ++num_ops;
1336  const RowIndex entry_row = EntryRow(i);
1337  if (!stored_[entry_row]) {
1338  non_zero_rows->push_back(entry_row);
1339  stored_[entry_row] = true;
1340  }
1341  }
1342  if (num_ops > num_ops_threshold) break;
1343  }
1344 
1345  for (const RowIndex row : *non_zero_rows) stored_[row] = false;
1346  if (num_ops > num_ops_threshold) {
1347  non_zero_rows->clear();
1348  } else {
1349  std::sort(non_zero_rows->begin(), non_zero_rows->end());
1350  }
1351 }
1352 
1353 // A known upper bound for the infinity norm of T^{-1} is the
1354 // infinity norm of y where T'*y = x with:
1355 // - x the all 1s vector.
1356 // - Each entry in T' is the absolute value of the same entry in T.
1357 Fractional TriangularMatrix::ComputeInverseInfinityNormUpperBound() const {
1358   if (first_non_identity_column_ == num_cols_) {
1359  // Identity matrix
1360  return 1.0;
1361  }
1362 
1363  const bool is_upper = IsUpperTriangular();
1364  DenseColumn row_norm_estimate(num_rows_, 1.0);
1365  const int num_cols = num_cols_.value();
1366 
1367  for (int i = 0; i < num_cols; ++i) {
1368  const ColIndex col(is_upper ? num_cols - 1 - i : i);
1369  DCHECK_NE(diagonal_coefficients_[col], 0.0);
1370  const Fractional coeff = row_norm_estimate[ColToRowIndex(col)] /
1371  std::abs(diagonal_coefficients_[col]);
1372 
1373  row_norm_estimate[ColToRowIndex(col)] = coeff;
1374  for (const EntryIndex i : Column(col)) {
1375  row_norm_estimate[EntryRow(i)] += coeff * std::abs(EntryCoefficient(i));
1376  }
1377  }
1378 
1379  return *std::max_element(row_norm_estimate.begin(), row_norm_estimate.end());
1380 }
1381 
1382 Fractional TriangularMatrix::ComputeInverseInfinityNorm() const {
1383   const bool is_upper = IsUpperTriangular();
1384 
1385  DenseColumn row_sum(num_rows_, 0.0);
1386  DenseColumn right_hand_side;
1387  for (ColIndex col(0); col < num_cols_; ++col) {
1388  right_hand_side.assign(num_rows_, 0);
1389  right_hand_side[ColToRowIndex(col)] = 1.0;
1390 
1391  // Get the col-th column of the matrix inverse.
1392  if (is_upper) {
1393  UpperSolve(&right_hand_side);
1394  } else {
1395  LowerSolve(&right_hand_side);
1396  }
1397 
1398  // Compute sum_j |inverse_ij|.
1399  for (RowIndex row(0); row < num_rows_; ++row) {
1400  row_sum[row] += std::abs(right_hand_side[row]);
1401  }
1402  }
1403  // Compute max_i sum_j |inverse_ij|.
1404  Fractional norm = 0.0;
1405  for (RowIndex row(0); row < num_rows_; ++row) {
1406  norm = std::max(norm, row_sum[row]);
1407  }
1408 
1409  return norm;
1410 }
1411 } // namespace glop
1412 } // namespace operations_research
bool IsLowerTriangular() const
Definition: sparse.cc:692
operations_research::glop::TriangularMatrix::ComputeInverseInfinityNormUpperBound
Fractional ComputeInverseInfinityNormUpperBound() const
Definition: sparse.cc:1357
operations_research::glop::SparseVector< RowIndex, SparseColumnIterator >::Entry
typename Iterator::Entry Entry
Definition: sparse_vector.h:91
DCHECK_GE
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:889
absl::StrongVector::clear
void clear()
Definition: strong_vector.h:170
operations_research::glop::CompactSparseMatrix::coefficients_
StrictITIVector< EntryIndex, Fractional > coefficients_
Definition: sparse.h:459
operations_research::glop::SparseVector::num_entries
EntryIndex num_entries() const
Definition: sparse_vector.h:270
operations_research::glop::CompactSparseMatrix::Column
::util::IntegerRange< EntryIndex > Column(ColIndex col) const
Definition: sparse.h:358
operations_research::glop::SparseMatrix::ComputeMinAndMaxMagnitudes
void ComputeMinAndMaxMagnitudes(Fractional *min_magnitude, Fractional *max_magnitude) const
Definition: sparse.cc:369
operations_research::glop::DenseColumn
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:328
absl::StrongVector::swap
void swap(StrongVector &x)
Definition: strong_vector.h:169
absl::StrongVector::back
reference back()
Definition: strong_vector.h:174
operations_research::glop::Permutation::size
IndexType size() const
Definition: lp_data/permutation.h:49
DCHECK_EQ
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:885
operations_research::glop::CompactSparseMatrix::AddDenseColumn
ColIndex AddDenseColumn(const DenseColumn &dense_column)
Definition: sparse.cc:536
operations_research::glop::ColumnView
Definition: sparse_column.h:65
operations_research::glop::TriangularMatrix::ApplyRowPermutationToNonDiagonalEntries
void ApplyRowPermutationToNonDiagonalEntries(const RowPermutation &row_perm)
Definition: sparse.cc:712
operations_research::glop::MatrixView::ComputeInfinityNorm
Fractional ComputeInfinityNorm() const
Definition: sparse.cc:423
operations_research::glop::MatrixView::num_entries
EntryIndex num_entries() const
Definition: sparse.cc:419
operations_research::glop::SparseMatrix::IsEmpty
bool IsEmpty() const
Definition: sparse.cc:115
operations_research::glop::CompactSparseMatrix::num_cols_
ColIndex num_cols_
Definition: sparse.h:454
operations_research::glop::RandomAccessSparseColumn
Definition: sparse_column.h:130
operations_research::glop::TriangularMatrix::CopyToSparseMatrix
void CopyToSparseMatrix(SparseMatrix *output) const
Definition: sparse.cc:730
permutation.h
operations_research::glop::RandomAccessSparseColumn::AddToCoefficient
void AddToCoefficient(RowIndex row, Fractional value)
Definition: sparse_column.h:152
operations_research::glop::TriangularMatrix::UpperSolve
void UpperSolve(DenseColumn *rhs) const
Definition: sparse.cc:770
util::Reverse
BeginEndReverseIteratorWrapper< Container > Reverse(const Container &c)
Definition: iterators.h:98
operations_research::glop::SparseMatrix::num_entries
EntryIndex num_entries() const
Definition: sparse.cc:389
lp_types.h
operations_research::glop::kInvalidRow
const RowIndex kInvalidRow(-1)
operations_research::glop::SparseMatrix::Dump
std::string Dump() const
Definition: sparse.cc:399
operations_research::glop::ColToRowIndex
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:51
DCHECK_LE
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:887
absl::StrongVector::empty
bool empty() const
Definition: strong_vector.h:156
absl::StrongVector::front
reference front()
Definition: strong_vector.h:172
operations_research::glop::RowIndexVector
std::vector< RowIndex > RowIndexVector
Definition: lp_types.h:309
operations_research::glop::TriangularMatrix::TransposeHyperSparseSolve
void TransposeHyperSparseSolve(DenseColumn *rhs, RowIndexVector *non_zero_rows) const
Definition: sparse.cc:930
operations_research::glop::TriangularMatrix::ComputeInverseInfinityNorm
Fractional ComputeInverseInfinityNorm() const
Definition: sparse.cc:1382
sparse.h
operations_research::glop::SparseVector::CleanUp
void CleanUp()
Definition: sparse_vector.h:544
operations_research::glop::CompactSparseMatrix::Swap
void Swap(CompactSparseMatrix *other)
Definition: sparse.cc:585
operations_research::glop::SparseMatrix::SetNumRows
void SetNumRows(RowIndex num_rows)
Definition: sparse.cc:143
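
The entries above index the SparseMatrix helpers defined in this file (PopulateFromIdentity, mutable_column, CleanUp, ComputeOneNorm, ComputeInfinityNorm, Dump). A minimal usage sketch, assuming only the declarations listed in this index, might look as follows; the matrix values and the main() driver are illustrative and not part of the library.

#include <cstdio>
#include "ortools/lp_data/sparse.h"

int main() {
  using namespace operations_research::glop;
  SparseMatrix m;
  m.PopulateFromIdentity(ColIndex(3));  // 3x3 identity.
  // Add one off-diagonal entry to column 0, then sort entries within each
  // column and remove duplicates via CleanUp().
  m.mutable_column(ColIndex(0))->SetCoefficient(RowIndex(2), -4.0);
  m.CleanUp();
  // Column 0 now sums to |1| + |-4| = 5 and row 2 to |-4| + |1| = 5,
  // so both norms are 5 for this particular matrix.
  printf("||A||_1   = %f\n", m.ComputeOneNorm());
  printf("||A||_inf = %f\n", m.ComputeInfinityNorm());
  printf("%s", m.Dump().c_str());
  return 0;
}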
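TriangularMatrix::Reset, AddDiagonalOnlyColumn, IsLowerTriangular and LowerSolve are also indexed above. The sketch below is a hypothetical driver that builds a 2x2 identity as a lower-triangular matrix and solves against it in place; with an identity matrix the right-hand side comes back unchanged, which makes the call easy to verify.

#include <cstdio>
#include "ortools/lp_data/sparse.h"

int main() {
  using namespace operations_research::glop;
  TriangularMatrix lower;
  lower.Reset(RowIndex(2), ColIndex(2));  // 2 rows, capacity for 2 columns.
  lower.AddDiagonalOnlyColumn(1.0);       // Column 0: single diagonal entry.
  lower.AddDiagonalOnlyColumn(1.0);       // Column 1.
  printf("lower-triangular: %s\n", lower.IsLowerTriangular() ? "yes" : "no");

  DenseColumn rhs;
  rhs.assign(RowIndex(2), 0.0);
  rhs[RowIndex(0)] = 3.0;
  rhs[RowIndex(1)] = -1.0;
  lower.LowerSolve(&rhs);  // Solves L * x = rhs in place; identity leaves rhs as-is.
  printf("x = (%f, %f)\n", rhs[RowIndex(0)], rhs[RowIndex(1)]);
  return 0;
}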
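Finally, PopulateFromTranspose and PopulateFromLinearCombination(alpha, a, beta, b) from the index can be combined to form matrix expressions such as alpha*A + beta*A^T. The following sketch assumes only the signatures listed above; the coefficient values are arbitrary.

#include <cstdio>
#include "ortools/lp_data/sparse.h"

int main() {
  using namespace operations_research::glop;
  SparseMatrix a;
  a.PopulateFromIdentity(ColIndex(2));
  a.mutable_column(ColIndex(1))->SetCoefficient(RowIndex(0), 5.0);  // a(0,1) = 5.
  a.CleanUp();

  SparseMatrix a_transposed;
  a_transposed.PopulateFromTranspose(a);  // a_transposed(i,j) == a(j,i).

  // c = 2*a - a_transposed: the off-diagonal entries become 2*5 - 0 = 10 at
  // (0,1) and 0 - 5 = -5 at (1,0); the diagonal stays at 2*1 - 1 = 1.
  SparseMatrix c;
  c.PopulateFromLinearCombination(2.0, a, -1.0, a_transposed);
  printf("%s", c.Dump().c_str());
  return 0;
}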