Skip to content

Commit 3e2352b

Browse files
committed
keep the dense vector path alive
1 parent cc54886 commit 3e2352b

File tree

1 file changed

+42
-19
lines changed

cpp/src/dual_simplex/crossover.cpp

Lines changed: 42 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -389,7 +389,8 @@ i_t dual_push(const lp_problem_t<i_t, f_t>& lp,
389389
const std::vector<f_t>& x = solution.x;
390390
i_t num_pushes = 0;
391391
std::vector<f_t> delta_zN(n - m);
392-
std::vector<f_t> delta_expanded(n);
392+
std::vector<f_t> delta_expanded; // workspace for sparse path (delta_y is sparse enough)
393+
std::vector<f_t> delta_y_dense; // workspace for dense path (delta_y is not sparse enough)
393394
while (superbasic_list.size() > 0) {
394395
const i_t s = superbasic_list.back();
395396
const i_t basic_leaving_index = superbasic_list_index.back();
@@ -417,24 +418,38 @@ i_t dual_push(const lp_problem_t<i_t, f_t>& lp,
417418
}
418419

419420
// delta_zN = -N^T delta_y
420-
std::fill(delta_expanded.begin(), delta_expanded.end(), 0.);
421+
// Choose sparse vs dense method by delta_y sparsity (match dual simplex: sparse if <= 30% nnz)
421422
std::fill(delta_zN.begin(), delta_zN.end(), 0.);
422-
423-
// Iterate directly over sparse delta_y instead of checking zeros
424-
for (i_t nnz_idx = 0; nnz_idx < delta_y_sparse.i.size(); ++nnz_idx) {
425-
const i_t row = delta_y_sparse.i[nnz_idx];
426-
const f_t val = delta_y_sparse.x[nnz_idx];
427-
428-
// Accumulate contributions from this row to all columns
429-
const i_t row_start = Arow.row_start[row];
430-
const i_t row_end = Arow.row_start[row + 1];
431-
for (i_t p = row_start; p < row_end; ++p) {
432-
const i_t col = Arow.j[p];
433-
delta_expanded[col] += Arow.x[p] * val;
423+
const bool use_sparse = (delta_y_sparse.i.size() * 1.0 / m) <= 0.3;
424+
425+
if (use_sparse) {
426+
delta_expanded.resize(n);
427+
std::fill(delta_expanded.begin(), delta_expanded.end(), 0.);
428+
for (i_t nnz_idx = 0; nnz_idx < static_cast<i_t>(delta_y_sparse.i.size()); ++nnz_idx) {
429+
const i_t row = delta_y_sparse.i[nnz_idx];
430+
const f_t val = delta_y_sparse.x[nnz_idx];
431+
const i_t row_start = Arow.row_start[row];
432+
const i_t row_end = Arow.row_start[row + 1];
433+
for (i_t p = row_start; p < row_end; ++p) {
434+
const i_t col = Arow.j[p];
435+
delta_expanded[col] += Arow.x[p] * val;
436+
}
437+
}
438+
for (i_t k = 0; k < n - m; ++k) {
439+
delta_zN[k] = -delta_expanded[nonbasic_list[k]];
440+
}
441+
} else {
442+
delta_y_sparse.to_dense(delta_y_dense);
443+
for (i_t k = 0; k < n - m; ++k) {
444+
const i_t j = nonbasic_list[k];
445+
f_t dot = 0.0;
446+
const i_t c_start = lp.A.col_start[j];
447+
const i_t c_end = lp.A.col_start[j + 1];
448+
for (i_t p = c_start; p < c_end; ++p) {
449+
dot += lp.A.x[p] * delta_y_dense[lp.A.i[p]];
450+
}
451+
delta_zN[k] = -dot;
434452
}
435-
}
436-
for (i_t k = 0; k < n - m; ++k) {
437-
delta_zN[k] = -delta_expanded[nonbasic_list[k]];
438453
}
439454

440455
i_t entering_index = -1;
@@ -1345,8 +1360,16 @@ crossover_status_t crossover(const lp_problem_t<i_t, f_t>& lp,
13451360
basis_update_mpf_t ft(L, U, p, settings.refactor_frequency);
13461361
verify_basis<i_t, f_t>(m, n, vstatus);
13471362
compare_vstatus_with_lists<i_t, f_t>(m, n, basic_list, nonbasic_list, vstatus);
1348-
i_t dual_push_status = dual_push(
1349-
lp, Arow, settings, start_time, solution, ft, basic_list, nonbasic_list, superbasic_list, vstatus);
1363+
i_t dual_push_status = dual_push(lp,
1364+
Arow,
1365+
settings,
1366+
start_time,
1367+
solution,
1368+
ft,
1369+
basic_list,
1370+
nonbasic_list,
1371+
superbasic_list,
1372+
vstatus);
13501373
if (dual_push_status < 0) { return return_to_status(dual_push_status); }
13511374
settings.log.debug("basic list size %ld m %d\n", basic_list.size(), m);
13521375
settings.log.debug("nonbasic list size %ld n - m %d\n", nonbasic_list.size(), n - m);

0 commit comments

Comments
 (0)