diff --git a/.github/workflows/amd64_linux_bazel.yml b/.github/workflows/amd64_linux_bazel.yml index 7e4818dbbd4..69625b4d1c3 100644 --- a/.github/workflows/amd64_linux_bazel.yml +++ b/.github/workflows/amd64_linux_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/.github/workflows/amd64_macos_bazel.yml b/.github/workflows/amd64_macos_bazel.yml index a478b5b6040..2f5990146d0 100644 --- a/.github/workflows/amd64_macos_bazel.yml +++ b/.github/workflows/amd64_macos_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/.github/workflows/amd64_windows_bazel.yml b/.github/workflows/amd64_windows_bazel.yml index f33ca41ab52..30c92458781 100644 --- a/.github/workflows/amd64_windows_bazel.yml +++ b/.github/workflows/amd64_windows_bazel.yml @@ -10,7 +10,6 @@ jobs: matrix: runner: [windows-2022] python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/.github/workflows/arm64_macos_bazel.yml b/.github/workflows/arm64_macos_bazel.yml index 59d67a5d1ee..f76442597db 100644 --- a/.github/workflows/arm64_macos_bazel.yml +++ b/.github/workflows/arm64_macos_bazel.yml @@ -9,7 +9,6 @@ jobs: strategy: matrix: python: [ - {version: '3.9'}, {version: '3.10'}, {version: '3.11'}, {version: '3.12'}, diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index b4285bbff70..1c78f86eb4f 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -26,3 +26,4 @@ jupyter-server==2.14.2 tornado==6.4.1 Pygments==2.15.0 jsonschema==4.19.0 +jinja2==3.1.4 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index afcaede6aea..f038789bb4a 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -1,10 +1,10 @@ # -# This file is autogenerated by pip-compile with Python 
3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # bazel run //bazel:notebook_requirements.update # -absl-py==2.0.0 +absl-py==2.1.0 # via -r bazel/notebook_requirements.in anyio==4.0.0 # via @@ -85,8 +85,9 @@ isoduration==20.11.0 # via jsonschema jedi==0.19.0 # via ipython -jinja2==3.1.3 +jinja2==3.1.4 # via + # -r bazel/notebook_requirements.in # jupyter-server # jupyterlab # jupyterlab-server @@ -176,7 +177,7 @@ notebook-shim==0.2.3 # via # jupyterlab # notebook -numpy==2.1.0 +numpy==2.1.1 # via # -r bazel/notebook_requirements.in # pandas @@ -215,7 +216,7 @@ prometheus-client==0.17.1 # via jupyter-server prompt-toolkit==3.0.39 # via ipython -protobuf==5.27.3 +protobuf==5.27.5 # via # -r bazel/notebook_requirements.in # mypy-protobuf diff --git a/examples/java/LinearProgramming.java b/examples/java/LinearProgramming.java index 1c764f4fd00..79d96369766 100644 --- a/examples/java/LinearProgramming.java +++ b/examples/java/LinearProgramming.java @@ -65,7 +65,7 @@ private static void runLinearProgrammingExample(String solverType, boolean print System.out.println("Number of constraints = " + solver.numConstraints()); if (printModel) { - String model = solver.exportModelAsLpFormat(); + String model = solver.exportModelAsLpFormat(/* obfuscate = */false); System.out.println(model); } diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.i index 2df3c50df6e..83d9db7e560 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.i @@ -188,7 +188,7 @@ PROTO2_RETURN( /** * Export the loaded model in LP format. 
*/ - std::string exportModelAsLpFormat(bool obfuscate) { + std::string exportModelAsLpFormat(bool obfuscate = false) { operations_research::MPModelExportOptions options; options.obfuscate = obfuscate; operations_research::MPModelProto model; diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.i index 7a5bf5ef839..a475c55a777 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.i @@ -374,8 +374,8 @@ PY_CONVERT(MPVariable); %rename (LookupVariable) operations_research::MPSolver::LookupVariableOrNull; %unignore operations_research::MPSolver::SetSolverSpecificParametersAsString; %unignore operations_research::MPSolver::NextSolution; -%unignore operations_research::MPSolver::ExportModelAsLpFormat; -%unignore operations_research::MPSolver::ExportModelAsMpsFormat; +%unignore operations_research::MPSolver::ExportModelAsLpFormat(bool); +%unignore operations_research::MPSolver::ExportModelAsMpsFormat(bool, bool); %unignore operations_research::MPSolver::WriteModelToMpsFile; %unignore operations_research::MPSolver::Write; diff --git a/ortools/linear_solver/python/lp_test.py b/ortools/linear_solver/python/lp_test.py index a52dc3467b1..5b7c1888808 100755 --- a/ortools/linear_solver/python/lp_test.py +++ b/ortools/linear_solver/python/lp_test.py @@ -349,7 +349,7 @@ def testExportToMps(self): sum_of_vars = sum([x1, x2, x3]) c2 = solver.Add(sum_of_vars <= 100.0, "OtherConstraintName") - mps_str = solver.ExportModelAsMpsFormat(fixed_format=False, obfuscated=False) + mps_str = solver.ExportModelAsMpsFormat(fixed_format=False, obfuscate=False) self.assertIn("ExportMps", mps_str) diff --git a/ortools/lp_data/lp_data_utils.cc b/ortools/lp_data/lp_data_utils.cc index 126121e65b6..d234ff189c1 100644 --- a/ortools/lp_data/lp_data_utils.cc +++ b/ortools/lp_data/lp_data_utils.cc @@ -184,6 +184,14 @@ Fractional LpScalingHelper::UnscaleDualValue(RowIndex row, return value / 
(RowUnscalingFactor(row) * objective_scaling_factor_); } +Fractional LpScalingHelper::UnscaleLeftSolveValue(RowIndex row, + Fractional value) const { + // In the scaled domain, we are taking a sum coeff * scaling * row, + // so to get the same effect in the unscaled domain, we want to multiply by + // (coeff * scaling). + return value / RowUnscalingFactor(row); +} + Fractional LpScalingHelper::UnscaleConstraintActivity(RowIndex row, Fractional value) const { // The activity move with the row_scale and the bound_scaling_factor. diff --git a/ortools/lp_data/lp_data_utils.h b/ortools/lp_data/lp_data_utils.h index 37e94249ce0..e1c94bce254 100644 --- a/ortools/lp_data/lp_data_utils.h +++ b/ortools/lp_data/lp_data_utils.h @@ -70,6 +70,7 @@ class LpScalingHelper { Fractional UnscaleVariableValue(ColIndex col, Fractional value) const; Fractional UnscaleReducedCost(ColIndex col, Fractional value) const; Fractional UnscaleDualValue(RowIndex row, Fractional value) const; + Fractional UnscaleLeftSolveValue(RowIndex row, Fractional value) const; Fractional UnscaleConstraintActivity(RowIndex row, Fractional value) const; // Unscale a row vector v such that v.B = unit_row. 
When basis_col is the diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 5454548cebf..9ae2eca5e43 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -1787,6 +1787,8 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { linear_for_true); AddLinearExpressionToLinearConstraint(ct->int_prod().target(), -1, linear_for_true); + context_->CanonicalizeLinearConstraint(constraint_for_false); + context_->CanonicalizeLinearConstraint(constraint_for_true); context_->UpdateRuleStats("int_prod: boolean affine term"); context_->UpdateNewConstraintsVariableUsage(); return RemoveConstraint(ct); diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index 245a440c87a..28359dc474c 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -63,6 +63,9 @@ struct CutTerm { bool IsBoolean() const { return bound_diff == 1; } bool IsSimple() const { return expr_coeffs[1] == 0; } bool HasRelevantLpValue() const { return lp_value > 1e-2; } + bool IsFractional() const { + return std::abs(lp_value - std::round(lp_value)) > 1e-4; + } double LpDistToMaxValue() const { return static_cast(bound_diff.value()) - lp_value; } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index d308b1ae9d1..52fb78652c8 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -288,7 +288,6 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( expanded_reduced_costs_(*model->GetOrCreate()) { // Tweak the default parameters to make the solve incremental. 
simplex_params_.set_use_dual_simplex(true); - simplex_params_.set_cost_scaling(glop::GlopParameters::MEAN_COST_SCALING); simplex_params_.set_primal_feasibility_tolerance( parameters_.lp_primal_tolerance()); simplex_params_.set_dual_feasibility_tolerance( @@ -1060,7 +1059,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, } bool some_fixed_terms = false; - bool some_relevant_positions = false; + bool some_fractional_positions = false; for (CutTerm& term : cut->terms) { const absl::int128 magnitude128 = term.coeff.value(); const absl::int128 range = @@ -1138,8 +1137,8 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, if (term.bound_diff == 0) { some_fixed_terms = true; } else { - if (term.HasRelevantLpValue()) { - some_relevant_positions = true; + if (term.IsFractional()) { + some_fractional_positions = true; } } } @@ -1153,7 +1152,7 @@ bool LinearProgrammingConstraint::PreprocessCut(IntegerVariable first_slack, } cut->terms.resize(new_size); } - return some_relevant_positions; + return some_fractional_positions; } bool LinearProgrammingConstraint::AddCutFromConstraints( @@ -1192,14 +1191,16 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( ImpliedBoundsProcessor* ib_processor = nullptr; { bool some_ints = false; - bool some_relevant_positions = false; + bool some_fractional_positions = false; for (const CutTerm& term : base_ct_.terms) { if (term.bound_diff > 1) some_ints = true; - if (term.HasRelevantLpValue()) some_relevant_positions = true; + if (term.IsFractional()) { + some_fractional_positions = true; + } } // If all value are integer, we will not be able to cut anything. - if (!some_relevant_positions) return false; + if (!some_fractional_positions) return false; if (some_ints) ib_processor = &implied_bounds_processor_; } @@ -1356,7 +1357,7 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( // TODO(user): Ideally we should detect this even earlier during the cut // generation. 
if (cut.ComputeViolation() < 1e-4) { - VLOG(2) << "Bad cut " << name << " " << info; + VLOG(3) << "Bad cut " << name << " " << info; ++num_bad_cuts_; return false; } @@ -1428,13 +1429,6 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( // it triggers. We should add heuristics to abort earlier if a cut is not // promising. Or only test a few positions and not all rows. void LinearProgrammingConstraint::AddCGCuts() { - // We used not to do "classical" gomory and instead used this heuristic. - // It is usually faster but on some problem like neos*creuse, this do not find - // good cut though. - // - // TODO(user): Make the cut generation lighter and try this at false. - const bool old_gomory = true; - // Note that the index is permuted and do not correspond to a row. const RowIndex num_rows(integer_lp_.size()); for (RowIndex index(0); index < num_rows; ++index) { @@ -1442,12 +1436,18 @@ void LinearProgrammingConstraint::AddCGCuts() { const ColIndex basis_col = simplex_.GetBasis(index); - // If this variable is a slack, we ignore it. This is because the - // corresponding row is not tight under the given lp values. - if (old_gomory && basis_col >= integer_variables_.size()) continue; + // We used to skip slack and also not to do "classical" gomory and instead + // call IgnoreTrivialConstraintMultipliers() heuristic. It is usually faster + // but on some problem like neos*creuse, this do not find good cut though. + // + // TODO(user): Tune this. + if (basis_col >= integer_variables_.size()) continue; - // TODO(user): If the variable is a slack, the unscaling is wrong! - const Fractional lp_value = GetVariableValueAtCpScale(basis_col); + // Get the variable value at cp-scale. Similar to GetVariableValueAtCpScale() + // but this works for slack variable too. + const Fractional lp_value = + simplex_.GetVariableValue(basis_col) / + scaler_.VariableScalingFactorWithSlack(basis_col); // Only consider fractional basis element. 
We ignore element that are close // to an integer to reduce the amount of positions we try. @@ -1457,6 +1457,9 @@ void LinearProgrammingConstraint::AddCGCuts() { // also be just under it. if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue; + // We multiply by row_factors_ directly, which might be slightly more precise + // than dividing by 1/factor like UnscaleLeftSolveValue() does. + // // TODO(user): Avoid code duplication between the sparse/dense path. tmp_lp_multipliers_.clear(); const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(index); @@ -1464,14 +1467,14 @@ for (RowIndex row(0); row < num_rows; ++row) { const double value = lambda.values[glop::RowToColIndex(row)]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } } else { for (const ColIndex col : lambda.non_zeros) { const RowIndex row = glop::ColToRowIndex(col); const double value = lambda.values[col]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } } @@ -1480,22 +1483,30 @@ IntegerValue scaling; for (int i = 0; i < 2; ++i) { + tmp_cg_multipliers_ = tmp_lp_multipliers_; if (i == 1) { // Try other sign. // // TODO(user): Maybe add an heuristic to know beforehand which sign to // use? - for (std::pair& p : tmp_lp_multipliers_) { + for (std::pair& p : tmp_cg_multipliers_) { p.second = -p.second; } } - // TODO(user): We use a lower value here otherwise we might run into - // overflow while computing the cut. This should be fixable. - tmp_integer_multipliers_ = ScaleLpMultiplier( - /*take_objective_into_account=*/false, - /*ignore_trivial_constraints=*/old_gomory, tmp_lp_multipliers_, - &scaling); + // Remove constraints that shouldn't be helpful. 
+ // + // In practice, because we can complement the slack, it might still be + // useful to have some constraint with a trivial upper bound. That said, + // this does look weird, maybe we miss something in our one-constraint + // cut generation if it is useful to add such a term. Investigate on + // neos-555884. + if (true) { + IgnoreTrivialConstraintMultipliers(&tmp_cg_multipliers_); + if (tmp_cg_multipliers_.size() <= 1) continue; + } + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_cg_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling != 0) { AddCutFromConstraints("CG", tmp_integer_multipliers_); } @@ -2090,50 +2101,36 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( return bound >= overflow_cap; } -std::vector> -LinearProgrammingConstraint::ScaleLpMultiplier( - bool take_objective_into_account, bool ignore_trivial_constraints, - absl::Span> lp_multipliers, - IntegerValue* scaling, int64_t overflow_cap) const { - *scaling = 0; - - // First unscale the values with the LP scaling and remove bad cases. - tmp_cp_multipliers_.clear(); - for (const std::pair& p : lp_multipliers) { +void LinearProgrammingConstraint::IgnoreTrivialConstraintMultipliers( + std::vector>* lp_multipliers) { + int new_size = 0; + for (const std::pair& p : *lp_multipliers) { const RowIndex row = p.first; const Fractional lp_multi = p.second; - - // We ignore small values since these are likely errors and will not - // contribute much to the new lp constraint anyway. - if (std::abs(lp_multi) < kZeroTolerance) continue; - - // Remove constraints that shouldn't be helpful. - // - // In practice, because we can complement the slack, it might still be - // useful to have some constraint with a trivial upper bound. 
- if (ignore_trivial_constraints) { - if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) { - continue; - } - if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) { - continue; - } - } - - tmp_cp_multipliers_.push_back( - {row, scaler_.UnscaleDualValue(row, lp_multi)}); + if (lp_multi > 0.0 && integer_lp_[row].ub_is_trivial) continue; + if (lp_multi < 0.0 && integer_lp_[row].lb_is_trivial) continue; + (*lp_multipliers)[new_size++] = p; } + lp_multipliers->resize(new_size); +} + +std::vector> +LinearProgrammingConstraint::ScaleMultipliers( + absl::Span> lp_multipliers, + bool take_objective_into_account, IntegerValue* scaling) const { + *scaling = 0; std::vector> integer_multipliers; - if (tmp_cp_multipliers_.empty()) { + if (lp_multipliers.empty()) { // Empty linear combinaison. return integer_multipliers; } // TODO(user): we currently do not support scaling down, so we just abort // if with a scaling of 1, we reach the overflow_cap. + const int64_t overflow_cap = std::numeric_limits::max(); if (ScalingCanOverflow(/*power=*/0, take_objective_into_account, - tmp_cp_multipliers_, overflow_cap)) { + lp_multipliers, overflow_cap)) { ++num_scaling_issues_; return integer_multipliers; } @@ -2151,7 +2148,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier( if (candidate >= 63) return false; return !ScalingCanOverflow(candidate, take_objective_into_account, - tmp_cp_multipliers_, overflow_cap); + lp_multipliers, overflow_cap); }); *scaling = int64_t{1} << power; @@ -2159,7 +2156,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier( // Note that we use the exact same formula as in ScalingCanOverflow(). 
int64_t gcd = scaling->value(); const double scaling_as_double = static_cast(scaling->value()); - for (const auto [row, double_coeff] : tmp_cp_multipliers_) { + for (const auto [row, double_coeff] : lp_multipliers) { const IntegerValue coeff(std::round(double_coeff * scaling_as_double)); if (coeff != 0) { gcd = std::gcd(gcd, std::abs(coeff.value())); @@ -2431,7 +2428,7 @@ bool LinearProgrammingConstraint::PropagateExactLpReason() { for (RowIndex row(0); row < num_rows; ++row) { const double value = -simplex_.GetDualValue(row); if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + tmp_lp_multipliers_.push_back({row, scaler_.UnscaleDualValue(row, value)}); } // In this case, the LP lower bound match the basic objective "constraint" @@ -2454,9 +2451,9 @@ bool LinearProgrammingConstraint::PropagateExactLpReason() { } IntegerValue scaling = 0; - tmp_integer_multipliers_ = ScaleLpMultiplier( - take_objective_into_account, - /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling); + IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_); + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_lp_multipliers_, take_objective_into_account, &scaling); if (scaling == 0) { VLOG(1) << simplex_.GetProblemStatus(); VLOG(1) << "Issue while computing the exact LP reason. Aborting."; @@ -2514,11 +2511,14 @@ bool LinearProgrammingConstraint::PropagateExactDualRay() { for (RowIndex row(0); row < ray.size(); ++row) { const double value = ray[row]; if (std::abs(value) < kZeroTolerance) continue; - tmp_lp_multipliers_.push_back({row, value}); + + // This is the same as UnscaleLeftSolveValue(). Note that we don't need to + // scale by the objective factor here like we do in UnscaleDualValue(). 
+ tmp_lp_multipliers_.push_back({row, row_factors_[row.value()] * value}); } - tmp_integer_multipliers_ = ScaleLpMultiplier( - /*take_objective_into_account=*/false, - /*ignore_trivial_constraints=*/true, tmp_lp_multipliers_, &scaling); + IgnoreTrivialConstraintMultipliers(&tmp_lp_multipliers_); + tmp_integer_multipliers_ = ScaleMultipliers( + tmp_lp_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling == 0) { VLOG(1) << "Isse while computing the exact dual ray reason. Aborting."; return true; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index bdf34fc6869..a3f34096e96 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -328,11 +328,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // Note that this will loose some precision, but our subsequent computation // will still be exact as it will work for any set of multiplier. - std::vector> ScaleLpMultiplier( - bool take_objective_into_account, bool ignore_trivial_constraints, + void IgnoreTrivialConstraintMultipliers( + std::vector>* lp_multipliers); + std::vector> ScaleMultipliers( absl::Span> lp_multipliers, - IntegerValue* scaling, - int64_t overflow_cap = std::numeric_limits::max()) const; + bool take_objective_into_account, IntegerValue* scaling) const; // Can we have an overflow if we scale each coefficients with // std::round(std::ldexp(coeff, power)) ? @@ -489,13 +489,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, std::vector tmp_slack_rows_; std::vector> tmp_terms_; - // Used by AddCGCuts(). + // Used by ScaleMultipliers(). std::vector> tmp_lp_multipliers_; + std::vector> tmp_cg_multipliers_; std::vector> tmp_integer_multipliers_; - // Used by ScaleLpMultiplier(). 
- mutable std::vector> tmp_cp_multipliers_; - // Structures used for mirroring IntegerVariables inside the underlying LP // solver: an integer variable var is mirrored by mirror_lp_variable_[var]. // Note that these indices are dense in [0, mirror_lp_variable_.size()] so diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 870ba4004ca..5a819566a3e 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -863,36 +863,29 @@ bool PresolveContext::AddRelation(int x, int y, int64_t c, int64_t o, return repo->TryAdd(x, y, c, o, allow_rep_x, allow_rep_y); } -bool PresolveContext::PropagateAffineRelation(int ref) { - const int var = PositiveRef(ref); +bool PresolveContext::PropagateAffineRelation(int var) { + DCHECK(RefIsPositive(var)); const AffineRelation::Relation r = GetAffineRelation(var); if (r.representative == var) return true; return PropagateAffineRelation(var, r.representative, r.coeff, r.offset); } -bool PresolveContext::PropagateAffineRelation(int ref, int rep, int64_t coeff, +bool PresolveContext::PropagateAffineRelation(int var, int rep, int64_t coeff, int64_t offset) { - DCHECK(!DomainIsEmpty(ref)); + DCHECK(RefIsPositive(var)); + DCHECK(RefIsPositive(rep)); + DCHECK(!DomainIsEmpty(var)); DCHECK(!DomainIsEmpty(rep)); - if (!RefIsPositive(rep)) { - rep = NegatedRef(rep); - coeff = -coeff; - } - if (!RefIsPositive(ref)) { - ref = NegatedRef(ref); - offset = -offset; - coeff = -coeff; - } // Propagate domains both ways. 
// var = coeff * rep + offset - if (!IntersectDomainWith(rep, DomainOf(ref) + if (!IntersectDomainWith(rep, DomainOf(var) .AdditionWith(Domain(-offset)) .InverseMultiplicationBy(coeff))) { return false; } if (!IntersectDomainWith( - ref, + var, DomainOf(rep).MultiplicationBy(coeff).AdditionWith(Domain(offset)))) { return false; } @@ -1059,21 +1052,19 @@ void PresolveContext::PermuteHintValues(const SparsePermutation& perm) { perm.ApplyToDenseCollection(hint_has_value_); } -bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, +bool PresolveContext::StoreAffineRelation(int var_x, int var_y, int64_t coeff, int64_t offset, bool debug_no_recursion) { - CHECK_NE(coeff, 0); + DCHECK(RefIsPositive(var_x)); + DCHECK(RefIsPositive(var_y)); + DCHECK_NE(coeff, 0); if (is_unsat_) return false; if (hint_is_loaded_) { - const int var_x = PositiveRef(ref_x); - const int var_y = PositiveRef(ref_y); if (!hint_has_value_[var_y] && hint_has_value_[var_x]) { hint_has_value_[var_y] = true; - const int64_t x_mult = RefIsPositive(ref_x) ? 1 : -1; - const int64_t y_mult = RefIsPositive(ref_y) ? 1 : -1; - hint_[var_y] = (hint_[var_x] * x_mult - offset) / coeff * y_mult; - if (hint_[var_y] * coeff * y_mult + offset != hint_[var_x] * x_mult) { + hint_[var_y] = (hint_[var_x] - offset) / coeff; + if (hint_[var_y] * coeff + offset != hint_[var_x]) { // TODO(user): Do we implement a rounding to closest instead of // routing towards 0. UpdateRuleStats( @@ -1083,10 +1074,8 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, } #ifdef CHECK_HINT - const int64_t vx = - RefIsPositive(ref_x) ? hint_[ref_x] : -hint_[NegatedRef(ref_x)]; - const int64_t vy = - RefIsPositive(ref_y) ? 
hint_[ref_y] : -hint_[NegatedRef(ref_y)]; + const int64_t vx = hint_[var_x]; + const int64_t vy = hint_[var_y]; if (vx != vy * coeff + offset) { LOG(FATAL) << "Affine relation incompatible with hint: " << vx << " != " << vy << " * " << coeff << " + " << offset; @@ -1094,30 +1083,30 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, #endif // TODO(user): I am not 100% sure why, but sometimes the representative is - // fixed but that is not propagated to ref_x or ref_y and this causes issues. - if (!PropagateAffineRelation(ref_x)) return false; - if (!PropagateAffineRelation(ref_y)) return false; - if (!PropagateAffineRelation(ref_x, ref_y, coeff, offset)) return false; + // fixed but that is not propagated to var_x or var_y and this causes issues. + if (!PropagateAffineRelation(var_x)) return false; + if (!PropagateAffineRelation(var_y)) return false; + if (!PropagateAffineRelation(var_x, var_y, coeff, offset)) return false; - if (IsFixed(ref_x)) { - const int64_t lhs = DomainOf(ref_x).FixedValue() - offset; + if (IsFixed(var_x)) { + const int64_t lhs = DomainOf(var_x).FixedValue() - offset; if (lhs % std::abs(coeff) != 0) { return NotifyThatModelIsUnsat(); } UpdateRuleStats("affine: fixed"); - return IntersectDomainWith(ref_y, Domain(lhs / coeff)); + return IntersectDomainWith(var_y, Domain(lhs / coeff)); } - if (IsFixed(ref_y)) { - const int64_t value_x = DomainOf(ref_y).FixedValue() * coeff + offset; + if (IsFixed(var_y)) { + const int64_t value_x = DomainOf(var_y).FixedValue() * coeff + offset; UpdateRuleStats("affine: fixed"); - return IntersectDomainWith(ref_x, Domain(value_x)); + return IntersectDomainWith(var_x, Domain(value_x)); } // If both are already in the same class, we need to make sure the relations // are compatible. 
- const AffineRelation::Relation rx = GetAffineRelation(ref_x); - const AffineRelation::Relation ry = GetAffineRelation(ref_y); + const AffineRelation::Relation rx = GetAffineRelation(var_x); + const AffineRelation::Relation ry = GetAffineRelation(var_y); if (rx.representative == ry.representative) { // x = rx.coeff * rep + rx.offset; // y = ry.coeff * rep + ry.offset; @@ -1138,18 +1127,18 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, if (!IntersectDomainWith(rx.representative, Domain(unique_value))) { return false; } - if (!IntersectDomainWith(ref_x, + if (!IntersectDomainWith(var_x, Domain(unique_value * rx.coeff + rx.offset))) { return false; } - if (!IntersectDomainWith(ref_y, + if (!IntersectDomainWith(var_y, Domain(unique_value * ry.coeff + ry.offset))) { return false; } return true; } - // ref_x = coeff * ref_y + offset; + // var_x = coeff * var_y + offset; // rx.coeff * rep_x + rx.offset = // coeff * (ry.coeff * rep_y + ry.offset) + offset // @@ -1179,7 +1168,7 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, } // Re-add the relation now that a will resolve to a multiple of b. - return StoreAffineRelation(ref_x, ref_y, coeff, offset, + return StoreAffineRelation(var_x, var_y, coeff, offset, /*debug_no_recursion=*/true); } @@ -1239,8 +1228,8 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, // as possible and not all call site do it. // // TODO(user): I am not sure this is needed given the propagation above. - if (!PropagateAffineRelation(ref_x)) return false; - if (!PropagateAffineRelation(ref_y)) return false; + if (!PropagateAffineRelation(var_x)) return false; + if (!PropagateAffineRelation(var_y)) return false; // These maps should only contains representative, so only need to remap // either x or y. 
diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index fc05904c091..7b11b6b66ef 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -324,7 +324,7 @@ class PresolveContext { bool CanonicalizeAffineVariable(int ref, int64_t coeff, int64_t mod, int64_t rhs); - // Adds the relation (ref_x = coeff * ref_y + offset) to the repository. + // Adds the relation (var_x = coeff * var_y + offset) to the repository. // Returns false if we detect infeasability because of this. // // Once the relation is added, it doesn't need to be enforced by a constraint @@ -332,7 +332,7 @@ class PresolveContext { // them to the proto at the end of the presolve. // // Note that this should always add a relation, even though it might need to - // create a new representative for both ref_x and ref_y in some cases. Like if + // create a new representative for both var_x and var_y in some cases. Like if // x = 3z and y = 5t are already added, if we add x = 2y, we have 3z = 10t and // can only resolve this by creating a new variable r such that z = 10r and t // = 3r. @@ -340,7 +340,7 @@ class PresolveContext { // All involved variables will be marked to appear in the special // kAffineRelationConstraint. This will allow to identify when a variable is // no longer needed (only appear there and is not a representative). - bool StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, int64_t offset, + bool StoreAffineRelation(int var_x, int var_y, int64_t coeff, int64_t offset, bool debug_no_recursion = false); // Adds the fact that ref_a == ref_b using StoreAffineRelation() above. @@ -362,8 +362,8 @@ class PresolveContext { // Makes sure the domain of ref and of its representative (ref = coeff * rep + // offset) are in sync. Returns false on unsat. 
- bool PropagateAffineRelation(int ref); - bool PropagateAffineRelation(int ref, int rep, int64_t coeff, int64_t offset); + bool PropagateAffineRelation(int var); + bool PropagateAffineRelation(int var, int rep, int64_t coeff, int64_t offset); // Creates the internal structure for any new variables in working_model. void InitializeNewDomains(); diff --git a/ortools/sat/work_assignment_test.cc b/ortools/sat/work_assignment_test.cc index 83de91dc081..8e3d785a210 100644 --- a/ortools/sat/work_assignment_test.cc +++ b/ortools/sat/work_assignment_test.cc @@ -146,9 +146,9 @@ class SharedTreeSolveTest : public testing::TestWithParam { params.set_num_workers(4); params.set_shared_tree_num_workers(4); params.set_cp_model_presolve(false); - params.MergeFrom(SatParameters{ - google::protobuf::contrib::parse_proto::ParseTextProtoOrDie( - GetParam())}); + const SatParameters to_merge = + google::protobuf::contrib::parse_proto::ParseTextProtoOrDie(GetParam()); + params.MergeFrom(to_merge); return params; } };