diff --git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc index f8bd5b3d42..f352359832 100644 --- a/ortools/sat/2d_orthogonal_packing.cc +++ b/ortools/sat/2d_orthogonal_packing.cc @@ -70,8 +70,8 @@ std::optional> FindPairwiseConflict( absl::Span sizes_x, absl::Span sizes_y, std::pair bounding_box_size, - const std::vector& index_by_decreasing_x_size, - const std::vector& index_by_decreasing_y_size) { + absl::Span index_by_decreasing_x_size, + absl::Span index_by_decreasing_y_size) { // Look for pairwise incompatible pairs by using the logic such conflict can // only happen between a "tall" item a "wide" item. int x_idx = 0; diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 4fc9789b1c..18b911136e 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -180,7 +180,7 @@ std::vector MakeItemsFromRectangles( std::vector GenerateItemsRectanglesWithNoPairwiseConflict( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random) { const std::vector range_items = MakeItemsFromRectangles(rectangles, slack_factor, random); diff --git a/ortools/sat/2d_orthogonal_packing_testing.h b/ortools/sat/2d_orthogonal_packing_testing.h index 72de30a910..848e971e2f 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.h +++ b/ortools/sat/2d_orthogonal_packing_testing.h @@ -40,7 +40,7 @@ std::vector MakeItemsFromRectangles( std::vector GenerateItemsRectanglesWithNoPairwiseConflict( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random); std::vector diff --git a/ortools/sat/2d_rectangle_presolve_test.cc b/ortools/sat/2d_rectangle_presolve_test.cc index 20700a826d..7650f9bb27 100644 --- a/ortools/sat/2d_rectangle_presolve_test.cc +++ b/ortools/sat/2d_rectangle_presolve_test.cc @@ -207,7 +207,7 @@ TEST(RectanglePresolve, RandomTest) { } } -Neighbours NaiveBuildNeighboursGraph(const std::vector& rectangles) { +Neighbours NaiveBuildNeighboursGraph(absl::Span rectangles) { auto interval_intersect = [](IntegerValue begin1, IntegerValue end1, IntegerValue begin2, IntegerValue end2) { return std::max(begin1, begin2) < std::min(end1, end2); diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index e4d9c5c1f9..4ecad430ff 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -18,6 +18,7 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_python//python:proto.bzl", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") package(default_visibility = ["//visibility:public"]) @@ -410,6 +411,7 @@ cc_library( ":intervals", ":lb_tree_search", ":linear_constraint", + ":linear_constraint_manager", ":linear_model", ":linear_programming_constraint", ":linear_relaxation", @@ -429,9 +431,11 @@ cc_library( ":simplification", ":stat_tables", ":subsolver", + ":symmetry_util", ":synchronization", ":util", ":work_assignment", + "//ortools/algorithms:sparse_permutation", "//ortools/base", "//ortools/base:status_macros", "//ortools/base:strong_vector", @@ -1284,6 +1288,7 @@ cc_library( srcs = ["symmetry_util.cc"], hdrs = ["symmetry_util.h"], deps = [ + ":cp_model_cc_proto", "//ortools/algorithms:dynamic_partition", "//ortools/algorithms:sparse_permutation", "//ortools/base", @@ 
-1301,6 +1306,7 @@ cc_test( ":symmetry_util", "//ortools/algorithms:sparse_permutation", "//ortools/base:gmock_main", + "//ortools/base:parse_test_proto", "@com_google_absl//absl/types:span", ], ) @@ -2153,6 +2159,7 @@ cc_library( "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", "//ortools/util:time_limit", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/log:check", "@com_google_absl//absl/numeric:int128", @@ -2176,7 +2183,6 @@ cc_library( "//ortools/base", "//ortools/base:hash", "//ortools/base:strong_vector", - "//ortools/glop:revised_simplex", "//ortools/glop:variables_info", "//ortools/lp_data:base", "//ortools/util:logging", @@ -2185,10 +2191,11 @@ cc_library( "//ortools/util:time_limit", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", ], ) @@ -3011,7 +3018,6 @@ cc_library( ":diffn_util", ":integer", ":linear_constraint_manager", - ":linear_programming_constraint", ":model", ":presolve_context", ":rins", @@ -3032,6 +3038,7 @@ cc_library( "@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", @@ -3357,8 +3364,8 @@ cc_library( hdrs = ["inclusion.h"], deps = [ "//ortools/base", + "//ortools/util:time_limit", "@com_google_absl//absl/log:check", - "@com_google_absl//absl/types:span", ], ) @@ -3434,6 +3441,7 @@ cc_test( ":inclusion", ":util", "//ortools/base:gmock_main", + "//ortools/util:time_limit", "@com_google_absl//absl/random", "@com_google_absl//absl/types:span", ], diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index f7de79c194..79b6c3edd2 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -1475,17 +1475,20 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { // using the binary implication graph only. // // TODO(user): Track which literal have new implications, and only process -// the antecedants of these. +// the antecedents of these. bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { DCHECK_EQ(trail_->CurrentDecisionLevel(), 0); + if (time_limit_->LimitReached()) return true; if (!DetectEquivalences()) return false; // TODO(user): the situation with fixed variable is not really "clean". // Simplify the code so we are sure we don't run into issue or have to deal // with any of that here. + if (time_limit_->LimitReached()) return true; if (!Propagate(trail_)) return false; RemoveFixedVariables(); DCHECK(InvariantsAreOk()); + if (time_limit_->LimitReached()) return true; log_info |= VLOG_IS_ON(1); WallTimer wall_timer; @@ -1708,7 +1711,7 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( // Data to detect inclusion of base amo into extend amo. 
std::vector detector_clique_index; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(1e9); std::vector dense_index_to_index; @@ -2503,6 +2506,7 @@ void BinaryImplicationGraph::CleanupAllRemovedAndFixedVariables() { } bool BinaryImplicationGraph::InvariantsAreOk() { + if (time_limit_->LimitReached()) return true; // We check that if a => b then not(b) => not(a). absl::flat_hash_set> seen; int num_redundant = 0; diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index 457d62e2c2..1d755bf12d 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -332,6 +332,13 @@ class ClauseManager : public SatPropagator { add_clause_callback_ = std::move(add_clause_callback); } + // Removes the add clause callback and returns it. This can be used to + // temporarily disable the callback. + absl::AnyInvocable)> + TakeAddClauseCallback() { + return std::move(add_clause_callback_); + } + private: // Attaches the given clause. This eventually propagates a literal which is // enqueued on the trail. Returns false if a contradiction was encountered. diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 48404abdfd..eb135cfc0e 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -29,6 +29,7 @@ #include "absl/base/log_severity.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/meta/type_traits.h" #include "absl/random/bit_gen_ref.h" @@ -49,7 +50,6 @@ #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_constraint_manager.h" -#include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/presolve_context.h" #include "ortools/sat/rins.h" @@ -933,7 +933,8 @@ NeighborhoodGeneratorHelper::GetSchedulingPrecedences( return result; } -std::vector> NeighborhoodGeneratorHelper::GetRoutingPaths( +std::vector> +NeighborhoodGeneratorHelper::GetRoutingPathVariables( const CpSolverResponse& initial_solution) const { struct HeadAndArcLiteral { int head; @@ -1901,6 +1902,15 @@ Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( local_cp_model.mutable_objective()->set_integer_scaling_factor(0); } + // Dump? + if (absl::GetFlag(FLAGS_cp_model_dump_submodels)) { + const std::string dump_name = + absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), + "lb_relax_lns_lp_", data.task_id, ".pb.txt"); + LOG(INFO) << "Dumping linear relaxed model to '" << dump_name << "'."; + CHECK(WriteModelProtoToFile(local_cp_model, dump_name)); + } + // Solve. // // TODO(user): Shall we pass the objective upper bound so we have more @@ -2445,26 +2455,24 @@ Neighborhood RoutingRandomNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Collect all unique variables. 
- absl::flat_hash_set all_path_variables; - for (auto& path : all_paths) { - all_path_variables.insert(path.begin(), path.end()); + std::vector variables_to_fix; + for (const auto& path : all_paths) { + variables_to_fix.insert(variables_to_fix.end(), path.begin(), path.end()); } - std::vector fixed_variables(all_path_variables.begin(), - all_path_variables.end()); - std::sort(fixed_variables.begin(), fixed_variables.end()); - GetRandomSubset(1.0 - data.difficulty, &fixed_variables, random); + gtl::STLSortAndRemoveDuplicates(&variables_to_fix); + GetRandomSubset(1.0 - data.difficulty, &variables_to_fix, random); return helper_.FixGivenVariables( - initial_solution, {fixed_variables.begin(), fixed_variables.end()}); + initial_solution, {variables_to_fix.begin(), variables_to_fix.end()}); } Neighborhood RoutingPathNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Collect all unique variables. absl::flat_hash_set all_path_variables; @@ -2510,7 +2518,7 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = - helper_.GetRoutingPaths(initial_solution); + helper_.GetRoutingPathVariables(initial_solution); // Remove a corner case where all paths are empty. if (all_paths.empty()) { return helper_.NoNeighborhood(); @@ -2572,6 +2580,32 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( return helper_.FixGivenVariables(initial_solution, fixed_variables); } +Neighborhood RoutingStartsNeighborhoodGenerator::Generate( + const CpSolverResponse& initial_solution, SolveData& data, + absl::BitGenRef random) { + std::vector> all_paths = + helper_.GetRoutingPathVariables(initial_solution); + // TODO(user): Maybe enable some routes to be non empty? + if (all_paths.empty()) return helper_.NoNeighborhood(); + + // Collect all unique path variables, except the start and end of the paths. + // For circuit constraints, we approximate this with the arc going in and out + // of the smallest node. This works well for model where the zero node is an + // artificial node. + std::vector variables_to_fix; + for (const auto& path : all_paths) { + for (const int ref : path) { + if (ref == path.front() || ref == path.back()) continue; + variables_to_fix.push_back(PositiveRef(ref)); + } + } + gtl::STLSortAndRemoveDuplicates(&variables_to_fix); + GetRandomSubset(1.0 - data.difficulty, &variables_to_fix, random); + + return helper_.FixGivenVariables( + initial_solution, {variables_to_fix.begin(), variables_to_fix.end()}); +} + bool RelaxationInducedNeighborhoodGenerator::ReadyToGenerate() const { return (incomplete_solutions_->HasSolution() || lp_solutions_->NumSolutions() > 0); diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index 00856cbe1c..ac69d2562b 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -238,7 +238,7 @@ class NeighborhoodGeneratorHelper : public SubSolver { // self-looping arcs. Path are sorted, starting from the arc with the lowest // tail index, and going in sequence up to the last arc before the circuit is // closed. Each entry correspond to the arc literal on the circuit. 
- std::vector> GetRoutingPaths( + std::vector> GetRoutingPathVariables( const CpSolverResponse& initial_solution) const; // Returns all precedences extracted from the scheduling constraint and the @@ -392,6 +392,9 @@ class NeighborhoodGenerator { IntegerValue base_objective = IntegerValue(0); IntegerValue new_objective = IntegerValue(0); + // For debugging. + int task_id = 0; + // This is just used to construct a deterministic order for the updates. bool operator<(const SolveData& o) const { return std::tie(status, difficulty, deterministic_limit, @@ -782,7 +785,7 @@ class RoutingPathNeighborhoodGenerator : public NeighborhoodGenerator { SolveData& data, absl::BitGenRef random) final; }; -// This routing based LNS generator aims are relaxing one full path, and make +// This routing based LNS generator aims at relaxing one full path, and make // some room on the other paths to absorb the nodes of the relaxed path. // // In order to do so, it will relax the first and the last arc of each path in @@ -799,6 +802,19 @@ class RoutingFullPathNeighborhoodGenerator : public NeighborhoodGenerator { SolveData& data, absl::BitGenRef random) final; }; +// This routing based LNS generator performs like the +// RoutingRandomNeighborhoodGenerator, but always relax the arcs going in and +// out of the depot for routes constraints, and of the node with the minimal +// index for circuit constraints. +class RoutingStartsNeighborhoodGenerator : public NeighborhoodGenerator { + public: + RoutingStartsNeighborhoodGenerator(NeighborhoodGeneratorHelper const* helper, + absl::string_view name) + : NeighborhoodGenerator(name, helper) {} + Neighborhood Generate(const CpSolverResponse& initial_solution, + SolveData& data, absl::BitGenRef random) final; +}; + // Generates a neighborhood by fixing the variables to solutions reported in // various repositories. This is inspired from RINS published in "Exploring // relaxation induced neighborhoods to improve MIP solutions" 2004 by E. Danna diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 9ae2eca5e4..a283115e39 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -8507,7 +8507,7 @@ void CpModelPresolver::ProcessSetPPC() { // TODO(user): compute on the fly instead of temporary storing variables? CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // We use an encoding of literal that allows to index arrays. @@ -8618,7 +8618,7 @@ void CpModelPresolver::DetectIncludedEnforcement() { // TODO(user): compute on the fly instead of temporary storing variables? 
std::vector relevant_constraints; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); std::vector temp_literals; @@ -9021,6 +9021,20 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( continue; } + const int a = rep_ct->enforcement_literal(0); + const int b = dup_ct->enforcement_literal(0); + + if (a == NegatedRef(b) && rep_ct->enforcement_literal().size() == 1 && + dup_ct->enforcement_literal().size() == 1) { + context_->UpdateRuleStats( + "duplicate: both with enforcement and its negation"); + rep_ct->mutable_enforcement_literal()->Clear(); + context_->UpdateConstraintVariableUsage(rep); + dup_ct->Clear(); + context_->UpdateConstraintVariableUsage(dup); + continue; + } + // Special case. This looks specific but users might reify with a cost // a duplicate constraint. In this case, no need to have two variables, // we can make them equal by duality argument. @@ -9034,8 +9048,6 @@ void CpModelPresolver::DetectDuplicateConstraintsWithDifferentEnforcements( // we can also add the equality. Alternatively, we can just introduce a new // variable and merge all duplicate constraint into 1 + bunch of boolean // constraints liking enforcements. - const int a = rep_ct->enforcement_literal(0); - const int b = dup_ct->enforcement_literal(0); if (context_->VariableWithCostIsUniqueAndRemovable(a) && context_->VariableWithCostIsUniqueAndRemovable(b)) { // Both these case should be presolved before, but it is easy to deal with @@ -9580,7 +9592,7 @@ void CpModelPresolver::DetectDominatedLinearConstraints() { const CpModelProto& proto_; }; Storage storage(context_->working_model); - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // Because we use the constraint <-> variable graph, we cannot modify it @@ -10759,7 +10771,7 @@ void CpModelPresolver::ExtractEncodingFromLinear() { // TODO(user): compute on the fly instead of temporary storing variables? std::vector relevant_constraints; CompactVectorVector storage; - InclusionDetector detector(storage); + InclusionDetector detector(storage, time_limit_); detector.SetWorkLimit(context_->params().presolve_inclusion_work_limit()); // Loop over the constraints and fill the structures above. @@ -11815,8 +11827,7 @@ void CpModelPresolver::PresolveToFixPoint() { if (ProcessChangedVariables(&in_queue, &queue)) continue; - // TODO(user): Uncomment this line once the tests pass. - // DCHECK(!context_->HasUnusedAffineVariable()); + DCHECK(!context_->HasUnusedAffineVariable()); // Deal with integer variable only appearing in an encoding. 
for (int v = 0; v < context_->working_model->variables().size(); ++v) { @@ -11999,6 +12010,9 @@ bool ModelCopy::ImportAndSimplifyConstraints( case ConstraintProto::kLinear: if (!CopyLinear(ct)) return CreateUnsatModel(c, ct); break; + case ConstraintProto::kIntProd: + if (!CopyIntProd(ct, ignore_names)) return CreateUnsatModel(c, ct); + break; case ConstraintProto::kAtMostOne: if (!CopyAtMostOne(ct)) return CreateUnsatModel(c, ct); break; @@ -12258,6 +12272,38 @@ bool ModelCopy::CopyBoolAndWithDupSupport(const ConstraintProto& ct) { return true; } +bool ModelCopy::CopyLinearExpression(const LinearExpressionProto& expr, + LinearExpressionProto* dst) { + non_fixed_variables_.clear(); + non_fixed_coefficients_.clear(); + int64_t offset = expr.offset(); + for (int i = 0; i < expr.vars_size(); ++i) { + const int ref = expr.vars(i); + const int64_t coeff = expr.coeffs(i); + if (coeff == 0) continue; + if (context_->IsFixed(ref)) { + offset += coeff * context_->MinOf(ref); + continue; + } + + // Make sure we never have negative ref in a linear constraint. + if (RefIsPositive(ref)) { + non_fixed_variables_.push_back(ref); + non_fixed_coefficients_.push_back(coeff); + } else { + non_fixed_variables_.push_back(NegatedRef(ref)); + non_fixed_coefficients_.push_back(-coeff); + } + } + + dst->set_offset(offset); + dst->mutable_vars()->Add(non_fixed_variables_.begin(), + non_fixed_variables_.end()); + dst->mutable_coeffs()->Add(non_fixed_coefficients_.begin(), + non_fixed_coefficients_.end()); + return true; +} + bool ModelCopy::CopyLinear(const ConstraintProto& ct) { non_fixed_variables_.clear(); non_fixed_coefficients_.clear(); @@ -12375,13 +12421,29 @@ bool ModelCopy::CopyInterval(const ConstraintProto& ct, int c, "supported."; interval_mapping_[c] = context_->working_model->constraints_size(); ConstraintProto* new_ct = context_->working_model->add_constraints(); - if (ignore_names) { - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); - *new_ct->mutable_interval() = ct.interval(); - } else { - *new_ct = ct; - } + if (!ignore_names) { + new_ct->set_name(ct.name()); + } + *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + CopyLinearExpression(ct.interval().start(), + new_ct->mutable_interval()->mutable_start()); + CopyLinearExpression(ct.interval().size(), + new_ct->mutable_interval()->mutable_size()); + CopyLinearExpression(ct.interval().end(), + new_ct->mutable_interval()->mutable_end()); + return true; +} +bool ModelCopy::CopyIntProd(const ConstraintProto& ct, bool ignore_names) { + ConstraintProto* new_ct = context_->working_model->add_constraints(); + if (!ignore_names) { + new_ct->set_name(ct.name()); + } + for (const LinearExpressionProto& expr : ct.int_prod().exprs()) { + CopyLinearExpression(expr, new_ct->mutable_int_prod()->add_exprs()); + } + CopyLinearExpression(ct.int_prod().target(), + new_ct->mutable_int_prod()->mutable_target()); return true; } @@ -12399,26 +12461,29 @@ void ModelCopy::AddLinearConstraintForInterval(const ConstraintProto& ct) { absl::Span(itv.end().coeffs())) { // Trivial constraint, nothing to do. 
} else { - ConstraintProto* new_ct = context_->working_model->add_constraints(); - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + tmp_constraint_.Clear(); + *tmp_constraint_.mutable_enforcement_literal() = ct.enforcement_literal(); + LinearConstraintProto* mutable_linear = tmp_constraint_.mutable_linear(); - LinearConstraintProto* mutable_linear = new_ct->mutable_linear(); mutable_linear->add_domain(0); mutable_linear->add_domain(0); AddLinearExpressionToLinearConstraint(itv.start(), 1, mutable_linear); AddLinearExpressionToLinearConstraint(itv.size(), 1, mutable_linear); AddLinearExpressionToLinearConstraint(itv.end(), -1, mutable_linear); + CopyLinear(tmp_constraint_); } // An enforced interval must have is size non-negative. const LinearExpressionProto& size_expr = itv.size(); if (context_->MinOf(size_expr) < 0) { - ConstraintProto* new_ct = context_->working_model->add_constraints(); - *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); - *new_ct->mutable_linear()->mutable_vars() = size_expr.vars(); - *new_ct->mutable_linear()->mutable_coeffs() = size_expr.coeffs(); - new_ct->mutable_linear()->add_domain(-size_expr.offset()); - new_ct->mutable_linear()->add_domain(std::numeric_limits::max()); + tmp_constraint_.Clear(); + *tmp_constraint_.mutable_enforcement_literal() = ct.enforcement_literal(); + *tmp_constraint_.mutable_linear()->mutable_vars() = size_expr.vars(); + *tmp_constraint_.mutable_linear()->mutable_coeffs() = size_expr.coeffs(); + tmp_constraint_.mutable_linear()->add_domain(-size_expr.offset()); + tmp_constraint_.mutable_linear()->add_domain( + std::numeric_limits::max()); + CopyLinear(tmp_constraint_); } } diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 7c9dff19cc..6759df2d0e 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -428,6 +428,9 @@ class ModelCopy { bool CopyBoolAnd(const ConstraintProto& ct); bool CopyBoolAndWithDupSupport(const ConstraintProto& ct); + bool CopyLinearExpression(const LinearExpressionProto& expr, + LinearExpressionProto* dst); + bool CopyIntProd(const ConstraintProto& ct, bool ignore_names); bool CopyLinear(const ConstraintProto& ct); bool CopyAtMostOne(const ConstraintProto& ct); bool CopyExactlyOne(const ConstraintProto& ct); @@ -458,6 +461,8 @@ class ModelCopy { std::vector temp_literals_; absl::flat_hash_set temp_literals_set_; + + ConstraintProto tmp_constraint_; }; // Copy in_model to the model in the presolve context. diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index d6ffc9f60a..21da620201 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -24,6 +24,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" @@ -496,6 +497,8 @@ absl::flat_hash_map GetNamedParameters( new_params.set_linearization_level(2); new_params.set_add_lp_constraints_lazily(false); strategies["max_lp"] = new_params; + new_params.set_use_symmetry_in_lp(true); + strategies["max_lp_sym"] = new_params; } // Core. 
Note that we disable the lp here because it is faster on the minizinc diff --git a/ortools/sat/cp_model_search.h b/ortools/sat/cp_model_search.h index 4f57c3f741..2e005b818c 100644 --- a/ortools/sat/cp_model_search.h +++ b/ortools/sat/cp_model_search.h @@ -20,7 +20,7 @@ #include #include "absl/container/flat_hash_map.h" -#include "absl/strings/string_view.h" +#include "ortools/base/types.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/integer.h" diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index dd60f67fa3..9420aa4c6d 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1105,6 +1105,7 @@ class LnsSolver : public SubSolver { random_engine_t random(seed); NeighborhoodGenerator::SolveData data; + data.task_id = task_id; data.difficulty = generator_->difficulty(); data.deterministic_limit = generator_->deterministic_limit(); @@ -1213,6 +1214,13 @@ class LnsSolver : public SubSolver { helper_->ModelProto(), context.get()); lns_fragment.set_name(absl::StrCat("lns_", task_id, "_", source_info)); + // Tricky: we don't want to use the symmetry of the main problem in the + // LNS presolved problem ! And currently no code clears/update it. + // + // TODO(user): Find a cleaner way like clear it as part of the presolve. + // Also, do not copy that in the first place. + lns_fragment.clear_symmetry(); + // Overwrite solution hinting. if (neighborhood.delta.has_solution_hint()) { *lns_fragment.mutable_solution_hint() = @@ -1366,7 +1374,7 @@ class LnsSolver : public SubSolver { // TODO(user): We could however fix it in the LNS Helper! if (data.status == CpSolverStatus::OPTIMAL && !shared_->model_proto.has_symmetry() && !solution_values.empty() && - neighborhood.is_simple && + neighborhood.is_simple && shared_->bounds != nullptr && !neighborhood.variables_that_can_be_fixed_to_local_optimum .empty()) { display_lns_info = true; @@ -1710,6 +1718,12 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { helper, name_filter.LastName()), lns_params, helper, shared)); } + if (name_filter.Keep("routing_starts_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } if (num_routes > 0 || num_circuit > 1) { if (name_filter.Keep("routing_full_path_lns")) { diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 405d4b58b5..b961bcf0f7 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -41,13 +41,13 @@ #include "absl/time/time.h" #include "absl/types/span.h" #include "google/protobuf/arena.h" +#include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/connected_components.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/clause.h" #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_checker.h" #include "ortools/sat/cp_model_loader.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_postsolve.h" @@ -62,6 +62,7 @@ #include "ortools/sat/intervals.h" #include "ortools/sat/lb_tree_search.h" #include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/linear_relaxation.h" #include "ortools/sat/max_hs.h" @@ -72,6 +73,7 @@ #include 
"ortools/sat/sat_base.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/sat_solver.h" +#include "ortools/sat/symmetry_util.h" #include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/sat/work_assignment.h" @@ -383,6 +385,53 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, LinearRelaxation relaxation = ComputeLinearRelaxation(model_proto, m); if (m->GetOrCreate()->ModelIsUnsat()) return kNoIntegerVariable; + // In the presence of symmetry, will will create an extra integer variable per + // orbit. + auto* mapping = m->GetOrCreate(); + auto* params = m->GetOrCreate(); + auto* symmetrizer = m->GetOrCreate(); + if (model_proto.has_symmetry() && params->linearization_level() > 1 && + params->use_symmetry_in_lp()) { + // Convert to SparsePermutation. + const int num_vars = model_proto.variables().size(); + std::vector> generators; + for (const SparsePermutationProto& perm : + model_proto.symmetry().permutations()) { + generators.emplace_back(CreateSparsePermutationFromProto(num_vars, perm)); + } + + // Get orbits in term of IntegerVariable. + const std::vector var_to_orbit_index = GetOrbits(num_vars, generators); + std::vector orbit_is_ok; + std::vector> orbits; + for (int proto_var = 0; proto_var < num_vars; ++proto_var) { + const int orbit_index = var_to_orbit_index[proto_var]; + if (orbit_index == -1) continue; + if (orbit_index >= orbits.size()) { + orbits.resize(orbit_index + 1); + orbit_is_ok.resize(orbit_index + 1, true); + } + + // In linerization level >=2, all variables should have a view. + // Otherwise revisit and skip orbit without a full view. + const IntegerVariable var = mapping->Integer(proto_var); + CHECK_NE(var, kNoIntegerVariable); + orbits[orbit_index].push_back(var); + } + + // Lets create the orbit sum vars and register each orbit. + std::vector> terms; + for (const std::vector& orbit : orbits) { + terms.clear(); + for (const IntegerVariable var : orbit) { + terms.push_back({var, 1}); + } + const IntegerVariable sum_var = + GetOrCreateVariableLinkedToSumOf(terms, true, true, m); + symmetrizer->AddSymmetryOrbit(sum_var, orbit); + } + } + // The bipartite graph of LP constraints might be disconnected: // make a partition of the variables into connected components. // Constraint nodes are indexed by [0..num_lp_constraints), @@ -420,6 +469,14 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, } } + // Make sure variables from the same orbit end up in same components. + for (int i = 0; i < symmetrizer->NumOrbits(); ++i) { + const int representative = get_var_index(symmetrizer->OrbitSumVar(i)); + for (const IntegerVariable var : symmetrizer->Orbit(i)) { + components.AddEdge(representative, get_var_index(var)); + } + } + const int num_components = components.GetNumberOfComponents(); std::vector component_sizes(num_components, 0); const std::vector index_to_component = components.GetComponentIds(); @@ -442,7 +499,6 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, // as much as possible the objective bound by using any bounds the LP give // us on one of its components. This is critical on the zephyrus problems for // instance. 
- auto* mapping = m->GetOrCreate(); for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { const IntegerVariable var = mapping->Integer(model_proto.objective().vars(i)); @@ -482,12 +538,45 @@ IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, std::vector> top_level_cp_terms; int num_components_containing_objective = 0; if (model_proto.has_objective()) { + // First convert the proto objective to an IntegerVariable one. In case of + // "use_symmetry_in_lp", we also rewrite it in terms of the sum of the + // variables in the orbits. + std::vector> objective; + const int num_orbits = symmetrizer->NumOrbits(); + if (num_orbits > 0) { + // We use the orbit_sum var instead. + std::vector orbit_obj_coeff(num_orbits, 0); + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + const int orbit_index = symmetrizer->OrbitIndex(var); + if (orbit_index != -1) { + if (orbit_obj_coeff[orbit_index] == 0) { + orbit_obj_coeff[orbit_index] = coeff; + } else { + CHECK_EQ(orbit_obj_coeff[orbit_index], coeff); + } + continue; + } + objective.push_back({var, coeff}); + } + for (int i = 0; i < num_orbits; ++i) { + if (orbit_obj_coeff[i] == 0) continue; + objective.push_back({symmetrizer->OrbitSumVar(i), orbit_obj_coeff[i]}); + } + } else { + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + objective.push_back({var, coeff}); + } + } + // First pass: set objective coefficients on the lp constraints, and store // the cp terms in one vector per component. - for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { - const IntegerVariable var = - mapping->Integer(model_proto.objective().vars(i)); - const int64_t coeff = model_proto.objective().coeffs(i); + for (const auto [var, coeff] : objective) { const int c = index_to_component[get_var_index(var)]; if (lp_constraints[c] != nullptr) { lp_constraints[c]->SetObjectiveCoefficient(var, IntegerValue(coeff)); @@ -863,9 +952,10 @@ int RegisterClausesLevelZeroImport(int id, auto* clause_stream = share_glue_clauses ? shared_clauses_manager->GetClauseStream(id) : nullptr; + auto* clause_manager = model->GetOrCreate(); const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, sat_solver, implications, - clause_stream, + clause_stream, clause_manager, minimize_shared_clauses]() { std::vector> new_binary_clauses; shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); @@ -883,6 +973,9 @@ int RegisterClausesLevelZeroImport(int id, int new_clauses = 0; std::array local_clause; sat_solver->EnsureNewClauseIndexInitialized(); + // Temporarily disable clause sharing so we don't immediately re-export the + // clauses we just imported. + auto callback = clause_manager->TakeAddClauseCallback(); for (const absl::Span shared_clause : shared_clauses_manager->GetUnseenClauses(id)) { // Check this clause was not already learned by this worker. 
@@ -900,6 +993,7 @@ int RegisterClausesLevelZeroImport(int id, } ++new_clauses; } + clause_manager->SetAddClauseCallback(std::move(callback)); clause_stream->RemoveWorstClauses(); if (minimize_shared_clauses && new_clauses > 0) { // The new clauses may be subsumed, so try to minimize them to reduce diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index b489d655b6..5b6046e23c 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -739,6 +739,31 @@ void FindCpModelSymmetries( } } +namespace { + +void LogOrbitInformation(absl::Span var_to_orbit_index, + SolverLogger* logger) { + if (logger == nullptr || !logger->LoggingIsEnabled()) return; + + int num_touched_vars = 0; + std::vector orbit_sizes; + for (int var = 0; var < var_to_orbit_index.size(); ++var) { + const int rep = var_to_orbit_index[var]; + if (rep == -1) continue; + if (rep >= orbit_sizes.size()) orbit_sizes.resize(rep + 1, 0); + ++num_touched_vars; + orbit_sizes[rep]++; + } + std::sort(orbit_sizes.begin(), orbit_sizes.end(), std::greater()); + const int num_orbits = orbit_sizes.size(); + if (num_orbits > 10) orbit_sizes.resize(10); + SOLVER_LOG(logger, "[Symmetry] ", num_orbits, " orbits on ", num_touched_vars, + " variables with sizes: ", absl::StrJoin(orbit_sizes, ","), + (num_orbits > orbit_sizes.size() ? ",..." : "")); +} + +} // namespace + void DetectAndAddSymmetryToProto(const SatParameters& params, CpModelProto* proto, SolverLogger* logger) { SymmetryProto* symmetry = proto->mutable_symmetry(); @@ -752,6 +777,14 @@ void DetectAndAddSymmetryToProto(const SatParameters& params, return; } + // Log orbit information. + // + // TODO(user): It might be nice to just add this to the proto rather than + // re-reading the generators and recomputing this in a few places. + const int num_vars = proto->variables().size(); + const std::vector orbits = GetOrbits(num_vars, generators); + LogOrbitInformation(orbits, logger); + for (const std::unique_ptr& perm : generators) { SparsePermutationProto* perm_proto = symmetry->add_permutations(); const int num_cycle = perm->NumCycles(); @@ -897,7 +930,7 @@ std::vector BuildInequalityCoeffsForOrbitope( void UpdateHintAfterFixingBoolToBreakSymmetry( PresolveContext* context, int var, bool fixed_value, - const std::vector>& generators) { + absl::Span> generators) { if (!context->VarHasSolutionHint(var)) { return; } @@ -1021,18 +1054,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } // Log orbit info. - if (context->logger()->LoggingIsEnabled()) { - std::vector sorted_sizes; - for (const int s : orbit_sizes) { - if (s != 0) sorted_sizes.push_back(s); - } - std::sort(sorted_sizes.begin(), sorted_sizes.end(), std::greater()); - const int num_orbits = sorted_sizes.size(); - if (num_orbits > 10) sorted_sizes.resize(10); - SOLVER_LOG(context->logger(), "[Symmetry] ", num_orbits, - " orbits with sizes: ", absl::StrJoin(sorted_sizes, ","), - (num_orbits > sorted_sizes.size() ? ",..." : "")); - } + LogOrbitInformation(orbits, context->logger()); // First heuristic based on propagation, see the function comment. if (max_orbit_size > 2) { diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index af4aec65de..ab3988287e 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -888,6 +888,9 @@ bool IntegerRoundingCutHelper::ComputeCut( } // Re try complementation on the transformed cut. + // TODO(user): This can be quadratic! we don't want to try too much of them. 
+ // Or optimize the algo, we should be able to be more incremental here. + // see on g200x740.pb.gz for instance. for (CutTerm& entry : cut_.terms) { if (!entry.HasRelevantLpValue()) break; if (entry.coeff % best_divisor == 0) continue; diff --git a/ortools/sat/diffn_cuts.cc b/ortools/sat/diffn_cuts.cc index 8c4b77b2d1..dfd535e419 100644 --- a/ortools/sat/diffn_cuts.cc +++ b/ortools/sat/diffn_cuts.cc @@ -310,7 +310,7 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( SchedulingConstraintHelper* x_helper, SchedulingConstraintHelper* y_helper, SchedulingDemandHelper* x_demands_helper, SchedulingDemandHelper* y_demands_helper, - const std::vector>& energies, Model* model) { + absl::Span> energies, Model* model) { CutGenerator result; result.only_run_at_level_zero = true; AddIntegerVariableFromIntervals(x_helper, model, &result.vars); @@ -319,7 +319,10 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( result.generate_cuts = [x_helper, y_helper, x_demands_helper, y_demands_helper, model, - energies](LinearConstraintManager* manager) { + energies = + std::vector>( + energies.begin(), energies.end())]( + LinearConstraintManager* manager) { if (!x_helper->SynchronizeAndSetTimeDirection(true)) return false; if (!y_helper->SynchronizeAndSetTimeDirection(true)) return false; x_demands_helper->CacheAllEnergyValues(); diff --git a/ortools/sat/diffn_cuts.h b/ortools/sat/diffn_cuts.h index 8b15f8352d..86a8e40e6b 100644 --- a/ortools/sat/diffn_cuts.h +++ b/ortools/sat/diffn_cuts.h @@ -17,6 +17,7 @@ #include #include +#include "absl/types/span.h" #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/intervals.h" @@ -49,7 +50,7 @@ CutGenerator CreateNoOverlap2dEnergyCutGenerator( SchedulingConstraintHelper* x_helper, SchedulingConstraintHelper* y_helper, SchedulingDemandHelper* x_demands_helper, SchedulingDemandHelper* y_demands_helper, - const std::vector>& energies, Model* model); + absl::Span> energies, Model* model); // Internal methods and data structures, useful for testing. diff --git a/ortools/sat/go/cpmodel/cp_solver_c.cc b/ortools/sat/go/cpmodel/cp_solver_c.cc index e8bba37b13..6c3dd6c558 100644 --- a/ortools/sat/go/cpmodel/cp_solver_c.cc +++ b/ortools/sat/go/cpmodel/cp_solver_c.cc @@ -14,6 +14,7 @@ #include "ortools/sat/go/cpmodel/cp_solver_c.h" #include +#include #include "absl/log/check.h" #include "ortools/base/memutil.h" diff --git a/ortools/sat/inclusion.h b/ortools/sat/inclusion.h index 4570b23486..5f5ac3012c 100644 --- a/ortools/sat/inclusion.h +++ b/ortools/sat/inclusion.h @@ -27,6 +27,7 @@ #include "absl/log/check.h" #include "ortools/base/logging.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -53,7 +54,8 @@ namespace sat { template class InclusionDetector { public: - explicit InclusionDetector(const Storage& storage) : storage_(storage) {} + explicit InclusionDetector(const Storage& storage, TimeLimit* time_limit) + : storage_(storage), time_limit_(time_limit) {} // Resets the class to an empty state. void Reset() { @@ -97,7 +99,7 @@ class InclusionDetector { const std::function& process); // Function that should only be used from within "process()". - // Returns the bitset corresponsing to the elements of the current superset + // Returns the bitset corresponding to the elements of the current superset // passed to the process() function. 
std::vector IsInSuperset() const { return is_in_superset_; } @@ -128,6 +130,8 @@ class InclusionDetector { // Allows to access the elements of each candidates via storage_[index]; const Storage& storage_; + TimeLimit* time_limit_; + // List of candidates, this will be sorted. struct Candidate { int index; // Storage index. @@ -216,6 +220,10 @@ inline void InclusionDetector::DetectInclusions( DCHECK(signatures_.empty()); DCHECK(one_watcher_.empty()); + // We check each time our work_done_ has increased by more than this. + constexpr int64_t kCheckTimeLimitInterval = 1000; + int64_t next_time_limit_check = kCheckTimeLimitInterval; + // Main algo. work_done_ = 0; std::stable_sort(candidates_.begin(), candidates_.end()); @@ -250,6 +258,10 @@ inline void InclusionDetector::DetectInclusions( // Find any subset included in current superset. work_done_ += 2 * superset.size; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } // We make a copy because process() might alter the content of the // storage when it returns "stop_with_current_superset_" and we need @@ -278,6 +290,10 @@ inline void InclusionDetector::DetectInclusions( bool is_included = true; work_done_ += subset.size; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } for (const int subset_e : storage_[subset.index]) { if (!is_in_superset_[subset_e]) { is_included = false; @@ -291,6 +307,10 @@ inline void InclusionDetector::DetectInclusions( if (stop_) return; if (work_done_ > work_limit_) return Stop(); + if (work_done_ > next_time_limit_check) { + if (time_limit_->LimitReached()) return Stop(); + next_time_limit_check = work_done_ + kCheckTimeLimitInterval; + } if (stop_with_current_subset_) { // Remove from the watcher list. diff --git a/ortools/sat/inclusion_test.cc b/ortools/sat/inclusion_test.cc index 7f5276708f..94465b95f7 100644 --- a/ortools/sat/inclusion_test.cc +++ b/ortools/sat/inclusion_test.cc @@ -21,6 +21,7 @@ #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/sat/util.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace sat { @@ -28,7 +29,8 @@ namespace { TEST(InclusionDetectorTest, SymmetricExample) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 3})); detector.AddPotentialSet(storage.Add({1, 2, 3})); @@ -47,7 +49,8 @@ TEST(InclusionDetectorTest, SymmetricExample) { // If sets are duplicates, we do not detect both inclusions, but just one. 
TEST(InclusionDetectorTest, DuplicateBehavior) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2})); @@ -65,7 +68,8 @@ TEST(InclusionDetectorTest, DuplicateBehavior) { TEST(InclusionDetectorTest, NonSymmetricExample) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); // Index 0, 1, 2 detector.AddPotentialSubset(storage.Add({1, 2})); @@ -119,7 +123,8 @@ TEST(InclusionDetectorTest, NonSymmetricExample) { TEST(InclusionDetectorTest, InclusionChain) { CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); detector.AddPotentialSet(storage.Add({1})); detector.AddPotentialSet(storage.Add({1, 2})); detector.AddPotentialSet(storage.Add({1, 2, 3})); @@ -147,7 +152,8 @@ TEST(InclusionDetectorTest, InclusionChain) { TEST(InclusionDetectorTest, RandomTest) { absl::BitGen random; CompactVectorVector storage; - InclusionDetector detector(storage); + TimeLimit time_limit; + InclusionDetector detector(storage, &time_limit); std::vector temp; for (int i = 0; i < 1000; ++i) { diff --git a/ortools/sat/integer_expr_test.cc b/ortools/sat/integer_expr_test.cc index 93c02494e5..ac26d54423 100644 --- a/ortools/sat/integer_expr_test.cc +++ b/ortools/sat/integer_expr_test.cc @@ -73,8 +73,8 @@ void AddWeightedSumGreaterOrEqualReif(Literal is_ge, // Weighted sum == constant reified. // TODO(user): Simplify if the constant is at the edge of the possible values. void AddFixedWeightedSumReif(Literal is_eq, - const std::vector& vars, - const std::vector& coefficients, + absl::Span vars, + absl::Span coefficients, int64_t value, Model* model) { // We creates two extra Boolean variables in this case. The alternative is // to code a custom propagator for the direction equality => reified. diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index 046f27f968..f4a645f1c5 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -155,6 +155,13 @@ LinearConstraint LinearConstraintBuilder::BuildConstraint(IntegerValue lb, return result; } +bool LinearConstraintBuilder::BuildIntoConstraintAndCheckOverflow( + IntegerValue lb, IntegerValue ub, LinearConstraint* ct) { + ct->lb = lb > kMinIntegerValue ? lb - offset_ : lb; + ct->ub = ub < kMaxIntegerValue ? ub - offset_ : ub; + return MergePositiveVariableTermsAndCheckForOverflow(&terms_, ct); +} + LinearExpression LinearConstraintBuilder::BuildExpression() { LinearExpression result; CleanTermsAndFillConstraint(&terms_, &result); diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 10a7411b96..9a88f4aaf8 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -274,6 +274,11 @@ class LinearConstraintBuilder { LinearConstraint Build(); LinearConstraint BuildConstraint(IntegerValue lb, IntegerValue ub); + // Similar to BuildConstraint() but make sure we don't overflow while we merge + // terms referring to the same variables. + bool BuildIntoConstraintAndCheckOverflow(IntegerValue lb, IntegerValue ub, + LinearConstraint* ct); + // Returns the linear expression part of the constraint only, without the // bounds. 
LinearExpression BuildExpression(); @@ -398,6 +403,41 @@ inline void CleanTermsAndFillConstraint( output->resize(new_size); } +inline bool MergePositiveVariableTermsAndCheckForOverflow( + std::vector>* terms, + LinearConstraint* output) { + // Sort and add coeff of duplicate variables. Note that a variable and + // its negation will appear one after another in the natural order. + int new_size = 0; + output->resize(terms->size()); + std::sort(terms->begin(), terms->end()); + IntegerVariable previous_var = kNoIntegerVariable; + int64_t current_coeff = 0; + for (const std::pair& entry : *terms) { + DCHECK(VariableIsPositive(entry.first)); + if (previous_var == entry.first) { + if (AddIntoOverflow(entry.second.value(), ¤t_coeff)) { + return false; + } + } else { + if (current_coeff != 0) { + output->vars[new_size] = previous_var; + output->coeffs[new_size] = current_coeff; + ++new_size; + } + previous_var = entry.first; + current_coeff = entry.second.value(); + } + } + if (current_coeff != 0) { + output->vars[new_size] = previous_var; + output->coeffs[new_size] = current_coeff; + ++new_size; + } + output->resize(new_size); + return true; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index ec2d67e595..f9025e06dc 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "ortools/base/hash.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" @@ -47,6 +49,167 @@ namespace operations_research { namespace sat { +LinearConstraintSymmetrizer::~LinearConstraintSymmetrizer() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"symmetrizer/overflows", num_overflows_}); + shared_stats_->AddStats(stats); +} + +void LinearConstraintSymmetrizer::AddSymmetryOrbit( + IntegerVariable sum_var, absl::Span orbit) { + CHECK_GT(orbit.size(), 1); + + // Store the orbit info. + const int orbit_index = orbits_.size(); + has_symmetry_ = true; + orbit_sum_vars_.push_back(sum_var); + orbits_.Add(orbit); + + // And fill var_to_orbit_index_. + const int new_size = integer_trail_->NumIntegerVariables().value() / 2; + if (var_to_orbit_index_.size() < new_size) { + var_to_orbit_index_.resize(new_size, -1); + } + DCHECK(VariableIsPositive(sum_var)); + DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)], -1); + var_to_orbit_index_[GetPositiveOnlyIndex(sum_var)] = orbit_index; + for (const IntegerVariable var : orbit) { + DCHECK(VariableIsPositive(var)); + DCHECK_EQ(var_to_orbit_index_[GetPositiveOnlyIndex(var)], -1); + var_to_orbit_index_[GetPositiveOnlyIndex(var)] = orbit_index; + } +} + +// What we do here is basically equivalent to adding all the possible +// permutations (under the problem symmetry group) of the constraint together. +// When we do that all variables in the same orbit will have the same +// coefficient (TODO(user): think how to prove this properly and especially +// that the scaling is in 1/orbit_size) and will each appear once. We then +// substitute each sum by the sum over the orbit, and divide coefficient by +// their gcd. +// +// Any solution of the original LP can be transformed to a solution of the +// folded LP with the same objective. 
So the folded LP will give us a tight and +// valid objective lower bound but with a lot less variables! This is an +// adaptation of "LP folding" to use in a MIP context. Introducing the orbit sum +// allow to propagates and add cuts as these sum are still integer for us. +// +// The only issue is regarding scaling of the constraints. Basically each +// orbit sum variable will appear with a factor 1/orbit_size in the original +// constraint. +// +// We will remap & scale the constraint. +// If not possible, we will drop it for now. +bool LinearConstraintSymmetrizer::FoldLinearConstraint(LinearConstraint* ct) { + if (!has_symmetry_) return true; + + // We assume the constraint had basic preprocessing with tight lb/ub for + // instance. First pass is to compute the scaling factor. + int64_t scaling_factor = 1; + for (int i = 0; i < ct->num_terms; ++i) { + const IntegerVariable var = ct->vars[i]; + CHECK(VariableIsPositive(var)); + + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) { + // If we have an orbit of size one, or the variable is its own + // representative (orbit sum), skip. + continue; + } + + // Update the scaling factor. + const int orbit_size = orbits_[orbit_index].size(); + if (AtMinOrMaxInt64(CapProd(orbit_size, scaling_factor))) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + scaling_factor = std::lcm(scaling_factor, orbit_size); + } + + if (scaling_factor == 1) { + // No symmetric variables. + return true; + } + + // We need to multiply each term by scaling_factor / orbit_size. + // + // TODO(user): Now that we know the actual coefficient we could scale less. + // Maybe the coefficient of an orbit_var is already divisible by orbit_size. + builder_.Clear(); + for (int i = 0; i < ct->num_terms; ++i) { + const IntegerVariable var = ct->vars[i]; + const IntegerValue coeff = ct->coeffs[i]; + + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + if (orbit_index == -1 || orbit_sum_vars_[orbit_index] == var) { + const int64_t scaled_coeff = CapProd(coeff.value(), scaling_factor); + if (AtMinOrMaxInt64(scaled_coeff)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + builder_.AddTerm(var, scaled_coeff); + } else { + const int64_t orbit_size = orbits_[orbit_index].size(); + const int64_t factor = scaling_factor / orbit_size; + const int64_t scaled_coeff = CapProd(coeff.value(), factor); + if (AtMinOrMaxInt64(scaled_coeff)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + builder_.AddTerm(orbit_sum_vars_[orbit_index], scaled_coeff); + } + } + + if (AtMinOrMaxInt64(CapProd(ct->lb.value(), scaling_factor)) || + AtMinOrMaxInt64(CapProd(ct->ub.value(), scaling_factor))) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to lb/ub overflow"; + return false; + } + if (!builder_.BuildIntoConstraintAndCheckOverflow( + ct->lb * scaling_factor, ct->ub * scaling_factor, ct)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow"; + return false; + } + + // Dividing by gcd can help. 
+ DivideByGCD(ct); + if (PossibleOverflow(*integer_trail_, *ct)) { + ++num_overflows_; + VLOG(2) << "SYMMETRY skip constraint due to overflow factor = " + << scaling_factor; + return false; + } + + // TODO(user): In some cases, this constraint will propagate/fix directly + // the orbit sum variables, we might want to propagate this in the cp world? + // This migth also remove bad scaling. + return true; +} + +int LinearConstraintSymmetrizer::OrbitIndex(IntegerVariable var) const { + if (!has_symmetry_) return -1; + return var_to_orbit_index_[GetPositiveOnlyIndex(var)]; +} + +bool LinearConstraintSymmetrizer::IsOrbitSumVar(IntegerVariable var) const { + if (!has_symmetry_) return false; + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + return orbit_index >= 0 && orbit_sum_vars_[orbit_index] == var; +} + +bool LinearConstraintSymmetrizer::AppearInFoldedProblem( + IntegerVariable var) const { + if (!has_symmetry_) return true; + const int orbit_index = var_to_orbit_index_[GetPositiveOnlyIndex(var)]; + return orbit_index == -1 || orbit_sum_vars_[orbit_index] == var; +} + namespace { const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1); @@ -146,9 +309,15 @@ LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add( SimplifyConstraint(&ct); DivideByGCD(&ct); MakeAllVariablesPositive(&ct); - CHECK(std::is_sorted(ct.VarsAsSpan().begin(), ct.VarsAsSpan().end())); DCHECK(DebugCheckConstraint(ct)); + // If configured, store instead the folded version of this constraint. + // TODO(user): Shall we simplify again? + if (symmetrizer_->HasSymmetry() && !symmetrizer_->FoldLinearConstraint(&ct)) { + return kInvalidConstraintIndex; + } + CHECK(std::is_sorted(ct.VarsAsSpan().begin(), ct.VarsAsSpan().end())); + // If an identical constraint exists, only updates its bound. const size_t key = ComputeHashOfTerms(ct); if (equiv_constraints_.contains(key)) { diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index 2085fa63d4..a58d09dc59 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -29,6 +29,7 @@ #include "ortools/sat/linear_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" #include "ortools/util/logging.h" #include "ortools/util/strong_integers.h" @@ -54,6 +55,82 @@ struct ModelReducedCosts ModelReducedCosts() = default; }; +// Knowing the symmetry of the IP problem should allow us to +// solve the LP faster via "folding" techniques. +// +// You can read this for the LP part: "Dimension Reduction via Colour +// Refinement", Martin Grohe, Kristian Kersting, Martin Mladenov, Erkal +// Selman, https://arxiv.org/abs/1307.5697 +// +// In the presence of symmetry, by considering all symmetric version of a +// constraint and summing them, we can derive a new constraint using the sum +// of the variable on each orbit instead of the individual variables. +// +// For the integration in a MIP solver, I couldn't find any reference. The way +// I did it here is to introduce for each orbit a variable representing the +// sum of the orbit variable. This allows to represent the folded LP in terms +// of these variables (that are connected to the rest of the solver) and just +// reuse the full machinery. 
+class LinearConstraintSymmetrizer {
+ public:
+  explicit LinearConstraintSymmetrizer(Model* model)
+      : shared_stats_(model->GetOrCreate<SharedStatistics>()),
+        integer_trail_(model->GetOrCreate<IntegerTrail>()) {}
+  ~LinearConstraintSymmetrizer();
+
+  // This must be called with all orbits before we call FoldLinearConstraint().
+  // Note that sum_var MUST NOT be in any of the orbits. All orbits must also
+  // be disjoint.
+  //
+  // Precondition: all IntegerVariables must be positive.
+  void AddSymmetryOrbit(IntegerVariable sum_var,
+                        absl::Span<const IntegerVariable> orbit);
+
+  // If there is no symmetry, we shouldn't bother calling the functions below.
+  // Note that they will still work, but will be no-ops.
+  bool HasSymmetry() const { return has_symmetry_; }
+
+  // Accessors by orbit index in [0, num_orbits).
+  int NumOrbits() const { return orbits_.size(); }
+  IntegerVariable OrbitSumVar(int i) const { return orbit_sum_vars_[i]; }
+  absl::Span<const IntegerVariable> Orbit(int i) const { return orbits_[i]; }
+
+  // Returns the orbit number in [0, num_orbits) if var belongs to a
+  // non-trivial orbit or if it is an "orbit_sum_var". Returns -1 otherwise.
+  int OrbitIndex(IntegerVariable var) const;
+
+  // Returns true iff var is one of the sum_var arguments passed to
+  // AddSymmetryOrbit().
+  bool IsOrbitSumVar(IntegerVariable var) const;
+
+  // This will only be true for variables not appearing in any orbit and for
+  // the orbit sum variables.
+  bool AppearInFoldedProblem(IntegerVariable var) const;
+
+  // Given a constraint on the "original" model variables, try to construct a
+  // symmetric version of it using the orbit sum variables. This might fail if
+  // we encounter integer overflow. Returns true on success. On failure, the
+  // original constraint will no longer be usable.
+  //
+  // Preconditions: all IntegerVariables must be positive, and the constraint
+  // lb/ub must be tight and not +/- int64_t max.
+  bool FoldLinearConstraint(LinearConstraint* ct);
+
+ private:
+  SharedStatistics* shared_stats_;
+  IntegerTrail* integer_trail_;
+
+  bool has_symmetry_ = false;
+  int64_t num_overflows_ = 0;
+  LinearConstraintBuilder builder_;
+
+  // We index these vectors by positive variables only.
+  util_intops::StrongVector<PositiveOnlyIndex, int> var_to_orbit_index_;
+
+  // Orbit info indexed by orbit number in [0, num_orbits).
+  std::vector<IntegerVariable> orbit_sum_vars_;
+  CompactVectorVector<int, IntegerVariable> orbits_;
+};
+
 // This class holds a list of globally valid linear constraints and has some
 // logic to decide which one should be part of the LP relaxation. We want more
 // for a better relaxation, but for efficiency we do not want to have too much
@@ -106,7 +183,7 @@ class LinearConstraintManager {
         expanded_lp_solution_(*model->GetOrCreate<ModelLpValues>()),
         expanded_reduced_costs_(*model->GetOrCreate<ModelReducedCosts>()),
         model_(model),
-        logger_(model->GetOrCreate<SolverLogger>()) {}
+        symmetrizer_(model->GetOrCreate<LinearConstraintSymmetrizer>()) {}

   ~LinearConstraintManager();

   // Add a new constraint to the manager. Note that we canonicalize constraints
@@ -279,7 +356,7 @@ class LinearConstraintManager {
   ModelLpValues& expanded_lp_solution_;
   ModelReducedCosts& expanded_reduced_costs_;
   Model* model_;
-  SolverLogger* logger_;
+  LinearConstraintSymmetrizer* symmetrizer_;

   // We want to decay the active counts of all constraints at each call and
   // increase the active counts of active/violated constraints.
However this can diff --git a/ortools/sat/linear_constraint_manager_test.cc b/ortools/sat/linear_constraint_manager_test.cc index e842599ca5..3e1c182d1e 100644 --- a/ortools/sat/linear_constraint_manager_test.cc +++ b/ortools/sat/linear_constraint_manager_test.cc @@ -39,6 +39,64 @@ using ::testing::StartsWith; using ::testing::UnorderedElementsAre; using ConstraintIndex = LinearConstraintManager::ConstraintIndex; +TEST(LinearConstraintSymmetrizerTest, BasicFolding) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable a = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable b = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable c = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable sum_xy = model.Add(NewIntegerVariable(-20, 20)); + const IntegerVariable sum_abc = model.Add(NewIntegerVariable(-30, 30)); + auto* symmetrizer = model.GetOrCreate(); + symmetrizer->AddSymmetryOrbit(sum_xy, {x, y}); + symmetrizer->AddSymmetryOrbit(sum_abc, {a, b, c}); + + LinearConstraintBuilder builder(0, 10); + builder.AddTerm(x, IntegerValue(2)); + builder.AddTerm(y, IntegerValue(1)); + builder.AddTerm(z, IntegerValue(3)); + builder.AddTerm(a, IntegerValue(2)); + builder.AddTerm(c, IntegerValue(5)); + LinearConstraint ct = builder.Build(); + symmetrizer->FoldLinearConstraint(&ct); + + // We will scale by 6 (one orbit of size 2 and one of size 3). + builder.Clear(); + builder.AddTerm(z, IntegerValue(6 * 3)); + builder.AddTerm(sum_xy, IntegerValue(6 / 2 * (2 + 1))); + builder.AddTerm(sum_abc, IntegerValue(6 / 3 * (2 + 5))); + LinearConstraint expected = builder.BuildConstraint(0, 6 * 10); + EXPECT_EQ(ct.DebugString(), expected.DebugString()); +} + +TEST(LinearConstraintSymmetrizerTest, FoldingWithSumVariableOriginallyPresent) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(-10, 10)); + const IntegerVariable sum_xy = model.Add(NewIntegerVariable(-20, 20)); + auto* symmetrizer = model.GetOrCreate(); + symmetrizer->AddSymmetryOrbit(sum_xy, {x, y}); + + LinearConstraintBuilder builder(0, 10); + builder.AddTerm(x, IntegerValue(2)); + builder.AddTerm(y, IntegerValue(1)); + builder.AddTerm(z, IntegerValue(3)); + builder.AddTerm(sum_xy, IntegerValue(7)); + LinearConstraint ct = builder.Build(); + symmetrizer->FoldLinearConstraint(&ct); + + // We will scale by 2 the original sum_xy, and by 1 the one coming from the + // orbit with coeff (2 + 1) from x and y. 
+ builder.Clear(); + builder.AddTerm(z, IntegerValue(2 * 3)); + builder.AddTerm(sum_xy, IntegerValue(2 * 7 + 2 / 2 * (2 + 1))); + LinearConstraint expected = builder.BuildConstraint(0, 2 * 10); + EXPECT_EQ(ct.DebugString(), expected.DebugString()); +} + TEST(LinearConstraintManagerTest, DuplicateDetection) { Model model; LinearConstraintManager manager(&model); diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 52fb78652c..c2382745cb 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -27,6 +27,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/numeric/int128.h" @@ -280,6 +281,7 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( shared_stats_(model->GetOrCreate()), shared_response_manager_(model->GetOrCreate()), random_(model->GetOrCreate()), + symmetrizer_(model->GetOrCreate()), rlt_cut_helper_(model), implied_bounds_processor_({}, integer_trail_, model->GetOrCreate()), @@ -311,18 +313,22 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( // Initialize the IntegerVariable -> ColIndex mapping. CHECK(std::is_sorted(vars.begin(), vars.end())); - integer_variables_.assign(vars.begin(), vars.end()); + // TODO(user): We shouldn't need to add variable from the orbit here in the + // presence of symmetry. However they can still appear in cut, so it is a + // bit tricky and require some refactoring to be tried. ColIndex col{0}; + integer_variables_.assign(vars.begin(), vars.end()); for (const IntegerVariable positive_variable : vars) { CHECK(VariableIsPositive(positive_variable)); implied_bounds_processor_.AddLpVariable(positive_variable); (*dispatcher_)[positive_variable] = this; mirror_lp_variable_[positive_variable] = col; - ++col; } - lp_solution_.assign(vars.size(), std::numeric_limits::infinity()); - lp_reduced_cost_.assign(vars.size(), 0.0); + + lp_solution_.assign(integer_variables_.size(), + std::numeric_limits::infinity()); + lp_reduced_cost_.assign(integer_variables_.size(), 0.0); if (!vars.empty()) { const int max_index = NegationOf(vars.back()).value(); @@ -340,6 +346,63 @@ void LinearProgrammingConstraint::AddLinearConstraint(LinearConstraint ct) { constraint_manager_.Add(std::move(ct)); } +void LinearProgrammingConstraint::RegisterWith(Model* model) { + DCHECK(!lp_constraint_is_registered_); + lp_constraint_is_registered_ = true; + model->GetOrCreate()->push_back(this); + + // Copy objective data to the constraint_manager_. + // + // Note(user): the sort is not really needed but should lead to better cache + // locality. + std::sort(integer_objective_.begin(), integer_objective_.end()); + objective_infinity_norm_ = 0; + for (const auto [col, coeff] : integer_objective_) { + constraint_manager_.SetObjectiveCoefficient(integer_variables_[col.value()], + coeff); + objective_infinity_norm_ = + std::max(objective_infinity_norm_, IntTypeAbs(coeff)); + } + + // Set the LP to its initial content. + // + // Note that we always add LP constraint lazily if we have A LOT of them. + // This is because currently on large problem with millions of constraints, + // our LP is usually not fast enough anyway. 
+ if (!parameters_.add_lp_constraints_lazily() && + constraint_manager_.num_constraints() < 1e6) { + constraint_manager_.AddAllConstraintsToLp(); + } + if (!CreateLpFromConstraintManager()) { + model->GetOrCreate()->NotifyThatModelIsUnsat(); + return; + } + + watcher_id_ = watcher_->Register(this); + const int num_vars = integer_variables_.size(); + orbit_indices_.clear(); + for (int i = 0; i < num_vars; i++) { + const IntegerVariable pos_var = integer_variables_[i]; + if (symmetrizer_->AppearInFoldedProblem(pos_var)) { + watcher_->WatchIntegerVariable(pos_var, watcher_id_, i); + } + + if (symmetrizer_->IsOrbitSumVar(pos_var)) { + orbit_indices_.push_back(symmetrizer_->OrbitIndex(pos_var)); + } + } + if (objective_is_defined_) { + watcher_->WatchUpperBound(objective_cp_, watcher_id_); + } + watcher_->SetPropagatorPriority(watcher_id_, 2); + watcher_->AlwaysCallAtLevelZero(watcher_id_); + + // Registering it with the trail make sure this class is always in sync when + // it is used in the decision heuristics. + integer_trail_->RegisterReversibleClass(this); + watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_); +} + glop::ColIndex LinearProgrammingConstraint::GetMirrorVariable( IntegerVariable positive_variable) { DCHECK(VariableIsPositive(positive_variable)); @@ -352,12 +415,7 @@ void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar, objective_is_defined_ = true; IntegerVariable pos_var = VariableIsPositive(ivar) ? ivar : NegationOf(ivar); if (ivar != pos_var) coeff = -coeff; - - constraint_manager_.SetObjectiveCoefficient(pos_var, coeff); - const glop::ColIndex col = GetMirrorVariable(pos_var); - integer_objective_.push_back({col, coeff}); - objective_infinity_norm_ = - std::max(objective_infinity_norm_, IntTypeAbs(coeff)); + integer_objective_.push_back({GetMirrorVariable(pos_var), coeff}); } // TODO(user): As the search progress, some variables might get fixed. Exploit @@ -670,46 +728,6 @@ void LinearProgrammingConstraint::FillReducedCostReasonIn( integer_trail_->RemoveLevelZeroBounds(integer_reason); } -void LinearProgrammingConstraint::RegisterWith(Model* model) { - DCHECK(!lp_constraint_is_registered_); - lp_constraint_is_registered_ = true; - model->GetOrCreate()->push_back(this); - - // Note fdid, this is not really needed by should lead to better cache - // locality. - std::sort(integer_objective_.begin(), integer_objective_.end()); - - // Set the LP to its initial content. - // - // Note that we always add LP constraint lazily if we have A LOT of them. - // This is because currently on large problem with millions of constraints, - // our LP is usually not fast enough anyway. - if (!parameters_.add_lp_constraints_lazily() && - constraint_manager_.num_constraints() < 1e6) { - constraint_manager_.AddAllConstraintsToLp(); - } - if (!CreateLpFromConstraintManager()) { - model->GetOrCreate()->NotifyThatModelIsUnsat(); - return; - } - - watcher_id_ = watcher_->Register(this); - const int num_vars = integer_variables_.size(); - for (int i = 0; i < num_vars; i++) { - watcher_->WatchIntegerVariable(integer_variables_[i], watcher_id_, i); - } - if (objective_is_defined_) { - watcher_->WatchUpperBound(objective_cp_, watcher_id_); - } - watcher_->SetPropagatorPriority(watcher_id_, 2); - watcher_->AlwaysCallAtLevelZero(watcher_id_); - - // Registering it with the trail make sure this class is always in sync when - // it is used in the decision heuristics. 
-  integer_trail_->RegisterReversibleClass(this);
-  watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_);
-}
-
 void LinearProgrammingConstraint::SetLevel(int level) {
   // Get rid of all optimal constraint each time we go back to level zero.
   if (level == 0) rev_optimal_constraints_size_ = 0;
@@ -820,11 +838,6 @@ double LinearProgrammingConstraint::GetSolutionValue(
   return lp_solution_[mirror_lp_variable_.at(variable).value()];
 }

-double LinearProgrammingConstraint::GetSolutionReducedCost(
-    IntegerVariable variable) const {
-  return lp_reduced_cost_[mirror_lp_variable_.at(variable).value()];
-}
-
 void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() {
   const int num_vars = integer_variables_.size();
   Fractional* lb_with_slack = simplex_.MutableLowerBounds()->data();
@@ -853,10 +866,19 @@ bool LinearProgrammingConstraint::SolveLp() {
   const auto status = simplex_.MinimizeFromTransposedMatrixWithSlack(
       obj_with_slack_, unscaling_factor, offset_before_unscaling, time_limit_);

+  // Let's re-solve from scratch if we encounter this status.
+  if (simplex_.GetProblemStatus() == glop::ProblemStatus::ABNORMAL) {
+    VLOG(2) << "The LP solver returned abnormal, resolving from scratch";
+    simplex_.ClearStateForNextSolve();
+    const auto status = simplex_.MinimizeFromTransposedMatrixWithSlack(
+        obj_with_slack_, unscaling_factor, offset_before_unscaling,
+        time_limit_);
+  }
+
   state_ = simplex_.GetState();
   total_num_simplex_iterations_ += simplex_.GetNumberOfIterations();
   if (!status.ok()) {
-    VLOG(1) << "The LP solver encountered an error: " << status.error_message();
+    VLOG(2) << "The LP solver encountered an error: " << status.error_message();
     simplex_.ClearStateForNextSolve();
     return false;
   }
@@ -895,15 +917,67 @@ bool LinearProgrammingConstraint::SolveLp() {
     const auto reduced_costs = simplex_.GetReducedCosts().const_view();
     for (int i = 0; i < num_vars; i++) {
       const glop::ColIndex col(i);
+      const IntegerVariable var = integer_variables_[i];
+
       const glop::Fractional value = GetVariableValueAtCpScale(col);
       lp_solution_[i] = value;
-      expanded_lp_solution_[integer_variables_[i]] = value;
-      expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value;
+      expanded_lp_solution_[var] = value;
+      expanded_lp_solution_[NegationOf(var)] = -value;
       const glop::Fractional rc =
           scaler_.UnscaleReducedCost(col, reduced_costs[col]);
-      expanded_reduced_costs_[integer_variables_[i]] = rc;
-      expanded_reduced_costs_[NegationOf(integer_variables_[i])] = -rc;
+      lp_reduced_cost_[i] = rc;
+      expanded_reduced_costs_[var] = rc;
+      expanded_reduced_costs_[NegationOf(var)] = -rc;
+    }
+
+    // Let's fix the result in case of symmetry: the variables in an orbit are
+    // not really part of the LP, so they will just be at their bounds.
+    for (const int orbit_index : orbit_indices_) {
+      const IntegerVariable sum_var = symmetrizer_->OrbitSumVar(orbit_index);
+      const absl::Span<const IntegerVariable> orbit =
+          symmetrizer_->Orbit(orbit_index);
+
+      // We assign sum / orbit_size to each variable.
+      // This is still an LP optimum, but not necessarily a good heuristic.
+      //
+      // TODO(user): using sum / orbit_size is good for the cut generation that
+      // might still use these variables: any violated cut on the original
+      // problem where all variables in the orbit have the same value will
+      // result in a violated cut for the folded problem. However it is
+      // probably not so good for the heuristics that use the LP values. In
+      // particular it might result in an LP value not even within the bounds
+      // of the individual variables since, as we branch, we don't have an
+      // identical domain for all variables in an orbit. Maybe we can generate
+      // two solution vectors, one for the cuts and one for the heuristics, or
+      // we can add custom code to the cuts so that they don't depend on this.
+      const double new_value =
+          expanded_lp_solution_[sum_var] / static_cast<double>(orbit.size());
+
+      // For the reduced costs, they are the same. There should be no
+      // complication there.
+      const double new_rc = expanded_reduced_costs_[sum_var];
+
+      for (const IntegerVariable var : orbit) {
+        const glop::ColIndex col = GetMirrorVariable(var);
+        lp_solution_[col.value()] = new_value;
+        expanded_lp_solution_[var] = new_value;
+        expanded_lp_solution_[NegationOf(var)] = -new_value;
+
+        lp_reduced_cost_[col.value()] = new_rc;
+        expanded_reduced_costs_[var] = new_rc;
+        expanded_reduced_costs_[NegationOf(var)] = -new_rc;
+      }
+    }
+
+    // Compute integrality.
+    lp_solution_is_integer_ = true;
+    for (int i = 0; i < num_vars; i++) {
+      if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
+          kCpEpsilon) {
+        lp_solution_is_integer_ = false;
+        break;
+      }
     }

     if (lp_solution_level_ == 0) {
@@ -994,22 +1068,10 @@ bool LinearProgrammingConstraint::AnalyzeLp() {
   }

   // Copy more info about the current solution.
-  if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
+  if (compute_reduced_cost_averages_ &&
+      simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) {
     CHECK(lp_solution_is_set_);
-    lp_solution_is_integer_ = true;
-    const int num_vars = integer_variables_.size();
-    for (int i = 0; i < num_vars; i++) {
-      lp_reduced_cost_[i] = scaler_.UnscaleReducedCost(
-          glop::ColIndex(i), simplex_.GetReducedCost(glop::ColIndex(i)));
-      if (std::abs(lp_solution_[i] - std::round(lp_solution_[i])) >
-          kCpEpsilon) {
-        lp_solution_is_integer_ = false;
-      }
-    }
-
-    if (compute_reduced_cost_averages_) {
-      UpdateAverageReducedCosts();
-    }
+    UpdateAverageReducedCosts();
   }

   // On some problem, LP solves and cut rounds can be slow, so we report
@@ -1429,18 +1491,19 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut(
 // it triggers. We should add heuristics to abort earlier if a cut is not
 // promising. Or only test a few positions and not all rows.
 void LinearProgrammingConstraint::AddCGCuts() {
-  // Note that the index is permuted and do not correspond to a row.
+  std::vector<std::pair<RowIndex, double>> sorted_columns;
   const RowIndex num_rows(integer_lp_.size());
+  glop::DenseColumn::ConstView norms = simplex_.GetDualSquaredNorms();
   for (RowIndex index(0); index < num_rows; ++index) {
-    if (time_limit_->LimitReached()) break;
-
     const ColIndex basis_col = simplex_.GetBasis(index);

     // We used to skip slack and also not to do "classical" gomory and instead
     // call IgnoreTrivialConstraintMultipliers() heuristic. It is usually faster
-    // but on some problem like neos*creuse, this do not find good cut though.
+    // but on some problems like neos*creuse or neos-888544, this does not find
+    // good cuts though.
     //
-    // TODO(user): Tune this.
+    // TODO(user): Tune this. It seems better, but we need to nicely handle the
+    // extra amount of cuts this produces.
     if (basis_col >= integer_variables_.size()) continue;

     // Get he variable value at cp-scale.
Similar to GetVariableValueAtCpScale() @@ -1455,10 +1518,22 @@ void LinearProgrammingConstraint::AddCGCuts() { // TODO(user): We could just look at the diff with std::floor() in the hope // that when we are just under an integer, the exact computation below will // also be just under it. - if (std::abs(lp_value - std::round(lp_value)) < 0.01) continue; + const double fractionality = std::abs(lp_value - std::round(lp_value)); + if (fractionality < 0.01) continue; - // We multiply by row_factors_ directly, which might be slighly more precise - // than dividing by 1/factor like UnscaleLeftSolveValue() does. + const double score = fractionality * (1.0 - fractionality) / norms[index]; + sorted_columns.push_back({index, score}); + } + absl::c_sort(sorted_columns, [](const std::pair& a, + const std::pair& b) { + return a.second > b.second; + }); + + int num_added = 0; + for (const auto [index, _] : sorted_columns) { + if (time_limit_->LimitReached()) return; + // We multiply by row_factors_ directly, which might be slightly more + // precise than dividing by 1/factor like UnscaleLeftSolveValue() does. // // TODO(user): Avoid code duplication between the sparse/dense path. tmp_lp_multipliers_.clear(); @@ -1497,10 +1572,9 @@ void LinearProgrammingConstraint::AddCGCuts() { // Remove constraints that shouldn't be helpful. // // In practice, because we can complement the slack, it might still be - // useful to have some constraint with a trivial upper bound. That said, - // this does looks weird, maybe we miss something in our one-constraint - // cut generation if it is useful to add such a term. Investigate on - // neos-555884. + // useful to have some constraint with a trivial upper bound. Also + // removing this seem to generate a lot more cuts, so we need to be more + // efficient in dealing with them. if (true) { IgnoreTrivialConstraintMultipliers(&tmp_cg_multipliers_); if (tmp_cg_multipliers_.size() <= 1) continue; @@ -1508,9 +1582,14 @@ void LinearProgrammingConstraint::AddCGCuts() { tmp_integer_multipliers_ = ScaleMultipliers( tmp_cg_multipliers_, /*take_objective_into_account=*/false, &scaling); if (scaling != 0) { - AddCutFromConstraints("CG", tmp_integer_multipliers_); + if (AddCutFromConstraints("CG", tmp_integer_multipliers_)) { + ++num_added; + } } } + + // Stop if we already added more than 10 cuts this round. + if (num_added > 10) break; } } diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index a3f34096e9..715de7ba38 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -166,7 +166,6 @@ class LinearProgrammingConstraint : public PropagatorInterface, // at the current decision level. We "erase" it when we backtrack over it. bool HasSolution() const { return lp_solution_is_set_; } double GetSolutionValue(IntegerVariable variable) const; - double GetSolutionReducedCost(IntegerVariable variable) const; bool SolutionIsInteger() const { return lp_solution_is_integer_; } // Returns a valid lp lower bound for the current branch, and indicates if @@ -500,10 +499,14 @@ class LinearProgrammingConstraint : public PropagatorInterface, // they can be used as vector indices. // // TODO(user): This should be util_intops::StrongVector Except if we have too many LinearProgrammingConstraint. + // IntegerVariable>. std::vector integer_variables_; absl::flat_hash_map mirror_lp_variable_; + // This is only used if we use symmetry folding. 
+ // Refer to relevant orbit in the LinearConstraintSymmetrizer. + std::vector orbit_indices_; + // We need to remember what to optimize if an objective is given, because // then we will switch the objective between feasibility and optimization. bool objective_is_defined_ = false; @@ -526,6 +529,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, SharedStatistics* shared_stats_; SharedResponseManager* shared_response_manager_; ModelRandomGenerator* random_; + LinearConstraintSymmetrizer* symmetrizer_; int watcher_id_; diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 5a819566a3..d844d2f08a 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -743,8 +743,11 @@ void PresolveContext::UpdateNewConstraintsVariableUsage() { } bool PresolveContext::HasUnusedAffineVariable() const { + if (is_unsat_) return false; // We do not care in this case. + if (keep_all_feasible_solutions) return false; for (int var = 0; var < working_model->variables_size(); ++var) { if (VariableIsNotUsedAnymore(var)) continue; + if (IsFixed(var)) continue; const auto& constraints = VarToConstraints(var); if (constraints.size() == 1 && constraints.contains(kAffineRelationConstraint) && @@ -1509,9 +1512,40 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Creates the linking sets on demand. // Insert the enforcement literal in the half encoding map. auto& direct_set = imply_eq ? eq_half_encoding_ : neq_half_encoding_; - if (!direct_set.insert({literal, var, value}).second) { + auto insert_result = direct_set.insert({{literal, var}, value}); + if (!insert_result.second) { + if (insert_result.first->second != value && imply_eq) { + UpdateRuleStats("variables: detect half reified incompatible value"); + return SetLiteralToFalse(literal); + } return false; // Already there. } + if (imply_eq) { + // We are adding b => x=v. Check if we already have ~b => x=u. + auto negated_encoding = direct_set.find({NegatedRef(literal), var}); + if (negated_encoding != direct_set.end()) { + if (negated_encoding->second == value) { + UpdateRuleStats( + "variables: both boolean and its negation imply same equality"); + if (!IntersectDomainWith(var, Domain(value))) { + return false; + } + } else { + const int64_t other_value = negated_encoding->second; + // b => var == value + // !b => var == other_value + // var = (value - other_value) * b + other_value + UpdateRuleStats( + "variables: both boolean and its negation fix the same variable"); + if (RefIsPositive(literal)) { + StoreAffineRelation(var, literal, value - other_value, other_value); + } else { + StoreAffineRelation(var, NegatedRef(literal), other_value - value, + value); + } + } + } + } VLOG(2) << "Collect lit(" << literal << ") implies var(" << var << (imply_eq ? ") == " : ") != ") << value; UpdateRuleStats("variables: detect half reified value encoding"); @@ -1519,7 +1553,8 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Note(user): We don't expect a lot of literals in these sets, so doing // a scan should be okay. auto& other_set = imply_eq ? neq_half_encoding_ : eq_half_encoding_; - if (other_set.contains({NegatedRef(literal), var, value})) { + auto it = other_set.find({NegatedRef(literal), var}); + if (it != other_set.end() && it->second == value) { UpdateRuleStats("variables: detect fully reified value encoding"); const int imply_eq_literal = imply_eq ? 
literal : NegatedRef(literal);
     if (!InsertVarValueEncodingInternal(imply_eq_literal, var, value,
@@ -1549,8 +1584,12 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var,
                                        /*add_constraints=*/true)) {
     return false;
   }
-  eq_half_encoding_.insert({literal, var, value});
-  neq_half_encoding_.insert({NegatedRef(literal), var, value});
+  if (!StoreLiteralImpliesVarEqValue(literal, var, value)) {
+    return false;
+  }
+  if (!StoreLiteralImpliesVarNEqValue(NegatedRef(literal), var, value)) {
+    return false;
+  }

   if (hint_is_loaded_) {
     const int bool_var = PositiveRef(literal);
@@ -1797,6 +1836,10 @@ bool PresolveContext::CanonicalizeOneObjectiveVariable(int var) {

   RemoveVariableFromObjective(var);

+  // After we removed the variable from the objective, it might have become an
+  // unused affine variable. Add it to the list of variables to check so we
+  // reprocess it.
+  modified_domains.Set(var);
+
   // Do the substitution.
   AddToObjectiveOffset(coeff * r.offset);
   const int64_t new_coeff = objective_map_[r.representative] += coeff * r.coeff;
diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h
index 7b11b6b66e..318cc895e0 100644
--- a/ortools/sat/presolve_context.h
+++ b/ortools/sat/presolve_context.h
@@ -741,8 +741,8 @@ class PresolveContext {
   // (literal, var, value), i.e.: literal => var ==/!= value
   // The state is accumulated (adding x => var == value then !x => var != value)
   // will deduce that x equivalent to var == value.
-  absl::flat_hash_set<std::tuple<int, int, int64_t>> eq_half_encoding_;
-  absl::flat_hash_set<std::tuple<int, int, int64_t>> neq_half_encoding_;
+  absl::flat_hash_map<std::pair<int, int>, int64_t> eq_half_encoding_;
+  absl::flat_hash_map<std::pair<int, int>, int64_t> neq_half_encoding_;

   // This regroups all the affine relations between variables. Note that the
   // constraints used to detect such relations will be removed from the model at
diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto
index b7e73d92ac..6e03c28e2d 100644
--- a/ortools/sat/sat_parameters.proto
+++ b/ortools/sat/sat_parameters.proto
@@ -23,7 +23,7 @@ option java_multiple_files = true;
 // Contains the definitions for all the sat algorithm parameters and their
 // default values.
 //
-// NEXT TAG: 300
+// NEXT TAG: 302
 message SatParameters {
   // In some context, like in a portfolio of search, it makes sense to name a
   // given parameters set for logging purpose.
@@ -1327,6 +1327,13 @@ message SatParameters {
   // symmetry as possible in presolve.
   optional int32 symmetry_level = 183 [default = 2];

+  // When we have symmetry, it is possible to "fold" all variables from the
+  // same orbit into a single variable, while keeping the same power of LP
+  // relaxation. This can help significantly on symmetric problems. However
+  // there is currently a bit of overhead, as the rest of the solver needs to
+  // do some translation between the folded LP and the rest of the problem.
+  optional bool use_symmetry_in_lp = 301 [default = false];
+
   // The new linear propagation code treat all constraints at once and use
   // an adaptation of Bellman-Ford-Tarjan to propagate constraint in a smarter
   // order and potentially detect propagation cycle earlier.
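To make the folding arithmetic behind use_symmetry_in_lp and FoldLinearConstraint() concrete, here is a minimal standalone sketch (plain C++17, independent of the or-tools types; FoldSketch and Term are made-up names for illustration, not solver APIs, and the overflow checks and gcd step of the real code are omitted): each variable of a non-trivial orbit is replaced by orbit_sum / orbit_size, and the whole constraint, including its bounds, is multiplied by the lcm of the orbit sizes so every coefficient stays integer.

// Sketch only: folds a linear constraint given per-variable orbit info.
#include <cassert>
#include <cstdint>
#include <map>
#include <numeric>
#include <vector>

struct Term {
  int var;
  int64_t coeff;
};

// orbit_of[var] is the orbit index of var, or -1 if var is not in any
// non-trivial orbit. Orbit sums are keyed by (-1 - orbit_index) so they do
// not collide with variable ids. Returns the scaling factor; the lb/ub of the
// original constraint must be multiplied by it as well.
int64_t FoldSketch(const std::vector<Term>& terms,
                   const std::vector<int>& orbit_of,
                   const std::vector<int64_t>& orbit_size,
                   std::map<int, int64_t>* folded) {
  int64_t scaling = 1;
  for (const Term& t : terms) {
    if (orbit_of[t.var] >= 0) {
      scaling = std::lcm(scaling, orbit_size[orbit_of[t.var]]);
    }
  }
  for (const Term& t : terms) {
    const int o = orbit_of[t.var];
    if (o < 0) {
      (*folded)[t.var] += t.coeff * scaling;  // Kept as is, just scaled.
    } else {
      // x_i is replaced by orbit_sum / orbit_size[o].
      (*folded)[-1 - o] += t.coeff * (scaling / orbit_size[o]);
    }
  }
  return scaling;
}

int main() {
  // 2x + y + 3z + 2a + 5c in [0, 10] with orbits {x, y} (0) and {a, b, c} (1).
  // Variables: x=0, y=1, z=2, a=3, b=4, c=5.
  const std::vector<Term> terms = {{0, 2}, {1, 1}, {2, 3}, {3, 2}, {5, 5}};
  const std::vector<int> orbit_of = {0, 0, -1, 1, 1, 1};
  const std::vector<int64_t> orbit_size = {2, 3};
  std::map<int, int64_t> folded;
  const int64_t scaling = FoldSketch(terms, orbit_of, orbit_size, &folded);
  assert(scaling == 6);
  assert(folded[2] == 18);       // z stays, scaled by 6.
  assert(folded[-1 - 0] == 9);   // sum_xy: 6/2 * (2 + 1).
  assert(folded[-1 - 1] == 14);  // sum_abc: 6/3 * (2 + 5).
  return 0;
}

This reproduces the numbers of the BasicFolding test above: scaling 6, folded terms 18*z + 9*sum_xy + 14*sum_abc, and bounds scaled to [0, 60].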
diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 23a8b92bac..da3741ef55 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -500,7 +500,7 @@ TEST(ComputeMinSumOfEndMinsTest, Infeasible) { kMinIntegerValue, kMinIntegerValue)); } -int64_t ExactMakespan(const std::vector& sizes, std::vector& demands, +int64_t ExactMakespan(absl::Span sizes, std::vector& demands, int capacity) { const int64_t kHorizon = 1000; CpModelBuilder builder; diff --git a/ortools/sat/symmetry_util.cc b/ortools/sat/symmetry_util.cc index c1d96e0a38..3189d75b2e 100644 --- a/ortools/sat/symmetry_util.cc +++ b/ortools/sat/symmetry_util.cc @@ -156,12 +156,15 @@ std::vector GetOrbits( MergingPartition union_find; union_find.Reset(n); for (const std::unique_ptr& perm : generators) { + DCHECK(perm != nullptr); const int num_cycles = perm->NumCycles(); for (int i = 0; i < num_cycles; ++i) { // Note that there is currently no random access api like cycle[j]. int first; bool is_first = true; for (const int x : perm->Cycle(i)) { + DCHECK_GE(x, 0); + DCHECK_LT(x, n); if (is_first) { first = x; is_first = false; @@ -232,5 +235,21 @@ std::vector TracePoint( return result; } +std::unique_ptr CreateSparsePermutationFromProto( + int n, const SparsePermutationProto& proto) { + auto perm = std::make_unique(n); + int support_index = 0; + const int num_cycle = proto.cycle_sizes().size(); + for (int i = 0; i < num_cycle; ++i) { + const int size = proto.cycle_sizes(i); + for (int j = 0; j < size; ++j) { + DCHECK_LT(proto.support(support_index), n); + perm->AddToCurrentCycle(proto.support(support_index++)); + } + perm->CloseCurrentCycle(); + } + return perm; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/symmetry_util.h b/ortools/sat/symmetry_util.h index 5e5e813d6e..aeed85a60c 100644 --- a/ortools/sat/symmetry_util.h +++ b/ortools/sat/symmetry_util.h @@ -19,6 +19,7 @@ #include "absl/types/span.h" #include "ortools/algorithms/sparse_permutation.h" +#include "ortools/sat/cp_model.pb.h" namespace operations_research { namespace sat { @@ -75,6 +76,10 @@ std::vector TracePoint( int point, absl::Span schrier_vector, absl::Span> generators); +// Creates a SparsePermutation on [0, n) from its proto representation. +std::unique_ptr CreateSparsePermutationFromProto( + int n, const SparsePermutationProto& proto); + // Given the generators for a permutation group of [0, n-1], update it to // a set of generators of the group stabilizing the given element. 
// diff --git a/ortools/sat/symmetry_util_test.cc b/ortools/sat/symmetry_util_test.cc index 9b3a5b19f5..cdd57f6975 100644 --- a/ortools/sat/symmetry_util_test.cc +++ b/ortools/sat/symmetry_util_test.cc @@ -22,11 +22,13 @@ #include "gtest/gtest.h" #include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/gmock.h" +#include "ortools/base/parse_test_proto.h" namespace operations_research { namespace sat { namespace { +using ::google::protobuf::contrib::parse_proto::ParseTestProto; using ::testing::ElementsAre; using ::testing::UnorderedElementsAre; @@ -155,6 +157,16 @@ TEST(GetSchreierVectorTest, ProjectivePlaneOrderTwo) { EXPECT_THAT(schrier_vector, ElementsAre(-1, -1, 0, 0, 1, 2, 2)); } +TEST(CreateSparsePermutationFromProtoTest, BasicReading) { + const SparsePermutationProto input = ParseTestProto(R"pb( + support: [ 1, 0, 3, 2, 7, 8, 9 ] + cycle_sizes: [ 2, 2, 3 ] + )pb"); + std::unique_ptr sp = + CreateSparsePermutationFromProto(10, input); + EXPECT_EQ(sp->DebugString(), "(0 1) (2 3) (7 8 9)"); +} + } // namespace } // namespace sat } // namespace operations_research
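As a final, hedged sketch, the new CreateSparsePermutationFromProto() helper composes naturally with GetOrbits() from symmetry_util.h to recover the orbits that would be fed to LinearConstraintSymmetrizer::AddSymmetryOrbit(). The test below is hypothetical: it assumes the GetOrbits(n, generators) overload declared in symmetry_util.h and that it returns -1 for points not moved by any generator and a dense orbit index otherwise.

// Hypothetical follow-up test; the expected grouping is an assumption based
// on the permutation (0 1) (2 3) (7 8 9) used in BasicReading above.
TEST(CreateSparsePermutationFromProtoTest, ComposesWithGetOrbits) {
  const SparsePermutationProto input = ParseTestProto(R"pb(
    support: [ 1, 0, 3, 2, 7, 8, 9 ]
    cycle_sizes: [ 2, 2, 3 ]
  )pb");
  std::vector<std::unique_ptr<SparsePermutation>> generators;
  generators.push_back(CreateSparsePermutationFromProto(10, input));
  const std::vector<int> orbit_index = GetOrbits(10, generators);

  // Points of the same cycle should share an orbit index; untouched points
  // (4, 5, 6) are expected to get -1.
  EXPECT_EQ(orbit_index[0], orbit_index[1]);
  EXPECT_EQ(orbit_index[2], orbit_index[3]);
  EXPECT_EQ(orbit_index[7], orbit_index[8]);
  EXPECT_EQ(orbit_index[8], orbit_index[9]);
  EXPECT_EQ(orbit_index[4], -1);
}

Each such orbit, paired with a freshly created sum variable, is what AddSymmetryOrbit() expects when use_symmetry_in_lp is enabled.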