diff --git a/doc/sphinx/docs/cpp/miscellanea/type_traits.rst b/doc/sphinx/docs/cpp/miscellanea/type_traits.rst index 938a8317c..8b94ee7e0 100644 --- a/doc/sphinx/docs/cpp/miscellanea/type_traits.rst +++ b/doc/sphinx/docs/cpp/miscellanea/type_traits.rst @@ -76,5 +76,8 @@ Type traits and enums used in PaGMO .. doxygenclass:: pagmo::override_has_set_seed :members: +.. doxygenclass:: pagmo::has_run_evolve + :members: + .. doxygenclass:: pagmo::is_udi :members: diff --git a/doc/sphinx/docs/images/nlopt_basic_lv1.png b/doc/sphinx/docs/images/nlopt_basic_lv1.png index 5754d9a00..81f1213dc 100644 Binary files a/doc/sphinx/docs/images/nlopt_basic_lv1.png and b/doc/sphinx/docs/images/nlopt_basic_lv1.png differ diff --git a/doc/sphinx/docs/python/algorithms/py_algorithms.rst b/doc/sphinx/docs/python/algorithms/py_algorithms.rst index 9474746ef..cbbe8d9ea 100644 --- a/doc/sphinx/docs/python/algorithms/py_algorithms.rst +++ b/doc/sphinx/docs/python/algorithms/py_algorithms.rst @@ -16,16 +16,16 @@ Heuristic Optimization ========================================================== ========================================= =============== =================================================================== Common Name Name in PyGMO Type Comments ========================================================== ========================================= =============== =================================================================== -The null algorithm :class:`pygmo.null_algorithm` SM-CU Exposed from C++, Used for testing purposes, does nothing +The null algorithm :class:`pygmo.null_algorithm` SM-CU Exposed from C++, used for initialization purposes, does nothing Differential Evolution (DE) :class:`pygmo.de` S-U Exposed from C++ Self-adaptive DE (jDE and iDE) :class:`pygmo.sade` S-U Exposed from C++ Self-adaptive DE (de_1220 aka pDE) :class:`pygmo.de1220` S-U Exposed from C++ Particle Swarm Optimization (PSO) :class:`pygmo.pso` S-U Exposed from C++ (N+1)-ES Simple Evolutionary Algorithm 
:class:`pygmo.sea` S-U (sto) Exposed from C++ -Corana's Simulated Annealing (SA) :class:`pygmo.sa_corana` S-U Exposed from C++ +Corana's Simulated Annealing (SA) :class:`pygmo.simulated_annealing` S-U Exposed from C++ Artificial Bee Colony (ABC) :class:`pygmo.bee_colony` S-U Exposed from C++ Covariance Matrix Adaptation Evo. Strategy (CMA-ES) :class:`pygmo.cmaes` S-U Exposed from C++ -Non-dominated Sorting GA (NSGA2) :class:`pygmo.nsga_II` M-U Exposed from C++ +Non-dominated Sorting GA (NSGA2) :class:`pygmo.nsga2` M-U Exposed from C++ Multi-objective EA vith Decomposition (MOEA/D) :class:`pygmo.moead` M-U Exposed from C++ ========================================================== ========================================= =============== =================================================================== @@ -43,19 +43,20 @@ Local optimization ====================================================== ========================================= =============== ===================================================================== Common Name Name in PyGMO Type Comments ====================================================== ========================================= =============== ===================================================================== -Compass Search (CS) :class:`pygmo.cs` S-CU Exposed from C++ -COBYLA (from nlopt) :class:`pygmo.nlopt` S-CU Exposed from C++ -BOBYQA (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -NEWUOA + bound constraints (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -PRAXIS (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -Nelder-Mead simplex (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -sbplx (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -MMA (Method of Moving Asymptotes) (from nlopt) :class:`pygmo.nlopt` S-CU Exposed from C++ -CCSA (from nlopt) :class:`pygmo.nlopt` S-CU Exposed from C++ -SLSQP (from nlopt) :class:`pygmo.nlopt` S-CU Exposed from C++ -low-storage BFGS (from nlopt) :class:`pygmo.nlopt` 
S-U Exposed from C++ -preconditioned truncated Newton (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ -shifted limited-memory variable-metric (from nlopt) :class:`pygmo.nlopt` S-U Exposed from C++ +Compass Search (CS) :class:`pygmo.compass_search` S-CU Exposed from C++ +COBYLA (from NLopt) :class:`pygmo.nlopt` S-CU Exposed from C++ +BOBYQA (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +NEWUOA + bound constraints (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +PRAXIS (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +Nelder-Mead simplex (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +sbplx (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +MMA (Method of Moving Asymptotes) (from NLopt) :class:`pygmo.nlopt` S-CU Exposed from C++ +CCSA (from NLopt) :class:`pygmo.nlopt` S-CU Exposed from C++ +SLSQP (from NLopt) :class:`pygmo.nlopt` S-CU Exposed from C++ +low-storage BFGS (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +preconditioned truncated Newton (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +Shifted limited-memory variable-metric (from NLopt) :class:`pygmo.nlopt` S-U Exposed from C++ +Augmented Lagrangian algorithm (from NLopt) :class:`pygmo.nlopt` S-CU Exposed from C++ ====================================================== ========================================= =============== ===================================================================== ---------------------------------------------------------------------------------------------------------------------- diff --git a/doc/sphinx/docs/python/tutorials/nlopt_basics.rst b/doc/sphinx/docs/python/tutorials/nlopt_basics.rst index e6e080595..221d6cefa 100644 --- a/doc/sphinx/docs/python/tutorials/nlopt_basics.rst +++ b/doc/sphinx/docs/python/tutorials/nlopt_basics.rst @@ -5,7 +5,7 @@ A first tutorial on the use of NLopt solvers In this tutorial we show the basic usage pattern of :class:`pygmo.nlopt`. 
This user defined algorithm (UDA) wraps the NLopt library making it easily accessible via the pygmo common -:class:`pygmo.algorithm` interface. Let see how this miracle occur. +:class:`pygmo.algorithm` interface. Let us see how this miracle occurs. I have the gradient ^^^^^^^^^^^^^^^^^^^ @@ -15,12 +15,13 @@ I have the gradient >>> import pygmo as pg >>> uda = pg.nlopt("slsqp") >>> algo = pg.algorithm(uda) - >>> print(algo) # doctest: +NORMALIZE_WHITESPACE + >>> print(algo) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Algorithm name: NLopt - slsqp [deterministic] Thread safety: basic Extra info: - NLopt version: 2.4.2 + NLopt version: ... + Solver: 'slsqp' Last optimisation return code: NLOPT_SUCCESS (value = 1, Generic success return value) Verbosity: 0 Individual selection policy: best @@ -36,9 +37,9 @@ I have the gradient -In a few lines we have constructed a :class:`pygmo.algorithm` containing the slsqp solver from +In a few lines we have constructed a :class:`pygmo.algorithm` containing the ``"slsqp"`` solver from NLopt. For a list of solvers availbale via the NLopt library check the docs of :class:`~pygmo.nlopt`. -In this tutorial we will make use of slsqp, a Sequential Quadratic Programming algorithm suited for +In this tutorial we will make use of ``"slsqp"``, a Sequential Quadratic Programming algorithm suited for generic Non Linear Programming problems (i.e. non linearly constrained single objective problems). All the stopping criteria used by the NLopt library are available via dedicated attributes, so that we may, for @@ -55,7 +56,7 @@ Let us algo activate some verbosity as to store a log and have a screen output: >>> algo.set_verbosity(1) We now need a problem to solve. Let us start with one of the UDPs provided in pygmo. The -:class:`pygmo.luksan_vlcek1` provides a classic, scalable, equally constrained problem. It +:class:`pygmo.luksan_vlcek1` problem is a classic, scalable, equality-constrained problem. 
It also has its gradient implemented so that we do not have to worry about that for the moment. .. doctest:: @@ -72,40 +73,35 @@ The lines above can be shortened and are equivalent to: >>> pop = pg.population(pg.luksan_vlcek1(dim = 20), size = 1) >>> pop.problem.c_tol = [1e-8] * pop.problem.get_nc() -.. image:: ../../images/nlopt_basic_lv1.png - :scale: 80 % - :alt: slsqp performance - :align: right - We now solve this problem by writing: .. doctest:: >>> pop = algo.evolve(pop) # doctest: +SKIP - fevals: fitness: violated: viol. norm: - 1 250153 18 2619.51 i - 2 132280 18 931.767 i - 3 26355.2 18 357.548 i - 4 14509 18 140.055 i - 5 77119 18 378.603 i - 6 9104.25 18 116.19 i - 7 9104.25 18 116.19 i - 8 2219.94 18 42.8747 i - 9 947.637 18 16.7015 i - 10 423.519 18 7.73746 i - 11 82.8658 18 1.39111 i - 12 34.2733 15 0.227267 i - 13 11.9797 11 0.0309227 i - 14 42.7256 7 0.27342 i - 15 1.66949 11 0.042859 i - 16 1.66949 11 0.042859 i - 17 0.171358 7 0.00425765 i - 18 0.00186583 5 0.000560166 i - 19 1.89265e-06 3 4.14711e-06 i - 20 1.28773e-09 0 0 - 21 7.45125e-14 0 0 - 22 3.61388e-18 0 0 - 23 1.16145e-23 0 0 + objevals: objval: violated: viol. norm: + 1 250153 18 2619.51 i + 2 132280 18 931.767 i + 3 26355.2 18 357.548 i + 4 14509 18 140.055 i + 5 77119 18 378.603 i + 6 9104.25 18 116.19 i + 7 9104.25 18 116.19 i + 8 2219.94 18 42.8747 i + 9 947.637 18 16.7015 i + 10 423.519 18 7.73746 i + 11 82.8658 18 1.39111 i + 12 34.2733 15 0.227267 i + 13 11.9797 11 0.0309227 i + 14 42.7256 7 0.27342 i + 15 1.66949 11 0.042859 i + 16 1.66949 11 0.042859 i + 17 0.171358 7 0.00425765 i + 18 0.00186583 5 0.000560166 i + 19 1.89265e-06 3 4.14711e-06 i + 20 1.28773e-09 0 0 + 21 7.45125e-14 0 0 + 22 3.61388e-18 0 0 + 23 1.16145e-23 0 0 Optimisation return status: NLOPT_XTOL_REACHED (value = 4, Optimization stopped because xtol_rel or xtol_abs was reached) @@ -118,9 +114,13 @@ shown here. 
>>> from matplotlib import pyplot as plt # doctest: +SKIP >>> plt.semilogy([line[0] for line in log], [line[1] for line in log], label = "obj") # doctest: +SKIP >>> plt.semilogy([line[0] for line in log], [line[3] for line in log], label = "con") # doctest: +SKIP - >>> plt.xlabel("fevals") # doctest: +SKIP + >>> plt.xlabel("objevals") # doctest: +SKIP >>> plt.ylabel("value") # doctest: +SKIP >>> plt.show() # doctest: +SKIP +.. image:: ../../images/nlopt_basic_lv1.png + :scale: 100 % + :alt: slsqp performance + I do not have the gradient ^^^^^^^^^^^^^^^^^^^^^^^^^^ \ No newline at end of file diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index dc81ae351..d796996ae 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -34,6 +34,8 @@ see https://www.gnu.org/licenses/. */ #if defined(PAGMO_WITH_NLOPT) #include +#include +#include #include #include #include @@ -58,6 +60,7 @@ see https://www.gnu.org/licenses/. */ #include #include +#include #include #include #include @@ -116,6 +119,8 @@ inline typename nlopt_data<>::names_map_t nlopt_names_map() retval.insert(value_type("tnewton", NLOPT_LD_TNEWTON)); retval.insert(value_type("var2", NLOPT_LD_VAR2)); retval.insert(value_type("var1", NLOPT_LD_VAR1)); + retval.insert(value_type("auglag", NLOPT_AUGLAG)); + retval.insert(value_type("auglag_eq", NLOPT_AUGLAG_EQ)); return retval; } @@ -153,7 +158,7 @@ inline std::string nlopt_res2string(::nlopt_result err) } struct nlopt_obj { - // Single entry of the log (feval, fitness, n of unsatisfied const, constr. violation, feasibility). + // Single entry of the log (objevals, objval, n of unsatisfied const, constr. violation, feasibility). using log_line_type = std::tuple; // The log. 
using log_type = std::vector; @@ -178,13 +183,8 @@ struct nlopt_obj { using data = nlopt_data<>; explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs, double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity) - : m_prob(prob), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity) + : m_algo(algo), m_prob(prob), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity) { - // If needed, init the sparsity pattern. - if (prob.has_gradient_sparsity()) { - m_sp = prob.gradient_sparsity(); - } - // Extract and set problem dimension. const auto n = boost::numeric_cast(prob.get_nx()); // Try to init the nlopt_obj. @@ -198,34 +198,109 @@ struct nlopt_obj { pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); } - // Variable to hold the result of various operations. - ::nlopt_result res; + // This is just a vector_double that is re-used across objfun invocations. + // It will hold the current decision vector. + m_dv.resize(prob.get_nx()); - // Box bounds. - const auto bounds = prob.get_bounds(); - res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + // Handle the various stopping criteria. 
+ auto res = ::nlopt_set_stopval(m_value.get(), stopval); if (res != NLOPT_SUCCESS) { // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " + + std::to_string(stopval) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); // LCOV_EXCL_STOP } - res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); if (res != NLOPT_SUCCESS) { // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " + + std::to_string(ftol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " + + std::to_string(ftol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " + + std::to_string(xtol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " + + std::to_string(xtol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the 
error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_maxeval(m_value.get(), maxeval); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " + + std::to_string(maxeval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_maxtime(m_value.get(), maxtime); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " + + std::to_string(maxtime) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); // LCOV_EXCL_STOP } + } - // This is just a vector_double that is re-used across objfun invocations. - // It will hold the current decision vector. - m_dv.resize(prob.get_nx()); + // Set box bounds. + void set_bounds() + { + const auto bounds = m_prob.get_bounds(); + auto res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + + data::names.right.at(m_algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + + data::names.right.at(m_algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + } - // Set the objfun + gradient. - res = ::nlopt_set_min_objective( + // Set the objfun + gradient. + void set_objfun() + { + // If needed, init the sparsity pattern. + // NOTE: we do it here so that, in case this is a local optimiser, + // we don't waste memory (set_objfun() etc. 
are not called when setting up a local + // optimiser). + if (m_prob.has_gradient_sparsity()) { + m_sp = m_prob.gradient_sparsity(); + } + auto res = ::nlopt_set_min_objective( m_value.get(), [](unsigned dim, const double *x, double *grad, void *f_data) -> double { // Get *this back from the function data. @@ -252,11 +327,12 @@ struct nlopt_obj { // If grad is not null, it means we are in an algorithm // that needs the gradient. If the problem does not support it, // we error out. - pagmo_throw(std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' a fitness gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); + pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an objective function gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); } // Copy the decision vector in our temporary dv vector_double, @@ -316,8 +392,8 @@ struct nlopt_obj { if (!(f_count / verb % 50u)) { // Every 50 lines print the column names. - print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", - std::setw(15), "viol. norm:", '\n'); + print("\n", std::setw(10), "objevals:", std::setw(15), "objval:", std::setw(15), + "violated:", std::setw(15), "viol. norm:", '\n'); } // Print to screen the log line. 
print(std::setw(10), f_count + 1u, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), @@ -343,20 +419,19 @@ struct nlopt_obj { if (res != NLOPT_SUCCESS) { // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " + + data::names.right.at(m_algo) + "', the error is: " + nlopt_res2string(res)); // LCOV_EXCL_STOP } + } - // Vector-valued constraints. - const auto nic = boost::numeric_cast(prob.get_nic()); - const auto nec = boost::numeric_cast(prob.get_nec()); - const auto c_tol = prob.get_c_tol(); - - // Inequality. - if (nic) { - res = ::nlopt_add_inequality_mconstraint( - m_value.get(), nic, + // Inequality constraints. + void set_ineq_constraints() + { + if (m_prob.get_nic()) { + const auto c_tol = m_prob.get_c_tol(); + auto res = ::nlopt_add_inequality_mconstraint( + m_value.get(), boost::numeric_cast(m_prob.get_nic()), [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { // Get *this back from the function data. auto &nlo = *static_cast(f_data); @@ -444,19 +519,23 @@ struct nlopt_obj { ::nlopt_force_stop(nlo.m_value.get()); } }, - static_cast(this), c_tol.data() + nec); + static_cast(this), c_tol.data() + m_prob.get_nec()); if (res != NLOPT_SUCCESS) { pagmo_throw(std::invalid_argument, "could not set the inequality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + data::names.right.at(m_algo) + "', the error is: " + nlopt_res2string(res) + "\nThis usually means that the algorithm does not support inequality constraints"); } } + } - // Equality. - if (nec) { - res = ::nlopt_add_equality_mconstraint( - m_value.get(), nec, + // Equality constraints. 
+ void set_eq_constraints() + { + if (m_prob.get_nec()) { + const auto c_tol = m_prob.get_c_tol(); + auto res = ::nlopt_add_equality_mconstraint( + m_value.get(), boost::numeric_cast(m_prob.get_nec()), [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { // Get *this back from the function data. auto &nlo = *static_cast(f_data); @@ -551,75 +630,10 @@ struct nlopt_obj { if (res != NLOPT_SUCCESS) { pagmo_throw(std::invalid_argument, "could not set the equality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + data::names.right.at(m_algo) + "', the error is: " + nlopt_res2string(res) + "\nThis usually means that the algorithm does not support equality constraints"); } } - - // Handle the various stopping criteria. - res = ::nlopt_set_stopval(m_value.get(), stopval); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " - + std::to_string(stopval) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " - + std::to_string(ftol_rel) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " - + std::to_string(ftol_abs) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); - if (res != NLOPT_SUCCESS) { - 
// LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " - + std::to_string(xtol_rel) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " - + std::to_string(xtol_abs) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_maxeval(m_value.get(), maxeval); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " - + std::to_string(maxeval) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } - res = ::nlopt_set_maxtime(m_value.get(), maxtime); - if (res != NLOPT_SUCCESS) { - // LCOV_EXCL_START - pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " - + std::to_string(maxtime) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - // LCOV_EXCL_STOP - } } // Delete all other ctors/assignment ops. @@ -629,6 +643,7 @@ struct nlopt_obj { nlopt_obj &operator=(nlopt_obj &&) = delete; // Data members. + ::nlopt_algorithm m_algo; problem &m_prob; sparsity_pattern m_sp; std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; @@ -663,7 +678,8 @@ struct nlopt_obj { * - SLSQP, * - low-storage BFGS, * - preconditioned truncated Newton, - * - shifted limited-memory variable-metric. + * - shifted limited-memory variable-metric, + * - augmented Lagrangian algorithm. * * The desired NLopt solver is selected upon construction of a pagmo::nlopt algorithm. 
Various properties * of the solver (e.g., the stopping criteria) can be configured after construction via methods provided @@ -677,7 +693,8 @@ struct nlopt_obj { * * In order to support pagmo's population-based optimisation model, nlopt::evolve() will select * a single individual from the input pagmo::population to be optimised by the NLopt solver. - * The optimised individual will then be inserted back into the population at the end of the optimisation. + * If the optimisation produces a better individual (as established by pagmo::compare_fc()), + * the optimised individual will be inserted back into the population. * The selection and replacement strategies can be configured via set_selection(const std::string &), * set_selection(population::size_type), set_replacement(const std::string &) and * set_replacement(population::size_type). @@ -765,6 +782,8 @@ class nlopt * ``"tnewton"`` ``NLOPT_LD_TNEWTON`` * ``"var2"`` ``NLOPT_LD_VAR2`` * ``"var1"`` ``NLOPT_LD_VAR1`` + * ``"auglag"`` ``NLOPT_AUGLAG`` + * ``"auglag_eq"`` ``NLOPT_AUGLAG_EQ`` * ================================ ==================================== * \endverbatim * The parameters of the selected algorithm can be specified via the methods of this class. @@ -804,6 +823,30 @@ class nlopt + "'. The supported algorithms are:\n" + oss.str()); } } + /// Copy constructor. + /** + * The copy constructor will deep-copy the state of \p other. + * + * @param other the construction argument. + * + * @throws unspecified any exception thrown by copying the internal state of \p other. 
+ */ + nlopt(const nlopt &other) + : m_algo(other.m_algo), m_select(other.m_select), m_replace(other.m_replace), + m_rselect_seed(other.m_rselect_seed), m_e(other.m_e), m_last_opt_result(other.m_last_opt_result), + m_sc_stopval(other.m_sc_stopval), m_sc_ftol_rel(other.m_sc_ftol_rel), m_sc_ftol_abs(other.m_sc_ftol_abs), + m_sc_xtol_rel(other.m_sc_xtol_rel), m_sc_xtol_abs(other.m_sc_xtol_abs), m_sc_maxeval(other.m_sc_maxeval), + m_sc_maxtime(other.m_sc_maxtime), m_verbosity(other.m_verbosity), m_log(other.m_log), + m_loc_opt(other.m_loc_opt ? detail::make_unique(*other.m_loc_opt) : nullptr) + { + } + /// Move constructor. + nlopt(nlopt &&) = default; + /// Move assignment operator. + /** + * @return a reference to \p this. + */ + nlopt &operator=(nlopt &&) = default; /// Set the seed for the ``"random"`` selection/replacement policies. /** * @param seed the value that will be used to seed the random number generator used by the ``"random"`` @@ -949,25 +992,41 @@ class nlopt } auto &prob = pop.get_problem(); - const auto nc = prob.get_nc(); // Create the nlopt obj. // NOTE: this will check also the problem's properties. nlopt_obj no(nlopt_data::names.left.at(m_algo), prob, m_sc_stopval, m_sc_ftol_rel, m_sc_ftol_abs, m_sc_xtol_rel, m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime, m_verbosity); + no.set_bounds(); + no.set_objfun(); + no.set_eq_constraints(); + no.set_ineq_constraints(); + + // Set the local optimiser, if appropriate. + if (m_loc_opt) { + nlopt_obj no_loc(nlopt_data::names.left.at(m_loc_opt->m_algo), prob, m_loc_opt->m_sc_stopval, + m_loc_opt->m_sc_ftol_rel, m_loc_opt->m_sc_ftol_abs, m_loc_opt->m_sc_xtol_rel, + m_loc_opt->m_sc_xtol_abs, m_loc_opt->m_sc_maxeval, m_loc_opt->m_sc_maxtime, 0); + ::nlopt_set_local_optimizer(no.m_value.get(), no_loc.m_value.get()); + } - // Setup of the initial guess. - vector_double initial_guess; + // Setup of the initial guess. Store also the original fitness + // of the selected individual, old_f, for later use. 
+ vector_double initial_guess, old_f; if (boost::any_cast(&m_select)) { const auto &s_select = boost::any_cast(m_select); if (s_select == "best") { initial_guess = pop.get_x()[pop.best_idx()]; + old_f = pop.get_f()[pop.best_idx()]; } else if (s_select == "worst") { initial_guess = pop.get_x()[pop.worst_idx()]; + old_f = pop.get_f()[pop.worst_idx()]; } else { assert(s_select == "random"); std::uniform_int_distribution dist(0, pop.size() - 1u); - initial_guess = pop.get_x()[dist(m_e)]; + const auto idx = dist(m_e); + initial_guess = pop.get_x()[idx]; + old_f = pop.get_f()[idx]; } } else { const auto idx = boost::any_cast(m_select); @@ -977,6 +1036,7 @@ class nlopt + std::to_string(pop.size())); } initial_guess = pop.get_x()[idx]; + old_f = pop.get_f()[idx]; } // Check the initial guess. // NOTE: this should be guaranteed by the population's invariants. @@ -994,8 +1054,8 @@ class nlopt } // Run the optimisation and store the status returned by NLopt. - double fitness; - m_last_opt_result = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); + double objval; + m_last_opt_result = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &objval); if (m_verbosity) { // Print to screen the result of the optimisation, if we are being verbose. std::cout << "\nOptimisation return status: " << detail::nlopt_res2string(m_last_opt_result) << '\n'; @@ -1008,41 +1068,30 @@ class nlopt std::rethrow_exception(no.m_eptr); } - // Store the new individual into the population. - if (boost::any_cast(&m_replace)) { - const auto &s_replace = boost::any_cast(m_replace); - if (s_replace == "best") { - if (nc) { - pop.set_x(pop.best_idx(), initial_guess); - } else { - pop.set_xf(pop.best_idx(), initial_guess, {fitness}); - } - } else if (s_replace == "worst") { - if (nc) { - pop.set_x(pop.worst_idx(), initial_guess); + // Compute the new fitness vector. 
+ const auto new_f = pop.get_problem().fitness(initial_guess); + + // Store the new individual into the population, but only if better. + if (compare_fc(new_f, old_f, pop.get_problem().get_nec(), pop.get_problem().get_c_tol())) { + if (boost::any_cast(&m_replace)) { + const auto &s_replace = boost::any_cast(m_replace); + if (s_replace == "best") { + pop.set_xf(pop.best_idx(), initial_guess, new_f); + } else if (s_replace == "worst") { + pop.set_xf(pop.worst_idx(), initial_guess, new_f); } else { - pop.set_xf(pop.worst_idx(), initial_guess, {fitness}); + assert(s_replace == "random"); + std::uniform_int_distribution dist(0, pop.size() - 1u); + pop.set_xf(dist(m_e), initial_guess, new_f); } } else { - assert(s_replace == "random"); - std::uniform_int_distribution dist(0, pop.size() - 1u); - if (nc) { - pop.set_x(dist(m_e), initial_guess); - } else { - pop.set_xf(dist(m_e), initial_guess, {fitness}); + const auto idx = boost::any_cast(m_replace); + if (idx >= pop.size()) { + pagmo_throw(std::invalid_argument, "cannot replace the individual at index " + std::to_string(idx) + + " after evolution: the population has a size of only " + + std::to_string(pop.size())); } - } - } else { - const auto idx = boost::any_cast(m_replace); - if (idx >= pop.size()) { - pagmo_throw(std::invalid_argument, "cannot replace the individual at index " + std::to_string(idx) - + " after evolution: the population has a size of only " - + std::to_string(pop.size())); - } - if (nc) { - pop.set_x(idx, initial_guess); - } else { - pop.set_xf(idx, initial_guess, {fitness}); + pop.set_xf(idx, initial_guess, new_f); } } @@ -1066,11 +1115,11 @@ class nlopt * * Example (verbosity 5): * @code{.unparsed} - * fevals: fitness: violated: viol. norm: - * 1 47.9474 1 2.07944 i - * 6 17.1986 2 0.150557 i - * 11 17.014 0 0 - * 16 17.014 0 0 + * objevals: objval: violated: viol. 
norm: + * 1 47.9474 1 2.07944 i + * 6 17.1986 2 0.150557 i + * 11 17.014 0 0 + * 16 17.014 0 0 * @endcode * The ``i`` at the end of some rows indicates that the decision vector is infeasible. Feasibility * is checked against the problem's tolerance. @@ -1092,24 +1141,38 @@ class nlopt { int major, minor, bugfix; ::nlopt_version(&major, &minor, &bugfix); - return "\tNLopt version: " + std::to_string(major) + "." + std::to_string(minor) + "." + std::to_string(bugfix) - + "\n\tLast optimisation return code: " + detail::nlopt_res2string(m_last_opt_result) + "\n\tVerbosity: " - + std::to_string(m_verbosity) + "\n\tIndividual selection " - + (boost::any_cast(&m_select) - ? "idx: " + std::to_string(boost::any_cast(m_select)) - : "policy: " + boost::any_cast(m_select)) - + "\n\tIndividual replacement " - + (boost::any_cast(&m_replace) - ? "idx: " + std::to_string(boost::any_cast(m_replace)) - : "policy: " + boost::any_cast(m_replace)) - + "\n\tStopping criteria:\n\t\tstopval: " - + (m_sc_stopval == -HUGE_VAL ? "disabled" : detail::to_string(m_sc_stopval)) + "\n\t\tftol_rel: " - + (m_sc_ftol_rel <= 0. ? "disabled" : detail::to_string(m_sc_ftol_rel)) + "\n\t\tftol_abs: " - + (m_sc_ftol_abs <= 0. ? "disabled" : detail::to_string(m_sc_ftol_abs)) + "\n\t\txtol_rel: " - + (m_sc_xtol_rel <= 0. ? "disabled" : detail::to_string(m_sc_xtol_rel)) + "\n\t\txtol_abs: " - + (m_sc_xtol_abs <= 0. ? "disabled" : detail::to_string(m_sc_xtol_abs)) + "\n\t\tmaxeval: " - + (m_sc_maxeval <= 0. ? "disabled" : detail::to_string(m_sc_maxeval)) + "\n\t\tmaxtime: " - + (m_sc_maxtime <= 0. ? "disabled" : detail::to_string(m_sc_maxtime)) + "\n"; + auto retval = "\tNLopt version: " + std::to_string(major) + "." + std::to_string(minor) + "." + + std::to_string(bugfix) + "\n\tSolver: '" + m_algo + "'\n\tLast optimisation return code: " + + detail::nlopt_res2string(m_last_opt_result) + "\n\tVerbosity: " + std::to_string(m_verbosity) + + "\n\tIndividual selection " + + (boost::any_cast(&m_select) + ? 
"idx: " + std::to_string(boost::any_cast(m_select)) + : "policy: " + boost::any_cast(m_select)) + + "\n\tIndividual replacement " + + (boost::any_cast(&m_replace) + ? "idx: " + std::to_string(boost::any_cast(m_replace)) + : "policy: " + boost::any_cast(m_replace)) + + "\n\tStopping criteria:\n\t\tstopval: " + + (m_sc_stopval == -HUGE_VAL ? "disabled" : detail::to_string(m_sc_stopval)) + "\n\t\tftol_rel: " + + (m_sc_ftol_rel <= 0. ? "disabled" : detail::to_string(m_sc_ftol_rel)) + "\n\t\tftol_abs: " + + (m_sc_ftol_abs <= 0. ? "disabled" : detail::to_string(m_sc_ftol_abs)) + "\n\t\txtol_rel: " + + (m_sc_xtol_rel <= 0. ? "disabled" : detail::to_string(m_sc_xtol_rel)) + "\n\t\txtol_abs: " + + (m_sc_xtol_abs <= 0. ? "disabled" : detail::to_string(m_sc_xtol_abs)) + "\n\t\tmaxeval: " + + (m_sc_maxeval <= 0. ? "disabled" : detail::to_string(m_sc_maxeval)) + "\n\t\tmaxtime: " + + (m_sc_maxtime <= 0. ? "disabled" : detail::to_string(m_sc_maxtime)) + "\n"; + if (m_loc_opt) { + // Add a tab to the output of the extra_info() of the local opt, + // and append the result. + retval += "\tLocal optimizer:\n"; + const auto loc_info = m_loc_opt->get_extra_info(); + std::vector split_v; + boost::algorithm::split(split_v, loc_info, boost::algorithm::is_any_of("\n"), + boost::algorithm::token_compress_on); + for (const auto &s : split_v) { + retval += "\t" + s + "\n"; + } + } + return retval; } /// Get the optimisation log. /** @@ -1302,6 +1365,127 @@ class nlopt { m_sc_maxtime = n; } + /// Set the local optimizer. + /** + * Some NLopt algorithms rely on other NLopt algorithms as local/subsidiary optimizers. + * This method allows to set such local optimizer. By default, no local optimizer is specified. + * + * **NOTE**: at the present time, only the ``"auglag"`` and ``"auglag_eq"`` solvers make use + * of a local optimizer. Setting a local optimizer on any other solver will have no effect. 
+ * + * **NOTE**: the objective function, bounds, and nonlinear-constraint parameters of the local + * optimizer are ignored (as they are provided by the parent optimizer). Conversely, the stopping + * criteria should be specified in the local optimizer. The verbosity of + * the local optimizer is also forcibly set to zero during the optimisation. + * + * @param n the local optimizer that will be used by this pagmo::nlopt algorithm. + */ + void set_local_optimizer(nlopt n) + { + m_loc_opt = detail::make_unique(std::move(n)); + } + /// Get the local optimizer. + /** + * This method returns a raw const pointer to the local optimizer, if it has been set via set_local_optimizer(). + * Otherwise, \p nullptr will be returned. + * + * **NOTE**: the returned value is a raw non-owning pointer: the lifetime of the pointee is tied to the lifetime + * of \p this, and \p delete must never be called on the pointer. + * + * @return a const pointer to the local optimizer. + */ + const nlopt *get_local_optimizer() const + { + return m_loc_opt.get(); + } + /// Get the local optimizer. + /** + * This method returns a raw pointer to the local optimizer, if it has been set via set_local_optimizer(). + * Otherwise, \p nullptr will be returned. + * + * **NOTE**: the returned value is a raw non-owning pointer: the lifetime of the pointee is tied to the lifetime + * of \p this, and \p delete must never be called on the pointer. + * + * @return a pointer to the local optimizer. + */ + nlopt *get_local_optimizer() + { + return m_loc_opt.get(); + } + /// Unset the local optimizer. + /** + * After a call to this method, get_local_optimizer() and get_local_optimizer() const will return \p nullptr. + */ + void unset_local_optimizer() + { + m_loc_opt.reset(nullptr); + } + /// Save to archive. + /** + * @param ar the target archive. + * + * @throws unspecified any exception thrown by the serialization of primitive types. 
+ */ + template + void save(Archive &ar) const + { + ar(m_algo); + if (boost::any_cast(&m_select)) { + // NOTE: true -> string, false -> idx. + ar(true); + ar(boost::any_cast(m_select)); + } else { + ar(false); + ar(boost::any_cast(m_select)); + } + if (boost::any_cast(&m_replace)) { + // NOTE: true -> string, false -> idx. + ar(true); + ar(boost::any_cast(m_replace)); + } else { + ar(false); + ar(boost::any_cast(m_replace)); + } + ar(m_rselect_seed, m_e, m_last_opt_result, m_sc_stopval, m_sc_ftol_rel, m_sc_ftol_abs, m_sc_xtol_rel, + m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime, m_verbosity, m_log, m_loc_opt); + } + /// Load from archive. + /** + * In case of exceptions, \p this will be unaffected. + * + * @param ar the source archive. + * + * @throws unspecified any exception thrown by the deserialization of primitive types. + */ + template + void load(Archive &ar) + { + nlopt tmp; + ar(tmp.m_algo); + bool flag; + std::string str; + population::size_type idx; + ar(flag); + if (flag) { + ar(str); + tmp.m_select = str; + } else { + ar(idx); + tmp.m_select = idx; + } + ar(flag); + if (flag) { + ar(str); + tmp.m_replace = str; + } else { + ar(idx); + tmp.m_replace = idx; + } + ar(tmp.m_rselect_seed, tmp.m_e, tmp.m_last_opt_result, tmp.m_sc_stopval, tmp.m_sc_ftol_rel, tmp.m_sc_ftol_abs, + tmp.m_sc_xtol_rel, tmp.m_sc_xtol_abs, tmp.m_sc_maxeval, tmp.m_sc_maxtime, tmp.m_verbosity, tmp.m_log, + tmp.m_loc_opt); + *this = std::move(tmp); + } private: std::string m_algo; @@ -1321,9 +1505,13 @@ class nlopt // Verbosity/log. unsigned m_verbosity = 0; mutable log_type m_log; + // Local/subsidiary optimizer. 
+ std::unique_ptr m_loc_opt; }; } +PAGMO_REGISTER_ALGORITHM(pagmo::nlopt) + #else // PAGMO_WITH_NLOPT #error The nlopt.hpp header was included, but pagmo was not compiled with NLopt support diff --git a/include/pagmo/island.hpp b/include/pagmo/island.hpp index b67efbe26..4d0d35f11 100644 --- a/include/pagmo/island.hpp +++ b/include/pagmo/island.hpp @@ -1383,6 +1383,36 @@ class archipelago stream(os, t); return os; } + /// Get the fitness vectors of the islands' champions. + /** + * @return a collection of the fitness vectors of the islands' champions. + * + * @throws unspecified any exception thrown by population::champion_f() or + * by memory errors in standard containers. + */ + std::vector get_champions_f() const + { + std::vector retval; + for (const auto &isl_ptr : m_islands) { + retval.emplace_back(isl_ptr->get_population().champion_f()); + } + return retval; + } + /// Get the decision vectors of the islands' champions. + /** + * @return a collection of the decision vectors of the islands' champions. + * + * @throws unspecified any exception thrown by population::champion_x() or + * by memory errors in standard containers. + */ + std::vector get_champions_x() const + { + std::vector retval; + for (const auto &isl_ptr : m_islands) { + retval.emplace_back(isl_ptr->get_population().champion_x()); + } + return retval; + } /// Save to archive. /** * This method will save to \p ar the islands of the archipelago. 
diff --git a/pygmo/__init__.py b/pygmo/__init__.py index cf7dadcab..1993bf3d3 100644 --- a/pygmo/__init__.py +++ b/pygmo/__init__.py @@ -469,7 +469,7 @@ def _archi_init(self, n=0, **kwargs): >>> archi.evolve() >>> archi.wait() - >>> res = [isl.get_population().champion_f for isl in archi] + >>> res = archi.get_champions_f() >>> res #doctest: +SKIP [array([ 475165.1020545]), array([ 807090.7156793]), diff --git a/pygmo/_island_test.py b/pygmo/_island_test.py index 72751ab01..8a7d751f1 100644 --- a/pygmo/_island_test.py +++ b/pygmo/_island_test.py @@ -293,4 +293,7 @@ def run_basic_tests(self): self.assertEqual(str(isl3), str(isl)) # Pickle. - self.assertEqual(str(loads(dumps(isl))), str(isl)) + pisl = loads(dumps(isl)) + self.assertEqual(str(pisl.get_population()), str(isl.get_population())) + self.assertEqual(str(pisl.get_algorithm()), str(isl.get_algorithm())) + self.assertEqual(str(pisl.get_name()), str(isl.get_name())) diff --git a/pygmo/core.cpp b/pygmo/core.cpp index 9398055cd..552f7c079 100644 --- a/pygmo/core.cpp +++ b/pygmo/core.cpp @@ -809,5 +809,24 @@ BOOST_PYTHON_MODULE(core) .def("__getitem__", lcast([](archipelago &archi, archipelago::size_type n) -> island & { return archi[n]; }), pygmo::archipelago_getitem_docstring().c_str(), bp::return_internal_reference<>()) // NOTE: docs for push_back() are in the Python reimplementation. - .def("_push_back", lcast([](archipelago &archi, const island &isl) { archi.push_back(isl); })); + .def("_push_back", lcast([](archipelago &archi, const island &isl) { archi.push_back(isl); })) + // Champions. 
+ .def("get_champions_f", lcast([](const archipelago &archi) -> bp::list { + bp::list retval; + auto fs = archi.get_champions_f(); + for (const auto &f : fs) { + retval.append(pygmo::v_to_a(f)); + } + return retval; + }), + pygmo::archipelago_get_champions_f_docstring().c_str()) + .def("get_champions_x", lcast([](const archipelago &archi) -> bp::list { + bp::list retval; + auto xs = archi.get_champions_x(); + for (const auto &x : xs) { + retval.append(pygmo::v_to_a(x)); + } + return retval; + }), + pygmo::archipelago_get_champions_x_docstring().c_str()); } diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index bfeba0405..0e4885f09 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -997,7 +997,7 @@ This method will check the feasibility of a fitness vector *f* against the toler Raises: ValueError: if the size of *f* is not the same as the output of - :func:`~pymog.core.problem.get_nf()` + :func:`~pygmo.problem.get_nf()` )"; } @@ -3954,6 +3954,38 @@ inserted via :func:`~pygmo.archipelago.push_back()`). )"; } +std::string archipelago_get_champions_f_docstring() +{ + return R"(get_champions_f() + +Get the fitness vectors of the islands' champions. + +Returns: + ``list`` of 1D NumPy float arrays: the fitness vectors of the islands' champions + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors, + mismatched function signatures, etc.) + +)"; +} + +std::string archipelago_get_champions_x_docstring() +{ + return R"(get_champions_x() + +Get the decision vectors of the islands' champions. + +Returns: + ``list`` of 1D NumPy float arrays: the decision vectors of the islands' champions + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., type conversion errors, + mismatched function signatures, etc.) 
+ +)"; +} + std::string nlopt_docstring() { return R"(__init__(solver = "cobyla") @@ -3976,7 +4008,8 @@ NLopt algorithms is: * SLSQP, * low-storage BFGS, * preconditioned truncated Newton, -* shifted limited-memory variable-metric. +* shifted limited-memory variable-metric, +* augmented Lagrangian algorithm. The desired NLopt solver is selected upon construction of an :class:`~pygmo.nlopt` algorithm. Various properties of the solver (e.g., the stopping criteria) can be configured via class attributes. Note that multiple @@ -3989,7 +4022,8 @@ Some solvers support equality and/or inequality constaints. In order to support pagmo's population-based optimisation model, the ``evolve()`` method will select a single individual from the input :class:`~pygmo.population` to be optimised by the NLopt solver. -The optimised individual will then be inserted back into the population at the end of the optimisation. +If the optimisation produces a better individual (as established by :func:`~pygmo.compare_fc()`), +the optimised individual will be inserted back into the population. The selection and replacement strategies can be configured via the :attr:`~pygmo.nlopt.selection` and :attr:`~pygmo.nlopt.replacement` attributes. @@ -4028,6 +4062,8 @@ translation table: ``"tnewton"`` ``NLOPT_LD_TNEWTON`` ``"var2"`` ``NLOPT_LD_VAR2`` ``"var1"`` ``NLOPT_LD_VAR1`` +``"auglag"`` ``NLOPT_AUGLAG`` +``"auglag_eq"`` ``NLOPT_AUGLAG_EQ`` ================================ ==================================== The parameters of the selected solver can be configured via the attributes of this class. @@ -4060,37 +4096,37 @@ See also the docs of the C++ class :cpp:class:`pagmo::nlopt`. >>> prob.c_tol = [1E-6] * 18 # Set constraints tolerance to 1E-6 >>> pop = population(prob, 20) >>> pop = algo.evolve(pop) # doctest: +SKIP - fevals: fitness: violated: viol. 
norm: - 1 95959.4 18 538.227 i - 2 89282.7 18 5177.42 i - 3 75580 18 464.206 i - 4 75580 18 464.206 i - 5 77737.6 18 1095.94 i - 6 41162 18 350.446 i - 7 41162 18 350.446 i - 8 67881 18 362.454 i - 9 30502.2 18 249.762 i - 10 30502.2 18 249.762 i - 11 7266.73 18 95.5946 i - 12 4510.3 18 42.2385 i - 13 2400.66 18 35.2507 i - 14 34051.9 18 749.355 i - 15 1657.41 18 32.1575 i - 16 1657.41 18 32.1575 i - 17 1564.44 18 12.5042 i - 18 275.987 14 6.22676 i - 19 232.765 12 12.442 i - 20 161.892 15 4.00744 i - 21 161.892 15 4.00744 i - 22 17.6821 11 1.78909 i - 23 7.71103 5 0.130386 i - 24 6.24758 4 0.00736759 i - 25 6.23325 1 5.12547e-05 i - 26 6.2325 0 0 - 27 6.23246 0 0 - 28 6.23246 0 0 - 29 6.23246 0 0 - 30 6.23246 0 0 + objevals: objval: violated: viol. norm: + 1 95959.4 18 538.227 i + 2 89282.7 18 5177.42 i + 3 75580 18 464.206 i + 4 75580 18 464.206 i + 5 77737.6 18 1095.94 i + 6 41162 18 350.446 i + 7 41162 18 350.446 i + 8 67881 18 362.454 i + 9 30502.2 18 249.762 i + 10 30502.2 18 249.762 i + 11 7266.73 18 95.5946 i + 12 4510.3 18 42.2385 i + 13 2400.66 18 35.2507 i + 14 34051.9 18 749.355 i + 15 1657.41 18 32.1575 i + 16 1657.41 18 32.1575 i + 17 1564.44 18 12.5042 i + 18 275.987 14 6.22676 i + 19 232.765 12 12.442 i + 20 161.892 15 4.00744 i + 21 161.892 15 4.00744 i + 22 17.6821 11 1.78909 i + 23 7.71103 5 0.130386 i + 24 6.24758 4 0.00736759 i + 25 6.23325 1 5.12547e-05 i + 26 6.2325 0 0 + 27 6.23246 0 0 + 28 6.23246 0 0 + 29 6.23246 0 0 + 30 6.23246 0 0 Optimisation return status: NLOPT_XTOL_REACHED (value = 4, Optimization stopped because xtol_rel or xtol_abs was reached) @@ -4366,4 +4402,34 @@ Get the name of the NLopt solver used during construction. )"; } +std::string nlopt_local_optimizer_docstring() +{ + return R"(Local optimizer. + +Some NLopt algorithms rely on other NLopt algorithms as local/subsidiary optimizers. +This property, of type :class:`~pygmo.nlopt`, allows to set such local optimizer. 
+By default, no local optimizer is specified, and the property is set to ``None``. + +.. note:: + + At the present time, only the ``"auglag"`` and ``"auglag_eq"`` solvers make use + of a local optimizer. Setting a local optimizer on any other solver will have no effect. + +.. note:: + + The objective function, bounds, and nonlinear-constraint parameters of the local + optimizer are ignored (as they are provided by the parent optimizer). Conversely, the stopping + criteria should be specified in the local optimizer. The verbosity of + the local optimizer is also forcibly set to zero during the optimisation. + +Returns: + :class:`~pygmo.nlopt`: the local optimizer, or ``None`` if not set + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python + (e.g., type conversion errors, mismatched function signatures, etc.), when setting the property + +)"; +} + } // namespace diff --git a/pygmo/docstrings.hpp b/pygmo/docstrings.hpp index 1f039e76c..ad2779ae0 100644 --- a/pygmo/docstrings.hpp +++ b/pygmo/docstrings.hpp @@ -157,6 +157,7 @@ std::string nlopt_set_random_sr_seed_docstring(); std::string nlopt_get_log_docstring(); std::string nlopt_get_last_opt_result_docstring(); std::string nlopt_get_solver_name_docstring(); +std::string nlopt_local_optimizer_docstring(); // utilities // hypervolume @@ -218,6 +219,8 @@ std::string archipelago_busy_docstring(); std::string archipelago_wait_docstring(); std::string archipelago_get_docstring(); std::string archipelago_getitem_docstring(); +std::string archipelago_get_champions_f_docstring(); +std::string archipelago_get_champions_x_docstring(); } #endif diff --git a/pygmo/expose_algorithms.cpp b/pygmo/expose_algorithms.cpp index 30052c903..3025d8d7d 100644 --- a/pygmo/expose_algorithms.cpp +++ b/pygmo/expose_algorithms.cpp @@ -404,6 +404,16 @@ void expose_algorithms() nlopt_.def("get_last_opt_result", lcast([](const nlopt &n) { return static_cast(n.get_last_opt_result()); }),
nlopt_get_last_opt_result_docstring().c_str()); nlopt_.def("get_solver_name", &nlopt::get_solver_name, nlopt_get_solver_name_docstring().c_str()); + nlopt_.add_property("local_optimizer", bp::make_function(lcast([](nlopt &n) { return n.get_local_optimizer(); }), + bp::return_internal_reference<>()), + lcast([](nlopt &n, const nlopt *ptr) { + if (ptr) { + n.set_local_optimizer(*ptr); + } else { + n.unset_local_optimizer(); + } + }), + nlopt_local_optimizer_docstring().c_str()); #endif } } diff --git a/pygmo/test.py b/pygmo/test.py index d357b09f6..bfdd779b7 100644 --- a/pygmo/test.py +++ b/pygmo/test.py @@ -474,6 +474,54 @@ def _(): pop = algo.evolve(pop) self.assertTrue(len(algo.extract(nlopt).get_log()) != 0) + # Pickling. + from pickle import dumps, loads + algo = algorithm(nlopt("slsqp")) + algo.set_verbosity(5) + prob = problem(luksan_vlcek1(20)) + prob.c_tol = [1E-6] * 18 + pop = population(prob, 20) + algo.evolve(pop) + self.assertEqual(str(algo), str(loads(dumps(algo)))) + self.assertEqual(algo.extract(nlopt).get_log(), loads( + dumps(algo)).extract(nlopt).get_log()) + + # Local optimizer. + self.assertTrue(nlopt("slsqp").local_optimizer is None) + self.assertTrue(nlopt("auglag").local_optimizer is None) + n = nlopt("auglag") + loc = nlopt("slsqp") + n.local_optimizer = loc + self.assertFalse(n.local_optimizer is None) + self.assertEqual(str(algorithm(loc)), str( + algorithm(n.local_optimizer))) + pop = population(prob, 20, seed=4) + algo = algorithm(n) + algo.evolve(pop) + self.assertTrue(algo.extract(nlopt).get_last_opt_result() >= 0) + n = nlopt("auglag_eq") + loc = nlopt("slsqp") + n.local_optimizer = loc + self.assertFalse(n.local_optimizer is None) + self.assertEqual(str(algorithm(loc)), str( + algorithm(n.local_optimizer))) + pop = population(prob, 20, seed=4) + algo = algorithm(n) + algo.evolve(pop) + self.assertTrue(algo.extract(nlopt).get_last_opt_result() >= 0) + + # Refcount. 
+ import sys + nl = nlopt("auglag") + loc = nlopt("slsqp") + nl.local_optimizer = loc + old_rc = sys.getrefcount(nl) + foo = nl.local_optimizer + self.assertEqual(old_rc + 1, sys.getrefcount(nl)) + del nl + self.assertTrue(len(str(foo)) != 0) + del foo + class null_problem_test_case(_ut.TestCase): """Test case for the null problem @@ -558,8 +606,10 @@ class con_utils_test_case(_ut.TestCase): def runTest(self): from .core import compare_fc, sort_population_con - compare_fc(f1 = [1,1,1], f2 = [1,2.1,-1.2], nec = 1, tol = [0]*2) - sort_population_con(input_f = [[1.2,0.1,-1],[0.2,1.1,1.1],[2,-0.5,-2]], nec = 1, tol = [1e-8]*2) + compare_fc(f1=[1, 1, 1], f2=[1, 2.1, -1.2], nec=1, tol=[0] * 2) + sort_population_con( + input_f=[[1.2, 0.1, -1], [0.2, 1.1, 1.1], [2, -0.5, -2]], nec=1, tol=[1e-8] * 2) + class global_rng_test_case(_ut.TestCase): """Test case for the global random number generator @@ -568,14 +618,15 @@ class global_rng_test_case(_ut.TestCase): def runTest(self): from .core import set_global_rng_seed, population, ackley - set_global_rng_seed(seed = 32) - pop = population(prob = ackley(5), size = 20) + set_global_rng_seed(seed=32) + pop = population(prob=ackley(5), size=20) f1 = pop.champion_f - set_global_rng_seed(seed = 32) - pop = population(prob = ackley(5), size = 20) + set_global_rng_seed(seed=32) + pop = population(prob=ackley(5), size=20) f2 = pop.champion_f self.assertTrue(f1 == f2) + class hypervolume_test_case(_ut.TestCase): """Test case for the hypervolume utilities @@ -1021,6 +1072,7 @@ def runTest(self): self.run_push_back_tests() self.run_io_tests() self.run_pickle_tests() + self.run_champions_tests() def run_init_tests(self): from . import archipelago, de, rosenbrock, population, null_problem, thread_island, mp_island @@ -1132,6 +1184,7 @@ def run_evolve_tests(self): def run_access_tests(self): from . 
import archipelago, de, rosenbrock + import sys a = archipelago(5, algo=de(), prob=rosenbrock(), pop_size=10) i0, i1, i2 = a[0], a[1], a[2] a.push_back(algo=de(), prob=rosenbrock(), size=11) @@ -1151,6 +1204,17 @@ def run_access_tests(self): self.assertTrue(isl.get_algorithm().is_(de)) self.assertTrue(isl.get_population().problem.is_(rosenbrock)) self.assertEqual(len(isl.get_population()), 10) + # Check refcount when returning internal ref. + a = archipelago(5, algo=de(), prob=rosenbrock(), pop_size=10) + old_rc = sys.getrefcount(a) + i0, i1, i2, i3 = a[0], a[1], a[2], a[3] + self.assertEqual(sys.getrefcount(a) - 4, old_rc) + del a + self.assertTrue(str(i0) != "") + self.assertTrue(str(i1) != "") + self.assertTrue(str(i2) != "") + self.assertTrue(str(i3) != "") + del i0, i1, i2, i3 def run_push_back_tests(self): from . import archipelago, de, rosenbrock @@ -1202,6 +1266,26 @@ def run_pickle_tests(self): pop_size=10, udi=mp_island()) self.assertEqual(repr(a), repr(loads(dumps(a)))) + def run_champions_tests(self): + from . import archipelago, de, rosenbrock, zdt + from numpy import ndarray + a = archipelago(5, algo=de(), prob=rosenbrock(), pop_size=10) + cf = a.get_champions_f() + self.assertEqual(type(cf), list) + self.assertEqual(len(cf), 5) + self.assertEqual(type(cf[0]), ndarray) + cx = a.get_champions_x() + self.assertEqual(type(cx), list) + self.assertEqual(len(cx), 5) + self.assertEqual(type(cx[0]), ndarray) + a.push_back(algo=de(), prob=rosenbrock(10), size=20) + cx = a.get_champions_x() + self.assertEqual(len(cx[4]), 2) + self.assertEqual(len(cx[5]), 10) + a.push_back(algo=de(), prob=zdt(), size=20) + self.assertRaises(ValueError, lambda: a.get_champions_x()) + self.assertRaises(ValueError, lambda: a.get_champions_f()) + def run_test_suite(): """Run the full test suite. 
@@ -1226,9 +1310,9 @@ def run_test_suite(): suite.addTest(archipelago_test_case()) suite.addTest(null_problem_test_case()) suite.addTest(hypervolume_test_case()) - suite.addTest(mo_utils_test_case()) - suite.addTest(con_utils_test_case()) - suite.addTest(global_rng_test_case()) + suite.addTest(mo_utils_test_case()) + suite.addTest(con_utils_test_case()) + suite.addTest(global_rng_test_case()) suite.addTest(estimate_sparsity_test_case()) suite.addTest(estimate_gradient_test_case()) try: diff --git a/tests/archipelago.cpp b/tests/archipelago.cpp index b22edd153..142bdb3ee 100644 --- a/tests/archipelago.cpp +++ b/tests/archipelago.cpp @@ -53,6 +53,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -378,3 +379,25 @@ BOOST_AUTO_TEST_CASE(archipelago_iterator_tests) } BOOST_CHECK(archi.begin() + 4 == archi.end()); } + +BOOST_AUTO_TEST_CASE(archipelago_champion_tests) +{ + archipelago archi; + BOOST_CHECK(archi.get_champions_f().empty()); + BOOST_CHECK(archi.get_champions_x().empty()); + archi.push_back(de{}, rosenbrock{}, 20u); + archi.push_back(de{}, rosenbrock{}, 20u); + archi.push_back(de{}, rosenbrock{}, 20u); + BOOST_CHECK_EQUAL(archi.get_champions_f().size(), 3u); + BOOST_CHECK_EQUAL(archi.get_champions_x().size(), 3u); + for (auto i = 0u; i < 3u; ++i) { + BOOST_CHECK(archi[i].get_population().champion_x() == archi.get_champions_x()[i]); + BOOST_CHECK(archi[i].get_population().champion_f() == archi.get_champions_f()[i]); + } + archi.push_back(de{}, rosenbrock{10}, 20u); + BOOST_CHECK(archi.get_champions_x()[2].size() == 2u); + BOOST_CHECK(archi.get_champions_x()[3].size() == 10u); + archi.push_back(de{}, zdt{}, 20u); + BOOST_CHECK_THROW(archi.get_champions_f(), std::invalid_argument); + BOOST_CHECK_THROW(archi.get_champions_x(), std::invalid_argument); +} diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index e093bdcce..f45e62528 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -26,14 +26,25 @@ You 
should have received copies of the GNU General Public License and the GNU Lesser General Public License along with the PaGMO library. If not, see https://www.gnu.org/licenses/. */ +#if defined(_MSC_VER) + +// Disable the checked iterators feature in MSVC. We want it for the source code +// (so it should not be disabled in the headers), but dealing with it in the tests is +// not as useful and quite painful. +#define _SCL_SECURE_NO_WARNINGS + +#endif + #define BOOST_TEST_MODULE nlopt_test #include #include +#include #include #include #include #include +#include #include #include #include @@ -47,6 +58,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include using namespace pagmo; @@ -263,3 +275,116 @@ BOOST_AUTO_TEST_CASE(nlopt_set_sc) } a.set_maxtime(123); } + +BOOST_AUTO_TEST_CASE(nlopt_serialization) +{ + for (auto r : {"best", "worst", "random"}) { + for (auto s : {"best", "worst", "random"}) { + auto n = nlopt{"slsqp"}; + n.set_replacement(r); + n.set_selection(s); + algorithm algo{n}; + algo.set_verbosity(5); + auto pop = population(hs71{}, 10); + algo.evolve(pop); + auto s_log = algo.extract()->get_log(); + // Store the string representation of p. + std::stringstream ss; + auto before_text = boost::lexical_cast(algo); + // Now serialize, deserialize and compare the result. + { + cereal::JSONOutputArchive oarchive(ss); + oarchive(algo); + } + // Change the content of p before deserializing. 
+ algo = algorithm{null_algorithm{}}; + { + cereal::JSONInputArchive iarchive(ss); + iarchive(algo); + } + auto after_text = boost::lexical_cast(algo); + BOOST_CHECK_EQUAL(before_text, after_text); + BOOST_CHECK(s_log == algo.extract()->get_log()); + } + } + for (auto r : {0u, 4u, 7u}) { + for (auto s : {0u, 4u, 7u}) { + auto n = nlopt{"slsqp"}; + n.set_replacement(r); + n.set_selection(s); + algorithm algo{n}; + algo.set_verbosity(5); + auto pop = population(hs71{}, 10); + algo.evolve(pop); + auto s_log = algo.extract()->get_log(); + // Store the string representation of p. + std::stringstream ss; + auto before_text = boost::lexical_cast(algo); + // Now serialize, deserialize and compare the result. + { + cereal::JSONOutputArchive oarchive(ss); + oarchive(algo); + } + // Change the content of p before deserializing. + algo = algorithm{null_algorithm{}}; + { + cereal::JSONInputArchive iarchive(ss); + iarchive(algo); + } + auto after_text = boost::lexical_cast(algo); + BOOST_CHECK_EQUAL(before_text, after_text); + BOOST_CHECK(s_log == algo.extract()->get_log()); + } + } +} + +BOOST_AUTO_TEST_CASE(nlopt_loc_opt) +{ + for (const auto &str : {"auglag", "auglag_eq"}) { + nlopt n{str}; + n.set_local_optimizer(nlopt{"slsqp"}); + BOOST_CHECK(n.get_local_optimizer()); + BOOST_CHECK(static_cast(n).get_local_optimizer()); + // Test serialization. + algorithm algo{n}; + std::stringstream ss; + auto before_text = boost::lexical_cast(algo); + // Now serialize, deserialize and compare the result. + { + cereal::JSONOutputArchive oarchive(ss); + oarchive(algo); + } + // Change the content of p before deserializing. + algo = algorithm{null_algorithm{}}; + { + cereal::JSONInputArchive iarchive(ss); + iarchive(algo); + } + auto after_text = boost::lexical_cast(algo); + BOOST_CHECK_EQUAL(before_text, after_text); + // Test small evolution. 
+ auto pop = population{hs71{}, 1}; + pop.set_x(0, {2., 2., 2., 2.}); + pop.get_problem().set_c_tol({1E-6, 1E-6}); + algo.evolve(pop); + BOOST_CHECK(algo.extract()->get_last_opt_result() >= 0); + // Unset the local optimizer. + algo.extract()->unset_local_optimizer(); + BOOST_CHECK(!algo.extract()->get_local_optimizer()); + algo.evolve(pop); + BOOST_CHECK(algo.extract()->get_last_opt_result() == NLOPT_INVALID_ARGS); + // Auglag inside auglag. Not sure if this is supposed to work, it gives an error + // currently. + algo.extract()->set_local_optimizer(nlopt{str}); + algo.extract()->get_local_optimizer()->set_local_optimizer(nlopt{"lbfgs"}); + algo.evolve(pop); + BOOST_CHECK(algo.extract()->get_last_opt_result() < 0); + } + // Check setting a local opt does not do anything for normal solvers. + nlopt n{"slsqp"}; + n.set_local_optimizer(nlopt{"lbfgs"}); + algorithm algo{n}; + auto pop = population{rosenbrock{20}, 1}; + algo.evolve(pop); + BOOST_CHECK(algo.extract()->get_last_opt_result() >= 0); +}