From fd37d22da25387b341c5c25baf519f0b8a9845ea Mon Sep 17 00:00:00 2001 From: reneeotten Date: Tue, 20 Mar 2018 22:03:55 -0400 Subject: [PATCH 01/15] ENH: add AMPGO minimization algorithm and update docs - add AMPGO code from Andrea Gavana --- doc/fitting.rst | 68 +++++---- lmfit/_ampgo.py | 344 +++++++++++++++++++++++++++++++++++++++++++++ lmfit/minimizer.py | 129 ++++++++++++++++- 3 files changed, 509 insertions(+), 32 deletions(-) create mode 100644 lmfit/_ampgo.py diff --git a/doc/fitting.rst b/doc/fitting.rst index 26de49b6f..4054b2b85 100644 --- a/doc/fitting.rst +++ b/doc/fitting.rst @@ -125,37 +125,41 @@ class as listed in the :ref:`Table of Supported Fitting Methods Table of Supported Fitting Methods: - +-----------------------+------------------------------------------------------------------+ - | Fitting Method | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` | - +=======================+==================================================================+ - | Levenberg-Marquardt | ``leastsq`` or ``least_squares`` | - +-----------------------+------------------------------------------------------------------+ - | Nelder-Mead | ``nelder`` | - +-----------------------+------------------------------------------------------------------+ - | L-BFGS-B | ``lbfgsb`` | - +-----------------------+------------------------------------------------------------------+ - | Powell | ``powell`` | - +-----------------------+------------------------------------------------------------------+ - | Conjugate Gradient | ``cg`` | - +-----------------------+------------------------------------------------------------------+ - | Newton-CG | ``newton`` | - +-----------------------+------------------------------------------------------------------+ - | COBYLA | ``cobyla`` | - +-----------------------+------------------------------------------------------------------+ - | Truncated Newton | ``tnc`` | - +-----------------------+------------------------------------------------------------------+ - | Dogleg | ``dogleg`` | - +-----------------------+------------------------------------------------------------------+ - | Sequential Linear | ``slsqp`` | - | Squares Programming | | - +-----------------------+------------------------------------------------------------------+ - | Differential | ``differential_evolution`` | - | Evolution | | - +-----------------------+------------------------------------------------------------------+ - | Brute force method | ``brute`` | - +-----------------------+------------------------------------------------------------------+ - | Basinhopping | ``basinhopping`` | - +-----------------------+------------------------------------------------------------------+ + +------------------------+------------------------------------------------------------------+ + | Fitting Method | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` | + +========================+==================================================================+ + | Levenberg-Marquardt | ``leastsq`` or ``least_squares`` | + +------------------------+------------------------------------------------------------------+ + | Nelder-Mead | ``nelder`` | + +------------------------+------------------------------------------------------------------+ + | L-BFGS-B | ``lbfgsb`` | + +------------------------+------------------------------------------------------------------+ + | Powell | ``powell`` | + +------------------------+------------------------------------------------------------------+ + | Conjugate 
Gradient | ``cg`` | + +------------------------+------------------------------------------------------------------+ + | Newton-CG | ``newton`` | + +------------------------+------------------------------------------------------------------+ + | COBYLA | ``cobyla`` | + +------------------------+------------------------------------------------------------------+ + | Truncated Newton | ``tnc`` | + +------------------------+------------------------------------------------------------------+ + | Dogleg | ``dogleg`` | + +------------------------+------------------------------------------------------------------+ + | Sequential Linear | ``slsqp`` | + | Squares Programming | | + +------------------------+------------------------------------------------------------------+ + | Differential | ``differential_evolution`` | + | Evolution | | + +------------------------+------------------------------------------------------------------+ + | Brute force method | ``brute`` | + +------------------------+------------------------------------------------------------------+ + | Basinhopping | ``basinhopping`` | + +------------------------+------------------------------------------------------------------+ + | Adaptive Memory | ``ampgo`` | + | Programming for Global | | + | Optimization | | + +------------------------+------------------------------------------------------------------+ .. note:: @@ -370,6 +374,8 @@ For more information, check the examples in ``examples/lmfit_brute_example.ipynb .. automethod:: Minimizer.basinhopping +.. automethod:: Minimizer.ampgo + .. automethod:: Minimizer.emcee .. _label-emcee: diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py new file mode 100644 index 000000000..d8149a3ba --- /dev/null +++ b/lmfit/_ampgo.py @@ -0,0 +1,344 @@ +from __future__ import print_function + +import numpy + +OPENOPT = SCIPY = True + +try: + from openopt import NLP +except ImportError: + OPENOPT = False + +try: + from scipy.optimize import minimize +except ImportError: + SCIPY = False + +SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] +OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp'] + + +def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, + totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, + tabustrategy='farthest', fmin=-numpy.inf, disp=None): + """ + Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for + Global Optimization) algorithm. + + :param `objfun`: Function to be optimized, in the form ``f(x, *args)``. + :type `objfun`: callable + :param `args`: Additional arguments passed to `objfun`. + :type `args`: tuple + :param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available + `scipy` local solvers or `OpenOpt` solvers. + :type `local`: string + :param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable + [(`xl0`, `xu0`), (`xl1`, `xu1`), ...] + :type `bounds`: list + :param `maxfunevals`: The maximum number of function evaluations allowed. + :type `maxfunevals`: integer + :param `totaliter`: The maximum number of global iterations allowed. + :type `totaliter`: integer + :param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration. 
+ :type `maxiter`: integer + :param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective + function value and the provided global optimum (`fmin`) is less than `glbtol`. + :type `glbtol`: float + :param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase. + :type `eps1`: float + :param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase. + :type `eps2`: float + :param `tabulistsize`: The size of the tabu search list (a circular list). + :type `tabulistsize`: integer + :param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be + 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from + the last local minimum found. + :type `tabustrategy`: string + :param `fmin`: If known, the objective function global optimum value. + :type `fmin`: float + :param `disp`: If zero or defaulted, then no output is printed on screen. If a positive number, then status + messages are printed. + :type `disp`: integer + + :returns: A tuple of 5 elements, in the following order: + + 1. **best_x** (`array_like`): the estimated position of the global minimum. + 2. **best_f** (`float`): the value of `objfun` at the minimum. + 3. **evaluations** (`integer`): the number of function evaluations. + 4. **msg** (`string`): a message describes the cause of the termination. + 5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the + successful ones. + + :rtype: `tuple` + + The detailed implementation of AMPGO is described in the paper + "Adaptive Memory Programming for Constrained Global Optimization" located here: + + http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf + + Copyright 2014 Andrea Gavana + """ + + if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS: + raise Exception('Invalid local solver selected: %s'%local) + + if local in SCIPY_LOCAL_SOLVERS and not SCIPY: + raise Exception('The selected solver %s is not available as there is no scipy installation'%local) + + if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT: + raise Exception('The selected solver %s is not available as there is no OpenOpt installation'%local) + + x0 = numpy.atleast_1d(x0) + n = len(x0) + + if bounds is None: + bounds = [(None, None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + + low = [0]*n + up = [0]*n + for i in range(n): + if bounds[i] is None: + l, u = -numpy.inf, numpy.inf + else: + l, u = bounds[i] + if l is None: + low[i] = -numpy.inf + else: + low[i] = l + if u is None: + up[i] = numpy.inf + else: + up[i] = u + + if maxfunevals is None: + maxfunevals = max(100, 10*len(x0)) + + if tabulistsize < 1: + raise Exception('Invalid tabulistsize specified: %s. It should be an integer greater than zero.'%tabulistsize) + if tabustrategy not in ['oldest', 'farthest']: + raise Exception('Invalid tabustrategy specified: %s. 
It must be one of "oldest" or "farthest"'%tabustrategy) + + iprint = 50 + if disp is None or disp <= 0: + disp = 0 + iprint = -1 + + low = numpy.asarray(low) + up = numpy.asarray(up) + + tabulist = [] + best_f = numpy.inf + best_x = x0 + + global_iter = 0 + all_tunnel = success_tunnel = 0 + evaluations = 0 + + if glbtol < 1e-8: + local_tol = glbtol + else: + local_tol = 1e-8 + + while 1: + + if disp > 0: + print('\n') + print('='*72) + print('Starting MINIMIZATION Phase %-3d'%(global_iter+1)) + print('='*72) + + if local in OPENOPT_LOCAL_SOLVERS: + problem = NLP(objfun, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint) + problem.args = args + + results = problem.solve(local) + xf, yf, num_fun = results.xf, results.ff, results.evals['f'] + else: + options = {'maxiter': max(1, maxfunevals), 'disp': disp} + if local_opts is not None: + options.update(local_opts) + res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options) + xf, yf, num_fun = res['x'], res['fun'], res['nfev'] + + maxfunevals -= num_fun + evaluations += num_fun + + if yf < best_f: + best_f = yf + best_x = xf + + if disp > 0: + print('\n\n ==> Reached local minimum: %s\n'%yf) + + if best_f < fmin + glbtol: + if disp > 0: + print('='*72) + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + if maxfunevals <= 0: + if disp > 0: + print('='*72) + return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + + tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) + tabulist.append(xf) + + i = improve = 0 + + while i < maxiter and improve == 0: + + if disp > 0: + print('-'*72) + print('Starting TUNNELLING Phase (%3d-%3d)'%(global_iter+1, i+1)) + print('-'*72) + + all_tunnel += 1 + + r = numpy.random.uniform(-1.0, 1.0, size=(n, )) + beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r) + + if numpy.abs(beta) < 1e-8: + beta = eps2 + + x0 = xf + beta*r + + x0 = numpy.where(x0 < low, low, x0) + x0 = numpy.where(x0 > up , up , x0) + + aspiration = best_f - eps1*(1.0 + numpy.abs(best_f)) + + tunnel_args = tuple([objfun, aspiration, tabulist] + list(args)) + + if local in OPENOPT_LOCAL_SOLVERS: + problem = NLP(tunnel, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint) + problem.args = tunnel_args + + results = problem.solve(local) + xf, yf, num_fun = results.xf, results.ff, results.evals['f'] + else: + options = {'maxiter': max(1, maxfunevals), 'disp': disp} + if local_opts is not None: + options.update(local_opts) + + res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options) + xf, yf, num_fun = res['x'], res['fun'], res['nfev'] + + maxfunevals -= num_fun + evaluations += num_fun + + yf = inverse_tunnel(xf, yf, aspiration, tabulist) + + if yf <= best_f + glbtol: + oldf = best_f + best_f = yf + best_x = xf + improve = 1 + success_tunnel += 1 + + if disp > 0: + print('\n\n ==> Successful tunnelling phase. 
Reached local minimum: %s < %s\n'%(yf, oldf)) + + if best_f < fmin + glbtol: + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + + i += 1 + + if maxfunevals <= 0: + return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + + tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) + tabulist.append(xf) + + if disp > 0: + print('='*72) + + global_iter += 1 + x0 = xf.copy() + + if global_iter >= totaliter: + return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel) + + if best_f < fmin + glbtol: + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + + +def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): + + if len(tabulist) < tabulistsize: + return tabulist + + if tabustrategy == 'oldest': + tabulist.pop(0) + else: + distance = numpy.sqrt(numpy.sum((tabulist-xf)**2, axis=1)) + index = numpy.argmax(distance) + tabulist.pop(index) + + return tabulist + + +def tunnel(x0, *args): + + objfun, aspiration, tabulist = args[0:3] + + fun_args = () + if len(args) > 3: + fun_args = tuple(args[3:]) + + numerator = (objfun(x0, *fun_args) - aspiration)**2 + denominator = 1.0 + + for tabu in tabulist: + denominator = denominator*numpy.sqrt(numpy.sum((x0 - tabu)**2)) + + ytf = numerator/denominator + + return ytf + + +def inverse_tunnel(xtf, ytf, aspiration, tabulist): + + denominator = 1.0 + + for tabu in tabulist: + denominator = denominator*numpy.sqrt(numpy.sum((xtf - tabu)**2)) + + numerator = ytf*denominator + + yf = aspiration + numpy.sqrt(ytf*denominator) + return yf + + +if __name__ == '__main__': + + import os + import go_benchmark + + os.system('cls') + + for tests in ['Bird']: + + klass = getattr(go_benchmark, tests)() + + x0 = klass.generator() + fmin = klass.fglob + bounds = klass.bounds + tolfun = 1e-6 + + xf, yf, fun_evals, msg, tt = AMPGO(klass.evaluator, x0, args=(), local='L-BFGS-B', bounds=bounds, + maxfunevals=20000, totaliter=2000, maxiter=5, eps1=0.02, eps2=0.1, + tabulistsize=5, tabustrategy='farthest', fmin=fmin, disp=1, glbtol=tolfun) + + xb = numpy.asarray(klass.global_optimum) + if xb.ndim == 2: + xb = xb[0, :] + + print('\n\n') + print('F_glob :', klass.evaluator(xb)) + print('F_best :', yf) + print('X_best :', xf) + print('F_evals:', fun_evals) + print('Message:', msg) + print('Tunnels:', tt) + diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index d47401742..6d825bc30 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -35,6 +35,8 @@ from .parameter import Parameter, Parameters +from ._ampgo import AMPGO as ampgo + # scipy version notes: # currently scipy 0.17 is required. # feature scipy version added @@ -1500,7 +1502,7 @@ def brute(self, params=None, Ns=20, keep=50): Parameters ---------- - params : :class:`~lmfit.parameter.Parameters` object, optional + params : :class:`~lmfit.parameter.Parameters`, optional Contains the Parameters for the model. If None, then the Parameters used to initialize the Minimizer object are used. Ns : int, optional @@ -1646,6 +1648,127 @@ def brute(self, params=None, Ns=20, keep=50): return result + def ampgo(self, params=None, **kws): + """Finds the global minimum of a multivariate function using the AMPGO + (Adaptive Memory Programming for Global Optimization) algorithm. 
+ + Parameters + ---------- + params : :class:`~lmfit.parameter.Parameters`, optional + Contains the Parameters for the model. If None, then the + Parameters used to initialize the Minimizer object are used. + **kws : dict, optional + Minimizer options to pass to the ampgo algorithm, the options are + listed below:: + + local: str (default is 'L-BFGS-B') + Name of the local minimization method. Valid options are: + - 'L-BFGS-B' + - 'Nelder-Mead' + - 'Powell' + - 'TNC' + - 'SLSQP' + local_opts: dict (default is None) + Options to pass to the local minimizer. + maxfunevals: int (default is None) + Maximum number of function evaluations. If None, it will be max(10*npars, 100). + totaliter: int (default is 20) + Maximum number of global iterations. + maxiter: int (default is 5) + Maximum number of `Tabu Tunneling` iterations during each global iteration. + glbtol: float (default is 1e-5) + The optimization will stop if the absolute difference between the current + minimum objective function value and the provided global optimum (`fmin`) + is less than `glbtol`. + eps1: float (default is 0.02) + Constant used to define an aspiration value for the objective function during + the Tunneling phase. + eps2: float (default is 0.1) + Perturbation factor used to move away from the latest local minimum at the + start of a Tunneling phase. + tabulistsize: int (default is 5) + Size of the (circular) tabu search list. + tabustrategy: str (default is 'farthest') + Strategy to use when the size of the tabu list exceeds `tabulistsize`. It + can be 'oldest' to drop the oldest point from the tabu list or 'farthest' + to drop the element farthest from the last local minimum found. + fmin: float (default is -numpy.inf) + Objective function's global optimum value (if known). + disp: bool (default is False) + Set to True to print convergence messages. + + Returns + ------- + :class:`MinimizerResult` + Object containing the parameters from the ampgo method, with fit + parameters, statistics and such. The return values (`x0`, `fval`, + `eval`, `msg`, `tunnel`) are stored as `ampgo_` attributes. + + + .. versionadded:: 0.9.10 + + + Notes + ---- + The Python implementation was written by Andrea Gavana in 2014 + (http://infinity77.net/global_optimization/index.html). 
+ + The details of the AMPGO algorithm are described in the paper + "Adaptive Memory Programming for Constrained Global Optimization" + located here: + + http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf + + """ + result = self.prepare_fit(params=params) + + ampgo_kws = dict(local='L-BFGS-B', local_opts=None, + maxfunevals=None, totaliter=20, + maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, + tabulistsize=5, tabustrategy='farthest', + fmin=-np.inf, disp=False) + ampgo_kws.update(self.kws) + ampgo_kws.update(kws) + + varying = np.asarray([par.vary for par in self.params.values()]) + replace_none = lambda x, sign: sign*np.inf if x is None else x + lower_bounds = np.asarray([replace_none(i.min, -1) for i in + self.params.values()])[varying] + upper_bounds = np.asarray([replace_none(i.max, 1) for i in + self.params.values()])[varying] + values = np.asarray([i.value for i in self.params.values()])[varying] + + bounds = [(lb, up) for lb, up in zip(lower_bounds, upper_bounds)] + + ampgo_kws['bounds'] = bounds + result.method = "ampgo, with {} as local solver".format(ampgo_kws['local']) + + ret = ampgo(self.penalty, values, **ampgo_kws) + + result.ampgo_x0 = ret[0] + result.ampgo_fval = ret[1] + result.ampgo_eval = ret[2] + result.ampgo_msg = ret[3] + result.ampgo_tunnel = ret[4] + + for i, par in enumerate(result.var_names): + result.params[par].value = result.ampgo_x0[i] + + result.chisqr = ret[1] + result.nvarys = len(result.var_names) + result.residual = self.__residual(result.ampgo_x0, + apply_bounds_transformation=False) + result.nfev -= 1 + result.ndata = len(result.residual) + result.nfree = result.ndata - result.nvarys + result.redchi = result.chisqr / result.nfree + # this is -2*loglikelihood + _neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata) + result.aic = _neg2_log_likel + 2 * result.nvarys + result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys + + return result + def minimize(self, method='leastsq', params=None, **kws): """Perform the minimization. @@ -1658,6 +1781,7 @@ def minimize(self, method='leastsq', params=None, **kws): - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method by default - `'differential_evolution'`: differential evolution - `'brute'`: brute force method + - `'ampgo'`: Adaptive Memory Programming for Global Optimization - '`nelder`': Nelder-Mead - `'lbfgsb'`: L-BFGS-B - `'powell'`: Powell @@ -1711,6 +1835,8 @@ def minimize(self, method='leastsq', params=None, **kws): function = self.brute elif user_method == 'basinhopping': function = self.basinhopping + elif user_method == 'ampgo': + function = self.ampgo else: function = self.scalar_minimize for key, val in SCALAR_METHODS.items(): @@ -1960,6 +2086,7 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, - `'differential_evolution'`: differential evolution - `'brute'`: brute force method - `'basinhopping'`: basinhopping + - `'ampgo'`: Adaptive Memory Programming for Global Optimization - '`nelder`': Nelder-Mead - `'lbfgsb'`: L-BFGS-B - `'powell'`: Powell From f7c290c24d29cb82b11c37c91be27ad2d152acd1 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Fri, 30 Mar 2018 22:14:36 -0400 Subject: [PATCH 02/15] MAINT: work on the duplicated AMPGO code. 
- remove main() function - add module docstring - remove OPENOPT solvers - add/update function docstrings - update "disp" statements (now boolean) - add comments to the code --- lmfit/_ampgo.py | 311 +++++++++++++++++++++++++++++++++++++++------ lmfit/minimizer.py | 2 +- 2 files changed, 275 insertions(+), 38 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index d8149a3ba..7337d3dc7 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -1,21 +1,20 @@ -from __future__ import print_function +"""Adaptive Memory Programming for Global Optimization (AMPGO). -import numpy +added to lmfit by Renee Otten (2018) -OPENOPT = SCIPY = True +based on the Python implementation of Andrea Gavana +(see: http://infinity77.net/global_optimization/) -try: - from openopt import NLP -except ImportError: - OPENOPT = False +Implementation details can be found in this paper: + http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf +""" -try: - from scipy.optimize import minimize -except ImportError: - SCIPY = False +from __future__ import print_function + +import numpy +from scipy.optimize import minimize SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] -OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp'] def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, @@ -78,6 +77,23 @@ def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, m Copyright 2014 Andrea Gavana """ + import numpy + + OPENOPT = SCIPY = True + + try: + from openopt import NLP + except ImportError: + OPENOPT = False + + try: + from scipy.optimize import minimize + except ImportError: + SCIPY = False + + SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] + OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp'] + if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS: raise Exception('Invalid local solver selected: %s'%local) @@ -264,7 +280,7 @@ def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, m def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): - + """Drop a point from the tabu search list.""" if len(tabulist) < tabulistsize: return tabulist @@ -279,7 +295,13 @@ def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): def tunnel(x0, *args): + """Tunneling objective function. + This function has a global minimum of zero at any feasible point where + `f(x) = aspiration`, and minimizing this expression tends to move away + from all points in `tabulist`. + + """ objfun, aspiration, tabulist = args[0:3] fun_args = () @@ -298,7 +320,7 @@ def tunnel(x0, *args): def inverse_tunnel(xtf, ytf, aspiration, tabulist): - + """Calculate the function value after a tunneling phase step.""" denominator = 1.0 for tabu in tabulist: @@ -310,35 +332,250 @@ def inverse_tunnel(xtf, ytf, aspiration, tabulist): return yf -if __name__ == '__main__': +def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, + maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, + eps2=0.1, tabulistsize=5, tabustrategy='farthest', fmin=-numpy.inf, + disp=False): + """Find the global minimum of a multivariate function using the AMPGO + (Adaptive Memory Programming for Global Optimization) algorithm. + + Parameters + ---------- + objfun: callable + Objective function to be minimized. 
The function must have the signature: + objfun(params, *args, **kws) + x0: numpy.ndarray + Initial guesses for parameter values. + args: tuple, optional + Additional arguments passed to `objfun`. + local: str, optional + Name of the local minimization method. Valid options are: + + - `'L-BFGS-B'` (default) + - `Nelder-Mead'` + - `'Powell'` + - `'TNC'` + - `'SLSQP'` + + local_opts: dict, optional + Options to pass to the local minimizer. + bounds: sequence, optional + List of tuples specifying the lower and upper bound for each + independent variable [(`xl0`, `xu0`), (`xl1`, `xu1`), ...]. + maxfunevals: int, optional + Maximum number of function evaluations. + totaliter: int, optional + Maximum number of global iterations. + maxiter: int, optional + Maximum number of `Tabu Tunneling` iterations during each global + iteration. + glbtol: float, optional + The optimization will stop if the absolute difference between the + current minimum objective function value and the provided global + optimum (`fmin`) is less than `glbtol`. + eps1: float, optional + Constant used to define an aspiration value for the objective function + during the Tunneling phase. + eps2: float, optional + Perturbation factor used to move away from the latest local minimum + at the start of a Tunneling phase. + tabulistsize: int, optional + Size of the (circular) tabu search list. + tabustrategy: str, optional + Strategy to use when the size of the tabu list exceeds `tabulistsize`. + It can be 'oldest' to drop the oldest point from the tabu list or + 'farthest' to drop the element farthest from the last local minimum + found. + fmin: float, optional + Objective function's global optimum value (if known, the default + is -numpy.inf). + disp: bool, optional + Set to True to print convergence messages. + + Returns + ------- + tuple: + A tuple of 5 elements, in the following order: + 1. **best_x** (`array_like`): the estimated position of the global + minimum. + 2. **best_f** (`float`): the value of `objfun` at the minimum. + 3. **evaluations** (`integer`): the number of function evaluations. + 4. **msg** (`string`): a message describes the cause of the termination. + 5. **tunnel_info** (`tuple`): a tuple containing the total number of + Tunneling phases performed and the successful ones. + + Notes + ----- + The detailed implementation of AMPGO is described in the paper + "Adaptive Memory Programming for Constrained Global Optimization" located + here: + + http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf + + """ + if local not in SCIPY_LOCAL_SOLVERS: + raise Exception('Invalid local solver selected: {}'.format(local)) + + x0 = numpy.atleast_1d(x0) + n = len(x0) + + if bounds is None: + bounds = [(None, None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + + low = [0]*n + up = [0]*n + for i in range(n): + if bounds[i] is None: + l, u = -numpy.inf, numpy.inf + else: + l, u = bounds[i] + if l is None: + low[i] = -numpy.inf + else: + low[i] = l + if u is None: + up[i] = numpy.inf + else: + up[i] = u + + if maxfunevals is None: + maxfunevals = max(100, 10*len(x0)) + + if tabulistsize < 1: + raise Exception('Invalid tabulistsize specified: {:d}. It should be ' + 'an integer greater than zero.'.format(tabulistsize)) + if tabustrategy not in ['oldest', 'farthest']: + raise Exception('Invalid tabustrategy specified: {:s}. 
It must be one ' + 'of "oldest" or "farthest".'.format(tabustrategy)) + + low = numpy.asarray(low) + up = numpy.asarray(up) + + tabulist = [] + best_f = numpy.inf + best_x = x0 + + global_iter = 0 + all_tunnel = success_tunnel = 0 + evaluations = 0 + + if glbtol < 1e-8: + local_tol = glbtol + else: + local_tol = 1e-8 + + while 1: + + # minimization to find local minimum, either from initial values or + # after a successful tunneling loop + if disp: + print('\n{0}\nStarting MINIMIZATION Phase {1:d}\n{0}' + .format('='*72, global_iter+1)) + + options = {'maxiter': max(1, maxfunevals), 'disp': disp} + if local_opts is not None: + options.update(local_opts) + res = minimize(objfun, x0, args=args, method=local, bounds=bounds, + tol=local_tol, options=options) + xf, yf, num_fun = res['x'], res['fun'], res['nfev'] - import os - import go_benchmark + maxfunevals -= num_fun + evaluations += num_fun + + if yf < best_f: + best_f = yf + best_x = xf + + if disp: + print('\n\n ==> Reached local minimum: {:.5g}\n'.format(yf)) + + if best_f < fmin + glbtol: + if disp: + print('='*72) + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + if maxfunevals <= 0: + if disp: + print('='*72) + return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + + # if needed, drop a value from the tabu tunneling list and add the + # current best solution + tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) + tabulist.append(xf) + + i = improve = 0 + + while i < maxiter and improve == 0: + + if disp: + print('{0}\nStarting TUNNELING Phase ({1:d}-{2:d})\n{0}' + .format('='*72, global_iter+1, i+1)) + + all_tunnel += 1 + + # generate a new starting point away from current best solution + r = numpy.random.uniform(-1.0, 1.0, size=(n, )) + beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r) + + if numpy.abs(beta) < 1e-8: + beta = eps2 + + x0 = xf + beta*r + + # make sure that the new starting point is within the bounds + x0 = numpy.where(x0 < low, low, x0) + x0 = numpy.where(x0 > up, up, x0) + + # aspired value of the objective function for the tunneling loop + aspiration = best_f - eps1*(1.0 + numpy.abs(best_f)) - os.system('cls') + tunnel_args = tuple([objfun, aspiration, tabulist] + list(args)) - for tests in ['Bird']: + options = {'maxiter': max(1, maxfunevals), 'disp': disp} + if local_opts is not None: + options.update(local_opts) - klass = getattr(go_benchmark, tests)() + res = minimize(tunnel, x0, args=tunnel_args, method=local, + bounds=bounds, tol=local_tol, options=options) + xf, yf, num_fun = res['x'], res['fun'], res['nfev'] - x0 = klass.generator() - fmin = klass.fglob - bounds = klass.bounds - tolfun = 1e-6 + maxfunevals -= num_fun + evaluations += num_fun + + yf = inverse_tunnel(xf, yf, aspiration, tabulist) - xf, yf, fun_evals, msg, tt = AMPGO(klass.evaluator, x0, args=(), local='L-BFGS-B', bounds=bounds, - maxfunevals=20000, totaliter=2000, maxiter=5, eps1=0.02, eps2=0.1, - tabulistsize=5, tabustrategy='farthest', fmin=fmin, disp=1, glbtol=tolfun) + if yf <= best_f + glbtol: + oldf = best_f + best_f = yf + best_x = xf + improve = 1 + success_tunnel += 1 - xb = numpy.asarray(klass.global_optimum) - if xb.ndim == 2: - xb = xb[0, :] + if disp: + print('\n\n ==> Successful tunnelling phase. 
Reached new ' + 'local minimum: {:.5g} < {:.5g}\n'.format(yf, oldf)) - print('\n\n') - print('F_glob :', klass.evaluator(xb)) - print('F_best :', yf) - print('X_best :', xf) - print('F_evals:', fun_evals) - print('Message:', msg) - print('Tunnels:', tt) + if best_f < fmin + glbtol: + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + i += 1 + + if maxfunevals <= 0: + return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + + tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) + tabulist.append(xf) + + if disp: + print('='*72) + + global_iter += 1 + x0 = xf.copy() + + if global_iter >= totaliter: + return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel) + + if best_f < fmin + glbtol: + return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index 6d825bc30..6695851cc 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -35,7 +35,7 @@ from .parameter import Parameter, Parameters -from ._ampgo import AMPGO as ampgo +from ._ampgo import ampgo # scipy version notes: # currently scipy 0.17 is required. From 04ff52722d57085de8e5ac97159b9c818fd7f12b Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sat, 31 Mar 2018 13:06:45 -0400 Subject: [PATCH 03/15] MAINT: use "n" instead of calculating "len(x0)" again". --- lmfit/_ampgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index 7337d3dc7..a5211bcf6 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -441,7 +441,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, up[i] = u if maxfunevals is None: - maxfunevals = max(100, 10*len(x0)) + maxfunevals = max(100, 10*n) if tabulistsize < 1: raise Exception('Invalid tabulistsize specified: {:d}. It should be ' From 7a357cefdee7d3483e1f3b2b35451bf3cb00be24 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sat, 31 Mar 2018 16:04:42 -0400 Subject: [PATCH 04/15] MAINT: simplify code to handle bounds. --- lmfit/_ampgo.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index a5211bcf6..9180ae642 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -424,21 +424,10 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, if len(bounds) != n: raise ValueError('length of x0 != length of bounds') - low = [0]*n - up = [0]*n - for i in range(n): - if bounds[i] is None: - l, u = -numpy.inf, numpy.inf - else: - l, u = bounds[i] - if l is None: - low[i] = -numpy.inf - else: - low[i] = l - if u is None: - up[i] = numpy.inf - else: - up[i] = u + bounds = [b if b is not None else (None, None) for b in bounds] + _bounds = [(-numpy.inf if l is None else l, numpy.inf if u is None else u) + for l, u in bounds] + low, up = numpy.array(_bounds).T if maxfunevals is None: maxfunevals = max(100, 10*n) @@ -450,9 +439,6 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, raise Exception('Invalid tabustrategy specified: {:s}. 
It must be one ' 'of "oldest" or "farthest".'.format(tabustrategy)) - low = numpy.asarray(low) - up = numpy.asarray(up) - tabulist = [] best_f = numpy.inf best_x = x0 From 92424f5cb438bfe13c9feebf54509ff740442c8b Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 1 Apr 2018 16:30:20 -0400 Subject: [PATCH 05/15] MAINT: simplify setting of local_tol. --- lmfit/_ampgo.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index 9180ae642..da2d07a95 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -447,10 +447,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, all_tunnel = success_tunnel = 0 evaluations = 0 - if glbtol < 1e-8: - local_tol = glbtol - else: - local_tol = 1e-8 + local_tol = min(1e-8, glbtol) while 1: From 50cc7426f49931d1908274f2c13134b1fd5e26d2 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 1 Apr 2018 21:05:29 -0400 Subject: [PATCH 06/15] MAINT: import numpy as np. --- lmfit/_ampgo.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index da2d07a95..b2f87e1ee 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -11,7 +11,7 @@ from __future__ import print_function -import numpy +import numpy as np from scipy.optimize import minimize SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] @@ -19,7 +19,7 @@ def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, - tabustrategy='farthest', fmin=-numpy.inf, disp=None): + tabustrategy='farthest', fmin=-np.inf, disp=None): """ Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for Global Optimization) algorithm. @@ -287,8 +287,8 @@ def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): if tabustrategy == 'oldest': tabulist.pop(0) else: - distance = numpy.sqrt(numpy.sum((tabulist-xf)**2, axis=1)) - index = numpy.argmax(distance) + distance = np.sqrt(np.sum((tabulist-xf)**2, axis=1)) + index = np.argmax(distance) tabulist.pop(index) return tabulist @@ -312,7 +312,7 @@ def tunnel(x0, *args): denominator = 1.0 for tabu in tabulist: - denominator = denominator*numpy.sqrt(numpy.sum((x0 - tabu)**2)) + denominator = denominator*np.sqrt(np.sum((x0 - tabu)**2)) ytf = numerator/denominator @@ -324,17 +324,17 @@ def inverse_tunnel(xtf, ytf, aspiration, tabulist): denominator = 1.0 for tabu in tabulist: - denominator = denominator*numpy.sqrt(numpy.sum((xtf - tabu)**2)) + denominator = denominator*np.sqrt(np.sum((xtf - tabu)**2)) numerator = ytf*denominator - yf = aspiration + numpy.sqrt(ytf*denominator) + yf = aspiration + np.sqrt(ytf*denominator) return yf def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, - eps2=0.1, tabulistsize=5, tabustrategy='farthest', fmin=-numpy.inf, + eps2=0.1, tabulistsize=5, tabustrategy='farthest', fmin=-np.inf, disp=False): """Find the global minimum of a multivariate function using the AMPGO (Adaptive Memory Programming for Global Optimization) algorithm. 
@@ -416,7 +416,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, if local not in SCIPY_LOCAL_SOLVERS: raise Exception('Invalid local solver selected: {}'.format(local)) - x0 = numpy.atleast_1d(x0) + x0 = np.atleast_1d(x0) n = len(x0) if bounds is None: @@ -425,9 +425,9 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, raise ValueError('length of x0 != length of bounds') bounds = [b if b is not None else (None, None) for b in bounds] - _bounds = [(-numpy.inf if l is None else l, numpy.inf if u is None else u) + _bounds = [(-np.inf if l is None else l, np.inf if u is None else u) for l, u in bounds] - low, up = numpy.array(_bounds).T + low, up = np.array(_bounds).T if maxfunevals is None: maxfunevals = max(100, 10*n) @@ -440,7 +440,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, 'of "oldest" or "farthest".'.format(tabustrategy)) tabulist = [] - best_f = numpy.inf + best_f = np.inf best_x = x0 global_iter = 0 @@ -499,20 +499,20 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, all_tunnel += 1 # generate a new starting point away from current best solution - r = numpy.random.uniform(-1.0, 1.0, size=(n, )) - beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r) + r = np.random.uniform(-1.0, 1.0, size=(n, )) + beta = eps2*np.linalg.norm(xf)/np.linalg.norm(r) - if numpy.abs(beta) < 1e-8: + if np.abs(beta) < 1e-8: beta = eps2 x0 = xf + beta*r # make sure that the new starting point is within the bounds - x0 = numpy.where(x0 < low, low, x0) - x0 = numpy.where(x0 > up, up, x0) + x0 = np.where(x0 < low, low, x0) + x0 = np.where(x0 > up, up, x0) # aspired value of the objective function for the tunneling loop - aspiration = best_f - eps1*(1.0 + numpy.abs(best_f)) + aspiration = best_f - eps1*(1.0 + np.abs(best_f)) tunnel_args = tuple([objfun, aspiration, tabulist] + list(args)) From 205fafbfb1ea4f8d96b5a69e9ff3da39dba48d61 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Mon, 2 Apr 2018 16:31:38 -0400 Subject: [PATCH 07/15] MAINT: use the calculated numerator in inverse_tunnel function. --- lmfit/_ampgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index b2f87e1ee..d3271c564 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -327,8 +327,8 @@ def inverse_tunnel(xtf, ytf, aspiration, tabulist): denominator = denominator*np.sqrt(np.sum((xtf - tabu)**2)) numerator = ytf*denominator + yf = aspiration + np.sqrt(numerator) - yf = aspiration + np.sqrt(ytf*denominator) return yf From 2b3b5a5a29b0773c9f89bfac787dc532878c1264 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Mon, 2 Apr 2018 16:39:02 -0400 Subject: [PATCH 08/15] STY: minor PEP8 style changes and comment updates. 
--- lmfit/_ampgo.py | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index d3271c564..7bcfd5cb6 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -14,7 +14,7 @@ import numpy as np from scipy.optimize import minimize -SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] +SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, @@ -287,7 +287,7 @@ def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): if tabustrategy == 'oldest': tabulist.pop(0) else: - distance = np.sqrt(np.sum((tabulist-xf)**2, axis=1)) + distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1)) index = np.argmax(distance) tabulist.pop(index) @@ -312,7 +312,7 @@ def tunnel(x0, *args): denominator = 1.0 for tabu in tabulist: - denominator = denominator*np.sqrt(np.sum((x0 - tabu)**2)) + denominator = denominator * np.sqrt(np.sum((x0 - tabu)**2)) ytf = numerator/denominator @@ -324,7 +324,7 @@ def inverse_tunnel(xtf, ytf, aspiration, tabulist): denominator = 1.0 for tabu in tabulist: - denominator = denominator*np.sqrt(np.sum((xtf - tabu)**2)) + denominator = denominator * np.sqrt(np.sum((xtf - tabu)**2)) numerator = ytf*denominator yf = aspiration + np.sqrt(numerator) @@ -477,14 +477,18 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, if best_f < fmin + glbtol: if disp: print('='*72) - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Optimization terminated successfully', + (all_tunnel, success_tunnel)) if maxfunevals <= 0: if disp: print('='*72) - return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Maximum number of function evaluations exceeded', + (all_tunnel, success_tunnel)) # if needed, drop a value from the tabu tunneling list and add the - # current best solution + # current solution tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) tabulist.append(xf) @@ -498,16 +502,16 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, all_tunnel += 1 - # generate a new starting point away from current best solution + # generate a new starting point away from the current solution r = np.random.uniform(-1.0, 1.0, size=(n, )) - beta = eps2*np.linalg.norm(xf)/np.linalg.norm(r) + beta = eps2*np.linalg.norm(xf) / np.linalg.norm(r) if np.abs(beta) < 1e-8: beta = eps2 x0 = xf + beta*r - # make sure that the new starting point is within the bounds + # make sure that the new starting point is within bounds x0 = np.where(x0 < low, low, x0) x0 = np.where(x0 > up, up, x0) @@ -541,12 +545,16 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, 'local minimum: {:.5g} < {:.5g}\n'.format(yf, oldf)) if best_f < fmin + glbtol: - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Optimization terminated successfully', + (all_tunnel, success_tunnel)) i += 1 if maxfunevals <= 0: - return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Maximum number of function evaluations exceeded', + (all_tunnel, 
success_tunnel)) tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) tabulist.append(xf) @@ -558,7 +566,11 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, x0 = xf.copy() if global_iter >= totaliter: - return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Maximum number of global iterations exceeded', + (all_tunnel, success_tunnel)) if best_f < fmin + glbtol: - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) + return (best_x, best_f, evaluations, + 'Optimization terminated successfully', + (all_tunnel, success_tunnel)) From 07c5519020178d9d0ff9cf74d5496c00985fcdd1 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Thu, 5 Apr 2018 18:51:33 -0400 Subject: [PATCH 09/15] MAINT: remove original AMPGO function. --- lmfit/_ampgo.py | 368 +++++++----------------------------------------- 1 file changed, 53 insertions(+), 315 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index 7bcfd5cb6..f68822602 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -17,321 +17,6 @@ SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] -def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, - totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, - tabustrategy='farthest', fmin=-np.inf, disp=None): - """ - Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for - Global Optimization) algorithm. - - :param `objfun`: Function to be optimized, in the form ``f(x, *args)``. - :type `objfun`: callable - :param `args`: Additional arguments passed to `objfun`. - :type `args`: tuple - :param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available - `scipy` local solvers or `OpenOpt` solvers. - :type `local`: string - :param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable - [(`xl0`, `xu0`), (`xl1`, `xu1`), ...] - :type `bounds`: list - :param `maxfunevals`: The maximum number of function evaluations allowed. - :type `maxfunevals`: integer - :param `totaliter`: The maximum number of global iterations allowed. - :type `totaliter`: integer - :param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration. - :type `maxiter`: integer - :param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective - function value and the provided global optimum (`fmin`) is less than `glbtol`. - :type `glbtol`: float - :param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase. - :type `eps1`: float - :param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase. - :type `eps2`: float - :param `tabulistsize`: The size of the tabu search list (a circular list). - :type `tabulistsize`: integer - :param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be - 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from - the last local minimum found. - :type `tabustrategy`: string - :param `fmin`: If known, the objective function global optimum value. - :type `fmin`: float - :param `disp`: If zero or defaulted, then no output is printed on screen. 
If a positive number, then status - messages are printed. - :type `disp`: integer - - :returns: A tuple of 5 elements, in the following order: - - 1. **best_x** (`array_like`): the estimated position of the global minimum. - 2. **best_f** (`float`): the value of `objfun` at the minimum. - 3. **evaluations** (`integer`): the number of function evaluations. - 4. **msg** (`string`): a message describes the cause of the termination. - 5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the - successful ones. - - :rtype: `tuple` - - The detailed implementation of AMPGO is described in the paper - "Adaptive Memory Programming for Constrained Global Optimization" located here: - - http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf - - Copyright 2014 Andrea Gavana - """ - import numpy - - OPENOPT = SCIPY = True - - try: - from openopt import NLP - except ImportError: - OPENOPT = False - - try: - from scipy.optimize import minimize - except ImportError: - SCIPY = False - - SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP'] - OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp'] - - - if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS: - raise Exception('Invalid local solver selected: %s'%local) - - if local in SCIPY_LOCAL_SOLVERS and not SCIPY: - raise Exception('The selected solver %s is not available as there is no scipy installation'%local) - - if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT: - raise Exception('The selected solver %s is not available as there is no OpenOpt installation'%local) - - x0 = numpy.atleast_1d(x0) - n = len(x0) - - if bounds is None: - bounds = [(None, None)] * n - if len(bounds) != n: - raise ValueError('length of x0 != length of bounds') - - low = [0]*n - up = [0]*n - for i in range(n): - if bounds[i] is None: - l, u = -numpy.inf, numpy.inf - else: - l, u = bounds[i] - if l is None: - low[i] = -numpy.inf - else: - low[i] = l - if u is None: - up[i] = numpy.inf - else: - up[i] = u - - if maxfunevals is None: - maxfunevals = max(100, 10*len(x0)) - - if tabulistsize < 1: - raise Exception('Invalid tabulistsize specified: %s. It should be an integer greater than zero.'%tabulistsize) - if tabustrategy not in ['oldest', 'farthest']: - raise Exception('Invalid tabustrategy specified: %s. 
It must be one of "oldest" or "farthest"'%tabustrategy) - - iprint = 50 - if disp is None or disp <= 0: - disp = 0 - iprint = -1 - - low = numpy.asarray(low) - up = numpy.asarray(up) - - tabulist = [] - best_f = numpy.inf - best_x = x0 - - global_iter = 0 - all_tunnel = success_tunnel = 0 - evaluations = 0 - - if glbtol < 1e-8: - local_tol = glbtol - else: - local_tol = 1e-8 - - while 1: - - if disp > 0: - print('\n') - print('='*72) - print('Starting MINIMIZATION Phase %-3d'%(global_iter+1)) - print('='*72) - - if local in OPENOPT_LOCAL_SOLVERS: - problem = NLP(objfun, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint) - problem.args = args - - results = problem.solve(local) - xf, yf, num_fun = results.xf, results.ff, results.evals['f'] - else: - options = {'maxiter': max(1, maxfunevals), 'disp': disp} - if local_opts is not None: - options.update(local_opts) - res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options) - xf, yf, num_fun = res['x'], res['fun'], res['nfev'] - - maxfunevals -= num_fun - evaluations += num_fun - - if yf < best_f: - best_f = yf - best_x = xf - - if disp > 0: - print('\n\n ==> Reached local minimum: %s\n'%yf) - - if best_f < fmin + glbtol: - if disp > 0: - print('='*72) - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) - if maxfunevals <= 0: - if disp > 0: - print('='*72) - return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) - - tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) - tabulist.append(xf) - - i = improve = 0 - - while i < maxiter and improve == 0: - - if disp > 0: - print('-'*72) - print('Starting TUNNELLING Phase (%3d-%3d)'%(global_iter+1, i+1)) - print('-'*72) - - all_tunnel += 1 - - r = numpy.random.uniform(-1.0, 1.0, size=(n, )) - beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r) - - if numpy.abs(beta) < 1e-8: - beta = eps2 - - x0 = xf + beta*r - - x0 = numpy.where(x0 < low, low, x0) - x0 = numpy.where(x0 > up , up , x0) - - aspiration = best_f - eps1*(1.0 + numpy.abs(best_f)) - - tunnel_args = tuple([objfun, aspiration, tabulist] + list(args)) - - if local in OPENOPT_LOCAL_SOLVERS: - problem = NLP(tunnel, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint) - problem.args = tunnel_args - - results = problem.solve(local) - xf, yf, num_fun = results.xf, results.ff, results.evals['f'] - else: - options = {'maxiter': max(1, maxfunevals), 'disp': disp} - if local_opts is not None: - options.update(local_opts) - - res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options) - xf, yf, num_fun = res['x'], res['fun'], res['nfev'] - - maxfunevals -= num_fun - evaluations += num_fun - - yf = inverse_tunnel(xf, yf, aspiration, tabulist) - - if yf <= best_f + glbtol: - oldf = best_f - best_f = yf - best_x = xf - improve = 1 - success_tunnel += 1 - - if disp > 0: - print('\n\n ==> Successful tunnelling phase. 
Reached local minimum: %s < %s\n'%(yf, oldf)) - - if best_f < fmin + glbtol: - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) - - i += 1 - - if maxfunevals <= 0: - return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel) - - tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy) - tabulist.append(xf) - - if disp > 0: - print('='*72) - - global_iter += 1 - x0 = xf.copy() - - if global_iter >= totaliter: - return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel) - - if best_f < fmin + glbtol: - return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel) - - -def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): - """Drop a point from the tabu search list.""" - if len(tabulist) < tabulistsize: - return tabulist - - if tabustrategy == 'oldest': - tabulist.pop(0) - else: - distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1)) - index = np.argmax(distance) - tabulist.pop(index) - - return tabulist - - -def tunnel(x0, *args): - """Tunneling objective function. - - This function has a global minimum of zero at any feasible point where - `f(x) = aspiration`, and minimizing this expression tends to move away - from all points in `tabulist`. - - """ - objfun, aspiration, tabulist = args[0:3] - - fun_args = () - if len(args) > 3: - fun_args = tuple(args[3:]) - - numerator = (objfun(x0, *fun_args) - aspiration)**2 - denominator = 1.0 - - for tabu in tabulist: - denominator = denominator * np.sqrt(np.sum((x0 - tabu)**2)) - - ytf = numerator/denominator - - return ytf - - -def inverse_tunnel(xtf, ytf, aspiration, tabulist): - """Calculate the function value after a tunneling phase step.""" - denominator = 1.0 - - for tabu in tabulist: - denominator = denominator * np.sqrt(np.sum((xtf - tabu)**2)) - - numerator = ytf*denominator - yf = aspiration + np.sqrt(numerator) - - return yf - - def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5, tabustrategy='farthest', fmin=-np.inf, @@ -574,3 +259,56 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, return (best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)) + + +def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): + """Drop a point from the tabu search list.""" + if len(tabulist) < tabulistsize: + return tabulist + + if tabustrategy == 'oldest': + tabulist.pop(0) + else: + distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1)) + index = np.argmax(distance) + tabulist.pop(index) + + return tabulist + + +def tunnel(x0, *args): + """Tunneling objective function. + + This function has a global minimum of zero at any feasible point where + `f(x) = aspiration`, and minimizing this expression tends to move away + from all points in `tabulist`. 
+ + """ + objfun, aspiration, tabulist = args[0:3] + + fun_args = () + if len(args) > 3: + fun_args = tuple(args[3:]) + + numerator = (objfun(x0, *fun_args) - aspiration)**2 + denominator = 1.0 + + for tabu in tabulist: + denominator = denominator * np.sqrt(np.sum((x0 - tabu)**2)) + + ytf = numerator/denominator + + return ytf + + +def inverse_tunnel(xtf, ytf, aspiration, tabulist): + """Calculate the function value after a tunneling phase step.""" + denominator = 1.0 + + for tabu in tabulist: + denominator = denominator * np.sqrt(np.sum((xtf - tabu)**2)) + + numerator = ytf*denominator + yf = aspiration + np.sqrt(numerator) + + return yf From b032feb2e026bf00238ab654c91ce0057b247fa8 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Mon, 9 Apr 2018 17:49:13 -0400 Subject: [PATCH 10/15] MAINT: remove 'fmin' parameter and update description of 'glbtol'. --- lmfit/_ampgo.py | 26 ++------------------------ lmfit/minimizer.py | 15 +++++---------- 2 files changed, 7 insertions(+), 34 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index f68822602..fe6a74753 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -19,8 +19,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, - eps2=0.1, tabulistsize=5, tabustrategy='farthest', fmin=-np.inf, - disp=False): + eps2=0.1, tabulistsize=5, tabustrategy='farthest', disp=False): """Find the global minimum of a multivariate function using the AMPGO (Adaptive Memory Programming for Global Optimization) algorithm. @@ -55,9 +54,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, Maximum number of `Tabu Tunneling` iterations during each global iteration. glbtol: float, optional - The optimization will stop if the absolute difference between the - current minimum objective function value and the provided global - optimum (`fmin`) is less than `glbtol`. + Tolerance whether or not to accept a solution after a tunneling phase. eps1: float, optional Constant used to define an aspiration value for the objective function during the Tunneling phase. @@ -71,9 +68,6 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, It can be 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from the last local minimum found. - fmin: float, optional - Objective function's global optimum value (if known, the default - is -numpy.inf). disp: bool, optional Set to True to print convergence messages. @@ -159,12 +153,6 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, if disp: print('\n\n ==> Reached local minimum: {:.5g}\n'.format(yf)) - if best_f < fmin + glbtol: - if disp: - print('='*72) - return (best_x, best_f, evaluations, - 'Optimization terminated successfully', - (all_tunnel, success_tunnel)) if maxfunevals <= 0: if disp: print('='*72) @@ -229,11 +217,6 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, print('\n\n ==> Successful tunnelling phase. 
Reached new ' 'local minimum: {:.5g} < {:.5g}\n'.format(yf, oldf)) - if best_f < fmin + glbtol: - return (best_x, best_f, evaluations, - 'Optimization terminated successfully', - (all_tunnel, success_tunnel)) - i += 1 if maxfunevals <= 0: @@ -255,11 +238,6 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel)) - if best_f < fmin + glbtol: - return (best_x, best_f, evaluations, - 'Optimization terminated successfully', - (all_tunnel, success_tunnel)) - def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy): """Drop a point from the tabu search list.""" diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index 6695851cc..c79a309cd 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -1677,9 +1677,7 @@ def ampgo(self, params=None, **kws): maxiter: int (default is 5) Maximum number of `Tabu Tunneling` iterations during each global iteration. glbtol: float (default is 1e-5) - The optimization will stop if the absolute difference between the current - minimum objective function value and the provided global optimum (`fmin`) - is less than `glbtol`. + Tolerance whether or not to accept a solution after a tunneling phase. eps1: float (default is 0.02) Constant used to define an aspiration value for the objective function during the Tunneling phase. @@ -1692,8 +1690,6 @@ def ampgo(self, params=None, **kws): Strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be 'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from the last local minimum found. - fmin: float (default is -numpy.inf) - Objective function's global optimum value (if known). disp: bool (default is False) Set to True to print convergence messages. @@ -1722,11 +1718,10 @@ def ampgo(self, params=None, **kws): """ result = self.prepare_fit(params=params) - ampgo_kws = dict(local='L-BFGS-B', local_opts=None, - maxfunevals=None, totaliter=20, - maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, - tabulistsize=5, tabustrategy='farthest', - fmin=-np.inf, disp=False) + ampgo_kws = dict(local='L-BFGS-B', local_opts=None, maxfunevals=None, + totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, + eps2=0.1, tabulistsize=5, tabustrategy='farthest', + disp=False) ampgo_kws.update(self.kws) ampgo_kws.update(kws) From 64ebd1f0334b26f2497b0e56b36848b9770d9477 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 29 Apr 2018 10:02:10 -0400 Subject: [PATCH 11/15] MAINT: use apply_bounds_transformation. Do not pass "bounds" to the underlying minimizer anymore; use the "lmfit" way of making sure that parameters stay within bounds. 
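[editor's note: the "lmfit way" mentioned above is, as far as can be told from the lmfit documentation, the MINUIT-style sine transformation: the local solver works on an unconstrained internal variable and the bounded parameter value is recovered from it, so no per-solver `bounds` argument is needed. A minimal sketch of that mapping follows; the helper names `to_internal`/`from_internal` are illustrative, not the actual lmfit API.]

    import numpy as np

    def to_internal(value, vmin, vmax):
        # Map a bounded value into an unconstrained internal variable
        # (illustrative helper, not the lmfit implementation).
        return np.arcsin(2.0 * (value - vmin) / (vmax - vmin) - 1.0)

    def from_internal(internal, vmin, vmax):
        # Map the internal variable back into [vmin, vmax]; any real input
        # lands inside the bounds, which is what keeps parameters in range.
        return vmin + (np.sin(internal) + 1.0) * (vmax - vmin) / 2.0

    assert 0.0 <= from_internal(12.3, 0.0, 10.0) <= 10.0
    assert np.isclose(from_internal(to_internal(7.5, 0.0, 10.0), 0.0, 10.0), 7.5)
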
--- lmfit/minimizer.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index c79a309cd..ac30b3004 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -1725,17 +1725,7 @@ def ampgo(self, params=None, **kws): ampgo_kws.update(self.kws) ampgo_kws.update(kws) - varying = np.asarray([par.vary for par in self.params.values()]) - replace_none = lambda x, sign: sign*np.inf if x is None else x - lower_bounds = np.asarray([replace_none(i.min, -1) for i in - self.params.values()])[varying] - upper_bounds = np.asarray([replace_none(i.max, 1) for i in - self.params.values()])[varying] - values = np.asarray([i.value for i in self.params.values()])[varying] - - bounds = [(lb, up) for lb, up in zip(lower_bounds, upper_bounds)] - - ampgo_kws['bounds'] = bounds + values = result.init_vals result.method = "ampgo, with {} as local solver".format(ampgo_kws['local']) ret = ampgo(self.penalty, values, **ampgo_kws) @@ -1751,8 +1741,7 @@ def ampgo(self, params=None, **kws): result.chisqr = ret[1] result.nvarys = len(result.var_names) - result.residual = self.__residual(result.ampgo_x0, - apply_bounds_transformation=False) + result.residual = self.__residual(result.ampgo_x0) result.nfev -= 1 result.ndata = len(result.residual) result.nfree = result.ndata - result.nvarys From 58588f87d18b5331eb2e4032e0d815a761d269e6 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 29 Apr 2018 21:10:01 -0400 Subject: [PATCH 12/15] MAINT: handle AbortFitException. --- lmfit/minimizer.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index ac30b3004..2415f318d 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -1728,21 +1728,28 @@ def ampgo(self, params=None, **kws): values = result.init_vals result.method = "ampgo, with {} as local solver".format(ampgo_kws['local']) - ret = ampgo(self.penalty, values, **ampgo_kws) + try: + ret = ampgo(self.penalty, values, **ampgo_kws) + except AbortFitException: + pass + + if not result.aborted: + result.ampgo_x0 = ret[0] + result.ampgo_fval = ret[1] + result.ampgo_eval = ret[2] + result.ampgo_msg = ret[3] + result.ampgo_tunnel = ret[4] - result.ampgo_x0 = ret[0] - result.ampgo_fval = ret[1] - result.ampgo_eval = ret[2] - result.ampgo_msg = ret[3] - result.ampgo_tunnel = ret[4] + for i, par in enumerate(result.var_names): + result.params[par].value = result.ampgo_x0[i] - for i, par in enumerate(result.var_names): - result.params[par].value = result.ampgo_x0[i] + result.chisqr = ret[1] + result.residual = self.__residual(result.ampgo_x0) + result.nfev -= 1 + else: + result.chisqr = (result.residual**2).sum() - result.chisqr = ret[1] result.nvarys = len(result.var_names) - result.residual = self.__residual(result.ampgo_x0) - result.nfev -= 1 result.ndata = len(result.residual) result.nfree = result.ndata - result.nvarys result.redchi = result.chisqr / result.nfree From 24415477fb5c4c1a8b7cbf0205f545770e5adaf3 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 29 Apr 2018 10:36:44 -0400 Subject: [PATCH 13/15] MAINT: function value should be a scalar not an array. When fitting a function (i.e., in the test) AMPGO returns an array instead of a scalar for the function value. 
--- lmfit/_ampgo.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index fe6a74753..704c8a455 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -142,6 +142,8 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options) xf, yf, num_fun = res['x'], res['fun'], res['nfev'] + if isinstance(yf, np.ndarray): + yf = yf[0] maxfunevals -= num_fun evaluations += num_fun @@ -200,6 +202,8 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options) xf, yf, num_fun = res['x'], res['fun'], res['nfev'] + if isinstance(yf, np.ndarray): + yf = yf[0] maxfunevals -= num_fun evaluations += num_fun From 1626e8caf402d230f27040c87a6d6e0b77af9a60 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 29 Apr 2018 16:10:02 -0400 Subject: [PATCH 14/15] ENH: maxfuneval is None means no limit on the number of function evaluations. The optimization will stop after the specified number of iterations ("totaliter"). --- lmfit/_ampgo.py | 5 +++-- lmfit/minimizer.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py index 704c8a455..5ffdf9cd4 100644 --- a/lmfit/_ampgo.py +++ b/lmfit/_ampgo.py @@ -47,7 +47,8 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, List of tuples specifying the lower and upper bound for each independent variable [(`xl0`, `xu0`), (`xl1`, `xu1`), ...]. maxfunevals: int, optional - Maximum number of function evaluations. + Maximum number of function evaluations. If None, the optimization will + stop after `totaliter` number of iterations. totaliter: int, optional Maximum number of global iterations. maxiter: int, optional @@ -109,7 +110,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, low, up = np.array(_bounds).T if maxfunevals is None: - maxfunevals = max(100, 10*n) + maxfunevals = np.inf if tabulistsize < 1: raise Exception('Invalid tabulistsize specified: {:d}. It should be ' diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py index 2415f318d..d4bb610e5 100644 --- a/lmfit/minimizer.py +++ b/lmfit/minimizer.py @@ -1671,7 +1671,8 @@ def ampgo(self, params=None, **kws): local_opts: dict (default is None) Options to pass to the local minimizer. maxfunevals: int (default is None) - Maximum number of function evaluations. If None, it will be max(10*npars, 100). + Maximum number of function evaluations. If None, the optimization will stop + after `totaliter` number of iterations. totaliter: int (default is 20) Maximum number of global iterations. 
maxiter: int (default is 5) From 0bf71d04a9f48a77e49ac9e5b1085ac6ee228771 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Sun, 29 Apr 2018 10:36:52 -0400 Subject: [PATCH 15/15] TST: add test for ampgo method --- tests/test_ampgo.py | 55 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 tests/test_ampgo.py diff --git a/tests/test_ampgo.py b/tests/test_ampgo.py new file mode 100644 index 000000000..a5ba5fcd8 --- /dev/null +++ b/tests/test_ampgo.py @@ -0,0 +1,55 @@ +import numpy as np +from numpy.testing import assert_allclose + +import lmfit + + +def test_ampgo_Alpine02(): + """Test AMPGO algorithm on Alpine02 function.""" + + global_optimum = [7.91705268, 4.81584232] + fglob = -6.12950 + + def residual_Alpine02(params): + x0 = params['x0'].value + x1 = params['x1'].value + return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(np.sqrt(x1) * + np.sin(x1)) + + pars = lmfit.Parameters() + pars.add_many(('x0', 1., True, 0.0, 10.0), + ('x1', 1., True, 0.0, 10.0)) + + mini = lmfit.Minimizer(residual_Alpine02, pars) + out = mini.minimize(method='ampgo') + out_x = np.array([out.params['x0'].value, out.params['x1'].value]) + + assert_allclose(out.chisqr, fglob, rtol=1e-5) + assert_allclose(min(out_x), min(global_optimum), rtol=1e-3) + assert_allclose(max(out_x), max(global_optimum), rtol=1e-3) + assert('global' in out.ampgo_msg) + + + +def test_ampgo_Alpine02_maxfunevals(): + """Test AMPGO algorithm on Alpine02 function.""" + + def residual_Alpine02(params): + x0 = params['x0'].value + x1 = params['x1'].value + return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(np.sqrt(x1) * + np.sin(x1)) + + pars = lmfit.Parameters() + pars.add_many(('x0', 1., True, 0.0, 10.0), + ('x1', 1., True, 0.0, 10.0)) + + mini = lmfit.Minimizer(residual_Alpine02, pars) + kws = {'maxfunevals': 50} + out = mini.minimize(method='ampgo', **kws) + assert('function' in out.ampgo_msg) + + +if __name__ == '__main__': + test_ampgo_Alpine02() + test_ampgo_Alpine02_maxfunevals()
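
[editor's note: for completeness, a minimal usage sketch assembled from the tests above. It assumes only the public lmfit interface plus the `ampgo_msg` result attribute introduced in this patch series; extra keyword arguments passed to `minimize` are forwarded to the AMPGO driver.]

    import numpy as np
    import lmfit

    def residual_Alpine02(params):
        # Alpine02 objective, identical to the one used in the tests.
        x0 = params['x0'].value
        x1 = params['x1'].value
        return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(np.sqrt(x1) *
                                                            np.sin(x1))

    pars = lmfit.Parameters()
    pars.add_many(('x0', 1., True, 0.0, 10.0),
                  ('x1', 1., True, 0.0, 10.0))

    mini = lmfit.Minimizer(residual_Alpine02, pars)
    # Keyword arguments such as 'totaliter' or 'maxfunevals' are passed
    # through to the AMPGO solver.
    out = mini.minimize(method='ampgo', totaliter=10, disp=False)

    print(out.ampgo_msg)      # termination message
    print(out.chisqr)         # best objective value found
    print(out.params['x0'].value, out.params['x1'].value)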