diff --git a/.github/workflows/win-build-and-pytest.yml b/.github/workflows/win-build-and-pytest.yml index 8f1ba9f..e43eb82 100644 --- a/.github/workflows/win-build-and-pytest.yml +++ b/.github/workflows/win-build-and-pytest.yml @@ -30,7 +30,7 @@ jobs: pip install flake8 pytest pytest-cov pip install wheel python setup.py sdist bdist_wheel - pip install dist/OMADS-2408.1-py3-none-any.whl + pip install dist/OMADS-2410-py3-none-any.whl - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names diff --git a/.gitignore b/.gitignore index dc04558..4f9ea96 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,6 @@ visualize.py vis.py deeplearning.mplstyle CFD -temp \ No newline at end of file +temp +tests/*.log +testing* \ No newline at end of file diff --git a/dist/OMADS-2408.1-py3-none-any.whl b/dist/OMADS-2408.1-py3-none-any.whl deleted file mode 100644 index 5d0df8f..0000000 Binary files a/dist/OMADS-2408.1-py3-none-any.whl and /dev/null differ diff --git a/dist/OMADS-2410-py3-none-any.whl b/dist/OMADS-2410-py3-none-any.whl new file mode 100644 index 0000000..5c01531 Binary files /dev/null and b/dist/OMADS-2410-py3-none-any.whl differ diff --git a/pyproject.toml b/pyproject.toml index c044f1a..2d5ddd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ requires = [ build-backend = "setuptools.build_meta" [tool.pytest.ini_options] -addopts = "--cov=OMADS" testpaths = [ "tests", ] diff --git a/setup.cfg b/setup.cfg index 264ebf1..8ca0629 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = OMADS -version = 2408.1 +version = 2410 author = Ahmed H. Bayoumy author_email = ahmed.bayoumy@mail.mcgill.ca description = "Python package for DFO; an implementation of the mesh adaptive direct search (MADS)." diff --git a/setup.py b/setup.py index d27eb4d..ff8fa12 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ name="OMADS", author="Ahmed H. 
Bayoumy", author_email="ahmed.bayoumy@mail.mcgill.ca", - version='2408.1', + version='2410', packages=find_packages(include=['OMADS', 'OMADS.*']), description="Mesh Adaptive Direct Search (MADS)", install_requires=[ @@ -14,7 +14,8 @@ 'scipy', 'pyDOE2', 'samplersLib>=2408', - 'paramiko>=3.4.0' + 'paramiko>=3.4.0', + 'deap==1.4' ], extras_require={ 'interactive': ['matplotlib>=3.5.2', 'plotly>=5.14.1'], diff --git a/src/OMADS/Barrier.py b/src/OMADS/Barrier.py index 1e80c20..51b08e1 100644 --- a/src/OMADS/Barrier.py +++ b/src/OMADS/Barrier.py @@ -1,28 +1,27 @@ import copy from dataclasses import dataclass, field -from typing import List, Any +from typing import List, Optional from .CandidatePoint import CandidatePoint from .Point import Point -from ._globals import * +from ._globals import DType, DESIGN_STATUS, EVAL_TYPE import numpy as np from typing import Protocol -from .Parameters import Parameters from .Cache import Cache @dataclass class BarrierData(Protocol): - _xFeas: List[CandidatePoint] = None - _xInf: List[CandidatePoint] = None + _xFeas: Optional[List[CandidatePoint]] = None + _xInf: Optional[List[CandidatePoint]] = None - _xIncFeas: List[CandidatePoint] = None - _xIncInf: List[CandidatePoint] = None + _xIncFeas: Optional[List[CandidatePoint]] = None + _xIncInf: Optional[List[CandidatePoint]] = None - _refBestFeas: CandidatePoint = None - _refBestInf: CandidatePoint = None + _refBestFeas: Optional[CandidatePoint] = None + _refBestInf: Optional[CandidatePoint] = None - _dtype: DType = None + _dtype: Optional[DType] = None - def init(self, xFeas: CandidatePoint = None, evalType: EVAL_TYPE = None, barrierInitializedFromCache: bool = True): + def init(self, eval_point_list: Optional[List[Point]] = None): ... def getAllXFeas(self): @@ -91,7 +90,7 @@ def getSuccessTypeOfPoints(self): def updateWithPoints(self): ... - def findPoint(self, Point: Point, foundEvalPoint: CandidatePoint): + def findPoint(self, point: Point): ... 
def setN(self): @@ -112,12 +111,12 @@ def findEvalPoint(self): class BarrierBase(BarrierData): - _hMax: float = np.inf + _h_max: float = np.inf _n: int = 0 - def __init__(self, hMax: float = np.inf): - self._hMax = hMax + def __init__(self, h_max: float = np.inf): + self._h_max = h_max self._n = 0 self._dtype = DType() self._xInf = [] @@ -126,25 +125,25 @@ def __init__(self, hMax: float = np.inf): self._xIncInf = [] def setN(self): - isSet: bool = False + is_set: bool = False s: str for cp in self.getAllPoints(): - if not isSet: + if not is_set: self._n = cp._n - isSet = True + is_set = True elif cp._n != self._n: s = f"Barrier has points of size {self._n} and of size {cp._n}" raise IOError(s) - if not isSet: + if not is_set: raise IOError("Barrier could not set point size") - def checkCache(self, cache: Cache): + def checkCache(self, cache: Cache = None): if cache == None: raise IOError("Cache must be instantiated before initializing Barrier.") def checkHMax(self): - if self._hMax is None or self._hMax < self._dtype.zero: + if self._h_max is None or self._h_max < self._dtype.zero: raise IOError("Barrier: hMax must be positive.") def clearXFeas(self): @@ -155,19 +154,19 @@ def clearXInf(self): del self._xIncInf def getAllPoints(self) -> List[CandidatePoint]: - allPoints: List[CandidatePoint] = [] + all_points: List[CandidatePoint] = [] if self._xFeas is None: self._xFeas = [] for cp in self._xFeas: - allPoints.append(cp) + all_points.append(cp) if self._xInf is None: self._xInf = [] for cp in self._xInf: - allPoints.append(cp) + all_points.append(cp) - return allPoints + return all_points - def getFirstPoint(self) -> CandidatePoint: + def getFirstPoint(self) -> Optional[CandidatePoint]: if self._xIncFeas and len(self._xIncFeas) > 0: return self._xIncFeas[0] elif self._xFeas and len(self._xFeas) > 0: @@ -180,43 +179,38 @@ def getFirstPoint(self) -> CandidatePoint: return None def findEvalPoint(self, cps: List[CandidatePoint] = None, cp: CandidatePoint = None): - ind = 0 for p in cps: if p.signature == cp.signature: return True, p - ind+=1 return False, p - def findPoint(self, Point: Point, foundEvalPoint: CandidatePoint) -> bool: + def findPoint(self, point: Point) -> bool: found: bool = False - evalPointList: List[CandidatePoint] = self.getAllPoints() - for cp in evalPointList: - if cp._n != Point._n: + eval_point_list: List[CandidatePoint] = self.getAllPoints() + for cp in eval_point_list: + if cp._n != point._n: raise IOError("Error: Eval points have different dimensions") - if Point == cp.coordinates: - foundEvalPoint = copy.deepcopy(cp) + if point == cp.coordinates: found = True break return found - def checkXFeas(self, xFeas: CandidatePoint = None, evalType: EVAL_TYPE = None): - if xFeas.evaluated: - self.checkXFeasIsFeas(xFeas=xFeas, evalType=evalType) - + def checkXFeas(self, x_feas: CandidatePoint = None, eval_type: EVAL_TYPE = None): + if x_feas.evaluated: + self.checkXFeasIsFeas(x_feas=x_feas, eval_type=eval_type) def getAllXFeas(self): return self._xFeas - def checkXFeasIsFeas(self, xFeas: CandidatePoint=None, evalType: EVAL_TYPE = None): - if xFeas.evaluated and xFeas.status != DESIGN_STATUS.ERROR: - h = xFeas.h - if h is None or h!= 0.0: + def checkXFeasIsFeas(self, x_feas: CandidatePoint=None, eval_type: EVAL_TYPE = None): + if x_feas.evaluated and x_feas.status != DESIGN_STATUS.ERROR: + h = x_feas.h + if h is None or not np.isclose(h, 0.0, rtol=1e-09, atol=1e-09): raise IOError(f"Error: Barrier: xFeas' h value must be 0.0, got: {h}") - - def checkXInf(self, xInf: 
CandidatePoint = None, evalType: EVAL_TYPE = None): - if not xInf.evaluated: + def checkXInf(self, x_inf: CandidatePoint = None, eval_type: EVAL_TYPE = None): + if not x_inf.evaluated: raise IOError("Barrier: xInf must be evaluated before being set.") diff --git a/src/OMADS/Barriers.py b/src/OMADS/Barriers.py index f3f5a34..cf6ebde 100644 --- a/src/OMADS/Barriers.py +++ b/src/OMADS/Barriers.py @@ -23,10 +23,10 @@ import copy from dataclasses import dataclass, field -from typing import List, Any, Tuple +from typing import List, Tuple, Optional from .CandidatePoint import CandidatePoint from .Point import Point -from ._globals import * +from ._globals import DType, VAR_TYPE, BARRIER_TYPES, SUCCESS_TYPES, DESIGN_STATUS, EVAL_TYPE, COMPARE_TYPE import numpy as np from .Parameters import Parameters from .Barrier import BarrierBase @@ -34,22 +34,22 @@ @dataclass class Barrier: - _params: Parameters = None + _params: Optional[Parameters] = None _eval_type: int = 1 _h_max: float = 0 - _best_feasible: CandidatePoint = None - _ref: CandidatePoint = None - _filter: List[CandidatePoint] = None + _best_feasible: Optional[CandidatePoint] = None + _ref: Optional[CandidatePoint] = None + _filter: Optional[List[CandidatePoint]] = None _prefilter: int = 0 _rho_leaps: float = 0.1 - _prim_poll_center: CandidatePoint = None - _sec_poll_center: CandidatePoint = None + _prim_poll_center: Optional[CandidatePoint] = None + _sec_poll_center: Optional[CandidatePoint] = None _peb_changes: int = 0 _peb_filter_reset: int = 0 - _peb_lop: List[CandidatePoint] = None - _all_inserted: List[CandidatePoint] = None - _one_eval_succ: SUCCESS_TYPES = None - _success: SUCCESS_TYPES = None + _peb_lop: Optional[List[CandidatePoint]] = None + _all_inserted: Optional[List[CandidatePoint]] = None + _one_eval_succ: Optional[SUCCESS_TYPES] = None + _success: Optional[SUCCESS_TYPES] = None def __init__(self, p: Parameters, eval_type: int = 1): self._h_max = p.get_h_max_0() @@ -78,7 +78,7 @@ def insert_feasible(self, x: CandidatePoint) -> SUCCESS_TYPES: def filter_insertion(self, x:CandidatePoint) -> bool: if not x._is_EB_passed: - return + return False if self._filter is None: self._filter = [] self._filter.append(x) @@ -107,7 +107,7 @@ def filter_insertion(self, x:CandidatePoint) -> bool: def insert_infeasible(self, x: CandidatePoint): - insert: bool = self.filter_insertion(x=x) + _ = self.filter_insertion(x=x) if not self._ref: return SUCCESS_TYPES.PS @@ -150,7 +150,7 @@ def select_poll_center(self): self._prim_poll_center = best_infeasible return - last_poll_center: CandidatePoint = CandidatePoint() + last_poll_center: Optional[CandidatePoint] = None if self._params.get_barrier_type() == BARRIER_TYPES.PB: last_poll_center = self._prim_poll_center if best_infeasible.fobj[0] < (self._best_feasible.fobj[0]-self._rho_leaps): @@ -165,10 +165,9 @@ def select_poll_center(self): def set_h_max(self, h_max): self._h_max = np.round(h_max, 2) - if self._filter is not None: - if self._filter[0].h > self._h_max: - self._filter = None - return + if self._filter is not None and self._filter[0].h > self._h_max: + self._filter = None + return if self._filter is not None: it = 0 while it != len(self._filter): @@ -190,7 +189,6 @@ def insert(self, x: CandidatePoint): if self._all_inserted is None: self._all_inserted = [] self._all_inserted.append(x) - h = x.h if x.status == DESIGN_STATUS.INFEASIBLE and (not x.is_EB_passed or x.h > self._h_max): self._one_eval_succ = SUCCESS_TYPES.US return @@ -202,7 +200,9 @@ def insert(self, x: CandidatePoint): 
self._success = self._one_eval_succ

-  def insert_VNS(self):
+  def insert_vns(self):
+    """ Not required here
+    """
     pass

   def update_and_reset_success(self):
@@ -221,7 +221,6 @@ def update_and_reset_success(self):
           break
       if it == 0:
         break
-      # raise RuntimeError("could not find a filter point with h < h_max after a partial success")
       it -= 1
     if self._filter is not None:
       self._ref = self.get_best_infeasible()
@@ -233,7 +232,7 @@
       if self._ref.status is DESIGN_STATUS.FEASIBLE:
         self.insert_feasible(self._ref)

-      if not (self._ref.status is DESIGN_STATUS.INFEASIBLE or self._ref.status is DESIGN_STATUS.INFEASIBLE):
+      if not (self._ref.status is DESIGN_STATUS.INFEASIBLE or self._ref.status is DESIGN_STATUS.FEASIBLE):
         self.insert(self._ref)
@@ -250,14 +249,12 @@ def reset(self):
     self._prefilter = None
     self._filter = None
-    # self._h_max = self._params._h_max_0()
     self._best_feasible = None
     self._ref = None
     self._rho_leaps = 0
     self._poll_center = None
     self._sec_poll_center = None
-    # if ( self._peb_changes > 0 ):
-    #   self._params.reset_PEB_changes()
     self._peb_changes = 0
@@ -271,15 +268,15 @@
 @dataclass
 class BarrierMO(BarrierBase):
   """ """
-  _currentIncumbentFeas: CandidatePoint = None
-  _currentIncumbentInf: CandidatePoint = None
-  _fixedVariables: CandidatePoint = None
-  _xFilterInf: List[CandidatePoint] = None
+  _currentIncumbentFeas: Optional[CandidatePoint] = None
+  _currentIncumbentInf: Optional[CandidatePoint] = None
+  _fixedVariables: Optional[CandidatePoint] = None
+  _xFilterInf: Optional[List[CandidatePoint]] = None
   _nobj: int = 0
-  _bbInputsType: List[VAR_TYPE] = None
+  _bbInputsType: Optional[List[VAR_TYPE]] = None
   _incumbentSelectionParam: int = 0

-  def __init__(self, param: Parameters, options: Options, evalPointList: List[CandidatePoint]= None):
+  def __init__(self, param: Parameters, options: Options, eval_point_list: Optional[List[CandidatePoint]] = None):
-    super(BarrierBase, self).__init__(hMax=param.h_max)
+    super(BarrierBase, self).__init__(h_max=param.h_max)
     self._nobj = param.nobj
@@ -294,26 +291,25 @@
     self.checkHMax()

-    if evalPointList:
-      self.init(fixedVariables=self._fixedVariables, evalType=None,evalPointList=evalPointList)
+    if eval_point_list:
+      self.init(eval_point_list=eval_point_list)

-  def init(self, fixedVariables: Point = None, evalType: EVAL_TYPE = None, evalPointList: List[Point] = None):
-    updated: bool
-    updated, _, _ = self.updateWithPoints(evalPointList)
+  def init(self, eval_point_list: Optional[List[Point]] = None):
+    _, _, _ = self.updateWithPoints(eval_point_list)

   def checkMeshParameters(self, x: CandidatePoint = None):
     mesh = copy.deepcopy(x.mesh)
-    meshSizeCorrection: int = 0
+    mesh_size_correction: int = 0
     if mesh.getdeltaMeshSize().size != x._n:
-      meshSizeCorrection = sum(self._fixedVariables.defined)
+      mesh_size_correction = sum(self._fixedVariables.defined)

-    if (mesh.getdeltaMeshSize().size + meshSizeCorrection != x._n
-        or mesh.getDeltaFrameSize().size + meshSizeCorrection != x._n
-        or mesh.getMeshIndex().size + meshSizeCorrection != x._n):
+    if (mesh.getdeltaMeshSize().size + mesh_size_correction != x._n
+        or mesh.getDeltaFrameSize().size + mesh_size_correction != x._n
+        or mesh.getMeshIndex().size + mesh_size_correction != x._n):
       raise IOError("Error: Mesh parameters dimensions are not compatible with EvalPoint dimension.")

     if not mesh.getdeltaMeshSize().is_all_defined():
@@ -326,15 +322,12 @@
       raise IOError("Error: some
MeshIndex components of EvalPoint passed to MO Barrier ") - def updateWithPoints(self, evalPointList: List[CandidatePoint], evalType: EVAL_TYPE = None, keepAllPoints: bool = None, updateInfeasibleIncumbentAndHmax : bool = None): + def updateWithPoints(self, eval_point_list: List[CandidatePoint]=None, keep_all_points: bool = None): updated = False - updatedFeas = False - updatedInf = False + updated_feas = False + updated_inf = False - s: str - xInfTmp: CandidatePoint - - for cp in evalPointList: + for cp in eval_point_list: self.checkMeshParameters(cp) if not cp.evaluated or cp.status == DESIGN_STATUS.ERROR: @@ -343,18 +336,18 @@ def updateWithPoints(self, evalPointList: List[CandidatePoint], evalType: EVAL_T if cp.fs.size != self._nobj: raise IOError(f"Barrier update: number of objectives is equal to {self._nobj}. Trying to add this point with number of objectives {cp.fs.size}") - updatedFeas = self.updateFeasWithPoint(evalPoint=cp, evalType=evalType, keepAllPoints=keepAllPoints) or updatedFeas + updated_feas = self.updateFeasWithPoint(eval_point=cp, keep_all_points=keep_all_points) or updated_feas # // Do separate loop on evalPointList # // Second loop update the bestInfeasible. # // Use the flag oneFeasEvalFullSuccess. # // If the flag is true hmax will not change. A point improving the best infeasible should not replace it. - for cp in evalPointList: + for cp in eval_point_list: if not cp.evaluated or cp.status == DESIGN_STATUS.ERROR: continue - updatedInf = self.updateInfWithPoint(evalPoint=cp, evalType=evalType, keepAllPoints=keepAllPoints, feasHasBeenUpdated=updatedFeas) or updatedInf + updated_inf = self.updateInfWithPoint(eval_point=cp, keep_all_points=keep_all_points) or updated_inf - updated = updated or updatedFeas or updatedInf + updated = updated or updated_feas or updated_inf if updated: self.setN() @@ -362,17 +355,17 @@ def updateWithPoints(self, evalPointList: List[CandidatePoint], evalType: EVAL_T - return updated, updatedFeas, updatedInf + return updated, updated_feas, updated_inf def updateCurrentIncumbents(self): self.updateCurrentIncumbentFeas() self.updateCurrentIncumbentInf() - def setHMax(self, hMax): - oldHMax = self._hMax - self._hMax = hMax + def setHMax(self, h_max=np.inf): + old_h_max = self._h_max + self._h_max = h_max self.checkHMax() - if hMax < oldHMax: + if h_max < old_h_max: self.updateXInfAndFilterInfAfterHMaxSet() self.updateCurrentIncumbentInf() @@ -381,68 +374,64 @@ def updateXInfAndFilterInfAfterHMaxSet(self): if len(self._xInf) == 0: return - currentInd = 0 - - isInXInf = [True] * len(self._xInf) - for xInf in self._xInf: - h = xInf.h - if h > self._hMax: - isInXInf[currentInd] = False - currentInd += 1 + current_ind = 0 + + is_in_x_inf = [True] * len(self._xInf) + for x_inf in self._xInf: + h = x_inf.h + if h > self._h_max: + is_in_x_inf[current_ind] = False + current_ind += 1 - currentInd = 0 + current_ind = 0 for _ in self._xInf: - if not isInXInf[currentInd]: - self._xInf.pop(currentInd) - currentInd += 1 + if not is_in_x_inf[current_ind]: + self._xInf.pop(current_ind) + current_ind += 1 - currentInd = 0 - isInXFilterInf = [True] *len(self._xFilterInf) - - for xFilterInf in self._xFilterInf: - h = xFilterInf.h - if h >self._hMax: - isInXFilterInf[currentInd] = False - currentInd += 1 + current_ind = 0 + is_in_x_filter_inf = [True] *len(self._xFilterInf) + + for x_filter_inf in self._xFilterInf: + h = x_filter_inf.h + if h >self._h_max: + is_in_x_filter_inf[current_ind] = False + current_ind += 1 - currentInd = 0 + current_ind = 0 for _ in 
self._xFilterInf:
-      if not isInXFilterInf[currentInd]:
-        self._xFilterInf.pop(currentInd)
-      currentInd += 1
+      if not is_in_x_filter_inf[current_ind]:
+        self._xFilterInf.pop(current_ind)
+      current_ind += 1

     self._xFilterInf = self.non_dominated_sort(self._xFilterInf)

     # // And reinsert potential infeasible non dominated points into the set of infeasible
     # // solutions.
-    currentInd = 0
-    isInXinf = [False] * len(self._xFilterInf)
+    current_ind = 0
+    is_in_x_inf = [False] * len(self._xFilterInf)

-    for evalPoint in self._xFilterInf:
-      if len(self._xInf) > 0 and self.findEvalPoint(self._xFilterInf, evalPoint)[1] == self._xInf[-1]:
-        currentIndTmp = 0
+    for eval_point in self._xFilterInf:
+      if len(self._xInf) > 0 and self.findEvalPoint(self._xFilterInf, eval_point)[1] == self._xInf[-1]:
+        current_ind_tmp = 0
         insert = True
-        for evalPointInf in self._xFilterInf:
-          if currentIndTmp != currentInd:
-            compFlag = evalPoint.__comMO__(evalPointInf, True)
-            if compFlag == COMPARE_TYPE.DOMINATED:
+        for eval_point_inf in self._xFilterInf:
+          if current_ind_tmp != current_ind:
+            comp_flag = eval_point.__comp_mo__(eval_point_inf, True)
+            if comp_flag == COMPARE_TYPE.DOMINATED:
               insert = False
               break
-            elif compFlag == COMPARE_TYPE.DOMINATING:
-              isInXInf[currentIndTmp] = False
-          currentIndTmp += 1
-        isInXInf[currentInd] = insert
-      currentInd += 1
+            elif comp_flag == COMPARE_TYPE.DOMINATING:
+              is_in_x_inf[current_ind_tmp] = False
+          current_ind_tmp += 1
+        is_in_x_inf[current_ind] = insert
+      current_ind += 1

-    for i in range(len(isInXInf)):
-      if isInXInf[i]:
+    for i in range(len(is_in_x_inf)):
+      if is_in_x_inf[i]:
         self._xInf.append(self._xFilterInf[i])

     self._xInf = self.non_dominated_sort(self._xInf)
-    return
-
-
   def clearXFeas(self):
     self._xFeas.clear()
@@ -454,13 +443,13 @@ def clearXInf(self):
     # Update the current incumbent inf. Only the infeasible one depends on XInf (not the case for the feasible one).
     self.updateCurrentIncumbentInf()

-  def computeSuccessType(self, eval1: CandidatePoint=None, eval2: CandidatePoint=None, hMax: int=np.inf):
+  def computeSuccessType(self, eval1: CandidatePoint=None, eval2: CandidatePoint=None, h_max: int=np.inf):
     """ """
     success: SUCCESS_TYPES = SUCCESS_TYPES.US
     if eval1 is not None:
       if eval2 is None:
         h = eval1.h
-        if h > hMax or h == np.inf:
+        if h > h_max or h == np.inf:
           success = SUCCESS_TYPES.US
         else:
           if eval1.status == DESIGN_STATUS.FEASIBLE:
@@ -475,77 +464,74 @@
       elif eval1.status == DESIGN_STATUS.FEASIBLE and eval2.status == DESIGN_STATUS.FEASIBLE:
         success = SUCCESS_TYPES.US
       elif eval1.status != DESIGN_STATUS.FEASIBLE and eval2.status != DESIGN_STATUS.FEASIBLE:
-        if eval1.h <= hMax and eval1.h < eval2.h and eval1.f > eval2.f:
+        if eval1.h <= h_max and eval1.h < eval2.h and eval1.f > eval2.f:
           success = SUCCESS_TYPES.PS
         else:
           success = SUCCESS_TYPES.US
     return success

-  def defaultComputeSuccessType(self, evalPoint1: CandidatePoint, evalPoint2: CandidatePoint, hMax: float):
+  def defaultComputeSuccessType(self, eval_point1: CandidatePoint, eval_point2: CandidatePoint, h_max: float):
     success: SUCCESS_TYPES = SUCCESS_TYPES.US
-    if evalPoint1:
-      if evalPoint2:
-        h = evalPoint1.h
-        if h > hMax or h == np.inf:
-          # // Even if evalPoint2 is NULL, this case is still
-          # // not a success.
- success = SUCCESS_TYPES.US - elif evalPoint1.status == DESIGN_STATUS.FEASIBLE: - success = SUCCESS_TYPES.FS - else: - success = self.defaultComputeSuccessType(evalPoint1, evalPoint2, hMax) + if eval_point1 and eval_point2: + h = eval_point1.h + if h > h_max or h == np.inf: + # // Even if evalPoint2 is NULL, this case is still + # // not a success. + success = SUCCESS_TYPES.US + elif eval_point1.status == DESIGN_STATUS.FEASIBLE: + success = SUCCESS_TYPES.FS + else: + success = self.defaultComputeSuccessType(eval_point1, eval_point2, h_max) return success - def getSuccessTypeOfPoints(self, xFeas: CandidatePoint, xInf: CandidatePoint): - successType = SUCCESS_TYPES.US - successType2 = SUCCESS_TYPES.US - newBestFeas: CandidatePoint = CandidatePoint() - newBestInf: CandidatePoint = CandidatePoint() + def getSuccessTypeOfPoints(self, x_feas: CandidatePoint = None, x_inf: CandidatePoint = None): + success_type = SUCCESS_TYPES.US + success_type2 = SUCCESS_TYPES.US if self._currentIncumbentFeas != None or self._currentIncumbentInf != None: if not self._currentIncumbentFeas: - successType = self.defaultComputeSuccessType(xFeas, self._currentIncumbentFeas, self._hMax) + success_type = self.defaultComputeSuccessType(x_feas, self._currentIncumbentFeas, self._h_max) if not self._currentIncumbentInf: - successType = self.defaultComputeSuccessType(xInf, self._currentIncumbentInf, self._hMax) - if successType2.value > successType.value: - successType = successType2 + success_type = self.defaultComputeSuccessType(x_inf, self._currentIncumbentInf, self._h_max) + if success_type2.value > success_type.value: + success_type = success_type2 - return successType + return success_type - def checkXFeasIsFeas(self, xFeas: CandidatePoint, evalType: DESIGN_STATUS): - if xFeas.evaluated and xFeas.status != DESIGN_STATUS.ERROR: - h = xFeas.h + def checkXFeasIsFeas(self, x_feas: CandidatePoint = None, eval_type: DESIGN_STATUS = None): + if x_feas.evaluated and x_feas.status != DESIGN_STATUS.ERROR: + h = x_feas.h if h != 0: raise IOError("Error: DMultiMadsBarrier: xFeas' h value must be 0.0") - if xFeas.fs.size != self._nobj: + if x_feas.fs.size != self._nobj: raise IOError("Error: DMultiMadsBarrier: xFeas' F must be of size") def getMeshMaxFrameSize(self, pt:CandidatePoint): - maxRealVal = -1.0 - maxIntegerVal = -1.0 + max_real_val = -1.0 + max_integer_val = -1.0 # Detect if mesh is sub dimension and pt are in full dimension. - meshIsInSubDimension = False + mesh_is_in_subdimension = False mesh = pt.mesh if pt.mesh._n < pt._n: - meshIsInSubDimension = True + mesh_is_in_subdimension = True shift = 0 for i in range(pt._n): # Do not use access the frame size for fixed variables. - if meshIsInSubDimension and self._fixedVariables.defined[i]: + if mesh_is_in_subdimension and self._fixedVariables.defined[i]: shift += 1 if self._bbInputsType[i] == VAR_TYPE.REAL: - maxRealVal = max(maxRealVal, mesh.getDeltaFrameSize(i-shift)) + max_real_val = max(max_real_val, mesh.getDeltaFrameSize(i-shift)) elif self._bbInputsType[i] == VAR_TYPE.INTEGER: - maxIntegerVal = max(maxIntegerVal, mesh.getDeltaFrameSize(i-shift)) - if maxRealVal > 0.0: - return maxRealVal # Some values are real: get norm inf on these values only. - elif maxIntegerVal > 0.0: - return maxIntegerVal # No real value but some integer values: get norm inf on these values only + max_integer_val = max(max_integer_val, mesh.getDeltaFrameSize(i-shift)) + if max_real_val > 0.0: + return max_real_val # Some values are real: get norm inf on these values only. 
+ elif max_integer_val > 0.0: + return max_integer_val # No real value but some integer values: get norm inf on these values only else: return 1.0 # Only binary variables: any elements of the iterate list can be chosen @@ -559,36 +545,36 @@ def updateCurrentIncumbentFeas(self): self._currentIncumbentFeas = self._xFeas[0] return - maxFrameSizeFeasElts = -1.0 + max_frame_size_feas_elts = -1.0 # Set max frame size of all elements for xf in self._xFeas: - maxFrameSizeFeasElts = max(self.getMeshMaxFrameSize(xf), maxFrameSizeFeasElts) + max_frame_size_feas_elts = max(self.getMeshMaxFrameSize(xf), max_frame_size_feas_elts) # Select candidates - canBeFrameCenter: List[bool] = [False] * len(self._xFeas) - nbSelectedCandidates = 0 + can_be_frame_center: List[bool] = [False] * len(self._xFeas) + nb_selected_candidates = 0 # see article DMultiMads Algorithm 4. for i in range(len(self._xFeas)): - maxFrameSizeElt = self.getMeshMaxFrameSize(self._xFeas[i]) + max_frame_size_elt = self.getMeshMaxFrameSize(self._xFeas[i]) - if (10**(-float(self._incumbentSelectionParam)) * maxFrameSizeFeasElts) <= maxFrameSizeElt: - canBeFrameCenter[i] = True - nbSelectedCandidates += 1 + if (10**(-float(self._incumbentSelectionParam)) * max_frame_size_feas_elts) <= max_frame_size_elt: + can_be_frame_center[i] = True + nb_selected_candidates += 1 # Only one point in the barrier. - if (nbSelectedCandidates == 1): - for it in range(len(canBeFrameCenter)): - if canBeFrameCenter[it]: + if (nb_selected_candidates == 1): + for it in range(len(can_be_frame_center)): + if can_be_frame_center[it]: break - if it == len(canBeFrameCenter): + if it == len(can_be_frame_center): raise IOError("Error: DMultiMadsBarrier, should not reach this condition") else: - selectedInd = it - self._currentIncumbentFeas = self._xFeas[selectedInd] + selected_ind = it + self._currentIncumbentFeas = self._xFeas[selected_ind] # Only two points in the barrier. - elif ((nbSelectedCandidates == 2) and (len(self._xFeas) == 2)): + elif ((nb_selected_candidates == 2) and (len(self._xFeas) == 2)): eval1 = self._xFeas[0] eval2 = self._xFeas[1] @@ -604,9 +590,9 @@ def updateCurrentIncumbentFeas(self): else: # First case: biobjective optimization. Points are already ranked by lexicographic order. 
-        if self._nobj:
+        if self._nobj == 2:
-          currentBestInd = 0
-          maxGap = -1.0
-          currentGap: float
+          current_best_ind = 0
+          max_gap = -1.0
+          current_gap: float
           for obj in range(self._nobj):
             # Get extreme values value according to one objective
             fmin: float = self._xFeas[0].fs[obj]
             fmax: float = self._xFeas[0].fs[obj]
@@ -617,45 +603,45 @@
             # Intermediate points
-            for i in range(1, self._xFeas-1):
-              currentGap = self._xFeas[i+1].fs[obj]-self._xFeas[i-1].fs[obj]
-              currentGap /= (fmax-fmin)
-              if (canBeFrameCenter[i] and currentGap >= maxGap):
-                maxGap = currentGap
-                currentBestInd = i
+            for i in range(1, len(self._xFeas)-1):
+              current_gap = self._xFeas[i+1].fs[obj]-self._xFeas[i-1].fs[obj]
+              current_gap /= (fmax-fmin)
+              if (can_be_frame_center[i] and current_gap >= max_gap):
+                max_gap = current_gap
+                current_best_ind = i

             # Extreme points
-            currentGap = 2 * (self._xFeas[len(self._xFeas)-1]).fs[obj] - (self._xFeas[len(self._xFeas)-2]).fs[obj]
-            currentGap /= (fmax - fmin)
-            if canBeFrameCenter[len(self._xFeas)-1] and currentGap >= maxGap:
-              maxGap = currentGap
-              currentBestInd = len(self._xFeas)-1
+            current_gap = 2 * (self._xFeas[len(self._xFeas)-1]).fs[obj] - (self._xFeas[len(self._xFeas)-2]).fs[obj]
+            current_gap /= (fmax - fmin)
+            if can_be_frame_center[len(self._xFeas)-1] and current_gap >= max_gap:
+              max_gap = current_gap
+              current_best_ind = len(self._xFeas)-1

-            currentGap = 2 * (self._xFeas[1]).fs[obj] - (self._xFeas[0]).fs[obj]
-            currentGap /= (fmax -fmin)
+            current_gap = 2 * (self._xFeas[1]).fs[obj] - (self._xFeas[0]).fs[obj]
+            current_gap /= (fmax - fmin)

-            if canBeFrameCenter[0] and currentGap >= maxGap:
-              maxGap = currentGap
-              currentBestInd = 0
-          self._currentIncumbentFeas = self._xFeas[currentBestInd]
+            if can_be_frame_center[0] and current_gap >= max_gap:
+              max_gap = current_gap
+              current_best_ind = 0
+          self._currentIncumbentFeas = self._xFeas[current_best_ind]
         # // More than 2 objectives
         else:
-          tmpXFeasPInd: List[Tuple[CandidatePoint, int]] = [(CandidatePoint(), 0)]*len(self._xFeas)
-          for i in range(len(tmpXFeasPInd)):
-            tmpXFeasPInd[i] = (self._xFeas[i], i)
-          currentBestInd = 0
-          maxGap = -1.0
-          currentGap: float
-
+          tmp_x_feas_p_ind: List[Tuple[CandidatePoint, int]] = [(CandidatePoint(), 0)]*len(self._xFeas)
+          for i in range(len(tmp_x_feas_p_ind)):
+            tmp_x_feas_p_ind[i] = (self._xFeas[i], i)
+          current_best_ind = 0
+          max_gap = -1.0
+          current_gap: float
+
           for obj in range(self._nobj):
             # Sort elements of tmpXFeasPInd according to objective obj (in ascending order)
-            tmpXFeasPInd = sorted(tmpXFeasPInd, key=lambda x: x[0].fs[obj])
+            tmp_x_feas_p_ind = sorted(tmp_x_feas_p_ind, key=lambda x: x[0].fs[obj])

             # Get extreme values value according to one objective
-            fmin = tmpXFeasPInd[0][0].fs[obj]
-            fmax = tmpXFeasPInd[len(tmpXFeasPInd)-1][0].fs[obj]
+            fmin = tmp_x_feas_p_ind[0][0].fs[obj]
+            fmax = tmp_x_feas_p_ind[len(tmp_x_feas_p_ind)-1][0].fs[obj]

             # Can happen for example when we have several minima or for more than three objectives
             if fmin == fmax:
@@ -663,139 +649,139 @@
               fmax = 1.
# Intermediate points - for i in range(1, len(tmpXFeasPInd)-1): - currentGap = tmpXFeasPInd[i+1][0].fs[obj]-tmpXFeasPInd[i-1][0].fs[obj] - currentGap /= (fmax - fmin) - if canBeFrameCenter[tmpXFeasPInd[i][1]] and currentGap >= maxGap: - maxGap = currentGap - currentBestInd = tmpXFeasPInd[i][1] + for i in range(1, len(tmp_x_feas_p_ind)-1): + current_gap = tmp_x_feas_p_ind[i+1][0].fs[obj]-tmp_x_feas_p_ind[i-1][0].fs[obj] + current_gap /= (fmax - fmin) + if can_be_frame_center[tmp_x_feas_p_ind[i][1]] and current_gap >= max_gap: + max_gap = current_gap + current_best_ind = tmp_x_feas_p_ind[i][1] # Extreme points - currentGap = 2*(tmpXFeasPInd[len(tmpXFeasPInd)-1][0].fs[obj]) - tmpXFeasPInd[len(tmpXFeasPInd)-2][0].fs[obj] - currentGap /= (fmax - fmin) + current_gap = 2*(tmp_x_feas_p_ind[len(tmp_x_feas_p_ind)-1][0].fs[obj]) - tmp_x_feas_p_ind[len(tmp_x_feas_p_ind)-2][0].fs[obj] + current_gap /= (fmax - fmin) - if (canBeFrameCenter[tmpXFeasPInd[len(tmpXFeasPInd)-1][1]] and currentGap > maxGap): - maxGap = currentGap - currentBestInd = tmpXFeasPInd[len(tmpXFeasPInd)-1][1] + if (can_be_frame_center[tmp_x_feas_p_ind[len(tmp_x_feas_p_ind)-1][1]] and current_gap > max_gap): + max_gap = current_gap + current_best_ind = tmp_x_feas_p_ind[len(tmp_x_feas_p_ind)-1][1] - currentGap = 2 * tmpXFeasPInd[1][0].fs[obj] - tmpXFeasPInd[0][0].fs[obj] - currentGap /= (fmax -fmin) + current_gap = 2 * tmp_x_feas_p_ind[1][0].fs[obj] - tmp_x_feas_p_ind[0][0].fs[obj] + current_gap /= (fmax -fmin) - if (canBeFrameCenter[tmpXFeasPInd[0][1]] and currentGap > maxGap): - maxGap = currentGap - currentBestInd = tmpXFeasPInd[0][1] - self._currentIncumbentFeas = self._xFeas[currentBestInd] + if (can_be_frame_center[tmp_x_feas_p_ind[0][1]] and current_gap > max_gap): + max_gap = current_gap + current_best_ind = tmp_x_feas_p_ind[0][1] + self._currentIncumbentFeas = self._xFeas[current_best_ind] def updateCurrentIncumbentInf(self): self._currentIncumbentInf = None if len(self._xFeas) > 0 and len(self._xInf) > 0: # // Get the infeasible solution with maximum dominance move below the _hMax threshold, # // according to the set of best feasible incumbent solutions. - currentInd = 0 - maxDomMove = -np.inf + current_ind = 0 + max_dom_move = -np.inf for j in range(len(self._xInf)): # // Compute dominance move # // = min \sum_{1}^m max(fi(y) - fi(x), 0) # // y \in Fk - tmpDomMove = np.inf - evalInf = self._xInf[j] - h = evalInf.h + tmp_dom_move = np.inf + eval_inf = self._xInf[j] + h = eval_inf.h - if h <= self._hMax: - for xFeas in self._xFeas: - sumVal = 0. - evalFeas = xFeas + if h <= self._h_max: + for x_feas in self._xFeas: + sum_val = 0. + eval_feas = x_feas for i in range(self._nobj): - sumVal += max(evalFeas.fs[i]-evalInf.fs[i], 0) - if tmpDomMove > sumVal: - tmpDomMove = sumVal + sum_val += max(eval_feas.fs[i]-eval_inf.fs[i], 0) + if tmp_dom_move > sum_val: + tmp_dom_move = sum_val # Get the maximum dominance move index - if maxDomMove < tmpDomMove: - maxDomMove = tmpDomMove - currentInd = j + if max_dom_move < tmp_dom_move: + max_dom_move = tmp_dom_move + current_ind = j # // In this case, all infeasible solutions are "dominated" in terms of fvalues # // by at least one element of Fk - if maxDomMove == 0.: + if np.isclose(max_dom_move, 0., rtol=1e-09, atol=1e-09): # // In this case, get the infeasible solution below the _hMax threshold which has # // minimal dominance move, when considered a maximization problem. 
-      minDomMove = np.inf
-      currentInd = 0
+      min_dom_move = np.inf
+      current_ind = 0
       for j in range(len(self._xInf)):
         # // Compute dominance move
         # // = min \sum_{1}^m max(fi(x) - fi(y), 0)
         # // y \in Fk
-        tmpDomMove = np.inf
-        evalInf = self._xInf[j]
-        h = evalInf.h
-        if h<= self._hMax:
-          for xFeas in self._xFeas:
-            sumVal = 0.
-            evalFeas = xFeas
+        tmp_dom_move = np.inf
+        eval_inf = self._xInf[j]
+        h = eval_inf.h
+        if h <= self._h_max:
+          for x_feas in self._xFeas:
+            sum_val = 0.
+            eval_feas = x_feas
             # Compute \sum_{1}^m max (fi(x) - fi(y), 0)
             for i in range(self._nobj):
-              sumVal += max(evalInf.fs[i] - evalFeas.fs[i], 0.)
-            if tmpDomMove > sumVal:
-              tmpDomMove = sumVal
+              sum_val += max(eval_inf.fs[i] - eval_feas.fs[i], 0.)
+            if tmp_dom_move > sum_val:
+              tmp_dom_move = sum_val
           # Get the minimal dominance move index
-          if minDomMove > tmpDomMove:
-            minDomMove = tmpDomMove
-            currentInd = j
-      self._currentIncumbentInf = self._xInf[currentInd]
+          if min_dom_move > tmp_dom_move:
+            min_dom_move = tmp_dom_move
+            current_ind = j
+      self._currentIncumbentInf = self._xInf[current_ind]
     else:
       self._currentIncumbentInf = self.getFirstXIncInfNoXFeas() if len(self._xInf) > 0 else None

   def getXInfMinH(self):
-    indXInfMinH = 0
-    hMinVal = np.inf
+    ind_x_inf_min_h = 0
+    h_min_val = np.inf
     for i in range(len(self._xInf)):
-      eval = self._xInf[i]
-      h = eval.h
+      my_eval = self._xInf[i]
+      h = my_eval.h
       # // By definition, all elements of _xInf or _xFilterInf have a well-defined
       # // h value. So, no need to check.
-      if h < hMinVal:
-        hMinVal = h
-        indXInfMinH = i
+      if h < h_min_val:
+        h_min_val = h
+        ind_x_inf_min_h = i
-    return self._xInf[indXInfMinH]
+    return self._xInf[ind_x_inf_min_h]

   def getFirstXIncInfNoXFeas(self):
@@ ... @@
        if max(np.abs(objv1.coordinates)) > max(np.abs(objv2.coordinates)):
-          xInf = self._xInf[0]
+          x_inf = self._xInf[0]
        else:
-          xInf = self._xInf[1]
+          x_inf = self._xInf[1]
     else:
       if self._nobj == 2:
-        currentBestInd = 0
-        maxGap = -1.
-        currentGap: float
+        current_best_ind = 0
+        max_gap = -1.
+        current_gap: float

         for obj in range(self._nobj):
           # Get extreme values value according to one objective
@@ -823,43 +809,43 @@
           # Intermediate points
-          for i in range(1, self._xInf-1):
-            currentGap = self._xInf[i+1].fs[obj]-self._xInf[i-1].fs[obj]
-            currentGap /= (fmax-fmin)
-            if (canBeFrameCenter[i] and currentGap >= maxGap):
-              maxGap = currentGap
-              currentBestInd = i
+          for i in range(1, len(self._xInf)-1):
+            current_gap = self._xInf[i+1].fs[obj]-self._xInf[i-1].fs[obj]
+            current_gap /= (fmax-fmin)
+            if (can_be_frame_center[i] and current_gap >= max_gap):
+              max_gap = current_gap
+              current_best_ind = i

           # Extreme points
-          currentGap = 2 * (self._xInf[len(self._xInf)-1]).fs[obj] - (self._xInf[len(self._xInf)-2]).fs[obj]
-          currentGap /= (fmax - fmin)
-          if canBeFrameCenter[len(self._xInf)-1] and currentGap > maxGap:
-            maxGap = currentGap
-            currentBestInd = len(self._xInf)-1
+          current_gap = 2 * (self._xInf[len(self._xInf)-1]).fs[obj] - (self._xInf[len(self._xInf)-2]).fs[obj]
+          current_gap /= (fmax - fmin)
+          if can_be_frame_center[len(self._xInf)-1] and current_gap > max_gap:
+            max_gap = current_gap
+            current_best_ind = len(self._xInf)-1

-          currentGap = 2 * (self._xInf[1]).fs[obj] - (self._xInf[0]).fs[obj]
-          currentGap /= (fmax -fmin)
+          current_gap = 2 * (self._xInf[1]).fs[obj] - (self._xInf[0]).fs[obj]
+          current_gap /= (fmax - fmin)

-          if canBeFrameCenter[0] and currentGap > maxGap:
-            maxGap = currentGap
-            currentBestInd = 0
-        xInf = self._xInf[currentBestInd]
+          if can_be_frame_center[0] and current_gap > max_gap:
+            max_gap = current_gap
+            current_best_ind = 0
+        x_inf = self._xInf[current_best_ind]
       # // More than 2 objectives
       else:
-        tmpXInfPInd: List[Tuple[CandidatePoint, int]] = [(CandidatePoint(), 0)]*len(self._xInf)
-        for i in range(len(tmpXInfPInd)):
-          tmpXInfPInd[i] = (self._xInf[i], i)
-        currentBestInd = 0
-        maxGap = -1.0
-        currentGap: float
+        tmp_x_inf_p_ind: List[Tuple[CandidatePoint, int]] = [(CandidatePoint(), 0)]*len(self._xInf)
+        for i in range(len(tmp_x_inf_p_ind)):
+          tmp_x_inf_p_ind[i] = (self._xInf[i], i)
+        current_best_ind = 0
+        max_gap = -1.0
+        current_gap: float

         for obj in range(self._nobj):
           # Sort elements of tmpXFeasPInd according to objective obj (in ascending order)
-          tmpXInfPInd = sorted(tmpXInfPInd, key=lambda x: x[0].fs[obj])
+          tmp_x_inf_p_ind = sorted(tmp_x_inf_p_ind, key=lambda x: x[0].fs[obj])

           # Get extreme values value according to one objective
-          fmin = tmpXInfPInd[0][0].fs[obj]
-          fmax = tmpXInfPInd[len(tmpXInfPInd)-1][0].fs[obj]
+          fmin = tmp_x_inf_p_ind[0][0].fs[obj]
+          fmax = tmp_x_inf_p_ind[len(tmp_x_inf_p_ind)-1][0].fs[obj]

           # Can happen for example when we have several minima or for more than three objectives
          if fmin == fmax:
@@ -867,39 +853,39 @@
            fmax = 1.

          # Intermediate points
-          for i in range(1, len(tmpXInfPInd)-1):
-            currentGap = tmpXInfPInd[i+1][0].fs[obj]-tmpXInfPInd[i-1][0].fs[obj]
-            currentGap /= (fmax - fmin)
-            if canBeFrameCenter[tmpXInfPInd[i][1]] and currentGap >= maxGap:
-              maxGap = currentGap
-              currentBestInd = tmpXInfPInd[i][1]
+          for i in range(1, len(tmp_x_inf_p_ind)-1):
+            current_gap = tmp_x_inf_p_ind[i+1][0].fs[obj]-tmp_x_inf_p_ind[i-1][0].fs[obj]
+            current_gap /= (fmax - fmin)
+            if can_be_frame_center[tmp_x_inf_p_ind[i][1]] and current_gap >= max_gap:
+              max_gap = current_gap
+              current_best_ind = tmp_x_inf_p_ind[i][1]

          # Extreme points
-          currentGap = 2*(tmpXInfPInd[len(tmpXInfPInd)-1][0].fs[obj]) - tmpXInfPInd[len(tmpXInfPInd)-2][0].fs[obj]
-          currentGap /= (fmax - fmin)
+          current_gap = 2*(tmp_x_inf_p_ind[len(tmp_x_inf_p_ind)-1][0].fs[obj]) - tmp_x_inf_p_ind[len(tmp_x_inf_p_ind)-2][0].fs[obj]
+          current_gap /= (fmax - fmin)

-          if (canBeFrameCenter[tmpXInfPInd[len(tmpXInfPInd)-1][1]] and currentGap > maxGap):
-            maxGap = currentGap
-            currentBestInd = tmpXInfPInd[len(tmpXInfPInd)-1][1]
+          if (can_be_frame_center[tmp_x_inf_p_ind[len(tmp_x_inf_p_ind)-1][1]] and current_gap > max_gap):
+            max_gap = current_gap
+            current_best_ind = tmp_x_inf_p_ind[len(tmp_x_inf_p_ind)-1][1]

-          currentGap = 2 * tmpXInfPInd[1][0].fs[obj] - tmpXInfPInd[0][0].fs[obj]
-          currentGap /= (fmax -fmin)
+          current_gap = 2 * tmp_x_inf_p_ind[1][0].fs[obj] - tmp_x_inf_p_ind[0][0].fs[obj]
+          current_gap /= (fmax - fmin)

-          if (canBeFrameCenter[tmpXInfPInd[0][1]] and currentGap > maxGap):
-            maxGap = currentGap
-            currentBestInd = tmpXInfPInd[0][1]
-        xInf = self._xInf[currentBestInd]
+          if (can_be_frame_center[tmp_x_inf_p_ind[0][1]] and current_gap > max_gap):
+            max_gap = current_gap
+            current_best_ind = tmp_x_inf_p_ind[0][1]
+        x_inf = self._xInf[current_best_ind]

-    return xInf
+    return x_inf

-  def updateInfWithPoint(self, evalPoint: CandidatePoint = None, evalType: EVAL_TYPE = None, keepAllPoints: bool = None, feasHasBeenUpdated: bool = False):
+  def updateInfWithPoint(self, eval_point: CandidatePoint = None, keep_all_points: bool = None):
     updated = False

-    if evalPoint.evaluated and evalPoint.status != DESIGN_STATUS.FEASIBLE:
+    if eval_point.evaluated and eval_point.status != DESIGN_STATUS.FEASIBLE:
       s: str
-      h = evalPoint.h
+      h = eval_point.h

-      if h == np.inf or (self._hMax < np.inf and h > self._hMax):
+      if h == np.inf or (self._h_max < np.inf and h > self._h_max):
         return False
       else:
         self.setHMax(h)
@@ -911,40 +897,40 @@ def updateInfWithPoint(self, evalPoint: CandidatePoint = None, evalType: EVAL_TYPE = None, keepAllPoints: bool = None, feasHasBeenUpdated: bool = False):
       self._xFilterInf = []
     if len(self._xInf) <= 0:
-      self._xInf.append(evalPoint)
-      self._xFilterInf.append(evalPoint)
+      self._xInf.append(eval_point)
+      self._xFilterInf.append(eval_point)
       self._currentIncumbentInf = self._xInf[0]
       updated = True
     else:
       insert = True
-      isInXinfFilter: List[bool] = [True] * len(self._xFilterInf)
-      currentInd = 0
-      for xFilterInf in self._xFilterInf:
-        compFlag = evalPoint.__comMO__(xFilterInf)
-        if compFlag == COMPARE_TYPE.DOMINATED:
+      is_in_x_inf_filter: List[bool] = [True] * len(self._xFilterInf)
+      current_ind = 0
+      for x_filter_inf in self._xFilterInf:
+        comp_flag = eval_point.__comp_mo__(x_filter_inf)
+        if comp_flag == COMPARE_TYPE.DOMINATED:
           insert = False
           break
-        elif compFlag == COMPARE_TYPE.DOMINATING:
+        elif comp_flag == COMPARE_TYPE.DOMINATING:
           updated = True
-          isInXinfFilter[currentInd] = False
-        elif compFlag == COMPARE_TYPE.EQUAL:
-          if (not keepAllPoints):
+          is_in_x_inf_filter[current_ind] = False
+        elif comp_flag == COMPARE_TYPE.EQUAL:
+          if (not keep_all_points):
             insert = False
             break
-          if self.findEvalPoint(self._xFilterInf, evalPoint)[0]:
+          if self.findEvalPoint(self._xFilterInf, eval_point)[0]:
            insert = False
          else:
            updated = True
          break
-        currentInd += 1
+        current_ind += 1

       if insert:
        indices_to_remove = []
        for i in range(len(self._xFilterInf)):
-          if not isInXinfFilter[i]:
+          if not is_in_x_inf_filter[i]:
            indices_to_remove.append(i)
-        self._xFilterInf.append(evalPoint)
+        self._xFilterInf.append(eval_point)
        for index in sorted(indices_to_remove, reverse=True):
          del self._xFilterInf[index]
@@ -952,26 +938,26 @@
        self._xFilterInf = self.non_dominated_sort(self._xFilterInf)

        insert = True
-        currentInd = 0
-        isInXinf = [True * self._xInf]
+        current_ind = 0
+        is_in_x_inf = [True] * len(self._xInf)

-        for xInf in self._xInf:
-          compFlag = evalPoint.__comMO__(xInf, True)
-          if compFlag == COMPARE_TYPE.DOMINATED:
+        for x_inf in self._xInf:
+          comp_flag = eval_point.__comp_mo__(x_inf, True)
+          if comp_flag == COMPARE_TYPE.DOMINATED:
            insert = False
            break
-          elif compFlag == COMPARE_TYPE.DOMINATING or evalPoint.__comMO__(xInf):
+          elif comp_flag == COMPARE_TYPE.DOMINATING or eval_point.__comp_mo__(x_inf):
            updated = True
-            isInXinf[currentInd] = False
-          currentInd += 1
+            is_in_x_inf[current_ind] = False
+          current_ind += 1

        if insert:
          indices_to_remove = []
          for i in range(len(self._xInf)):
-            if not isInXinf[i]:
+            if not is_in_x_inf[i]:
              indices_to_remove.append(i)
            updated = True
-          self._xInf.append(evalPoint)
+          self._xInf.append(eval_point)
          for index in sorted(indices_to_remove, reverse=True):
            del self._xInf[index]
@@ -991,7 +977,7 @@ def non_dominated_sort(self, points: List[CandidatePoint] = None):
     for i, p in enumerate(points):
       for j, q in enumerate(points):
-        if i != j and p.__comMO__(q) == COMPARE_TYPE.DOMINATED:
+        if i != j and p.__comp_mo__(q) == COMPARE_TYPE.DOMINATED:
           dominated_count[i] += 1

       if dominated_count[i] == 0:
@@ -1006,55 +992,55 @@
     return sorted_points

-  def updateFeasWithPoint(self, evalPoint: CandidatePoint = None, evalType: EVAL_TYPE = None, keepAllPoints: bool = None):
+  def updateFeasWithPoint(self, eval_point: CandidatePoint = None, keep_all_points: bool = None):
     updated = False

-    if evalPoint.evaluated and evalPoint.status == DESIGN_STATUS.FEASIBLE:
-      if evalPoint.fs.size != self._nobj:
-        raise IOError(f"Barrier update: number of objectives is equal to {self._nobj}.
Trying to add this point with number of objectives {evalPoint.fs.size}") + if eval_point.evaluated and eval_point.status == DESIGN_STATUS.FEASIBLE: + if eval_point.fs.size != self._nobj: + raise IOError(f"Barrier update: number of objectives is equal to {self._nobj}. Trying to add this point with number of objectives {eval_point.fs.size}") if self._xFeas is None: self._xFeas = [] if len(self._xFeas) == 0: - self._xFeas.append(evalPoint) + self._xFeas.append(eval_point) updated = True self._currentIncumbentFeas = self._xFeas[0] else: insert = True - keepInXFeas = [True] * len(self._xFeas) - currentInd = 0 + keep_in_x_feas = [True] * len(self._xFeas) + current_ind = 0 for xf in self._xFeas: - compFlag: COMPARE_TYPE = evalPoint.__comMO__(xf) - if compFlag == COMPARE_TYPE.DOMINATED: + comp_flag: COMPARE_TYPE = eval_point.__comp_mo__(xf) + if comp_flag == COMPARE_TYPE.DOMINATED: insert = False break - elif compFlag == COMPARE_TYPE.DOMINATING: + elif comp_flag == COMPARE_TYPE.DOMINATING: updated = True - keepInXFeas[currentInd] = False - elif compFlag == COMPARE_TYPE.EQUAL: - if not keepAllPoints: + keep_in_x_feas[current_ind] = False + elif comp_flag == COMPARE_TYPE.EQUAL: + if not keep_all_points: insert = False break - if self.findEvalPoint(self._xFeas, evalPoint)[0]: + if self.findEvalPoint(self._xFeas, eval_point)[0]: insert = False else: updated = True break - currentInd += 1 + current_ind += 1 if insert: - currentInd = 0 + current_ind = 0 for cp in self._xFeas: - if cp.__comMO__(evalPoint) == COMPARE_TYPE.DOMINATED: - self._xFeas.pop(currentInd) - currentInd += 1 + if cp.__comp_mo__(eval_point) == COMPARE_TYPE.DOMINATED: + self._xFeas.pop(current_ind) + current_ind += 1 updated = True - dir = copy.deepcopy(evalPoint.direction) - if dir is not None: - evalPoint.mesh.enlargeDeltaFrameSize(direction=dir) + my_dir = copy.deepcopy(eval_point.direction) + if my_dir is not None: + eval_point.mesh.enlargeDeltaFrameSize(direction=my_dir) - self._xFeas.append(evalPoint) + self._xFeas.append(eval_point) # Sort according to lexicographic order. self._xFeas = self.non_dominated_sort(self._xFeas) diff --git a/src/OMADS/Cache.py b/src/OMADS/Cache.py index 8309e9a..a9d625b 100644 --- a/src/OMADS/Cache.py +++ b/src/OMADS/Cache.py @@ -1,20 +1,20 @@ import copy from dataclasses import dataclass, field import operator -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional import numpy as np from .CandidatePoint import CandidatePoint -from ._globals import * +from ._globals import DESIGN_STATUS @dataclass class Cache: """ In computing, a hash table (hash map) is a data structure that implements an associative array abstract data type, a structure that can map keys to values. A hash table uses a hash function to compute an index, also called a hash code, into an array of buckets or slots, from which the desired value can be found. 
During lookup, the key is hashed and the resulting hash indicates where the corresponding value is stored.""" - _hash_ID: List[int] = field(default_factory=list) - _best_hash_ID: List[int] = field(default_factory=list) + _hash_id: List[int] = field(default_factory=list) + _best_hash_id: List[int] = field(default_factory=list) _cache_dict: Dict[Any, Any] = field(default_factory=lambda: {}) _n_dim: int = 0 - _isPareto: bool = False - ND_points: List[CandidatePoint] = None + _is_pareto: bool = False + nd_points: Optional[List[CandidatePoint]] = None @property def cache_dict(self)->Dict: @@ -30,24 +30,24 @@ def hash_id(self)->List[int]: :rtype: List[int] """ - return self._hash_ID + return self._hash_id @hash_id.setter def hash_id(self, other: CandidatePoint): - if hash(tuple(other.coordinates)) not in self._hash_ID: - self._hash_ID.append(hash(tuple(other.coordinates))) + if hash(tuple(other.coordinates)) not in self._hash_id: + self._hash_id.append(hash(tuple(other.coordinates))) @property - def best_hash_ID(self)->List[int]: + def best_hash_id(self)->List[int]: """A getter to return the list of hash IDs :rtype: List[int] """ - return self._best_hash_ID + return self._best_hash_id - @best_hash_ID.setter - def best_hash_ID(self, id: int): - self._best_hash_ID.append(id) + @best_hash_id.setter + def best_hash_id(self, id: int): + self._best_hash_id.append(id) @property def size(self)->int: @@ -93,53 +93,48 @@ def add_to_cache(self, x: CandidatePoint): if not isinstance(x, list): hash_value: int = hash(tuple(x.coordinates)) self._cache_dict[hash_value] = x - self._hash_ID.append(hash(tuple(x.coordinates))) + self._hash_id.append(hash(tuple(x.coordinates))) else: for i in range(len(x)): hash_value: int = hash(tuple(x[i].coordinates)) self._cache_dict[hash_value] = x[i] - self._hash_ID.append(hash(tuple(x[i].coordinates))) + self._hash_id.append(hash(tuple(x[i].coordinates))) def add_to_best_cache(self, x: CandidatePoint): - if not self._isPareto: - if x.signature in self._best_hash_ID: + if not self._is_pareto: + if x.signature in self._best_hash_id: return - if len(self._best_hash_ID) <= 0 and len(self._cache_dict) >= 1: - self._best_hash_ID.append(list(self.cache_dict.keys())[0]) + if len(self._best_hash_id) <= 0 and len(self._cache_dict) >= 1: + self._best_hash_id.append(list(self.cache_dict.keys())[0]) if not isinstance(x, list): if len(self._cache_dict) > 1: - is_infeas_dom: bool = (x.status == DESIGN_STATUS.INFEASIBLE and (x.h < self._cache_dict[self._best_hash_ID[-1]].h) ) - is_feas_dom: bool = (x.status == DESIGN_STATUS.FEASIBLE and x.fobj < self._cache_dict[self._best_hash_ID[-1]].fobj) + is_infeas_dom: bool = (x.status == DESIGN_STATUS.INFEASIBLE and (x.h < self._cache_dict[self._best_hash_id[-1]].h) ) + is_feas_dom: bool = (x.status == DESIGN_STATUS.FEASIBLE and x.fobj < self._cache_dict[self._best_hash_id[-1]].fobj) else: is_infeas_dom: bool = False is_feas_dom: bool = False if is_infeas_dom or is_feas_dom: self._n_dim = len(x.coordinates) - self._best_hash_ID.append(x.signature) + self._best_hash_id.append(x.signature) else: for i in range(len(x)): - is_infeas_dom: bool = (x[i].status == DESIGN_STATUS.INFEASIBLE and (x[i].h < self._cache_dict[self._best_hash_ID[0]].h) ) - is_feas_dom: bool = (x[i].status == DESIGN_STATUS.FEASIBLE and x[i].fobj < self._cache_dict[self._best_hash_ID[0]].fobj) + is_infeas_dom: bool = (x[i].status == DESIGN_STATUS.INFEASIBLE and (x[i].h < self._cache_dict[self._best_hash_id[0]].h) ) + is_feas_dom: bool = (x[i].status == DESIGN_STATUS.FEASIBLE and 
x[i].fobj < self._cache_dict[self._best_hash_id[0]].fobj)
           if len(self._cache_dict) == 1 or is_infeas_dom or is_feas_dom:
             self._n_dim = len(x[i].coordinates)
-            self._best_hash_ID.append(self._hash_ID[-1])
+            self._best_hash_id.append(self._hash_id[-1])
     else:
-      self.ND_points = copy.deepcopy(x)
-      self._best_hash_ID = []
-      for i in range(len(self.ND_points)):
-        self._best_hash_ID.append(self.ND_points[i].signature)
+      self.nd_points = copy.deepcopy(x)
+      self._best_hash_id = []
+      for i in range(len(self.nd_points)):
+        self._best_hash_id.append(self.nd_points[i].signature)

   def get_best_cache_points(self, nsamples):
     """ Get best points """
     temp = np.zeros((nsamples, self._n_dim))
     index = 0
-    if not self._isPareto:
-
-      # for i in range(len(self._best_hash_ID)-1, len(self._best_hash_ID) - nsamples, -1):
-      #   temp[index, :] = self._cache_dict[self._best_hash_ID[i]].coordinates
-      #   index += 1
-
+    if not self._is_pareto:
       cache_temp = dict(sorted(self._cache_dict.items(), key=operator.itemgetter(1)))

       for k in cache_temp:
@@ -149,20 +144,20 @@
         else:
           break
     else:
-      for k in self.ND_points:
+      for k in self.nd_points:
         if index < len(temp):
           temp[index, :] = k.coordinates
           index += 1
         else:
           break
     return temp

   def get_cache_points(self):
     """ Get best points """
-    temp = np.zeros((len(self._hash_ID)-1, self._n_dim))
-    for i in range(1, len(self._hash_ID)):
-      temp[i-1, :] = self._cache_dict[self._hash_ID[i]].coordinates
+    temp = np.zeros((len(self._hash_id)-1, self._n_dim))
+    for i in range(1, len(self._hash_id)):
+      temp[i-1, :] = self._cache_dict[self._hash_id[i]].coordinates
     return temp

   def get_point(self, key):
diff --git a/src/OMADS/CandidatePoint.py b/src/OMADS/CandidatePoint.py
index c1838b8..b2fe7b7 100644
--- a/src/OMADS/CandidatePoint.py
+++ b/src/OMADS/CandidatePoint.py
@@ -26,7 +26,7 @@
 from typing import List, Dict, Any, Optional
 from numpy import sum, subtract, add, maximum, power, inf
 import numpy as np
-from ._globals import *
+from ._globals import DType, BARRIER_TYPES, MPP, DESIGN_STATUS, COMPARE_TYPE
 from .Gmesh import Gmesh
 from .Point import Point
@@ -67,42 +67,42 @@ class CandidatePoint:
   # hash signature, in the cache memory
   _signature: int = 0
   # numpy double data type precision
-  _dtype: DType = None
+  _dtype: Optional[DType] = None
   # Variables type
-  _var_type: List[int] = None
+  _var_type: Optional[List[int]] = None
   # Discrete set
-  _sets: Dict = None
+  _sets: Optional[Dict] = None

-  _var_link: List[str] = None
+  _var_link: Optional[List[str]] = None

   _status: DESIGN_STATUS = DESIGN_STATUS.UNEVALUATED

-  _constraints_type: List[BARRIER_TYPES] = None
+  _constraints_type: Optional[List[BARRIER_TYPES]] = None

   _is_EB_passed: bool = False

-  _LAMBDA: List[float] = None
+  _LAMBDA: Optional[List[float]] = None

   _RHO: float = MPP.RHO.value

   _hmax: float = 1.
   _hmin: float = inf

-  Eval_time: float = 0.
+  eval_time: float = 0.
source: str = "Current run" - Model: str = "Simulation" + model: str = "Simulation" - _hzero: float = None + _hzero: Optional[float] = None - _mesh: Gmesh = None + _mesh: Optional[Gmesh] = None - _direction: Point = None + _direction: Optional[Point] = None - _fs: Point = None + _fs: Optional[Point] = None - evalNo: int = 0 + eval_no: int = 0 def __post_init__(self): self._dtype = DType() @@ -147,30 +147,30 @@ def hzero(self, value: Any) -> Any: @property - def hmax(self) -> float: - if self._hmax == 0.: + def h_max(self) -> float: + if np.isclose(self._hmax, 0., rtol=1e-09, atol=1e-09): return self._dtype.zero return self._hmax - @hmax.setter - def hmax(self, value: float): + @h_max.setter + def h_max(self, value: float): self._hmax = value @property - def RHO(self) -> float: + def rho(self) -> float: return self._RHO - @RHO.setter - def RHO(self, value: float): + @rho.setter + def rho(self, value: float): self._RHO = value @property - def LAMBDA(self) -> float: + def lambda_multipliers(self) -> float: return self._LAMBDA - @LAMBDA.setter - def LAMBDA(self, value: float): + @lambda_multipliers.setter + def lambda_multipliers(self, value: float): self._LAMBDA = value @@ -305,16 +305,11 @@ def f(self): return self._f @f.setter - def f(self, val: auto): + def f(self, val: Any): if isinstance(val, list): self._f = val else: self._f = [val] - # if self.fs is None or self.fs.size <= 0: - # self.fs = Point(len(self.f)) - # self.fs.coordinates = self._f - # else: - # self.fs.coordinates = self.f @f.deleter def f(self): @@ -325,7 +320,7 @@ def fobj(self): return self._freal @fobj.setter - def fobj(self, other: auto): + def fobj(self, other: Any): if isinstance(other, list): self._freal = other else: @@ -394,8 +389,8 @@ def __eq__(self, other) -> bool: and self.f is other.f and self.h is other.h def __lt__(self, other): - return (other.h > (self.hmax if self._is_EB_passed else self._dtype.zero) > self.__dh__(other=other)) or \ - (((self.hmax if self._is_EB_passed else self._dtype.zero) >= self.h >= 0.0) and + return (other.h > (self.h_max if self._is_EB_passed else self._dtype.zero) > self.__dh__(other=other)) or \ + (((self.h_max if self._is_EB_passed else self._dtype.zero) >= self.h >= 0.0) and max(self.__df__(other=other)) < 0) def __le__(self, other): @@ -440,14 +435,14 @@ def __eval__(self, bb_output): self.c_ineq = [self.c_ineq] self.evaluated = True """ Check the multiplier matrix """ - if self.LAMBDA is None: - self.LAMBDA = [] + if self.lambda_multipliers is None: + self.lambda_multipliers = [] for _ in range(len(self.c_ineq)): - self.LAMBDA.append(MPP.LAMBDA.value) + self.lambda_multipliers.append(MPP.LAMBDA.value) else: - if len(self.c_ineq) != len(self.LAMBDA): - for _ in range(len(self.LAMBDA), len(self.c_ineq)): - self.LAMBDA.append(MPP.LAMBDA.value) + if len(self.c_ineq) != len(self.lambda_multipliers): + for _ in range(len(self.lambda_multipliers), len(self.c_ineq)): + self.lambda_multipliers.append(MPP.LAMBDA.value) """ Check and adapt the barriers matrix""" if self.constraints_type is not None: if len(self.c_ineq) != len(self.constraints_type): @@ -486,8 +481,8 @@ def __eval__(self, bb_output): if hPB > self.hzero: self.status = DESIGN_STATUS.INFEASIBLE self.h = copy.deepcopy(hPB) - if hPB < self.hmax: - self.hmax = copy.deepcopy(hPB) + if hPB < self.h_max: + self.h_max = copy.deepcopy(hPB) else: self.is_EB_passed = False self.status = DESIGN_STATUS.INFEASIBLE @@ -495,31 +490,28 @@ def __eval__(self, bb_output): self.__penalize__(extreme= True) return """ Aggregate all constraints 
""" - # self.h = sum(power(maximum(self.c_ineq, self._dtype.zero, - # dtype=self._dtype.dtype), 2, dtype=self._dtype.dtype)) if np.isnan(self.h) or np.any(np.isnan(self.c_ineq)): self.h = inf self.status = DESIGN_STATUS.ERROR """ Penalize relaxable constraints violation """ if any(np.isnan(self.f)) or self.h > self.hzero: - if self.h > np.round(self.hmax, 2): + if self.h > np.round(self.h_max, 2): self.__penalize__(extreme=False) self.status = DESIGN_STATUS.INFEASIBLE else: - self.hmax = copy.deepcopy(self.h) + self.h_max = copy.deepcopy(self.h) self.status = DESIGN_STATUS.FEASIBLE def __penalize__(self, extreme: bool=True): - if len(self.cPB) > len(self.LAMBDA): - self.LAMBDA += [self.LAMBDA[-1]] * abs(len(self.LAMBDA)-len(self.cPB)) - if 0 < len(self.cPB) < len(self.LAMBDA): - del self.LAMBDA[len(self.cPB):] + if len(self.cPB) > len(self.lambda_multipliers): + self.lambda_multipliers += [self.lambda_multipliers[-1]] * abs(len(self.lambda_multipliers)-len(self.cPB)) + if 0 < len(self.cPB) < len(self.lambda_multipliers): + del self.lambda_multipliers[len(self.cPB):] if extreme: - # self.f = [inf]*len(self.f) self.hmin = inf else: - self.hmin = np.dot(self.LAMBDA, self.cPB) + ((1/(2*self.RHO)) * self.h if self.RHO > 0. else np.inf) + self.hmin = np.dot(self.lambda_multipliers, self.cPB) + ((1/(2*self.rho)) * self.h if self.rho > 0. else np.inf) self.f = [self.fobj[i] * (1./len(self.fobj)) + self.hmin for i in range(len(self.fobj))] def __is_duplicate__(self, other) -> bool: @@ -537,15 +529,15 @@ def __df__(self, other): def __dh__(self, other): return subtract(self.h, other.h, dtype=self._dtype.dtype) - def __comMO__(self, other, onlyfvalues: bool = False): - compareFlag: COMPARE_TYPE = COMPARE_TYPE.UNDEFINED + def __comp_mo__(self, other, onlyfvalues: bool = False): + compare_flag: COMPARE_TYPE = COMPARE_TYPE.UNDEFINED f1 = self.fs h1 = self.h f2 = other.fs h2 = other.h if f1.size != f2.size: - return compareFlag + return compare_flag # // The comparison code has been adapted from # // Jaszkiewicz, A., & Lust, T. (2018). 
@@ -563,9 +555,9 @@ def __comMO__(self, other, onlyfvalues: bool = False): if isworse and isbetter: break if isworse: - compareFlag = COMPARE_TYPE.INDIFFERENT if isbetter else COMPARE_TYPE.DOMINATED + compare_flag = COMPARE_TYPE.INDIFFERENT if isbetter else COMPARE_TYPE.DOMINATED else: - compareFlag = COMPARE_TYPE.DOMINATING if isbetter else COMPARE_TYPE.EQUAL + compare_flag = COMPARE_TYPE.DOMINATING if isbetter else COMPARE_TYPE.EQUAL elif (self.status != DESIGN_STATUS.FEASIBLE and other.status != DESIGN_STATUS.FEASIBLE): if h1 != np.inf: isbetter = False @@ -583,9 +575,9 @@ def __comMO__(self, other, onlyfvalues: bool = False): if h2 < h1: isworse = True if isworse: - compareFlag = COMPARE_TYPE.INDIFFERENT if isbetter else COMPARE_TYPE.DOMINATED + compare_flag = COMPARE_TYPE.INDIFFERENT if isbetter else COMPARE_TYPE.DOMINATED else: - compareFlag = COMPARE_TYPE.DOMINATING if isbetter else COMPARE_TYPE.EQUAL + compare_flag = COMPARE_TYPE.DOMINATING if isbetter else COMPARE_TYPE.EQUAL - return compareFlag + return compare_flag diff --git a/src/OMADS/Directions.py b/src/OMADS/Directions.py index f36e0d1..15b1297 100644 --- a/src/OMADS/Directions.py +++ b/src/OMADS/Directions.py @@ -23,19 +23,13 @@ import copy -import time from .CandidatePoint import CandidatePoint from .Point import Point -# from .Barriers import Barrier, BarrierMO, BarrierBase -from ._common import logger -from dataclasses import dataclass, field -from typing import List, Dict, Any -from .Gmesh import Gmesh -from .Cache import Cache -from .Evaluator import Evaluator -from ._globals import * -from .Optimizer import GenericSamplerBase, ConstraintsRelaxationParameters - +from dataclasses import dataclass +from typing import List, Dict +from ._globals import DType, VAR_TYPE, BARRIER_TYPES, SUCCESS_TYPES, DESIGN_STATUS, MSG_TYPE +from .Optimizer import GenericSamplerBase +import numpy as np @dataclass @@ -256,7 +250,7 @@ def generate_dir(self): def ran(self): return np.random.random(self._n).astype(dtype=self._dtype.dtype) - def create_housholder(self, is_rich: bool, domain: List[int] = None, is_oneDir: bool=False) -> np.ndarray: + def create_housholder(self, is_rich: bool, domain: List[int] = None, is_one_dir: bool=False) -> np.ndarray: """Create householder matrix :param is_rich: A flag that indicates if the rich direction option is enabled @@ -301,7 +295,7 @@ def create_housholder(self, is_rich: bool, domain: List[int] = None, is_oneDir: if domain[j] != VAR_TYPE.REAL: hhm[i][j] = int(np.floor(-1 + 2**self.mesh.getdeltaMeshSize().coordinates[i])) - if is_oneDir: + if is_one_dir: return hhm else: hhm = np.vstack((hhm, -hhm)) @@ -324,7 +318,6 @@ def create_poll_set(self, hhm: np.ndarray, ub: List[float], lb: List[float], it: del self.poll_set del self.poll_dirs if is_prim: - # del self.poll_set temp = np.add(hhm, np.array(self.xmin.coordinates), dtype=self._dtype.dtype) else: temp = np.add(hhm, np.array(self.x_sc.coordinates), dtype=self._dtype.dtype) @@ -381,8 +374,6 @@ def scale(self, ub: List[float], lb: List[float], factor: float = 10.0): def directional_scaling(self, p: CandidatePoint, npts: int = 5) -> List[CandidatePoint]: lb = self.lb ub = self.ub - # np.random.seed(self.seed) - # scaling = [self.mesh.msize, 2*self.mesh.msize] scaling = self.mesh.getdeltaMeshSize().coordinates p_trials: List[CandidatePoint] = [0]*len(scaling) for k in range(len(scaling)): @@ -402,7 +393,6 @@ def gauss_perturbation(self, p: CandidatePoint, npts: int = 5) -> List[Candidate # np.random.seed(self.seed) cs = np.zeros((npts, p.n_dimensions)) 
pts: List[CandidatePoint] = [0] * npts - mp = 1. for k in range(p.n_dimensions): if p.var_type[k] == VAR_TYPE.REAL: cs[:, k] = np.random.normal(loc=p.coordinates[k], scale=self.mesh.getdeltaMeshSize().coordinates[k], size=(npts,)) @@ -422,141 +412,14 @@ def gauss_perturbation(self, p: CandidatePoint, npts: int = 5) -> List[Candidate return pts -# Deprecated evaluation routine - # def evaluate_candidate_point(self, index: int): - # """ Evaluate the point i on the poll set """ - # """ Set the dynamic index for this point """ - # tic = time.perf_counter() - # self.point_index = index - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg=f"Evaluate poll point # {index}...", msg_type=MSG_TYPE.INFO) - # """ Initialize stopping and success conditions""" - # stop: bool = False - # """ Copy the point i to a trial one """ - # xtry: CandidatePoint = self.poll_set[index] - # """ This is a success bool parameter used for - # filtering out successful designs to be printed - # in the output results file""" - # success = SUCCESS_TYPES.US - - # """ Check the cache memory; check if the trial point - # is a duplicate (it has already been evaluated) """ - # unique_p_trials: int = 0 - # is_duplicate: bool = (self.check_cache and self.hashtable.size > 0 and self.hashtable.is_duplicate(xtry)) - # while is_duplicate and unique_p_trials < 5: - # if self.display: - # print(f'Cache hit. Trial# {unique_p_trials}: Looking for a non-duplicate along the poll direction where the duplicate point is located...') - # if xtry.var_type is None: - # if self.xmin.var_type is not None: - # xtry.var_type = self.xmin.var_type - # else: - # xtry.var_type = [VAR_TYPE.CONTINUOUS] * len(self.xmin.coordinates) - # xtries: List[Point] = self.directional_scaling(p=xtry, npts=len(self.poll_dirs)*2) - # for tr in range(len(xtries)): - # is_duplicate = self.hashtable.is_duplicate(xtries[tr]) - # if is_duplicate: - # continue - # else: - # xtry = copy.deepcopy(xtries[tr]) - # break - # unique_p_trials += 1 - - # if (is_duplicate): - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg="Cache hit ... Failed to find a non-duplicate alternative.", msg_type=MSG_TYPE.INFO) - # if self.display: - # print("Cache hit ... 
Failed to find a non-duplicate alternative.") - # stop = True - # bb_eval = copy.deepcopy(self.bb_eval) - # xtry.fobj = [np.inf] * self.mesh._pbParams.nobj - # psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - # return [stop, index, self.bb_handle.bb_eval, success, psize, xtry] - - # """ Evaluation of the blackbox; get output responses """ - # if xtry.sets is not None and isinstance(xtry.sets,dict): - # p: List[Any] = [] - # for i in range(len(xtry.var_type)): - # if (xtry.var_type[i] == VAR_TYPE.DISCRETE or xtry.var_type[i] == VAR_TYPE.CATEGORICAL) and xtry.var_link[i] is not None: - # p.append(xtry.sets[xtry.var_link[i]][int(xtry.coordinates[i])]) - # else: - # p.append(xtry.coordinates[i]) - # self.bb_output, _ = self.bb_handle.eval(p) - # else: - # self.bb_output, _ = self.bb_handle.eval(xtry.coordinates) - - # """ - # Evaluate the poll point: - # - Set multipliers and penalty - # - Evaluate objective function - # - Evaluate constraint functions (can be an empty vector) - # - Aggregate constraints - # - Penalize the objective (extreme barrier) - # """ - # xtry.LAMBDA = copy.deepcopy(self.constraints_RP.LAMBDA) - # xtry.RHO = copy.deepcopy(self.constraints_RP.RHO) - # xtry.hmax = copy.deepcopy(self.constraints_RP.hmax) - # xtry.constraints_type = copy.deepcopy(self.constraints_RP.constraints_type) - # xtry.__eval__(self.bb_output) - # if not self.hashtable._isPareto: - # self.hashtable.add_to_best_cache(xtry) - # self.constraints_RP.hmax = copy.deepcopy(xtry.hmax) - # toc = time.perf_counter() - # xtry.Eval_time = (toc - tic) - - - # """ Update multipliers and penalty """ - # if self.constraints_RP.LAMBDA == None: - # self.constraints_RP.LAMBDA = self.xmin.LAMBDA - # if len(xtry.cPB) > len(self.constraints_RP.LAMBDA): - # self.constraints_RP.LAMBDA += [self.constraints_RP.LAMBDA[-1]] * abs(len(self.constraints_RP.LAMBDA)-len(xtry.cPB)) - # if len(xtry.cPB) < len(self.constraints_RP.LAMBDA): - # del self.constraints_RP.LAMBDA[len(xtry.cPB):] - # for i in range(len(xtry.cPB)): - # if self.constraints_RP.RHO == 0.: - # self.constraints_RP.RHO = 0.001 - # self.constraints_RP.LAMBDA[i] = copy.deepcopy(max(self.dtype.zero, self.constraints_RP.LAMBDA[i] + (1/self.constraints_RP.RHO)*xtry.cPB[i])) - - # if xtry.status == DESIGN_STATUS.FEASIBLE: - # self.constraints_RP.RHO *= copy.deepcopy(0.5) - - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg=f"Completed evaluation of point # {index} in {xtry.Eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. 
\n", msg_type=MSG_TYPE.INFO) - - # # if xtry < self.xmin: - # # self.success = True - # # success = True - - # """ Add to the cache memory """ - # if self.store_cache: - # self.hashtable.hash_id = xtry - - # # if self.save_results or self.display: - # self.bb_eval = self.bb_handle.bb_eval - # self.psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - # psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - - - - # if success == SUCCESS_TYPES.FS and self.opportunistic and self.iter > 1: - # stop = True - - # """ Check stopping criteria """ - # if self.bb_eval >= self.eval_budget: - # self.terminate = True - # stop = True - # return [stop, index, self.bb_handle.bb_eval, success, psize, xtry] - - # return [stop, index, self.bb_handle.bb_eval, success, psize, xtry] - - def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint]): - # if len(self.hashtable._best_hash_ID) <= 0: + def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint] = None): # self.hashtable._best_hash_ID.append(self.xmin.signature) for xtry in x_cps: - if self.log is not None and self.log.isVerbose: - self.log.log_msg(msg=f"Completed evaluation of point # {xtry.evalNo} in {xtry.Eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. \n", msg_type=MSG_TYPE.INFO) + if self.log is not None and self.log.is_verbose: + self.log.log_msg(msg=f"Completed evaluation of point # {xtry.eval_no} in {xtry.eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. \n", msg_type=MSG_TYPE.INFO) """ Add to the cache memory """ self.hashtable.add_to_cache(xtry) - if not self.hashtable._isPareto: + if not self.hashtable._is_pareto: self.hashtable.add_to_best_cache(xtry) if self.store_cache and xtry.signature not in self.hashtable.hash_id: self.hashtable.hash_id = xtry @@ -567,9 +430,9 @@ def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint]): def omit_duplicates(self): temp: List[CandidatePoint] = [] for xtry in self.poll_set: - is_dup = xtry.signature in self.hashtable.hash_id if not self.hashtable._isPareto else self.hashtable.is_duplicate(xtry) + is_dup = xtry.signature in self.hashtable.hash_id if not self.hashtable._is_pareto else self.hashtable.is_duplicate(xtry) is_duplicate: bool = (self.check_cache and self.hashtable.size > 0 and is_dup) - # TODO: The commented logic below needs more investigation to make sure that it doesn't hurt. + # COMPLETED: The commented logic below needs more investigation to make sure that it doesn't hurt. # while is_duplicate and unique_p_trials < 5: # if self.display: # print(f'Cache hit. Trial# {unique_p_trials}: Looking for a non-duplicate along the poll direction where the duplicate point is located...') @@ -588,7 +451,7 @@ def omit_duplicates(self): # break # unique_p_trials += 1 if (is_duplicate): - if self.log is not None and self.log.isVerbose: + if self.log is not None and self.log.is_verbose: self.log.log_msg(msg="Cache hit ... Failed to find a non-duplicate alternative.", msg_type=MSG_TYPE.INFO) if self.display: print("Cache hit ... 
Failed to find a non-duplicate alternative.")
@@ -608,11 +471,9 @@ def master_updates(self, x: List[CandidatePoint], peval, save_all_best: bool = F
       """ Check success conditions """
       is_infeas_dom: bool = (xtry.status == DESIGN_STATUS.INFEASIBLE and (xtry.h < self.xmin.h) )
       is_feas_dom: bool = (xtry.status == DESIGN_STATUS.FEASIBLE and xtry.fobj < self.xmin.fobj)
-      is_infea_improving: bool = (self.xmin.status == DESIGN_STATUS.FEASIBLE and xtry.status == DESIGN_STATUS.INFEASIBLE and (xtry.fobj < self.xmin.fobj and xtry.h <= self.xmin.hmax))
-      is_feas_improving: bool = (self.xmin.status == DESIGN_STATUS.INFEASIBLE and xtry.status == DESIGN_STATUS.FEASIBLE and xtry.fobj < self.xmin.fobj)
       success = SUCCESS_TYPES.US
-      if ((is_infeas_dom or is_feas_dom)):
+      if (is_infeas_dom or is_feas_dom):
         self.success = SUCCESS_TYPES.FS
         self.n_successes += 1
         success = SUCCESS_TYPES.FS # <- This redundant variable is important
@@ -622,7 +482,7 @@ def master_updates(self, x: List[CandidatePoint], peval, save_all_best: bool = F
         del self._xmin
         self._xmin = CandidatePoint()
         self._xmin = copy.deepcopy(xtry)
-        self.constraints_RP.hmax = copy.deepcopy(xtry.hmax)
+        self.constraints_RP.hmax = copy.deepcopy(xtry.h_max)
         if self.display:
           if self._dtype.dtype == np.float64:
             print(f"Success: fmin = {self.xmin.f} (hmin = {self.xmin.h:.15})")
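
Before the patch moves on to Evaluator.py: the success test kept in `master_updates` above accepts a trial point either when it is infeasible but strictly less violating than the incumbent, or feasible with a strictly better objective. A hedged sketch of that predicate, using plain floats instead of `CandidatePoint`, so the `Trial` type and its field names are assumptions, not OMADS classes:

```python
# Illustrative sketch (not the OMADS API) of the two acceptance tests in
# master_updates; `h` is the aggregated constraint violation and `fobj`
# the raw objective value.
from dataclasses import dataclass

@dataclass
class Trial:
  fobj: float
  h: float  # h == 0.0 is taken to mean feasible in this sketch

def improves(xtry: Trial, xmin: Trial) -> bool:
  feasible = xtry.h == 0.0
  is_infeas_dom = (not feasible) and xtry.h < xmin.h  # less violating
  is_feas_dom = feasible and xtry.fobj < xmin.fobj    # better objective
  return is_infeas_dom or is_feas_dom

xmin = Trial(fobj=3.2, h=0.0)
print(improves(Trial(fobj=2.9, h=0.0), xmin))  # True: feasible, lower f
print(improves(Trial(fobj=1.0, h=0.4), xmin))  # False: infeasible, h worse
```
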
diff --git a/src/OMADS/Evaluator.py b/src/OMADS/Evaluator.py
index 93921be..2db17c0 100644
--- a/src/OMADS/Evaluator.py
+++ b/src/OMADS/Evaluator.py
@@ -1,10 +1,11 @@
 import copy
 import importlib
+import platform
 import time
-from ._globals import *
+from ._globals import DType, VAR_TYPE, DESIGN_STATUS, BB_EVAL_STATUS
 import os
-from typing import List, Dict, Any, Optional, Callable
-from numpy import sum, subtract, add, maximum, minimum, power, inf
+from typing import List, Any, Optional, Callable
+from numpy import inf
 import numpy as np
 from inspect import signature
 import concurrent.futures
@@ -15,6 +16,7 @@
 from .Options import Options
 from .PostProcess import PostMADS
 from .Point import Point
+from dataclasses import dataclass

 @dataclass
 class Evaluator:
@@ -30,21 +32,21 @@ class Evaluator:
   :param timeout: The time out of the evaluation process
   """
   blackbox: Any = "rosenbrock"
-  commandOptions: Any = None
+  command_options: Any = None
   internal: Optional[str] = None
   path: str = "..\\tests\\Rosen"
   input: str = "input.inp"
   output: str = "output.out"
-  constants: List = None
+  constants: Optional[List] = None
   bb_eval: int = 0
-  _dtype: DType = None
+  _dtype: Optional[DType] = None
   timeout: float = 1000000.
-  local_exec_jobs: List[str] = None
-  candidates: List[Point] = None
-  directions: List[Point] = None
+  local_exec_jobs: Optional[List[str]] = None
+  candidates: Optional[List[Point]] = None
+  directions: Optional[List[Point]] = None
   mesh: List[Any] = None
-  constraintsRelaxation: dict = None
-  xmin: CandidatePoint = None
+  constraints_relaxation: Optional[dict] = None
+  xmin: Optional[CandidatePoint] = None
@@ -73,76 +75,67 @@ def map_variables(self, eval_set: List[CandidatePoint]):
       self.directions.append(xtry.direction)
       self.mesh.append(xtry.mesh)

-  def run_callable_serial_local(self, iter:int, peval: int, eval_set:List[CandidatePoint], options: Options, post: PostMADS, psize: List[float], stepName: str = None, mesh: auto = None, constraintsRelaxation: dict = None, budget:int = 1):
+  def run_callable_serial_local(self, iter:int, peval: int, eval_set:List[CandidatePoint], options: Options, post: PostMADS, psize: List[float], step_name: str = None, mesh: Any = None, constraints_relaxation: dict = None, budget:int = 1):
     xc: List[CandidatePoint] = []
     self.map_variables(eval_set)
-    self.constraintsRelaxation = copy.deepcopy(constraintsRelaxation)
+    self.constraints_relaxation = copy.deepcopy(constraints_relaxation)
     for it in range(len(eval_set)):
       peval += 1
-      f = self.evaluate_BB(it)
+      f = self.evaluate_blackbox(it)
       if f.status != BB_EVAL_STATUS.UNEVALUATED:
         xc.append(f)
         if mesh:
           xc[-1].mesh = copy.deepcopy(mesh)
         post.bb_eval.append(peval)
-        xc[-1].evalNo = peval
+        xc[-1].eval_no = peval
         post.iter.append(iter)
-        if stepName:
-          post.step_name.append(stepName)
+        if step_name:
+          post.step_name.append(step_name)
         post.psize.append(psize)
       if options.opportunistic and len(xc) > 0 and xc[-1] < self.xmin:
         break
       if peval == budget:
         break
-    # self.constraintsRelaxation["hmax"] = eval_set[-1].hmax
     return xc, post, peval

-  def evaluate_BB(self, index: int)->List[Any]:
-    tic = time.perf_counter()
-    f, errStatus = self.eval(self.candidates[index].coordinates)
+  def evaluate_blackbox(self, index: int)->List[Any]:
+    f, err_status = self.eval(self.candidates[index].coordinates)
     x_cp: CandidatePoint = CandidatePoint()
     x_cp.coordinates = copy.deepcopy(self.candidates[index].coordinates)
-    x_cp.LAMBDA = copy.deepcopy(self.constraintsRelaxation["LAMBDA"])
-    x_cp.RHO = copy.deepcopy(self.constraintsRelaxation["RHO"])
-    x_cp.hmax = copy.deepcopy(self.constraintsRelaxation["hmax"])
-    x_cp.constraints_type = copy.deepcopy(self.constraintsRelaxation["constraints_type"])
+    x_cp.lambda_multipliers = copy.deepcopy(self.constraints_relaxation["LAMBDA"])
+    x_cp.rho = copy.deepcopy(self.constraints_relaxation["RHO"])
+    x_cp.h_max = copy.deepcopy(self.constraints_relaxation["hmax"])
+    x_cp.constraints_type = copy.deepcopy(self.constraints_relaxation["constraints_type"])
     x_cp.direction = copy.deepcopy(self.directions[index])
     x_cp.mesh = copy.deepcopy(self.mesh[index])
     x_cp.__eval__(f)
-    if errStatus:
+    if err_status:
       x_cp.status = DESIGN_STATUS.ERROR
     # if x_cp.status == DESIGN_STATUS.INFEASIBLE:
     #   self.constraintsRelaxation["hmax"] = x_cp.hmax
-    if self.constraintsRelaxation["LAMBDA"] == None:
-      self.constraintsRelaxation["LAMBDA"] = copy.deepcopy(self.xmin.LAMBDA)
-    if len(x_cp.cPB) > len(self.constraintsRelaxation["LAMBDA"]):
-      self.constraintsRelaxation["LAMBDA"] += [self.constraintsRelaxation["LAMBDA"][-1]] * abs(len(self.constraintsRelaxation["LAMBDA"])-len(x_cp.cPB))
-    if len(x_cp.cPB) < len(self.constraintsRelaxation["LAMBDA"]):
-      del self.constraintsRelaxation["LAMBDA"][len(x_cp.cPB):]
self.constraints_relaxation["LAMBDA"] == None: + self.constraints_relaxation["LAMBDA"] = copy.deepcopy(self.xmin.lambda_multipliers) + if len(x_cp.cPB) > len(self.constraints_relaxation["LAMBDA"]): + self.constraints_relaxation["LAMBDA"] += [self.constraints_relaxation["LAMBDA"][-1]] * abs(len(self.constraints_relaxation["LAMBDA"])-len(x_cp.cPB)) + if len(x_cp.cPB) < len(self.constraints_relaxation["LAMBDA"]): + del self.constraints_relaxation["LAMBDA"][len(x_cp.cPB):] for i in range(len(x_cp.cPB)): - if self.constraintsRelaxation["RHO"] == 0.: - self.constraintsRelaxation["RHO"] = 0.001 - self.constraintsRelaxation["LAMBDA"][i] = copy.deepcopy(max(self.dtype.zero, self.constraintsRelaxation["LAMBDA"][i] + (1/self.constraintsRelaxation["RHO"])*x_cp.cPB[i])) + if np.isclose(self.constraints_relaxation["RHO"], 0., rtol=1e-09, atol=1e-09): + self.constraints_relaxation["RHO"] = 0.001 + self.constraints_relaxation["LAMBDA"][i] = copy.deepcopy(max(self.dtype.zero, self.constraints_relaxation["LAMBDA"][i] + (1/self.constraints_relaxation["RHO"])*x_cp.cPB[i])) if x_cp.status == DESIGN_STATUS.FEASIBLE: - self.constraintsRelaxation["RHO"] *= copy.deepcopy(0.5) - - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg=f"Completed evaluation of point # {index} in {x_cp.Eval_time} seconds, ftry={x_cp.f}, status={x_cp.status.name} and htry={x_cp.h}. \n", msg_type=MSG_TYPE.INFO) - # toc = time.perf_counter() - # x_cp.Eval_time = (toc - tic) + self.constraints_relaxation["RHO"] *= copy.deepcopy(0.5) return x_cp - - def run_callable_parallel_local(self, iter:int, peval: int, njobs:int, eval_set:List[CandidatePoint], options: Options, post: PostMADS, psize: List[float], mesh: auto = None, stepName: str = None, eval_call: Callable = None, constraintsRelaxation: dict = None, budget:int = 1): - bb_eval = [] + def run_callable_parallel_local(self, iter:int, peval: int, eval_set:List[CandidatePoint], options: Options, post: PostMADS, psize: List[float], mesh: Any = None, step_name: str = None, constraints_relaxation: dict = None, budget:int = 1): xc: List[CandidatePoint] = [] self.map_variables(eval_set) - self.constraintsRelaxation = copy.deepcopy(constraintsRelaxation) - with concurrent.futures.ProcessPoolExecutor(options.np) as executor: - results = [executor.submit(self.evaluate_BB, it) for it in range(len(eval_set))] + self.constraints_relaxation = copy.deepcopy(constraints_relaxation) + with concurrent.futures.ProcessPoolExecutor(max_workers=options.np) as executor: + results = [executor.submit(self.evaluate_blackbox, it) for it in range(len(eval_set))] for f in concurrent.futures.as_completed(results): # if f.result()[0]: # executor.shutdown(wait=False) @@ -153,20 +146,22 @@ def run_callable_parallel_local(self, iter:int, peval: int, njobs:int, eval_set: if mesh: xc[-1].mesh = copy.deepcopy(mesh) - xc[-1].evalNo = self.bb_eval + xc[-1].eval_no = self.bb_eval self.bb_eval = peval post.bb_eval.append(peval) post.iter.append(iter) # post.poll_dirs.append(poll.poll_dirs[f.result()[1]]) - if stepName: - post.step_name.append(stepName) + if step_name: + post.step_name.append(step_name) post.psize.append(psize) if options.opportunistic and len(xc) > 0 and xc[-1] < self.xmin: break if peval == budget: break - + else: + executor.shutdown(wait=False) + return peval, xc, post, peval # Function to execute .exe file locally @@ -184,7 +179,7 @@ def execute_on_remote(self, host, username, password, exe_path): ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host, 
-    stdin, stdout, stderr = ssh.exec_command(exe_path)
+    _, stdout, stderr = ssh.exec_command(exe_path)
     output = stdout.read().decode()
     error = stderr.read().decode()
@@ -238,26 +233,19 @@ def eval(self, values: List[float]):
       is_object = False
       try:
         sig = signature(self.blackbox)
-      except:
+      except Exception:
         is_object = True
-        pass
       if not is_object:
         npar = len(sig.parameters)
         # Get input arguments defined for the callable
         inputs = str(sig).replace("(", "").replace(")", "").replace(" ","").split(',')
         # Check if user constants list is defined and if the number of input args of the callable matches what OMADS expects
         if self.constants is None:
-          if (npar == 1 or (npar> 0 and npar <= 3 and ('*argv' in inputs))):
-            try:
-              f_eval = self.blackbox(values)
-            except:
-              evalerr = True
-              logging.error(f"Callable {str(self.blackbox)} evaluation returned an error at the poll point {values}")
-              f_eval = [inf, [inf]]
-          elif (npar == 2 and ('*argv' not in inputs)):
+          is_argv = '*argv' in inputs
+          if (npar == 1 or (npar> 0 and npar <= 3 and is_argv)) or (npar == 2 and is_argv):
             try:
               f_eval = self.blackbox(values)
-            except:
+            except Exception:
               evalerr = True
               logging.error(f"Callable {str(self.blackbox)} evaluation returned an error at the poll point {values}")
               f_eval = [inf, [inf]]
@@ -267,7 +255,7 @@
           if (npar == 2 or (npar> 0 and npar <= 3 and ('*argv' in inputs))):
             try:
               f_eval = self.blackbox(values, self.constants)
-            except:
+            except Exception:
               evalerr = True
               logging.error(f"Callable {str(self.blackbox)} evaluation returned an error at the poll point {values}")
           else:
@@ -275,7 +263,7 @@
       else:
         try:
           f_eval = self.blackbox(values)
-        except:
+        except Exception:
           evalerr = True
           logging.error(f"Callable {str(self.blackbox)} evaluation returned an error at the poll point {values}")
          f_eval = [[inf], [inf]]
@@ -287,7 +275,7 @@
       self.write_input(values)
       pwd = os.getcwd()
       os.chdir(self.path)
-      isWin = platform.platform().split('-')[0] == 'Windows'
+      is_win = platform.platform().split('-')[0] == 'Windows'
       evalerr = False
       timouterr = False
       # Check if the file is executable
       if not executable:
         raise IOError(f"The blackbox file {str(self.blackbox)} is not an executable! Please provide a valid executable file.")
       # Prepare the execution command based on the running machine's OS
-      if isWin and self.commandOptions is None:
+      if is_win and self.command_options is None:
         cmd = self.blackbox
-      elif isWin:
-        cmd = f'{self.blackbox} {self.commandOptions}'
-      elif self.commandOptions is None:
+      elif is_win:
+        cmd = f'{self.blackbox} {self.command_options}'
+      elif self.command_options is None:
         cmd = f'./{self.blackbox}'
       else:
-        cmd = f'./{self.blackbox} {self.commandOptions}'
+        cmd = f'./{self.blackbox} {self.command_options}'
       try:
         p = subprocess.run(cmd, shell=True, timeout=self.timeout)
         if p.returncode != 0:
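
The guarded-callable pattern in `eval` above is the heart of how OMADS keeps a failing user blackbox from killing a run: the call is wrapped, failures are logged, and `inf` stands in for the lost responses. A condensed, standalone sketch of that idea follows; `safe_eval` is a hypothetical helper written for illustration, not a function of the package.

```python
# Condensed sketch of the guarded-callable pattern used by Evaluator.eval
# (illustrative; the real method also handles executables, user constants
# and *argv-style signatures).
import logging
from inspect import signature
from numpy import inf

def safe_eval(blackbox, values):
  try:
    signature(blackbox)  # raises TypeError/ValueError for non-callables
  except Exception:
    raise IOError("blackbox must be callable in this sketch")
  try:
    return blackbox(values)
  except Exception:  # never let a user blackbox crash the optimizer
    logging.error(f"Callable {blackbox} failed at the point {values}")
    return [inf, [inf]]

rosen = lambda x: [(1 - x[0])**2 + 100*(x[1] - x[0]**2)**2, [0.0]]
print(safe_eval(rosen, [1.0, 1.0]))  # [0.0, [0.0]]
```
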
diff --git a/src/OMADS/Exploration.py b/src/OMADS/Exploration.py
index c8c9373..6ec7d41 100644
--- a/src/OMADS/Exploration.py
+++ b/src/OMADS/Exploration.py
@@ -21,55 +21,53 @@
 # Copyright (C) 2022 Ahmed H. Bayoumy #
 # ------------------------------------------------------------------------------------#
+import copy
 from .CandidatePoint import CandidatePoint
 from .Point import Point
 from .Barriers import Barrier, BarrierMO
-# from ._common import *
-from .Directions import *
+from .Directions import Dirs2n
 import samplersLib as explore
 import random
 from matplotlib import pyplot as plt
-from ._globals import *
+from ._globals import DType, VAR_TYPE, SUCCESS_TYPES, DESIGN_STATUS, MSG_TYPE, SAMPLING_METHOD, SEARCH_TYPE, DIST_TYPE, STOP_TYPE
 from .Parameters import Parameters
 from .Evaluator import Evaluator
-from .Optimizer import GenericSamplerBase, ConstraintsRelaxationParameters
+from .Optimizer import GenericSamplerBase
+from typing import List, Optional, Any
+from dataclasses import dataclass
+import numpy as np

 @dataclass
-class VNS_data:
-  fixed_vars: List[CandidatePoint] = None
+class VNSData:
+  fixed_vars: Optional[List[CandidatePoint]] = None
   nb_search_pts: int = 0
   stop: bool = False
   stop_reason: STOP_TYPE = STOP_TYPE.NO_STOP
   success: SUCCESS_TYPES = SUCCESS_TYPES.US
   count_search: bool = False
-  new_feas_inc: CandidatePoint = None
-  new_infeas_inc: CandidatePoint = None
-  params: Parameters = None
-  # true_barrier: Barrier = None
-  # sgte_barrier: Barrier = None
-  active_barrier: auto = None
+  new_feas_inc: Optional[CandidatePoint] = None
+  new_infeas_inc: Optional[CandidatePoint] = None
+  params: Optional[Parameters] = None
+  active_barrier: Any = None

 @dataclass
-class VNS(VNS_data):
+class VNS(VNSData):
   """ """
   _k: int = 1
   _k_max: int = 100
-  _old_x: CandidatePoint = None
-  _dist: List[DIST_TYPE] = None
-  _ns_dist: List[int] = None
+  _old_x: Optional[CandidatePoint] = None
+  _dist: Optional[List[DIST_TYPE]] = None
+  _ns_dist: Optional[List[int]] = None
   _rho: float = 0.1
   _seed: int = 0
   _rho0: float = 0.1

-  def __init__(self, active_barrier: auto, stop: bool=False, true_barrier: Barrier=None, sgte_barrier: Barrier=None, params=None):
+  def __init__(self, active_barrier: Any, stop: bool=False, params=None):
     self.stop = stop
     self.count_search = not self.stop
-    # self.params._opt_only_sgte = False
     self._dist = [DIST_TYPE.GAUSS, DIST_TYPE.GAMMA, DIST_TYPE.EXPONENTIAL, DIST_TYPE.POISSON]
-    # self.true_barrier = true_barrier
-    # self.sgte_barrier = sgte_barrier
     self.active_barrier = active_barrier
     self.params = params
@@ -137,8 +135,6 @@ def draw_from_exp(self, mean: CandidatePoint) -> List[CandidatePoint]:
         cs[:, i] = (np.random.exponential(scale=self._rho, size=self._ns_dist[2]))+mean.coordinates[i]
-    # for i in range(self._ns_dist[2]):
-    #   pts[i].coordinates = copy.deepcopy(cs[i, :])
     return cs
@@ -186,21 +182,18 @@ def draw_from_binomial(self, mean: CandidatePoint) -> List[CandidatePoint]:
       else:
         cs[:, i] = (np.random.binomial(n=(mean.coordinates[i]+delta)/((1/self._rho) if self._rho > 1. else self._rho), p=(1/self._rho) if self._rho > 1.
else self._rho, size=(self._ns_dist[4],))-delta) - # for i in range(self._ns_dist[2]): - # pts[i].coordinates = copy.deepcopy(cs[i, :]) return cs - def generate_samples(self, x_inc: CandidatePoint=None, dist: DIST_TYPE = None)->List[float]: + def generate_samples(self, x_inc: CandidatePoint=None, dist: DIST_TYPE = None)->Optional[List[float]]: """_summary_ """ - if isinstance(self.active_barrier, BarrierMO): - if x_inc is None: + if x_inc is None: + if isinstance(self.active_barrier, BarrierMO): x_inc = self.active_barrier.getAllPoints()[0] - elif isinstance(self.active_barrier, Barrier): - if x_inc is None: + elif isinstance(self.active_barrier, Barrier): x_inc = self.active_barrier.select_poll_center() - if x_inc or not x_inc.evaluated: + if x_inc is None or not x_inc.evaluated: return None else: if dist == DIST_TYPE.GAUSS: @@ -224,10 +217,6 @@ def generate_samples(self, x_inc: CandidatePoint=None, dist: DIST_TYPE = None)-> def run(self): if self.stop: return - # Initial - # opt_only_sgte = self.params._opt_only_sgte - - # point x if isinstance(self.active_barrier, Barrier): x: CandidatePoint = self.active_barrier._best_feasible if (x is None or not x.evaluated) and self.active_barrier._filter is not None: @@ -245,11 +234,9 @@ def run(self): if (x is None or not x.evaluated): x = self._old_x - # // update _k and _old_x: if self._old_x is not None and x != self._old_x: self._rho = np.sqrt(np.sum([abs(self._old_x.coordinates[i]-x.coordinates[i])**2 for i in range(len(self._old_x.coordinates))])) - # self._rho *= 2 self._k += 1 if self._k > self._k_max: self.stop = True @@ -283,7 +270,7 @@ def run(self): samples = np.vstack((samples, p)) c += 1 elif isinstance(self.active_barrier, BarrierMO): - if self.active_barrier._currentIncumbentFeas is not None and self.active_barrier._currentIncumbentFeas.evaluated: + if self.active_barrier._currentIncumbentInf is not None and self.active_barrier._currentIncumbentInf.evaluated: for i in range(len(self._dist)): temp = self.generate_samples(x_inc= self.active_barrier._currentIncumbentInf, dist= self._dist[i]) temp = np.unique(temp, axis=0) @@ -302,6 +289,7 @@ def __post_init__(self): self._xmin = CandidatePoint() self.bb_handle = Evaluator() self._dtype = DType() + self.exploreNew = False @property def iter(self): @@ -438,7 +426,6 @@ def scale(self, ub: List[float], lb: List[float], factor: float = 10.0): for k, x in enumerate(np.isinf(self.scaling)): if x: self.scaling[k][0] = 1.0 - s_array = np.diag(self.scaling) def get_list_of_coords_from_list_of_points(self, xps: List[CandidatePoint] = None) -> np.ndarray: coords_array = np.zeros((len(xps), self.dim)) @@ -462,7 +449,7 @@ def generate_2ngrid(self, vlim: np.ndarray = None, x_incumbent: CandidatePoint = hhm = grid.create_housholder(False, domain=self.xmin.var_type) grid.lb = vlim[:, 0] grid.ub = vlim[:, 1] - grid.hmax = self.xmin.hmax + grid.hmax = self.xmin.h_max grid.create_poll_set(hhm=hhm, ub=grid.ub, @@ -490,11 +477,10 @@ def HD_grid(self, n: int =3, vlim: np.ndarray = None) -> np.ndarray: return grid_points[:n, :] - def generate_sample_points(self, nsamples: int = None, samples_in: np.ndarray = None) -> List[CandidatePoint]: + def generate_sample_points(self, nsamples: int = None, samples_in: np.ndarray = None): """ Generate the sample points """ - xlim = [] self.nvars = len(self.prob_params.baseline) - is_AS = False + is_active_sampling = False v = np.empty((self.nvars, 2)) if self.bb_handle.bb_eval + nsamples > self.eval_budget: nsamples = self.eval_budget + self.bb_handle.bb_eval @@ -523,122 
+509,83 @@ def generate_sample_points(self, nsamples: int = None, samples_in: np.ndarray = self.ns = nsamples resize = False clipping = True - # self.seed += np.random.randint(0, 10000) if self.sampling_t == SAMPLING_METHOD.FULLFACTORIAL.name: sampling = explore.samplers.FullFactorial(ns=nsamples, vlim=v, w=self.weights, c=clipping) - if clipping: - resize = True + resize = True elif self.sampling_t == SAMPLING_METHOD.RS.name: sampling = explore.samplers.RS(ns=nsamples, vlim=v) sampling.options["randomness"] = self.seed elif self.sampling_t == SAMPLING_METHOD.HALTON.name: sampling = explore.samplers.halton(ns=nsamples, vlim=v, is_ham=True) - elif self.sampling_t == SAMPLING_METHOD.LH.name: + elif self.sampling_t == SAMPLING_METHOD.LH.name or (self.iter == 1 and self.prob_params.lhs_search_initialization): sampling = explore.samplers.LHS(ns=nsamples, vlim=v) - sampling.options["randomness"] = self.seed - sampling.options["criterion"] = self.sampling_criter + sampling.options["randomness"] = self.seed+self.iter + sampling.options["criterion"] = self.sampling_criter if self.iter > 1 else "corr" sampling.options["msize"] = self.mesh.getdeltaMeshSize().coordinates is_lhs = True else: - if self.iter == 1 or (len(self.hashtable._cache_dict) if isinstance(self.activeBarrier, Barrier) or self.activeBarrier is None else len(self.hashtable._best_hash_ID)) < nsamples:# or self.n_successes / (self.iter) <= 0.25: - sampling = explore.samplers.halton(ns=nsamples, vlim=v) if isinstance(self.activeBarrier, Barrier) or self.activeBarrier is None else explore.samplers.LHS(ns=nsamples, vlim=v) + if self.exploreNew or self.iter == 1 or (len(self.hashtable._cache_dict) if isinstance(self.activeBarrier, Barrier) or self.activeBarrier is None else len(self.hashtable._best_hash_id)) < nsamples:# or self.n_successes / (self.iter) <= 0.25: + sampling = explore.samplers.halton(ns=nsamples, vlim=v) if (isinstance(self.activeBarrier, Barrier) or self.activeBarrier is None) and not self.exploreNew and self.iter == 1 else explore.samplers.LHS(ns=nsamples, vlim=v) sampling.options["randomness"] = self.seed + self.iter sampling.options["criterion"] = self.sampling_criter sampling.options["msize"] = self.mesh.getdeltaMeshSize().coordinates sampling.options["varLimits"] = v + self.exploreNew = False else: - # if len(self.hashtable._best_hash_ID) > self.best_samples: - # if len(self.hashtable._best_hash_ID) > self.best_samples: - self.best_samples = len(self.hashtable._best_hash_ID) + if self.hashtable._is_pareto: + nsamples = len(self.hashtable.nd_points) + self.best_samples = len(self.hashtable._best_hash_id) self.AS = explore.samplers.activeSampling(data=self.hashtable.get_best_cache_points(nsamples=nsamples), n_r=nsamples, vlim=v, kernel_type="Gaussian" if self.dim <= 30 else "Silverman", bw_method="SILVERMAN", seed=int(self.seed + self.iter)) - # estGrid = explore.FullFactorial(ns=nsamples, vlim=v, w=self.weights, c=clipping) - if self.estGrid is None: - if self.dim <= 30: + if self.estGrid is None and self.dim <= 30: self.estGrid = explore.samplers.FullFactorial(ns=nsamples, vlim=v, w=self.weights, c=clipping) - # else: - # if (self.iter % 2) == 0: - # self.estGrid = explore.samplers.halton(ns=nsamples, vlim=v) - # else: - # self.estGrid = explore.samplers.RS(ns=nsamples, vlim=v) - # self.estGrid.set_options(c=self.sampling_criter, r= self.seed + self.iter) - # self.estGrid.options["msize"] = self.mesh.msize - - # self.estGrid.set_options(c=self.sampling_criter, r=int(self.seed + self.iter)) self.AS.kernel.bw_method = 
"SILVERMAN" if self.dim <=30: S = self.estGrid.generate_samples() else: - if True: #(self.iter % 2) == 0: - if self.estGrid == None: - self.estGrid = explore.samplers.LHS(ns=nsamples, vlim=v) - S = self.estGrid.generate_samples() - else: - S = self.estGrid.expand_lhs(x=self.hashtable.get_best_cache_points(nsamples=nsamples), n_points=nsamples, method="ExactSE") + # if True: #(self.iter % 2) == 0: + if self.estGrid == None: + self.estGrid = explore.samplers.LHS(ns=nsamples, vlim=v) + S = self.estGrid.generate_samples() else: - S = self.HD_grid(n=nsamples, vlim=v) + S = self.estGrid.expand_lhs(x=self.hashtable.get_best_cache_points(nsamples=nsamples), n_points=nsamples, method="ExactSE") + # else: + # S = self.HD_grid(n=nsamples, vlim=v) if nsamples < len(S): self.AS.kernel.estimate_pdf(S[:nsamples, :]) else: self.AS.kernel.estimate_pdf(S) - is_AS = True + is_active_sampling = True if self.iter > 1 and is_lhs: - Ps = copy.deepcopy(sampling.expand_lhs(x=self.map_samples_from_points_to_coords(), n_points=nsamples, method= "basic")) + ps = copy.deepcopy(sampling.expand_lhs(x=self.map_samples_from_points_to_coords(), n_points=nsamples, method= "basic")) else: - if is_AS: - Ps = copy.deepcopy(self.AS.resample(size=nsamples, seed=int(self.seed + self.iter))) + if is_active_sampling: + ps = copy.deepcopy(self.AS.resample(size=nsamples, seed=int(self.seed + self.iter))) else: - Ps= copy.deepcopy(sampling.generate_samples()) + ps= copy.deepcopy(sampling.generate_samples()) - if False: - self.df = pd.DataFrame(Ps, columns=[f'x{i}' for i in range(self.dim)]) - pd.plotting.scatter_matrix(self.df, alpha=0.2) - plt.show() if resize: - self.ns = len(Ps) - nsamples = len(Ps) + self.ns = len(ps) + nsamples = len(ps) - # if self.xmin is not None: - # self.visualize_samples(self.xmin.coordinates[0], self.xmin.coordinates[1]) if self.iter > 1 and is_lhs: - self.map_samples_from_coords_to_points(Ps[len(Ps)-nsamples:]) + self.map_samples_from_coords_to_points(ps[len(ps)-nsamples:]) else: - self.map_samples_from_coords_to_points(Ps) - return v, Ps + self.map_samples_from_coords_to_points(ps) def project_coords_to_mesh(self, x:List[float], ref: List[float] = None): pref = Point(self.mesh._n) pref.coordinates = ref px = Point(self.mesh._n) px.coordinates = x - xProjected: Point = self.mesh.projectOnMesh(px, pref) - # if ref == None: - # ref = [0.]*len(x) - # if self.xmin.var_type is None: - # self.xmin.var_type = [VAR_TYPE.REAL] * len(self.xmin.coordinates) - # for i in range(len(x)): - # if self.xmin.var_type[i] != VAR_TYPE.CATEGORICAL: - # if self.xmin.var_type[i] == VAR_TYPE.REAL: - # x[i] = ref[i] + (np.round((x[i]-ref[i])/self.mesh.msize) * self.mesh.msize) - # else: - # x[i] = int(ref[i] + int(int((x[i]-ref[i])/self.mesh.msize) * self.mesh.msize)) - # else: - # x[i] = int(x[i]) - # if x[i] < self.prob_params.lb[i]: - # x[i] = self.prob_params.lb[i] + (self.prob_params.lb[i] - x[i]) - # if x[i] > self.prob_params.ub[i]: - # x[i] = self.prob_params.ub[i] - # if x[i] > self.prob_params.ub[i]: - # x[i] = self.prob_params.ub[i] - (x[i] - self.prob_params.ub[i]) - # if x[i] < self.prob_params.lb[i]: - # x[i] = self.prob_params.lb[i] - - return xProjected.coordinates + x_projected: Point = self.mesh.projectOnMesh(px, pref) + + return x_projected.coordinates def map_samples_from_coords_to_points(self, samples: np.ndarray): for i in range(len(samples)): @@ -668,7 +615,6 @@ def gauss_perturbation(self, p: CandidatePoint, npts: int = 5) -> List[Candidate # np.random.seed(self.seed) cs = np.zeros((npts, p.n_dimensions)) 
pts: List[CandidatePoint] = [0] * npts - mp = 1. for k in range(p.n_dimensions): if p.var_type[k] == VAR_TYPE.REAL: cs[:, k] = np.random.normal(loc=p.coordinates[k], scale=self.mesh.getdeltaMeshSize().coordinates[k], size=(npts,)) @@ -691,9 +637,9 @@ def gauss_perturbation(self, p: CandidatePoint, npts: int = 5) -> List[Candidate def omit_duplicates(self): temp: List[CandidatePoint] = [] for xtry in self._candidate_points_set: - is_dup = xtry.signature in self.hashtable.hash_id if not self.hashtable._isPareto else self.hashtable.is_duplicate(xtry) + is_dup = xtry.signature in self.hashtable.hash_id if not self.hashtable._is_pareto else self.hashtable.is_duplicate(xtry) is_duplicate: bool = (self.check_cache and self.hashtable.size > 0 and is_dup) - # TODO: The commented logic below needs more investigation to make sure that it doesn't hurt. + # COMPLETED: The commented logic below needs more investigation to make sure that it doesn't hurt. # while is_duplicate and unique_p_trials < 5: # if self.display: # print(f'Cache hit. Trial# {unique_p_trials}: Looking for a non-duplicate along the poll direction where the duplicate point is located...') @@ -712,7 +658,7 @@ def omit_duplicates(self): # break # unique_p_trials += 1 if (is_duplicate): - if self.log is not None and self.log.isVerbose: + if self.log is not None and self.log.is_verbose: self.log.log_msg(msg="Cache hit ... Failed to find a non-duplicate alternative.", msg_type=MSG_TYPE.INFO) if self.display: print("Cache hit ... Failed to find a non-duplicate alternative.") @@ -723,139 +669,15 @@ def omit_duplicates(self): self._candidate_points_set = [] for t in temp: self._candidate_points_set.append(copy.deepcopy(t)) - - # Deprecated routine - # def evaluate_candidate_point(self, index: int): - # """ Evaluate the sample point i on the points set """ - # """ Set the dynamic index for this point """ - # tic = time.perf_counter() - # self.point_index = index - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg=f"Evaluate sample point # {index}...", msg_type=MSG_TYPE.INFO) - # """ Initialize stopping and success conditions""" - # stop: bool = False - # """ Copy the point i to a trial one """ - # xtry: CandidatePoint = self._candidate_points_set[index] - # """ This is a success bool parameter used for - # filtering out successful designs to be printed - # in the output results file""" - # success = SUCCESS_TYPES.US - - # """ Check the cache memory; check if the trial point - # is a duplicate (it has already been evaluated) """ - # unique_p_trials: int = 0 - # is_duplicate: bool = (self.check_cache and self.hashtable.size > 0 and self.hashtable.is_duplicate(xtry)) - # while is_duplicate and unique_p_trials < 5: - # self.log.log_msg(f'Cache hit. Trial# {unique_p_trials}: Looking for a non-duplicate in the vicinity of the duplicate point ...', MSG_TYPE.INFO) - # if self.display: - # print(f'Cache hit. 
Trial# {unique_p_trials}: Looking for a non-duplicate in the vicinity of the duplicate point ...') - # if xtry.var_type is None: - # if self.xmin.var_type is not None: - # xtry.var_type = self.xmin.var_type - # else: - # xtry.var_type = [VAR_TYPE.CONTINUOUS] * len(self.xmin.coordinates) - - # xtries: List[Point] = self.gauss_perturbation(p=xtry, npts=len(self.samples)*2) - # for tr in range(len(xtries)): - # is_duplicate = self.hashtable.is_duplicate(xtries[tr]) - # if is_duplicate: - # continue - # else: - # xtry = copy.deepcopy(xtries[tr]) - # break - # unique_p_trials += 1 - - # if (is_duplicate): - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg="Cache hit ... Failed to find a non-duplicate alternative.", msg_type=MSG_TYPE.INFO) - # if self.display: - # print("Cache hit ... Failed to find a non-duplicate alternative.") - # stop = True - # bb_eval = copy.deepcopy(self.bb_eval) - # psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - # return [stop, index, self.bb_handle.bb_eval, success, psize, xtry] - - - # """ Evaluation of the blackbox; get output responses """ - # if xtry.sets is not None and isinstance(xtry.sets,dict): - # p: List[Any] = [] - # for i in range(len(xtry.var_type)): - # if (xtry.var_type[i] == VAR_TYPE.DISCRETE or xtry.var_type[i] == VAR_TYPE.CATEGORICAL) and xtry.var_link[i] is not None: - # p.append(xtry.sets[xtry.var_link[i]][int(xtry.coordinates[i])]) - # else: - # p.append(xtry.coordinates[i]) - # self.bb_output, _ = self.bb_handle.eval(p) - # else: - # self.bb_output, _ = self.bb_handle.eval(xtry.coordinates) - - # """ - # Evaluate the poll point: - # - Set multipliers and penalty - # - Evaluate objective function - # - Evaluate constraint functions (can be an empty vector) - # - Aggregate constraints - # - Penalize the objective (extreme barrier) - # """ - # xtry.constraints_type = copy.deepcopy(self.constraints_RP.constraints_type) - # xtry.LAMBDA = copy.deepcopy(self.constraints_RP.LAMBDA) - # xtry.RHO = copy.deepcopy(self.constraints_RP.RHO) - # xtry.hmax = copy.deepcopy(self.constraints_RP.hmax) - # xtry.constraints_type = copy.deepcopy(self.prob_params.constraints_type) - # # xtry.__eval__(self.bb_output) - # # if not self.hashtable._isPareto: - # # self.hashtable.add_to_best_cache(xtry) - # # toc = time.perf_counter() - # # xtry.Eval_time = (toc - tic) - - # """ Update multipliers and penalty """ - # if self.constraints_RP.LAMBDA == None: - # self.constraints_RP.LAMBDA = self.xmin.LAMBDA - # if len(xtry.c_ineq) > len(self.constraints_RP.LAMBDA): - # self.constraints_RP.LAMBDA += [self.constraints_RP.LAMBDA[-1]] * abs(len(self.constraints_RP.LAMBDA)-len(xtry.c_ineq)) - # if len(xtry.c_ineq) < len(self.constraints_RP.LAMBDA): - # del self.constraints_RP.LAMBDA[len(xtry.c_ineq):] - # for i in range(len(xtry.c_ineq)): - # if self.constraints_RP.RHO == 0.: - # self.constraints_RP.RHO = 0.001 - # if self.constraints_RP.LAMBDA is None: - # self.constraints_RP.LAMBDA = xtry.LAMBDA - # self.constraints_RP.LAMBDA[i] = copy.deepcopy(max(self.dtype.zero, self.constraints_RP.LAMBDA[i] + (1/self.constraints_RP.RHO)*xtry.c_ineq[i])) - - # if xtry.status == DESIGN_STATUS.FEASIBLE: - # self.constraints_RP.RHO *= copy.deepcopy(0.5) - - # if self.log is not None and self.log.isVerbose: - # self.log.log_msg(msg=f"Completed evaluation of point # {index} in {xtry.Eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. 
\n", msg_type=MSG_TYPE.INFO) - - # """ Add to the cache memory """ - # if self.store_cache: - # self.hashtable.hash_id = xtry - - # # if self.save_results or self.display: - # self.bb_eval = self.bb_handle.bb_eval - # self.psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - # psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - - # if xtry < self.xmin: - # success = SUCCESS_TYPES.FS - - # if success == SUCCESS_TYPES.FS and self.opportunistic and self.iter > 1: - # stop = True - - # """ Check stopping criteria """ - # if self.bb_eval >= self.eval_budget: - # self.terminate = True - # stop = True - # return [stop, index, self.bb_handle.bb_eval, success, psize, xtry] - def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint]): + def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint] = None): for xtry in x_cps: - if self.log is not None and self.log.isVerbose: - self.log.log_msg(msg=f"Completed evaluation of point # {xtry.evalNo} in {xtry.Eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. \n", msg_type=MSG_TYPE.INFO) + if self.log is not None and self.log.is_verbose: + self.log.log_msg(msg=f"Completed evaluation of point # {xtry.eval_no} in {xtry.eval_time} seconds, ftry={xtry.f}, status={xtry.status.name} and htry={xtry.h}. \n", msg_type=MSG_TYPE.INFO) """ Add to the cache memory """ self.hashtable.add_to_cache(xtry) - if not self.hashtable._isPareto: + if not self.hashtable._is_pareto: self.hashtable.add_to_best_cache(xtry) if self.store_cache and xtry.signature not in self.hashtable.hash_id: self.hashtable.hash_id = xtry @@ -863,7 +685,6 @@ def postprocess_evaluated_candidates(self, x_cps: List[CandidatePoint]): # if self.save_results or self.display: self.bb_eval = self.bb_handle.bb_eval self.psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) - psize = copy.deepcopy(self.mesh.getDeltaFrameSize().coordinates) def master_updates(self, x: List[CandidatePoint], peval, save_all_best: bool = False, save_all:bool = False): if peval >= self.eval_budget: @@ -875,11 +696,9 @@ def master_updates(self, x: List[CandidatePoint], peval, save_all_best: bool = F """ Check success conditions """ is_infeas_dom: bool = (xtry.status == DESIGN_STATUS.INFEASIBLE and (xtry.h < self.xmin.h) ) is_feas_dom: bool = (xtry.status == DESIGN_STATUS.FEASIBLE and xtry.fobj < self.xmin.fobj) - is_infea_improving: bool = (self.xmin.status == DESIGN_STATUS.FEASIBLE and xtry.status == DESIGN_STATUS.INFEASIBLE and (xtry.fobj < self.xmin.fobj and xtry.h <= self.xmin.hmax)) - is_feas_improving: bool = (self.xmin.status == DESIGN_STATUS.INFEASIBLE and xtry.status == DESIGN_STATUS.FEASIBLE and xtry.fobj < self.xmin.fobj) success = SUCCESS_TYPES.US - if ((is_infeas_dom or is_feas_dom)): + if (is_infeas_dom or is_feas_dom): self.success = SUCCESS_TYPES.FS success = SUCCESS_TYPES.US # <- This redundant variable is important # for managing concurrent parallel execution @@ -907,6 +726,8 @@ def master_updates(self, x: List[CandidatePoint], peval, save_all_best: bool = F return x_post def update_local_region(self, region="expand"): + if self.vicinity_ratio is None: + self.vicinity_ratio = np.ones((len(self.prob_params.baseline),1)) if region =="expand": for i in range(len(self.vicinity_ratio)): if self.vicinity_ratio[i] * 2 < self.prob_params.ub[i]: @@ -923,6 +744,6 @@ class search_sampling: s_method: str = SAMPLING_METHOD.LH.name ns: int = 3 visualize: bool = False - criterion: str = None - weights: List[float] = None + criterion: 
diff --git a/src/OMADS/Gmesh.py b/src/OMADS/Gmesh.py
index 2d82b9e..0d78364 100644
--- a/src/OMADS/Gmesh.py
+++ b/src/OMADS/Gmesh.py
@@ -20,56 +20,54 @@
 # https://github.com/Ahmed-Bayoumy/OMADS #
 # Copyright (C) 2022 Ahmed H. Bayoumy #
 # ------------------------------------------------------------------------------------#
+from dataclasses import dataclass

-import copy
-from typing import List
-from ._globals import *
+import numpy as np
+from ._globals import DType, GL_LIMITS
 from .Point import Point
 from .Mesh import Mesh
 from .Options import Options
 from .Parameters import Parameters
+from typing import Any, Optional

 @dataclass
 class Gmesh(Mesh):
   """ GMesh: Granular mesh """
-  _initFrameSizeExp: Point = None
-  _frameSizeMant: Point = None
-  _frameSizeExp: Point = None
-  _finestMeshSize: Point = None
-  _granularity: Point = None
-  _enforceSanityChecks: bool = None
-  _allGranular: bool = None
-  _anisotropyFactor: float = None
-  _anisotropicMesh: bool = None
+  _initFrameSizeExp: Optional[Point] = None
+  _frameSizeMant: Optional[Point] = None
+  _frameSizeExp: Optional[Point] = None
+  _finestMeshSize: Optional[Point] = None
+  _granularity: Optional[Point] = None
+  _enforceSanityChecks: Optional[bool] = None
+  _allGranular: Optional[bool] = None
+  _anisotropyFactor: Optional[float] = None
+  _anisotropicMesh: Optional[bool] = None
   _refineFreq: int = 1
-  _refineCount: int = None
-  _r: Point = None
-  _r_min: Point = None
-  _r_max: Point = None
-  _Delta_0: Point = None
-  _Delta_0_mant: Point = None
-  _pos_mant_0: Point = None
+  _refineCount: Optional[int] = None
+  _r: Optional[Point] = None
+  _r_min: Optional[Point] = None
+  _r_max: Optional[Point] = None
+  _Delta_0: Optional[Point] = None
+  _Delta_0_mant: Optional[Point] = None
+  _pos_mant_0: Optional[Point] = None
   _HARD_MIN_MESH_INDEX: int = -300

-  def __init__(self, pbParam: Parameters, runOptions: Options):
+  def __init__(self, pb_param: Parameters, run_options: Options):
     """ Constructor """
-    super(Gmesh, self).__init__(pbParams=pbParam, limitMaxMeshIndex=-GL_LIMITS, limitMinMeshIndex=GL_LIMITS)
-
-    # if (self._limit_mesh_index>0):
-    #   raise IOError("Limit mesh index must be <=0 ")
-
+    super(Gmesh, self).__init__(pb_params=pb_param, limit_max_mesh_index=-GL_LIMITS, limit_min_mesh_index=GL_LIMITS)
+
     self._initFrameSizeExp = Point()
     self._frameSizeMant = Point()
     self._frameSizeExp = Point()
     self._finestMeshSize = Point()
-    self._granularity = pbParam.granularity
+    self._granularity = pb_param.granularity
     self._enforceSanityChecks = True
     self._allGranular = True
-    self._anisotropyFactor = runOptions.anisotropyFactor
-    self._anisotropicMesh = runOptions.anistropicMesh
-    self._refineFreq = runOptions.refineFreq
+    self._anisotropyFactor = run_options.anisotropyFactor
+    self._anisotropicMesh = run_options.anistropicMesh
+    self._refineFreq = run_options.refineFreq
     self._refineCount = 0
-    self._dtype = DType(runOptions.precision)
+    self._dtype = DType(run_options.precision)
     self.init()

   @property
@@ -80,64 +78,64 @@ def dtype(self):
   @dtype.setter
   def dtype(self, other: DType):
-    self.dtype = other
+    self._dtype = other

-  def initFrameSizeGranular(self, initialFrameSize: Point):
-    if not initialFrameSize.is_all_defined() or initialFrameSize.size != self._n:
+  def initFrameSizeGranular(self, initial_frame_size: Point):
+    if not initial_frame_size.is_all_defined() or initial_frame_size.size != self._n:
       raise IOError("GMesh: initFrameSizeGranular: inconsistent
dimension of the frame size. \n" + - f"initial frame size defined: {initialFrameSize.is_all_defined()} \n" + - f"size: {initialFrameSize.size} \n" + + f"initial frame size defined: {initial_frame_size.is_all_defined()} \n" + + f"size: {initial_frame_size.size} \n" + f"n: {self._n}") self._frameSizeExp.reset(n=self._n) self._frameSizeMant.reset(n=self._n) - dMin: float = None + d_min: Optional[float] = None for i in range(self._n): if self._granularity[i] > 0: - dMin = self._granularity[i] + d_min = self._granularity[i] else: - dMin = 1 + d_min = 1 - div: float = initialFrameSize[i] / dMin + div: float = initial_frame_size[i] / d_min exp: int = self.roundFrameSizeExp(np.log10(abs(div))) self._frameSizeExp[i] = exp self._frameSizeMant[i] = self.roundFrameSizeMant(div*10**-exp) def roundFrameSizeExp(self, exp: float) -> int: - frameSizeExp: int = int(exp) - return frameSizeExp + frame_size_exp: int = int(exp) + return frame_size_exp def roundFrameSizeMant(self, mant: float): - frameSizeMant: int = 0 + frame_size_mant: int = 0 if mant < 1.5: - frameSizeMant = 1 + frame_size_mant = 1 elif mant >= 1.5 and mant < 3.5: - frameSizeMant = 2 + frame_size_mant = 2 else: - frameSizeMant = 5 + frame_size_mant = 5 - return frameSizeMant + return frame_size_mant - def getRho(self, i: int = None) -> auto: + def getRho(self, i: int = None) -> Any: if i is not None: - rho: auto + rho: Any diff: float = self._frameSizeExp[i] - self._initFrameSizeExp[i] - powDiff: float = 10.0 ** abs(diff) + pow_diff: float = 10.0 ** abs(diff) if self._granularity[i] > 0: - rho = self._frameSizeMant[i] * min(10.0**self._frameSizeExp[i], powDiff) + rho = self._frameSizeMant[i] * min(10.0**self._frameSizeExp[i], pow_diff) else: - rho = self._frameSizeMant[i] * powDiff + rho = self._frameSizeMant[i] * pow_diff else: rho: auto = [None] * self._n for i in range(self._n): diff: float = self._frameSizeExp[i] - self._initFrameSizeExp[i] - powDiff: float = 10.0 ** abs(diff) + pow_diff: float = 10.0 ** abs(diff) if self._granularity[i] > 0: - rho[i] = self._frameSizeMant[i] * min(10.0**self._frameSizeExp[i], powDiff) + rho[i] = self._frameSizeMant[i] * min(10.0**self._frameSizeExp[i], pow_diff) else: - rho[i] = self._frameSizeMant[i] * powDiff + rho[i] = self._frameSizeMant[i] * pow_diff return rho @@ -167,62 +165,62 @@ def getdeltaMeshSize(self, i: int = None) -> Point: return delta[i] def getDeltaFrameSize(self, i: int = None) -> Point: - dMinGran = 1.0 + d_min_gran = 1.0 Delta: Point = Point(self._n) Delta.coordinates = [0] * self._n if i is None: for i in range(self._n): if self._granularity[i] > 0: - dMinGran = self._granularity[i] - Delta[i] = dMinGran * self._frameSizeMant[i] * 10 ** self._frameSizeExp[i] + d_min_gran = self._granularity[i] + Delta[i] = d_min_gran * self._frameSizeMant[i] * 10 ** self._frameSizeExp[i] return Delta else: if self._granularity[i] > 0: - dMinGran = self._granularity[i] - Delta[i] = dMinGran * self._frameSizeMant[i] * 10 ** self._frameSizeExp[i] + d_min_gran = self._granularity[i] + Delta[i] = d_min_gran * self._frameSizeMant[i] * 10 ** self._frameSizeExp[i] return Delta[i] def getDeltaFrameSizeCoarser(self) -> Point: Delta: Point = Point(self._n) Delta.coordinates = [0] * self._n for i in range(self._n): - frameSizeMantOld = self._frameSizeMant[i] - frameSizeExpOld = self._frameSizeExp[i] - self._frameSizeMant[i], self._frameSizeExp[i] = self.getLargerMantExp(frameSizeMant=frameSizeMantOld, frameSizeExp=frameSizeExpOld, i=i) + frame_size_mant_old = self._frameSizeMant[i] + frame_size_exp_old = 
self._frameSizeExp[i] + self._frameSizeMant[i], self._frameSizeExp[i] = self.getLargerMantExp(frame_size_mant=frame_size_mant_old, i=i) Delta[i] = self.getDeltaFrameSize(i=i) - self._frameSizeMant[i] = frameSizeMantOld - self._frameSizeExp[i] = frameSizeExpOld + self._frameSizeMant[i] = frame_size_mant_old + self._frameSizeExp[i] = frame_size_exp_old return Delta - def getLargerMantExp(self, frameSizeMant: float, frameSizeExp: float, i: int): - if frameSizeMant == 1: + def getLargerMantExp(self, frame_size_mant: float, i: int): + if frame_size_mant == 1: self._frameSizeMant[i] = 2 - elif frameSizeMant == 2: + elif frame_size_mant == 2: self._frameSizeMant[i] = 5 else: self._frameSizeMant[i] = 1 self._frameSizeExp[i] += 1 return self._frameSizeMant[i], self._frameSizeExp[i] - def checkDeltasGranularity(self, i: int, deltaMeshSize: float, deltaFrameSize: float): + def checkDeltasGranularity(self, i: int, delta_mesh_size: float, delta_frame_size: float): if self._granularity[i] > 0.0: - hasError: bool = False + has_error: bool = False err: str = "Error: setDeltas: " - if not self.isMult(deltaMeshSize, self._granularity[i]): - hasError = True + if not self.isMult(delta_mesh_size, self._granularity[i]): + has_error = True err += f"deltaMeshSize at index {i}" err += f" is not a multiple of granularity {self._granularity[i]}" - elif not self.isMult(deltaFrameSize, self._granularity[i]): - hasError = True + elif not self.isMult(delta_frame_size, self._granularity[i]): + has_error = True err += f"deltaFrameSize at index {i}" err += f" is not a multiple of granularity {self._granularity[i]}" - if hasError: + if has_error: raise IOError(err) - def setDeltas(self, i: int, deltaMeshSize: float, deltaFrameSize: float): + def setDeltas(self, i: int = None, delta_mesh_size: float = None, delta_frame_size: float= None): # Input checks - self.checkDeltasGranularity(i=i, deltaMeshSize=deltaMeshSize, deltaFrameSize=deltaFrameSize) + self.checkDeltasGranularity(i=i, delta_mesh_size=delta_mesh_size, delta_frame_size=delta_frame_size) # Value to use for granularity (division so default = 1.0) gran: float = 1. if 0. < self._granularity[i]: @@ -233,9 +231,9 @@ def setDeltas(self, i: int, deltaMeshSize: float, deltaFrameSize: float): # Compute mantisse first # There are only 3 cases: 1, 2, 5, so compute all # 3 possibilities and then assign the values that work. - mant1: float = deltaFrameSize / (1.*gran) - mant2: float = deltaFrameSize / (2.*gran) - mant5: float = deltaFrameSize / (5. *gran) + mant1: float = delta_frame_size / (1.*gran) + mant2: float = delta_frame_size / (2.*gran) + mant5: float = delta_frame_size / (5. *gran) exp1: float = np.log10(mant1) exp2: float = np.log10(mant2) @@ -261,57 +259,50 @@ def setDeltas(self, i: int, deltaMeshSize: float, deltaFrameSize: float): # Sanity checks if self._enforceSanityChecks: - self.checkFrameSizeIntegrity(frameSizeExp=self._frameSizeExp[i], - frameSizeMant=self._frameSizeMant[i]) - self.checkSetDeltas(i=i, deltaMeshSize=deltaMeshSize, deltaFrameSize=deltaFrameSize) + self.checkFrameSizeIntegrity(frame_size_exp=self._frameSizeExp[i], + frame_size_mant=self._frameSizeMant[i]) + self.checkSetDeltas(i=i, delta_mesh_size=delta_mesh_size, delta_frame_size=delta_frame_size) self.checkDeltasGranularity(i, self.getdeltaMeshSize(i=i), self.getDeltaFrameSize(i=i)) - - - - - - - def checkFrameSizeIntegrity(self, frameSizeExp: float, frameSizeMant: float): + def checkFrameSizeIntegrity(self, frame_size_exp: float, frame_size_mant: float): # frameSizeExp must be an integer. 
# frameSizeMant must be 1, 2 or 5. - hasError: bool = False + has_error: bool = False err: str = "Error: Integrity check" - if not isinstance(frameSizeExp, int): - hasError = True - err += f" of frameSizeExp ({frameSizeExp}): Should be integer." - elif (frameSizeMant != 1.0 and frameSizeMant != 2.0 and frameSizeMant != 5.0): - hasError = True - err += f" of frameSizeMant ({frameSizeMant}): Should be integer." - - if hasError: + if not isinstance(frame_size_exp, int): + has_error = True + err += f" of frameSizeExp ({frame_size_exp}): Should be integer." + elif (not np.isclose(frame_size_mant, 1.0, rtol=1e-09, atol=1e-09) and not np.isclose(frame_size_mant, 2.0, rtol=1e-09, atol=1e-09) and not np.isclose(frame_size_mant, 5.0, rtol=1e-09, atol=1e-09)): + has_error = True + err += f" of frameSizeMant ({frame_size_mant}): Should be 1, 2 or 5." + + if has_error: raise IOError(err) - def checkSetDeltas(self, i: int, deltaMeshSize: float, deltaFrameSize: float): - hasError: bool = False + def checkSetDeltas(self, i: int, delta_mesh_size: float, delta_frame_size: float): + has_error: bool = False err: str = "Warning: setDeltas did not give good value" # Something might be wrong with setDeltas(), so double check. - if self.getdeltaMeshSize(i=i) != deltaMeshSize: - hasError = True + if self.getdeltaMeshSize(i=i) != delta_mesh_size: + has_error = True err += f" for deltaMeshSize at index {i}" - err += f" Expected: {deltaMeshSize}" + err += f" Expected: {delta_mesh_size}" err += f" computed: {self.getdeltaMeshSize(i=i)}" - elif self.getDeltaFrameSize(i=i) != deltaFrameSize: - hasError = True + elif self.getDeltaFrameSize(i=i) != delta_frame_size: + has_error = True err += f" for deltaFrameSize at index {i}" - err += f" Expected: {deltaFrameSize}" + err += f" Expected: {delta_frame_size}" err += f" computed: {self.getDeltaFrameSize(i=i)}" - if (hasError): + if (has_error): raise IOError(err) - - - def scaleAndProjectOnMesh(self, dir: Point): + + def scaleAndProjectOnMesh(self, dir: Point = None): proj: Point = Point(self._n) - infiniteNorm: float = np.linalg.norm(dir.coordinates, np.inf) + infinite_norm: float = np.linalg.norm(dir.coordinates, np.inf) - if 0 == infiniteNorm: + if 0 == infinite_norm: err = "GMesh: scaleAndProjectOnMesh: Cannot handle an infinite norm of zero" raise IOError(err) @@ -319,111 +310,109 @@ def scaleAndProjectOnMesh(self, dir: Point): if self._frameSizeMant.is_all_defined() and self._frameSizeExp.is_all_defined(): for i in range(self._n): - delta: float = self.getdeltaMeshSize(i=i) - proj[i] = np.round(self.getRho(i=i)*dir[i]/infiniteNorm) * delta + delta: Any = self.getdeltaMeshSize(i=i) + proj[i] = np.round(self.getRho(i=i)*dir[i]/infinite_norm) * delta else: err = "GMesh: scaleAndProjectOnMesh cannot be performed." 
err += f" i = {i}" err += f" mantissa defined: {self._frameSizeMant.is_all_defined()}" err += f" exp defined: {self._frameSizeExp.is_all_defined()}" - err += f"delta mesh size defined: {delta}" + err += f"delta mesh size defined: {self.getdeltaMeshSize()}" raise IOError(err) return proj - def projectOnMesh(self, point: Point, frameCenter: Point): + def projectOnMesh(self, point: Point, frame_center: Point): proj: Point = point - delta: auto = self.getdeltaMeshSize() - maxNbTry: int = 10 - verifValueI: Point = Point(self._n) - verifValueI.coordinates = [0] * self._n + delta: Any = self.getdeltaMeshSize() + max_nb_try: int = 10 + verif_value_i: Point = Point(self._n) + verif_value_i.coordinates = [0] * self._n for i in range(point.size): - deltaI = delta[i] - frameCenterIsOnMesh: bool = self.isMult(frameCenter[i], deltaI) + delta_i = delta[i] + frame_center_is_on_mesh: bool = self.isMult(frame_center[i], delta_i) - diffProjFrameCenter: float = proj[i] - frameCenter[i] - verifValueI[i] = proj[i] if (frameCenterIsOnMesh) else diffProjFrameCenter + diff_proj_frame_center: float = proj[i] - frame_center[i] + verif_value_i[i] = proj[i] if (frame_center_is_on_mesh) else diff_proj_frame_center # // Force verifValueI to be a multiple of deltaI. # // nbTry = 0 means point is already on mesh. # // nbTry = 1 means the projection worked. # // nbTry > 1 means the process went hacky by forcing the value to work # // for verifyPointIsOnMesh. - nbTry = 0 - while (not self.isMult(verifValueI[i], deltaI) and nbTry <= maxNbTry): - newVerifValueI: float - if (0==nbTry): + nb_try = 0 + while (not self.isMult(verif_value_i[i], delta_i) and nb_try <= max_nb_try): + new_verif_value_i: float + if (0==nb_try): # Use closest projection - vHigh = verifValueI.nextMult(deltaI, i) + v_high = verif_value_i.next_mult(delta_i, i) p: Point = Point(self._n) - p.coordinates = [-c for c in verifValueI.coordinates] - vLow = - (p.nextMult(deltaI, i)) - diffHigh = vHigh - verifValueI[i] - diffLow = verifValueI[i] - vLow - verifValueI[i] = vLow if (diffLow < diffHigh) else (vHigh if (diffHigh < diffLow) else (vLow if (proj[i] < 0) else vHigh)) + p.coordinates = [-c for c in verif_value_i.coordinates] + v_low = - (p.next_mult(delta_i, i)) + diff_high = v_high - verif_value_i[i] + diff_low = verif_value_i[i] - v_low + verif_value_i[i] = v_low if (diff_low < diff_high) else (v_high if (diff_high < diff_low) else (v_low if (proj[i] < 0) else v_high)) else: p: Point = Point(self._n) - p.coordinates = [-c for c in verifValueI.coordinates] - verifValueI[i] = verifValueI.nextMult(deltaI, i) if (diffProjFrameCenter >= 0) else (-(p.nextMult(deltaI, i))) - proj[i] = verifValueI[i] if frameCenterIsOnMesh else verifValueI[i] + frameCenter[i] + p.coordinates = [-c for c in verif_value_i.coordinates] + verif_value_i[i] = verif_value_i.next_mult(delta_i, i) if (diff_proj_frame_center >= 0) else (-(p.next_mult(delta_i, i))) + proj[i] = verif_value_i[i] if frame_center_is_on_mesh else verif_value_i[i] + frame_center[i] # Recompute verifValue for more precision - newVerifValueI = proj[i] if frameCenterIsOnMesh else proj[i] - frameCenter[i] - nbTry += 1 + new_verif_value_i = proj[i] if frame_center_is_on_mesh else proj[i] - frame_center[i] + nb_try += 1 # Special cases - while (newVerifValueI != verifValueI[i] and nbTry <= maxNbTry): - if verifValueI[i] >= 0: - verifValueI[i] = max(verifValueI[i], newVerifValueI) - verifValueI[i] += self.dtype.zero - verifValueI[i] = verifValueI.nextMult(deltaI, i) + while (new_verif_value_i != verif_value_i[i] and 
nb_try <= max_nb_try): + if verif_value_i[i] >= 0: + verif_value_i[i] = max(verif_value_i[i], new_verif_value_i) + verif_value_i[i] += self.dtype.zero + verif_value_i[i] = verif_value_i.next_mult(delta_i, i) else: - verifValueI[i] = min(verifValueI[i], newVerifValueI) - verifValueI[i] -= self.dtype.zero + verif_value_i[i] = min(verif_value_i[i], new_verif_value_i) + verif_value_i[i] -= self.dtype.zero p: Point = Point(self._n) - p.coordinates = [-c for c in verifValueI.coordinates] - verifValueI[i] = -(p.nextMult(deltaI, i)) - proj[i] = verifValueI[i] if frameCenterIsOnMesh else verifValueI[i] + frameCenter[i] + p.coordinates = [-c for c in verif_value_i.coordinates] + verif_value_i[i] = -(p.next_mult(delta_i, i)) + proj[i] = verif_value_i[i] if frame_center_is_on_mesh else verif_value_i[i] + frame_center[i] # Recompute verifValue for more precision - newVerifValueI = proj[i] if frameCenterIsOnMesh else proj[i] - frameCenter[i] - nbTry += 1 + new_verif_value_i = proj[i] if frame_center_is_on_mesh else proj[i] - frame_center[i] + nb_try += 1 - verifValueI[i] = newVerifValueI + verif_value_i[i] = new_verif_value_i - if (nbTry >= maxNbTry and not self.isMult(verifValueI[i], deltaI)): + if (nb_try >= max_nb_try and not self.isMult(verif_value_i[i], delta_i)): # TODO: print warning proj[i] = point[i] return proj - def check_min_poll_size_criterion (self) -> bool: """ Check the minimal poll size criterion. """ if not self._Delta_min_is_defined: return False - S, D = self.get_Delta_object() + S, _ = self.get_Delta_object() return S - def check_min_mesh_size_criterion (self) -> bool: + def check_min_mesh_size_criterion(self) -> bool: """ Check the minimal mesh size criterion. """ if not self._delta_min.is_all_defined(): return False - S, D = self.get_delta_object() + S, _ = self.get_delta_object() return S - def get_rho (self, i: int): + def get_rho(self, i: int): """ Access to the ratio of poll size / mesh size parameter rho^k. :param rho The ratio poll/mesh size rho^k -- OUT. """ - rho: float = None + rho: Optional[float] = None if self._granularity[i] > 0: rho = self._frameSizeMant.coordinates[i] * min(10** self._frameSizeExp.coordinates[i], 10**abs(self._frameSizeExp.coordinates[i]-self._initFrameSizeExp.coordinates[i])) else: rho = self._frameSizeMant.coordinates[i] * 10** abs(self._frameSizeExp.coordinates[i]-self._initFrameSizeExp.coordinates[i]) return rho - - def get_delta (self, i: int): + def get_delta(self, i: int): """ Access to the mesh size parameter delta^k. :param delta: The mesh size parameter delta^k -- OUT. @@ -434,7 +423,7 @@ def get_delta (self, i: int): delta = self._granularity[i] * max(1.0, delta) return delta - def get_Delta (self, i: int): + def get_Delta(self, i: int): """ Access to the poll size parameter Delta^k. :param Delta: The poll size parameter Delta^k -- OUT. 
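Note on the Gmesh hunks above: the class stores every frame size as a mantissa-exponent pair mant * 10**exp with mant restricted to {1, 2, 5} (roundFrameSizeMant), coarsens by walking 1 -> 2 -> 5 and bumping the exponent (getLargerMantExp), refines by walking the same ladder backwards (refineDeltaFrameSizeME), and derives the mesh size as 10**(exp - |exp - exp0|) (getdeltaMeshSizeF), so the mesh shrinks faster than the frame once the exponent drops below its initial value. The sketch below restates those update rules as free functions so they can be checked in isolation; the helper names (coarsen, refine, frame_size, mesh_size) are illustrative only and not part of the OMADS API.

def coarsen(mant: int, exp: int):
    # Mirrors getLargerMantExp: 1 -> 2 -> 5, then wrap to 1 with exp + 1.
    if mant == 1:
        return 2, exp
    if mant == 2:
        return 5, exp
    return 1, exp + 1

def refine(mant: int, exp: int, granularity: float = 0.0):
    # Mirrors refineDeltaFrameSizeME: 5 -> 2 -> 1, wrapping to 5 with exp - 1.
    if mant == 1:
        mant, exp = 5, exp - 1
    elif mant == 2:
        mant = 1
    else:
        mant = 2
    # Granular variables must not refine past Delta = granularity (mant 1, exp 0).
    if granularity > 0 and exp < 0 and mant == 5:
        mant, exp = 1, 0
    return mant, exp

def frame_size(mant: int, exp: int, granularity: float = 0.0) -> float:
    # Mirrors getDeltaFrameSize: Delta = gran * mant * 10**exp.
    gran = granularity if granularity > 0 else 1.0
    return gran * mant * 10.0 ** exp

def mesh_size(exp: int, init_exp: int, granularity: float = 0.0) -> float:
    # Mirrors getdeltaMeshSizeF: delta = 10**(exp - |exp - init_exp|).
    delta = 10.0 ** (exp - abs(exp - init_exp))
    return granularity * max(1.0, delta) if granularity > 0 else delta

mant, exp = 1, 0                    # Delta = 1.0
mant, exp = coarsen(mant, exp)      # Delta = 2.0
mant, exp = coarsen(mant, exp)      # Delta = 5.0
mant, exp = coarsen(mant, exp)      # Delta = 10.0: mantissa wraps, exp -> 1
assert frame_size(mant, exp) == 10.0
assert mesh_size(exp, init_exp=0) == 1.0   # mesh grows slower than the frame
mant, exp = refine(mant, exp)       # one step back down the ladder
assert frame_size(mant, exp) == 5.0

The coarsen and refine steps are exact inverses on the {1, 2, 5} ladder (absent the granularity clamp), which is the invariant the checkSetDeltas and checkDeltasGranularity sanity checks above exercise.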
@@ -459,7 +448,7 @@ def init(self): self._finestMeshSize = self.getdeltaMeshSize() for i in range(self._n): - if 0.0 == self._granularity[i]: + if np.isclose(0.0, self._granularity[i], rtol=1e-09, atol=1e-09): self._allGranular = False break @@ -468,68 +457,67 @@ def init(self): if self._enforceSanityChecks: for i in range(self._n): - self.checkFrameSizeIntegrity(frameSizeExp=self._frameSizeExp[i], frameSizeMant=self._frameSizeMant[i]) - self.checkDeltasGranularity(i=i, deltaMeshSize=self.getdeltaMeshSize(i=i), deltaFrameSize=self.getDeltaFrameSize(i=i)) + self.checkFrameSizeIntegrity(frame_size_exp=self._frameSizeExp[i], frame_size_mant=self._frameSizeMant[i]) + self.checkDeltasGranularity(i=i, delta_mesh_size=self.getdeltaMeshSize(i=i), delta_frame_size=self.getDeltaFrameSize(i=i)) def isMult(self, v1, v2)->bool: return ((v1%v2) <= self.dtype.zero) - def enlargeDeltaFrameSize(self, direction: Point) -> bool: - oneFrameSizeChanged = False - minRho = np.inf + def enlargeDeltaFrameSize(self, direction: Point = None) -> bool: + one_frame_size_changed = False + min_rho = np.inf for i in range(self._n): if self._granularity[i] == 0: - minRho = min(minRho, self.getRho(i=i)) + min_rho = min(min_rho, self.getRho(i=i)) for i in range(self._n): - frameSizeIChanged = False - if (not self._anisotropicMesh or abs(direction[i])/self.getdeltaMeshSize(i=i)/self.getRho(i=i) > self._anisotropyFactor or (self._granularity[i] == 0 and self._frameSizeExp[i] < self._initFrameSizeExp[i] and self.getRho(i=i) > minRho*minRho)): - self.getLargerMantExp(frameSizeMant=self._frameSizeMant[i], frameSizeExp=self._frameSizeExp[i], i=i) - frameSizeIChanged = True - oneFrameSizeChanged = True + frame_size_i_changed = False + if (not self._anisotropicMesh or abs(direction[i])/self.getdeltaMeshSize(i=i)/self.getRho(i=i) > self._anisotropyFactor or (self._granularity[i] == 0 and self._frameSizeExp[i] < self._initFrameSizeExp[i] and self.getRho(i=i) > min_rho*min_rho)): + self.getLargerMantExp(frame_size_mant=self._frameSizeMant[i], i=i) + frame_size_i_changed = True + one_frame_size_changed = True # update the mesh index self._r[i] += 1 self._rMax[i] = max(self._r[i], self._rMax[i]) # Sanity checks - if self._enforceSanityChecks and frameSizeIChanged: + if self._enforceSanityChecks and frame_size_i_changed: self.checkFrameSizeIntegrity(self._frameSizeExp[i], self._frameSizeMant[i]) - self.checkDeltasGranularity(i=i, deltaMeshSize=self.getdeltaMeshSize(i=i), deltaFrameSize=self.getDeltaFrameSize(i=i)) + self.checkDeltasGranularity(i=i, delta_mesh_size=self.getdeltaMeshSize(i=i), delta_frame_size=self.getDeltaFrameSize(i=i)) # When we enlarge the frame size we may keep the mesh size unchanged. So we need to test. 
msize = self.getdeltaMeshSize() if self._finestMeshSize < msize: self._isFinest = False - return oneFrameSizeChanged - - def refineDeltaFrameSizeME(self, frameSizeMant: float, frameSizeExp:float, granularity: float): - if frameSizeMant == 1: - frameSizeMant = 5 - frameSizeExp -= 1 - elif frameSizeMant == 2: - frameSizeMant = 1 + return one_frame_size_changed + + def refineDeltaFrameSizeME(self, frame_size_mant: float, frame_size_exp: float, granularity: float): + if frame_size_mant == 1: + frame_size_mant = 5 + frame_size_exp -= 1 + elif frame_size_mant == 2: + frame_size_mant = 1 else: - frameSizeMant = 2 + frame_size_mant = 2 # When the mesh reaches granularity (exp = 1, mant = 1), make sure to remove the refinement - if granularity > 0 and frameSizeExp < 0 and frameSizeMant == 5: - frameSizeExp = 0 - frameSizeMant = 1 + if granularity > 0 and frame_size_exp < 0 and frame_size_mant == 5: + frame_size_exp = 0 + frame_size_mant = 1 - return frameSizeMant, frameSizeExp + return frame_size_mant, frame_size_exp - def getdeltaMeshSizeF(self, frameSizeExp:int, initFrameSizeExp:int, granularity: int)->float: - diff = frameSizeExp - initFrameSizeExp - exp = frameSizeExp - abs(diff) + def getdeltaMeshSizeF(self, frame_size_exp: int, init_frame_size_exp: int, granularity: float) -> float: + diff = frame_size_exp - init_frame_size_exp + exp = frame_size_exp - abs(diff) delta = 10.0**exp if 0.0 < granularity: delta = granularity * max(1.0, delta) return delta - - def refineDeltaFrameSize(self) -> bool: + def refineDeltaFrameSize(self): # // Compute the new values frameSizeMant and frameSizeExp first. # // We will do some verifications before setting them. self._refineCount += 1 @@ -539,31 +527,31 @@ def refineDeltaFrameSize(self) -> bool: for i in range(self._n): # // Compute the new values frameSizeMant and frameSizeExp first. # // We will do some verifications before setting them. - frameSizeMant = self._frameSizeMant[i] - frameSizeExp = self._frameSizeExp[i] - frameSizeMant, frameSizeExp= self.refineDeltaFrameSizeME(frameSizeMant=frameSizeMant, frameSizeExp=frameSizeExp, granularity=self._granularity[i]) + frame_size_mant = self._frameSizeMant[i] + frame_size_exp = self._frameSizeExp[i] + frame_size_mant, frame_size_exp = self.refineDeltaFrameSizeME(frame_size_mant=frame_size_mant, frame_size_exp=frame_size_exp, granularity=self._granularity[i]) # Verify delta mesh size does not go too small if we use the new values. - olddeltaMeshSize = self.getdeltaMeshSizeF(frameSizeExp=self._frameSizeExp[i], initFrameSizeExp=self._initFrameSizeExp[i], granularity=self._granularity[i]) - if self._minMeshSize[i] <= olddeltaMeshSize: + old_delta_mesh_size = self.getdeltaMeshSizeF(frame_size_exp=self._frameSizeExp[i], init_frame_size_exp=self._initFrameSizeExp[i], granularity=self._granularity[i]) + if self._minMeshSize[i] <= old_delta_mesh_size: # update mesh index if self._granularity[i] == 0: self._r[i] -= 1 else: # Update mesh index if not already at the min limit. When refining the frame, if mantissa and exponent stay the same, the min limit is reached (do not decrease). 
- if (not (self._frameSizeMant[i] == frameSizeMant and self._frameSizeExp[i] == frameSizeExp)): + if (not (self._frameSizeMant[i] == frame_size_mant and self._frameSizeExp[i] == frame_size_exp)): self._r[i] -= 1 # Update the minimal mesh index reached so far self._rMin[i] = min(self._r[i], self._rMin[i]) # We can go lower - self._frameSizeMant[i] = frameSizeMant - self._frameSizeExp[i] = frameSizeExp + self._frameSizeMant[i] = frame_size_mant + self._frameSizeExp[i] = frame_size_exp # Sanity checks if self._enforceSanityChecks: - self.checkFrameSizeIntegrity(frameSizeExp=self._frameSizeExp[i], - frameSizeMant=self._frameSizeMant[i]) - self.checkDeltasGranularity(i=i, deltaMeshSize=self.getdeltaMeshSize(i=i), deltaFrameSize=self.getDeltaFrameSize(i=i)) + self.checkFrameSizeIntegrity(frame_size_exp=self._frameSizeExp[i], + frame_size_mant=self._frameSizeMant[i]) + self.checkDeltasGranularity(i=i, delta_mesh_size=self.getdeltaMeshSize(i=i), delta_frame_size=self.getDeltaFrameSize(i=i)) msize = self.getdeltaMeshSize() if msize <= self._finestMeshSize: self._isFinest = True @@ -575,302 +563,6 @@ def update(self): return -# ############################################### -# ############################################### -# ############################################### - - # def init_poll_size_granular (self, cont_init_poll_size: Point ): - # """ - # :param: cont_init_poll_size: continuous initial poll size -- IN. - # """ - - # if not all(cont_init_poll_size.defined) or cont_init_poll_size.n_dimensions != self._n: - # raise IOError("Inconsistent dimension of the poll size!") - - # self._frameSizeExp.reset(n=self._n) - # self._frameSizeMant.reset(n=self._n) - # self._pos_mant_0.reset(n=self._n) - - # d_min: float - - # for i in range(self._n): - # if self._granularity.defined[i] and self._granularity.coordinates[i] > 0: - # d_min = self._granularity[i] - # else: - # d_min=1.0 - - # exp: int = int(np.log10(abs(cont_init_poll_size.coordinates[i]/d_min))) - # if exp < 0: - # exp = 0 - - # self._frameSizeExp.coordinates[i]=exp - # cont_mant: float = cont_init_poll_size.coordinates[i] / d_min * 10.0**(-exp) - - # if cont_mant < 1.5: - # self._frameSizeMant.coordinates[i] = 1 - # self._pos_mant_0[i] = 0 - # elif (cont_mant >= 1.5 and cont_mant < 3.5): - # self._frameSizeMant.coordinates[i] = 2 - # self._pos_mant_0.coordinates[i] = 1 - # else: - # self._frameSizeMant.coordinates[i] = 5 - # self._pos_mant_0.coordinates[i] = 2 - - - - # def get_delta_object(self): - # """ """ - # stop = True - # delta: Point = Point(self._n) - # for i in range(self._n): - # delta.coordinates[i] = self.get_delta(i=i) - # if stop and self._delta_min_is_defined and not self._fixed_variables.defined[i] and self._delta_min.defined[i] and delta.coordinates[i] >= self._delta_min[i]: - # stop = False - # return stop, delta - - # def get_delta_max(self)->Point: - # return self._delta_0 - - # def get_Delta_object(self)->Point: - # """ """ - # stop = True - # Delta: Point = Point(self._n) - # for i in range(self._n): - # Delta.coordinates[i] = self.get_Delta(i=i) - # if stop and self._granularity.coordinates[i] == 0 and not self._fixed_variables.defined[i] and (self._Delta_min_is_complete or Delta.coordinates[i] >= self._Delta_min[i]): - # stop = False - - # if stop and self._granularity.coordinates[i] > 0 and not self._fixed_variables.defined[i] and (not self._Delta_min_is_complete or Delta.coordinates[i] > self._Delta_min[i]): - # stop = False - - - # return stop, Delta - - # def is_finer_than_initial(self): - # """ """ - # for 
i in range(self._n): - # if not self._fixed_variables.defined[i]: - # # For continuous variables - # if self._granularity.coordinates[i]==0 and (self._frameSizeExp.coordinates[i] > self._initFrameSizeExp.coordinates[i] or ( self._frameSizeExp.coordinates[i] == self._initFrameSizeExp.coordinates[i] and self._frameSizeMant.coordinates[i] >= self._Delta_0_mant.coordinates[i] )): - # return False - # # For granular variables (case 1) - # if self._granularity.coordinates[i] > 0 and (self._frameSizeExp.coordinates[i] > self._initFrameSizeExp.coordinates[i] or ( self._frameSizeExp.coordinates[i] == self._initFrameSizeExp.coordinates[i] and self._frameSizeMant.coordinates[i] > self._Delta_0_mant.coordinates[i] )): - # return False - # # For continuous variables (case 2) - # if self._granularity.coordinates[i]>0 and (self._frameSizeExp.coordinates[i] == self._initFrameSizeExp.coordinates[i] and self._frameSizeMant.coordinates[i] == self._Delta_0_mant.coordinates[i] and (self._frameSizeExp.coordinates[i] != 0 or self._frameSizeMant.coordinates[i] != 1) ): - # return False - - # return True - - # def update(self, success: SUCCESS_TYPES, d: List[float]): - # if d and self._n != len(d): - # raise IOError("delta_0 and d have different sizes") - - # if success == SUCCESS_TYPES.FS: - # for i in range(self._n): - # if (self._granularity.coordinates[i] == 0 and not self._fixed_variables.defined[i]): - # if i > 0: - # min_rho = min(min_rho, self.get_rho(i)) - # else: - # min_rho = self.get_rho(i) - - # for i in range(self._n): - # if (not d or not self._anisotropic_mesh or abs(d[i])/self.get_delta(i)/self.get_rho(i) > self._anisotropic_factor or ( self._granularity.coordinates[i] == 0 and self._frameSizeExp.coordinates[i] < self._initFrameSizeExp.coordinates[i] and self.get_rho(i) > min_rho*min_rho )): - # # Update the mesh index - # self._r.coordinates[i] += 1 - # self._r_max.coordinates[i] = max(self._r.coordinates[i], self._r_max.coordinates[i]) - # # update the mantissa and exponent - # if ( self._frameSizeMant.coordinates[i] == 1 ): - # self._frameSizeMant.coordinates[i]= 2 - # elif ( self._frameSizeMant.coordinates[i] == 2 ): - # self._frameSizeMant.coordinates[i]=5 - # else: - # self._frameSizeMant.coordinates[i]=1 - # self._frameSizeExp.coordinates[i] += 1 - # elif success == SUCCESS_TYPES.US: - # for i in range(self._n): - # if (not self._fixed_variables.defined[i]): - # # update the mesh index - # self._r.coordinates[i] -= 1 - # # update the mesh mantissa and exponent - # if (self._frameSizeMant.coordinates[i]==1): - # self._frameSizeMant.coordinates[i] = 5 - # self._frameSizeExp.coordinates[i] -= 1 - # elif self._frameSizeMant.coordinates[i] == 2: - # self._frameSizeMant.coordinates[i] = 1 - # else: - # self._frameSizeMant.coordinates[i] = 2 - - # if ( self._granularity.coordinates[i] > 0 and self._frameSizeExp.coordinates[i]==-1 and self._frameSizeMant.coordinates[i]==5 ): - # self._r.coordinates[i] += 1 - # self._frameSizeExp.coordinates[i]=0 - # self._frameSizeMant.coordinates[i]=1 - # self._r_min.coordinates[i] = min(self._r.coordinates[i], self._r_min.coordinates[i]) - - # # for i in range(self._n): - # # # Test for producing anisotropic mesh + correction to prevent mesh collapsing for some variables ( ifnot ) - # # if (not d or not self._anisotropic_mesh or d[i]/self.get_delta(i)): - - - # def reset(self): - # """ """ - # self.__init__() - - # def is_finest(self): - # """ """ - # for i in range(self._n): - # if not self._fixed_variables.defined[i] and self._r.coordinates[i] > 
self._r_min.coordinates[i]: - # return False - # return True - - - - # def scale_and_project(self, i: int, l: float, round_up: bool): - # """ """ - # delta: float = self.get_delta(i=i) - # if i<= self._n and self._frameSizeMant.is_all_defined() and self._frameSizeExp.is_all_defined() and delta is not None: - # d: float = self.get_rho(i=i) * l - # # round to double - # return np.round(d)*delta - # else: - # raise IOError("scale_and_project(): mesh scaling and projection cannot be performed!") - - - - - # def check_min_mesh_sizes(self, stop: bool=None, stop_reason: STOP_TYPE = None): - # """_summary_ - # """ - # if stop: - # return - - # stop = False - # # Coarse mesh stopping criterion - # for i in range(self._n): - # if self._r.coordinates[i] > -GL_LIMITS: - # stop = True - # break - # if stop: - # stop_reason = STOP_TYPE.GL_LIMITS_REACHED - # return - - # stop = True - - # # // Fine mesh stopping criterion. Do not apply when all variables have granularity. - # # // To trigger this stopping criterion: - # # // - All mesh indices must be < _limit_mesh_index for all continuous variables (granularity==0), and - # # // - mesh size == granularity for all granular variables. - # if self._all_granular: - # stop = False - - # else: - # for i in range(self._n): - # # Skip fixed variables - # if self._fixed_variables.defined[i]: - # continue - # # Do not stop if the mesh size of a variable is strictly larger than its granularity - # if self._granularity.coordinates[i] > 0 and self.get_delta(i=i) > self._granularity.coordinates[i]: - # stop = False - # break - # # Do not stop if the mesh of a variable is above the limit mesh index - # if self._granularity.coordinates[i] == 0 and self._r.coordinates[i] >= self._granularity.coordinates[i]: - # stop = False - # break - - # if stop: - # stop_reason = STOP_TYPE.GL_LIMITS_REACHED - # return - - # # 2. delta^k (mesh size) tests: - # if self.check_min_poll_size_criterion(): - # stop = True - # stop_reason = STOP_TYPE.DELTA_P_MIN_REACHED - # return - - # # 3. 
delta^k (mesh size) tests: - # if self.check_min_mesh_size_criterion(): - # stop = True - # stop_reason = STOP_TYPE.DELTA_M_MIN_REACHED - # return - - - - - # def get_mesh_indices(self): - # """_summary_ - # """ - # return self._r - - - # def get_min_mesh_indices(self): - # """_summary_ - # """ - # return self._r_min - - # def get_max_mesh_indices(self): - # """_summary_ - # """ - # return self._r_max - - # def set_mesh_indices(self, r: Point): - # """_summary_ - # """ - # if r.size != self._n: - # raise IOError("set_mesh_indices(): dimension of provided mesh indices must be consistent with their previous dimension") - - # if r.coordinates[0] < HARD_MIN_MESH_INDEX: - # raise IOError("set_mesh_indices(): mesh index is too small") - - # # Set the mesh indices - # self._r = copy.deepcopy(r) - # for i in range(self._n): - # if (r.coordinates[i]>self._r_max.coordinates[i]): - # self._r_max.coordinates[i] = r.coordinates[i] - # if (r.coordinates[i] < self._r_min.coordinates[i]): - # self._r_min.coordinates[i] = r.coordinates[i] - - # # Set the mesh mantissas and exponents according to the mesh indices - # for i in range(self._n): - # shift: int = int(self._r.coordinates[i] + self._pos_mant_0.coordinates[i]) - # pos: int = self.isMult((shift + 300), 3) - - # self._frameSizeExp.coordinates[i] = np.floor((shift+300.0)/3.0) - 100.0 + self._initFrameSizeExp.coordinates[i] - - # if pos == 0: - # self._frameSizeMant.coordinates[i] = 1 - # elif pos == 1: - # self._frameSizeMant.coordinates[i] = 2 - # elif pos == 2: - # self._frameSizeMant.coordinates[i] = 5 - # else: - # raise IOError("set_mesh_indices(): something is wrong with conversion from index to mantissa and exponent") - - # def set_limit_mesh_index(self, l: int): - # """_summary_ - # """ - # if l > 0: - # raise IOError("set_limit_mesh_index(): the limit mesh index must be negative or null.") - - # if l > HARD_MIN_MESH_INDEX: - # raise IOError("set_limit_mesh_index(): the limit mesh index is too small.") - - # self._limit_mesh_index = l - - - - # def get_mesh_ratio_if_success(self): - # """_summary_ - # """ - # ratio: Point = Point(self._n) - # for i in range(self._n): - # power_of_tau: float = self._update_basis**(0 if self._r.coordinates[i] >= 0 else 2*self._r.coordinates[i]) - - # power_of_tau_if_success: float = self._update_basis**(0 if self._r.coordinates[i]+self._coarsening_step >= 0 else 2*(self._r.coordinates[i]+self._coarsening_step)) - - # ratio.coordinates[i] = power_of_tau_if_success/power_of_tau - - # return ratio - diff --git a/src/OMADS/MADS.py b/src/OMADS/MADS.py index a2743e5..afc4fc0 100644 --- a/src/OMADS/MADS.py +++ b/src/OMADS/MADS.py @@ -30,10 +30,12 @@ import sys import OMADS.POLL as PS import OMADS.SEARCH as SS -from typing import List, Dict, Any +from .Exploration import efficient_exploration +from .Directions import Dirs2n +from typing import List, Dict, Any, Optional import numpy as np if importlib.util.find_spec('BMDFO'): - from BMDFO import toy + from BMDFO import toy # type: ignore import time from .Point import Point from .CandidatePoint import CandidatePoint @@ -50,27 +52,27 @@ @dataclass class MADS: - search: SS.efficient_exploration = None - search_VN: SS.VNS = None - poll: PS.Dirs2n = None - param: Parameters = None - evaluator: Evaluator = None - post: PostMADS = None - out: Output = None - outP: Output = None - options: Options = None - data: dict = None - xmin: CandidatePoint = None + search: Optional[efficient_exploration] = None + search_vns: SS.VNS = None + poll: Optional[Dirs2n] = None + param: 
Optional[Parameters] = None + evaluator: Optional[Evaluator] = None + post: Optional[PostMADS] = None + out: Optional[Output] = None + out_p: Optional[Output] = None + options: Optional[Options] = None + data: Optional[dict] = None + xmin: Optional[CandidatePoint] = None iteration: int = 0 peval: int = 0 HT: Any = None - log: logger = None + log: Optional[logger] = None B: Any = None - LAMBDA_k: float = 0. - RHO_k: float = 0. + lambda_multipliers_k: float = 0. + rho_k: float = 0. tic: Any = None toc: Any = None - active_barrier: Barrier = None + active_barrier: Optional[Barrier] = None def __init__(self, data: dict): """ Initialize the log file """ @@ -78,16 +80,16 @@ def __init__(self, data: dict): if not os.path.exists(data["param"]["post_dir"]): try: os.mkdir(data["param"]["post_dir"]) - except: + except OSError: os.makedirs(data["param"]["post_dir"], exist_ok=True) self.log.initialize(data["param"]["post_dir"] + "/OMADS.log") - self.log.log_msg(msg="Preprocess the MADS algorithim...", msg_type=PS.MSG_TYPE.INFO) - self.log.log_msg(msg="Preprocess the search step...", msg_type=PS.MSG_TYPE.INFO) + self.log.log_msg(msg="Preprocess the MADS algorithm...", msg_type=MSG_TYPE.INFO) + self.log.log_msg(msg="Preprocess the search step...", msg_type=MSG_TYPE.INFO) _, _, self.search, _, _, _, _, _, _ = SS.PreExploration(data).initialize_from_dict(log=self.log) - self.log.log_msg(msg="Preprocess the POLL step...", msg_type=PS.MSG_TYPE.INFO) - self.iteration, self.xmin, self.poll, self.options, self.param, self.post, self.out, self.B, self.outP = PS.PrePoll(data).initialize_from_dict(log=self.log, xs=self.search.xmin) - self.out.stepName = "Poll" + self.log.log_msg(msg="Preprocess the POLL step...", msg_type=MSG_TYPE.INFO) + self.iteration, self.xmin, self.poll, self.options, self.param, self.post, self.out, self.B, self.out_p = PS.PrePoll(data).initialize_from_dict(log=self.log, xs=self.search.xmin) + self.out.step_name = "Poll" self.post.step_name = [f'Search: {self.search.type}'] self.HT = copy.deepcopy(self.poll.hashtable) @@ -99,8 +101,8 @@ def search_step(self, xmin: SS.CandidatePoint=None): self.search.log = self.log self.search.xmin = xmin self.search.mesh.update() - self.search.LAMBDA = self.LAMBDA_k - self.search.RHO = self.RHO_k + self.search.LAMBDA = self.lambda_multipliers_k + self.search.RHO = self.rho_k B = self.active_barrier if self.HT is not None: self.search.hashtable = self.HT @@ -111,54 +113,38 @@ def search_step(self, xmin: SS.CandidatePoint=None): B.select_poll_center() B.update_and_reset_success() elif isinstance(B, BarrierMO) and self.iteration == 1: - B.init(evalPointList=[xmin]) + B.init(eval_point_list=[xmin]) - - # search.hmax = B._h_max - - if isinstance(B, Barrier): + + if isinstance(B, Barrier) or isinstance(B, BarrierMO): self.search.hmax = B._h_max - # TODO: Check whether the commented code below is needed - # if xmin.status == DESIGN_STATUS.FEASIBLE: - # B.insert_feasible(search.xmin) - # elif xmin.status == DESIGN_STATUS.INFEASIBLE: - # B.insert_infeasible(search.xmin) - # else: - # B.insert(search.xmin) - elif isinstance(B, BarrierMO): - self.search.hmax = B._hMax + # COMPLETED: Check whether the commented code below is needed """ Create the set of poll directions """ - if self.search.type == SS.SEARCH_TYPE.VNS.name and self.search_VN is not None: - self.search_VN.active_barrier = B - self.search._candidate_points_set = self.search_VN.run() - if self.search_VN.stop: + if self.search.type == SS.SEARCH_TYPE.VNS.name and self.search_vns is not None: + 
self.search_vns.active_barrier = B + self.search._candidate_points_set = self.search_vns.run() + if self.search_vns.stop: print("Reached maximum number of VNS iterations!") self.HT = self.search.hashtable - self.RHO_k = self.search.RHO + self.rho_k = self.search.RHO self.active_barrier = B - self.LAMBDA_k = self.search.LAMBDA + self.lambda_multipliers_k = self.search.LAMBDA return self.search.xmin self.search.map_samples_from_coords_to_points(samples=self.search._candidate_points_set) else: - vvp = vvs = [] - bestFeasible: CandidatePoint = B._currentIncumbentFeas if isinstance(B, BarrierMO) else B._best_feasible - bestInf: CandidatePoint = B._currentIncumbentInf if isinstance(B, BarrierMO) else B.get_best_infeasible() - if bestFeasible is not None and bestFeasible.evaluated: - self.search.xmin = bestFeasible - vvp, _ = self.search.generate_sample_points(int(((self.search.dim+1)/2)*((self.search.dim+2)/2)) if self.search.ns is None else self.search.ns) - if bestInf is not None and bestInf.evaluated: + best_feasible: CandidatePoint = B._currentIncumbentFeas if isinstance(B, BarrierMO) else B._best_feasible + best_inf: CandidatePoint = B._currentIncumbentInf if isinstance(B, BarrierMO) else B.get_best_infeasible() + if best_feasible is not None and best_feasible.evaluated: + self.search.xmin = best_feasible + self.search.generate_sample_points(int(((self.search.dim+1)/2)*((self.search.dim+2)/2)) if self.search.ns is None else self.search.ns) + if best_inf is not None and best_inf.evaluated: # if B._filter is not None and B.get_best_infeasible().evaluated: xmin_bup = self.search.xmin - Prim_samples = self.search._candidate_points_set - self.search.xmin = bestInf#B.get_best_infeasible() - vvs, _ = self.search.generate_sample_points(int(((self.search.dim+1)/2)*((self.search.dim+2)/2)) if self.search.ns is None else self.search.ns) - self.search._candidate_points_set += Prim_samples + prim_samples = self.search._candidate_points_set + self.search.xmin = best_inf#B.get_best_infeasible() + self.search.generate_sample_points(int(((self.search.dim+1)/2)*((self.search.dim+2)/2)) if self.search.ns is None else self.search.ns) + self.search._candidate_points_set += prim_samples self.search.xmin = xmin_bup - - if isinstance(vvs, list) and len(vvs) > 0: - vv = vvp + vvs - else: - vv = vvp """ Save current poll directions and incumbent solution @@ -173,36 +159,32 @@ def search_step(self, xmin: SS.CandidatePoint=None): self.search.bb_output = [] xt = [] """ Serial evaluation for points in the poll set """ - if self.search_VN is not None: - self.search.lb = self.search_VN.params.lb - self.search.ub = self.search_VN.params.ub + if self.search_vns is not None: + self.search.lb = self.search_vns.params.lb + self.search.ub = self.search_vns.params.ub self.search.bb_handle.xmin = xmin - self.search.constraints_RP.LAMBDA = xmin.LAMBDA - self.search.constraints_RP.RHO = xmin.RHO + self.search.constraints_RP.LAMBDA = xmin.lambda_multipliers + self.search.constraints_RP.RHO = xmin.rho self.search.constraints_RP.constraints_type = xmin.constraints_type - self.search.constraints_RP.hmax = xmin.hmax + self.search.constraints_RP.hmax = xmin.h_max if not self.options.parallel_mode: - xt, self.post, self.peval = self.search.bb_handle.run_callable_serial_local(iter=self.iteration, peval=self.peval, eval_set=self.search._candidate_points_set, options=self.options, post=self.post, psize=self.search.mesh.getDeltaFrameSize().coordinates, stepName=f'Search: {self.search.type}', mesh=self.search.mesh, 
constraintsRelaxation=self.search.constraints_RP.__dict__, budget=self.options.budget) + xt, self.post, self.peval = self.search.bb_handle.run_callable_serial_local(iter=self.iteration, peval=self.peval, eval_set=self.search._candidate_points_set, options=self.options, post=self.post, psize=self.search.mesh.getDeltaFrameSize().coordinates, step_name=f'Search: {self.search.type}', mesh=self.search.mesh, constraints_relaxation=self.search.constraints_RP.__dict__, budget=self.options.budget) else: self.search._point_index = -1 """ Parallel evaluation for points in the samples set """ - self.search.bb_eval, xt, self.post, self.peval = self.search.bb_handle.run_callable_parallel_local(iter=self.iteration, peval=self.peval, njobs=self.options.np, eval_set=self.search._candidate_points_set, options=self.options, post=self.post, mesh=self.search.mesh, stepName=f'Search: {self.search.type}', psize=self.search.mesh.getDeltaFrameSize().coordinates, constraintsRelaxation=self.search.constraints_RP.__dict__, budget=self.options.budget) + self.search.bb_eval, xt, self.post, self.peval = self.search.bb_handle.run_callable_parallel_local(iter=self.iteration, peval=self.peval, eval_set=self.search._candidate_points_set, options=self.options, post=self.post, mesh=self.search.mesh, step_name=f'Search: {self.search.type}', psize=self.search.mesh.getDeltaFrameSize().coordinates, constraints_relaxation=self.search.constraints_RP.__dict__, budget=self.options.budget) - if self.search.bb_handle.constraintsRelaxation: - temp:ConstraintsRelaxationParameters = ConstraintsRelaxationParameters(**self.search.bb_handle.constraintsRelaxation) + if self.search.bb_handle.constraints_relaxation: + temp:ConstraintsRelaxationParameters = ConstraintsRelaxationParameters(**self.search.bb_handle.constraints_relaxation) for i in range(len(temp.LAMBDA)): self.search.constraints_RP.LAMBDA[i] = temp.LAMBDA[i] self.search.constraints_RP.RHO = temp.RHO self.search.constraints_RP.constraints_type = temp.constraints_type self.search.constraints_RP.hmax = temp.hmax - # if options.store_cache: - # for xi in xt: - # search.hashtable.hash_id = xi - # if not search.hashtable._isPareto: - # search.hashtable.add_to_best_cache(xi) - self.LAMBDA_k = self.search.bb_handle.constraintsRelaxation["LAMBDA"] - self.RHO_k = self.search.bb_handle.constraintsRelaxation["RHO"] + + self.lambda_multipliers_k = self.search.bb_handle.constraints_relaxation["LAMBDA"] + self.rho_k = self.search.bb_handle.constraints_relaxation["RHO"] self.search.postprocess_evaluated_candidates(xt) @@ -226,14 +208,12 @@ def search_step(self, xmin: SS.CandidatePoint=None): """ Updates """ if self.search.success == SUCCESS_TYPES.FS: - dir: Point = Point(self.search.mesh._n) - dir.coordinates = self.search.xmin.direction.coordinates - # search.mesh.psize = np.multiply(search.mesh.get, 2, dtype=search.dtype.dtype) - self.search.mesh.enlargeDeltaFrameSize(direction=dir) + direction: Point = Point(self.search.mesh._n) + direction.coordinates = self.search.xmin.direction.coordinates + self.search.mesh.enlargeDeltaFrameSize(direction=direction) if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="expand") elif self.search.success == SUCCESS_TYPES.US: - # search.mesh.psize = np.divide(search.mesh.psize, 2, dtype=search.dtype.dtype) self.search.mesh.refineDeltaFrameSize() if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="contract") @@ -241,33 +221,33 @@ def search_step(self, xmin: 
SS.CandidatePoint=None): xpost: List[CandidatePoint] = [] for i in range(len(xt)): xpost.append(xt[i]) - updated, updatedF, updatedInf = B.updateWithPoints(evalPointList=xpost, evalType=None, keepAllPoints=False, updateInfeasibleIncumbentAndHmax=True) + updated, updated_f, updated_inf = B.updateWithPoints(eval_point_list=xpost, keep_all_points=False) if not updated: - newMesh = None + new_mesh = None if B._currentIncumbentInf: B._currentIncumbentInf.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="contract") if B._currentIncumbentFeas: B._currentIncumbentFeas.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="contract") if self.iteration == 1: self.search.vicinity_ratio = np.ones((len(self.search.xmin.coordinates),1)) - if newMesh: - self.search.mesh = newMesh + if new_mesh: + self.search.mesh = new_mesh else: self.search.mesh.refineDeltaFrameSize() if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="contract") else: - self.search.mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if updatedF else copy.deepcopy(B._currentIncumbentInf.mesh) if updatedInf else self.search.mesh - self.search.xmin = copy.deepcopy(B._currentIncumbentFeas) if updatedF else copy.deepcopy(B._currentIncumbentInf) if updatedInf else self.search.xmin + self.search.mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if updated_f else copy.deepcopy(B._currentIncumbentInf.mesh) if updated_inf else self.search.mesh + self.search.xmin = copy.deepcopy(B._currentIncumbentFeas) if updated_f else copy.deepcopy(B._currentIncumbentInf) if updated_inf else self.search.xmin if self.search.sampling_t != SAMPLING_METHOD.ACTIVE.name: self.search.update_local_region(region="expand") @@ -275,7 +255,7 @@ def search_step(self, xmin: SS.CandidatePoint=None): self.post.poll_dirs.append(xpost[i]) self.search.hashtable.best_hash_ID = [] self.search.hashtable.add_to_best_cache(B.getAllPoints()) - self.post.xmin = B._currentIncumbentFeas if updatedF else B._currentIncumbentInf if updatedInf else self.search.xmin + self.post.xmin = B._currentIncumbentFeas if updated_f else B._currentIncumbentInf if updated_inf else self.search.xmin self.search.mesh.update() @@ -290,21 +270,11 @@ def search_step(self, xmin: SS.CandidatePoint=None): self.log.log_msg(f" Run completed in {toc - tic:.4f} seconds", MSG_TYPE.INFO) self.log.log_msg(msg=f" Success status: {self.search.success}", msg_type=MSG_TYPE.INFO) self.log.log_msg(msg=self.post.__str__(), msg_type=MSG_TYPE.INFO) - # log.log_msg(f" Random numbers generator's seed {options.seed}", MSG_TYPE.INFO) - # log.log_msg(" xmin = " + str(search.xmin), MSG_TYPE.INFO) - # log.log_msg(" hmin = " + 
str(search.xmin.h), MSG_TYPE.INFO) - # log.log_msg(" fmin = " + str(search.xmin.f), MSG_TYPE.INFO) - # log.log_msg(" #bb_eval = " + str(search.bb_handle.bb_eval), MSG_TYPE.INFO) - # log.log_msg(" nb_success = " + str(search.nb_success), MSG_TYPE.INFO) - - # Failure_check = iteration > 0 and search.Failure_stop is not None and search.Failure_stop and not search.success - # if (Failure_check) or (abs(search.mesh.msize) < options.tol or search.bb_eval >= options.budget or search.terminate): - # break - # iteration += 1 - self.RHO_k = self.search.RHO + + self.rho_k = self.search.RHO self.HT = self.search.hashtable self.active_barrier = B - self.LAMBDA_k = self.search.LAMBDA + self.lambda_multipliers_k = self.search.LAMBDA return self.search.xmin def poll_step(self, xmin: PS.CandidatePoint=None): @@ -328,22 +298,21 @@ def poll_step(self, xmin: PS.CandidatePoint=None): B.update_and_reset_success() elif isinstance(B, BarrierMO) and self.iteration == 1: - B.init(evalPointList=[xmin]) + B.init(eval_point_list=[xmin]) if isinstance(B, Barrier): - self.poll.hmax = xmin.hmax + self.poll.hmax = xmin.h_max self.poll.create_poll_set(hhm=hhm, ub=self.param.ub, lb=self.param.lb, it=self.iteration, var_type=xmin.var_type, var_sets=xmin.sets, var_link = xmin.var_link, c_types=self.param.constraints_type, is_prim=True) if B._sec_poll_center is not None and B._sec_poll_center.evaluated: del self.poll.poll_set - # poll.poll_dirs = [] self.poll.x_sc = B._sec_poll_center self.poll.create_poll_set(hhm=hhm, ub=self.param.ub, lb=self.param.lb, it=self.iteration, var_type=B._sec_poll_center.var_type, var_sets=B._sec_poll_center.sets, var_link = B._sec_poll_center.var_link, c_types=self.param.constraints_type, is_prim=False) elif isinstance(B, BarrierMO): - self.poll.hmax = B._hMax + self.poll.hmax = B._h_max del self.poll.poll_set del self.poll.poll_dirs if B._currentIncumbentFeas and B._currentIncumbentFeas.evaluated: @@ -356,7 +325,6 @@ def poll_step(self, xmin: PS.CandidatePoint=None): lb=self.param.lb, it=self.iteration, var_type=self.poll.xmin.var_type, var_sets=self.poll.xmin.sets, var_link = self.poll.xmin.var_link, c_types=self.param.constraints_type, is_prim=True) if B._currentIncumbentInf and B._currentIncumbentInf.evaluated: - # del poll.poll_set self.poll.x_sc = B._currentIncumbentInf self.poll.create_poll_set(hhm=hhm, ub=self.param.ub, @@ -367,8 +335,8 @@ def poll_step(self, xmin: PS.CandidatePoint=None): lb=self.param.lb, it=self.iteration, var_type=self.poll.xmin.var_type, var_sets=self.poll.xmin.sets, var_link = self.poll.xmin.var_link, c_types=self.param.constraints_type, is_prim=False) - self.poll.LAMBDA = self.LAMBDA_k - self.poll.RHO = self.RHO_k + self.poll.LAMBDA = self.lambda_multipliers_k + self.poll.RHO = self.rho_k """ Save current poll directions and incumbent solution so they can be saved later in the post dir """ @@ -383,37 +351,32 @@ def poll_step(self, xmin: PS.CandidatePoint=None): xt = [] self.poll.bb_handle.xmin = xmin """ Serial evaluation for points in the poll set """ - self.poll.constraints_RP.LAMBDA = xmin.LAMBDA - self.poll.constraints_RP.RHO = xmin.RHO + self.poll.constraints_RP.LAMBDA = xmin.lambda_multipliers + self.poll.constraints_RP.RHO = xmin.rho self.poll.constraints_RP.constraints_type = xmin.constraints_type - self.poll.constraints_RP.hmax = xmin.hmax + self.poll.constraints_RP.hmax = xmin.h_max if not self.options.parallel_mode: - xt, self.post, self.peval = self.poll.bb_handle.run_callable_serial_local(iter=self.iteration, peval=self.peval, 
eval_set=self.poll._candidate_points_set, options=self.options, post=self.post, psize=self.poll.mesh.getDeltaFrameSize().coordinates, stepName=f'Poll Step', mesh=self.poll.mesh, constraintsRelaxation=self.poll.constraints_RP.__dict__, budget=self.options.budget) + xt, self.post, self.peval = self.poll.bb_handle.run_callable_serial_local(iter=self.iteration, peval=self.peval, eval_set=self.poll._candidate_points_set, options=self.options, post=self.post, psize=self.poll.mesh.getDeltaFrameSize().coordinates, step_name='Poll Step', mesh=self.poll.mesh, constraints_relaxation=self.poll.constraints_RP.__dict__, budget=self.options.budget) else: self.poll.point_index = -1 """ Parallel evaluation for points in the samples set """ - self.poll.bb_eval, xt, self.post, self.peval = self.poll.bb_handle.run_callable_parallel_local(iter=self.iteration, peval=self.peval, njobs=self.options.np, eval_set=self.poll._candidate_points_set, options=self.options, post=self.post, mesh=self.poll.mesh, stepName=f'Poll Step', psize=self.poll.mesh.getDeltaFrameSize().coordinates, constraintsRelaxation=self.poll.constraints_RP.__dict__, budget=self.options.budget) + self.poll.bb_eval, xt, self.post, self.peval = self.poll.bb_handle.run_callable_parallel_local(iter=self.iteration, peval=self.peval, eval_set=self.poll._candidate_points_set, options=self.options, post=self.post, mesh=self.poll.mesh, step_name='Poll Step', psize=self.poll.mesh.getDeltaFrameSize().coordinates, constraints_relaxation=self.poll.constraints_RP.__dict__, budget=self.options.budget) - if self.poll.bb_handle.constraintsRelaxation: - temp:ConstraintsRelaxationParameters = ConstraintsRelaxationParameters(**self.poll.bb_handle.constraintsRelaxation) + if self.poll.bb_handle.constraints_relaxation: + temp:ConstraintsRelaxationParameters = ConstraintsRelaxationParameters(**self.poll.bb_handle.constraints_relaxation) for i in range(len(temp.LAMBDA)): self.poll.constraints_RP.LAMBDA[i] = temp.LAMBDA[i] self.poll.constraints_RP.RHO = temp.RHO self.poll.constraints_RP.constraints_type = temp.constraints_type self.poll.constraints_RP.hmax = temp.hmax - # if options.store_cache: - # for xi in xt: - # poll.hashtable.hash_id = xi - # if not poll.hashtable._isPareto: - # poll.hashtable.add_to_best_cache(xi) - self.LAMBDA_k = self.poll.bb_handle.constraintsRelaxation["LAMBDA"] - self.RHO_k = self.poll.bb_handle.constraintsRelaxation["RHO"] + + self.lambda_multipliers_k = self.poll.bb_handle.constraints_relaxation["LAMBDA"] + self.rho_k = self.poll.bb_handle.constraints_relaxation["RHO"] self.poll.postprocess_evaluated_candidates(xt) if isinstance(B, Barrier): xpost: List[CandidatePoint] = self.poll.master_updates(xt, self.peval, save_all_best=self.options.save_all_best, save_all=self.options.save_results) - xmin = copy.deepcopy(self.poll.xmin) if self.options.save_results: for i in range(len(xpost)): self.post.poll_dirs.append(xpost[i]) @@ -429,37 +392,35 @@ def poll_step(self, xmin: PS.CandidatePoint=None): for p in self.poll.poll_set: if p.evaluated: pev += 1 - # if pev != poll.poll_dirs and not poll.success: - # poll.seed += 1 - goToSearch: bool = (pev == 0 and self.poll.Failure_stop is not None and self.poll.Failure_stop) + + go_to_search: bool = (pev == 0 and self.poll.Failure_stop is not None and self.poll.Failure_stop) - dir: Point = Point(self.poll._n) - dir.coordinates = self.poll.xmin.direction.coordinates if self.poll.xmin.direction is not None else [0]*self.poll._n - if self.poll.success == SUCCESS_TYPES.FS and not goToSearch: - 
self.poll.mesh.enlargeDeltaFrameSize(direction=dir) # poll.mesh.psize = np.multiply(poll.mesh.psize, 2, dtype=poll.dtype.dtype + direction: Point = Point(self.poll._n) + direction.coordinates = self.poll.xmin.direction.coordinates if self.poll.xmin.direction is not None else [0]*self.poll._n + if self.poll.success == SUCCESS_TYPES.FS and not go_to_search: + self.poll.mesh.enlargeDeltaFrameSize(direction=direction) # poll.mesh.psize = np.multiply(poll.mesh.psize, 2, dtype=poll.dtype.dtype elif self.poll.success == SUCCESS_TYPES.US: self.poll.mesh.refineDeltaFrameSize() - # poll.mesh.psize = np.divide(poll.mesh.psize, 2, dtype=poll.dtype.dtype) elif isinstance(B, BarrierMO): xpost: List[CandidatePoint] = [] for i in range(len(xt)): xpost.append(xt[i]) - updated, _, _ = B.updateWithPoints(evalPointList=xpost, evalType=None, keepAllPoints=False, updateInfeasibleIncumbentAndHmax=True) + updated, _, _ = B.updateWithPoints(eval_point_list=xpost, keep_all_points=False) if not updated: - newMesh = None + new_mesh = None if B._currentIncumbentInf: B._currentIncumbentInf.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if B._currentIncumbentFeas: B._currentIncumbentFeas.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() - if newMesh: - self.poll.mesh = newMesh + if new_mesh: + self.poll.mesh = new_mesh else: self.poll.mesh.refineDeltaFrameSize() @@ -476,8 +437,8 @@ def poll_step(self, xmin: PS.CandidatePoint=None): if self.options.display: print(self.post) - self.LAMBDA_k = self.poll.LAMBDA - self.RHO_k = self.poll.xmin.RHO + self.lambda_multipliers_k = self.poll.LAMBDA + self.rho_k = self.poll.xmin.rho toc = time.perf_counter() @@ -486,19 +447,10 @@ def poll_step(self, xmin: PS.CandidatePoint=None): self.log.log_msg(msg=f" Run completed in {toc - tic:.4f} seconds", msg_type=MSG_TYPE.INFO) self.log.log_msg(msg=f" Success status: {self.poll.success}", msg_type=MSG_TYPE.INFO) self.log.log_msg(msg=self.post.__str__(), msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" Random numbers generator's seed {options.seed}", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" xmin = {poll.xmin.__str__()} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" hmin = {poll.xmin.h} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" fmin {poll.xmin.fobj}", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" #bb_eval = {poll.bb_eval} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" #iteration = {iteration} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" nb_success = {poll.nb_success} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" psize = {poll.mesh.psize} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" psize_success = {poll.mesh.psize_success} ", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" psize_max = {poll.mesh.psize_max} ", msg_type=MSG_TYPE.INFO) + self.HT = self.poll.hashtable self.active_barrier = B - self.LAMBDA_k = self.poll.xmin.LAMBDA + 
self.lambda_multipliers_k = self.poll.xmin.lambda_multipliers return self.poll.xmin def main(*args) -> Dict[str, Any]: @@ -532,7 +484,7 @@ def main(*args) -> Dict[str, Any]: if not os.path.exists(data["param"]["post_dir"]): try: os.mkdir(data["param"]["post_dir"]) - except: + except OSError: os.makedirs(data["param"]["post_dir"], exist_ok=True) log.initialize(data["param"]["post_dir"] + "/OMADS.log") @@ -540,7 +492,7 @@ """ Run preprocessor for the setup of the optimization problem and for the initialization of optimization process """ - MADS_agent: MADS = MADS(data=data) + mads_agent: MADS = MADS(data=data) iteration: int xmin: CandidatePoint options: Options @@ -551,16 +503,14 @@ # poll: PS.Dirs2n search: SS.efficient_exploration log.log_msg(msg="Preprocess the search step...", msg_type=PS.MSG_TYPE.INFO) - _, _, MADS_agent.search, _, _, _, _, _, _ = SS.PreExploration(data).initialize_from_dict(log=log) + _, _, mads_agent.search, _, _, _, _, _, _ = SS.PreExploration(data).initialize_from_dict(log=log) log.log_msg(msg="Preprocess the MADS algorithim...", msg_type=PS.MSG_TYPE.INFO) - iteration, xmin, MADS_agent.poll, options, param, post, out, B, outP = PS.PrePoll(data).initialize_from_dict(log=log, xs=MADS_agent.search.xmin) - out.stepName = "Poll" - post.step_name = [f'Search: {MADS_agent.search.type}'] + iteration, xmin, mads_agent.poll, options, param, post, out, B, out_p = PS.PrePoll(data).initialize_from_dict(log=log, xs=mads_agent.search.xmin) + out.step_name = "Poll" + post.step_name = [f'Search: {mads_agent.search.type}'] - HT = MADS_agent.poll.hashtable + HT = mads_agent.poll.hashtable - # if MADS_LINK.REPLACE is not None and not MADS_LINK.REPLACE: - # out.replace = False """ Set the random seed for results reproducibility """ if len(args) < 4: @@ -571,76 +521,80 @@ """ Start the count down for calculating the runtime indicator """ tic = PS.time.perf_counter() peval = 0 - LAMBDA_k = xmin.LAMBDA - RHO_k = xmin.RHO + lambda_multipliers = xmin.lambda_multipliers + rho_k = xmin.rho - if MADS_agent.search.type == SS.SEARCH_TYPE.VNS.name: - search_VN = SS.VNS(active_barrier=B, params=param) - search_VN._ns_dist = [int(((MADS_agent.search.dim+1)/2)*((MADS_agent.search.dim+2)/2)/(len(search_VN._dist))) if MADS_agent.search.ns is None else MADS_agent.search.ns] * len(search_VN._dist) - MADS_agent.search.ns = sum(search_VN._ns_dist) + if mads_agent.search.type == SS.SEARCH_TYPE.VNS.name: + search_vn = SS.VNS(active_barrier=B, params=param) + search_vn._ns_dist = [int(((mads_agent.search.dim+1)/2)*((mads_agent.search.dim+2)/2)/(len(search_vn._dist))) if mads_agent.search.ns is None else mads_agent.search.ns] * len(search_vn._dist) + mads_agent.search.ns = sum(search_vn._ns_dist) else: - search_VN = None + search_vn = None - MADS_agent.search.lb = param.lb - MADS_agent.search.ub = param.ub - MADS_agent.options = options - MADS_agent.param = param - MADS_agent.log = log - MADS_agent.outP = outP - MADS_agent.out = out - MADS_agent.post = post - MADS_agent.HT = HT - MADS_agent.peval = peval - MADS_agent.RHO_k = RHO_k - MADS_agent.active_barrier = B - MADS_agent.LAMBDA_k = LAMBDA_k - + mads_agent.search.lb = param.lb + mads_agent.search.ub = param.ub + mads_agent.options = options + mads_agent.param = param + mads_agent.log = log + mads_agent.out_p = out_p + mads_agent.out = out + mads_agent.post = post + mads_agent.HT = HT + mads_agent.peval = peval + mads_agent.rho_k = rho_k + 
mads_agent.active_barrier = B + mads_agent.lambda_multipliers_k = lambda_multipliers + original_st = copy.deepcopy(mads_agent.search.sampling_t) while True: """ Run search step (Optional) """ - # TODO: This rule cannot be generalized -- needs further invistigation + # COMPLETED: This rule cannot be generalized -- needs further investigation # if poll.dim > 10 and poll.mesh.psize >= 1E-4: # canSearch = False # else: - canSearch = True - MADS_agent.iteration = iteration + can_search = True + mads_agent.iteration = iteration - if canSearch and (MADS_agent.poll.success == SUCCESS_TYPES.US or iteration == 1): - MADS_agent.log.log_msg(f"------- Iteration # {iteration}: Run the search step -------", MSG_TYPE.INFO) - MADS_agent.search.iter = iteration - xmin = MADS_agent.search_step(xmin=xmin) + if can_search and (mads_agent.poll.success == SUCCESS_TYPES.US or iteration == 1): + mads_agent.log.log_msg(f"------- Iteration # {iteration}: Run the search step -------", MSG_TYPE.INFO) + mads_agent.search.iter = iteration + xmin = mads_agent.search_step(xmin=xmin) """ Run the poll step (Mandatory step) """ - MADS_agent.log.log_msg(f"------- Iteration # {iteration}: Run the poll step -------", MSG_TYPE.INFO) - xmin = MADS_agent.poll_step(xmin=xmin) - xmin = MADS_agent.poll.xmin - MADS_agent.search.mesh = copy.deepcopy(MADS_agent.poll.mesh) - MADS_agent.search.psize = copy.deepcopy(MADS_agent.poll.psize) + mads_agent.log.log_msg(f"------- Iteration # {iteration}: Run the poll step -------", MSG_TYPE.INFO) + xmin = mads_agent.poll_step(xmin=xmin) + xmin = mads_agent.poll.xmin + mads_agent.search.mesh = copy.deepcopy(mads_agent.poll.mesh) + mads_agent.search.psize = copy.deepcopy(mads_agent.poll.psize) """ Check stopping criteria""" - pt = (all(abs(MADS_agent.poll.mesh.getDeltaFrameSize().coordinates[pp]) < options.tol for pp in range(MADS_agent.poll._n))) - st = (all(abs(MADS_agent.search.mesh.getdeltaMeshSize().coordinates[pp]) < options.tol for pp in range(MADS_agent.search.mesh._n))) + pt = (all(abs(mads_agent.poll.mesh.getDeltaFrameSize().coordinates[pp]) < options.tol for pp in range(mads_agent.poll._n))) + st = (all(abs(mads_agent.search.mesh.getdeltaMeshSize().coordinates[pp]) < options.tol for pp in range(mads_agent.search.mesh._n))) if options.save_results: - MADS_agent.post.output_results(out, False) + mads_agent.post.output_results(out, False) if param.isPareto: - MADS_agent.post.nd_points = [] + mads_agent.post.nd_points = [] for i in range(len(B.getAllPoints())): - MADS_agent.post.nd_points.append(B.getAllPoints()[i]) - MADS_agent.post.output_nd_results(outP) - if (pt or st or MADS_agent.search.bb_eval + MADS_agent.poll.bb_eval >= options.budget): - MADS_agent.log.log_msg(f"\n--------------- Termination of MADS ---------------", MSG_TYPE.INFO) + mads_agent.post.nd_points.append(B.getAllPoints()[i]) + mads_agent.post.output_nd_results(out_p) + if (pt or st or mads_agent.search.bb_eval + mads_agent.poll.bb_eval >= options.budget): + mads_agent.log.log_msg("\n--------------- Termination of MADS ---------------", MSG_TYPE.INFO) if pt: - MADS_agent.log.log_msg(f"Termination criterion hit: the poll size is below the minimum threshold defined.", MSG_TYPE.INFO) + mads_agent.log.log_msg("Termination criterion hit: the poll size is below the minimum threshold defined.", MSG_TYPE.INFO) if st: - MADS_agent.log.log_msg(f"Termination criterion hit: the mesh size is below the minimum threshold defined.", MSG_TYPE.INFO) - if (MADS_agent.search.bb_eval + MADS_agent.poll.bb_eval >= options.budget): - 
MADS_agent.log.log_msg(f"Termination criterion hit: Evaluation budget is exhausted.", MSG_TYPE.INFO) - MADS_agent.log.log_msg(f"----------------------------------------------------\n", MSG_TYPE.INFO) + mads_agent.log.log_msg("Termination criterion hit: the mesh size is below the minimum threshold defined.", MSG_TYPE.INFO) + if (mads_agent.search.bb_eval + mads_agent.poll.bb_eval >= options.budget): + mads_agent.log.log_msg("Termination criterion hit: Evaluation budget is exhausted.", MSG_TYPE.INFO) + mads_agent.log.log_msg("----------------------------------------------------\n", MSG_TYPE.INFO) break iteration += 1 toc = PS.time.perf_counter() if isinstance(B, BarrierMO): - perfM = Metrics(ND_solutions=B.getAllPoints(), nobj=B._nobj) - HV = perfM.hypervolume() + rp: Optional[Point] = None + if param.ref_point: + rp = Point() + rp.coordinates = param.ref_point + perf_m = Metrics(nd_solutions=B.getAllPoints(), nobj=B._nobj, ref_point=rp) + HV = perf_m.hypervolume() """ If benchmarking, then populate the results in the benchmarking output report """ if importlib.util.find_spec('BMDFO') and len(args) > 1 and isinstance(args[1], PS.toy.Run): @@ -649,36 +603,34 @@ ncon = 0 else: ncon = len(xmin.c_ineq) - if len(MADS_agent.poll.bb_output) > 0: - b.add_row(name=MADS_agent.poll.bb_handle.blackbox, + if len(mads_agent.poll.bb_output) > 0: + b.add_row(name=mads_agent.poll.bb_handle.blackbox, run_index=int(args[2]), nv=len(param.baseline), nc=ncon, - nb_success=MADS_agent.poll.nb_success, + nb_success=mads_agent.poll.nb_success, it=iteration, - BBEVAL=MADS_agent.poll.bb_eval, + BBEVAL=mads_agent.poll.bb_eval, runtime=toc - tic, - feval=MADS_agent.poll.bb_handle.bb_eval, - hmin=MADS_agent.poll.xmin.h, - fmin=MADS_agent.poll.xmin.f) - print(f"{MADS_agent.poll.bb_handle.blackbox}: fmin = {MADS_agent.poll.xmin.f} , hmin= {MADS_agent.poll.xmin.h:.2f}") + feval=mads_agent.poll.bb_handle.bb_eval, + hmin=mads_agent.poll.xmin.h, + fmin=mads_agent.poll.xmin.f) + print(f"{mads_agent.poll.bb_handle.blackbox}: fmin = {mads_agent.poll.xmin.f} , hmin= {mads_agent.poll.xmin.h:.2f}") elif importlib.util.find_spec('BMDFO') and len(args) > 1 and not isinstance(args[1], toy.Run): raise IOError("Could not find " + args[1] + " in the internal BM suite.") - # if options.save_results: - # post.output_results(out) out_step: Any = None - if MADS_agent.poll.xmin < MADS_agent.search.xmin: - out_step = MADS_agent.poll - elif MADS_agent.search.xmin < MADS_agent.poll.xmin: - out_step = MADS_agent.search + if mads_agent.poll.xmin < mads_agent.search.xmin: + out_step = mads_agent.poll + elif mads_agent.search.xmin < mads_agent.poll.xmin: + out_step = mads_agent.search else: - out_step = MADS_agent.poll + out_step = mads_agent.poll if out_step is None: - out_step = MADS_agent.poll + out_step = mads_agent.poll if options.display: @@ -686,27 +638,26 @@ print(" Final objective value: " + str(out_step.xmin.f) + ", hmin= " + str(out_step.xmin.h)) if options.save_coordinates: - MADS_agent.post.output_coordinates(out) + mads_agent.post.output_coordinates(out) - if MADS_agent.log is not None: - MADS_agent.log.log_msg(msg=" --- MADS Run Summary--- ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Run completed in {toc - tic:.4f} seconds", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" # of successful search steps = {MADS_agent.search.n_successes}", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" # of successful poll steps = 
{MADS_agent.poll.n_successes}", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Run completed in {toc - tic:.4f} seconds", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Random numbers generator's seed {options.seed}", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" xmin = {MADS_agent.poll.xmin.__str__()} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" hmin = {MADS_agent.poll.xmin.h} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" fmin {MADS_agent.poll.xmin.fobj}", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Search step # BB evals = {MADS_agent.search.bb_eval} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Poll step # BB evals = {MADS_agent.poll.bb_eval} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" Total # BB evals = {MADS_agent.poll.bb_eval + MADS_agent.search.bb_eval} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" #iterations = {iteration} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" psize = {MADS_agent.poll.mesh.getDeltaFrameSize().coordinates} ", msg_type=MSG_TYPE.INFO) - MADS_agent.log.log_msg(msg=f" psize_success = {MADS_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates}", msg_type=MSG_TYPE.INFO) + if mads_agent.log is not None: + mads_agent.log.log_msg(msg=" --- MADS Run Summary--- ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Run completed in {toc - tic:.4f} seconds", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" # of successful search steps = {mads_agent.search.n_successes}", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" # of successful poll steps = {mads_agent.poll.n_successes}", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Random numbers generator's seed {options.seed}", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" xmin = {mads_agent.poll.xmin.__str__()} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" hmin = {mads_agent.poll.xmin.h} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" fmin = {mads_agent.poll.xmin.fobj}", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Search step # BB evals = {mads_agent.search.bb_eval} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Poll step # BB evals = {mads_agent.poll.bb_eval} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Total # BB evals = {mads_agent.poll.bb_eval + mads_agent.search.bb_eval} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" #iterations = {iteration} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" psize = {mads_agent.poll.mesh.getDeltaFrameSize().coordinates} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" psize_success = {mads_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates}", msg_type=MSG_TYPE.INFO) if isinstance(B, BarrierMO): - MADS_agent.log.log_msg(msg=f" Hypervolume metric = {HV}", msg_type=MSG_TYPE.INFO) - # log.log_msg(msg=f" psize_max = {poll.mesh.psize_max} ", msg_type=MSG_TYPE.INFO) + mads_agent.log.log_msg(msg=f" Hypervolume metric = {HV}", msg_type=MSG_TYPE.INFO) if options.display: print("\n ---MADS Run Summary---") print(f" Run completed in {toc - tic:.4f} seconds") @@ -716,10 +667,9 @@ print(" fmin = " + str(out_step.xmin.f)) print(" #bb_eval = " + str(out_step.bb_eval)) print(" #iteration = " + str(iteration)) - print(" nb_success = " + str(MADS_agent.poll.nb_success + MADS_agent.search.nb_success)) - print(" psize = " + 
str(MADS_agent.poll.mesh.getDeltaFrameSize().coordinates)) - print(" psize_success = " + str(MADS_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates)) - # print(" psize_max = " + str(poll.mesh.psize_max)) + print(" nb_success = " + str(mads_agent.poll.nb_success + mads_agent.search.nb_success)) + print(" psize = " + str(mads_agent.poll.mesh.getDeltaFrameSize().coordinates)) + print(" psize_success = " + str(mads_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates)) xmin = out_step.xmin """ Evaluation of the blackbox; get output responses """ @@ -737,11 +687,12 @@ def main(*args) -> Dict[str, Any]: "hmin": out_step.xmin.h, "nbb_evals" : out_step.bb_eval, "niterations" : iteration, - "nb_success": MADS_agent.poll.nb_success + MADS_agent.search.nb_success, - "psize": MADS_agent.poll.mesh.getDeltaFrameSize().coordinates, - "psuccess": MADS_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates, + "nb_success": mads_agent.poll.nb_success + mads_agent.search.nb_success, + "psize": mads_agent.poll.mesh.getDeltaFrameSize().coordinates, + "psuccess": mads_agent.poll.xmin.mesh.getDeltaFrameSize().coordinates, # "pmax": poll.mesh.psize_max, - "msize": out_step.mesh.getdeltaMeshSize().coordinates} + "msize": out_step.mesh.getdeltaMeshSize().coordinates, + "HV": HV if param.isPareto else "NA"} return output, out_step @@ -754,7 +705,7 @@ def rosen(x, *argv): def test_omads_callable_quick(): - eval = {"blackbox": rosen} + eval_bb = {"blackbox": rosen} param = {"baseline": [-2.0, -2.0], "lb": [-5, -5], "ub": [10, 10], @@ -769,7 +720,7 @@ def test_omads_callable_quick(): } options = {"seed": 0, "budget": 100000, "tol": 1e-12, "display": True, "check_cache": True, "store_cache": True, "rich_direction": True, "psize_init": 1., "precision": "high"} - data = {"evaluator": eval, "param": param, "options": options, "sampling": sampling} + data = {"evaluator": eval_bb, "param": param, "options": options, "sampling": sampling} out: Dict = main(data) print(out) diff --git a/src/OMADS/Mesh.py b/src/OMADS/Mesh.py index de005d0..d12efb6 100644 --- a/src/OMADS/Mesh.py +++ b/src/OMADS/Mesh.py @@ -1,6 +1,6 @@ -import copy -from typing import Protocol, Any, List -from ._globals import * +from dataclasses import dataclass +from typing import Protocol, Any, List, Optional +from ._globals import DType, M_INF_INT, P_INF_INT from .Point import Point from .Parameters import Parameters @@ -18,27 +18,24 @@ class MeshData(Protocol): :param _dtype: numpy double data type precision """ - _n: int = None - # _anisotropy_factor: int = 0.1 - # _meshSize: Point = None # mesh size - # _frameSize: Point = None # poll size - _initialMeshSize: Point = None # mesh size - _initialFrameSize: Point = None # poll size - _minMeshSize: Point = None # mesh size - _minFrameSize: Point = None # poll size - _lowerBound: Point = None - _upperBound: Point = None - _isFinest: bool = True - _r: Point = None - _rMin: Point = None - _rMax: Point = None + _n: Optional[int] = None + _initialMeshSize: Optional[Point] = None # mesh size + _initialFrameSize: Optional[Point] = None # poll size + _minMeshSize: Optional[Point] = None # mesh size + _minFrameSize: Optional[Point] = None # poll size + _lowerBound: Optional[Point] = None + _upperBound: Optional[Point] = None + _isFinest: Optional[bool] = True + _r: Optional[Point] = None + _rMin: Optional[Point] = None + _rMax: Optional[Point] = None _limitMinMeshIndex: int = M_INF_INT _limitMaxMeshIndex: int = P_INF_INT - _pbParams: Parameters = None - _rho: List[float] = None # poll size to mesh size ratio - _dtype: 
DType = None + _pbParams: Optional[Parameters] = None + _rho: Optional[List[float]] = None # poll size to mesh size ratio + _dtype: Optional[DType] = None - # TODO: manage the poll size granularity for discrete variables + # COMPLETED: manage the poll size granularity for discrete variables # # See: Audet et al., The mesh adaptive direct search algorithm for # # granular and discrete variable # _exp: int = 0 @@ -69,19 +66,19 @@ def getdeltaMeshSize(self): ... def getDeltaFrameSize(self, i: int): ... - def getDeltaFrameSizeCoarser(self, i: int): + def getDeltaFrameSizeCoarser(self): ... - def setDeltas(self, i: int = None, deltaMeshSize: Any = None, deltaFrameSize: Any = None): + def setDeltas(self, i: int = None, delta_mesh_size: Any = None, delta_frame_size: Any = None): ... - def scaleAndProjectOnMesh(self, i: int = None, l: float = None, dir: Point = None): + def scaleAndProjectOnMesh(self, dir: Point = None): ... - def projectOnMesh(self, point: Point, frameCenter: Point): + def projectOnMesh(self, point: Point, frame_center: Point): ... - def verifyPointIsOnMesh(self, point: Point, frameCenter: Point): + def verifyPointIsOnMesh(self, point: Point, frame_center: Point): ... def verifyDimension(self, name: str, dim: int): @@ -91,23 +88,23 @@ @dataclass class Mesh(MeshData): - def __init__(self, pbParams: Parameters, limitMinMeshIndex: int, limitMaxMeshIndex: int): - self._n = pbParams._n - self._initialMeshSize = pbParams.initialMeshSize - self._minMeshSize = pbParams.minMeshSize - self._initialFrameSize = pbParams.initialFrameSize - self._minFrameSize = pbParams.minFrameSize - self._lowerBound = Point(self._n, pbParams.lb) - self._upperBound = Point(self._n, pbParams.ub) + def __init__(self, pb_params: Parameters, limit_min_mesh_index: int, limit_max_mesh_index: int): + self._n = pb_params._n + self._initialMeshSize = pb_params.initialMeshSize + self._minMeshSize = pb_params.minMeshSize + self._initialFrameSize = pb_params.initialFrameSize + self._minFrameSize = pb_params.minFrameSize + self._lowerBound = Point(self._n, pb_params.lb) + self._upperBound = Point(self._n, pb_params.ub) self._isFinest = True self._r = Point(self._n).reset(n=self._n, d=0.) self._rMin = Point(self._n).reset(n=self._n, d=0.) self._rMax = Point(self._n).reset(n=self._n, d=0.) 
- self._limitMinMeshIndex = limitMinMeshIndex - self._limitMaxMeshIndex = limitMaxMeshIndex + self._limitMinMeshIndex = limit_min_mesh_index + self._limitMaxMeshIndex = limit_max_mesh_index self._dtype = DType() - self._pbParams = pbParams - if (not self._pbParams.toBeChecked()): + self._pbParams = pb_params + if (not self._pbParams.to_be_checked()): raise IOError("Parameters::checkAndComply() needs to be called before constructing a mesh.") @property @@ -142,9 +139,9 @@ def setMeshIndex(self, r:Point): def isFinest(self): return self._isFinest - def setLimitMeshIndices(self, limitMinMeshIndex: int, limitMaxMeshIndex: int): - self._limitMaxMeshIndex = limitMaxMeshIndex - self._limitMinMeshIndex = limitMinMeshIndex + def setLimitMeshIndices(self, limit_min_mesh_index: int, limit_max_mesh_index: int): + self._limitMaxMeshIndex = limit_max_mesh_index + self._limitMinMeshIndex = limit_min_mesh_index diff --git a/src/OMADS/Metrics.py b/src/OMADS/Metrics.py index d802c4f..d5cb999 100644 --- a/src/OMADS/Metrics.py +++ b/src/OMADS/Metrics.py @@ -1,27 +1,69 @@ import copy -from ._globals import * +from dataclasses import dataclass + +import numpy as np from .CandidatePoint import CandidatePoint -from typing import List +from typing import List, Optional +from deap.tools._hypervolume import pyhv as hv +from .Point import Point @dataclass class Metrics: - ND_solutions: List[CandidatePoint] = None + nd_solutions: Optional[List[CandidatePoint]] = None nobj: int = 2 - _ref_point: CandidatePoint = None + ref_point: Optional[Point] = None def find_ref_point(self): - if self.ND_solutions: - self._nobj = len(self.ND_solutions[0].f) - self._ref_point = CandidatePoint(self.ND_solutions[0].n_dimensions) + if self.nd_solutions: + self._nobj = len(self.nd_solutions[0].f) + self.ref_point = Point() ftemp = [] for i in range(self._nobj): f: List[float] = [] - for p in self.ND_solutions: - f.append(p.f[i]) - ftemp.append(max(f)+1) - self._ref_point.f = copy.deepcopy(ftemp) + for p in self.nd_solutions: + f.append(p.fobj[i]) + ftemp.append(max(f)+abs(max(f))*0.025) + self.ref_point.coordinates = copy.deepcopy(ftemp) + + def get_pareto_points(self): + ftemp = [] + if self.nd_solutions: + self._nobj = len(self.nd_solutions[0].fobj) + for p in self.nd_solutions: + f = () + for i in range(self._nobj): + f += (p.fobj[i],) + ftemp.append(f) + return ftemp + + def normalize_data(self, pareto_front, reference_point): + """ + Normalize Pareto points and the reference point. + + :param pareto_front: List of Pareto points where each point is a tuple (x, y). + :param reference_point: The reference point (rx, ry). + :return: Normalized Pareto points and reference point. + """ + # Convert Pareto front and reference point to numpy arrays + pareto_front = np.array(pareto_front) + reference_point = np.array(reference_point) + + # Find min and max values for each objective + min_vals = np.min(pareto_front, axis=0) + max_vals = np.max(pareto_front, axis=0) + + # Ensure that min and max values are not the same to avoid division by zero + if np.any(max_vals == min_vals): + raise ValueError("Max and min values for at least one objective are the same. 
Normalization cannot be performed.") + + # Normalize Pareto points + normalized_pareto_front = (pareto_front - min_vals) / (max_vals - min_vals) + + # Normalize reference point + normalized_reference_point = (reference_point - min_vals) / (max_vals - min_vals) + + return normalized_pareto_front, normalized_reference_point def hypervolume(self): """ @@ -34,29 +76,96 @@ def hypervolume(self): Returns: - The hypervolume indicator value. """ - self.find_ref_point() - # self.ND_solutions = np.array(self.ND_solutions) - # self._ref_point = np.array(self._ref_point) - - # Ensure all objectives are minimized (convert to maximization problem) - ND_solutions = np.array([np.subtract(self._ref_point.f, xf.f) for xf in self.ND_solutions]) + if not self.ref_point: + self.find_ref_point() + ref_p = tuple(self.ref_point.coordinates) + pf = self.get_pareto_points() + pf_n, ref_n = self.normalize_data(pareto_front=pf, reference_point=ref_p) + # Create a Hypervolume object with the reference point + pf_n_list = [] + for i in range(len(pf_n)): + pf_n_list.append(list(pf_n[i])) + + pf_n_list = np.array(pf_n_list) + return hv.hypervolume(pointset=pf_n_list, ref=np.array(list(ref_n))) + + # # Ensure all objectives are minimized (convert to maximization problem) + # nd_solutions = np.array([np.subtract(self._ref_point.f, xf.f) for xf in self.nd_solutions]) - # Sort self.ND_solutionss lexicographically - ND_solutions.sort(axis=0) + # # Sort self.ND_solutionss lexicographically + # nd_solutions.sort(axis=0) - hypervolume_value = 0.0 - last_volume = [1.0]*self.nobj + # hypervolume_value = 0.0 + # last_volume = [1.0]*self.nobj - for point in ND_solutions: - current_volume = 1.0 - for i in range(len(self._ref_point.f)): - current_volume *= max(last_volume[i], point[i]) - last_volume[i] + # for point in nd_solutions: + # current_volume = 1.0 + # for i in range(len(self._ref_point.f)): + # current_volume *= max(last_volume[i], point[i]) - last_volume[i] - hypervolume_value += current_volume - last_volume = point + # hypervolume_value += current_volume + # last_volume = point - return hypervolume_value + # return hypervolume_value + + def calculate_hypervolume(self, pf, rp): + """ + Calculate the hypervolume of a bi-objective Pareto front. + + :param pareto_front: A list of Pareto points where each point is a tuple (x, y). + :param reference_point: The reference point (rx, ry) to compute the hypervolume against. + :return: Hypervolume of the Pareto front. 
+ """ + # Sort Pareto front by the first objective (x-coordinate) + pareto_front, reference_point = self.normalize_data(pf, rp) + pareto_front = sorted(pareto_front, key=lambda point: point[0]) + + # Initialize variables + hypervolume = 0.0 + previous_y = reference_point[1] + + # Iterate through the sorted Pareto points + for i in range(len(pareto_front)): + x, y = pareto_front[i] + # Compute the area between the current point and the previous point + width = pareto_front[i][0] - (pareto_front[i - 1][0] if i > 0 else 0) + height = previous_y - y + hypervolume += width * height + + # Update previous_y to the current y + previous_y = y + + # Account for the last segment up to the reference point + width = reference_point[0] - pareto_front[-1][0] + height = previous_y - reference_point[1] + hypervolume += width * height + + return hypervolume + def generational_distance(self, true_pareto_front, approximate_pareto_front): """ Compute the generational distance (GD) metric between two Pareto fronts. @@ -97,8 +206,8 @@ def inverted_generational_distance(self, true_pareto_front, approximate_pareto_f igd = igd_sum / len(true_pareto_front) return igd - def dominates(self, A, B): - return all(A <= B) and any(A < B) + def dominates(self, a, b): + return all(a <= b) and any(a < b) def ranking(self, solutions): # Initialize ranks @@ -108,8 +217,7 @@ def ranking(self, solutions): # Compare each solution with every other solution for i in range(n): for j in range(n): - if i != j: - if self.dominates(solutions[j], solutions[i]): + if i != j and self.dominates(solutions[j], solutions[i]): rank[i] += 1
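The new hypervolume path above delegates to DEAP's pure-Python backend rather than the hand-rolled loop it replaces. For readers who want to see that call in isolation, here is a minimal standalone sketch; the three front points and the reference point are invented for illustration, and the normalization mirrors Metrics.normalize_data:

import numpy as np
from deap.tools._hypervolume import pyhv as hv

# Invented bi-objective non-dominated front (minimization) and a worst-case reference point.
pf = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]])
ref = np.array([5.0, 5.0])

# Min-max normalization per objective, as in Metrics.normalize_data.
min_vals, max_vals = pf.min(axis=0), pf.max(axis=0)
pf_n = (pf - min_vals) / (max_vals - min_vals)
ref_n = (ref - min_vals) / (max_vals - min_vals)

# Volume dominated by the normalized front, bounded above by the normalized reference.
print(hv.hypervolume(pointset=pf_n, ref=ref_n))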
diff --git a/src/OMADS/Omesh.py b/src/OMADS/Omesh.py index 874d687..23c27ec 100644 --- a/src/OMADS/Omesh.py +++ b/src/OMADS/Omesh.py @@ -1,10 +1,12 @@ -import copy -from typing import List -from ._globals import * +from typing import List, Optional + +import numpy as np +from ._globals import DType, VAR_TYPE, GL_LIMITS from .Point import Point from .Mesh import Mesh from .Options import Options from .Parameters import Parameters +from dataclasses import dataclass @dataclass class Omesh(Mesh): @@ -20,27 +22,27 @@ class Omesh(Mesh): """ ... :param _dtype: numpy double data type precision """ - _n: int = None - _meshSize: Point = None #1.0 # mesh size - _frameSize: Point = 1.0 # poll size - _rho: List[float] = 1.0 # poll size to mesh size ratio + _n: Optional[int] = None + _meshSize: Optional[Point] = None #1.0 # mesh size + _frameSize: Optional[Point] = None # poll size + _rho: Optional[List[float]] = None # poll size to mesh size ratio # Completed: manage the poll size granularity for discrete variables # A new class 'Gmesh' is now available. # Gmesh adapts mesh granularity and anisotropy # See: Audet et al., The mesh adaptive direct search algorithm for # granular and discrete variable - _exp: Point = None - _mantissa: Point = None - _maximumFrameSize: Point = None - successfulFrameSize: Point = None + _exp: Optional[Point] = None + _mantissa: Optional[Point] = None + _maximumFrameSize: Optional[Point] = None + successfulFrameSize: Optional[Point] = None # numpy double data type precision - _dtype: DType = None + _dtype: Optional[DType] = None - def __init__(self, pbParam: Parameters, runOptions: Options): + def __init__(self, pb_param: Parameters, run_options: Options): """ Constructor """ - super(Omesh, self).__init__(pbParams=pbParam, limitMaxMeshIndex=-GL_LIMITS, limitMinMeshIndex=GL_LIMITS) - self._n = len(pbParam.baseline) + super(Omesh, self).__init__(pb_params=pb_param, limit_max_mesh_index=-GL_LIMITS, limit_min_mesh_index=GL_LIMITS) + self._n = len(pb_param.baseline) self.meshSize = Point(self._n) self.frameSize = Point(self._n) self._exp = Point(self._n) @@ -48,7 +50,7 @@ def __init__(self, pbParam: Parameters, runOptions: Options): self._maximumFrameSize = Point(self._n) self.successfulFrameSize = Point(self._n) self.rho = [0] * self._n - self.frameSize.coordinates = runOptions.psize_init if isinstance(runOptions.psize_init, list) else [runOptions.psize_init] * self._n + self.frameSize.coordinates = run_options.psize_init if isinstance(run_options.psize_init, list) else [run_options.psize_init] * self._n self.meshSize.reset(n=self._n, d=0) self._r = Point(self._n) self._r.coordinates = [1]*self._n diff --git a/src/OMADS/Optimizer.py b/src/OMADS/Optimizer.py index 3d491f3..50baeda 100644 --- a/src/OMADS/Optimizer.py +++ b/src/OMADS/Optimizer.py @@ -1,55 +1,58 @@ +import numpy as np from .CandidatePoint import CandidatePoint from .Point import Point -from .Barriers import * +from .Barriers import BarrierMO from ._common import logger from dataclasses import dataclass, field -from typing import List, Dict, Any, Protocol +from typing import List, Protocol, Optional from .Gmesh import Gmesh from .Cache import Cache from .Evaluator import Evaluator import samplersLib as explore - +from ._globals import MPP, DType +from .Parameters import Parameters +from .Options import Options @dataclass class ConstraintsRelaxationParameters: RHO: float = MPP.RHO LAMBDA: List[float] = field(default_factory=lambda: [MPP.LAMBDA]) hmax: float = 1. 
- constraints_type: List[int] = None + constraints_type: Optional[List[int]] = None @dataclass class GenericSamplerBaseData(Protocol): scaling: List[List[float]] = field(default_factory=list) hashtable: Cache = field(default_factory=Cache) - mesh: Gmesh = None + mesh: Optional[Gmesh] = None bb_handle: Evaluator = field(default_factory=Evaluator) - Failure_stop: bool = None + Failure_stop: Optional[bool] = None constraintsHandler: ConstraintsRelaxationParameters = field(default_factory=lambda: ConstraintsRelaxationParameters) - log: logger = None + log: Optional[logger] = None n_successes: int = 0 - prob_params: Parameters = None + prob_params: Optional[Parameters] = None sampling_t: int = 3 vicinity_ratio: np.ndarray = None vicinity_min: float = 0.001 terminate: bool =False visualize: bool = False - sampling_criter: str = None - weights: List[float] = None + sampling_criter: Optional[str] = None + weights: Optional[List[float]] = None AS: explore.samplers.activeSampling = None best_samples: int = 0 estGrid: explore.samplers.sampling = None - activeBarrier: BarrierMO = None + activeBarrier: Optional[BarrierMO] = None constraints_RP: ConstraintsRelaxationParameters = field(default_factory=lambda: ConstraintsRelaxationParameters(RHO=MPP.RHO.value, LAMBDA=None, hmax=1., constraints_type=None)) - _evalSet: List[CandidatePoint] = None - _points: List[Point] = None - _pointsIndex: List[int] = None + _evalSet: Optional[List[CandidatePoint]] = None + _points: Optional[List[Point]] = None + _pointsIndex: Optional[List[int]] = None _n: int = 0 _candidate_points_set : List[CandidatePoint] = field(default_factory=list) _point_index: List[int] = field(default_factory=list) _directions_set: List[Point] = field(default_factory=list) _defined: List[bool] = field(default_factory=lambda: [False]) - _xmin: CandidatePoint = None - _x_sc: CandidatePoint = None + _xmin: Optional[CandidatePoint] = None + _x_sc: Optional[CandidatePoint] = None _nb_success: int = 0 _bb_eval: int = field(default_factory=int) _psize: float = field(default_factory=float) @@ -60,7 +63,7 @@ class GenericSamplerBaseData(Protocol): _save_results = True _opportunistic: bool = False _eval_budget: int = 100 - _dtype: DType = None + _dtype: Optional[DType] = None _success: bool = False _seed: int = 0 _terminate: bool = False @@ -91,15 +94,15 @@ def update(self): ... @dataclass -class genericGlobalLocalSamplerBaseData(Protocol): - localSearch: GenericSamplerBase = None - globalSearch: GenericSamplerBase = None - param: Parameters = None - options: Options = None +class GenericGlobalLocalSamplerBaseData(Protocol): + localSearch: Optional[GenericSamplerBase] = None + globalSearch: Optional[GenericSamplerBase] = None + param: Optional[Parameters] = None + options: Optional[Options] = None @dataclass -class genericGlobalLocalSamplerBase(genericGlobalLocalSamplerBaseData, Protocol): +class GenericGlobalLocalSamplerBase(GenericGlobalLocalSamplerBaseData, Protocol): def generate_candidate_points(self)->List[CandidatePoint]: ... 
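A note on the typing pattern that dominates this file and most of the other hunks: a dataclass field declared as List[float] = None executes fine at runtime, but static type checkers reject it because None is not a member of List[float]. Wrapping the annotation in Optional[...] makes the None default part of the declared type. A minimal sketch (class and field names hypothetical, not from the codebase) of the two spellings:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Before:
    xs: List[float] = None  # runs, but a type checker flags the None default

@dataclass
class After:
    xs: Optional[List[float]] = None  # None is now part of the declared type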
diff --git a/src/OMADS/Options.py b/src/OMADS/Options.py index d7dadbd..534da3b 100644 --- a/src/OMADS/Options.py +++ b/src/OMADS/Options.py @@ -1,9 +1,5 @@ from dataclasses import dataclass -import logging from typing import Any -import numpy as np -from .Point import Point -from ._globals import * @dataclass class Options: diff --git a/src/OMADS/POLL.py b/src/OMADS/POLL.py index 984457a..098d24e 100644 --- a/src/OMADS/POLL.py +++ b/src/OMADS/POLL.py @@ -26,25 +26,22 @@ """ import copy import importlib -import json from multiprocessing import freeze_support import os -import pkgutil import sys import numpy as np -import concurrent.futures import time -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional +import importlib.util if importlib.util.find_spec('BMDFO'): from BMDFO import toy from .Point import Point from .Barriers import Barrier, BarrierMO -from ._common import * -from .Directions import * -from .PrePoll import * +from ._common import validator, logger +from .PrePoll import PrePoll from .CandidatePoint import CandidatePoint -from .PostProcess import Output, PostMADS - +from ._globals import DESIGN_STATUS, MSG_TYPE, SUCCESS_TYPES, VAR_TYPE +from .Metrics import Metrics np.set_printoptions(legacy='1.21') def main(*args) -> Dict[str, Any]: @@ -52,24 +49,24 @@ """ Validate and parse the parameters file """ validate = validator() - data: dict = validate.checkInputFile(args=args) + data: dict = validate.check_input_file(args=args) """ Initialize the log file """ log = logger() if not os.path.exists(data["param"]["post_dir"]): try: os.mkdir(data["param"]["post_dir"]) - except: + except OSError: os.makedirs(data["param"]["post_dir"], exist_ok=True) log.initialize(data["param"]["post_dir"] + "/OMADS.log") """ Run preprocessor for the setup of the optimization problem and for the initialization of optimization process """ - iteration, xmin, poll, options, param, post, out, B, outP = PrePoll(data).initialize_from_dict(log=log) - out.stepName = "Poll" - if outP: - outP.stepName = "Poll_ND" + iteration, xmin, poll, options, param, post, out, B, out_p = PrePoll(data).initialize_from_dict(log=log) + out.step_name = "Poll" + if out_p: + out_p.step_name = "Poll_ND" """ Set the random seed for results reproducibility """ if len(args) < 4: @@ -80,12 +77,12 @@ """ Start the count down for calculating the runtime indicator """ tic = time.perf_counter() peval = poll.bb_handle.bb_eval - LAMBDA_k = xmin.LAMBDA - RHO_k = xmin.RHO + lambda_k = xmin.lambda_multipliers + rho_k = xmin.rho while True: del poll.poll_set poll.mesh.update() - poll.constraints_RP.LAMBDA = copy.deepcopy(xmin.LAMBDA) + poll.constraints_RP.LAMBDA = copy.deepcopy(xmin.lambda_multipliers) poll.constraints_RP.constraints_type = copy.deepcopy(poll.xmin.constraints_type) """ Create the set of poll directions """ hhm = poll.create_housholder(options.rich_direction, domain=xmin.var_type) @@ -100,22 +97,21 @@ else: B.insert(xmin) elif isinstance(B, BarrierMO) and iteration == 1: - B.init(evalPointList=[xmin]) + B.init(eval_point_list=[xmin]) if isinstance(B, Barrier): - poll.constraints_RP.hmax = xmin.hmax + poll.constraints_RP.hmax = xmin.h_max poll.create_poll_set(hhm=hhm, ub=param.ub, lb=param.lb, it=iteration, var_type=xmin.var_type, var_sets=xmin.sets, var_link = xmin.var_link, c_types=param.constraints_type, is_prim=True) if B._sec_poll_center is not None and B._sec_poll_center.evaluated: del 
poll.poll_set - # poll.poll_dirs = [] poll.x_sc = B._sec_poll_center poll.create_poll_set(hhm=hhm, ub=param.ub, lb=param.lb, it=iteration, var_type=B._sec_poll_center.var_type, var_sets=B._sec_poll_center.sets, var_link = B._sec_poll_center.var_link, c_types=param.constraints_type, is_prim=False) elif isinstance(B, BarrierMO): - poll.constraints_RP.hmax = B._hMax + poll.constraints_RP.hmax = B._h_max del poll.poll_set del poll.poll_dirs if B._currentIncumbentFeas and B._currentIncumbentFeas.evaluated: @@ -128,7 +124,6 @@ def main(*args) -> Dict[str, Any]: lb=param.lb, it=iteration, var_type=poll.xmin.var_type, var_sets=poll.xmin.sets, var_link = poll.xmin.var_link, c_types=param.constraints_type, is_prim=True) if B._currentIncumbentInf and B._currentIncumbentInf.evaluated: - # del poll.poll_set poll.x_sc = B._currentIncumbentInf poll.create_poll_set(hhm=hhm, ub=param.ub, @@ -139,8 +134,8 @@ def main(*args) -> Dict[str, Any]: lb=param.lb, it=iteration, var_type=poll.xmin.var_type, var_sets=poll.xmin.sets, var_link = poll.xmin.var_link, c_types=param.constraints_type, is_prim=False) - poll.constraints_RP.LAMBDA = LAMBDA_k - poll.constraints_RP.RHO = RHO_k + poll.constraints_RP.LAMBDA = lambda_k + poll.constraints_RP.RHO = rho_k """ Save current poll directions and incumbent solution so they can be saved later in the post dir """ @@ -153,18 +148,18 @@ def main(*args) -> Dict[str, Any]: poll.bb_output = [] xt = [] """ Serial evaluation for points in the poll set """ - if log is not None and log.isVerbose: + if log and log.is_verbose: log.log_msg(f"----------- Evaluate poll set # {iteration}-----------", msg_type=MSG_TYPE.INFO) poll.log = log if options.check_cache: poll.omit_duplicates() poll.bb_handle.xmin = poll.xmin if not options.parallel_mode: - xt, post, peval = poll.bb_handle.run_callable_serial_local(iter=iteration, peval=peval, eval_set=poll.poll_set, options=options, post=post, psize=poll.mesh.getDeltaFrameSize().coordinates, constraintsRelaxation=poll.constraints_RP.__dict__, budget=options.budget) + xt, post, peval = poll.bb_handle.run_callable_serial_local(iter=iteration, peval=peval, eval_set=poll.poll_set, options=options, post=post, psize=poll.mesh.getDeltaFrameSize().coordinates, constraints_relaxation=poll.constraints_RP.__dict__, budget=options.budget) else: poll.point_index = -1 """ Parallel evaluation for points in the poll set """ - poll.bb_eval, xt, post, peval = poll.bb_handle.run_callable_parallel_local(iter=iteration, peval=peval, njobs=options.np, eval_set=poll.poll_set, options=options, post=post, psize=poll.mesh.getDeltaFrameSize().coordinates, constraintsRelaxation=poll.constraints_RP.__dict__, budget=options.budget) + poll.bb_eval, xt, post, peval = poll.bb_handle.run_callable_parallel_local(iter=iteration, peval=peval, eval_set=poll.poll_set, options=options, post=post, psize=poll.mesh.getDeltaFrameSize().coordinates, constraints_relaxation=poll.constraints_RP.__dict__, budget=options.budget) poll.postprocess_evaluated_candidates(xt) if isinstance(B, Barrier): xpost: List[CandidatePoint] = poll.master_updates(xt, peval, save_all_best=options.save_all_best, save_all=options.save_results) @@ -184,37 +179,35 @@ def main(*args) -> Dict[str, Any]: for p in poll.poll_set: if p.evaluated: pev += 1 - # if pev != poll.poll_dirs and not poll.success: - # poll.seed += 1 - goToSearch: bool = (pev == 0 and poll.Failure_stop is not None and poll.Failure_stop) + + go_to_search: bool = (pev == 0 and poll.Failure_stop is not None and poll.Failure_stop) - dir: Point = 
Point(poll._n) - dir.coordinates = poll.xmin.direction.coordinates if poll.xmin.direction is not None else [0]*poll._n - if poll.success == SUCCESS_TYPES.FS and not goToSearch: - poll.mesh.enlargeDeltaFrameSize(direction=dir) # poll.mesh.psize = np.multiply(poll.mesh.psize, 2, dtype=poll.dtype.dtype + direction: Point = Point(poll._n) + direction.coordinates = poll.xmin.direction.coordinates if poll.xmin.direction is not None else [0]*poll._n + if poll.success == SUCCESS_TYPES.FS and not go_to_search: + poll.mesh.enlargeDeltaFrameSize(direction=direction) # poll.mesh.psize = np.multiply(poll.mesh.psize, 2, dtype=poll.dtype.dtype elif poll.success == SUCCESS_TYPES.US: poll.mesh.refineDeltaFrameSize() - # poll.mesh.psize = np.divide(poll.mesh.psize, 2, dtype=poll.dtype.dtype) elif isinstance(B, BarrierMO): xpost: List[CandidatePoint] = [] for i in range(len(xt)): xpost.append(xt[i]) - updated, _, _ = B.updateWithPoints(evalPointList=xpost, evalType=None, keepAllPoints=False, updateInfeasibleIncumbentAndHmax=True) + updated, _, _ = B.updateWithPoints(eval_point_list=xpost, keep_all_points=False) if not updated: - newMesh = None + new_mesh = None if B._currentIncumbentInf: B._currentIncumbentInf.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if B._currentIncumbentFeas: B._currentIncumbentFeas.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() - if newMesh: - poll.mesh = newMesh + if new_mesh: + poll.mesh = new_mesh else: poll.mesh.refineDeltaFrameSize() @@ -226,38 +219,45 @@ def main(*args) -> Dict[str, Any]: post.xmin = B._currentIncumbentFeas if B._currentIncumbentFeas else B._currentIncumbentInf if B._currentIncumbentInf else poll.xmin poll.mesh.update() - if log is not None: + if log: log.log_msg(msg=post.__str__(), msg_type=MSG_TYPE.INFO) if options.display: print(post) - LAMBDA_k = poll.constraints_RP.LAMBDA - RHO_k = poll.constraints_RP.RHO + lambda_k = poll.constraints_RP.LAMBDA + rho_k = poll.constraints_RP.RHO if options.save_results: post.nd_points = [] - post.output_results(out, allRes=False) + post.output_results(out, all_res=False) if param.isPareto: for i in range(len(B.getAllPoints())): post.nd_points.append(B.getAllPoints()[i]) - post.output_nd_results(outP) + post.output_nd_results(out_p) - Failure_check = iteration > 0 and poll.Failure_stop is not None and poll.Failure_stop and (poll.success == SUCCESS_TYPES.US or goToSearch) + failure_check = iteration > 0 and poll.Failure_stop is not None and poll.Failure_stop and (poll.success == SUCCESS_TYPES.US or go_to_search) - if (Failure_check or poll.bb_eval >= options.budget) or (all(abs(poll.mesh.getDeltaFrameSize().coordinates[pp]) < options.tol for pp in range(poll._n)) or poll.bb_eval >= options.budget or poll.terminate): - log.log_msg(f"\n--------------- Termination of the poll step ---------------", MSG_TYPE.INFO) + if (failure_check or poll.bb_eval >= 
options.budget) or (all(abs(poll.mesh.getDeltaFrameSize().coordinates[pp]) < options.tol for pp in range(poll._n)) or poll.bb_eval >= options.budget or poll.terminate): + log.log_msg("\n--------------- Termination of the poll step ---------------", MSG_TYPE.INFO) if all(abs(poll.mesh.getDeltaFrameSize().coordinates[pp]) < options.tol for pp in range(poll._n)): log.log_msg("Termination criterion hit: the mesh size is below the minimum threshold defined.", MSG_TYPE.INFO) if (poll.bb_eval >= options.budget or poll.terminate): log.log_msg("Termination criterion hit: evaluation budget is exhausted.", MSG_TYPE.INFO) - if (Failure_check): - log.log_msg(f"Termination criterion hit (optional): failed to find a successful point in iteration # {iteration}.", MSG_TYPE.INFO) - log.log_msg(f"---------------------------------------------------------------\n", MSG_TYPE.INFO) + if (failure_check): + log.log_msg(f"Termination criterion hit (optional): failed to find a successful point in iteration # {iteration}.", MSG_TYPE.INFO) + log.log_msg("---------------------------------------------------------------\n", MSG_TYPE.INFO) break iteration += 1 toc = time.perf_counter() + if isinstance(B, BarrierMO): + rp: Optional[Point] = None + if param.ref_point: + rp = Point() + rp.coordinates = param.ref_point + perf_m = Metrics(nd_solutions=B.getAllPoints(), nobj=B._nobj, ref_point=rp) + HV = perf_m.hypervolume() """ If benchmarking, then populate the results in the benchmarking output report """ if importlib.util.find_spec('BMDFO') and len(args) > 1 and isinstance(args[1], toy.Run): @@ -281,27 +281,28 @@ print(f"{poll.bb_handle.blackbox}: fmin = {poll.xmin.f} , hmin= {poll.xmin.h:.2f}") elif importlib.util.find_spec('BMDFO') and len(args) > 1 and not isinstance(args[1], toy.Run): - if log is not None: - log.log_msg(msg="Could not find " + args[1] + " in the internal BM suite.", msg_type=MSG_TYPE.ERROR) - raise IOError("Could not find " + args[1] + " in the internal BM suite.") + temp = " in the internal BM suite." 
+ if log: + log.log_msg(msg="Could not find " + args[1] + temp, msg_type=MSG_TYPE.ERROR) + raise IOError("Could not find " + args[1] + temp) if options.display: print(" end of orthogonal MADS ") - if log is not None: + if log: log.log_msg(msg=" end of orthogonal MADS ", msg_type=MSG_TYPE.INFO) print(" Final objective value: " + str(poll.xmin.f) + ", hmin= " + str(poll.xmin.h)) - if log is not None: + if log: log.log_msg(msg=" Final objective value: " + str(poll.xmin.f) + ", hmin= " + str(poll.xmin.h), msg_type=MSG_TYPE.INFO) - if log is not None and len(args)>1 and isinstance(args[1], str): + if log and len(args)>1 and isinstance(args[1], str): log.log_msg(msg=" end of orthogonal MADS running" + args[1] + " in the internal BM suite.", msg_type=MSG_TYPE.INFO) if options.save_coordinates: post.output_coordinates(out) - if log is not None: + if log: log.log_msg(msg="\n---Run Summary---", msg_type=MSG_TYPE.INFO) log.log_msg(msg=f" Run completed in {toc - tic:.4f} seconds", msg_type=MSG_TYPE.INFO) log.log_msg(msg=f" Random numbers generator's seed {options.seed}", msg_type=MSG_TYPE.INFO) @@ -349,11 +350,12 @@ def main(*args) -> Dict[str, Any]: "psize": poll.mesh.getDeltaFrameSize().coordinates, "psuccess": poll.xmin.mesh.getDeltaFrameSize().coordinates, # "pmax": poll.mesh.psize_max, - "msize": poll.mesh.getdeltaMeshSize().coordinates} + "msize": poll.mesh.getdeltaMeshSize().coordinates, + "HV": HV if param.isPareto else "NA"} return output, poll -def rosen(x, p, *argv): +def rosen(x, p): x = np.asarray(x) y = [np.sum(p[0] * (x[1:] - x[:-1] ** p[1]) ** p[1] + (1 - x[:-1]) ** p[1], axis=0), [0]] @@ -363,10 +365,10 @@ def alpine(x): y = [abs(x[0]*np.sin(x[0])+0.1*x[0])+abs(x[1]*np.sin(x[1])+0.1*x[1]), [0]] return y -def Ackley3(x): +def ackley3(x): return [-200*np.exp(-0.2*np.sqrt(x[0]**2+x[1]**2))+5*np.exp(np.cos(3*x[0])+np.sin(3*x[1])), [0]] -def eggHolder(individual): +def egg_holder(individual): x = individual[0] y = individual[1] f = (-(y + 47.0) * np.sin(np.sqrt(abs(x/2.0 + (y + 47.0)))) - x * np.sin(np.sqrt(abs(x - (y + 47.0))))) diff --git a/src/OMADS/Parameters.py b/src/OMADS/Parameters.py index 26f05e3..b91cb1a 100644 --- a/src/OMADS/Parameters.py +++ b/src/OMADS/Parameters.py @@ -1,10 +1,10 @@ from dataclasses import dataclass -import logging import os -from typing import List, Dict +from typing import List, Dict, Optional +import warnings import numpy as np from .Point import Point -from ._globals import * +from ._globals import DType, VAR_TYPE, BARRIER_TYPES, MESH_TYPE import copy @dataclass @@ -18,39 +18,42 @@ class Parameters: :param scaling: Scaling factor (can be defined as a list (assigning a factor for each variable) or a scalar value that will be applied on all variables) :param post_dir: The location and name of the post directory where the output results file will live in (if any) """ - _n: int = None - baseline: List[float] = None - lb: List[float] = None - ub: List[float] = None - var_names: List[str] = None - fun_names: List[str] = None - scaling: List[float] = None - post_dir: str = os.path.abspath("./") - var_type: List[str] = None - var_sets: Dict = None - constants: List = None - constants_name: List = None - Failure_stop: bool = None + _n: Optional[int] = None + baseline: Optional[List[float]] = None + lb: Optional[List[float]] = None + ub: Optional[List[float]] = None + var_names: Optional[List[str]] = None + fun_names: Optional[List[str]] = None + scaling: Optional[List[float]] = None + post_dir: Optional[str] = os.path.abspath("./") + var_type: Optional[List[str]] 
= None + var_sets: Optional[Dict] = None + constants: Optional[List] = None + constants_name: Optional[List] = None + failure_stop: Optional[bool] = None problem_name: str = "unknown" - best_known: List[float] = None - constraints_type: List[BARRIER_TYPES] = None - function_weights: List[float] = None + best_known: Optional[List[float]] = None + constraints_type: Optional[List[BARRIER_TYPES]] = None + function_weights: Optional[List[float]] = None h_max: float = 0 RHO: float = 0.00005 - LAMBDA: List[float] = None + LAMBDA: Optional[List[float]] = None name: str = "undefined" nobj: int = 1 + ref_point: Optional[List[float]] = None + lhs_search_initialization: Optional[bool] = False + # Mesh options meshType: str = MESH_TYPE.ORTHO.name - fixed_variables: Point = None - granularity: Point = None - minMeshSize: Point = None - minFrameSize: Point = None - initialMeshSize: Point = None - initialFrameSize: Point = None + fixed_variables: Optional[Point] = None + granularity: Optional[Point] = None + minMeshSize: Optional[Point] = None + minFrameSize: Optional[Point] = None + initialMeshSize: Optional[Point] = None + initialFrameSize: Optional[Point] = None warningInitialFrameSizeReset: bool = True - x0: Point = None + x0: Optional[Point] = None _initialized_and_checked: bool = False isPareto: bool = False incumbentincumbentSelectionParam: int = 1 @@ -88,7 +91,9 @@ def __init__( isPareto: bool = False, nobj: int=1, incumbentincumbentSelectionParam: int=1, - barrierInitializedFromCache:bool =True): + barrierInitializedFromCache: bool = True, + ref_point: Optional[List[float]] = None, + lhs_search_initialization: bool = False): self.incumbentincumbentSelectionParam = incumbentincumbentSelectionParam self.barrierInitializedFromCache = barrierInitializedFromCache self.nobj = nobj @@ -106,7 +111,7 @@ self.var_type = var_type self.constants = constants self.constants_name = constants_name - self.Failure_stop: bool = Failure_stop + self.failure_stop: bool = Failure_stop self.problem_name = problem_name self.best_known = best_known self.constraints_type = constraints_type @@ -116,6 +121,7 @@ self.name = name self.var_sets = var_sets self.isPareto = isPareto + self.lhs_search_initialization = lhs_search_initialization # Mesh options self.meshType = meshType point_init = Point() @@ -194,19 +200,20 @@ if self.var_type is None or len(self.var_type) <= 0: self.var_type = [VAR_TYPE.REAL.name] * self.n - self.setMinMeshParameters() - self.setMinFrameParameters() - self.setInitialMeshParameters() - self.x0.checkForGranularity(g=self.granularity, name="baseline") - self.minMeshSize.checkForGranularity(g=self.granularity, name="minMeshSize") - self.minFrameSize.checkForGranularity(g=self.granularity, name="minFrameSize") - self.initialMeshSize.checkForGranularity(g=self.granularity, name="initialMeshSize") - self.initialFrameSize.checkForGranularity(g=self.granularity, name="initialFrameSize") + self.set_min_mesh_parameters() + self.set_min_frame_parameters() + self.set_initial_mesh_parameters() + self.x0.check_for_granularity(g=self.granularity, name="baseline") + self.minMeshSize.check_for_granularity(g=self.granularity, name="minMeshSize") + self.minFrameSize.check_for_granularity(g=self.granularity, name="minFrameSize") + self.initialMeshSize.check_for_granularity(g=self.granularity, name="initialMeshSize") + self.initialFrameSize.check_for_granularity(g=self.granularity, name="initialFrameSize") self._initialized_and_checked = True + self.ref_point = ref_point - def 
setInitialMeshParameters(self): + def set_initial_mesh_parameters(self): if self.initialMeshSize.is_all_defined() and self.initialMeshSize.size != self.n: raise IOError(f"INITIAL_MESH_SIZE has dimension {self.initialMeshSize.size} which is different from problem dimension {self.n}") @@ -235,7 +242,7 @@ def setInitialMeshParameters(self): self.warningInitialFrameSizeReset = False warnings.warn("Initial frame size reset from initial mesh") self.minFrameSize[i] = self.initialMeshSize[i] * np.power(self.n, 0.5) - self.initialFrameSize[i] = self.initialFrameSize.nextMult(g=self.granularity[i], i=i) + self.initialFrameSize[i] = self.initialFrameSize.next_mult(g=self.granularity[i], i=i) if self.initialFrameSize[i] < self.minFrameSize[i]: self.initialFrameSize[i] = self.minFrameSize[i] @@ -252,7 +259,7 @@ def setInitialMeshParameters(self): else: self.initialFrameSize[i] = 1.0 # Adjust value with granularity - self.initialFrameSize[i] = self.initialFrameSize.nextMult(g=self.granularity[i], i=i) + self.initialFrameSize[i] = self.initialFrameSize.next_mult(g=self.granularity[i], i=i) # Adjust value with minFrameSize if self.initialFrameSize[i] < self.minFrameSize[i]: self.initialFrameSize[i] = self.minFrameSize[i] @@ -260,21 +267,21 @@ if not self.initialMeshSize.defined[i]: self.initialMeshSize[i] = self.initialFrameSize[i] * self.n**-0.5 # Adjust value with granularity - self.initialMeshSize[i] = self.initialMeshSize.nextMult(g=self.granularity[i], i=i) + self.initialMeshSize[i] = self.initialMeshSize.next_mult(g=self.granularity[i], i=i) # Adjust value with minMeshSize if (self.initialMeshSize[i] < self.minMeshSize[i]): self.initialMeshSize[i] = self.minMeshSize[i] - if not (self.minMeshSize[i] <= self.initialMeshSize[i]): + if (self.minMeshSize[i] > self.initialMeshSize[i]): raise IOError("Check: initial mesh size is lower than min mesh size.\n" + f"INITIAL_MESH_SIZE + {self.initialMeshSize[i]} \n" + f"MIN_MESH_SIZE {self.minMeshSize[i]}") - if not (self.minFrameSize[i] <= self.minFrameSize[i]): + if (self.minFrameSize[i] > self.initialFrameSize[i]): raise IOError("Check: initial frame size is lower than min frame size.\n" + f"INITIAL_FRAME_SIZE + {self.initialFrameSize[i]} \n" + f"MIN_FRAME_SIZE {self.minFrameSize[i]}") - def setMinMeshParameters(self): + def set_min_mesh_parameters(self): if not self.minMeshSize.is_all_defined(): for i in range(self.n): if self.granularity[i] > 0.0: @@ -291,7 +298,7 @@ else: raise IOError("Error: granularity is defined with a negative value.") - def setMinFrameParameters(self): + def set_min_frame_parameters(self): if not self.minFrameSize.is_all_defined(): for i in range(self.n): if self.granularity[i] > 0.0: @@ -308,7 +315,7 @@ else: raise IOError("Error: granularity is defined with a negative value.") - def toBeChecked(self)-> bool: + def to_be_checked(self) -> bool: return self._initialized_and_checked # TODO: give better control on variables' resolution (mesh granularity)
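The two inputs added to Parameters above, ref_point and lhs_search_initialization, travel through the same "param" dictionary as the existing options (compare the quick-test function in the MADS module earlier in this change). A hedged sketch of a bi-objective configuration, with illustrative values only and assuming the dictionary keys map one-to-one onto Parameters arguments as they do elsewhere:

# Illustrative param block; values are invented for the sketch.
param = {"baseline": [-2.0, -2.0],
         "lb": [-5, -5],
         "ub": [10, 10],
         "isPareto": True,                    # multi-objective run
         "nobj": 2,
         "ref_point": [5.0, 5.0],             # fixed hypervolume reference; if omitted, find_ref_point derives one
         "lhs_search_initialization": False,  # new flag, off by default
         "post_dir": "./post"}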
import * +from ._globals import DType @dataclass class Point: @@ -41,23 +41,23 @@ class Point: # Dimension of the point _n: int = 0 # Coordinates of the point - _coords: List[float] = None + _coords: Optional[List[float]] = None # Coordinates definition boolean - _defined: List[bool] = None + _defined: Optional[List[bool]] = None # Evaluation boolean _evaluated: bool = False # hash signature, in the cache memory _signature: int = 0 # numpy double data type precision - _dtype: DType = None + _dtype: Optional[DType] = None # Variables type - _var_type: List[int] = None + _var_type: Optional[List[int]] = None # Discrete set - _sets: Dict = None + _sets: Optional[Dict] = None source: str = "Current run" - Model: str = "Simulation" + model_type: str = "Simulation" def __post_init__(self): self._dtype = DType() @@ -139,9 +139,9 @@ def push_back(self, val: Any): else: self.coordinates = self._coords + [val]*self._n - def checkForGranularity(self, g: Any, name: str) -> bool: + def check_for_granularity(self, g: Any, name: str) -> bool: for i in range(self._n): - if not self.isMult(self.coordinates[i], g[i]): + if not self.is_mult(self.coordinates[i], g[i]): raise IOError("Check: Invalid granularity of parameter " + name + f"at index {i} : {self.coordinates[i]} vs granularity value {g[i]} found a non-zero remainder of {self.coordinates[i] % g[i]}.") return True @@ -193,7 +193,7 @@ def reset(self, n: int = 0, d: Optional[float] = 0): self.defined = [False] * n - def nextMult(self, g: float = None, i: int = 0) -> float: + def next_mult(self, g: float = None, i: int = 0) -> float: d: float # Calculate the remainder when number is divided by multiple_of # Calculate the ratio to find next multiple_of @@ -202,19 +202,17 @@ def nextMult(self, g: float = None, i: int = 0) -> float: # # Calculate the next multiple_of # next_multiple = ratio * self.coordinates[i] value = self.coordinates[i] - if g is None or not self.defined[i] or g <= 0. or self.isMult(value, g): + if g is None or not self.defined[i] or g <= 0. or self.is_mult(value, g): d = value else: # granularity > 0, and _value is not a multiple of granularity. # Adjust value with granularity - granMult = round(abs(value)/g) + gran_mult = round(abs(value)/g) if value > 0: - granMult += 1 - # if abs(value) > 0: - # granMult += granMult - d = granMult*g + gran_mult += 1 + d = gran_mult*g - if not self.isMult(d, g): + if not self.is_mult(d, g): raise IOError("nextMult(gran): cannot get a multiple of granularity") # trials = 0 # while (not self.isMult(d, g)): @@ -228,39 +226,37 @@ def nextMult(self, g: float = None, i: int = 0) -> float: return d - def previousMult(self, g: float, i: int): + def previous_mult(self, g: float = None, i: int = -1): d: float - if g is not None or not self.is_all_defined() or g <= 0. or self.isMult(self.coordinates[i], g): + if g is not None or not self.is_all_defined() or g <= 0. 
or self.is_mult(self.coordinates[i], g): d = self.coordinates[i] else: - granMult: int = int(self.coordinates[i]/g) + gran_mult: int = int(self.coordinates[i]/g) if self.coordinates[i] < 0: - granMult-= 1 - bigGranExp: int = 10 ** self.nDecimals(g) - bigGran: int = int(g*bigGranExp) - d = granMult * bigGran/bigGranExp + gran_mult-= 1 + big_gran_exp: int = 10 ** self.n_decimals(g) + big_gran: int = int(g*big_gran_exp) + d = gran_mult * big_gran/big_gran_exp return d - def isMult(self, v1: float, v2: float): - isMult: bool = True + def is_mult(self, v1: float, v2: float): + is_mult: bool = True if abs(v1) <= self.dtype.zero: - isMult = True + is_mult = True elif (abs(v2) > 0): mult = round(v1/v2) verif_value = mult * v2 if abs(v1-verif_value) < abs(mult)*self.dtype.zero: - isMult = True + is_mult = True elif v2 < 0: - isMult = False + is_mult = False else: - isMult = True + is_mult = True - return isMult - - # return ((v1%v2) <= self.dtype.zero) if v2 > 0.0 else True + return is_mult - def nDecimals(self, n: float): + def n_decimals(self, n: float): return len(n.rsplit('.')[-1]) if '.' in n else 0 @@ -268,13 +264,13 @@ def __eq__(self, other) -> bool: return self.size is other.size and other.coordinates is self.coordinates \ and self.is_any_defined() is other.is_any_defined() - def __le__(self, other) -> bool: + def __le__(self, other) -> Optional[bool]: if self.size is other._n and self.is_all_defined() is other.is_all_defined(): return all(self.coordinates[i] <= other.coordinates[i] for i in range(self._n)) else: return None - def __lt__(self, other) -> bool: + def __lt__(self, other) -> Optional[bool]: if self.size is other._n and self.is_all_defined() is other.is_all_defined(): return all(self.coordinates[i] < other.coordinates[i] for i in range(self._n)) else: diff --git a/src/OMADS/PostProcess.py b/src/OMADS/PostProcess.py index f70e9f6..543ce64 100644 --- a/src/OMADS/PostProcess.py +++ b/src/OMADS/PostProcess.py @@ -1,10 +1,8 @@ from dataclasses import dataclass, field -import importlib import os -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional from .CandidatePoint import CandidatePoint import json -from ._globals import * import csv @dataclass @@ -19,7 +17,7 @@ class Output: pname: str = "MADS0" runfolder: str = "undefined" replace: bool = True - stepName: str = "Poll" + step_name: str = "Poll" suffix: str = "all" def __post_init__(self): @@ -64,20 +62,18 @@ def clear_csv_content(self): def add_row(self, eval_time: int, iterno: int, evalno: int, source: str, - Mname: str, + m_name: str, poll_size: float, status: str, - fobj: float, - h: float, f: float, rho: float, L: List[float], hmax: float, - x: List[float], stepName: str, fnames: List[str]): - row = {f'{"Runtime (Sec)".rjust(25)}': f'{f"{eval_time}".rjust(25)}', f'{"Iteration".rjust(25)}': f'{f"{iterno}".rjust(25)}', f'{"Evaluation #".rjust(25)}': f'{f"{evalno}".rjust(25)}', f'{"Step".rjust(25)}': f'{f"{stepName}".rjust(25)}', f'{"Source".rjust(25)}': f'{f"{source}".rjust(25)}', f'{"Model_name".rjust(25)}': f'{f"{Mname}".rjust(25)}', f'{"Delta".rjust(25)}': f'{f"{min(poll_size)}".rjust(25)}', f'{"Status".rjust(25)}': f'{f"{status}".rjust(25)}', f'{"phi".rjust(25)}': f'{f"{max(f)}".rjust(25)}'} + fobj: Any, + h: float, f: float, rho: float, lambdas: List[float], hmax: float, + x: List[float], step_name: str, fnames: List[str]): + row = {f'{"Runtime (Sec)".rjust(25)}': f'{f"{eval_time}".rjust(25)}', f'{"Iteration".rjust(25)}': f'{f"{iterno}".rjust(25)}', f'{"Evaluation #".rjust(25)}': 
f'{f"{evalno}".rjust(25)}', f'{"Step".rjust(25)}': f'{f"{step_name}".rjust(25)}', f'{"Source".rjust(25)}': f'{f"{source}".rjust(25)}', f'{"Model_name".rjust(25)}': f'{f"{m_name}".rjust(25)}', f'{"Delta".rjust(25)}': f'{f"{min(poll_size)}".rjust(25)}', f'{"Status".rjust(25)}': f'{f"{status}".rjust(25)}', f'{"phi".rjust(25)}': f'{f"{max(f)}".rjust(25)}'} for i in range(len(fobj)): row.update({f'{f"{fnames[i]}".rjust(25)}': f'{f"{fobj[i]}".rjust(25)}'}) - row.update({f'{"max(c_in)".rjust(25)}': f'{f"{h}".rjust(25)}', f'{"Penalty_parameter".rjust(25)}': f'{f"{rho}".rjust(25)}', f'{"Multipliers".rjust(25)}': f'{f"{max(L) if len(L)>0 else None}".rjust(25)}', f'{"hmax".rjust(25)}': f'{f"{hmax}".rjust(25)}'}) + row.update({f'{"max(c_in)".rjust(25)}': f'{f"{h}".rjust(25)}', f'{"Penalty_parameter".rjust(25)}': f'{f"{rho}".rjust(25)}', f'{"Multipliers".rjust(25)}': f'{f"{max(lambdas) if len(lambdas)>0 else None}".rjust(25)}', f'{"hmax".rjust(25)}': f'{f"{hmax}".rjust(25)}'}) - # row = {'Iter no.': iterno, 'Eval no.': evalno, - # 'poll_size': poll_size, 'hmin': h, 'fmin': f} ss = 0 for k in range(13+len(fnames), len(self.field_names)): row[self.field_names[k]] = f'{f"{x[ss]}".rjust(25)}' @@ -99,28 +95,28 @@ class PostMADS: iter: List[int] = field(default_factory=list) bb_eval: List[int] = field(default_factory=list) psize: List[float] = field(default_factory=list) - step_name: List[str] = None + step_name: Optional[List[str]] = None nd_points: List[CandidatePoint] = field(default_factory=list) counter: int = 0 - def output_results(self, out: Output, allRes: bool = True): + def output_results(self, out: Output, all_res: bool = True): """ Create a results file from the saved cache""" - if allRes: + if all_res: self.counter = 0 for p in self.poll_dirs[self.counter:]: if p.evaluated and self.counter < len(self.iter): - out.add_row(eval_time= p.Eval_time, + out.add_row(eval_time= p.eval_time, iterno=self.iter[self.counter], evalno=self.bb_eval[self.counter], poll_size=self.psize[self.counter], source=p.source, - Mname=p.Model, + m_name=p.model, f=p.f, status=p.status.name, h=max(p.c_ineq), fobj=p.fobj, - rho=p.RHO, - L=p.LAMBDA, + rho=p.rho, + lambdas=p.lambda_multipliers, x=p.coordinates, - hmax=p.hmax, stepName="Poll-2n" if self.step_name is None else self.step_name[self.counter], fnames=out.fnames) + hmax=p.h_max, step_name="Poll-2n" if self.step_name is None else self.step_name[self.counter], fnames=out.fnames) self.counter += 1 def output_nd_results(self, out: Output): @@ -129,19 +125,19 @@ def output_nd_results(self, out: Output): out.clear_csv_content() for p in self.nd_points: if p.evaluated and counter < len(self.iter): - out.add_row(eval_time= p.Eval_time, + out.add_row(eval_time= p.eval_time, iterno=self.iter[counter], - evalno= p.evalNo, poll_size=self.psize[counter], + evalno= p.eval_no, poll_size=self.psize[counter], source=p.source, - Mname=p.Model, + m_name=p.model, f=p.f, status=p.status.name, h=max(p.c_ineq), fobj=p.fobj, - rho=p.RHO, - L=p.LAMBDA, + rho=p.rho, + lambdas=p.lambda_multipliers, x=p.coordinates, - hmax=p.hmax, stepName="Poll-2n" if self.step_name is None else self.step_name[counter], fnames=out.fnames) + hmax=p.h_max, step_name="Poll-2n" if self.step_name is None else self.step_name[counter], fnames=out.fnames) counter += 1 def output_coordinates(self, out: Output): diff --git a/src/OMADS/PreExploration.py b/src/OMADS/PreExploration.py index c5c88a4..f55975c 100644 --- a/src/OMADS/PreExploration.py +++ b/src/OMADS/PreExploration.py @@ -20,18 +20,28 @@ # 
https://github.com/Ahmed-Bayoumy/OMADS # # Copyright (C) 2022 Ahmed H. Bayoumy # # ------------------------------------------------------------------------------------# -from .Exploration import * -from typing import Callable +from .Exploration import efficient_exploration, search_sampling from .Parameters import Parameters from .Options import Options from .Omesh import Omesh from multiprocessing import cpu_count from .PostProcess import PostMADS, Output +from dataclasses import dataclass +from typing import Dict, Any, List, Optional +from ._common import logger +from .CandidatePoint import CandidatePoint +from ._globals import MSG_TYPE, VAR_TYPE +import copy +from .Barriers import BarrierMO, Barrier +from .Evaluator import Evaluator +from .Gmesh import Gmesh +from .Cache import Cache + @dataclass class PreExploration: """ Preprocessor for setting up optimization settings and parameters""" data: Dict[Any, Any] - log: logger = None + log: Optional[logger] = None def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ MADS initialization """ """ 1- Construct the following classes by unpacking @@ -42,7 +52,7 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): self.log.log_msg(msg="- Reading the input dictionaries", msg_type=MSG_TYPE.INFO) options = Options(**self.data["options"]) param = Parameters(**self.data["param"]) - log.isVerbose = options.isVerbose + log.is_verbose = options.isVerbose B = BarrierMO(param=param, options=options) if param.isPareto else Barrier(param) ev = Evaluator(**self.data["evaluator"]) if self.log is not None: @@ -51,23 +61,9 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): ev.dtype.precision = options.precision if param.constants != None: ev.constants = copy.deepcopy(param.constants) - - # if param.constraints_type is not None and isinstance(param.constraints_type, list): - # for i in range(len(param.constraints_type)): - # if param.constraints_type[i] == BARRIER_TYPES.PB.name: - # param.constraints_type[i] = BARRIER_TYPES.PB - # elif param.constraints_type[i] == BARRIER_TYPES.RB.name: - # param.constraints_type[i] = BARRIER_TYPES.RB - # elif param.constraints_type[i] == BARRIER_TYPES.PEB.name: - # param.constraints_type[i] = BARRIER_TYPES.PEB - # else: - # param.constraints_type[i] = BARRIER_TYPES.EB - # elif param.constraints_type is not None: - # param.constraints_type = BARRIER_TYPES(param.constraints_type) """ 2- Initialize iteration number and construct a point instant for the starting point """ iteration: int = 0 - x_start = CandidatePoint() """ 3- Construct an instant for the poll 2n orthogonal directions class object """ extend = options.extend is not None and isinstance(options.extend, efficient_exploration) is_xs = False @@ -79,14 +75,14 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): if not extend: search = efficient_exploration() search.prob_params = copy.deepcopy(param) - if param.Failure_stop != None and isinstance(param.Failure_stop, bool): - search.Failure_stop = param.Failure_stop + if param.failure_stop != None and isinstance(param.failure_stop, bool): + search.Failure_stop = param.failure_stop search._candidate_points_set = [] search.dtype.precision = options.precision search.save_results = options.save_results """ 4- Construct an instant for the mesh subclass object by inheriting initial parameters from mesh_params() """ - search.mesh = Gmesh(pbParam=param, runOptions=options) if (param.meshType).lower() == "gmesh" else 
Omesh(pbParam=param, runOptions=options) + search.mesh = Gmesh(pb_param=param, run_options=options) if (param.meshType).lower() == "gmesh" else Omesh(pb_param=param, run_options=options) search.sampling_t = search_step.s_method search.type = search_step.type @@ -97,8 +93,6 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ 5- Assign optional algorithmic parameters to the constructed poll instant """ search.opportunistic = options.opportunistic search.seed = options.seed - # search.mesh.dtype.precision = options.precision - # search.mesh.psize = options.psize_init search.eval_budget = options.budget search.store_cache = options.store_cache search.check_cache = options.check_cache @@ -110,7 +104,7 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): search._candidate_points_set = [] n_available_cores = cpu_count() if options.parallel_mode and options.np > n_available_cores: - options.np == n_available_cores + options.np = n_available_cores """ 6- Initialize blackbox handling subclass by copying the evaluator 'ev' instance to the poll object """ search.bb_handle = ev @@ -160,12 +154,11 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): elif k.lower()[0] == "o": x_start.var_type.append(VAR_TYPE.ORDINAL) x_start.var_link.append(None) - # TODO: Implementation in progress + # COMPLETED: Implementation in progress elif k.lower()[0] == "b": x_start.var_type.append(VAR_TYPE.BINARY) else: - x_start.var_type.append(VAR_TYPE.REAL) - x_start.var_link.append(None) + raise IOError("Could not recognize the variable of type " + k + ". Please use one of the following keywords to identify your variable type: real, integer, discrete, categorical, ordinal, or binary") x_start.dtype.precision = options.precision @@ -180,9 +173,9 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): else: if not is_xs: search.bb_output, _ = search.bb_handle.eval(x_start.coordinates) - x_start.hmax = B._h_max if isinstance(B, Barrier) else B._hMax - search.hmax = B._h_max if isinstance(B, Barrier) else B._hMax - x_start.RHO = param.RHO + x_start.h_max = B._h_max + search.hmax = B._h_max + x_start.rho = param.RHO if param.LAMBDA is None: param.LAMBDA = [0] * len(x_start.c_ineq) if not isinstance(param.LAMBDA, list): @@ -191,14 +184,12 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): param.LAMBDA += [param.LAMBDA[-1]] * abs(len(param.LAMBDA)-len(x_start.c_ineq)) if len(x_start.c_ineq) < len(param.LAMBDA): del param.LAMBDA[len(x_start.c_ineq):] - x_start.LAMBDA = param.LAMBDA + x_start.lambda_multipliers = param.LAMBDA x_start.constraints_type = param.constraints_type if not is_xs: x_start.__eval__(search.bb_output) - if isinstance(B, Barrier): - B._h_max = x_start.hmax - elif isinstance(B, BarrierMO): - B._hMax = x_start.hmax + if isinstance(B, Barrier) or isinstance(B, BarrierMO): + B._h_max = x_start.h_max """ 9- Copy the starting point object to the poll's minimizer subclass """ x_start.mesh = copy.deepcopy(search.mesh) search.xmin = copy.deepcopy(x_start) @@ -210,9 +201,9 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): if not extend: search.hashtable = Cache() search.hashtable._n_dim = len(param.baseline) - search.hashtable._isPareto = param.isPareto + search.hashtable._is_pareto = param.isPareto if param.isPareto: - search.hashtable.ND_points = [] + search.hashtable.nd_points = [] """ 10- Initialize the number of successful points found and check if the
starting minimizer performs better than the worst (f = inf) """ @@ -222,8 +213,8 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): search.mesh.psize_max =copy.deepcopy(max(search.mesh.getDeltaFrameSize().coordinates)) search._candidate_points_set = [search.xmin] """ 11- Construct the results postprocessor class object 'post' """ - x_start.evalNo = search.bb_handle.bb_eval - search.xmin.evalNo = search.bb_handle.bb_eval + x_start.eval_no = search.bb_handle.bb_eval + search.xmin.eval_no = search.bb_handle.bb_eval post = PostMADS(x_incumbent=[search.xmin], xmin=search.xmin, poll_dirs=[search.xmin]) post.step_name = [] post.step_name.append(f'Search: {search.type}') @@ -235,21 +226,20 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ Note: printing the post will print a results row within the results table shown in Python console if the 'display' option is true """ - # if options.display: - # print(post) + """ 12- Add the starting point hash value to the cache memory """ if options.store_cache: search.hashtable.hash_id = x_start """ 13- Initialize the output results file object """ out = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names, pname=param.name, runfolder=f'{param.name}_run', replace=True) if param.isPareto: - outP = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names, pname=param.name, runfolder=f'{param.name}_ND', suffix="Pareto") + out_p = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names, pname=param.name, runfolder=f'{param.name}_ND', suffix="Pareto") else: - outP = None + out_p = None if options.display: print("End of the evaluation of the starting points") if self.log is not None: self.log.log_msg(msg="- End of the evaluation of the starting points.", msg_type=MSG_TYPE.INFO) iteration += 1 - return iteration, x_start, search, options, param, post, out, B, outP + return iteration, x_start, search, options, param, post, out, B, out_p diff --git a/src/OMADS/PrePoll.py b/src/OMADS/PrePoll.py index b69fc87..4e7cfc3 100644 --- a/src/OMADS/PrePoll.py +++ b/src/OMADS/PrePoll.py @@ -23,20 +23,25 @@ from .CandidatePoint import CandidatePoint from .Barriers import Barrier, BarrierMO -# from ._common import * from .Omesh import Omesh -from .Directions import * +from .Directions import Dirs2n from .Parameters import Parameters from .Options import Options from .Evaluator import Evaluator from multiprocessing import cpu_count from .PostProcess import PostMADS, Output - +from typing import Any, Dict, List, Optional +from dataclasses import dataclass +from ._common import logger +import copy +from ._globals import VAR_TYPE, MSG_TYPE, DESIGN_STATUS +from .Gmesh import Gmesh +from .Cache import Cache @dataclass class PrePoll: """ Preprocessor for setting up optimization settings and parameters""" data: Dict[Any, Any] - log: logger = None + log: Optional[logger] = None def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ MADS initialization """ @@ -48,7 +53,7 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): self.log.log_msg(msg="- Reading the input dictionaries", msg_type=MSG_TYPE.INFO) options = Options(**self.data["options"]) param = Parameters(**self.data["param"]) - log.isVerbose = options.isVerbose + log.is_verbose = options.isVerbose B = BarrierMO(param=param, options=options) if param.isPareto else Barrier(param) ev = Evaluator(**self.data["evaluator"]) if self.log is not None: @@ 
-72,18 +77,16 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): if not extend: """ 3- Construct an instant for the poll 2n orthogonal directions class object """ poll = Dirs2n() - if param.Failure_stop != None and isinstance(param.Failure_stop, bool): - poll.Failure_stop = param.Failure_stop + if param.failure_stop != None and isinstance(param.failure_stop, bool): + poll.Failure_stop = param.failure_stop poll.dtype.precision = options.precision """ 4- Construct an instant for the mesh subclass object by inheriting initial parameters from mesh_params() """ # COMPLETED: Add the Gmesh constructor req inputs - poll.mesh = Gmesh(pbParam=param, runOptions=options) if (param.meshType).lower() == "gmesh" else Omesh(pbParam=param, runOptions=options) + poll.mesh = Gmesh(pb_param=param, run_options=options) if (param.meshType).lower() == "gmesh" else Omesh(pb_param=param, run_options=options) """ 5- Assign optional algorithmic parameters to the constructed poll instant """ poll.opportunistic = options.opportunistic poll.seed = options.seed - # poll.mesh.dtype.precision = options.precision - # poll.mesh.psize = options.psize_init poll.eval_budget = options.budget poll.store_cache = options.store_cache poll.check_cache = options.check_cache @@ -94,7 +97,7 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): n_available_cores = cpu_count() if options.parallel_mode and options.np > n_available_cores: - options.np == n_available_cores + options.np = n_available_cores """ 6- Initialize blackbox handling subclass by copying the evaluator 'ev' instance to the poll object""" poll.bb_handle = ev @@ -142,13 +145,11 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): elif k.lower()[0] == "o": x_start.var_type.append(VAR_TYPE.ORDINAL) x_start.var_link.append(None) - # TODO: Implementation in progress + # COMPLETED: Implementation in progress elif k.lower()[0] == "b": x_start.var_type.append(VAR_TYPE.BINARY) else: - x_start.var_type.append(VAR_TYPE.REAL) - x_start.var_link.append(None) - + raise IOError("Could not recognize the variable of type " + k + ". 
Please use one of the following keywords to identify your variable type: real, integer, discrete, categorical, ordinal, or binary") x_start.dtype.precision = options.precision if x_start.sets is not None and isinstance(x_start.sets,dict): @@ -163,8 +164,8 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): else: if not is_xs: poll.bb_output, _ = poll.bb_handle.eval(x_start.coordinates) - x_start.hmax = B._h_max if isinstance(B, Barrier) else B._hMax - x_start.RHO = param.RHO + x_start.h_max = B._h_max + x_start.rho = param.RHO if param.LAMBDA is None: param.LAMBDA = [0] * len(x_start.c_ineq) @@ -174,14 +175,12 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): param.LAMBDA += [param.LAMBDA[-1]] * abs(len(param.LAMBDA)-len(x_start.c_ineq)) if len(x_start.c_ineq) < len(param.LAMBDA): del param.LAMBDA[len(x_start.c_ineq):] - x_start.LAMBDA = param.LAMBDA + x_start.lambda_multipliers = param.LAMBDA if not is_xs: x_start.__eval__(poll.bb_output) - if isinstance(B, Barrier): - B._h_max = x_start.hmax - elif isinstance(B, BarrierMO): - B._hMax = x_start.hmax + if isinstance(B, Barrier) or isinstance(B, BarrierMO): + B._h_max = x_start.h_max """ 9- Copy the starting point object to the poll's minimizer subclass """ if not extend: if x_start.status == DESIGN_STATUS.INFEASIBLE and isinstance(B, BarrierMO): @@ -196,9 +195,9 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): if not extend: poll.hashtable = Cache() poll.hashtable._n_dim = len(param.baseline) - poll.hashtable._isPareto = param.isPareto + poll.hashtable._is_pareto = param.isPareto if param.isPareto: - poll.hashtable.ND_points = [] + poll.hashtable.nd_points = [] """ 10- Initialize the number of successful points found and check if the starting minimizer performs better than the worst (f = inf) """ @@ -221,16 +220,16 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ 11- Construct the results postprocessor class object 'post' """ if poll.xmin.evaluated: - x_start.evalNo = poll.bb_handle.bb_eval - poll.xmin.evalNo = poll.bb_handle.bb_eval + x_start.eval_no = poll.bb_handle.bb_eval + poll.xmin.eval_no = poll.bb_handle.bb_eval post = PostMADS(x_incumbent=[poll.xmin], xmin=poll.xmin, poll_dirs=[poll.xmin]) post.psize.append(poll.mesh.getDeltaFrameSize().coordinates) post.bb_eval.append(poll.bb_handle.bb_eval) post.iter.append(iteration) elif poll.x_sc.evaluated: - x_start.evalNo = poll.bb_handle.bb_eval - poll.x_sc.evalNo = poll.bb_handle.bb_eval + x_start.eval_no = poll.bb_handle.bb_eval + poll.x_sc.eval_no = poll.bb_handle.bb_eval post = PostMADS(x_incumbent=[poll.x_sc], xmin=poll.x_sc, poll_dirs=[poll.x_sc]) post.psize.append(poll.mesh.getDeltaFrameSize().coordinates) post.bb_eval.append(poll.bb_handle.bb_eval) @@ -240,17 +239,15 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): """ Note: printing the post will print a results row within the results table shown in Python console if the 'display' option is true """ - # if options.display: - # print(post) """ 12- Add the starting point hash value to the cache memory """ if options.store_cache: poll.hashtable.hash_id = x_start """ 13- Initialize the output results file object """ out = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names, pname=param.name, runfolder=f'{param.name}_run', suffix="all") if param.isPareto: - outP = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names,
pname=param.name, runfolder=f'{param.name}_ND', suffix="Pareto") + out_p = Output(file_path=param.post_dir, vnames=param.var_names, fnames=param.fun_names, pname=param.name, runfolder=f'{param.name}_ND', suffix="Pareto") else: - outP = None + out_p = None if options.display: print("End of the evaluation of the starting points") if self.log is not None: @@ -258,4 +255,4 @@ def initialize_from_dict(self, log: logger = None, xs: CandidatePoint=None): iteration += 1 - return iteration, x_start, poll, options, param, post, out, B, outP + return iteration, x_start, poll, options, param, post, out, B, out_p diff --git a/src/OMADS/SEARCH.py b/src/OMADS/SEARCH.py index ccc988f..eec4d0a 100644 --- a/src/OMADS/SEARCH.py +++ b/src/OMADS/SEARCH.py @@ -22,24 +22,26 @@ # ------------------------------------------------------------------------------------# import importlib -import json from multiprocessing import freeze_support import os import sys import time import numpy as np import copy -from typing import List, Dict, Any -import concurrent.futures +from typing import List, Dict, Any, Optional from matplotlib import pyplot as plt if importlib.util.find_spec('BMDFO'): from BMDFO import toy from .CandidatePoint import CandidatePoint -from ._common import * -from .Directions import * -from .Exploration import * -from .PreExploration import * +from ._common import logger, validator +from .Exploration import SAMPLING_METHOD, VNS +from .PreExploration import PreExploration +from ._globals import SEARCH_TYPE, VAR_TYPE, DESIGN_STATUS, MSG_TYPE, SUCCESS_TYPES +from .Barriers import Barrier, BarrierMO +from .Point import Point +from .Metrics import Metrics + np.set_printoptions(legacy='1.21') def main(*args) -> Dict[str, Any]: @@ -47,24 +49,24 @@ def main(*args) -> Dict[str, Any]: """ Validate and parse the parameters file """ validate = validator() - data: dict = validate.checkInputFile(args=args) + data: dict = validate.check_input_file(args=args) """ Initialize the log file """ log = logger() if not os.path.exists(data["param"]["post_dir"]): try: os.mkdir(data["param"]["post_dir"]) - except: + except OSError: os.makedirs(data["param"]["post_dir"], exist_ok=True) log.initialize(data["param"]["post_dir"] + "/OMADS.log") """ Run preprocessor for the setup of the optimization problem and for the initialization of optimization process """ - iteration, xmin, search, options, param, post, out, B, outP = PreExploration(data).initialize_from_dict(log=log) + iteration, xmin, search, options, param, post, out, B, out_p = PreExploration(data).initialize_from_dict(log=log) - if outP: - outP.stepName = "Search_ND" + if out_p: + out_p.step_name = "Search_ND" """ Set the random seed for results reproducibility """ if len(args) < 4: @@ -72,18 +74,7 @@ def main(*args) -> Dict[str, Any]: else: np.random.seed(int(args[3])) - out.stepName = f"Search: {search.type}" - - - """ Initialize the visualization figure""" - if search.visualize: - plt.ion() - fig = plt.figure() - ax=[] - nplots = len(param.var_names)-1 - ps = [None]*nplots**2 - for ii in range(nplots**2): - ax.append(fig.add_subplot(nplots, nplots, ii+1)) + out.step_name = f"Search: {search.type}" """ Start the count down for calculating the runtime indicator """ tic = time.perf_counter() @@ -91,17 +82,14 @@ peval = 0 if search.type == SEARCH_TYPE.VNS.name: - search_VN = VNS(active_barrier=B, params=param) - search_VN._ns_dist = [int(((search.dim+1)/2)*((search.dim+2)/2)/(len(search_VN._dist))) if search.ns is None else search.ns] *
len(search_VN._dist) - search.ns = sum(search_VN._ns_dist) + search_vn = VNS(active_barrier=B, params=param) + search_vn._ns_dist = [int(((search.dim+1)/2)*((search.dim+2)/2)/(len(search_vn._dist))) if search.ns is None else search.ns] * len(search_vn._dist) + search.ns = sum(search_vn._ns_dist) search.lb = param.lb search.ub = param.ub - LAMBDA_k = xmin.LAMBDA - RHO_k = xmin.RHO - log.log_msg(msg=f"---------------- Run the SEARCH step ({search.sampling_t}) ----------------", msg_type=MSG_TYPE.INFO) num_strat: int = 0 while True: @@ -116,7 +104,7 @@ def main(*args) -> Dict[str, Any]: else: B.insert(search.xmin) elif isinstance(B, BarrierMO) and iteration == 1: - B.init(evalPointList=[xmin]) + B.init(eval_point_list=[xmin]) if isinstance(B, Barrier): search.hmax = B._h_max @@ -131,49 +119,26 @@ def main(*args) -> Dict[str, Any]: """ Create the set of poll directions """ if search.type == SEARCH_TYPE.VNS.name: - search_VN.active_barrier = B - search._candidate_points_set = search_VN.run() - if search_VN.stop: + search_vn.active_barrier = B + search._candidate_points_set = search_vn.run() + if search_vn.stop: print("Reached maximum number of VNS iterations!") break - vv = search.map_samples_from_coords_to_points(samples=search._candidate_points_set) + search.map_samples_from_coords_to_points(samples=search._candidate_points_set) else: - vvp = vvs = [] - bestFeasible: CandidatePoint = B._currentIncumbentFeas if isinstance(B, BarrierMO) else B._best_feasible - bestInf: CandidatePoint = B._currentIncumbentInf if isinstance(B, BarrierMO) else B.get_best_infeasible() - if bestFeasible is not None and bestFeasible.evaluated: - search.xmin = bestFeasible - vvp, _ = search.generate_sample_points(int(((search.dim+1)/2)*((search.dim+2)/2)) if search.ns is None else search.ns) - if bestInf is not None and bestInf.evaluated: + best_feasible: CandidatePoint = B._currentIncumbentFeas if isinstance(B, BarrierMO) else B._best_feasible + best_inf: CandidatePoint = B._currentIncumbentInf if isinstance(B, BarrierMO) else B.get_best_infeasible() + if best_feasible is not None and best_feasible.evaluated: + search.xmin = best_feasible + search.generate_sample_points(int(((search.dim+1)/2)*((search.dim+2)/2)) if search.ns is None else search.ns) + if best_inf is not None and best_inf.evaluated: # if B._filter is not None and B.get_best_infeasible().evaluated: xmin_bup = search.xmin - Prim_samples = search._candidate_points_set - search.xmin = bestInf - vvs, _ = search.generate_sample_points(int(((search.dim+1)/2)*((search.dim+2)/2)) if search.ns is None else search.ns) - search._candidate_points_set += Prim_samples + prim_samples = search._candidate_points_set + search.xmin = best_inf + search.generate_sample_points(int(((search.dim+1)/2)*((search.dim+2)/2)) if search.ns is None else search.ns) + search._candidate_points_set += prim_samples search.xmin = xmin_bup - - if isinstance(vvs, list) and len(vvs) > 0: - vv = vvp + vvs - else: - vv = vvp - - if search.visualize: - sc_old = search.store_cache - cc_old = search.check_cache - search.check_cache = False - search.store_cache = False - for iii in range(len(ax)): - for jjj in range(len(xmin.coordinates)): - for kkk in range(jjj, len(xmin.coordinates)): - if kkk != jjj: - if all([psi is None for psi in ps]): - xinput = [search.xmin] - else: - xinput = search._candidate_points_set - ps = visualize(xinput, jjj, kkk, search.mesh.getdeltaMeshSize().coordinates, vv, fig, ax, search.xmin, ps, bbeval=search.bb_handle, lb=search.prob_params.lb, ub=search.prob_params.ub, 
spindex=iii, bestKnown=search.prob_params.best_known, blk=False) - search.store_cache = sc_old - search.check_cache = cc_old """ Save current poll directions and incumbent solution @@ -187,22 +152,23 @@ def main(*args) -> Dict[str, Any]: search.bb_output = [] xt = [] """ Serial evaluation for points in the poll set """ - if log is not None and log.isVerbose: + if log and log.is_verbose: log.log_msg(f"----------- Evaluate Search iteration # {iteration}-----------", msg_type=MSG_TYPE.INFO) search.log = log if options.check_cache: search.omit_duplicates() search.bb_handle.xmin = xmin if not options.parallel_mode: - xt, post, peval = search.bb_handle.run_callable_serial_local(iter=iteration, peval=peval, eval_set=search._candidate_points_set, options=options, post=post, psize=search.mesh.getDeltaFrameSize().coordinates, stepName=f'Search: {search.type}', mesh=search.mesh, constraintsRelaxation=search.constraints_RP.__dict__, budget=options.budget) + xt, post, peval = search.bb_handle.run_callable_serial_local(iter=iteration, peval=peval, eval_set=search._candidate_points_set, options=options, post=post, psize=search.mesh.getDeltaFrameSize().coordinates, step_name=f'Search: {search.type}', mesh=search.mesh, constraints_relaxation=search.constraints_RP.__dict__, budget=options.budget) else: """ Parallel evaluation for points in the samples set """ search.point_index = -1 - search.bb_eval, xt, post, peval = search.bb_handle.run_callable_parallel_local(iter=iteration, peval=peval, njobs=options.np, eval_set=search._candidate_points_set, options=options, post=post, mesh=search.mesh, stepName=f'Search: {search.type}', psize=search.mesh.getDeltaFrameSize().coordinates, constraintsRelaxation=search.constraints_RP.__dict__, budget=options.budget) + search.bb_eval, xt, post, peval = search.bb_handle.run_callable_parallel_local(iter=iteration, peval=peval, eval_set=search._candidate_points_set, options=options, post=post, mesh=search.mesh, step_name=f'Search: {search.type}', psize=search.mesh.getDeltaFrameSize().coordinates, constraints_relaxation=search.constraints_RP.__dict__, budget=options.budget) search.postprocess_evaluated_candidates(xt) - + if iteration == 1: + search.vicinity_ratio = np.ones((len(search.xmin.coordinates),1)) if isinstance(B, Barrier): xpost: List[CandidatePoint] = search.master_updates(xt, peval, save_all_best=options.save_all_best, save_all=options.save_results) if options.save_results: @@ -215,20 +181,17 @@ def main(*args) -> Dict[str, Any]: """ Update the xmin in post""" post.xmin = copy.deepcopy(search.xmin) - if iteration == 1: - search.vicinity_ratio = np.ones((len(search.xmin.coordinates),1)) + """ Updates """ if search.success == SUCCESS_TYPES.FS: - dir: Point = Point(search.mesh._n) - dir.coordinates = search.xmin.direction.coordinates - # search.mesh.psize = np.multiply(search.mesh.get, 2, dtype=search.dtype.dtype) - search.mesh.enlargeDeltaFrameSize(direction=dir) + direction: Point = Point(search.mesh._n) + direction.coordinates = search.xmin.direction.coordinates + search.mesh.enlargeDeltaFrameSize(direction=direction) if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="expand") elif search.success == SUCCESS_TYPES.US: - # search.mesh.psize = np.divide(search.mesh.psize, 2, dtype=search.dtype.dtype) search.mesh.refineDeltaFrameSize() if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="contract") @@ -236,40 +199,40 @@ def main(*args) -> Dict[str, Any]: xpost: List[CandidatePoint] = [] for i in 
range(len(xt)): xpost.append(xt[i]) - updated, updatedF, updatedInf = B.updateWithPoints(evalPointList=xpost, evalType=None, keepAllPoints=False, updateInfeasibleIncumbentAndHmax=True) + updated, updated_f, updated_inf = B.updateWithPoints(eval_point_list=xpost, keep_all_points=False) if not updated: - newMesh = None + new_mesh = None if B._currentIncumbentInf: B._currentIncumbentInf.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="contract") if B._currentIncumbentFeas: B._currentIncumbentFeas.mesh.refineDeltaFrameSize() - newMesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None + new_mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if B._currentIncumbentFeas else copy.deepcopy(B._currentIncumbentInf.mesh) if B._currentIncumbentInf else None B.updateCurrentIncumbents() if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="contract") - if newMesh: - search.mesh = newMesh + if new_mesh: + search.mesh = new_mesh else: search.mesh.refineDeltaFrameSize() if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="contract") else: - search.mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if updatedF else copy.deepcopy(B._currentIncumbentInf.mesh) if updatedInf else search.mesh - search.xmin = copy.deepcopy(B._currentIncumbentFeas) if updatedF else copy.deepcopy(B._currentIncumbentInf) if updatedInf else search.xmin + search.mesh = copy.deepcopy(B._currentIncumbentFeas.mesh) if updated_f else copy.deepcopy(B._currentIncumbentInf.mesh) if updated_inf else search.mesh + search.xmin = copy.deepcopy(B._currentIncumbentFeas) if updated_f else copy.deepcopy(B._currentIncumbentInf) if updated_inf else search.xmin if search.sampling_t != SAMPLING_METHOD.ACTIVE.name: search.update_local_region(region="expand") for i in range(len(xpost)): post.poll_dirs.append(xpost[i]) - search.hashtable.best_hash_ID = [] + search.hashtable.best_hash_id = [] search.hashtable.add_to_best_cache(B.getAllPoints()) - post.xmin = B._currentIncumbentFeas if updatedF else B._currentIncumbentInf if updatedInf else search.xmin + post.xmin = B._currentIncumbentFeas if updated_f else B._currentIncumbentInf if updated_inf else search.xmin search.mesh.update() if iteration == 1: @@ -278,35 +241,45 @@ if options.save_results: post.nd_points = [] - post.output_results(out=out, allRes=False) + post.output_results(out=out, all_res=False) if param.isPareto: for i in range(len(B.getAllPoints())): post.nd_points.append(B.getAllPoints()[i]) - post.output_nd_results(outP) + post.output_nd_results(out_p) - if log is not None: + if log: log.log_msg(msg=post.__str__(), msg_type=MSG_TYPE.INFO) if options.display: print(post) - Failure_check = iteration > 0 and search.Failure_stop is not None and search.Failure_stop and not (search.success != SUCCESS_TYPES.FS or SUCCESS_TYPES.PS) + failure_check = iteration > 0 and search.Failure_stop is not None and search.Failure_stop and search.success not in (SUCCESS_TYPES.FS, SUCCESS_TYPES.PS)
if search.bb_handle.bb_eval - bbevalold <= 0: num_strat += 1 if num_strat > 5: - search.terminate = True + search.exploreNew = True + num_strat = 0 + else: + num_strat = 0 - if (Failure_check or search.bb_handle.bb_eval >= options.budget) or (all(abs(search.mesh.getdeltaMeshSize().coordinates[pp]) < options.tol for pp in range(search.mesh._n)) or search.bb_handle.bb_eval >= options.budget or search.terminate): - log.log_msg(f"\n--------------- Termination of the search step ---------------", MSG_TYPE.INFO) + if (failure_check or search.bb_handle.bb_eval >= options.budget) or (all(abs(search.mesh.getdeltaMeshSize().coordinates[pp]) < options.tol for pp in range(search.mesh._n)) or search.bb_handle.bb_eval >= options.budget or search.terminate): + log.log_msg("\n--------------- Termination of the search step ---------------", MSG_TYPE.INFO) if (all(abs(search.mesh.getdeltaMeshSize().coordinates[pp]) < options.tol for pp in range(search.mesh._n))): log.log_msg("Termination criterion hit: the mesh size is below the minimum threshold defined.", MSG_TYPE.INFO) if (search.bb_handle.bb_eval >= options.budget or search.terminate): log.log_msg("Termination criterion hit: evaluation budget is exhausted.", MSG_TYPE.INFO) - if (Failure_check): - log.log_msg(f"Termination criterion hit (optional): failed to find a successful point in iteration # {iteration}.", MSG_TYPE.INFO) - log.log_msg(f"-----------------------------------------------------------------\n", MSG_TYPE.INFO) + if (failure_check): + log.log_msg(f"Termination criterion hit (optional): failed to find a successful point in iteration # {iteration}.", MSG_TYPE.INFO) + log.log_msg("-----------------------------------------------------------------\n", MSG_TYPE.INFO) break iteration += 1 toc = time.perf_counter() + if isinstance(B, BarrierMO): + rp: Optional[CandidatePoint] = None + if param.ref_point: + rp = Point() + rp.coordinates = param.ref_point + perf_m = Metrics(nd_solutions=B.getAllPoints(), nobj=B._nobj, ref_point=rp) + HV = perf_m.hypervolume() """ If benchmarking, then populate the results in the benchmarking output report """ if importlib.util.find_spec('BMDFO') and len(args) > 1 and isinstance(args[1], toy.Run): @@ -330,24 +303,24 @@ print(f"{search.bb_handle.blackbox}: fmin = {search.xmin.f} , hmin= {search.xmin.h:.2f}") elif importlib.util.find_spec('BMDFO') and len(args) > 1 and not isinstance(args[1], toy.Run): - if log is not None: + if log: log.log_msg(msg="Could not find " + args[1] + " in the internal BM suite.", msg_type=MSG_TYPE.ERROR) raise IOError("Could not find " + args[1] + " in the internal BM suite.") if options.display: - if log is not None: + if log: log.log_msg(" end of orthogonal MADS ", MSG_TYPE.INFO) print(" end of orthogonal MADS ") - if log is not None: + if log: log.log_msg(" Final objective value: " + str(search.xmin.f) + ", hmin= " + str(search.xmin.h), MSG_TYPE.INFO) print(" Final objective value: " + str(search.xmin.f) + ", hmin= " + str(search.xmin.h)) if options.save_coordinates: post.output_coordinates(out) - if log is not None: + if log: log.log_msg("\n ---Run Summary---", MSG_TYPE.INFO) log.log_msg(f" Run completed in {toc - tic:.4f} seconds", MSG_TYPE.INFO) log.log_msg(msg=f" # of successful search steps = {search.n_successes}", msg_type=MSG_TYPE.INFO) @@ -392,129 +365,11 @@ def main(*args) -> Dict[str, Any]: "psize": search.mesh.getDeltaFrameSize().coordinates, "psuccess": search.xmin.mesh.getDeltaFrameSize().coordinates, # "pmax":
search.mesh.psize_max, - "msize": search.mesh.getdeltaMeshSize().coordinates} - - if search.visualize: - sc_old = search.store_cache - cc_old = search.check_cache - search.check_cache = False - search.store_cache = False - temp = CandidatePoint() - temp.coordinates = output["xmin"] - for ii in range(len(ax)): - for jj in range(len(xmin.coordinates)): - for kk in range(jj+1, len(xmin.coordinates)): - if kk != jj: - ps = visualize(xinput, jj, kk, search.mesh.getdeltaMeshSize().coordinates, vv, fig, ax, temp, ps, bbeval=search.bb_handle, lb=search.prob_params.lb, ub=search.prob_params.ub, title=search.prob_params.problem_name, blk=True,vnames=search.prob_params.var_names, spindex=ii, bestKnown=search.prob_params.best_known) - search.check_cache = sc_old - search.store_cache = cc_old + "msize": search.mesh.getdeltaMeshSize().coordinates, + "HV": HV if param.isPareto else "NA"} return output, search - - -def visualize(points: List[CandidatePoint], hc_index, vc_index, msize, vlim, fig, axes, pmin, ps = None, title="unknown", blk=False, vnames=None, bbeval=None, lb = None, ub=None, spindex=0, bestKnown=None): - - x: np.ndarray = np.zeros(len(points)) - y: np.ndarray = np.zeros(len(points)) - - for i in range(len(points)): - x[i] = points[i].coordinates[hc_index] - y[i] = points[i].coordinates[vc_index] - xmin = pmin.coordinates[hc_index] - ymin = pmin.coordinates[vc_index] - - # Plot grid's dynamic updates - # nrx = int((vlim[hc_index, 1] - vlim[hc_index, 0])/msize) - # nry = int((vlim[vc_index, 1] - vlim[vc_index, 0])/msize) - - # minor_ticksx=np.linspace(vlim[hc_index, 0],vlim[hc_index, 1],nrx+1) - # minor_ticksy=np.linspace(vlim[vc_index, 0],vlim[vc_index, 1],nry+1) - isFirst = False - - if ps[spindex] == None: - isFirst = True - ps[spindex] =[] - if bbeval is not None and lb is not None and ub is not None: - xx = np.arange(lb[hc_index], ub[hc_index], 0.1) - yy = np.arange(lb[vc_index], ub[vc_index], 0.1) - X, Y = np.meshgrid(xx, yy) - Z = np.zeros_like(X) - for i in range(X.shape[0]): - for j in range(X.shape[1]): - Z[i,j] = bbeval.eval([X[i,j], Y[i,j]])[0] - bbeval.bb_eval -= 1 - if bestKnown is not None: - best_index = np.argwhere(Z <= bestKnown+0.005) - if best_index.size == 0: - best_index = np.argwhere(Z == np.min(Z)) - xbk = X[best_index[0][0], best_index[0][1]] - ybk = Y[best_index[0][0], best_index[0][1]] - temp1 = axes[spindex].contourf(X, Y, Z, 100) - axes[spindex].set_aspect('equal') - fig.subplots_adjust(right=0.8) - cbar_ax = fig.add_axes([0.85, 0.1, 0.01, 0.85]) - fig.colorbar(temp1, cbar_ax) - fig.suptitle(title) - - ps[spindex].append(temp1) - - - - temp2, = axes[spindex].plot(xmin, ymin, 'ok', alpha=0.08, markersize=2) - - ps[spindex].append(temp2) - - if bestKnown is not None: - temp3, = axes[spindex].plot(xbk, ybk, 'dr', markersize=2) - ps[spindex].append(temp3) - - - - else: - ps[spindex][1].set_xdata(x) - ps[spindex][1].set_ydata(y) - - - - fig.canvas.draw() - fig.canvas.flush_events() - # axes.set_xticks(minor_ticksx,major=True) - # axes.set_yticks(minor_ticksy,major=True) - - # axes.grid(which="major",alpha=0.3) - # ps[1].set_xdata(x) - # ps[1].set_ydata(y) - if blk: - if bestKnown is not None: - t1 = ps[spindex][2] - t2, =axes[spindex].plot(x, y, 'ok', alpha=0.08, markersize=2) - - - t3, = axes[spindex].plot(xmin, ymin, '*b', markersize=4) - if bestKnown is not None: - fig.legend((t1, t2, t3), ("best_known", "sample_points", "best_found")) - else: - fig.legend((t2, t3), ("sample_points", "best_found")) - else: - axes[spindex].plot(x, y, 'ok', alpha=0.08, markersize=2) 
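
Note on the multi-objective reporting added above: for Pareto runs, SEARCH.main now closes by building a Metrics object from the barrier's non-dominated set and storing its hypervolume in the output dictionary under "HV". As a quick reference for what that indicator measures, here is a minimal sketch for two minimized objectives; hypervolume_2d, the sample front, and the reference point are illustrative stand-ins, not part of the OMADS API:

    import numpy as np

    def hypervolume_2d(front: np.ndarray, ref: np.ndarray) -> float:
        """Area dominated by a 2-objective non-dominated front, bounded by a
        reference point that every kept front member dominates (minimization)."""
        pts = front[np.all(front < ref, axis=1)]  # drop points outside the reference box
        if pts.size == 0:
            return 0.0
        pts = pts[np.argsort(pts[:, 0])]  # sort by f1; f2 then decreases along the front
        hv, prev_f2 = 0.0, ref[1]
        for f1, f2 in pts:
            hv += (ref[0] - f1) * (prev_f2 - f2)  # rectangular strip owned by this point
            prev_f2 = f2
        return hv

    front = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 1.0]])
    print(hypervolume_2d(front, ref=np.array([4.0, 5.0])))  # 8.0

Larger values are better: a front that pushes further toward the ideal point claims more of the reference box, which makes this single number a convenient progress measure for the Pareto runs written to the "_ND" output files.
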
- # axes[spindex].plot(xmin, ymin, '*b', markersize=4) - if vnames is not None: - axes[spindex].set_xlabel(vnames[hc_index]) - axes[spindex].set_ylabel(vnames[vc_index]) - if lb is not None and ub is not None: - axes[spindex].set_xlim([lb[hc_index], ub[hc_index]]) - axes[spindex].set_ylim([lb[vc_index], ub[vc_index]]) - plt.show(block=blk) - - if blk: - fig.savefig(f"{title}.png", bbox_inches='tight') - plt.close(fig) - - return ps - - - def rosen(x, *argv): x = np.asarray(x) y = [np.sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0, @@ -525,7 +380,7 @@ def rosen(x, *argv): def test_omads_callable_quick(): - eval = {"blackbox": rosen} + eval_func = {"blackbox": rosen} param = {"baseline": [-2.0, -2.0], "lb": [-5, -5], "ub": [10, 10], @@ -540,13 +395,12 @@ def test_omads_callable_quick(): } options = {"seed": 0, "budget": 100000, "tol": 1e-12, "display": True} - data = {"evaluator": eval, "param": param, "options": options, "sampling": sampling} + data = {"evaluator": eval_func, "param": param, "options": options, "sampling": sampling} out: Dict = main(data) print(out) -def test_omads_file_quick(): - file = "tests\\bm\\constrained\\sphere.json" + if __name__ == "__main__": freeze_support() diff --git a/src/OMADS/_common.py b/src/OMADS/_common.py index f962b4d..0c1ad22 100644 --- a/src/OMADS/_common.py +++ b/src/OMADS/_common.py @@ -22,26 +22,23 @@ # Copyright (C) 2022 Ahmed H. Bayoumy # # ------------------------------------------------------------------------------------# -import copy -from dataclasses import dataclass, field -import importlib +from dataclasses import dataclass import logging -import operator import time import shutil import os -from typing import List, Dict, Any import numpy as np -from .CandidatePoint import CandidatePoint import json -from ._globals import * +from ._globals import MSG_TYPE +import pkg_resources + np.set_printoptions(legacy='1.21') @dataclass class validator: - def checkInputFile(self, args) -> dict: + def check_input_file(self, args) -> dict: if type(args[0]) is dict: data = args[0] elif isinstance(args[0], str): @@ -67,11 +64,11 @@ def checkInputFile(self, args) -> dict: @dataclass class logger: log: None = None - isVerbose: bool = False + is_verbose: bool = False - def initialize(self, file: str, wTime = False, isVerbose = False): + def initialize(self, file: str, w_time = False, is_verbose = False): # Create and configure logger - self.isVerbose = isVerbose + self.is_verbose = is_verbose logging.basicConfig(filename=file, format='%(message)s', filemode='w') @@ -82,8 +79,8 @@ def initialize(self, file: str, wTime = False, isVerbose = False): #Now we are going to Set the threshold of logger to DEBUG self.log.setLevel(logging.DEBUG) cur_time = time.strftime("%Y-%m-%d, %H:%M:%S", time.localtime()) - self.log_msg(msg=f"###################################################### \n", msg_type=MSG_TYPE.INFO) - self.log_msg(msg=f"################# OMADS ver. 2401 #################### \n", msg_type=MSG_TYPE.INFO) + self.log_msg(msg="###################################################### \n", msg_type=MSG_TYPE.INFO) + self.log_msg(msg=f"################# OMADS release #{2410} #################### \n", msg_type=MSG_TYPE.INFO) self.log_msg(msg=f"############### {cur_time} ################# \n", msg_type=MSG_TYPE.INFO) # Remove all handlers associated with the root logger object. 
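
Note on the granularity helpers renamed in Point.py earlier in this patch: next_mult/is_mult exist because blackbox variables may live on a granular grid (multiples of a step g), and exact modulo tests fail under floating point. A simplified, self-contained sketch of that idea follows; ZERO_TOL and the ceil-based rounding are assumptions for illustration, while the in-tree versions additionally consult the point's defined flags and the package's DType precision:

    import math

    ZERO_TOL = 1e-12  # stand-in tolerance; not the package's DType().zero value

    def is_mult(v: float, g: float) -> bool:
        """True when v is, up to round-off, an integer multiple of granularity g."""
        if g <= 0.0 or abs(v) <= ZERO_TOL:
            return True  # no grid imposed, or v is numerically zero
        mult = round(v / g)
        return abs(v - mult * g) <= max(abs(mult), 1) * ZERO_TOL

    def next_mult(v: float, g: float) -> float:
        """Round v up to the next grid value k*g (identity if already on the grid)."""
        if g <= 0.0 or is_mult(v, g):
            return v
        return math.ceil(v / g) * g

    print(next_mult(0.35, 0.1))  # 0.4
    print(is_mult(0.3, 0.1))     # True, even though 0.3 % 0.1 != 0 in floats
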
@@ -91,7 +88,7 @@ def initialize(self, file: str, wTime = False, isVerbose = False): logging.root.removeHandler(handler) # Create and configure logger - if wTime: + if w_time: logging.basicConfig(filename=file, format='%(asctime)s %(message)s', filemode='a') @@ -119,16 +116,16 @@ def log_msg(self, msg: str, msg_type: MSG_TYPE): elif msg_type == MSG_TYPE.CRITICAL: self.log.critical(msg) - def relocate_logger(self, source_file: str = None, Dest_file: str = None): - if Dest_file is not None and source_file is not None and os.path.exists(source_file): - shutil.copy(source_file, Dest_file) + def relocate_logger(self, source_file: str = None, dest_file: str = None): + if dest_file is not None and source_file is not None and os.path.exists(source_file): + shutil.copy(source_file, dest_file) if os.path.exists("DSMToDMDO.yaml"): - shutil.copy("DSMToDMDO.yaml", Dest_file) + shutil.copy("DSMToDMDO.yaml", dest_file) # Remove all handlers associated with the root logger object. for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # Create and configure logger - logging.basicConfig(filename=os.path.join(Dest_file, "DMDO.log"), + logging.basicConfig(filename=os.path.join(dest_file, "DMDO.log"), format='%(asctime)s %(message)s', filemode='a') #Let us Create an object diff --git a/src/OMADS/_globals.py b/src/OMADS/_globals.py index 311ebce..923c346 100644 --- a/src/OMADS/_globals.py +++ b/src/OMADS/_globals.py @@ -27,7 +27,6 @@ import warnings import numpy as np import platform -import pandas as pd np.set_printoptions(legacy='1.21') @@ -77,7 +76,6 @@ def precision(self): def precision(self, val: str): self._prec = val self._prec = val - isWin = platform.platform().split('-')[0] == 'Windows' if val == "high": if (not hasattr(np, 'float128')): 'Warning: MS Windows does not support precision with the {1e-18} high resolution of the python numerical library (numpy) so high precision will be changed to medium precision which supports {1e-15} resolution check: https://numpy.org/doc/stable/user/basics.types.html ' diff --git a/tests/OMADS_MO_BASIC.py b/tests/OMADS_MO_BASIC.py deleted file mode 100644 index d12d338..0000000 --- a/tests/OMADS_MO_BASIC.py +++ /dev/null @@ -1,391 +0,0 @@ -from OMADS import POLL, SEARCH, MADS -import copy -import os -import numpy as np - -from typing import Dict, List -from multiprocessing import freeze_support -import platform - -def common_dict(): - outDict: dict = { - "evaluator": - { - "blackbox": None}, - - "param": - { - "baseline": None, - "lb": None, - "ub": None, - "var_names": ["x", "y"], - "fun_names": ["f1", "f2"], - # "constraints_type": ["PB", "PB"], - "nobj": 2, - "isPareto": True, - "scaling": None, - "LAMBDA": [1E5, 1E5], - "RHO": 1.0, - "h_max": np.inf, - "meshType": "GMESH", - "post_dir": None - }, - - "options": - { - "seed": 0, - "budget": 2000, - "tol": 1e-12, - "psize_init": 1, - "display": False, - "opportunistic": False, - "check_cache": True, - "store_cache": True, - "collect_y": False, - "rich_direction": True, - "precision": "high", - "save_results": True, - "save_coordinates": False, - "save_all_best": False, - "parallel_mode": False - }, - - "search": { - "type": "sampling", - "s_method": "ACTIVE", - "ns": 10, - "visualize": False - }, - } - return outDict - -def MO_Binh_and_Korn(x): - f1 = 4 * x[0]**2 + 4 * x[1]**2 - f2 = (x[0] - 5)**2 + (x[1] - 5)**2 - g1 = (x[0]-5)**2 + x[1]**2 -25 - g2 = 7.7 - (x[0]-8)**2 - (x[1]+3)**2 - - return [[f1, f2], [g1, g2]] - -def MO_Chankong_and_Haimes(x): - f1 = 2 + (x[0]-2)**2 + (x[1]-1)**2 - f2 = 
9*x[0]-(x[1]-1)**2 - g1 = x[0]**2 + x[1]**2-225 - g2 = x[0] -3*x[1]+10 - - return [[f1, f2], [g1, g2]] - -def MO_Test_function_4(x): - f1 = x[0]**2-x[1] - f2 = -0.5*x[0]-x[1]-1 - g1 = -(6.5 - (x[0]/6) - x[1]) - g2 = -(7.5 - 0.5 *x[0] -x[1]) - g3 = -(30 - 5*x[0] -x[1]) - - return [[f1, f2], [g1, g2, g3]] - -def MO_Kursawe(x): - f1 = sum([-10*np.exp(-0.2*np.sqrt(x[i]**2 + x[i+1]**2)) for i in range(2)]) - f2 = sum([abs(x[i])**0.8 + 5*np.sin(x[i]**3) for i in range(3)]) - - return [[f1, f2], [0]] - -def MO_Fonseca_Fleming(x): - n = len(x) - f1 = 1 - np.exp(-sum([(x[i]-(1/np.sqrt(n)))**2 for i in range(n)])) - f2 = 1 - np.exp(-sum([(x[i]+(1/np.sqrt(n)))**2 for i in range(n)])) - - return [[f1, f2], [0]] - -def MO_Osyczka_Kundu(x): - f1 = -25*(x[0]-2)**2 - (x[1]-2)**2 - (x[2]-1)**2 - (x[3]-4)**2 - (x[4]-1)**2 - f2 = sum([x[i]**2 for i in range(6)]) - - g1 = x[0] + x[1] -2 - g2 = 6 - x[0] - x[1] - g3 = 2 - x[1] + x[0] - g4 = 2 - x[0] + 3*x[1] - g5 = 4-(x[2]-3)**2 -x[3] - g6 = (x[4]-3)**2 + x[5] -4 - - return [[f1, f2], [-g1, -g2, -g3, -g4, -g5, -g6]] - -def MO_CTP1(x): - f1 = x[0] - f2 = (1+x[1])*np.exp(-(x[0])/(1+x[1])) - g1 = 1-((f2)/(0.858*np.exp(-0.541*f1))) - g2 = 1-(f2/(0.728*np.exp(-0.295*f1))) - - return [[f1, f2], [g1, g2]] - -def MO_Ex(x): - f1 = x[0] - f2 = (1+x[1])/x[0] - - g1 = 6-(x[1]+9*x[0]) - g2 = 1+x[1] - 9*x[0] - - return [[f1,f2],[g1,g2]] - -def MO_ZDT1(x): - f1 = x[0] # objective 1 - g = 1 + 9 * np.sum(np.divide(x[1:len(x)], (len(x) - 1))) - h = 1 - np.sqrt(f1 / g) - f2 = g * h # objective 2 - - return [[f1, f2], [0]] - -def MO_ZDT3(x): - f1 = x[0] # objective 1 - g = 1 + (9/(len(x) - 1)) * np.sum(x[1:len(x)]) - h = 1 - np.sqrt(f1 / g) - (f1/g)*np.sin(10*np.pi*f1) - f2 = g * h # objective 2 - - return [[f1, f2], [0]] - -def MO_ZDT4(x): - f1 = x[0] # objective 1 - g = 1 + 10*(len(x)-1) + np.sum([x[i]**2 - 10*np.cos(4*np.pi*x[i]) for i in range(1, len(x))]) - h = 1 - np.sqrt(f1 / g) - f2 = g * h # objective 2 - - return [[f1, f2], [0]] - -def MO_ZDT6(x): - f1 = 1 - np.exp(-4*x[0]) * np.sin(6*np.pi*x[0])**6 - g = 1+9*(sum(x[1:len(x)])/9)**.25 - h = 1 - (f1/g)**2 - f2 = g * h # objective 2 - - return [[f1, f2], [0]] - -def test_MO_Binh_and_Korn(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_Binh_and_Korn - data["param"]["name"] = "Binh_and_Korn" - data["param"]["baseline"] = [0, 0] - data["param"]["lb"] = [0, 0] - data["param"]["ub"] = [5, 3] - data["param"]["constraints_type"] = ["PB", "PB"] - data["param"]["scaling"] = [5, 3] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Binh_and_Korn/post" - - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Chankong_and_Haimes(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_Chankong_and_Haimes - data["param"]["name"] = "Chankong_and_Haimes" - data["param"]["baseline"] = [0, 0] - data["param"]["lb"] = [-20, -20] - data["param"]["ub"] = [20, 20] - data["param"]["constraints_type"] = ["PB", "PB"] - data["param"]["scaling"] = [40, 40] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Chankong_and_Haimes/post" - - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Fonseca_Fleming(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_Fonseca_Fleming - data["param"]["name"] = "Fonseca_Fleming" - data["param"]["baseline"] = [0, 0] - data["param"]["lb"] = [-4, -4] - data["param"]["ub"] = [4, 4] - data["meshType"] = "GMESH" - # data["param"]["constraints_type"] = ["EB"] - data["param"]["scaling"] = [8, 8] - data["param"]["post_dir"] = 
"./tests/bm/MOO/unconstrained/Fonseca_Fleming/post" - data["options"]["budget"] = 1000 - - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Test_function_4(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_Test_function_4 - data["param"]["name"] = "Test_function_4" - data["param"]["baseline"] = [0, 0]#[3, 3] - data["param"]["lb"] = [-7, -7] - data["param"]["ub"] = [4, 4] - data["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB", "PB"] - data["param"]["scaling"] = [10, 10] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Test_function_4/post" - data["options"]["budget"] = 1000 - - # data["search"]["type"] = "VNS" - # data["search"]["s_method"] = "RANDOM" - # data["search"]["ns"] = 10 - - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Kursawe(): - # TODO: uncon logic needs review - data = common_dict() - data["evaluator"]["blackbox"] = MO_Kursawe - data["param"]["name"] = "Kursawe" - # data["param"]["baseline"] = [-2.0, 0.5, -4.5] - data["param"]["baseline"] = [-2.0, -0.5, -5] - data["param"]["var_names"] = ['x1', 'x2', 'x3'] - data["param"]["lb"] = [-5, -5, -5] - data["param"]["ub"] = [5, 5, 5] - # data["param"]["LAMBDA"]= None - # data["param"]["RHO"] = 1 - # data["param"]["h_max"] = 0 - data["meshType"] = "GMESH" - # data["param"]["constraints_type"] = ["PB"] - data["param"]["scaling"] = [10, 10, 10] - data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/Kursawe/post" - data["options"]["budget"] = 10000 - - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Osyczka_Kundu(): - # COMPLETED: Investigate why starting from infeasible point does not work in MOO - data = common_dict() - data["evaluator"]["blackbox"] = MO_Osyczka_Kundu - data["param"]["name"] = "Osyczka_Kundu" - data["param"]["baseline"] = [3, 2, 2, 0, 5, 10] - # data["param"]["baseline"] = [5, 1, 5, 0, 5, 8] - data["param"]["var_names"] = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6'] - data["param"]["lb"] = [0, 0, 1, 0, 1, 0] - data["param"]["ub"] = [10, 10, 5, 6, 5, 10] - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"]*6 - data["param"]["scaling"] = [10, 10, 4, 6, 4, 10] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Osyczka_Kundu/post" - data["options"]["budget"] = 30000 - data["options"]["seed"] = 1234 - - # POLL.main(data) - data["search"]["ns"] = 22 - # SEARCH.main(data) - MADS.main(data) - -def test_MO_CTP1(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_CTP1 - data["param"]["name"] = "MO_CTP1" - data["param"]["baseline"] = [0.5, 0.5] - data["param"]["var_names"] = ['x1', 'x2'] - data["param"]["lb"] = [0, 0] - data["param"]["ub"] = [1, 1] - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"]*2 - data["param"]["scaling"] = [1, 1] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/MO_CTP1/post" - data["options"]["budget"] = 3000 - data["search"]["ns"] = 50 - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_Ex(): - data = common_dict() - data["evaluator"]["blackbox"] = MO_Ex - data["param"]["name"] = "Ex" - data["param"]["baseline"] = [0.6, 2.5] - data["param"]["var_names"] = ['x1', 'x2'] - data["param"]["lb"] = [0.1, 0] - data["param"]["ub"] = [1, 5] - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"]*2 - data["param"]["scaling"] = [0.9, 5] - data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Ex/post" - data["options"]["budget"] = 5000 - data["search"]["ns"] = 15 - # 
POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_ZDT1(): - d = 30 - data = common_dict() - data["evaluator"]["blackbox"] = MO_ZDT1 - data["param"]["name"] = "MO_ZDT1" - np.random.seed(seed= 12345) - data["param"]["baseline"] = np.random.rand(d) - data["param"]["var_names"] = [f'x{i}' for i in range(d)] - data["param"]["lb"] = [0]*d - data["param"]["ub"] = [1]*d - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"] - data["param"]["scaling"] = [1]*d - data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT1/post" - data["options"]["budget"] = 10000 - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_ZDT3(): - d = 30 - data = common_dict() - data["evaluator"]["blackbox"] = MO_ZDT3 - data["param"]["name"] = "MO_ZDT3" - np.random.seed(seed= 12345) - data["param"]["baseline"] = np.random.rand(d) - data["param"]["var_names"] = [f'x{i}' for i in range(d)] - data["param"]["lb"] = [0]*d - data["param"]["ub"] = [1]*d - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"] - data["param"]["scaling"] = [1]*d - data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT3/post" - data["options"]["budget"] = 10000 - data["search"]["ns"] = 50 - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_ZDT4(): - d = 10 - data = common_dict() - data["evaluator"]["blackbox"] = MO_ZDT4 - data["param"]["name"] = "MO_ZDT4" - np.random.seed(seed= 12345) - data["param"]["baseline"] = np.random.rand(1).tolist() + np.random.uniform(low=-10, high=10, size=(d-1,)).tolist() - data["param"]["var_names"] = [f'x{i}' for i in range(d)] - data["param"]["lb"] = [0] + [-10]*(d-1) - data["param"]["ub"] = [1] + [10]*(d-1) - data["param"]["meshType"] = "GMESH" - data["param"]["constraints_type"] = ["PB"] - data["param"]["scaling"] = [1] + [20]*(d-1) - data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT4/post" - data["options"]["budget"] = 5000 #40000 - data["search"]["ns"] = 55 - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -def test_MO_ZDT6(): - d = 10 - data = common_dict() - data["evaluator"]["blackbox"] = MO_ZDT6 - data["param"]["name"] = "MO_ZDT6" - np.random.seed(seed= 12345) - data["param"]["baseline"] = np.random.rand(d) - data["param"]["var_names"] = [f'x{i}' for i in range(d)] - data["param"]["lb"] = [0]*d - data["param"]["ub"] = [1]*d - data["param"]["meshType"] = "OMESH" - data["param"]["constraints_type"] = ["PB"] - data["param"]["scaling"] = [1]*d - data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT6/post" - data["options"]["budget"] = 10000 - data["search"]["ns"] = 100 - # POLL.main(data) - # SEARCH.main(data) - MADS.main(data) - -if __name__ == "__main__": - freeze_support() diff --git a/tests/test_OMADS_BASIC.py b/tests/test_OMADS_BASIC.py index a7dc801..7664565 100644 --- a/tests/test_OMADS_BASIC.py +++ b/tests/test_OMADS_BASIC.py @@ -1,4 +1,5 @@ import importlib +import time from OMADS import POLL, SEARCH, MADS from matplotlib import pyplot as plt import copy @@ -9,6 +10,41 @@ from multiprocessing import freeze_support import platform +import logging + +# Configure the logging +# Create a custom logger + + +logger = logging.getLogger('OMADS_SO_BBO_unit_tests') +logger.setLevel(logging.DEBUG) # Set to DEBUG to capture all messages + +# Create a console handler +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.INFO) # Only log INFO and above to console + +# Create a file handler +file_handler = 
logging.FileHandler(filename='tests/OMADS_BBO_unit_test.log', mode='a')
+file_handler.setLevel(logging.DEBUG)  # Log all messages to file
+
+# Create a formatter and set it for handlers
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+console_handler.setFormatter(formatter)
+file_handler.setFormatter(formatter)
+
+# Add handlers to the logger
+logger.addHandler(console_handler)
+logger.addHandler(file_handler)
+
+# Filter to exclude messages from the root logger
+class NoRootMessagesFilter(logging.Filter):
+    def filter(self, record):
+        return record.name != 'root'
+
+# Add the filter to handlers
+console_handler.addFilter(NoRootMessagesFilter())
+file_handler.addFilter(NoRootMessagesFilter())
+
 def geom_prog(x, *argv):
   xx = x
   x2 = np.sqrt(xx[3] ** 2 + xx[4] ** -2 + xx[5] ** -2 + xx[6] ** 2)
@@ -35,9 +71,15 @@ def thin_con(x):
   y = [[f], [c1, c2]]
   return y
 
-def test_MADS_callable_quick_2d():
+def test_create_out_file():
+  open('tests/OMADS_BBO_unit_test.log', 'w').close()
+
+
+def test_callable_quick_2d():
+  logger.info('\nStarted running bbo_2d_rosenbrock test... \n')
+  tic = time.perf_counter()
   d = 2
-  eval = {"blackbox": rosen}
+  eval_callable = {"blackbox": rosen}
   param = {"name": "RB","baseline": [-2.5]*d,
           "lb": [-5]*d,
           "ub": [10]*d,
@@ -50,45 +92,98 @@ def test_MADS_callable_quick_2d():
     "visualize": False,
     "criterion": None
   }
-  options = {"seed": 10000, "budget": 3000, "tol": 1e-9, "display": False, "check_cache": True, "store_cache": True, "rich_direction": True, "opportunistic": False, "save_results": False, "isVerbose": False}
+  options = {"seed": 10000, "budget": 1100, "tol": 1e-9, "display": False, "check_cache": True, "store_cache": True, "rich_direction": True, "opportunistic": False, "save_results": False, "isVerbose": False}
   search = {
     "type": "sampling",
     "s_method": "ACTIVE",
     "ns": int((d+1)*(d+2)/2)+55,
     "visualize": False
   }
-  data = {"evaluator": eval, "param": param, "options": options, "sampling": sampling, "search": search}
-
-  outM: Dict = MADS.main(data)
-  outP: Dict = POLL.main(data)
-  outS: Dict = SEARCH.main(data)
-  OMS = outM[0]["fmin"][0]
-  OPS = outP[0]["fmin"][0]
-  OSS = outS[0]["fmin"][0]
-  if (outM[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0006} \nPoll: fmin = {OPS}\nSearch: fmin = {OSS}")
+  data = {"evaluator": eval_callable, "param": param, "options": options, "sampling": sampling, "search": search}
+  data["param"]["lhs_search_initialization"] = True
+  logger.info('\nStarted running MADS on bbo_2d_rosenbrock serial execution ...')
+  ticms = time.perf_counter()
+  out_mads: Dict = MADS.main(data)
+  tocms = time.perf_counter()
+  logger.info(f'Completed serial MADS run on bbo_2d_rosenbrock in {tocms - ticms:.4f} seconds.\n')
+
+  ticps = time.perf_counter()
+  logger.info('\nStarted running POLL on bbo_2d_rosenbrock serial execution ...')
+  out_poll: Dict = POLL.main(data)
+  tocps = time.perf_counter()
+  logger.info(f'Completed serial POLL run on bbo_2d_rosenbrock in {tocps - ticps:.4f} seconds.\n')
+
+  ticss = time.perf_counter()
+  logger.info('\nStarted running SEARCH on bbo_2d_rosenbrock serial execution ...')
+  out_search: Dict = SEARCH.main(data)
+  tocss = time.perf_counter()
+  logger.info(f'Completed serial SEARCH run on bbo_2d_rosenbrock in {tocss - ticss:.4f} seconds.\n')
+
+  OMS = out_mads[0]["fmin"][0]
+  OPS = out_poll[0]["fmin"][0]
+  OSS = out_search[0]["fmin"][0]
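The timed start/stop logging that wraps each solver call above recurs throughout these tests. A minimal context-manager sketch (a hypothetical helper, not part of the OMADS API) would collapse each tic/toc pair into one statement:

import time
from contextlib import contextmanager

@contextmanager
def timed(logger, label):
    # Log entry, yield to the caller, then log the elapsed wall time.
    logger.info(f'\nStarted running {label} ...')
    tic = time.perf_counter()
    try:
        yield
    finally:
        toc = time.perf_counter()
        logger.info(f'Completed {label} in {toc - tic:.4f} seconds.\n')

# Usage sketch:
#   with timed(logger, 'MADS on bbo_2d_rosenbrock serial execution'):
#       out_mads = MADS.main(data)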
-  if (outP[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nSequential Exec: POLL: fmin: {OPS} > {0.0006} \nMADS: fmin = {OMS}\nSearch: fmin = {OSS}")
-  if (outS[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nSequential Exec: Search: fmin {OSS} > {0.0006} \nMADS: fmin = {OMS}\nPoll: fmin = {OPS}")
   data["options"]["parallel_mode"] = True
-  OMS = outM[0]["fmin"][0]
-  OPS = outP[0]["fmin"][0]
-  OSS = outS[0]["fmin"][0]
-  if (outM[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nParallel Exec: MADS: fmin: {OMS} > {0.0006} \nPoll: fmin = {OPS}\nSearch: fmin = {OSS}")
+  data["options"]["np"] = 4
+  logger.info('\nStarted running MADS on bbo_2d_rosenbrock parallel execution ...')
+  ticmp = time.perf_counter()
+  out_mads: Dict = MADS.main(data)
+  tocmp = time.perf_counter()
+  logger.info(f'Completed parallel MADS run on bbo_2d_rosenbrock in {tocmp - ticmp:.4f} seconds.\n')
+
+  ticpp = time.perf_counter()
+  logger.info('\nStarted running POLL on bbo_2d_rosenbrock parallel execution ...')
+  out_poll: Dict = POLL.main(data)
+  tocpp = time.perf_counter()
+  logger.info(f'Completed parallel POLL run on bbo_2d_rosenbrock in {tocpp - ticpp:.4f} seconds.\n')
+
+  ticsp = time.perf_counter()
+  logger.info('\nStarted running SEARCH on bbo_2d_rosenbrock parallel execution ...')
+  out_search: Dict = SEARCH.main(data)
+  tocsp = time.perf_counter()
+  logger.info(f'Completed parallel SEARCH run on bbo_2d_rosenbrock in {tocsp - ticsp:.4f} seconds.\n')
-  if (outP[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nParallel Exec: POLL: fmin: {OPS} > {0.0006} \nMADS: fmin = {OMS}\nSearch: fmin = {OSS}")
+  OMP = out_mads[0]["fmin"][0]
+  OPP = out_poll[0]["fmin"][0]
+  OSP = out_search[0]["fmin"][0]
-  if (outS[0]["fmin"][0] > 0.0006):
-    raise ValueError(f"\nParallel Exec: Search: fmin {OSS} > {0.0006} \nMADS: fmin = {OMS}\nPoll: fmin = {OPS}")
+
+  toc = time.perf_counter()
+  logger.info(f'Completed bbo_2d_rosenbrock test in {toc - tic:.4f} seconds.')
+  logger.info(f"\nBest known solution: fmin = {0.}")
+  logger.info(f"\nSequential Exec: MADS: fmin = {OMS} \nPoll: fmin = {OPS} \nSearch: fmin = {OSS}")
+  logger.info(f"\nParallel Exec: MADS: fmin = {OMP} \nPoll: fmin = {OPP}\nSearch: fmin = {OSP}")
 
-def test_MADS_callable_quick_const_2d():
+  if (OMS > 0.0006):
+    logger.error(f"Sequential Exec: MADS: fmin: {OMS} > {0.0006}")
+    raise ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0006}")
+
+  if (OPS > 0.008):
+    logger.error(f"Sequential Exec: POLL: fmin: {OPS} > {0.008}")
+    raise ValueError(f"\nSequential Exec: POLL: fmin: {OPS} > {0.008}")
+
+  if (OSS > 0.0006):
+    logger.error(f"Sequential Exec: Search: fmin {OSS} > {0.0006}")
+    raise ValueError(f"\nSequential Exec: Search: fmin {OSS} > {0.0006}")
+
+  if (OMP > 0.05):
+    logger.error(f"Parallel Exec: MADS: fmin: {OMP} > {0.05}")
+    raise ValueError(f"\nParallel Exec: MADS: fmin: {OMP} > {0.05}")
+
+  if (OPP > 0.008):
+    logger.error(f"Parallel Exec: POLL: fmin: {OPP} > {0.008}")
+    raise ValueError(f"\nParallel Exec: POLL: fmin: {OPP} > {0.008}")
+
+  if (OSP > 0.001):
+    logger.error(f"Parallel Exec: Search: fmin {OSP} > {0.001}")
+    raise ValueError(f"\nParallel Exec: Search: fmin {OSP} > {0.001}")
+
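Each tolerance check above (and in the tests that follow) repeats the same log-then-raise pattern. A sketch of a hypothetical helper (illustrative names, not OMADS API) that would express it once:

def check_fmin(logger, label, fmin, tol):
    # Fail the test if the reported minimum exceeds the allowed tolerance.
    if fmin > tol:
        logger.error(f"{label}: fmin: {fmin} > {tol}")
        raise ValueError(f"\n{label}: fmin: {fmin} > {tol}")

# e.g. check_fmin(logger, "Sequential Exec: MADS", OMS, 0.0006)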
"search": search} + data = {"evaluator": eval_callable, "param": param, "options": options, "sampling": sampling, "search": search} - outM: Dict = MADS.main(data) - OM = outM[0]["fmin"][0] + out_mads: Dict = MADS.main(data) + OMS = out_mads[0]["fmin"][0] + + toc = time.perf_counter() + logger.info(f'Completed bbo_2d_sin_const run in {toc - tic:.4f} seconds.\n') + logger.info(f"\nBest known solution: fmin = {0.0989}") + logger.info(f"\nSequential Exec: MADS: fmin = {OMS}") - if (outM[0]["fmin"][0] > 0.0989): - raise ValueError(f"MADS: fmin: {OM} > {0.0989}") + if (out_mads[0]["fmin"][0] > 0.0989): + logger.error(f"Sequential Exec: MADS: fmin: {OMS} > {0.0989}") + raise ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0989}") -def test_MADS_callable_quick_10d(): +def test_callable_quick_10d(): + logger.info('\nStarted running bbo_10d_rosenbrock test...') + tic = time.perf_counter() d = 10 - eval = {"blackbox": rosen} + eval_callable = {"blackbox": rosen} param = {"name": "RB","baseline": [-2.5]*d, "lb": [-5]*d, "ub": [10]*d, "var_names": [f"x{i}" for i in range(d)], "scaling": [15.0]*d, "post_dir": "./post"} - sampling = { - "method": 'ACTIVE', - "ns": int((d+1)*(d+2)/2)+50, - "visualize": False, - "criterion": None - } options = {"seed": 10000, "budget": 10000, "tol": 1e-9, "display": False, "check_cache": True, "store_cache": True, "rich_direction": True, "opportunistic": False, "save_results": False, "isVerbose": False} search = { @@ -146,34 +243,57 @@ def test_MADS_callable_quick_10d(): "visualize": False, "criterion": None } - data = {"evaluator": eval, "param": param, "options": options, "sampling": sampling, "search": search} - outS: Dict = SEARCH.main(data) - OS = outS[0]["fmin"][0] - if (OS > 0.0006): - raise ValueError(f"Search: fmin = {OS} > {0.0006}") + data = {"evaluator": eval_callable, "param": param, "options": options, "sampling": sampling, "search": search} + logger.info('\nStarted running MADS on bbo_10d_rosenbrock serial exectution ...') + ticms = time.perf_counter() + out_mads: Dict = MADS.main(data) + tocms = time.perf_counter() + logger.info(f'Completed serial MADS run on bbo_10d_rosenbrock in {tocms - ticms:.4f} seconds.\n') - outP: Dict = POLL.main(data) - OP = outP[0]["fmin"][0] + ticps = time.perf_counter() + logger.info('\nStarted running POLL on bbo_10d_rosenbrock serial exectution ...') + out_poll: Dict = POLL.main(data) + tocps = time.perf_counter() + logger.info(f'Completed serial POL run on bbo_10d_rosenbrock in {tocps - ticps:.4f} seconds.\n') + + ticss = time.perf_counter() + logger.info('\nStarted running SEARCH on bbo_10d_rosenbrock serial exectution ...') + out_search: Dict = SEARCH.main(data) + tocss = time.perf_counter() + logger.info(f'Completed serial SEARCH run on bbo_10d_rosenbrock in {tocss - ticss:.4f} seconds.\n') + + OSS = out_search[0]["fmin"][0] + OPS = out_poll[0]["fmin"][0] + OMS = out_mads[0]["fmin"][0] - if (OP > 0.25): - raise ValueError(f"POLL: fmin = {OP} > {0.25}") + toc = time.perf_counter() + logger.info(f'Completed bbo_10d_rosenbrock run in {toc - tic:.4f} seconds.') + logger.info(f"\nBest known solution: fmin = {0.}") + logger.info(f"\nSequential Exec: MADS: fmin = {OMS} \nPoll: fmin = {OPS} \nSearch: fmin = {OSS}") + + if (out_mads[0]["fmin"][0] > 0.0006): + logger.error(f"Sequential Exec: MADS: fmin: {OMS} > {0.0006}") + raise ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0006}") + if (out_poll[0]["fmin"][0] > 0.25): + logger.error(f"Sequential Exec: POLL: fmin: {OPS} > {0.25}") + raise ValueError(f"\nSequential 
-def test_MADS_callable_quick_10d():
+def test_callable_quick_10d():
+  logger.info('\nStarted running bbo_10d_rosenbrock test...')
+  tic = time.perf_counter()
   d = 10
-  eval = {"blackbox": rosen}
+  eval_callable = {"blackbox": rosen}
   param = {"name": "RB","baseline": [-2.5]*d,
           "lb": [-5]*d,
           "ub": [10]*d,
           "var_names": [f"x{i}" for i in range(d)],
           "scaling": [15.0]*d,
           "post_dir": "./post"}
-  sampling = {
-    "method": 'ACTIVE',
-    "ns": int((d+1)*(d+2)/2)+50,
-    "visualize": False,
-    "criterion": None
-  }
   options = {"seed": 10000, "budget": 10000, "tol": 1e-9, "display": False, "check_cache": True, "store_cache": True, "rich_direction": True, "opportunistic": False, "save_results": False, "isVerbose": False}
   search = {
@@ -146,34 +243,57 @@
     "visualize": False,
     "criterion": None
   }
-  data = {"evaluator": eval, "param": param, "options": options, "sampling": sampling, "search": search}
-  outS: Dict = SEARCH.main(data)
-  OS = outS[0]["fmin"][0]
-  if (OS > 0.0006):
-    raise ValueError(f"Search: fmin = {OS} > {0.0006}")
+  data = {"evaluator": eval_callable, "param": param, "options": options, "sampling": sampling, "search": search}
+  logger.info('\nStarted running MADS on bbo_10d_rosenbrock serial execution ...')
+  ticms = time.perf_counter()
+  out_mads: Dict = MADS.main(data)
+  tocms = time.perf_counter()
+  logger.info(f'Completed serial MADS run on bbo_10d_rosenbrock in {tocms - ticms:.4f} seconds.\n')
 
-  outP: Dict = POLL.main(data)
-  OP = outP[0]["fmin"][0]
+  ticps = time.perf_counter()
+  logger.info('\nStarted running POLL on bbo_10d_rosenbrock serial execution ...')
+  out_poll: Dict = POLL.main(data)
+  tocps = time.perf_counter()
+  logger.info(f'Completed serial POLL run on bbo_10d_rosenbrock in {tocps - ticps:.4f} seconds.\n')
+
+  ticss = time.perf_counter()
+  logger.info('\nStarted running SEARCH on bbo_10d_rosenbrock serial execution ...')
+  out_search: Dict = SEARCH.main(data)
+  tocss = time.perf_counter()
+  logger.info(f'Completed serial SEARCH run on bbo_10d_rosenbrock in {tocss - ticss:.4f} seconds.\n')
+
+  OSS = out_search[0]["fmin"][0]
+  OPS = out_poll[0]["fmin"][0]
+  OMS = out_mads[0]["fmin"][0]
 
-  if (OP > 0.25):
-    raise ValueError(f"POLL: fmin = {OP} > {0.25}")
+  toc = time.perf_counter()
+  logger.info(f'Completed bbo_10d_rosenbrock run in {toc - tic:.4f} seconds.')
+  logger.info(f"\nBest known solution: fmin = {0.}")
+  logger.info(f"\nSequential Exec: MADS: fmin = {OMS} \nPoll: fmin = {OPS} \nSearch: fmin = {OSS}")
+
+  if (out_mads[0]["fmin"][0] > 0.0006):
+    logger.error(f"Sequential Exec: MADS: fmin: {OMS} > {0.0006}")
+    raise ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0006}")
+  if (out_poll[0]["fmin"][0] > 0.25):
+    logger.error(f"Sequential Exec: POLL: fmin: {OPS} > {0.25}")
+    raise ValueError(f"\nSequential Exec: POLL: fmin: {OPS} > {0.25}")
 
-  outM: Dict = MADS.main(data)
-  OM = outM[0]["fmin"][0]
-  if (OM > 0.0006):
-    raise ValueError(f"MADS: fmin = {OM} > {0.0006}")
+  if (out_search[0]["fmin"][0] > 0.0006):
+    logger.error(f"Sequential Exec: Search: fmin {OSS} > {0.0006}")
    raise ValueError(f"\nSequential Exec: Search: fmin {OSS} > {0.0006}")
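The 2d, 10d, and 20d Rosenbrock tests differ only in dimension and budget. A sketch of a shared builder (a hypothetical helper, not part of the test module) that would keep the three configurations in sync:

def make_rosen_data(d, budget=10000, ns_extra=50):
    # Build the standard data dict consumed by MADS.main / POLL.main / SEARCH.main.
    param = {"name": "RB", "baseline": [-2.5]*d,
             "lb": [-5]*d, "ub": [10]*d,
             "var_names": [f"x{i}" for i in range(d)],
             "scaling": [15.0]*d, "post_dir": "./post"}
    options = {"seed": 10000, "budget": budget, "tol": 1e-9, "display": False}
    search = {"type": "sampling", "s_method": "ACTIVE",
              "ns": int((d+1)*(d+2)/2) + ns_extra, "visualize": False}
    return {"evaluator": {"blackbox": rosen}, "param": param,
            "options": options, "search": search}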
ValueError(f"\nSequential Exec: MADS: fmin: {OMS} > {0.0006}") + + if (out_poll[0]["fmin"][0] > 2.7): + logger.error(f"Sequential Exec: POLL: fmin: {OPS} > {2.7}") + raise ValueError(f"\nSequential Exec: POLL: fmin: {OPS} > {2.7}") + + if (out_search[0]["fmin"][0] > 0.0006): + logger.error(f"Sequential Exec: Search: fmin {OSS} > {0.0006}") + raise ValueError(f"\nSequential Exec: Search: fmin {OSS} > {0.0006}") def test_omads_toy_quick(): - assert POLL.DType - assert POLL.Options - assert POLL.Parameters - assert POLL.Evaluator assert POLL.CandidatePoint - assert POLL.Cache - assert POLL.Dirs2n assert POLL.PrePoll - assert POLL.Output - assert POLL.PostMADS assert POLL.main if importlib.util.find_spec('BMDFO'): @@ -243,6 +360,7 @@ def test_omads_toy_quick(): p_file = os.path.abspath("./tests/bm/unconstrained/rosenbrock.json") p_file_2 = os.path.abspath("./tests/bm/constrained/geom_prog.json") else: + is_win = platform.platform().split('-')[0] == 'Windows' p_file = { "evaluator": { @@ -301,15 +419,17 @@ def test_omads_toy_quick(): "LAMBDA": [1E5, 1E5, 1E5, 1E5, 1E5, 1E5], "RHO": 1.0, "post_dir": "./tests/bm/constrained/post", - "h_max": 0.0 - }, + "h_max": 0.0, + "lhs_search_initialization": True + }, + "options": { "seed": 10000, "budget": 100000, "tol": 1e-12, - "psize_init": 2.0, + "psize_init": 2.0 if is_win else 1.0, "display": False, "opportunistic": False, "check_cache": True, @@ -330,61 +450,36 @@ def test_omads_toy_quick(): } } - POLL.main(p_file) - SEARCH.main(p_file) - MADS.main(p_file) + logger.info('\nStarted running bbo_2d_rosenbrock_VNS test...') + tic = time.perf_counter() + out_search: Dict = SEARCH.main(p_file) + OSS = out_search[0]["fmin"][0] + out_poll: Dict = POLL.main(p_file) + OPS = out_poll[0]["fmin"][0] + out_mads: Dict = MADS.main(p_file) + OMS = out_mads[0]["fmin"][0] - outP = POLL.main(p_file_2) - res = outP[0]["fmin"][0] - if (outP[0]["fmin"][0] > 23.8 and platform.platform().split('-')[0] == 'Windows'): - raise ValueError(f"GP: Poll: fmin = {res} > {23.8}") - + toc = time.perf_counter() + logger.info(f'Completed bbo_2d_rosenbrock_VNS run in {toc - tic:.4f} seconds.') + logger.info(f"\nBest known solution: fmin = {0.}") + logger.info(f"\nSequential Exec: MADS: fmin = {OMS} \nPoll: fmin = {OPS} \nSearch: fmin = {OSS}") - data = { - "evaluator": - { - "blackbox": rosen - }, - - "param": - { - "baseline": [-2.0, -2.0], - "lb": [-5, -5], - "ub": [10, 10], - "var_names": ["x1", "x2"], - "scaling": 10.0, - "post_dir": "./tests/bm/unconstrained/post" - }, - - "options": - { - "seed": 0, - "budget": 1000, - "tol": 1e-12, - "psize_init": 1, - "display": False, - "opportunistic": False, - "check_cache": True, - "store_cache": True, - "collect_y": False, - "rich_direction": True, - "precision": "high", - "save_results": False, - "save_coordinates": False, - "save_all_best": False, - "parallel_mode": False - }, + logger.info('\nStarted running bbo_GP_POLL test...') + tic = time.perf_counter() + out_poll = POLL.main(p_file_2) + res = out_poll[0]["fmin"][0] + + toc = time.perf_counter() + logger.info(f'Completed bbo_GP_POLL run in {toc - tic:.4f} seconds.') + logger.info(f"\nBest known solution: {15} < fmin <= {25}") + logger.info(f"\nSequential Exec: Poll: fmin = {res}") - "search": { - "type": "VNS", - "s_method": "LH", - "ns": 10, - "visualize": False - } - } + if (res > 23.8 ): + logger.error(f"\nSequential Exec: POLL: fmin: {res} > {23.8 }") + raise ValueError(f"\nSequential Exec: POLL: fmin: {res} > {23.8 }") - MADS.main(data) + if __name__ == "__main__": 
freeze_support() diff --git a/tests/test_OMADS_MO_BASIC.py b/tests/test_OMADS_MO_BASIC.py new file mode 100644 index 0000000..c8b55b2 --- /dev/null +++ b/tests/test_OMADS_MO_BASIC.py @@ -0,0 +1,625 @@ +import time +from OMADS import POLL, SEARCH, MADS +import copy +import os +import numpy as np +from typing import Dict, List +from multiprocessing import freeze_support +import platform +import logging + +# Configure the logging +# Create a custom logger +logger = logging.getLogger('OMADS_MO_BBO_unit_test') +logger.setLevel(logging.DEBUG) # Set to DEBUG to capture all messages + +# Create a console handler +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.INFO) # Only log INFO and above to console + +# Create a file handler +file_handler = logging.FileHandler(filename='tests/OMADS_BBO_unit_test.log', mode = 'a') +file_handler.setLevel(logging.DEBUG) # Log all messages to file + +# Create a formatter and set it for handlers +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +console_handler.setFormatter(formatter) +file_handler.setFormatter(formatter) + +# Add handlers to the logger +logger.addHandler(console_handler) +logger.addHandler(file_handler) + +# Example filter to exclude messages from the root logger +class NoRootMessagesFilter(logging.Filter): + def filter(self, record): + return record.name != 'root' + +# Add the filter to handlers +console_handler.addFilter(NoRootMessagesFilter()) +file_handler.addFilter(NoRootMessagesFilter()) + +# logging.basicConfig(level=logging.DEBUG, +# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename='tests/unit_tests_moo.log', filemode='w') + + + +def common_dict(): + outDict: dict = { + "evaluator": + { + "blackbox": None}, + + "param": + { + "baseline": None, + "lb": None, + "ub": None, + "var_names": ["x", "y"], + "fun_names": ["f1", "f2"], + # "constraints_type": ["PB", "PB"], + "nobj": 2, + "isPareto": True, + "scaling": None, + "LAMBDA": [1E5, 1E5], + "RHO": 1.0, + "h_max": np.inf, + "meshType": "GMESH", + "post_dir": None + }, + + "options": + { + "seed": 0, + "budget": 2000, + "tol": 1e-12, + "psize_init": 1, + "display": False, + "opportunistic": False, + "check_cache": True, + "store_cache": True, + "collect_y": False, + "rich_direction": True, + "precision": "medium", + "save_results": True, + "save_coordinates": False, + "save_all_best": False, + "parallel_mode": False + }, + + "search": { + "type": "sampling", + "s_method": "ACTIVE", + "ns": 10, + "visualize": False + }, + } + return outDict + +def MO_Binh_and_Korn(x): + f1 = 4 * x[0]**2 + 4 * x[1]**2 + f2 = (x[0] - 5)**2 + (x[1] - 5)**2 + g1 = (x[0]-5)**2 + x[1]**2 -25 + g2 = 7.7 - (x[0]-8)**2 - (x[1]+3)**2 + + return [[f1, f2], [g1, g2]] + +def MO_Chankong_and_Haimes(x): + f1 = 2 + (x[0]-2)**2 + (x[1]-1)**2 + f2 = 9*x[0]-(x[1]-1)**2 + g1 = x[0]**2 + x[1]**2-225 + g2 = x[0] -3*x[1]+10 + + return [[f1, f2], [g1, g2]] + +def MO_Test_function_4(x): + f1 = x[0]**2-x[1] + f2 = -0.5*x[0]-x[1]-1 + g1 = -(6.5 - (x[0]/6) - x[1]) + g2 = -(7.5 - 0.5 *x[0] -x[1]) + g3 = -(30 - 5*x[0] -x[1]) + + return [[f1, f2], [g1, g2, g3]] + +def MO_Kursawe(x): + f1 = sum([-10*np.exp(-0.2*np.sqrt(x[i]**2 + x[i+1]**2)) for i in range(2)]) + f2 = sum([abs(x[i])**0.8 + 5*np.sin(x[i]**3) for i in range(3)]) + + return [[f1, f2], [0]] + +def MO_Fonseca_Fleming(x): + n = len(x) + f1 = 1 - np.exp(-sum([(x[i]-(1/np.sqrt(n)))**2 for i in range(n)])) + f2 = 1 - np.exp(-sum([(x[i]+(1/np.sqrt(n)))**2 for i in range(n)])) + + return [[f1, f2], 
+  return [[f1, f2], [0]]
+
+def MO_Osyczka_Kundu(x):
+  f1 = -25*(x[0]-2)**2 - (x[1]-2)**2 - (x[2]-1)**2 - (x[3]-4)**2 - (x[4]-1)**2
+  f2 = sum([x[i]**2 for i in range(6)])
+
+  g1 = x[0] + x[1] - 2
+  g2 = 6 - x[0] - x[1]
+  g3 = 2 - x[1] + x[0]
+  g4 = 2 - x[0] + 3*x[1]
+  g5 = 4 - (x[2]-3)**2 - x[3]
+  g6 = (x[4]-3)**2 + x[5] - 4
+
+  return [[f1, f2], [-g1, -g2, -g3, -g4, -g5, -g6]]
+
+def MO_CTP1(x):
+  f1 = x[0]
+  f2 = (1+x[1])*np.exp(-(x[0])/(1+x[1]))
+  g1 = 1 - ((f2)/(0.858*np.exp(-0.541*f1)))
+  g2 = 1 - (f2/(0.728*np.exp(-0.295*f1)))
+
+  return [[f1, f2], [g1, g2]]
+
+def MO_Ex(x):
+  f1 = x[0]
+  f2 = (1+x[1])/x[0]
+
+  g1 = 6 - (x[1]+9*x[0])
+  g2 = 1 + x[1] - 9*x[0]
+
+  return [[f1, f2], [g1, g2]]
+
+def MO_ZDT1(x):
+  f1 = x[0]  # objective 1
+  g = 1 + 9 * np.sum(np.divide(x[1:len(x)], (len(x) - 1)))
+  h = 1 - np.sqrt(f1 / g)
+  f2 = g * h  # objective 2
+
+  return [[f1, f2], [0]]
+
+def MO_ZDT3(x):
+  f1 = x[0]  # objective 1
+  g = 1 + (9/(len(x) - 1)) * np.sum(x[1:len(x)])
+  h = 1 - np.sqrt(f1 / g) - (f1/g)*np.sin(10*np.pi*f1)
+  f2 = g * h  # objective 2
+
+  return [[f1, f2], [0]]
+
+def MO_ZDT4(x):
+  f1 = x[0]  # objective 1
+  g = 1 + 10*(len(x)-1) + np.sum([x[i]**2 - 10*np.cos(4*np.pi*x[i]) for i in range(1, len(x))])
+  h = 1 - np.sqrt(f1 / g)
+  f2 = g * h  # objective 2
+
+  return [[f1, f2], [0]]
+
+def MO_ZDT6(x):
+  f1 = 1 - np.exp(-4*x[0]) * np.sin(6*np.pi*x[0])**6
+  g = 1 + 9*(sum(x[1:len(x)])/9)**.25
+  h = 1 - (f1/g)**2
+  f2 = g * h  # objective 2
+
+  return [[f1, f2], [0]]
+
+def test_MO_Binh_and_Korn():
+  logger.info('\nStarted running MO_Binh_and_Korn test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Binh_and_Korn
+  data["param"]["name"] = "Binh_and_Korn"
+  data["param"]["baseline"] = [0, 0]
+  data["param"]["lb"] = [0, 0]
+  data["param"]["ub"] = [5, 3]
+  data["param"]["constraints_type"] = ["PB", "PB"]
+  data["param"]["scaling"] = [5, 3]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Binh_and_Korn/post"
+  data["options"]["budget"] = 500
+  data["param"]["ref_point"] = [140, 50]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Binh_and_Korn run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.8}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.79}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.8}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.8 or SHV < 0.79 or MHV < 0.8:
+    logger.error("The MO_Binh_and_Korn QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Binh_and_Korn QA test completed but failed.")
+  else:
+    logger.info("The MO_Binh_and_Korn QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
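The HV values asserted in these QA tests are hypervolume indicators of the returned Pareto front relative to ref_point, as reported by OMADS in the output dict (out["HV"] above); the exact scaling OMADS applies is not shown here. For intuition, a minimal unscaled 2-objective hypervolume (both objectives minimized) can be computed as a sum of rectangle areas:

def hypervolume_2d(front, ref):
    # Area dominated by the front and bounded above by the reference point.
    pts = sorted(p for p in front if p[0] <= ref[0] and p[1] <= ref[1])
    hv, prev_f2 = 0.0, ref[1]
    for f1, f2 in pts:
        if f2 < prev_f2:
            hv += (ref[0] - f1) * (prev_f2 - f2)
            prev_f2 = f2
    return hv

# hypervolume_2d([(0.0, 1.5), (1.0, 0.5)], ref=(2.0, 2.0)) -> 2.0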
+def test_MO_Chankong_and_Haimes():
+  logger.info('\nStarted running MO_Chankong_and_Haimes test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Chankong_and_Haimes
+  data["param"]["name"] = "Chankong_and_Haimes"
+  data["param"]["baseline"] = [0, 0]
+  data["param"]["lb"] = [-20, -20]
+  data["param"]["ub"] = [20, 20]
+  data["param"]["constraints_type"] = ["PB", "PB"]
+  data["param"]["scaling"] = [40, 40]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Chankong_and_Haimes/post"
+  data["options"]["budget"] = 500
+  data["param"]["ref_point"] = [275, 0.1]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Chankong_and_Haimes run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.8}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.6}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.8}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.8 or SHV < 0.6 or MHV < 0.8:
+    logger.error("The MO_Chankong_and_Haimes QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Chankong_and_Haimes QA test completed but failed.")
+  else:
+    logger.info("The MO_Chankong_and_Haimes QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+def test_MO_Fonseca_Fleming():
+  logger.info('\nStarted running MO_Fonseca_Fleming test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Fonseca_Fleming
+  data["param"]["name"] = "Fonseca_Fleming"
+  data["param"]["baseline"] = [0, 0]
+  data["param"]["lb"] = [-4, -4]
+  data["param"]["ub"] = [4, 4]
+  data["meshType"] = "GMESH"
+  # data["param"]["constraints_type"] = ["EB"]
+  data["param"]["scaling"] = [8, 8]
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/Fonseca_Fleming/post"
+  data["options"]["budget"] = 500
+  data["param"]["ref_point"] = [1, 1]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Fonseca_Fleming run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.34}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.34}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.35}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.34 or SHV < 0.34 or MHV < 0.35:
+    logger.error("The MO_Fonseca_Fleming QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Fonseca_Fleming QA test completed but failed.")
+  else:
+    logger.info("The MO_Fonseca_Fleming QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
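All constrained benchmarks here follow the g(x) <= 0 feasibility convention (note how MO_Osyczka_Kundu negates its g's to match). A quick feasibility probe for MO_Binh_and_Korn under that convention, with the arithmetic spelled out (illustrative only):

objs, cons = MO_Binh_and_Korn([2.0, 2.0])
feasible = all(g <= 0 for g in cons)
# g1 = (2-5)**2 + 2**2 - 25 = -12 <= 0
# g2 = 7.7 - (2-8)**2 - (2+3)**2 = -53.3 <= 0  -> feasible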
+def test_MO_Test_function_4():
+  logger.info('\nStarted running MO_Test_function_4 test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Test_function_4
+  data["param"]["name"] = "Test_function_4"
+  data["param"]["baseline"] = [0, 0]  # [3, 3]
+  data["param"]["lb"] = [-7, -7]
+  data["param"]["ub"] = [4, 4]
+  data["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB", "PB"]
+  data["param"]["scaling"] = [10, 10]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Test_function_4/post"
+  data["options"]["budget"] = 500
+  data["param"]["ref_point"] = [12, -5]
+
+  p_out, _ = POLL.main(data)
+  m_out, _ = MADS.main(data)
+  s_out, _ = SEARCH.main(data)
+
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Test_function_4 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.6}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.4}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.6}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.6 or SHV < 0.4 or MHV < 0.6:
+    logger.error("The MO_Test_function_4 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Test_function_4 QA test completed but failed.")
+  else:
+    logger.info("The MO_Test_function_4 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+def test_MO_Kursawe():
+  # TODO: uncon logic needs review
+  logger.info('\nStarted running MO_Kursawe test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Kursawe
+  data["param"]["name"] = "Kursawe"
+  # data["param"]["baseline"] = [-2.0, 0.5, -4.5]
+  data["param"]["baseline"] = [-2.0, -0.5, -5]
+  data["param"]["var_names"] = ['x1', 'x2', 'x3']
+  data["param"]["lb"] = [-5, -5, -5]
+  data["param"]["ub"] = [5, 5, 5]
+  # data["param"]["LAMBDA"] = None
+  # data["param"]["RHO"] = 1
+  # data["param"]["h_max"] = 0
+  data["meshType"] = "GMESH"
+  # data["param"]["constraints_type"] = ["PB"]
+  data["param"]["scaling"] = [10, 10, 10]
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/Kursawe/post"
+  data["options"]["budget"] = 1000
+  data["param"]["ref_point"] = [-14, 1]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Kursawe run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.64}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.5}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.45}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.64 or SHV < 0.5 or MHV < 0.45:
+    logger.error("The MO_Kursawe QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Kursawe QA test completed but failed.")
+  else:
+    logger.info("The MO_Kursawe QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+def test_MO_Osyczka_Kundu():
+  # COMPLETED: Investigate why starting from an infeasible point does not work in MOO
+  logger.info('\nStarted running MO_Osyczka_Kundu test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Osyczka_Kundu
+  data["param"]["name"] = "Osyczka_Kundu"
+  data["param"]["baseline"] = [3, 2, 2, 0, 5, 10]
+  # data["param"]["baseline"] = [5, 1, 5, 0, 5, 8]
+  data["param"]["var_names"] = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6']
+  data["param"]["lb"] = [0, 0, 1, 0, 1, 0]
+  data["param"]["ub"] = [10, 10, 5, 6, 5, 10]
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]*6
+  data["param"]["scaling"] = [10, 10, 4, 6, 4, 10]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Osyczka_Kundu/post"
+  data["options"]["budget"] = 10000
+  data["options"]["seed"] = 1234
+  data["param"]["ref_point"] = [-50, 80]
+  is_win = platform.platform().split('-')[0] == 'Windows'
+  data["param"]["lhs_search_initialization"] = False if is_win else True
+
+  data["search"]["ns"] = 50
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  data["search"]["ns"] = 150
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Osyczka_Kundu run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {1.3}\n poll step HV_obtained = {PHV}\n search step HV_expected = {1.0}\n search step HV_obtained = {SHV}\n MADS HV_expected = {1.15}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 1.3 or SHV < 1.0 or MHV < 1.15:
+    logger.error("The MO_Osyczka_Kundu QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Osyczka_Kundu QA test completed but failed.")
+  else:
+    logger.info("The MO_Osyczka_Kundu QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+def test_MO_CTP1():
+  logger.info('\nStarted running MO_CTP1 test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_CTP1
+  data["param"]["name"] = "MO_CTP1"
+  data["param"]["baseline"] = [0.5, 0.5]
+  data["param"]["var_names"] = ['x1', 'x2']
+  data["param"]["lb"] = [0, 0]
+  data["param"]["ub"] = [1, 1]
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]*2
+  data["param"]["scaling"] = [1, 1]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/MO_CTP1/post"
+  data["options"]["budget"] = 500
+  data["search"]["ns"] = 50
+  data["param"]["ref_point"] = [1, 1]
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_CTP1 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.6}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.6}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.65}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.6 or SHV < 0.6 or MHV < 0.65:
+    logger.error("The MO_CTP1 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_CTP1 QA test completed but failed.")
+  else:
+    logger.info("The MO_CTP1 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+def test_MO_Ex():
+  logger.info('\nStarted running MO_Ex test... \n')
+  tic = time.perf_counter()
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_Ex
+  data["param"]["name"] = "Ex"
+  data["param"]["baseline"] = [0.6, 2.5]
+  data["param"]["var_names"] = ['x1', 'x2']
+  data["param"]["lb"] = [0.1, 0]
+  data["param"]["ub"] = [1, 5]
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]*2
+  data["param"]["scaling"] = [0.9, 5]
+  data["param"]["post_dir"] = "./tests/bm/MOO/constrained/Ex/post"
+  data["options"]["budget"] = 1000
+  data["search"]["ns"] = 15
+  data["param"]["ref_point"] = [1, 9]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_Ex run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {1.2}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.8}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.8}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 1.2 or SHV < 0.8 or MHV < 0.8:
+    logger.error("The MO_Ex QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_Ex QA test completed but failed.")
+  else:
+    logger.info("The MO_Ex QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+def test_MO_ZDT1():
+  logger.info('\nStarted running MO_ZDT1 test... \n')
+  tic = time.perf_counter()
+  d = 30
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_ZDT1
+  data["param"]["name"] = "MO_ZDT1"
+  np.random.seed(seed=12345)
+  data["param"]["baseline"] = np.random.rand(d)
+  data["param"]["var_names"] = [f'x{i}' for i in range(d)]
+  data["param"]["lb"] = [0]*d
+  data["param"]["ub"] = [1]*d
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]
+  data["param"]["scaling"] = [1]*d
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT1/post"
+  data["options"]["budget"] = 10000
+  data["param"]["ref_point"] = [1, 1]
+
+  p_out, _ = POLL.main(data)
+  data["options"]["budget"] = 500
+  s_out, _ = SEARCH.main(data)
+  data["options"]["budget"] = 10000
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_ZDT1 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.62}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.7}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.62}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.62 or SHV < 0.7 or MHV < 0.62:
+    logger.error("The MO_ZDT1 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_ZDT1 QA test completed but failed.")
+  else:
+    logger.info("The MO_ZDT1 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
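ZDT1's Pareto front is known analytically (attained at x[1:] = 0, so g = 1 and f2 = 1 - sqrt(f1)), which is what makes it a useful QA benchmark. A short sketch for generating reference front points to compare against the returned front:

import numpy as np

f1 = np.linspace(0.0, 1.0, 101)
f2 = 1.0 - np.sqrt(f1)             # true front: g(x) = 1
true_front = np.column_stack((f1, f2))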
+def test_MO_ZDT3():
+  logger.info('\nStarted running MO_ZDT3 test... \n')
+  tic = time.perf_counter()
+  d = 30
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_ZDT3
+  data["param"]["name"] = "MO_ZDT3"
+  np.random.seed(seed=12345)
+  data["param"]["baseline"] = np.random.rand(d)
+  data["param"]["var_names"] = [f'x{i}' for i in range(d)]
+  data["param"]["lb"] = [0]*d
+  data["param"]["ub"] = [1]*d
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]
+  data["param"]["scaling"] = [1]*d
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT3/post"
+  data["options"]["budget"] = 10000
+  data["search"]["ns"] = 50
+  data["param"]["ref_point"] = [1, 1]
+
+  p_out, _ = POLL.main(data)
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_ZDT3 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.6}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.7}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.6}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.6 or SHV < 0.7 or MHV < 0.6:
+    logger.error("The MO_ZDT3 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_ZDT3 QA test completed but failed.")
+  else:
+    logger.info("The MO_ZDT3 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+def test_MO_ZDT4():
+  logger.info('\nStarted running MO_ZDT4 test... \n')
+  tic = time.perf_counter()
+  d = 10
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_ZDT4
+  data["param"]["name"] = "MO_ZDT4"
+  np.random.seed(seed=12345)
+  data["param"]["baseline"] = np.random.rand(1).tolist() + np.random.uniform(low=-10, high=10, size=(d-1,)).tolist()
+  data["param"]["var_names"] = [f'x{i}' for i in range(d)]
+  data["param"]["lb"] = [0] + [-10]*(d-1)
+  data["param"]["ub"] = [1] + [10]*(d-1)
+  data["param"]["meshType"] = "GMESH"
+  data["param"]["constraints_type"] = ["PB"]
+  data["param"]["scaling"] = [1] + [20]*(d-1)
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT4/post"
+  data["options"]["budget"] = 2000  # 40000
+  data["search"]["ns"] = 55
+  data["param"]["ref_point"] = [1, 1.2]
+  data["param"]["lhs_search_initialization"] = True
+
+  m_out, _ = MADS.main(data)
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_ZDT4 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n MADS HV_expected = {0.8}\n MADS HV_obtained = {MHV}\n")
+  if MHV < 0.8:
+    logger.error("The MO_ZDT4 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_ZDT4 QA test completed but failed.")
+  else:
+    logger.info("The MO_ZDT4 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
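The np.random.seed(12345) calls above reseed NumPy's global state, which keeps baselines reproducible but couples tests that run in the same process. A local generator (a sketch of an alternative, not a change to the tests) avoids the global side effect:

rng = np.random.default_rng(12345)
baseline = rng.random(d)   # same reproducibility, no shared global state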
+def test_MO_ZDT6():
+  logger.info('\nStarted running MO_ZDT6 test... \n')
+  tic = time.perf_counter()
+  d = 10
+  data = common_dict()
+  data["evaluator"]["blackbox"] = MO_ZDT6
+  data["param"]["name"] = "MO_ZDT6"
+  np.random.seed(seed=12345)
+  data["param"]["baseline"] = np.random.rand(d)
+  data["param"]["var_names"] = [f'x{i}' for i in range(d)]
+  data["param"]["lb"] = [0]*d
+  data["param"]["ub"] = [1]*d
+
+  data["param"]["constraints_type"] = ["PB"]
+  data["param"]["scaling"] = [1]*d
+  data["param"]["post_dir"] = "./tests/bm/MOO/unconstrained/MO_ZDT6/post"
+
+  data["search"]["ns"] = 100
+  data["param"]["ref_point"] = [1, 1.2]
+  data["options"]["budget"] = 10000
+  data["param"]["meshType"] = "GMESH"
+  p_out, _ = POLL.main(data)
+  data["options"]["budget"] = 500
+  data["param"]["meshType"] = "OMESH"
+  s_out, _ = SEARCH.main(data)
+  m_out, _ = MADS.main(data)
+  PHV = p_out["HV"]
+  SHV = s_out["HV"]
+  MHV = m_out["HV"]
+
+  toc = time.perf_counter()
+  logger.info(f'Completed MO_ZDT6 run in {toc - tic:.4f} seconds.\n')
+  logger.info(f"Hypervolume indicators:\n poll step HV_expected = {0.4}\n poll step HV_obtained = {PHV}\n search step HV_expected = {0.1}\n search step HV_obtained = {SHV}\n MADS HV_expected = {0.9}\n MADS HV_obtained = {MHV}\n")
+  if PHV < 0.4 or SHV < 0.1 or MHV < 0.9:
+    logger.error("The MO_ZDT6 QA test failed: \n the hypervolume indicators obtained do not meet the success criteria \n")
+    raise IOError("The MO_ZDT6 QA test completed but failed.")
+  else:
+    logger.info("The MO_ZDT6 QA test passed successfully: \n the hypervolume indicators obtained meet the success criteria \n")
+
+
+if __name__ == "__main__":
+  freeze_support()
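Both test modules end with the freeze_support() guard because several tests enable options["parallel_mode"]: on Windows, multiprocessing re-imports the main module in each worker, so any module-level work must stay behind the guard. A minimal pattern (sketch; the direct test call is illustrative):

from multiprocessing import freeze_support

if __name__ == "__main__":
    freeze_support()           # no-op unless the script is frozen on Windows
    test_MO_Binh_and_Korn()    # safe place to invoke parallel-mode tests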