pyproject.toml (1 addition, 1 deletion)
@@ -39,7 +39,6 @@ dependencies = [
     "scipy <2.0.0",
     "numpy >=1.18.1, <3.0.0",
     "pandas <3.0.0",
-    "scikit-learn >=0.21, !=0.23.*",
     "tqdm >=4.48.0, <5.0.0",
 ]

@@ -53,6 +52,7 @@ test = [
     "pytest == 8.3.5",
     "flake8",
     "pytest-cov",
+    "scikit-learn >=0.21, !=0.23.*",
     "surfaces",
 ]

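With scikit-learn moved from the core dependencies into the test extra, a plain install of the package must import cleanly without sklearn present. A minimal sketch of how to check that in a clean environment (the session is hypothetical; only the package name is from this repo):

```python
# Hypothetical check: the core package should import even when
# scikit-learn is absent, since the optimizers below now import it lazily.
import importlib.util

print("sklearn installed:", importlib.util.find_spec("sklearn") is not None)

import gradient_free_optimizers  # expected to succeed either way after this PR
print("core import OK:", gradient_free_optimizers.__name__)
```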
@@ -6,7 +6,6 @@
 
 from ..search import Search
 from ..optimizers import BayesianOptimizer as _BayesianOptimizer
-from ..optimizers.smb_opt.bayesian_optimization import gaussian_process
 
 
 class BayesianOptimizer(_BayesianOptimizer, Search):
@@ -61,7 +60,7 @@ def __init__(
         max_sample_size: int = 10000000,
         sampling: Dict[Literal["random"], int] = {"random": 1000000},
         replacement: bool = True,
-        gpr=gaussian_process["gp_nonlinear"],
+        gpr=None,
         xi: float = 0.03,
     ):
         super().__init__(
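For callers of this wrapper the change is backward compatible: omitting gpr now selects the default surrogate lazily inside the underlying optimizer, and passing one explicitly still works. A hypothetical usage sketch (objective and search space invented for illustration; MyGPR is a placeholder):

```python
import numpy as np
from gradient_free_optimizers import BayesianOptimizer

def objective(para):
    return -(para["x"] ** 2)  # toy objective with its maximum at x = 0

search_space = {"x": np.arange(-10, 10, 0.1)}

opt = BayesianOptimizer(search_space)  # gpr=None -> default GPR built on demand
opt.search(objective, n_iter=30)
# opt = BayesianOptimizer(search_space, gpr=MyGPR())  # hypothetical custom surrogate
```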
@@ -3,20 +3,12 @@
 # License: MIT License
 
 import numpy as np
-from scipy.stats import norm
 
 from ..smb_opt.smbo import SMBO
 from ..smb_opt.surrogate_models import EnsembleRegressor
 from ..smb_opt.acquisition_function import ExpectedImprovement
 
 
-from sklearn.tree import DecisionTreeRegressor
-from sklearn.ensemble import GradientBoostingRegressor
-from sklearn.svm import SVR
-from sklearn.gaussian_process import GaussianProcessRegressor
-from sklearn.neural_network import MLPRegressor
-
-
 def normalize(array):
     num = array - array.min()
     den = array.max() - array.min()
@@ -41,12 +33,7 @@ def __init__(
         epsilon=0.03,
         distribution="normal",
         n_neighbours=3,
-        estimators=[
-            GradientBoostingRegressor(n_estimators=5),
-            # DecisionTreeRegressor(),
-            # MLPRegressor(),
-            GaussianProcessRegressor(),
-        ],
+        estimators=None,
         xi=0.01,
         warm_start_smbo=None,
         max_sample_size=10000000,

[Author, commenting on the removed estimators default] Mutable defaults should never be used - a general Python thing, not specific to the sklearn isolation. (A short sketch of the pitfall follows this file's diff.)
@@ -71,13 +58,27 @@ def __init__(
             replacement=replacement,
         )
         self.estimators = estimators
-        self.regr = EnsembleRegressor(estimators)
         self.xi = xi
         self.warm_start_smbo = warm_start_smbo
         self.max_sample_size = max_sample_size
         self.sampling = sampling
         self.warnings = warnings
 
+        if estimators is None:
+            from sklearn.ensemble import GradientBoostingRegressor
+            from sklearn.gaussian_process import GaussianProcessRegressor
+
+            self._estimators = [
+                GradientBoostingRegressor(n_estimators=5),
+                # DecisionTreeRegressor(),
+                # MLPRegressor(),
+                GaussianProcessRegressor(),
+            ]
+        else:
+            self._estimators = estimators
+
+        self.regr = EnsembleRegressor(self._estimators)
+
         self.init_warm_start_smbo()
 
     def finish_initialization(self):
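The pitfall the review comment points at, reduced to a minimal sketch (illustrative names, not library code): a mutable default is built once at function-definition time and shared across every call, while a None sentinel defers construction to call time, which is also what keeps the sklearn import out of module import.

```python
# Illustrative only: shared-mutable-default pitfall vs. the None sentinel.

def shared_default(items=[]):     # list built once, at def time
    items.append(1)
    return items

def fresh_default(items=None):    # nothing built until the call
    if items is None:
        items = []
    items.append(1)
    return items

print(shared_default())  # [1]
print(shared_default())  # [1, 1]  <- state leaks between calls
print(fresh_default())   # [1]
print(fresh_default())   # [1]    <- fresh list each call
```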
@@ -3,18 +3,13 @@
 # License: MIT License
 
 import numpy as np
-from scipy.stats import norm
 
 
 from .smbo import SMBO
-from .surrogate_models import (
-    GPR_linear,
-    GPR,
-)
 from .acquisition_function import ExpectedImprovement
 
 
-gaussian_process = {"gp_nonlinear": GPR(), "gp_linear": GPR_linear()}
+# gaussian_process = {"gp_nonlinear": GPR(), "gp_linear": GPR_linear()}
 
 
 def normalize(array):
@@ -46,9 +41,9 @@ def __init__(
         nth_process=None,
         warm_start_smbo=None,
         max_sample_size=10000000,
-        sampling={"random": 1000000},
+        sampling=None,
         replacement=True,
-        gpr=gaussian_process["gp_nonlinear"],
+        gpr=None,
         xi=0.03,
     ):
         super().__init__(
@@ -65,7 +60,13 @@ def __init__(
         )
 
         self.gpr = gpr
-        self.regr = gpr
+        if gpr is None:
+            from gradient_free_optimizers.optimizers.smb_opt.surrogate_models import GPR
+            self._gpr = GPR()
+        else:
+            self._gpr = gpr
+
+        self.regr = self._gpr
         self.xi = xi
 
     def finish_initialization(self):
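The same deferral idiom in isolation, with stand-in names (nothing below is library code): the raw argument stays on self.gpr while the resolved value lands on self._gpr, which matches scikit-learn's convention that __init__ should not mutate its parameters, so the caller's value stays inspectable.

```python
# Sketch with stand-in names; the deferred import only runs when the
# default is actually needed, keeping module import cheap.
class LazyDefaultOptimizer:
    def __init__(self, gpr=None):
        self.gpr = gpr                # raw constructor argument, untouched
        if gpr is None:
            from statistics import mean as default_gpr  # stand-in heavy import
            self._gpr = default_gpr
        else:
            self._gpr = gpr
        self.regr = self._gpr         # internal code uses the resolved value

assert LazyDefaultOptimizer(gpr=42).regr == 42  # explicit value passes through
```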
@@ -53,7 +53,7 @@ def __init__(
         nth_process=None,
         warm_start_smbo=None,
         max_sample_size=10000000,
-        sampling={"random": 1000000},
+        sampling=None,
         replacement=True,
         tree_regressor="extra_tree",
         tree_para={"n_estimators": 100},
src/gradient_free_optimizers/optimizers/smb_opt/smbo.py (9 additions, 4 deletions)
@@ -23,7 +23,7 @@ def __init__(
         nth_process=None,
         warm_start_smbo=None,
         max_sample_size=10000000,
-        sampling={"random": 1000000},
+        sampling=None,
         replacement=True,
     ):
         super().__init__(
@@ -40,6 +40,11 @@
         self.sampling = sampling
         self.replacement = replacement
 
+        if sampling is None:
+            self._sampling_dict = {"random": 1000000}
+        else:
+            self._sampling_dict = sampling
+
         self.sampler = InitialSampler(self.conv, max_sample_size)
 
         self.init_warm_start_smbo(warm_start_smbo)
@@ -97,13 +102,13 @@ def wrapper(self, score):
         return wrapper
 
     def _sampling(self, all_pos_comb):
-        if self.sampling is False:
+        if self._sampling_dict is False:
            return all_pos_comb
-        elif "random" in self.sampling:
+        elif "random" in self._sampling_dict:
            return self.random_sampling(all_pos_comb)
 
     def random_sampling(self, pos_comb):
-        n_samples = self.sampling["random"]
+        n_samples = self._sampling_dict["random"]
         n_pos_comb = pos_comb.shape[0]
 
         if n_pos_comb <= n_samples:
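The resolved value keeps all three accepted forms of sampling working. A standalone restatement of the rule (illustrative function, not library code):

```python
# Illustrative restatement of the sampling resolution in SMBO.__init__:
# None selects the package default, False disables subsampling entirely,
# and a dict passes through unchanged.
def resolve_sampling(sampling):
    if sampling is None:
        return {"random": 1000000}
    return sampling

assert resolve_sampling(None) == {"random": 1000000}
assert resolve_sampling(False) is False  # _sampling() then returns all combinations
assert resolve_sampling({"random": 5000}) == {"random": 5000}
```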
@@ -5,7 +5,6 @@
 
 import numpy as np
 
-from sklearn.neighbors import KernelDensity
 from .smbo import SMBO
 
 
@@ -46,6 +45,8 @@ def __init__(
 
         self.gamma_tpe = gamma_tpe
 
+        from sklearn.neighbors import KernelDensity
+
         kde_para = {
             "kernel": "gaussian",
             "bandwidth": 1,