diff --git a/flaml/searcher/suggestion.py b/flaml/searcher/suggestion.py index a0e488e8f..ff20690f0 100644 --- a/flaml/searcher/suggestion.py +++ b/flaml/searcher/suggestion.py @@ -18,11 +18,8 @@ This source file is adapted here because ray does not fully support Windows. Copyright (c) Microsoft Corporation. ''' import copy -import glob import logging -import os -import time -from typing import Dict, Optional, Union, List, Tuple +from typing import Any, Dict, Optional, Union, List, Tuple import pickle from .variant_generator import parse_spec_vars from ..tune.sample import Categorical, Domain, Float, Integer, LogUniform, \ @@ -51,12 +48,6 @@ UNDEFINED_METRIC_MODE = str( "or pass them to `tune.run()`.") -_logged = set() -_disabled = False -_periodic_log = False -_last_logged = 0.0 - - class Searcher: """Abstract class for wrapping suggesting algorithms. Custom algorithms can extend this class easily by overriding the @@ -341,23 +332,14 @@ class ConcurrencyLimiter(Searcher): try: import optuna as ot + from optuna.trial import TrialState as OptunaTrialState from optuna.samplers import BaseSampler except ImportError: ot = None + OptunaTrialState = None BaseSampler = None -class _Param: - def __getattr__(self, item): - def _inner(*args, **kwargs): - return (item, args, kwargs) - - return _inner - - -param = _Param() - - # (Optional) Default (anonymous) metric when using tune.report(x) DEFAULT_METRIC = "_metric" @@ -395,13 +377,21 @@ class OptunaSearch(Searcher): configurations. sampler (optuna.samplers.BaseSampler): Optuna sampler used to draw hyperparameter configurations. Defaults to ``TPESampler``. - seed (int): The random seed for the sampler + seed (int): Seed to initialize sampler with. This parameter is only + used when ``sampler=None``. In all other cases, the sampler + you pass should be initialized with the seed already. + evaluated_rewards (list): If you have previously evaluated the + parameters passed in as points_to_evaluate you can avoid + re-running those trials by passing in the reward attributes + as a list so the optimiser can be told the results without + needing to re-compute the trial. Must be the same length as + points_to_evaluate. Tune automatically converts search spaces to Optuna's format: .. code-block:: python from ray.tune.suggest.optuna import OptunaSearch config = { "a": tune.uniform(6, 8) - "b": tune.uniform(10, 20) + "b": tune.loguniform(1e-4, 1e-2) } optuna_search = OptunaSearch( metric="loss", @@ -410,12 +400,13 @@ class OptunaSearch(Searcher): If you would like to pass the search space manually, the code would look like this: .. code-block:: python - from ray.tune.suggest.optuna import OptunaSearch, param - space = [ - param.suggest_uniform("a", 6, 8), - param.suggest_uniform("b", 10, 20) - ] - algo = OptunaSearch( + from ray.tune.suggest.optuna import OptunaSearch + import optuna + config = { + "a": optuna.distributions.UniformDistribution(6, 8), + "b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2), + } + optuna_search = OptunaSearch( space, metric="loss", mode="min") @@ -429,7 +420,8 @@ class OptunaSearch(Searcher): mode: Optional[str] = None, points_to_evaluate: Optional[List[Dict]] = None, sampler: Optional[BaseSampler] = None, - seed: Optional[int] = None): + seed: Optional[int] = None, + evaluated_rewards: Optional[List] = None): assert ot is not None, ( "Optuna must be installed! 
Run `pip install optuna`.") super(OptunaSearch, self).__init__( @@ -443,22 +435,39 @@ class OptunaSearch(Searcher): if domain_vars or grid_vars: logger.warning( UNRESOLVED_SEARCH_SPACE.format( - par="space", cls=type(self))) + par="space", cls=type(self).__name__)) space = self.convert_search_space(space) + else: + # Flatten to support nested dicts + space = flatten_dict(space, "/") + + # Deprecate: 1.5 + if isinstance(space, list): + logger.warning( + "Passing lists of `param.suggest_*()` calls to OptunaSearch " + "as a search space is deprecated and will be removed in " + "a future release of Ray. Please pass a dict mapping " + "to `optuna.distributions` objects instead.") self._space = space - self._points_to_evaluate = points_to_evaluate + self._points_to_evaluate = points_to_evaluate or [] + self._evaluated_rewards = evaluated_rewards self._study_name = "optuna" # Fixed study name for in-memory storage + + if sampler and seed: + logger.warning( + "You passed an initialized sampler to `OptunaSearch`. The " + "`seed` parameter has to be passed to the sampler directly " + "and will be ignored.") + self._sampler = sampler or ot.samplers.TPESampler(seed=seed) + assert isinstance(self._sampler, BaseSampler), \ "You can only pass an instance of `optuna.samplers.BaseSampler` " \ "as a sampler to `OptunaSearcher`." - self._pruner = ot.pruners.NopPruner() - self._storage = ot.storages.InMemoryStorage() - self._ot_trials = {} self._ot_study = None if self._space: @@ -469,14 +478,26 @@ class OptunaSearch(Searcher): # If only a mode was passed, use anonymous metric self._metric = DEFAULT_METRIC + pruner = ot.pruners.NopPruner() + storage = ot.storages.InMemoryStorage() + self._ot_study = ot.study.create_study( - storage=self._storage, + storage=storage, sampler=self._sampler, - pruner=self._pruner, + pruner=pruner, study_name=self._study_name, direction="minimize" if mode == "min" else "maximize", load_if_exists=True) + if self._points_to_evaluate: + if self._evaluated_rewards: + for point, reward in zip(self._points_to_evaluate, + self._evaluated_rewards): + self.add_evaluated_point(point, reward) + else: + for point in self._points_to_evaluate: + self._ot_study.enqueue_trial(point) + def set_search_properties(self, metric: Optional[str], mode: Optional[str], config: Dict) -> bool: if self._space: @@ -503,22 +524,28 @@ class OptunaSearch(Searcher): metric=self._metric, mode=self._mode)) - if trial_id not in self._ot_trials: - ot_trial_id = self._storage.create_new_trial( - self._ot_study._study_id) - self._ot_trials[trial_id] = ot.trial.Trial(self._ot_study, - ot_trial_id) - ot_trial = self._ot_trials[trial_id] + if isinstance(self._space, list): + # Keep for backwards compatibility + # Deprecate: 1.5 + if trial_id not in self._ot_trials: + self._ot_trials[trial_id] = self._ot_study.ask() + + ot_trial = self._ot_trials[trial_id] - if self._points_to_evaluate: - params = self._points_to_evaluate.pop(0) - else: # getattr will fetch the trial.suggest_ function on Optuna trials params = { args[0] if len(args) > 0 else kwargs["name"]: getattr( ot_trial, fn)(*args, **kwargs) for (fn, args, kwargs) in self._space } + else: + # Use Optuna ask interface (since version 2.6.0) + if trial_id not in self._ot_trials: + self._ot_trials[trial_id] = self._ot_study.ask( + fixed_distributions=self._space) + ot_trial = self._ot_trials[trial_id] + params = ot_trial.params + return unflatten_dict(params) def on_trial_result(self, trial_id: str, result: Dict): @@ -532,32 +559,82 @@ class OptunaSearch(Searcher): result: 
Optional[Dict] = None, error: bool = False): ot_trial = self._ot_trials[trial_id] - ot_trial_id = ot_trial._trial_id - self._storage.set_trial_value(ot_trial_id, result.get( - self.metric, None)) - self._storage.set_trial_state(ot_trial_id, - ot.trial.TrialState.COMPLETE) + + val = result.get(self.metric, None) if result else None + ot_trial_state = OptunaTrialState.COMPLETE + if val is None: + if error: + ot_trial_state = OptunaTrialState.FAIL + else: + ot_trial_state = OptunaTrialState.PRUNED + try: + self._ot_study.tell(ot_trial, val, state=ot_trial_state) + except ValueError as exc: + logger.warning(exc) # E.g. if NaN was reported + + def add_evaluated_point(self, + parameters: Dict, + value: float, + error: bool = False, + pruned: bool = False, + intermediate_values: Optional[List[float]] = None): + if not self._space: + raise RuntimeError( + UNDEFINED_SEARCH_SPACE.format( + cls=self.__class__.__name__, space="space")) + if not self._metric or not self._mode: + raise RuntimeError( + UNDEFINED_METRIC_MODE.format( + cls=self.__class__.__name__, + metric=self._metric, + mode=self._mode)) + + ot_trial_state = OptunaTrialState.COMPLETE + if error: + ot_trial_state = OptunaTrialState.FAIL + elif pruned: + ot_trial_state = OptunaTrialState.PRUNED + + if intermediate_values: + intermediate_values_dict = { + i: value + for i, value in enumerate(intermediate_values) + } + else: + intermediate_values_dict = None + + trial = ot.trial.create_trial( + state=ot_trial_state, + value=value, + params=parameters, + distributions=self._space, + intermediate_values=intermediate_values_dict) + + self._ot_study.add_trial(trial) def save(self, checkpoint_path: str): - save_object = (self._storage, self._pruner, self._sampler, - self._ot_trials, self._ot_study, - self._points_to_evaluate) + save_object = (self._sampler, self._ot_trials, self._ot_study, + self._points_to_evaluate, self._evaluated_rewards) with open(checkpoint_path, "wb") as outputFile: pickle.dump(save_object, outputFile) def restore(self, checkpoint_path: str): with open(checkpoint_path, "rb") as inputFile: save_object = pickle.load(inputFile) - self._storage, self._pruner, self._sampler, \ - self._ot_trials, self._ot_study, \ - self._points_to_evaluate = save_object + if len(save_object) == 5: + self._sampler, self._ot_trials, self._ot_study, \ + self._points_to_evaluate, self._evaluated_rewards = save_object + else: + # Backwards compatibility + self._sampler, self._ot_trials, self._ot_study, \ + self._points_to_evaluate = save_object @staticmethod - def convert_search_space(spec: Dict) -> List[Tuple]: + def convert_search_space(spec: Dict) -> Dict[str, Any]: resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec) if not domain_vars and not grid_vars: - return [] + return {} if grid_vars: raise ValueError( @@ -568,13 +645,18 @@ class OptunaSearch(Searcher): spec = flatten_dict(spec, prevent_delimiter=True) resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec) - def resolve_value(par: str, domain: Domain) -> Tuple: + def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution: quantize = None sampler = domain.get_sampler() if isinstance(sampler, Quantized): quantize = sampler.q sampler = sampler.sampler + if isinstance(sampler, LogUniform): + logger.warning( + "Optuna does not handle quantization in loguniform " + "sampling. 
The parameter will be passed but it will " + "probably be ignored.") if isinstance(domain, Float): if isinstance(sampler, LogUniform): @@ -582,28 +664,31 @@ class OptunaSearch(Searcher): logger.warning( "Optuna does not support both quantization and " "sampling from LogUniform. Dropped quantization.") - return param.suggest_loguniform(par, domain.lower, - domain.upper) + return ot.distributions.LogUniformDistribution( + domain.lower, domain.upper) + elif isinstance(sampler, Uniform): if quantize: - return param.suggest_discrete_uniform( - par, domain.lower, domain.upper, quantize) - return param.suggest_uniform(par, domain.lower, - domain.upper) + return ot.distributions.DiscreteUniformDistribution( + domain.lower, domain.upper, quantize) + return ot.distributions.UniformDistribution( + domain.lower, domain.upper) + elif isinstance(domain, Integer): if isinstance(sampler, LogUniform): - if quantize: - logger.warning( - "Optuna does not support both quantization and " - "sampling from LogUniform. Dropped quantization.") - return param.suggest_int( - par, domain.lower, domain.upper, log=True) + return ot.distributions.IntLogUniformDistribution( + domain.lower, domain.upper - 1, step=quantize or 1) elif isinstance(sampler, Uniform): - return param.suggest_int( - par, domain.lower, domain.upper, step=quantize or 1) + # Upper bound should be inclusive for quantization and + # exclusive otherwise + return ot.distributions.IntUniformDistribution( + domain.lower, + domain.upper - int(bool(not quantize)), + step=quantize or 1) elif isinstance(domain, Categorical): if isinstance(sampler, Uniform): - return param.suggest_categorical(par, domain.categories) + return ot.distributions.CategoricalDistribution( + domain.categories) raise ValueError( "Optuna search does not support parameters of type " @@ -612,9 +697,9 @@ class OptunaSearch(Searcher): type(domain.sampler).__name__)) # Parameter name is e.g. "a/b/c" for nested dicts - values = [ - resolve_value("/".join(path), domain) + values = { + "/".join(path): resolve_value(domain) for path, domain in domain_vars - ] + } - return values + return values \ No newline at end of file diff --git a/flaml/tune/README.md b/flaml/tune/README.md index f514e12fd..8a1049ea6 100644 --- a/flaml/tune/README.md +++ b/flaml/tune/README.md @@ -44,7 +44,7 @@ print(analysis.best_config) # the best config * Example for using ray tune's API: ```python -# require: pip install flaml[blendsearch] ray[tune] +# require: pip install flaml[blendsearch,ray] from ray import tune as raytune from flaml import CFO, BlendSearch import time @@ -60,18 +60,37 @@ def evaluate_config(config): # use tune.report to report the metric to optimize tune.report(metric=metric) -analysis = raytune.run( - evaluate_config, # the function to evaluate a config - config={ +# provide a time budget (in seconds) for the tuning process +time_budget_s = 60 +# provide the search space +config_search_space = { 'x': tune.lograndint(lower=1, upper=100000), 'y': tune.randint(lower=1, upper=100000) - }, # the search space + } +# provide the low cost partial config +low_cost_partial_config={'x':1} + +# set up CFO +search_alg_cfo = CFO(low_cost_partial_config=low_cost_partial_config) + +# set up BlendSearch. 
+search_alg_blendsearch = BlendSearch(metric="metric", + mode="min", + space=config_search_space, + low_cost_partial_config=low_cost_partial_config) +# NOTE that when using BlendSearch as a search_alg in ray tune, you need to +# configure the 'time_budget_s' for BlendSearch accordingly as follows such that BlendSearch is aware of the time budget. This step is not needed when BlendSearch is used as the search_alg in flaml.tune as it is already done automatically in flaml. +search_alg_blendsearch.set_search_properties(config={"time_budget_s": time_budget_s}) + +analysis = raytune.run( + evaluate_config, # the function to evaluate a config + config=config_search_space, metric='metric', # the name of the metric used for optimization mode='min', # the optimization mode, 'min' or 'max' num_samples=-1, # the maximal number of configs to try, -1 means infinite - time_budget_s=60, # the time budget in seconds + time_budget_s=time_budget_s, # the time budget in seconds local_dir='logs/', # the local directory to store logs - search_alg=CFO(low_cost_partial_config=[{'x':1}]) # or BlendSearch + search_alg=search_alg_blendsearch # or search_alg_cfo ) print(analysis.best_trial.last_result) # the best trial's result diff --git a/flaml/version.py b/flaml/version.py index fc0a8435a..40e294f71 100644 --- a/flaml/version.py +++ b/flaml/version.py @@ -1 +1 @@ -__version__ = "0.5.8" +__version__ = "0.5.9" diff --git a/notebook/flaml_lightgbm.ipynb b/notebook/flaml_lightgbm.ipynb index 1d45a92dc..49ec982b6 100644 --- a/notebook/flaml_lightgbm.ipynb +++ b/notebook/flaml_lightgbm.ipynb @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "slideshow": { "slide_type": "subslide" @@ -67,10 +67,13 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "load dataset from ./openml_ds537.pkl\nDataset name: houses\nX_train.shape: (15480, 8), y_train.shape: (15480,);\nX_test.shape: (5160, 8), y_test.shape: (5160,)\n" + "load dataset from ./openml_ds537.pkl\n", + "Dataset name: houses\n", + "X_train.shape: (15480, 8), y_train.shape: (15480,);\n", + "X_test.shape: (5160, 8), y_test.shape: (5160,)\n" ] } ], @@ -93,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": { "slideshow": { "slide_type": "slide" @@ -109,7 +112,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": { "slideshow": { "slide_type": "slide" @@ -118,7 +121,7 @@ "outputs": [], "source": [ "settings = {\n", - " \"time_budget\": 120, # total running time in seconds\n", + " \"time_budget\": 150, # total running time in seconds\n", " \"metric\": 'r2', # primary metrics for regression can be chosen from: ['mae','mse','r2']\n", " \"estimator_list\": ['lgbm'], # list of ML learners; we tune lightgbm in this example\n", " \"task\": 'regression', # task type \n", @@ -128,7 +131,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": { "slideshow": { "slide_type": "slide" @@ -137,91 +140,111 @@ }, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ - "[flaml.automl: 07-06 11:07:16] {908} INFO - Evaluation method: cv\n", - "[flaml.automl: 07-06 11:07:16] {617} INFO - Using RepeatedKFold\n", - "[flaml.automl: 07-06 11:07:16] {929} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 07-06 11:07:16] {948} INFO - List of ML learners in AutoML Run: ['lgbm']\n", - "[flaml.automl: 07-06 11:07:16] {1012} INFO 
- iteration 0, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:16] {1160} INFO - at 0.3s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", - "[flaml.automl: 07-06 11:07:16] {1012} INFO - iteration 1, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:16] {1160} INFO - at 0.4s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", - "[flaml.automl: 07-06 11:07:16] {1012} INFO - iteration 2, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:16] {1160} INFO - at 0.7s,\tbest lgbm's error=0.5520,\tbest lgbm's error=0.5520\n", - "[flaml.automl: 07-06 11:07:16] {1012} INFO - iteration 3, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:17] {1160} INFO - at 0.9s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 07-06 11:07:17] {1012} INFO - iteration 4, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:17] {1160} INFO - at 1.1s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 07-06 11:07:17] {1012} INFO - iteration 5, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:17] {1160} INFO - at 1.2s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 07-06 11:07:17] {1012} INFO - iteration 6, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:17] {1160} INFO - at 1.5s,\tbest lgbm's error=0.3023,\tbest lgbm's error=0.3023\n", - "[flaml.automl: 07-06 11:07:17] {1012} INFO - iteration 7, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:17] {1160} INFO - at 1.9s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", - "[flaml.automl: 07-06 11:07:17] {1012} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:18] {1160} INFO - at 2.1s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", - "[flaml.automl: 07-06 11:07:18] {1012} INFO - iteration 9, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:18] {1160} INFO - at 2.5s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 07-06 11:07:18] {1012} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:18] {1160} INFO - at 2.7s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 07-06 11:07:18] {1012} INFO - iteration 11, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:19] {1160} INFO - at 3.0s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 07-06 11:07:19] {1012} INFO - iteration 12, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:19] {1160} INFO - at 3.5s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 07-06 11:07:19] {1012} INFO - iteration 13, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:19] {1160} INFO - at 3.8s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 07-06 11:07:19] {1012} INFO - iteration 14, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:20] {1160} INFO - at 4.3s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 07-06 11:07:20] {1012} INFO - iteration 15, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:20] {1160} INFO - at 4.6s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 07-06 11:07:20] {1012} INFO - iteration 16, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:21] {1160} INFO - at 5.1s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 07-06 11:07:21] {1012} INFO - iteration 17, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:21] {1160} INFO - at 5.4s,\tbest lgbm's error=0.1953,\tbest lgbm's 
error=0.1953\n", - "[flaml.automl: 07-06 11:07:21] {1012} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:22] {1160} INFO - at 6.3s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 07-06 11:07:22] {1012} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:22] {1160} INFO - at 6.5s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 07-06 11:07:22] {1012} INFO - iteration 20, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:25] {1160} INFO - at 9.8s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 07-06 11:07:25] {1012} INFO - iteration 21, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:26] {1160} INFO - at 10.3s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 07-06 11:07:26] {1012} INFO - iteration 22, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:27] {1160} INFO - at 11.8s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 07-06 11:07:27] {1012} INFO - iteration 23, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:28] {1160} INFO - at 12.5s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 07-06 11:07:28] {1012} INFO - iteration 24, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:30] {1160} INFO - at 14.1s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 07-06 11:07:30] {1012} INFO - iteration 25, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:30] {1160} INFO - at 14.4s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 07-06 11:07:30] {1012} INFO - iteration 26, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:36] {1160} INFO - at 20.4s,\tbest lgbm's error=0.1652,\tbest lgbm's error=0.1652\n", - "[flaml.automl: 07-06 11:07:36] {1012} INFO - iteration 27, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:40] {1160} INFO - at 24.2s,\tbest lgbm's error=0.1652,\tbest lgbm's error=0.1652\n", - "[flaml.automl: 07-06 11:07:40] {1012} INFO - iteration 28, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:50] {1160} INFO - at 34.5s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:07:50] {1012} INFO - iteration 29, current learner lgbm\n", - "[flaml.automl: 07-06 11:07:52] {1160} INFO - at 36.0s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:07:52] {1012} INFO - iteration 30, current learner lgbm\n", - "[flaml.automl: 07-06 11:08:37] {1160} INFO - at 81.1s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:08:37] {1012} INFO - iteration 31, current learner lgbm\n", - "[flaml.automl: 07-06 11:08:39] {1160} INFO - at 83.8s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:08:39] {1012} INFO - iteration 32, current learner lgbm\n", - "[flaml.automl: 07-06 11:08:49] {1160} INFO - at 93.5s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:08:49] {1012} INFO - iteration 33, current learner lgbm\n", - "[flaml.automl: 07-06 11:09:00] {1160} INFO - at 104.0s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:09:00] {1012} INFO - iteration 34, current learner lgbm\n", - "[flaml.automl: 07-06 11:09:07] {1160} INFO - at 111.4s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:09:07] {1012} INFO - iteration 35, current learner lgbm\n", - "[flaml.automl: 
07-06 11:09:16] {1160} INFO - at 120.8s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", - "[flaml.automl: 07-06 11:09:16] {1206} INFO - selected model: LGBMRegressor(colsample_bytree=0.6819303877749074,\n", - " learning_rate=0.13082160708847235, max_bin=512,\n", - " min_child_samples=128, n_estimators=363, num_leaves=269,\n", - " objective='regression', reg_alpha=0.03805198795768637,\n", - " reg_lambda=18.14103139151093, subsample=0.820105567300051)\n", - "[flaml.automl: 07-06 11:09:16] {963} INFO - fit succeeded\n" + "[flaml.automl: 07-24 13:49:03] {912} INFO - Evaluation method: cv\n", + "[flaml.automl: 07-24 13:49:03] {616} INFO - Using RepeatedKFold\n", + "[flaml.automl: 07-24 13:49:03] {933} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 07-24 13:49:03] {952} INFO - List of ML learners in AutoML Run: ['lgbm']\n", + "[flaml.automl: 07-24 13:49:03] {1018} INFO - iteration 0, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:03] {1173} INFO - at 0.5s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", + "[flaml.automl: 07-24 13:49:03] {1018} INFO - iteration 1, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:03] {1173} INFO - at 0.7s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", + "[flaml.automl: 07-24 13:49:03] {1018} INFO - iteration 2, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:03] {1173} INFO - at 0.9s,\tbest lgbm's error=0.5520,\tbest lgbm's error=0.5520\n", + "[flaml.automl: 07-24 13:49:03] {1018} INFO - iteration 3, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:04] {1173} INFO - at 1.1s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", + "[flaml.automl: 07-24 13:49:04] {1018} INFO - iteration 4, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:04] {1173} INFO - at 1.2s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", + "[flaml.automl: 07-24 13:49:04] {1018} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:04] {1173} INFO - at 1.4s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", + "[flaml.automl: 07-24 13:49:04] {1018} INFO - iteration 6, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:04] {1173} INFO - at 1.6s,\tbest lgbm's error=0.3023,\tbest lgbm's error=0.3023\n", + "[flaml.automl: 07-24 13:49:04] {1018} INFO - iteration 7, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:04] {1173} INFO - at 1.9s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", + "[flaml.automl: 07-24 13:49:04] {1018} INFO - iteration 8, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:05] {1173} INFO - at 2.1s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", + "[flaml.automl: 07-24 13:49:05] {1018} INFO - iteration 9, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:05] {1173} INFO - at 2.4s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", + "[flaml.automl: 07-24 13:49:05] {1018} INFO - iteration 10, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:05] {1173} INFO - at 2.6s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", + "[flaml.automl: 07-24 13:49:05] {1018} INFO - iteration 11, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:05] {1173} INFO - at 2.8s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", + "[flaml.automl: 07-24 13:49:05] {1018} INFO - iteration 12, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:06] {1173} INFO - at 3.2s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:06] {1018} INFO - iteration 13, current learner lgbm\n", + 
"[flaml.automl: 07-24 13:49:06] {1173} INFO - at 3.5s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:06] {1018} INFO - iteration 14, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:06] {1173} INFO - at 3.9s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:06] {1018} INFO - iteration 15, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:07] {1173} INFO - at 4.1s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:07] {1018} INFO - iteration 16, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:07] {1173} INFO - at 4.5s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:07] {1018} INFO - iteration 17, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:07] {1173} INFO - at 4.8s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", + "[flaml.automl: 07-24 13:49:07] {1018} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:08] {1173} INFO - at 5.5s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", + "[flaml.automl: 07-24 13:49:08] {1018} INFO - iteration 19, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:08] {1173} INFO - at 5.7s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", + "[flaml.automl: 07-24 13:49:08] {1018} INFO - iteration 20, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:11] {1173} INFO - at 8.2s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", + "[flaml.automl: 07-24 13:49:11] {1018} INFO - iteration 21, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:11] {1173} INFO - at 8.5s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", + "[flaml.automl: 07-24 13:49:11] {1018} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:12] {1173} INFO - at 9.6s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", + "[flaml.automl: 07-24 13:49:12] {1018} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:13] {1173} INFO - at 10.1s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", + "[flaml.automl: 07-24 13:49:13] {1018} INFO - iteration 24, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:14] {1173} INFO - at 11.1s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", + "[flaml.automl: 07-24 13:49:14] {1018} INFO - iteration 25, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:14] {1173} INFO - at 11.4s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", + "[flaml.automl: 07-24 13:49:14] {1018} INFO - iteration 26, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:18] {1173} INFO - at 15.6s,\tbest lgbm's error=0.1652,\tbest lgbm's error=0.1652\n", + "[flaml.automl: 07-24 13:49:18] {1018} INFO - iteration 27, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:21] {1173} INFO - at 18.5s,\tbest lgbm's error=0.1652,\tbest lgbm's error=0.1652\n", + "[flaml.automl: 07-24 13:49:21] {1018} INFO - iteration 28, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:28] {1173} INFO - at 25.5s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", + "[flaml.automl: 07-24 13:49:28] {1018} INFO - iteration 29, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:29] {1173} INFO - at 26.5s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", + "[flaml.automl: 07-24 13:49:29] {1018} INFO - iteration 30, current learner lgbm\n", + "[flaml.automl: 07-24 13:49:59] {1173} INFO - at 56.8s,\tbest lgbm's error=0.1642,\tbest lgbm's error=0.1642\n", + "[flaml.automl: 
07-24 13:49:59] {1018} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:05] {1173} INFO - at 62.1s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:05] {1018} INFO - iteration 32, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:13] {1173} INFO - at 70.4s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:13] {1018} INFO - iteration 33, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:18] {1173} INFO - at 75.6s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:18] {1018} INFO - iteration 34, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:22] {1173} INFO - at 79.3s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:22] {1018} INFO - iteration 35, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:49] {1173} INFO - at 106.3s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:49] {1018} INFO - iteration 36, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:49] {1173} INFO - at 107.0s,\tbest lgbm's error=0.1622,\tbest lgbm's error=0.1622\n", + "[flaml.automl: 07-24 13:50:49] {1018} INFO - iteration 37, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:54] {1173} INFO - at 112.0s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:50:54] {1018} INFO - iteration 38, current learner lgbm\n", + "[flaml.automl: 07-24 13:50:59] {1173} INFO - at 116.0s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:50:59] {1018} INFO - iteration 39, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:06] {1173} INFO - at 123.3s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:06] {1018} INFO - iteration 40, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:08] {1173} INFO - at 126.0s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:08] {1018} INFO - iteration 41, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:11] {1173} INFO - at 128.1s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:11] {1018} INFO - iteration 42, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:18] {1173} INFO - at 135.4s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:18] {1018} INFO - iteration 43, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:20] {1173} INFO - at 137.7s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:20] {1018} INFO - iteration 44, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:27] {1173} INFO - at 144.3s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:27] {1018} INFO - iteration 45, current learner lgbm\n", + "[flaml.automl: 07-24 13:51:32] {1173} INFO - at 149.4s,\tbest lgbm's error=0.1611,\tbest lgbm's error=0.1611\n", + "[flaml.automl: 07-24 13:51:32] {1219} INFO - selected model: LGBMRegressor(colsample_bytree=0.788228718184241,\n", + " learning_rate=0.08917691724022275, max_bin=256,\n", + " min_child_samples=64, n_estimators=157, num_leaves=4886,\n", + " objective='regression', reg_alpha=0.042293060180467086,\n", + " reg_lambda=95.16149755350158, subsample=0.8278302514488655)\n", + "[flaml.automl: 07-24 13:51:32] {969} INFO - fit succeeded\n" ] } ], @@ -243,7 +266,7 @@ }, { "cell_type": "code", - "execution_count": 6, + 
"execution_count": 5, "metadata": { "slideshow": { "slide_type": "slide" @@ -252,10 +275,12 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "Best hyperparmeter config: {'n_estimators': 363.0, 'num_leaves': 269.0, 'min_child_samples': 128.0, 'learning_rate': 0.13082160708847235, 'subsample': 0.820105567300051, 'log_max_bin': 10.0, 'colsample_bytree': 0.6819303877749074, 'reg_alpha': 0.03805198795768637, 'reg_lambda': 18.14103139151093}\nBest r2 on validation data: 0.8358\nTraining duration of best run: 10.26 s\n" + "Best hyperparmeter config: {'n_estimators': 157, 'num_leaves': 4886, 'min_child_samples': 64, 'learning_rate': 0.08917691724022275, 'subsample': 0.8278302514488655, 'log_max_bin': 9, 'colsample_bytree': 0.788228718184241, 'reg_alpha': 0.042293060180467086, 'reg_lambda': 95.16149755350158}\n", + "Best r2 on validation data: 0.8389\n", + "Training duration of best run: 4.971 s\n" ] } ], @@ -268,7 +293,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": { "slideshow": { "slide_type": "slide" @@ -276,18 +301,18 @@ }, "outputs": [ { - "output_type": "execute_result", "data": { "text/plain": [ - "LGBMRegressor(colsample_bytree=0.6819303877749074,\n", - " learning_rate=0.13082160708847235, max_bin=512,\n", - " min_child_samples=128, n_estimators=363, num_leaves=269,\n", - " objective='regression', reg_alpha=0.03805198795768637,\n", - " reg_lambda=18.14103139151093, subsample=0.820105567300051)" + "LGBMRegressor(colsample_bytree=0.788228718184241,\n", + " learning_rate=0.08917691724022275, max_bin=256,\n", + " min_child_samples=64, n_estimators=157, num_leaves=4886,\n", + " objective='regression', reg_alpha=0.042293060180467086,\n", + " reg_lambda=95.16149755350158, subsample=0.8278302514488655)" ] }, + "execution_count": 6, "metadata": {}, - "execution_count": 7 + "output_type": "execute_result" } ], "source": [ @@ -296,7 +321,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": { "slideshow": { "slide_type": "slide" @@ -312,7 +337,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": { "slideshow": { "slide_type": "slide" @@ -321,10 +346,23 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "Predicted labels [144297.78236479 237704.97318762 133286.22864409 ... 201009.77752565\n 229253.8030484 268605.21607981]\nTrue labels [136900. 241300. 200700. ... 160900. 227300. 265600.]\n" + "Predicted labels [143563.86395674 254576.18760499 144158.79619969 ... 196049.43993507\n", + " 252324.54317706 279607.98371458]\n", + "True labels 14740 136900.0\n", + "10101 241300.0\n", + "20566 200700.0\n", + "2670 72500.0\n", + "15709 460000.0\n", + " ... 
\n", + "13132 121200.0\n", + "8228 137500.0\n", + "3948 160900.0\n", + "8522 227300.0\n", + "16798 265600.0\n", + "Name: median_house_value, Length: 5160, dtype: float64\n" ] } ], @@ -337,7 +375,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 9, "metadata": { "slideshow": { "slide_type": "slide" @@ -346,10 +384,12 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "r2 = 0.8449836920253022\nmse = 2049084080.9890163\nmae = 30591.753451556997\n" + "r2 = 0.8467624164909245\n", + "mse = 2025572000.0048184\n", + "mae = 29845.819846911687\n" ] } ], @@ -363,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 10, "metadata": { "slideshow": { "slide_type": "subslide" @@ -372,10 +412,20 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4.0, 'num_leaves': 4.0, 'min_child_samples': 12.0, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'log_max_bin': 10.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4.0, 'num_leaves': 4.0, 'min_child_samples': 12.0, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'log_max_bin': 10.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4.0, 'num_leaves': 4.0, 'min_child_samples': 24.0, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4.0, 'num_leaves': 4.0, 'min_child_samples': 24.0, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11.0, 'num_leaves': 4.0, 'min_child_samples': 36.0, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 11.0, 'num_leaves': 4.0, 'min_child_samples': 36.0, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20.0, 'num_leaves': 4.0, 'min_child_samples': 46.0, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9.0, 
'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20.0, 'num_leaves': 4.0, 'min_child_samples': 46.0, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9.0, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20.0, 'num_leaves': 11.0, 'min_child_samples': 52.0, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9.0, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20.0, 'num_leaves': 11.0, 'min_child_samples': 52.0, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9.0, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 37.0, 'num_leaves': 15.0, 'min_child_samples': 93.0, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9.0, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 37.0, 'num_leaves': 15.0, 'min_child_samples': 93.0, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9.0, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 75.0, 'num_leaves': 32.0, 'min_child_samples': 83.0, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7.0, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.38644069375879475}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 75.0, 'num_leaves': 32.0, 'min_child_samples': 83.0, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7.0, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.38644069375879475}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 81.0, 'num_leaves': 66.0, 'min_child_samples': 93.0, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7.0, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 81.0, 'num_leaves': 66.0, 'min_child_samples': 93.0, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7.0, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 283.0, 'num_leaves': 171.0, 'min_child_samples': 128.0, 'learning_rate': 0.056885026855831654, 'subsample': 0.9152991332236934, 'log_max_bin': 8.0, 'colsample_bytree': 0.7103230835995594, 'reg_alpha': 0.012993197803320033, 'reg_lambda': 7.0529810054461715}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 283.0, 'num_leaves': 171.0, 'min_child_samples': 128.0, 'learning_rate': 0.056885026855831654, 'subsample': 0.9152991332236934, 
'log_max_bin': 8.0, 'colsample_bytree': 0.7103230835995594, 'reg_alpha': 0.012993197803320033, 'reg_lambda': 7.0529810054461715}}\n{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 363.0, 'num_leaves': 269.0, 'min_child_samples': 128.0, 'learning_rate': 0.13082160708847235, 'subsample': 0.820105567300051, 'log_max_bin': 10.0, 'colsample_bytree': 0.6819303877749074, 'reg_alpha': 0.03805198795768637, 'reg_lambda': 18.14103139151093}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 363.0, 'num_leaves': 269.0, 'min_child_samples': 128.0, 'learning_rate': 0.13082160708847235, 'subsample': 0.820105567300051, 'log_max_bin': 10.0, 'colsample_bytree': 0.6819303877749074, 'reg_alpha': 0.03805198795768637, 'reg_lambda': 18.14103139151093}}\n" + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 24, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 24, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 36, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 36, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'num_leaves': 4, 'min_child_samples': 46, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.546062702473889}, 'Best Learner': 'lgbm', 
'Best Hyper-parameters': {'n_estimators': 20, 'num_leaves': 4, 'min_child_samples': 46, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.546062702473889}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'num_leaves': 11, 'min_child_samples': 52, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20, 'num_leaves': 11, 'min_child_samples': 52, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 37, 'num_leaves': 15, 'min_child_samples': 93, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 37, 'num_leaves': 15, 'min_child_samples': 93, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 75, 'num_leaves': 32, 'min_child_samples': 83, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.38644069375879475}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 75, 'num_leaves': 32, 'min_child_samples': 83, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.38644069375879475}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 81, 'num_leaves': 66, 'min_child_samples': 93, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 81, 'num_leaves': 66, 'min_child_samples': 93, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 283, 'num_leaves': 171, 'min_child_samples': 128, 'learning_rate': 0.056885026855831654, 'subsample': 0.9152991332236934, 'log_max_bin': 8, 'colsample_bytree': 0.7103230835995594, 'reg_alpha': 0.012993197803320033, 'reg_lambda': 7.0529810054461715}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 283, 'num_leaves': 171, 'min_child_samples': 128, 'learning_rate': 0.056885026855831654, 'subsample': 0.9152991332236934, 'log_max_bin': 8, 'colsample_bytree': 0.7103230835995594, 'reg_alpha': 0.012993197803320033, 'reg_lambda': 7.0529810054461715}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 
'Current Hyper-parameters': {'n_estimators': 363, 'num_leaves': 269, 'min_child_samples': 128, 'learning_rate': 0.13082160708847235, 'subsample': 0.820105567300051, 'log_max_bin': 10, 'colsample_bytree': 0.6819303877749074, 'reg_alpha': 0.03805198795768637, 'reg_lambda': 18.14103139151093}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 363, 'num_leaves': 269, 'min_child_samples': 128, 'learning_rate': 0.13082160708847235, 'subsample': 0.820105567300051, 'log_max_bin': 10, 'colsample_bytree': 0.6819303877749074, 'reg_alpha': 0.03805198795768637, 'reg_lambda': 18.14103139151093}}\n" ] } ], @@ -390,7 +440,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 11, "metadata": { "slideshow": { "slide_type": "slide" @@ -398,15 +448,16 @@ }, "outputs": [ { - "output_type": "display_data", "data": { - "text/plain": "
", - "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAeFUlEQVR4nO3dfZwcVZ3v8c+XIYERCANmdJMhIVFilAdNdISLj8iKAVdJQETg3r2KVyO74rqyN0hcRRYvVzSKF183CwKLgAsEiDEEiEZWFBVQEggQEgwbIpJMogQhEHAkT7/9o6qx03T39CRT/VTf9+s1r+k6darqN0WoX59zqk4pIjAzs/zardEBmJlZYzkRmJnlnBOBmVnOORGYmeWcE4GZWc45EZiZ5ZwTgVkVkt4paWWj4zDLkhOBNS1Jj0t6byNjiIhfRMTErPYvaYqkn0vaJGmDpDslHZ/V8czKcSKwXJPU0cBjnwTcBFwDHAC8GjgX+OBO7EuS/P+z7RT/w7GWI2k3SedIekzSHyXdKGn/ovU3Sfq9pGfTb9uHFK27StIlkhZKegF4T9ry+N+SHkq3uUHSnmn9oyStLdq+Yt10/dmS1ktaJ+kTkkLSQWX+BgEXAV+JiCsi4tmI2B4Rd0bEJ9M650n696JtxqX72z1d/pmkCyTdBfwJmCFpSclxPidpQfp5D0nfkPSEpD9IulRS5y7+57A24ERgregzwDTg3cBo4BlgdtH6HwITgFcB9wPXlmx/GnABsA/wy7TsZOBYYDzwRuBjVY5ftq6kY4GzgPcCBwFHVdnHRGAMMLdKnVr8LTCd5G+5FJgoaULR+tOA69LPFwKvAyal8fWQtEAs55wIrBWdAfxzRKyNiBeB84CTCt+UI+LKiNhUtO5NkvYt2v7miLgr/Qb+57Ts2xGxLiKeBm4huVhWUqnuycB3I2J5RPwpPXYlr0x/r6/1j67gqvR4WyPiWeBm4FSANCG8HliQtkCmA5+LiKcjYhPwf4FTdvH41gacCKwVHQj8QNJGSRuBR4BtwKsldUi6MO02eg54PN1mZNH2a8rs8/dFn/8E7F3l+JXqji7Zd7njFPwx/T2qSp1alB7jOtJEQNIamJ8mpW7gFcB9ReftR2m55ZwTgbWiNcBxEdFV9LNnRPSRXPymknTP7AuMS7dR0fZZTbm7nmTQt2BMlborSf6OD1Wp8wLJxbvgr8rUKf1bbge6JU0iSQiFbqGngH7gkKJztm9EVEt4lhNOBNbshknas+hnd5K+8AskHQggqVvS1LT+PsCLJN+4X0HS/VEvNwKnS3qDpFcAX6pUMZL5388CviTpdEkj0kHwd0i6LK32APAuSWPTrq2ZAwUQEVtI7kSaBexPkhiIiO3A5cC3JL0KQFKPpCk7/dda23AisGa3kOSbbOHnPOBiYAHwY0mbgF8BR6T1rwF+B/QBK9J1dRERPwS+DfwUWFV07Bcr1J8LfAT4OLAO+APwf0j6+YmI24EbgIeA+4BbawzlOpIW0U0RsbWo/POFuNJus/8gGbS2nJNfTGOWDUlvAB4G9ii5IJs1FbcIzIaQpBPS+/X3A74G3OIkYM3OicBsaH0KeBJ4jOROpr9rbDhmA3PXkJlZzrlFYGaWc7s3OoDBGjlyZIwbN67RYZiZtZT77rvvqYgo+wBhyyWCcePGsWTJkoErmpnZSyT9rtI6dw2ZmeWcE4GZWc45EZiZ5ZwTgZlZzjkRmJnlXMvdNWRmljfzl/Yxa9FK1m3sZ3RXJzOmTGTa5J4h278TgVkLyfqCYM1n/tI+Zs5bRv+WbQD0bexn5rxlAEP2395dQ2YtonBB6NvYT/CXC8L8pX2NDs0yNGvRypeSQEH/lm3MWrRyyI7hFoFZi6h0QTh77kNcf+8TDYrKsta3sb9s+boK5TvDLQKzFlHpf/zN27bXORKrp+Ed5S/To7s6h+wYbhFYQ7nPu3ajuzrLfjvs6erkhk8d2YCIrB5KxwgAOod1MGPK0L1czi2CFjZ/aR9vv/AOxp9zG2+/8I6W6yt2n/fgzJgykc5hHTuUDfUFwZrPtMk9fPXEw+jp6kQkif+rJx42pF+YWu59BL29veFJ5yp/SxjqfyBZevuFd5T9hju8Yzcmj+1qQETN76nnX2T1hhcIkguCW1BWK0n3RURvuXXuGmpR7TBwWGkQzH3elY3cew9G7r0HUyf1cNoRYxsdjrUJJ4IW1Q4Dh8M7disbr/u8zerLiaBFtcPAYT0GwcxsYB4szkjWA7ntMHBYj0EwMxtYpi0CSccCFwMdwBURcWHJ+rHA1UBXWueciFiYZUz1UI9Hwgv7OXvuQ2zetr1lBw6nTe5pu
[… remainder of base64-encoded PNG data for the previous figure omitted …]AAAAAElFTkSuQmCC\n"
+      "image/png": "iVBORw0KGgoAAAANSUhEUg[… base64-encoded PNG data for the updated figure omitted …]AAAAAElFTkSuQmCC",
+      "text/plain": [
+       "<Figure size 432x288 with 1 Axes>"
" + ] }, "metadata": { "needs_background": "light" - } + }, + "output_type": "display_data" } ], "source": [ @@ -422,26 +473,26 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, "source": [ "## 3. Comparison with alternatives\n", "\n", "### FLAML's accuracy" - ], - "cell_type": "markdown", - "metadata": {} + ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 12, "metadata": { "tags": [] }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "flaml r2 = 0.8449836920253022\n" + "flaml r2 = 0.8467624164909245\n" ] } ], @@ -450,11 +501,11 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, "source": [ "### Default LightGBM" - ], - "cell_type": "markdown", - "metadata": {} + ] }, { "cell_type": "code", @@ -472,14 +523,14 @@ "metadata": {}, "outputs": [ { - "output_type": "execute_result", "data": { "text/plain": [ "LGBMRegressor()" ] }, + "execution_count": 14, "metadata": {}, - "execution_count": 14 + "output_type": "execute_result" } ], "source": [ @@ -494,8 +545,8 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "default lgbm r2 = 0.8296179648694404\n" ] @@ -508,11 +559,11 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, "source": [ "### Optuna LightGBM Tuner" - ], - "cell_type": "markdown", - "metadata": {} + ] }, { "cell_type": "code", @@ -520,7 +571,7 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install optuna==2.5.0;" + "# !pip install optuna==2.8.0;" ] }, { @@ -551,83 +602,98 @@ }, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-07-24 13:51:37,223]\u001b[0m A new study created in memory with name: no-name-a9ad03a9-1a95-4cb8-903f-402d3a214640\u001b[0m\n", + "feature_fraction, val_score: 2251528152.166571: 14%|#4 | 1/7 [00:01<00:08, 1.42s/it]\u001b[32m[I 2021-07-24 13:51:38,667]\u001b[0m Trial 0 finished with value: 2251528152.166571 and parameters: {'feature_fraction': 0.5}. Best is trial 0 with value: 2251528152.166571.\u001b[0m\n", + "feature_fraction, val_score: 2251528152.166571: 29%|##8 | 2/7 [00:03<00:07, 1.47s/it]\u001b[32m[I 2021-07-24 13:51:40,257]\u001b[0m Trial 1 finished with value: 2253578981.8374605 and parameters: {'feature_fraction': 0.6}. Best is trial 0 with value: 2251528152.166571.\u001b[0m\n", + "feature_fraction, val_score: 2251528152.166571: 43%|####2 | 3/7 [00:04<00:06, 1.54s/it]\u001b[32m[I 2021-07-24 13:51:41,944]\u001b[0m Trial 2 finished with value: 2293115294.9762316 and parameters: {'feature_fraction': 1.0}. Best is trial 0 with value: 2251528152.166571.\u001b[0m\n", + "feature_fraction, val_score: 2221143011.565778: 57%|#####7 | 4/7 [00:06<00:04, 1.52s/it]\u001b[32m[I 2021-07-24 13:51:43,420]\u001b[0m Trial 3 finished with value: 2221143011.5657783 and parameters: {'feature_fraction': 0.7}. Best is trial 3 with value: 2221143011.5657783.\u001b[0m\n", + "feature_fraction, val_score: 2221143011.565778: 71%|#######1 | 5/7 [00:07<00:03, 1.51s/it]\u001b[32m[I 2021-07-24 13:51:44,892]\u001b[0m Trial 4 finished with value: 2221143011.5657783 and parameters: {'feature_fraction': 0.8}. Best is trial 3 with value: 2221143011.5657783.\u001b[0m\n", + "feature_fraction, val_score: 2221143011.565778: 86%|########5 | 6/7 [00:09<00:01, 1.51s/it]\u001b[32m[I 2021-07-24 13:51:46,405]\u001b[0m Trial 5 finished with value: 2491490333.842695 and parameters: {'feature_fraction': 0.4}. 
Best is trial 3 with value: 2221143011.5657783.\u001b[0m\n", + "feature_fraction, val_score: 2193757572.841483: 100%|##########| 7/7 [00:10<00:00, 1.52s/it]\u001b[32m[I 2021-07-24 13:51:47,947]\u001b[0m Trial 6 finished with value: 2193757572.841483 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 6 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction, val_score: 2193757572.841483: 100%|##########| 7/7 [00:10<00:00, 1.53s/it]\n", + "num_leaves, val_score: 2193757572.841483: 5%|5 | 1/20 [00:06<01:55, 6.10s/it]\u001b[32m[I 2021-07-24 13:51:54,053]\u001b[0m Trial 7 finished with value: 2248042974.885056 and parameters: {'num_leaves': 163}. Best is trial 7 with value: 2248042974.885056.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 10%|# | 2/20 [00:09<01:36, 5.36s/it]\u001b[32m[I 2021-07-24 13:51:57,685]\u001b[0m Trial 8 finished with value: 2202201580.7993436 and parameters: {'num_leaves': 88}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 15%|#5 | 3/20 [00:18<01:46, 6.24s/it]\u001b[32m[I 2021-07-24 13:52:05,988]\u001b[0m Trial 9 finished with value: 2245590498.6014037 and parameters: {'num_leaves': 191}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 20%|## | 4/20 [00:25<01:44, 6.53s/it]\u001b[32m[I 2021-07-24 13:52:13,177]\u001b[0m Trial 10 finished with value: 2313837552.1304107 and parameters: {'num_leaves': 179}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 25%|##5 | 5/20 [00:27<01:19, 5.28s/it]\u001b[32m[I 2021-07-24 13:52:15,543]\u001b[0m Trial 11 finished with value: 2292271962.1367116 and parameters: {'num_leaves': 52}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 30%|### | 6/20 [00:32<01:12, 5.16s/it]\u001b[32m[I 2021-07-24 13:52:20,433]\u001b[0m Trial 12 finished with value: 2262598949.621539 and parameters: {'num_leaves': 143}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 35%|###5 | 7/20 [00:34<00:55, 4.25s/it]\u001b[32m[I 2021-07-24 13:52:22,553]\u001b[0m Trial 13 finished with value: 2290214250.6976314 and parameters: {'num_leaves': 50}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 40%|#### | 8/20 [00:40<00:56, 4.70s/it]\u001b[32m[I 2021-07-24 13:52:28,292]\u001b[0m Trial 14 finished with value: 2274572970.4214416 and parameters: {'num_leaves': 165}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 45%|####5 | 9/20 [00:49<01:06, 6.05s/it]\u001b[32m[I 2021-07-24 13:52:37,485]\u001b[0m Trial 15 finished with value: 2293618526.656807 and parameters: {'num_leaves': 244}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 50%|##### | 10/20 [00:55<01:00, 6.09s/it]\u001b[32m[I 2021-07-24 13:52:43,668]\u001b[0m Trial 16 finished with value: 2248672042.5925345 and parameters: {'num_leaves': 164}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 55%|#####5 | 11/20 [00:59<00:47, 5.33s/it]\u001b[32m[I 2021-07-24 13:52:47,239]\u001b[0m Trial 17 finished with value: 2264385179.765125 and parameters: {'num_leaves': 91}. 
Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 60%|###### | 12/20 [01:08<00:51, 6.48s/it]\u001b[32m[I 2021-07-24 13:52:56,413]\u001b[0m Trial 18 finished with value: 2252406272.4344435 and parameters: {'num_leaves': 251}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 65%|######5 | 13/20 [01:12<00:39, 5.71s/it]\u001b[32m[I 2021-07-24 13:53:00,320]\u001b[0m Trial 19 finished with value: 2214542360.152998 and parameters: {'num_leaves': 101}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 70%|####### | 14/20 [01:12<00:25, 4.19s/it]\u001b[32m[I 2021-07-24 13:53:00,950]\u001b[0m Trial 20 finished with value: 2748428041.4812107 and parameters: {'num_leaves': 3}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 75%|#######5 | 15/20 [01:16<00:20, 4.11s/it]\u001b[32m[I 2021-07-24 13:53:04,880]\u001b[0m Trial 21 finished with value: 2228598419.330431 and parameters: {'num_leaves': 100}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 80%|######## | 16/20 [01:20<00:15, 3.97s/it]\u001b[32m[I 2021-07-24 13:53:08,539]\u001b[0m Trial 22 finished with value: 2251484592.265115 and parameters: {'num_leaves': 95}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 85%|########5 | 17/20 [01:22<00:10, 3.47s/it]\u001b[32m[I 2021-07-24 13:53:10,837]\u001b[0m Trial 23 finished with value: 2247121386.2896996 and parameters: {'num_leaves': 49}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 90%|######### | 18/20 [01:23<00:05, 2.71s/it]\u001b[32m[I 2021-07-24 13:53:11,772]\u001b[0m Trial 24 finished with value: 2232858800.451656 and parameters: {'num_leaves': 10}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 95%|#########5| 19/20 [01:27<00:03, 3.14s/it]\u001b[32m[I 2021-07-24 13:53:15,912]\u001b[0m Trial 25 finished with value: 2236616896.4291906 and parameters: {'num_leaves': 111}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 100%|##########| 20/20 [01:32<00:00, 3.62s/it]\u001b[32m[I 2021-07-24 13:53:20,649]\u001b[0m Trial 26 finished with value: 2272025220.904855 and parameters: {'num_leaves': 128}. Best is trial 8 with value: 2202201580.7993436.\u001b[0m\n", + "num_leaves, val_score: 2193757572.841483: 100%|##########| 20/20 [01:32<00:00, 4.64s/it]\n", + "bagging, val_score: 2193757572.841483: 10%|# | 1/10 [00:02<00:19, 2.16s/it]\u001b[32m[I 2021-07-24 13:53:22,811]\u001b[0m Trial 27 finished with value: 2215507853.2691116 and parameters: {'bagging_fraction': 0.833765937354807, 'bagging_freq': 5}. Best is trial 27 with value: 2215507853.2691116.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 20%|## | 2/10 [00:04<00:17, 2.19s/it]\u001b[32m[I 2021-07-24 13:53:25,089]\u001b[0m Trial 28 finished with value: 2289649905.7234273 and parameters: {'bagging_fraction': 0.7166436113253525, 'bagging_freq': 3}. 
Best is trial 27 with value: 2215507853.2691116.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 30%|### | 3/10 [00:06<00:15, 2.28s/it]\u001b[32m[I 2021-07-24 13:53:27,565]\u001b[0m Trial 29 finished with value: 2435071807.8717895 and parameters: {'bagging_fraction': 0.4311907585027494, 'bagging_freq': 3}. Best is trial 27 with value: 2215507853.2691116.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 40%|#### | 4/10 [00:09<00:13, 2.31s/it]\u001b[32m[I 2021-07-24 13:53:29,967]\u001b[0m Trial 30 finished with value: 2384649358.377396 and parameters: {'bagging_fraction': 0.48304880587976823, 'bagging_freq': 5}. Best is trial 27 with value: 2215507853.2691116.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 50%|##### | 5/10 [00:11<00:11, 2.27s/it]\u001b[32m[I 2021-07-24 13:53:32,141]\u001b[0m Trial 31 finished with value: 2210265907.248649 and parameters: {'bagging_fraction': 0.7877791059553392, 'bagging_freq': 6}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 60%|###### | 6/10 [00:13<00:09, 2.28s/it]\u001b[32m[I 2021-07-24 13:53:34,434]\u001b[0m Trial 32 finished with value: 2272518522.3229847 and parameters: {'bagging_fraction': 0.7142758200867739, 'bagging_freq': 4}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 70%|####### | 7/10 [00:15<00:06, 2.26s/it]\u001b[32m[I 2021-07-24 13:53:36,650]\u001b[0m Trial 33 finished with value: 2265778780.249233 and parameters: {'bagging_fraction': 0.6387893291706077, 'bagging_freq': 1}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 80%|######## | 8/10 [00:18<00:04, 2.26s/it]\u001b[32m[I 2021-07-24 13:53:38,903]\u001b[0m Trial 34 finished with value: 2233770570.763797 and parameters: {'bagging_fraction': 0.7784748097499257, 'bagging_freq': 3}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 90%|######### | 9/10 [00:20<00:02, 2.31s/it]\u001b[32m[I 2021-07-24 13:53:41,337]\u001b[0m Trial 35 finished with value: 2385812002.7077584 and parameters: {'bagging_fraction': 0.43475426988023463, 'bagging_freq': 4}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 100%|##########| 10/10 [00:23<00:00, 2.34s/it]\u001b[32m[I 2021-07-24 13:53:43,759]\u001b[0m Trial 36 finished with value: 2279764756.511298 and parameters: {'bagging_fraction': 0.509552686945636, 'bagging_freq': 5}. Best is trial 31 with value: 2210265907.248649.\u001b[0m\n", + "bagging, val_score: 2193757572.841483: 100%|##########| 10/10 [00:23<00:00, 2.31s/it]\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 17%|#6 | 1/6 [00:01<00:08, 1.68s/it]\u001b[32m[I 2021-07-24 13:53:45,444]\u001b[0m Trial 37 finished with value: 2193757572.841483 and parameters: {'feature_fraction': 0.9159999999999999}. Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 33%|###3 | 2/6 [00:03<00:06, 1.68s/it]\u001b[32m[I 2021-07-24 13:53:47,130]\u001b[0m Trial 38 finished with value: 2193757572.841483 and parameters: {'feature_fraction': 0.852}. Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 50%|##### | 3/6 [00:05<00:05, 1.68s/it]\u001b[32m[I 2021-07-24 13:53:48,802]\u001b[0m Trial 39 finished with value: 2193757572.841483 and parameters: {'feature_fraction': 0.82}. 
Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 67%|######6 | 4/6 [00:06<00:03, 1.70s/it]\u001b[32m[I 2021-07-24 13:53:50,554]\u001b[0m Trial 40 finished with value: 2293115294.9762316 and parameters: {'feature_fraction': 0.9799999999999999}. Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 83%|########3 | 5/6 [00:08<00:01, 1.68s/it]\u001b[32m[I 2021-07-24 13:53:52,199]\u001b[0m Trial 41 finished with value: 2193757572.841483 and parameters: {'feature_fraction': 0.8839999999999999}. Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 100%|##########| 6/6 [00:10<00:00, 1.72s/it]\u001b[32m[I 2021-07-24 13:53:54,016]\u001b[0m Trial 42 finished with value: 2293115294.9762316 and parameters: {'feature_fraction': 0.948}. Best is trial 37 with value: 2193757572.841483.\u001b[0m\n", + "feature_fraction_stage2, val_score: 2193757572.841483: 100%|##########| 6/6 [00:10<00:00, 1.71s/it]\n", + "regularization_factors, val_score: 2193757505.927801: 5%|5 | 1/20 [00:01<00:33, 1.78s/it]\u001b[32m[I 2021-07-24 13:53:55,803]\u001b[0m Trial 43 finished with value: 2193757505.927801 and parameters: {'lambda_l1': 0.035530313749205525, 'lambda_l2': 1.7142996437624364e-05}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 10%|# | 2/20 [00:03<00:31, 1.77s/it]\u001b[32m[I 2021-07-24 13:53:57,544]\u001b[0m Trial 44 finished with value: 2223375534.1890984 and parameters: {'lambda_l1': 1.7561787606994837e-06, 'lambda_l2': 0.028131894820153703}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 15%|#5 | 3/20 [00:05<00:30, 1.80s/it]\u001b[32m[I 2021-07-24 13:53:59,412]\u001b[0m Trial 45 finished with value: 2247496509.5917783 and parameters: {'lambda_l1': 2.7262641755324635e-07, 'lambda_l2': 0.07409628972810856}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 20%|## | 4/20 [00:07<00:31, 1.95s/it]\u001b[32m[I 2021-07-24 13:54:01,725]\u001b[0m Trial 46 finished with value: 2226827669.753629 and parameters: {'lambda_l1': 8.386241389210335, 'lambda_l2': 2.5276971608749994}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 25%|##5 | 5/20 [00:09<00:28, 1.89s/it]\u001b[32m[I 2021-07-24 13:54:03,481]\u001b[0m Trial 47 finished with value: 2216429817.580567 and parameters: {'lambda_l1': 1.1041957573939113e-05, 'lambda_l2': 0.017450153863046828}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 30%|### | 6/20 [00:11<00:26, 1.92s/it]\u001b[32m[I 2021-07-24 13:54:05,458]\u001b[0m Trial 48 finished with value: 2203413700.676657 and parameters: {'lambda_l1': 2.2785459443579694, 'lambda_l2': 0.00206545335591311}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 35%|###5 | 7/20 [00:13<00:25, 1.93s/it]\u001b[32m[I 2021-07-24 13:54:07,430]\u001b[0m Trial 49 finished with value: 2193757572.7271867 and parameters: {'lambda_l1': 0.00039136395694252244, 'lambda_l2': 3.427541754951745e-08}. 
Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757505.927801: 40%|#### | 8/20 [00:15<00:23, 1.95s/it]\u001b[32m[I 2021-07-24 13:54:09,422]\u001b[0m Trial 50 finished with value: 2240547395.6341996 and parameters: {'lambda_l1': 0.00018872808872200846, 'lambda_l2': 0.5702426219534228}. Best is trial 43 with value: 2193757505.927801.\u001b[0m\n", + "regularization_factors, val_score: 2193757473.925522: 45%|####5 | 9/20 [00:17<00:20, 1.88s/it]\u001b[32m[I 2021-07-24 13:54:11,149]\u001b[0m Trial 51 finished with value: 2193757473.9255223 and parameters: {'lambda_l1': 5.115495660230786e-06, 'lambda_l2': 2.891511576151028e-05}. Best is trial 51 with value: 2193757473.9255223.\u001b[0m\n", + "regularization_factors, val_score: 2193757473.925522: 50%|##### | 10/20 [00:18<00:18, 1.87s/it]\u001b[32m[I 2021-07-24 13:54:12,975]\u001b[0m Trial 52 finished with value: 2209441211.88343 and parameters: {'lambda_l1': 6.709340304448373e-08, 'lambda_l2': 0.0033851983067145165}. Best is trial 51 with value: 2193757473.9255223.\u001b[0m\n", + "regularization_factors, val_score: 2193757473.925522: 55%|#####5 | 11/20 [00:21<00:17, 1.94s/it]\u001b[32m[I 2021-07-24 13:54:15,091]\u001b[0m Trial 53 finished with value: 2193757554.8642426 and parameters: {'lambda_l1': 0.00398313349668578, 'lambda_l2': 4.918044983674336e-06}. Best is trial 51 with value: 2193757473.9255223.\u001b[0m\n", + "regularization_factors, val_score: 2193757473.925522: 60%|###### | 12/20 [00:22<00:15, 1.89s/it]\u001b[32m[I 2021-07-24 13:54:16,857]\u001b[0m Trial 54 finished with value: 2193757527.2890315 and parameters: {'lambda_l1': 0.03174575050092051, 'lambda_l2': 1.121877835012437e-05}. Best is trial 51 with value: 2193757473.9255223.\u001b[0m\n", + "regularization_factors, val_score: 2193757461.792665: 65%|######5 | 13/20 [00:24<00:13, 1.88s/it]\u001b[32m[I 2021-07-24 13:54:18,728]\u001b[0m Trial 55 finished with value: 2193757461.7926645 and parameters: {'lambda_l1': 0.19245364133000292, 'lambda_l2': 2.098195966946619e-05}. Best is trial 55 with value: 2193757461.7926645.\u001b[0m\n", + "regularization_factors, val_score: 2193757461.792665: 70%|####### | 14/20 [00:26<00:11, 1.87s/it]\u001b[32m[I 2021-07-24 13:54:20,550]\u001b[0m Trial 56 finished with value: 2193757572.4410305 and parameters: {'lambda_l1': 1.0621410162724814e-08, 'lambda_l2': 1.1658751463777809e-07}. Best is trial 55 with value: 2193757461.7926645.\u001b[0m\n", + "regularization_factors, val_score: 2193757022.409628: 75%|#######5 | 15/20 [00:28<00:09, 1.84s/it]\u001b[32m[I 2021-07-24 13:54:22,314]\u001b[0m Trial 57 finished with value: 2193757022.4096284 and parameters: {'lambda_l1': 0.4708623853744175, 'lambda_l2': 0.00013251231491224775}. Best is trial 57 with value: 2193757022.4096284.\u001b[0m\n", + "regularization_factors, val_score: 2193757022.409628: 80%|######## | 16/20 [00:30<00:07, 1.81s/it]\u001b[32m[I 2021-07-24 13:54:24,053]\u001b[0m Trial 58 finished with value: 2193757471.257112 and parameters: {'lambda_l1': 0.48939023058146797, 'lambda_l2': 5.55788736346222e-07}. Best is trial 57 with value: 2193757022.4096284.\u001b[0m\n", + "regularization_factors, val_score: 2193756705.682960: 85%|########5 | 17/20 [00:31<00:05, 1.81s/it]\u001b[32m[I 2021-07-24 13:54:25,872]\u001b[0m Trial 59 finished with value: 2193756705.6829596 and parameters: {'lambda_l1': 0.2944813276784501, 'lambda_l2': 0.00023554829225716752}. 
Best is trial 59 with value: 2193756705.6829596.\u001b[0m\n",
+     "regularization_factors, val_score: 2193756705.682960: 90%|######### | 18/20 [00:33<00:03, 1.80s/it]\u001b[32m[I 2021-07-24 13:54:27,663]\u001b[0m Trial 60 finished with value: 2213959934.0753803 and parameters: {'lambda_l1': 5.791380633444603, 'lambda_l2': 0.00017616756434467052}. Best is trial 59 with value: 2193756705.6829596.\u001b[0m\n",
+     "regularization_factors, val_score: 2193756300.347055: 95%|#########5| 19/20 [00:35<00:01, 1.81s/it]\u001b[32m[I 2021-07-24 13:54:29,475]\u001b[0m Trial 61 finished with value: 2193756300.347055 and parameters: {'lambda_l1': 0.0038798669293550647, 'lambda_l2': 0.000370708788404359}. Best is trial 61 with value: 2193756300.347055.\u001b[0m\n",
+     "regularization_factors, val_score: 2193754225.665947: 100%|##########| 20/20 [00:37<00:00, 1.81s/it]\u001b[32m[I 2021-07-24 13:54:31,283]\u001b[0m Trial 62 finished with value: 2193754225.665947 and parameters: {'lambda_l1': 0.001623299893398886, 'lambda_l2': 0.0009761417290668266}. Best is trial 62 with value: 2193754225.665947.\u001b[0m\n",
+     "regularization_factors, val_score: 2193754225.665947: 100%|##########| 20/20 [00:37<00:00, 1.86s/it]\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 20%|## | 1/5 [00:01<00:07, 1.90s/it]\u001b[32m[I 2021-07-24 13:54:33,186]\u001b[0m Trial 63 finished with value: 2225388728.9240403 and parameters: {'min_child_samples': 10}. Best is trial 63 with value: 2225388728.9240403.\u001b[0m\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 40%|#### | 2/5 [00:03<00:05, 1.88s/it]\u001b[32m[I 2021-07-24 13:54:35,035]\u001b[0m Trial 64 finished with value: 2219135238.044885 and parameters: {'min_child_samples': 5}. Best is trial 64 with value: 2219135238.044885.\u001b[0m\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 60%|###### | 3/5 [00:06<00:04, 2.04s/it]\u001b[32m[I 2021-07-24 13:54:37,428]\u001b[0m Trial 65 finished with value: 2275374497.207612 and parameters: {'min_child_samples': 100}. Best is trial 64 with value: 2219135238.044885.\u001b[0m\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 80%|######## | 4/5 [00:08<00:01, 1.99s/it]\u001b[32m[I 2021-07-24 13:54:39,318]\u001b[0m Trial 66 finished with value: 2229247396.3587947 and parameters: {'min_child_samples': 25}. Best is trial 64 with value: 2219135238.044885.\u001b[0m\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 100%|##########| 5/5 [00:10<00:00, 2.00s/it]\u001b[32m[I 2021-07-24 13:54:41,353]\u001b[0m Trial 67 finished with value: 2274227159.90541 and parameters: {'min_child_samples': 50}. Best is trial 64 with value: 2219135238.044885.\u001b[0m\n",
+     "min_data_in_leaf, val_score: 2193754225.665947: 100%|##########| 5/5 [00:10<00:00, 2.01s/it]"
+    ]
+   },
+   {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "CPU times: user 2min 55s, sys: 8.33 s, total: 3min 3s\n",
+     "Wall time: 3min 4s\n"
+    ]
+   },
+   {
+    "name": "stderr",
+    "output_type": "stream",
    "text": [
-     "n': 0.5}. Best is trial 1 with value: 2012422177.233508.\u001b[0m\n",
-     "feature_fraction, val_score: 2012422177.233508: 43%|####2 | 3/7 [00:07<00:11, 2.76s/it]\u001b[32m[I 2021-05-01 16:56:19,694]\u001b[0m Trial 2 finished with value: 2070320819.099197 and parameters: {'feature_fraction': 0.6}. 
Best is trial 1 with value: 2012422177.233508.\u001b[0m\n", - "feature_fraction, val_score: 2012422177.233508: 57%|#####7 | 4/7 [00:10<00:08, 2.75s/it]\u001b[32m[I 2021-05-01 16:56:22,414]\u001b[0m Trial 3 finished with value: 2090738130.975806 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 1 with value: 2012422177.233508.\u001b[0m\n", - "feature_fraction, val_score: 2012422177.233508: 71%|#######1 | 5/7 [00:13<00:05, 2.67s/it]\u001b[32m[I 2021-05-01 16:56:24,909]\u001b[0m Trial 4 finished with value: 2041753467.8813415 and parameters: {'feature_fraction': 0.8}. Best is trial 1 with value: 2012422177.233508.\u001b[0m\n", - "feature_fraction, val_score: 2012422177.233508: 86%|########5 | 6/7 [00:15<00:02, 2.68s/it]\u001b[32m[I 2021-05-01 16:56:27,596]\u001b[0m Trial 5 finished with value: 2041753467.8813415 and parameters: {'feature_fraction': 0.7}. Best is trial 1 with value: 2012422177.233508.\u001b[0m\n", - "feature_fraction, val_score: 2012422177.233508: 100%|##########| 7/7 [00:18<00:00, 2.59s/it]\u001b[32m[I 2021-05-01 16:56:29,991]\u001b[0m Trial 6 finished with value: 2268739005.2074604 and parameters: {'feature_fraction': 0.4}. Best is trial 1 with value: 2012422177.233508.\u001b[0m\n", - "feature_fraction, val_score: 2012422177.233508: 100%|##########| 7/7 [00:18<00:00, 2.59s/it]\n", - "num_leaves, val_score: 2012422177.233508: 5%|5 | 1/20 [00:05<01:37, 5.15s/it]\u001b[32m[I 2021-05-01 16:56:35,147]\u001b[0m Trial 7 finished with value: 2101942667.8301136 and parameters: {'num_leaves': 100}. Best is trial 7 with value: 2101942667.8301136.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 10%|# | 2/20 [00:11<01:39, 5.53s/it]\u001b[32m[I 2021-05-01 16:56:41,578]\u001b[0m Trial 8 finished with value: 2116990487.8274357 and parameters: {'num_leaves': 170}. Best is trial 7 with value: 2101942667.8301136.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 15%|#5 | 3/20 [00:14<01:19, 4.65s/it]\u001b[32m[I 2021-05-01 16:56:44,174]\u001b[0m Trial 9 finished with value: 2068285393.500253 and parameters: {'num_leaves': 53}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 20%|## | 4/20 [00:25<01:48, 6.75s/it]\u001b[32m[I 2021-05-01 16:56:55,835]\u001b[0m Trial 10 finished with value: 2155721300.061022 and parameters: {'num_leaves': 247}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 25%|##5 | 5/20 [00:27<01:20, 5.36s/it]\u001b[32m[I 2021-05-01 16:56:57,939]\u001b[0m Trial 11 finished with value: 2110152521.9026961 and parameters: {'num_leaves': 14}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 30%|### | 6/20 [00:43<01:59, 8.55s/it]\u001b[32m[I 2021-05-01 16:57:13,922]\u001b[0m Trial 12 finished with value: 2155721300.061022 and parameters: {'num_leaves': 247}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 35%|###5 | 7/20 [00:53<01:53, 8.74s/it]\u001b[32m[I 2021-05-01 16:57:23,128]\u001b[0m Trial 13 finished with value: 2170705249.4392734 and parameters: {'num_leaves': 180}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 40%|#### | 8/20 [00:53<01:16, 6.37s/it]\u001b[32m[I 2021-05-01 16:57:23,950]\u001b[0m Trial 14 finished with value: 3322965157.380943 and parameters: {'num_leaves': 2}. 
Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 45%|####5 | 9/20 [01:00<01:09, 6.34s/it]\u001b[32m[I 2021-05-01 16:57:30,222]\u001b[0m Trial 15 finished with value: 2078188917.1665275 and parameters: {'num_leaves': 112}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 50%|##### | 10/20 [01:09<01:12, 7.26s/it]\u001b[32m[I 2021-05-01 16:57:39,631]\u001b[0m Trial 16 finished with value: 2149952453.251796 and parameters: {'num_leaves': 194}. Best is trial 9 with value: 2068285393.500253.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 55%|#####5 | 11/20 [01:12<00:53, 6.00s/it]\u001b[32m[I 2021-05-01 16:57:42,688]\u001b[0m Trial 17 finished with value: 2039014776.0863047 and parameters: {'num_leaves': 50}. Best is trial 17 with value: 2039014776.0863047.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 60%|###### | 12/20 [01:16<00:43, 5.39s/it]\u001b[32m[I 2021-05-01 16:57:46,660]\u001b[0m Trial 18 finished with value: 2028177421.7466378 and parameters: {'num_leaves': 70}. Best is trial 18 with value: 2028177421.7466378.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 65%|######5 | 13/20 [01:21<00:36, 5.21s/it]\u001b[32m[I 2021-05-01 16:57:51,453]\u001b[0m Trial 19 finished with value: 2083200978.2816963 and parameters: {'num_leaves': 72}. Best is trial 18 with value: 2028177421.7466378.\u001b[0m\n", - "num_leaves, val_score: 2012422177.233508: 70%|####### | 14/20 [01:29<00:37, 6.19s/it]\u001b[32m[I 2021-05-01 16:57:59,920]\u001b[0m Trial 20 finished with value: 2121588087.918161 and parameters: {'num_leaves': 129}. Best is trial 18 with value: 2028177421.7466378.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 75%|#######5 | 15/20 [01:33<00:27, 5.55s/it]\u001b[32m[I 2021-05-01 16:58:03,962]\u001b[0m Trial 21 finished with value: 1997587162.470951 and parameters: {'num_leaves': 43}. Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 80%|######## | 16/20 [01:36<00:18, 4.74s/it]\u001b[32m[I 2021-05-01 16:58:06,830]\u001b[0m Trial 22 finished with value: 2052604443.4670672 and parameters: {'num_leaves': 28}. Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 85%|########5 | 17/20 [01:43<00:15, 5.24s/it]\u001b[32m[I 2021-05-01 16:58:13,218]\u001b[0m Trial 23 finished with value: 2043471294.5650334 and parameters: {'num_leaves': 80}. Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 90%|######### | 18/20 [01:51<00:12, 6.21s/it]\u001b[32m[I 2021-05-01 16:58:21,714]\u001b[0m Trial 24 finished with value: 2134499770.7451386 and parameters: {'num_leaves': 138}. Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 95%|#########5| 19/20 [01:55<00:05, 5.53s/it]\u001b[32m[I 2021-05-01 16:58:25,660]\u001b[0m Trial 25 finished with value: 2044138761.5237503 and parameters: {'num_leaves': 42}. Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 100%|##########| 20/20 [02:00<00:00, 5.29s/it]\u001b[32m[I 2021-05-01 16:58:30,376]\u001b[0m Trial 26 finished with value: 2043471294.5650334 and parameters: {'num_leaves': 80}. 
Best is trial 21 with value: 1997587162.470951.\u001b[0m\n", - "num_leaves, val_score: 1997587162.470951: 100%|##########| 20/20 [02:00<00:00, 6.02s/it]\n", - "bagging, val_score: 1997587162.470951: 10%|# | 1/10 [00:03<00:31, 3.52s/it]\u001b[32m[I 2021-05-01 16:58:33,911]\u001b[0m Trial 27 finished with value: 2013108212.2667012 and parameters: {'bagging_fraction': 0.7107931665183529, 'bagging_freq': 4}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 20%|## | 2/10 [00:06<00:27, 3.49s/it]\u001b[32m[I 2021-05-01 16:58:37,305]\u001b[0m Trial 28 finished with value: 2115850610.4036384 and parameters: {'bagging_fraction': 0.7110669495016676, 'bagging_freq': 4}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 30%|### | 3/10 [00:10<00:25, 3.66s/it]\u001b[32m[I 2021-05-01 16:58:41,369]\u001b[0m Trial 29 finished with value: 2195082524.11466 and parameters: {'bagging_fraction': 0.4223130416728271, 'bagging_freq': 3}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 40%|#### | 4/10 [00:14<00:21, 3.67s/it]\u001b[32m[I 2021-05-01 16:58:45,049]\u001b[0m Trial 30 finished with value: 2040265763.438056 and parameters: {'bagging_fraction': 0.9997992429240515, 'bagging_freq': 7}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 50%|##### | 5/10 [00:18<00:18, 3.73s/it]\u001b[32m[I 2021-05-01 16:58:48,928]\u001b[0m Trial 31 finished with value: 2131241507.3480675 and parameters: {'bagging_fraction': 0.6896585879210911, 'bagging_freq': 6}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 60%|###### | 6/10 [00:21<00:14, 3.62s/it]\u001b[32m[I 2021-05-01 16:58:52,305]\u001b[0m Trial 32 finished with value: 2103907334.0925496 and parameters: {'bagging_fraction': 0.7164061602702391, 'bagging_freq': 1}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 70%|####### | 7/10 [00:26<00:11, 3.77s/it]\u001b[32m[I 2021-05-01 16:58:56,405]\u001b[0m Trial 33 finished with value: 2036444350.9989514 and parameters: {'bagging_fraction': 0.8892052985573371, 'bagging_freq': 4}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 80%|######## | 8/10 [00:30<00:07, 3.88s/it]\u001b[32m[I 2021-05-01 16:59:00,560]\u001b[0m Trial 34 finished with value: 2225281871.3367276 and parameters: {'bagging_fraction': 0.49661561085854733, 'bagging_freq': 2}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 90%|######### | 9/10 [00:34<00:03, 3.91s/it]\u001b[32m[I 2021-05-01 16:59:04,536]\u001b[0m Trial 35 finished with value: 2143704197.0784042 and parameters: {'bagging_fraction': 0.5655413899704534, 'bagging_freq': 5}. Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 100%|##########| 10/10 [00:37<00:00, 3.84s/it]\u001b[32m[I 2021-05-01 16:59:08,211]\u001b[0m Trial 36 finished with value: 2110700689.1702607 and parameters: {'bagging_fraction': 0.8700737972459625, 'bagging_freq': 6}. 
Best is trial 27 with value: 2013108212.2667012.\u001b[0m\n", - "bagging, val_score: 1997587162.470951: 100%|##########| 10/10 [00:37<00:00, 3.78s/it]\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 17%|#6 | 1/6 [00:02<00:13, 2.66s/it]\u001b[32m[I 2021-05-01 16:59:10,871]\u001b[0m Trial 37 finished with value: 2089539253.8077588 and parameters: {'feature_fraction': 0.58}. Best is trial 37 with value: 2089539253.8077588.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 33%|###3 | 2/6 [00:05<00:10, 2.62s/it]\u001b[32m[I 2021-05-01 16:59:13,403]\u001b[0m Trial 38 finished with value: 1997587162.470951 and parameters: {'feature_fraction': 0.484}. Best is trial 38 with value: 1997587162.470951.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 50%|##### | 3/6 [00:07<00:07, 2.64s/it]\u001b[32m[I 2021-05-01 16:59:16,077]\u001b[0m Trial 39 finished with value: 1997587162.470951 and parameters: {'feature_fraction': 0.516}. Best is trial 38 with value: 1997587162.470951.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 67%|######6 | 4/6 [00:10<00:05, 2.67s/it]\u001b[32m[I 2021-05-01 16:59:18,827]\u001b[0m Trial 40 finished with value: 2284254046.781229 and parameters: {'feature_fraction': 0.42}. Best is trial 38 with value: 1997587162.470951.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 83%|########3 | 5/6 [00:14<00:02, 2.93s/it]\u001b[32m[I 2021-05-01 16:59:22,357]\u001b[0m Trial 41 finished with value: 1997587162.470951 and parameters: {'feature_fraction': 0.5479999999999999}. Best is trial 38 with value: 1997587162.470951.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 100%|##########| 6/6 [00:17<00:00, 3.01s/it]\u001b[32m[I 2021-05-01 16:59:25,574]\u001b[0m Trial 42 finished with value: 1997587162.470951 and parameters: {'feature_fraction': 0.45199999999999996}. Best is trial 38 with value: 1997587162.470951.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1997587162.470951: 100%|##########| 6/6 [00:17<00:00, 2.89s/it]\n", - "regularization_factors, val_score: 1997587070.360476: 5%|5 | 1/20 [00:03<01:02, 3.31s/it]\u001b[32m[I 2021-05-01 16:59:28,890]\u001b[0m Trial 43 finished with value: 1997587070.3604763 and parameters: {'lambda_l1': 1.3325881401359536e-06, 'lambda_l2': 2.012676569064997e-05}. Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587070.360476: 10%|# | 2/20 [00:06<00:58, 3.27s/it]\u001b[32m[I 2021-05-01 16:59:32,081]\u001b[0m Trial 44 finished with value: 1997587071.8468173 and parameters: {'lambda_l1': 3.416658142750445e-07, 'lambda_l2': 1.971124403055093e-05}. Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587070.360476: 15%|#5 | 3/20 [00:09<00:55, 3.28s/it]\u001b[32m[I 2021-05-01 16:59:35,368]\u001b[0m Trial 45 finished with value: 1997587102.3779635 and parameters: {'lambda_l1': 3.267399616442553e-07, 'lambda_l2': 1.3087509647016092e-05}. Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587070.360476: 20%|## | 4/20 [00:13<00:52, 3.30s/it]\u001b[32m[I 2021-05-01 16:59:38,720]\u001b[0m Trial 46 finished with value: 1997587075.3923492 and parameters: {'lambda_l1': 2.682703999444416e-07, 'lambda_l2': 1.8913639824313343e-05}. 
Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587070.360476: 25%|##5 | 5/20 [00:16<00:49, 3.32s/it]\u001b[32m[I 2021-05-01 16:59:42,085]\u001b[0m Trial 47 finished with value: 1997587070.787969 and parameters: {'lambda_l1': 1.4561401674574448e-07, 'lambda_l2': 1.9976154048638757e-05}. Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587070.360476: 30%|### | 6/20 [00:19<00:46, 3.35s/it]\u001b[32m[I 2021-05-01 16:59:45,512]\u001b[0m Trial 48 finished with value: 1997587087.4850538 and parameters: {'lambda_l1': 2.9044467527482266e-07, 'lambda_l2': 1.6280849368362258e-05}. Best is trial 43 with value: 1997587070.3604763.\u001b[0m\n", - "regularization_factors, val_score: 1997587057.813578: 35%|###5 | 7/20 [00:23<00:43, 3.34s/it]\u001b[32m[I 2021-05-01 16:59:48,814]\u001b[0m Trial 49 finished with value: 1997587057.8135784 and parameters: {'lambda_l1': 2.736584478611428e-07, 'lambda_l2': 2.2832344774742773e-05}. Best is trial 49 with value: 1997587057.8135784.\u001b[0m\n", - "regularization_factors, val_score: 1997586977.666038: 40%|#### | 8/20 [00:26<00:39, 3.32s/it]\u001b[32m[I 2021-05-01 16:59:52,105]\u001b[0m Trial 50 finished with value: 1997586977.666038 and parameters: {'lambda_l1': 2.2552818901556212e-07, 'lambda_l2': 4.0459809426159216e-05}. Best is trial 50 with value: 1997586977.666038.\u001b[0m\n", - "regularization_factors, val_score: 1997586977.666038: 45%|####5 | 9/20 [00:29<00:36, 3.29s/it]\u001b[32m[I 2021-05-01 16:59:55,317]\u001b[0m Trial 51 finished with value: 1997587069.0728564 and parameters: {'lambda_l1': 2.269836233537227e-07, 'lambda_l2': 2.0400060529051817e-05}. Best is trial 50 with value: 1997586977.666038.\u001b[0m\n", - "regularization_factors, val_score: 1997586977.666038: 50%|##### | 10/20 [00:33<00:33, 3.32s/it]\u001b[32m[I 2021-05-01 16:59:58,719]\u001b[0m Trial 52 finished with value: 1997587038.1018682 and parameters: {'lambda_l1': 2.1190142795602203e-07, 'lambda_l2': 2.7161997048896454e-05}. Best is trial 50 with value: 1997586977.666038.\u001b[0m\n", - "regularization_factors, val_score: 1997586977.666038: 55%|#####5 | 11/20 [00:36<00:30, 3.43s/it]\u001b[32m[I 2021-05-01 17:00:02,410]\u001b[0m Trial 53 finished with value: 1997586992.6494768 and parameters: {'lambda_l1': 2.1728160447318185e-07, 'lambda_l2': 3.710639213958161e-05}. Best is trial 50 with value: 1997586977.666038.\u001b[0m\n", - "regularization_factors, val_score: 1996449931.514239: 60%|###### | 12/20 [00:40<00:27, 3.40s/it]\u001b[32m[I 2021-05-01 17:00:05,742]\u001b[0m Trial 54 finished with value: 1996449931.514239 and parameters: {'lambda_l1': 2.1748994754196613e-07, 'lambda_l2': 0.0001457763270993375}. Best is trial 54 with value: 1996449931.514239.\u001b[0m\n", - "regularization_factors, val_score: 1996449931.514239: 65%|######5 | 13/20 [00:44<00:26, 3.79s/it]\u001b[32m[I 2021-05-01 17:00:10,421]\u001b[0m Trial 55 finished with value: 2067232492.4956243 and parameters: {'lambda_l1': 1.024147819302013e-08, 'lambda_l2': 0.00295027742437926}. Best is trial 54 with value: 1996449931.514239.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 70%|####### | 14/20 [00:49<00:23, 3.91s/it]\u001b[32m[I 2021-05-01 17:00:14,622]\u001b[0m Trial 56 finished with value: 1996449532.606333 and parameters: {'lambda_l1': 0.0738445887576454, 'lambda_l2': 0.00022249854014829427}. 
Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 75%|#######5 | 15/20 [00:51<00:17, 3.50s/it]\u001b[32m[I 2021-05-01 17:00:17,173]\u001b[0m Trial 57 finished with value: 2030166678.6715233 and parameters: {'lambda_l1': 0.4206025220395843, 'lambda_l2': 0.0013715337299642163}. Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 80%|######## | 16/20 [00:54<00:13, 3.26s/it]\u001b[32m[I 2021-05-01 17:00:19,852]\u001b[0m Trial 58 finished with value: 2035151132.9680371 and parameters: {'lambda_l1': 0.04874741366424845, 'lambda_l2': 7.210205334409902}. Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 85%|########5 | 17/20 [00:56<00:08, 2.99s/it]\u001b[32m[I 2021-05-01 17:00:22,219]\u001b[0m Trial 59 finished with value: 2030171133.0539286 and parameters: {'lambda_l1': 3.325828713424872e-05, 'lambda_l2': 0.000613610913339345}. Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 90%|######### | 18/20 [00:58<00:05, 2.79s/it]\u001b[32m[I 2021-05-01 17:00:24,556]\u001b[0m Trial 60 finished with value: 1997587159.9270165 and parameters: {'lambda_l1': 1.122670575237426e-08, 'lambda_l2': 4.7679572155360673e-07}. Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449532.606333: 95%|#########5| 19/20 [01:01<00:02, 2.64s/it]\u001b[32m[I 2021-05-01 17:00:26,854]\u001b[0m Trial 61 finished with value: 1996449770.564637 and parameters: {'lambda_l1': 6.446613270805078e-06, 'lambda_l2': 0.00017942732358506184}. Best is trial 56 with value: 1996449532.606333.\u001b[0m\n", - "regularization_factors, val_score: 1996449418.529521: 100%|##########| 20/20 [01:03<00:00, 2.55s/it]\u001b[32m[I 2021-05-01 17:00:29,173]\u001b[0m Trial 62 finished with value: 1996449418.5295208 and parameters: {'lambda_l1': 8.868453484243689e-06, 'lambda_l2': 0.000252862182277996}. Best is trial 62 with value: 1996449418.5295208.\u001b[0m\n", - "regularization_factors, val_score: 1996449418.529521: 100%|##########| 20/20 [01:03<00:00, 3.18s/it]\n", - "min_data_in_leaf, val_score: 1996449418.529521: 20%|## | 1/5 [00:02<00:08, 2.15s/it]\u001b[32m[I 2021-05-01 17:00:31,325]\u001b[0m Trial 63 finished with value: 2035984658.8333156 and parameters: {'min_child_samples': 5}. Best is trial 63 with value: 2035984658.8333156.\u001b[0m\n", - "min_data_in_leaf, val_score: 1996449418.529521: 40%|#### | 2/5 [00:04<00:07, 2.34s/it]\u001b[32m[I 2021-05-01 17:00:34,112]\u001b[0m Trial 64 finished with value: 2047790552.496713 and parameters: {'min_child_samples': 50}. Best is trial 63 with value: 2035984658.8333156.\u001b[0m\n", - "min_data_in_leaf, val_score: 1996449418.529521: 60%|###### | 3/5 [00:07<00:04, 2.27s/it]\u001b[32m[I 2021-05-01 17:00:36,236]\u001b[0m Trial 65 finished with value: 2022941263.9641247 and parameters: {'min_child_samples': 10}. Best is trial 65 with value: 2022941263.9641247.\u001b[0m\n", - "min_data_in_leaf, val_score: 1996449418.529521: 80%|######## | 4/5 [00:09<00:02, 2.31s/it]\u001b[32m[I 2021-05-01 17:00:38,623]\u001b[0m Trial 66 finished with value: 2034136324.3695369 and parameters: {'min_child_samples': 25}. 
Best is trial 65 with value: 2022941263.9641247.\u001b[0m\n", - "min_data_in_leaf, val_score: 1996449418.529521: 100%|##########| 5/5 [00:12<00:00, 2.60s/it]\u001b[32m[I 2021-05-01 17:00:41,911]\u001b[0m Trial 67 finished with value: 2038947709.9319875 and parameters: {'min_child_samples': 100}. Best is trial 65 with value: 2022941263.9641247.\u001b[0m\n", - "min_data_in_leaf, val_score: 1996449418.529521: 100%|##########| 5/5 [00:12<00:00, 2.55s/it]CPU times: user 4min 10s, sys: 14.6 s, total: 4min 25s\n", - "Wall time: 4min 30s\n", "\n" ] } @@ -650,10 +716,10 @@ }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "Optuna LightGBM Tuner r2 = 0.8390948396448961\n" + "Optuna LightGBM Tuner r2 = 0.8454106958774709\n" ] } ], @@ -680,7 +746,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -728,92 +794,97 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 21, "metadata": { "tags": [] }, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ - "[flaml.automl: 07-06 11:11:09] {908} INFO - Evaluation method: cv\n", - "[flaml.automl: 07-06 11:11:09] {617} INFO - Using RepeatedKFold\n", - "[flaml.automl: 07-06 11:11:09] {929} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 07-06 11:11:09] {948} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n", - "[flaml.automl: 07-06 11:11:09] {1012} INFO - iteration 0, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:09] {1160} INFO - at 0.2s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", - "[flaml.automl: 07-06 11:11:09] {1012} INFO - iteration 1, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:09] {1160} INFO - at 0.4s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", - "[flaml.automl: 07-06 11:11:09] {1012} INFO - iteration 2, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:09] {1160} INFO - at 0.6s,\tbest my_lgbm's error=1.7536,\tbest my_lgbm's error=1.7536\n", - "[flaml.automl: 07-06 11:11:09] {1012} INFO - iteration 3, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:10] {1160} INFO - at 0.8s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 07-06 11:11:10] {1012} INFO - iteration 4, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:10] {1160} INFO - at 1.0s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 07-06 11:11:10] {1012} INFO - iteration 5, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:10] {1160} INFO - at 1.2s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 07-06 11:11:10] {1012} INFO - iteration 6, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:10] {1160} INFO - at 1.5s,\tbest my_lgbm's error=0.3159,\tbest my_lgbm's error=0.3159\n", - "[flaml.automl: 07-06 11:11:10] {1012} INFO - iteration 7, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:11] {1160} INFO - at 1.8s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", - "[flaml.automl: 07-06 11:11:11] {1012} INFO - iteration 8, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:11] {1160} INFO - at 2.1s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", - "[flaml.automl: 07-06 11:11:11] {1012} INFO - iteration 9, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:11] {1160} INFO - at 2.5s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's 
error=0.2073\n", - "[flaml.automl: 07-06 11:11:11] {1012} INFO - iteration 10, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:12] {1160} INFO - at 2.8s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", - "[flaml.automl: 07-06 11:11:12] {1012} INFO - iteration 11, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:12] {1160} INFO - at 3.0s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", - "[flaml.automl: 07-06 11:11:12] {1012} INFO - iteration 12, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:12] {1160} INFO - at 3.6s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 07-06 11:11:12] {1012} INFO - iteration 13, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:13] {1160} INFO - at 4.0s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 07-06 11:11:13] {1012} INFO - iteration 14, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:14] {1160} INFO - at 4.7s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 07-06 11:11:14] {1012} INFO - iteration 15, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:14] {1160} INFO - at 5.1s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 07-06 11:11:14] {1012} INFO - iteration 16, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:15] {1160} INFO - at 5.9s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:15] {1012} INFO - iteration 17, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:15] {1160} INFO - at 6.2s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:15] {1012} INFO - iteration 18, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:17] {1160} INFO - at 7.7s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:17] {1012} INFO - iteration 19, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:17] {1160} INFO - at 8.0s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:17] {1012} INFO - iteration 20, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:20] {1160} INFO - at 10.9s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:20] {1012} INFO - iteration 21, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:20] {1160} INFO - at 11.4s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 07-06 11:11:20] {1012} INFO - iteration 22, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:21] {1160} INFO - at 12.6s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 07-06 11:11:21] {1012} INFO - iteration 23, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:22] {1160} INFO - at 13.1s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 07-06 11:11:22] {1012} INFO - iteration 24, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:23] {1160} INFO - at 14.6s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 07-06 11:11:23] {1012} INFO - iteration 25, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:24] {1160} INFO - at 14.9s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 07-06 11:11:24] {1012} INFO - iteration 26, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:30] {1160} INFO - at 21.2s,\tbest my_lgbm's 
error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 07-06 11:11:30] {1012} INFO - iteration 27, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:33] {1160} INFO - at 24.5s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 07-06 11:11:33] {1012} INFO - iteration 28, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:41] {1160} INFO - at 32.3s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 07-06 11:11:41] {1012} INFO - iteration 29, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:11:42] {1160} INFO - at 33.5s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 07-06 11:11:42] {1012} INFO - iteration 30, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:12:15] {1160} INFO - at 66.4s,\tbest my_lgbm's error=0.1634,\tbest my_lgbm's error=0.1634\n", - "[flaml.automl: 07-06 11:12:15] {1012} INFO - iteration 31, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:12:20] {1160} INFO - at 71.1s,\tbest my_lgbm's error=0.1634,\tbest my_lgbm's error=0.1634\n", - "[flaml.automl: 07-06 11:12:20] {1012} INFO - iteration 32, current learner my_lgbm\n", - "[flaml.automl: 07-06 11:12:48] {1160} INFO - at 99.2s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 07-06 11:12:48] {1206} INFO - selected model: LGBMRegressor(colsample_bytree=0.7688482528052621,\n", - " learning_rate=0.1733433791081217, max_bin=512,\n", - " min_child_samples=98, n_estimators=1516, num_leaves=26,\n", - " objective=,\n", - " reg_alpha=0.026256980419803243, reg_lambda=1.5567350977212402,\n", - " subsample=0.94353743444674)\n", - "[flaml.automl: 07-06 11:12:48] {963} INFO - fit succeeded\n" + "[flaml.automl: 07-24 13:54:42] {912} INFO - Evaluation method: cv\n", + "[flaml.automl: 07-24 13:54:42] {616} INFO - Using RepeatedKFold\n", + "[flaml.automl: 07-24 13:54:42] {933} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 07-24 13:54:42] {952} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n", + "[flaml.automl: 07-24 13:54:42] {1018} INFO - iteration 0, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:42] {1173} INFO - at 0.3s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", + "[flaml.automl: 07-24 13:54:42] {1018} INFO - iteration 1, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:42] {1173} INFO - at 0.4s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", + "[flaml.automl: 07-24 13:54:42] {1018} INFO - iteration 2, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:42] {1173} INFO - at 0.6s,\tbest my_lgbm's error=1.7536,\tbest my_lgbm's error=1.7536\n", + "[flaml.automl: 07-24 13:54:42] {1018} INFO - iteration 3, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:42] {1173} INFO - at 0.8s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", + "[flaml.automl: 07-24 13:54:42] {1018} INFO - iteration 4, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:43] {1173} INFO - at 1.0s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", + "[flaml.automl: 07-24 13:54:43] {1018} INFO - iteration 5, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:43] {1173} INFO - at 1.1s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", + "[flaml.automl: 07-24 13:54:43] {1018} INFO - iteration 6, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:43] {1173} INFO - at 1.5s,\tbest my_lgbm's error=0.3159,\tbest my_lgbm's error=0.3159\n", + "[flaml.automl: 07-24 
13:54:43] {1018} INFO - iteration 7, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:43] {1173} INFO - at 1.9s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", + "[flaml.automl: 07-24 13:54:43] {1018} INFO - iteration 8, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:44] {1173} INFO - at 2.1s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", + "[flaml.automl: 07-24 13:54:44] {1018} INFO - iteration 9, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:44] {1173} INFO - at 2.5s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", + "[flaml.automl: 07-24 13:54:44] {1018} INFO - iteration 10, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:44] {1173} INFO - at 2.7s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", + "[flaml.automl: 07-24 13:54:44] {1018} INFO - iteration 11, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:45] {1173} INFO - at 2.9s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", + "[flaml.automl: 07-24 13:54:45] {1018} INFO - iteration 12, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:45] {1173} INFO - at 3.4s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", + "[flaml.automl: 07-24 13:54:45] {1018} INFO - iteration 13, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:45] {1173} INFO - at 3.7s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", + "[flaml.automl: 07-24 13:54:45] {1018} INFO - iteration 14, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:46] {1173} INFO - at 4.3s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", + "[flaml.automl: 07-24 13:54:46] {1018} INFO - iteration 15, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:46] {1173} INFO - at 4.6s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", + "[flaml.automl: 07-24 13:54:46] {1018} INFO - iteration 16, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:47] {1173} INFO - at 5.2s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:47] {1018} INFO - iteration 17, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:47] {1173} INFO - at 5.5s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:47] {1018} INFO - iteration 18, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:48] {1173} INFO - at 6.7s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:48] {1018} INFO - iteration 19, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:49] {1173} INFO - at 7.0s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:49] {1018} INFO - iteration 20, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:51] {1173} INFO - at 9.7s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:51] {1018} INFO - iteration 21, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:52] {1173} INFO - at 10.1s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", + "[flaml.automl: 07-24 13:54:52] {1018} INFO - iteration 22, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:53] {1173} INFO - at 11.0s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", + "[flaml.automl: 07-24 13:54:53] {1018} INFO - iteration 23, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:53] {1173} INFO - at 11.4s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", + 
"[flaml.automl: 07-24 13:54:53] {1018} INFO - iteration 24, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:54] {1173} INFO - at 12.5s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", + "[flaml.automl: 07-24 13:54:54] {1018} INFO - iteration 25, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:54:54] {1173} INFO - at 12.8s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", + "[flaml.automl: 07-24 13:54:54] {1018} INFO - iteration 26, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:00] {1173} INFO - at 18.5s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", + "[flaml.automl: 07-24 13:55:00] {1018} INFO - iteration 27, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:03] {1173} INFO - at 21.5s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", + "[flaml.automl: 07-24 13:55:03] {1018} INFO - iteration 28, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:10] {1173} INFO - at 28.2s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", + "[flaml.automl: 07-24 13:55:10] {1018} INFO - iteration 29, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:11] {1173} INFO - at 29.1s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", + "[flaml.automl: 07-24 13:55:11] {1018} INFO - iteration 30, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:37] {1173} INFO - at 55.3s,\tbest my_lgbm's error=0.1634,\tbest my_lgbm's error=0.1634\n", + "[flaml.automl: 07-24 13:55:37] {1018} INFO - iteration 31, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:55:50] {1173} INFO - at 68.6s,\tbest my_lgbm's error=0.1624,\tbest my_lgbm's error=0.1624\n", + "[flaml.automl: 07-24 13:55:50] {1018} INFO - iteration 32, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:56:13] {1173} INFO - at 90.9s,\tbest my_lgbm's error=0.1624,\tbest my_lgbm's error=0.1624\n", + "[flaml.automl: 07-24 13:56:13] {1018} INFO - iteration 33, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:56:31] {1173} INFO - at 109.9s,\tbest my_lgbm's error=0.1624,\tbest my_lgbm's error=0.1624\n", + "[flaml.automl: 07-24 13:56:31] {1018} INFO - iteration 34, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:56:41] {1173} INFO - at 119.3s,\tbest my_lgbm's error=0.1624,\tbest my_lgbm's error=0.1624\n", + "[flaml.automl: 07-24 13:56:41] {1018} INFO - iteration 35, current learner my_lgbm\n", + "[flaml.automl: 07-24 13:57:18] {1173} INFO - at 156.1s,\tbest my_lgbm's error=0.1624,\tbest my_lgbm's error=0.1624\n", + "[flaml.automl: 07-24 13:57:18] {1219} INFO - selected model: LGBMRegressor(colsample_bytree=0.7929174747127123,\n", + " learning_rate=0.10575205975801834, max_bin=128,\n", + " min_child_samples=128, n_estimators=754, num_leaves=710,\n", + " objective=,\n", + " reg_alpha=0.0009765625, reg_lambda=10.762106709995438)\n", + "[flaml.automl: 07-24 13:57:18] {969} INFO - fit succeeded\n" ] } ], @@ -821,7 +892,7 @@ "automl = AutoML()\n", "automl.add_learner(learner_name='my_lgbm', learner_class=MyLGBM)\n", "settings = {\n", - " \"time_budget\": 120, # total running time in seconds\n", + " \"time_budget\": 150, # total running time in seconds\n", " \"metric\": 'r2', # primary metrics for regression can be chosen from: ['mae','mse','r2']\n", " \"estimator_list\": ['my_lgbm',], # list of ML learners; we tune lightgbm in this example\n", " \"task\": 'regression', # task type \n", @@ -832,24 +903,35 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 22, "metadata": { "tags": [] }, "outputs": 
[ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "Best hyperparmeter config: {'n_estimators': 1516.0, 'num_leaves': 26.0, 'min_child_samples': 98.0, 'learning_rate': 0.1733433791081217, 'subsample': 0.94353743444674, 'log_max_bin': 10.0, 'colsample_bytree': 0.7688482528052621, 'reg_alpha': 0.026256980419803243, 'reg_lambda': 1.5567350977212402}\n", - "Best r2 on validation data: 0.8389\n", - "Training duration of best run: 28.05 s\n", - "Predicted labels [139034.72098791 254152.8814857 154250.52168576 ... 203253.92751711\n", - " 231764.04026937 282262.87207539]\n", - "True labels [136900. 241300. 200700. ... 160900. 227300. 265600.]\n", - "r2 = 0.8468233449743118\n", - "mse = 2024766616.448606\n", - "mae = 30028.40209024503\n" + "Best hyperparmeter config: {'n_estimators': 754, 'num_leaves': 710, 'min_child_samples': 128, 'learning_rate': 0.10575205975801834, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 0.7929174747127123, 'reg_alpha': 0.0009765625, 'reg_lambda': 10.762106709995438}\n", + "Best r2 on validation data: 0.8376\n", + "Training duration of best run: 13.28 s\n", + "Predicted labels [135768.25690639 246689.39399877 136637.13857269 ... 175212.47378055\n", + " 243756.5990978 271017.12074672]\n", + "True labels 14740 136900.0\n", + "10101 241300.0\n", + "20566 200700.0\n", + "2670 72500.0\n", + "15709 460000.0\n", + " ... \n", + "13132 121200.0\n", + "8228 137500.0\n", + "3948 160900.0\n", + "8522 227300.0\n", + "16798 265600.0\n", + "Name: median_house_value, Length: 5160, dtype: float64\n", + "r2 = 0.8459538207127344\n", + "mse = 2036260428.588182\n", + "mae = 30277.65301151835\n" ] } ], @@ -870,9 +952,12 @@ } ], "metadata": { + "interpreter": { + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" + }, "kernelspec": { - "name": "python3", - "display_name": "Python 3.8.10 64-bit ('py38': conda)" + "display_name": "Python 3.8.0 64-bit ('blend': conda)", + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -884,10 +969,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" - }, - "interpreter": { - "hash": "4502d015faca2560a557f35a41b6dd402f7fdfc08e843ae17a9c41947939f10c" + "version": "3.8.0" } }, "nbformat": 4, diff --git a/setup.py b/setup.py index 75ad32fc9..67a87345a 100644 --- a/setup.py +++ b/setup.py @@ -48,7 +48,7 @@ setuptools.setup( "coverage>=5.3", "xgboost<1.3", "rgf-python", - "optuna==2.3.0", + "optuna==2.8.0", "vowpalwabbit", "openml", "transformers==4.4.1", @@ -58,10 +58,10 @@ setuptools.setup( "azure-storage-blob", ], "blendsearch": [ - "optuna==2.3.0" + "optuna==2.8.0" ], "ray": [ - "ray[tune]==1.2.0", + "ray[tune]==1.4.1", "pyyaml<5.3.1", ], "azureml": [ @@ -74,7 +74,7 @@ setuptools.setup( "vowpalwabbit", ], "nlp": [ - "ray[tune]>=1.2.0", + "ray[tune]>=1.4.1", "transformers", "datasets==1.4.1", "tensorboardX<=2.2",
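For context on the removed and updated progress bars earlier in the notebook diff ("regularization_factors", "min_data_in_leaf", and the final "Optuna LightGBM Tuner r2" line): these come from Optuna's stepwise LightGBM tuner, which the notebook benchmarks FLAML against, and which the `optuna==2.8.0` pin above makes available. A minimal sketch of how such a run is driven, under stated assumptions: the dataset choice is not pinned down by this diff, but the `median_house_value` target in the later output suggests the OpenML "houses" task as a plausible stand-in.

.. code-block:: python

    # Sketch: Optuna's stepwise LightGBM tuner. lgb.train here wraps
    # LightGBMTuner, which tunes one hyperparameter group per step
    # (feature_fraction, num_leaves, ..., regularization_factors,
    # min_data_in_leaf) and reports a per-trial "val_score", matching
    # the progress bars in the notebook output above.
    import optuna.integration.lightgbm as lgb
    from sklearn.datasets import fetch_openml
    from sklearn.metrics import r2_score
    from sklearn.model_selection import train_test_split

    # Assumed dataset: OpenML "houses" (target: median_house_value)
    X, y = fetch_openml(name="houses", return_X_y=True, as_frame=False)
    X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=42)

    dtrain = lgb.Dataset(X_train, label=y_train)
    dval = lgb.Dataset(X_val, label=y_val)

    # "val_score" in the logs is the validation l2 (MSE), consistent with
    # the ~2e9 values reported for this target scale.
    params = {"objective": "regression", "metric": "l2", "verbosity": -1}
    model = lgb.train(params, dtrain, valid_sets=[dval])
    print("Optuna LightGBM Tuner r2 =", r2_score(y_val, model.predict(X_val)))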
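Similarly, the `my_lgbm` learner whose training logs and selected model change in the notebook hunks above is a custom FLAML estimator; the empty `objective=` in the `LGBMRegressor(...)` repr is where the custom objective function's memory address was printed (the angle-bracketed repr was lost from the notebook text and is left as-is). A minimal sketch of the registration pattern, assuming FLAML's `LGBMEstimator` base class from `flaml.model` and a hypothetical Huber-like objective; `my_loss_obj` is an illustrative name, while `MyLGBM`, `my_lgbm`, and the `settings` keys are taken verbatim from the diff.

.. code-block:: python

    import numpy as np
    from flaml import AutoML
    from flaml.model import LGBMEstimator
    from sklearn.datasets import fetch_openml
    from sklearn.model_selection import train_test_split

    # Same assumed dataset as in the previous sketch
    X, y = fetch_openml(name="houses", return_X_y=True, as_frame=False)
    X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=42)

    def my_loss_obj(y_true, y_pred):
        # Hypothetical custom objective: LightGBM's sklearn API expects a
        # callable returning (gradient, hessian) per sample.
        c = 0.5
        residual = y_pred - y_true
        grad = c * residual / (np.abs(residual) + c)
        hess = c ** 2 / (np.abs(residual) + c) ** 2
        return grad, hess

    class MyLGBM(LGBMEstimator):
        """LGBMEstimator wired to the custom objective above."""
        def __init__(self, **config):
            super().__init__(objective=my_loss_obj, **config)

    automl = AutoML()
    automl.add_learner(learner_name='my_lgbm', learner_class=MyLGBM)
    settings = {
        "time_budget": 150,            # seconds, as in the updated cell
        "metric": 'r2',
        "estimator_list": ['my_lgbm'],
        "task": 'regression',
    }
    automl.fit(X_train=X_train, y_train=y_train, **settings)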