diff --git a/lightgbmlss/logger.py b/lightgbmlss/logger.py
new file mode 100644
index 0000000..aa1ff04
--- /dev/null
+++ b/lightgbmlss/logger.py
@@ -0,0 +1,17 @@
+import logging
+
+
+class CustomLogger:
+    def __init__(self):
+        self.logger = logging.getLogger('lightgbm_custom')
+        self.logger.setLevel(logging.ERROR)
+
+    def info(self, message):
+        self.logger.info(message)
+
+    def warning(self, message):
+        # Suppress warnings by not doing anything
+        pass
+
+    def error(self, message):
+        self.logger.error(message)
diff --git a/lightgbmlss/model.py b/lightgbmlss/model.py
index 60f9aaf..9882cef 100644
--- a/lightgbmlss/model.py
+++ b/lightgbmlss/model.py
@@ -9,6 +9,9 @@
 import lightgbm as lgb
+
+from lightgbmlss.distributions.distribution_utils import DistributionClass
+from lightgbmlss.logger import CustomLogger
 from lightgbmlss.utils import *
 import optuna
 from optuna.samplers import TPESampler
@@ -47,6 +50,7 @@
     Tuple[Dataset, Dataset, Dict[str, Any]]
 ]
 
+lgb.register_logger(CustomLogger())
 
 class LightGBMLSS:
     """
@@ -59,7 +63,7 @@ class LightGBMLSS:
     start_values : np.ndarray
         Starting values for each distributional parameter.
     """
-    def __init__(self, dist):
+    def __init__(self, dist: DistributionClass):
         self.dist = dist                # Distribution object
         self.start_values = None        # Starting values for distributional parameters
 
@@ -79,9 +83,9 @@ def set_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
         """
         params_adj = {"num_class": self.dist.n_dist_param,
                       "metric": "None",
-                      "objective": "None",
                       "random_seed": 123,
-                      "verbose": -1}
+                      "verbose": -1,
+                      "objective": self.dist.objective_fn}
         params.update(params_adj)
         return params
 
@@ -171,7 +175,6 @@ def train(self,
         self.booster = lgb.train(params,
                                  train_set,
                                  num_boost_round=num_boost_round,
-                                 fobj=self.dist.objective_fn,
                                  feval=self.dist.metric_fn,
                                  valid_sets=valid_sets,
                                  valid_names=valid_names,
@@ -265,7 +268,6 @@ def cv(self,
 
         self.bstLSS_cv = lgb.cv(params,
                                 train_set,
-                                fobj=self.dist.objective_fn,
                                 feval=self.dist.metric_fn,
                                 num_boost_round=num_boost_round,
                                 folds=folds,
@@ -389,13 +391,13 @@ def objective(trial):
                 callbacks=[pruning_callback, early_stopping_callback],
                 seed=seed,
             )
 
             # Extract the optimal number of boosting rounds
-            opt_rounds = np.argmin(np.array(lgblss_param_tuning[f"{self.dist.loss_fn}-mean"])) + 1
+            opt_rounds = np.argmin(np.array(lgblss_param_tuning[f"valid {self.dist.loss_fn}-mean"])) + 1
             trial.set_user_attr("opt_round", int(opt_rounds))
 
             # Extract the best score
-            best_score = np.min(np.array(lgblss_param_tuning[f"{self.dist.loss_fn}-mean"]))
+            best_score = np.min(np.array(lgblss_param_tuning[f"valid {self.dist.loss_fn}-mean"]))
 
             return best_score
 
diff --git a/setup.py b/setup.py
index 4c30eac..e8368cf 100644
--- a/setup.py
+++ b/setup.py
@@ -16,20 +16,20 @@
     zip_safe=True,
     python_requires=">=3.9",
     install_requires=[
-        "lightgbm~=3.3.5",
-        "torch~=2.0.1",
-        "pyro-ppl~=1.8.5",
-        "optuna~=3.2.0",
+        "lightgbm~=4.1.0",
+        "torch~=2.1.1",
+        "pyro-ppl~=1.8.6",
+        "optuna~=3.4.0",
         "properscoring~=0.1",
-        "scikit-learn~=1.2.2",
-        "numpy~=1.24.3",
-        "pandas~=2.0.3",
-        "plotnine~=0.12.1",
-        "scipy~=1.11.1",
-        "seaborn~=0.12.2",
-        "tqdm~=4.65.0",
-        "matplotlib~=3.7.2",
-        "ipython~=8.14.0",
+        "scikit-learn~=1.3.2",
+        "numpy~=1.26.2",
+        "pandas~=2.1.3",
+        "plotnine~=0.12.4",
+        "scipy~=1.11.4",
+        "seaborn~=0.13.0",
+        "tqdm~=4.66.1",
+        "matplotlib~=3.8.2",
+        "ipython~=8.18.1",
     ],
     extras_require={
         "docs": ["mkdocs", "mkdocstrings[python]", "mkdocs-jupyter"]
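
Reviewer note, not part of the patch: lgb.register_logger() routes all of
LightGBM's console output through the supplied object, which only needs
callable info() and warning() methods. Because CustomLogger.warning() is a
no-op, warnings such as "No further splits with positive gain" are silenced
for the whole process. A reduced sketch of the mechanism; the class and
logger names below are illustrative, not from this repo:

    import logging

    import lightgbm as lgb

    class QuietLogger:
        # Same shape as the PR's CustomLogger: forward info, swallow warnings.
        def __init__(self):
            self.logger = logging.getLogger("lgb_quiet")  # any name works

        def info(self, message):
            self.logger.info(message)

        def warning(self, message):
            pass  # drop LightGBM warnings entirely

    lgb.register_logger(QuietLogger())  # global, like the PR does at import time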
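
Reviewer note, not part of the patch: the model.py changes track two LightGBM
4.0 API breaks. First, lgb.train() and lgb.cv() no longer accept fobj; a
custom objective is passed as a callable under params["objective"], which is
why set_params() now injects self.dist.objective_fn there. Second, the dict
returned by lgb.cv() prefixes metric names with "valid ", hence the
f"valid {self.dist.loss_fn}-mean" lookups. A minimal self-contained sketch of
both behaviors, with made-up data and function names (nothing below comes
from this repo):

    import lightgbm as lgb
    import numpy as np

    rng = np.random.default_rng(123)
    X = rng.normal(size=(500, 5))
    y = X[:, 0] + rng.normal(scale=0.1, size=500)
    train_set = lgb.Dataset(X, label=y)

    def squared_error_obj(preds, data):
        # Custom objective: gradient and hessian of 0.5 * (preds - y)^2.
        residual = preds - data.get_label()
        return residual, np.ones_like(residual)

    def rmse_feval(preds, data):
        # Custom metric: returns (name, value, is_higher_better).
        rmse = float(np.sqrt(np.mean((preds - data.get_label()) ** 2)))
        return "rmse_custom", rmse, False

    # LightGBM >= 4.0: the objective callable lives in params, while feval
    # remains a keyword argument of lgb.cv() / lgb.train().
    params = {"objective": squared_error_obj, "metric": "None", "verbose": -1}
    cv_res = lgb.cv(params, train_set, num_boost_round=10, nfold=3,
                    stratified=False, feval=rmse_feval, seed=123)

    # Eval-history keys now carry the "valid " prefix:
    # ['valid rmse_custom-mean', 'valid rmse_custom-stdv']
    print(sorted(cv_res))

The dropped debug print of the full tuning history inside objective(trial)
was removed here; it would spam the console once per Optuna trial.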