@@ -7,7 +7,7 @@ makeRLearner.regr.xgboost = function() {
7
7
# we pass all of what goes in 'params' directly to ... of xgboost
8
8
# makeUntypedLearnerParam(id = "params", default = list()),
9
9
makeDiscreteLearnerParam(id = " booster" , default = " gbtree" , values = c(" gbtree" , " gblinear" , " dart" )),
10
- makeIntegerLearnerParam (id = " silent " , default = 0L , tunable = FALSE ),
10
+ makeUntypedLearnerParam (id = " watchlist " , default = NULL , tunable = FALSE ),
11
11
makeNumericLearnerParam(id = " eta" , default = 0.3 , lower = 0 , upper = 1 ),
12
12
makeNumericLearnerParam(id = " gamma" , default = 0 , lower = 0 ),
13
13
makeIntegerLearnerParam(id = " max_depth" , default = 6L , lower = 1L ),
@@ -16,16 +16,17 @@ makeRLearner.regr.xgboost = function() {
16
16
makeNumericLearnerParam(id = " colsample_bytree" , default = 1 , lower = 0 , upper = 1 ),
17
17
makeNumericLearnerParam(id = " colsample_bylevel" , default = 1 , lower = 0 , upper = 1 ),
18
18
makeIntegerLearnerParam(id = " num_parallel_tree" , default = 1L , lower = 1L ),
19
- makeNumericLearnerParam(id = " lambda" , default = 0 , lower = 0 ),
19
+ makeNumericLearnerParam(id = " lambda" , default = 1 , lower = 0 ),
20
20
makeNumericLearnerParam(id = " lambda_bias" , default = 0 , lower = 0 ),
21
21
makeNumericLearnerParam(id = " alpha" , default = 0 , lower = 0 ),
22
22
makeUntypedLearnerParam(id = " objective" , default = " reg:linear" , tunable = FALSE ),
23
23
makeUntypedLearnerParam(id = " eval_metric" , default = " rmse" , tunable = FALSE ),
24
24
makeNumericLearnerParam(id = " base_score" , default = 0.5 , tunable = FALSE ),
25
-
25
+ makeNumericLearnerParam( id = " max_delta_step " , lower = 0 , default = 0 ),
26
26
makeNumericLearnerParam(id = " missing" , default = NULL , tunable = FALSE , when = " both" ,
27
27
special.vals = list (NA , NA_real_ , NULL )),
28
28
makeIntegerVectorLearnerParam(id = " monotone_constraints" , default = 0 , lower = - 1 , upper = 1 ),
29
+ makeNumericLearnerParam(id = " tweedie_variance_power" , lower = 1 , upper = 2 , default = 1.5 , requires = quote(objective == " reg:tweedie" )),
29
30
makeIntegerLearnerParam(id = " nthread" , lower = 1L , tunable = FALSE ),
30
31
makeIntegerLearnerParam(id = " nrounds" , default = 1L , lower = 1L ),
31
32
# FIXME nrounds seems to have no default in xgboost(), if it has 1, par.vals is redundant
@@ -35,9 +36,17 @@ makeRLearner.regr.xgboost = function() {
35
36
requires = quote(verbose == 1L )),
36
37
makeIntegerLearnerParam(id = " early_stopping_rounds" , default = NULL , lower = 1L , special.vals = list (NULL ), tunable = FALSE ),
37
38
makeLogicalLearnerParam(id = " maximize" , default = NULL , special.vals = list (NULL ), tunable = FALSE ),
39
+ makeDiscreteLearnerParam(id = " sample_type" , default = " uniform" , values = c(" uniform" , " weighted" ), requires = quote(booster == " dart" )),
38
40
makeDiscreteLearnerParam(id = " normalize_type" , default = " tree" , values = c(" tree" , " forest" ), requires = quote(booster == " dart" )),
39
41
makeNumericLearnerParam(id = " rate_drop" , default = 0 , lower = 0 , upper = 1 , requires = quote(booster == " dart" )),
40
- makeNumericLearnerParam(id = " skip_drop" , default = 0 , lower = 0 , upper = 1 , requires = quote(booster == " dart" ))
42
+ makeNumericLearnerParam(id = " skip_drop" , default = 0 , lower = 0 , upper = 1 , requires = quote(booster == " dart" )),
43
+ # TODO: uncomment the following after the next CRAN update, and set max_depth's lower = 0L
44
+ # makeLogicalLearnerParam(id = "one_drop", default = FALSE, requires = quote(booster == "dart")),
45
+ # makeDiscreteLearnerParam(id = "tree_method", default = "exact", values = c("exact", "hist"), requires = quote(booster != "gblinear")),
46
+ # makeDiscreteLearnerParam(id = "grow_policy", default = "depthwise", values = c("depthwise", "lossguide"), requires = quote(tree_method == "hist")),
47
+ # makeIntegerLearnerParam(id = "max_leaves", default = 0L, lower = 0L, requires = quote(grow_policy == "lossguide")),
48
+ # makeIntegerLearnerParam(id = "max_bin", default = 256L, lower = 2L, requires = quote(tree_method == "hist")),
49
+ makeUntypedLearnerParam(id = " callbacks" , default = list (), tunable = FALSE )
41
50
),
42
51
par.vals = list (nrounds = 1L , verbose = 0L ),
43
52
properties = c(" numerics" , " weights" , " featimp" , " missings" ),
@@ -52,16 +61,19 @@ makeRLearner.regr.xgboost = function() {
52
61
trainLearner.regr.xgboost = function(.learner, .task, .subset, .weights = NULL, ...) {
  # Train an xgboost regression model for mlr.
  # All hyperparameters arrive through '...' and are collected into a single
  # list that is handed to xgboost::xgb.train() via do.call().
  parlist = list(...)

  # Default objective for regression if the user did not set one.
  # NOTE(review): "reg:linear" was renamed "reg:squarederror" in xgboost >= 0.90;
  # kept as-is here to preserve behavior with the xgboost version this targets.
  if (is.null(parlist$objective))
    parlist$objective = "reg:linear"

  # Build the xgb.DMatrix once from the task's feature matrix and target vector
  # (avoids calling getTaskData() twice, once for data and once for the label).
  task.data = getTaskData(.task, .subset, target.extra = TRUE)
  parlist$data = xgboost::xgb.DMatrix(data = data.matrix(task.data$data),
    label = task.data$target)

  # Attach observation weights directly to the DMatrix; setinfo() modifies the
  # DMatrix handle in place, so no reassignment is needed.
  if (!is.null(.weights))
    xgboost::setinfo(parlist$data, "weight", .weights)

  # xgb.train() expects a watchlist; default to evaluating on the training set
  # unless the user supplied one via the "watchlist" learner parameter.
  if (is.null(parlist$watchlist))
    parlist$watchlist = list(train = parlist$data)

  # xgb.train() (rather than the xgboost() convenience wrapper) is used so the
  # watchlist, callbacks and early stopping parameters are all honored.
  do.call(xgboost::xgb.train, parlist)
}
66
78
67
79
# ' @export
0 commit comments