/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*
* \note
* - desc and descl2 fields must be written in reStructuredText format;
* - nested sections can be placed only at the bottom of parent's section;
* - [no-automatically-extract]
* - do not automatically extract this parameter into a Config property with the same name in Config::GetMembersFromString(). Use if:
* - specialized extraction logic for this param exists in Config::GetMembersFromString()
* - [no-save]
* - this param should not be saved into a model text representation via Config::SaveMembersToString(). Use if:
* - param is only used by the CLI (especially the "predict" and "convert_model" tasks)
* - param is related to LightGBM writing files (e.g. "output_model", "save_binary")
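* - for illustration, a typical parameter entry below combines these tags, e.g. ``// check = >1`` and ``// desc = max number of leaves in one tree`` immediately above ``int num_leaves = kDefaultNumLeaves;`` (this example is copied from the ``num_leaves`` parameter further down in this file)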
*/
#ifndef LIGHTGBM_CONFIG_H_
#define LIGHTGBM_CONFIG_H_
#include <LightGBM/export.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <string>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace LightGBM {
/*! \brief Types of tasks */
enum TaskType {
kTrain, kPredict, kConvertModel, KRefitTree, kSaveBinary
};
const int kDefaultNumLeaves = 31;
struct Config {
public:
Config() {}
explicit Config(std::unordered_map<std::string, std::string> parameters_map) {
Set(parameters_map);
}
std::string ToString() const;
/*!
* \brief Get string value by specific name of key
* \param params Store the key and value for params
* \param name Name of key
* \param out Value will assign to out if key exists
* \return True if key exists
*/
inline static bool GetString(
const std::unordered_map<std::string, std::string>& params,
const std::string& name, std::string* out);
/*!
* \brief Get int value by specific name of key
* \param params Store the key and value for params
* \param name Name of key
* \param out Value will assign to out if key exists
* \return True if key exists
*/
inline static bool GetInt(
const std::unordered_map<std::string, std::string>& params,
const std::string& name, int* out);
/*!
* \brief Get double value by specific name of key
* \param params Store the key and value for params
* \param name Name of key
* \param out Value will assign to out if key exists
* \return True if key exists
*/
inline static bool GetDouble(
const std::unordered_map<std::string, std::string>& params,
const std::string& name, double* out);
/*!
* \brief Get bool value by specific name of key
* \param params Store the key and value for params
* \param name Name of key
* \param out Value will assign to out if key exists
* \return True if key exists
*/
inline static bool GetBool(
const std::unordered_map<std::string, std::string>& params,
const std::string& name, bool* out);
/*!
* \brief Sort aliases by length and then alphabetically
* \param x Alias 1
* \param y Alias 2
* \return true if x has higher priority than y
*/
inline static bool SortAlias(const std::string& x, const std::string& y);
static void KeepFirstValues(const std::unordered_map<std::string, std::vector<std::string>>& params, std::unordered_map<std::string, std::string>* out);
static void KV2Map(std::unordered_map<std::string, std::vector<std::string>>* params, const char* kv);
static void SetVerbosity(const std::unordered_map<std::string, std::vector<std::string>>& params);
static std::unordered_map<std::string, std::string> Str2Map(const char* parameters);
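// Usage sketch (illustrative only; assumes Str2Map() accepts a space-separated
// ``key=value`` parameter string, as in the C API, and relies only on the
// declarations above):
//
//   auto params = Config::Str2Map("num_leaves=63 learning_rate=0.05");
//   Config config(params);               // constructor calls Set(), which fills the typed members below
//   int num_leaves = kDefaultNumLeaves;
//   Config::GetInt(params, "num_leaves", &num_leaves);  // returns true and sets num_leaves if the key exists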
#ifndef __NVCC__
#pragma region Parameters
#pragma region Core Parameters
#endif // __NVCC__
// [no-automatically-extract]
// [no-save]
// alias = config_file
// desc = path of config file
// desc = **Note**: can be used only in CLI version
std::string config = "";
// [no-automatically-extract]
// [no-save]
// type = enum
// default = train
// options = train, predict, convert_model, refit
// alias = task_type
// desc = ``train``, for training, aliases: ``training``
// desc = ``predict``, for prediction, aliases: ``prediction``, ``test``
// desc = ``convert_model``, for converting model file into if-else format, see more information in `Convert Parameters <#convert-parameters>`__
// desc = ``refit``, for refitting existing models with new data, aliases: ``refit_tree``
// desc = ``save_binary``, load train (and validation) data then save dataset to binary file. Typical usage: ``save_binary`` first, then run multiple ``train`` tasks in parallel using the saved binary file
// desc = **Note**: can be used only in CLI version; for language-specific packages you can use the corresponding functions
TaskType task = TaskType::kTrain;
// [no-automatically-extract]
// [no-save]
// type = enum
// options = regression, regression_l1, huber, fair, poisson, quantile, mape, gamma, tweedie, binary, multiclass, multiclassova, cross_entropy, cross_entropy_lambda, lambdarank, rank_xendcg
// alias = objective_type, app, application, loss
// desc = regression application
// descl2 = ``regression``, L2 loss, aliases: ``regression_l2``, ``l2``, ``mean_squared_error``, ``mse``, ``l2_root``, ``root_mean_squared_error``, ``rmse``
// descl2 = ``regression_l1``, L1 loss, aliases: ``l1``, ``mean_absolute_error``, ``mae``
// descl2 = ``huber``, `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__
// descl2 = ``fair``, `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
// descl2 = ``poisson``, `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__
// descl2 = ``quantile``, `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
// descl2 = ``mape``, `MAPE loss <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`__, aliases: ``mean_absolute_percentage_error``
// descl2 = ``gamma``, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Occurrence_and_applications>`__
// descl2 = ``tweedie``, Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any target that might be `tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Occurrence_and_applications>`__
// desc = binary classification application
// descl2 = ``binary``, binary `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__ classification (or logistic regression)
// descl2 = requires labels in {0, 1}; see ``cross-entropy`` application for general probability labels in [0, 1]
// desc = multi-class classification application
// descl2 = ``multiclass``, `softmax <https://en.wikipedia.org/wiki/Softmax_function>`__ objective function, aliases: ``softmax``
// descl2 = ``multiclassova``, `One-vs-All <https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest>`__ binary objective function, aliases: ``multiclass_ova``, ``ova``, ``ovr``
// descl2 = ``num_class`` should be set as well
// desc = cross-entropy application
// descl2 = ``cross_entropy``, objective function for cross-entropy (with optional linear weights), aliases: ``xentropy``
// descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
// descl2 = label is anything in interval [0, 1]
// desc = ranking application
// descl2 = ``lambdarank``, `lambdarank <https://proceedings.neurips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Paper.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
// descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
// descl2 = ``rank_xendcg`` is faster than ``lambdarank`` and achieves similar performance
// descl2 = label should be ``int`` type, and a larger number represents higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
// desc = custom objective function (gradients and hessians not computed directly by LightGBM)
// descl2 = ``custom``
// descl2 = must be passed through parameters explicitly in the C API
// descl2 = **Note**: cannot be used in CLI version
std::string objective = "regression";
// [no-automatically-extract]
// [no-save]
// type = enum
// alias = boosting_type, boost
// options = gbdt, rf, dart
// desc = ``gbdt``, traditional Gradient Boosting Decision Tree, aliases: ``gbrt``
// desc = ``rf``, Random Forest, aliases: ``random_forest``
// desc = ``dart``, `Dropouts meet Multiple Additive Regression Trees <https://arxiv.org/abs/1505.01866>`__
// descl2 = **Note**: internally, LightGBM uses ``gbdt`` mode for the first ``1 / learning_rate`` iterations
std::string boosting = "gbdt";
// [no-automatically-extract]
// type = enum
// options = bagging, goss
// desc = ``bagging``, Randomly Bagging Sampling
// descl2 = **Note**: ``bagging`` is only effective when ``bagging_freq > 0`` and ``bagging_fraction < 1.0``
// desc = ``goss``, Gradient-based One-Side Sampling
// desc = *New in version 4.0.0*
std::string data_sample_strategy = "bagging";
// alias = train, train_data, train_data_file, data_filename
// desc = path of training data, LightGBM will train from this data
// desc = **Note**: can be used only in CLI version
std::string data = "";
// alias = test, valid_data, valid_data_file, test_data, test_data_file, valid_filenames
// default = ""
// desc = path(s) of validation/test data, LightGBM will output metrics for these data
// desc = support multiple validation data, separated by ``,``
// desc = **Note**: can be used only in CLI version
std::vector<std::string> valid;
// alias = num_iteration, n_iter, num_tree, num_trees, num_round, num_rounds, nrounds, num_boost_round, n_estimators, max_iter
// check = >=0
// desc = number of boosting iterations
// desc = **Note**: internally, LightGBM constructs ``num_class * num_iterations`` trees for multi-class classification problems
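// descl2 = for example, with ``num_class=3`` and ``num_iterations=100``, LightGBM will construct ``300`` trees in total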
int num_iterations = 100;
// alias = shrinkage_rate, eta
// check = >0.0
// desc = shrinkage rate
// desc = in ``dart``, it also affects the normalization weights of dropped trees
double learning_rate = 0.1;
// default = 31
// alias = num_leaf, max_leaves, max_leaf, max_leaf_nodes
// check = >1
// check = <=131072
// desc = max number of leaves in one tree
int num_leaves = kDefaultNumLeaves;
// [no-automatically-extract]
// [no-save]
// type = enum
// options = serial, feature, data, voting
// alias = tree, tree_type, tree_learner_type
// desc = ``serial``, single machine tree learner
// desc = ``feature``, feature parallel tree learner, aliases: ``feature_parallel``
// desc = ``data``, data parallel tree learner, aliases: ``data_parallel``
// desc = ``voting``, voting parallel tree learner, aliases: ``voting_parallel``
// desc = refer to `Distributed Learning Guide <./Parallel-Learning-Guide.rst>`__ to get more details
std::string tree_learner = "serial";
// alias = num_thread, nthread, nthreads, n_jobs
// desc = used only in ``train``, ``prediction`` and ``refit`` tasks or in the corresponding functions of language-specific packages
// desc = number of threads for LightGBM
// desc = ``0`` means default number of threads in OpenMP
// desc = for the best speed, set this to the number of **real CPU cores**, not the number of threads (most CPUs use `hyper-threading <https://en.wikipedia.org/wiki/Hyper-threading>`__ to generate 2 threads per CPU core)
// desc = do not set it too large if your dataset is small (for instance, do not use 64 threads for a dataset with 10,000 rows)
// desc = be aware that a task manager or any similar CPU monitoring tool might report cores not being fully utilized. **This is normal**
// desc = for distributed learning, do not use all CPU cores because this will cause poor performance for the network communication
// desc = **Note**: please **don't** change this during training, especially when running multiple jobs simultaneously by external packages, otherwise it may cause undesirable errors
int num_threads = 0;
// [no-automatically-extract]
// [no-save]
// type = enum
// options = cpu, gpu, cuda
// alias = device
// desc = device for the tree learning
// desc = ``cpu`` supports all LightGBM functionality and is portable across the widest range of operating systems and hardware
// desc = ``cuda`` offers faster training than ``gpu`` or ``cpu``, but only works on GPUs supporting CUDA
// desc = ``gpu`` can be faster than ``cpu`` and works on a wider range of GPUs than CUDA
// desc = **Note**: it is recommended to use a smaller ``max_bin`` (e.g. 63) to get a better speed-up
// desc = **Note**: for faster speed, the GPU uses 32-bit floating point numbers for summation by default, so this may affect the accuracy for some tasks. You can set ``gpu_use_dp=true`` to enable 64-bit floating point, but it will slow down the training
// desc = **Note**: refer to `Installation Guide <./Installation-Guide.rst>`__ to build LightGBM with GPU or CUDA support
std::string device_type = "cpu";
// [no-automatically-extract]
// alias = random_seed, random_state
// default = None
// desc = this seed is used to generate other seeds, e.g. ``data_random_seed``, ``feature_fraction_seed``, etc.
// desc = by default, this seed is unused in favor of default values of other seeds
// desc = this seed has lower priority in comparison with other seeds, which means that it will be overridden, if you set other seeds explicitly
int seed = 0;
// desc = used only with ``cpu`` device type
// desc = setting this to ``true`` should ensure stable results when using the same data and the same parameters (even with different ``num_threads``)
// desc = when you use different seeds, different LightGBM versions, binaries compiled by different compilers, or different systems, the results are expected to be different
// desc = you can `raise issues <https://github.com/microsoft/LightGBM/issues>`__ in the LightGBM GitHub repo if you encounter unstable results
// desc = **Note**: setting this to ``true`` may slow down the training
// desc = **Note**: to avoid potential instability due to numerical issues, please set ``force_col_wise=true`` or ``force_row_wise=true`` when setting ``deterministic=true``
bool deterministic = false;
#ifndef __NVCC__
#pragma endregion
#pragma region Learning Control Parameters
#endif // __NVCC__
// desc = used only with ``cpu`` device type
// desc = set this to ``true`` to force col-wise histogram building
// desc = enabling this is recommended when:
// descl2 = the number of columns is large, or the total number of bins is large
// descl2 = ``num_threads`` is large, e.g. ``> 20``
// descl2 = you want to reduce memory cost
// desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try both of them and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
// desc = **Note**: this parameter cannot be used at the same time with ``force_row_wise``, choose only one of them
bool force_col_wise = false;
// desc = used only with ``cpu`` device type
// desc = set this to ``true`` to force row-wise histogram building
// desc = enabling this is recommended when:
// descl2 = the number of data points is large, and the total number of bins is relatively small
// descl2 = ``num_threads`` is relatively small, e.g. ``<= 16``
// descl2 = you want to use small ``bagging_fraction`` or ``goss`` sample strategy to speed up
// desc = **Note**: setting this to ``true`` will double the memory cost for the Dataset object. If you do not have enough memory, you can try setting ``force_col_wise=true``
// desc = **Note**: when both ``force_col_wise`` and ``force_row_wise`` are ``false``, LightGBM will first try both of them and then use the faster one. To remove the overhead of testing, set the faster one to ``true`` manually
// desc = **Note**: this parameter cannot be used at the same time with ``force_col_wise``, choose only one of them
bool force_row_wise = false;
// alias = hist_pool_size
// desc = max cache size in MB for historical histogram
// desc = ``< 0`` means no limit
double histogram_pool_size = -1.0;
// desc = limit the max depth for tree model. This is used to deal with over-fitting when ``#data`` is small. Tree still grows leaf-wise
// desc = ``<= 0`` means no limit
int max_depth = -1;
// alias = min_data_per_leaf, min_data, min_child_samples, min_samples_leaf
// check = >=0
// desc = minimal number of data in one leaf. Can be used to deal with over-fitting
// desc = **Note**: this is an approximation based on the Hessian, so occasionally you may observe splits which produce leaf nodes that have less than this many observations
int min_data_in_leaf = 20;
// alias = min_sum_hessian_per_leaf, min_sum_hessian, min_hessian, min_child_weight
// check = >=0.0
// desc = minimal sum hessian in one leaf. Like ``min_data_in_leaf``, it can be used to deal with over-fitting
double min_sum_hessian_in_leaf = 1e-3;
// alias = sub_row, subsample, bagging
// check = >0.0
// check = <=1.0
// desc = like ``feature_fraction``, but this will randomly select part of data without resampling
// desc = can be used to speed up training
// desc = can be used to deal with over-fitting
// desc = **Note**: to enable bagging, ``bagging_freq`` should be set to a non-zero value as well
double bagging_fraction = 1.0;
// alias = pos_sub_row, pos_subsample, pos_bagging
// check = >0.0
// check = <=1.0
// desc = used only in ``binary`` application
// desc = used for imbalanced binary classification problem, will randomly sample ``#pos_samples * pos_bagging_fraction`` positive samples in bagging
// desc = should be used together with ``neg_bagging_fraction``
// desc = set this to ``1.0`` to disable
// desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``neg_bagging_fraction`` as well
// desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
// desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
double pos_bagging_fraction = 1.0;
// alias = neg_sub_row, neg_subsample, neg_bagging
// check = >0.0
// check = <=1.0
// desc = used only in ``binary`` application
// desc = used for imbalanced binary classification problem, will randomly sample ``#neg_samples * neg_bagging_fraction`` negative samples in bagging
// desc = should be used together with ``pos_bagging_fraction``
// desc = set this to ``1.0`` to disable
// desc = **Note**: to enable this, you need to set ``bagging_freq`` and ``pos_bagging_fraction`` as well
// desc = **Note**: if both ``pos_bagging_fraction`` and ``neg_bagging_fraction`` are set to ``1.0``, balanced bagging is disabled
// desc = **Note**: if balanced bagging is enabled, ``bagging_fraction`` will be ignored
double neg_bagging_fraction = 1.0;
// alias = subsample_freq
// desc = frequency for bagging
// desc = ``0`` means disable bagging; ``k`` means perform bagging every ``k`` iterations. At every ``k``-th iteration, LightGBM will randomly select ``bagging_fraction * 100%`` of the data to use for the next ``k`` iterations
// desc = **Note**: bagging is only effective when ``0.0 < bagging_fraction < 1.0``
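// desc = for example, ``bagging_freq=5`` with ``bagging_fraction=0.75`` means that at every 5th iteration a random 75% of the data is selected and used for the next 5 iterations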
int bagging_freq = 0;
// alias = bagging_fraction_seed
// desc = random seed for bagging
int bagging_seed = 3;
// desc = whether to do bagging sample by query
bool bagging_by_query = false;
// alias = sub_feature, colsample_bytree
// check = >0.0
// check = <=1.0
// desc = LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
// desc = can be used to speed up training
// desc = can be used to deal with over-fitting
double feature_fraction = 1.0;
// alias = sub_feature_bynode, colsample_bynode
// check = >0.0
// check = <=1.0
// desc = LightGBM will randomly select a subset of features on each tree node if ``feature_fraction_bynode`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features at each tree node
// desc = can be used to deal with over-fitting
// desc = **Note**: unlike ``feature_fraction``, this cannot speed up training
// desc = **Note**: if both ``feature_fraction`` and ``feature_fraction_bynode`` are smaller than ``1.0``, the final fraction of each node is ``feature_fraction * feature_fraction_bynode``
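// descl2 = for example, with ``feature_fraction=0.8`` and ``feature_fraction_bynode=0.5``, 40% of features will be considered at each tree node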
double feature_fraction_bynode = 1.0;
// desc = random seed for ``feature_fraction``
int feature_fraction_seed = 2;
// alias = extra_tree
// desc = use extremely randomized trees
// desc = if set to ``true``, when evaluating node splits LightGBM will check only one randomly-chosen threshold for each feature
// desc = can be used to speed up training
// desc = can be used to deal with over-fitting
bool extra_trees = false;
// desc = random seed for selecting thresholds when ``extra_trees`` is true
int extra_seed = 6;
// alias = early_stopping_rounds, early_stopping, n_iter_no_change
// desc = will stop training if one metric on one validation dataset doesn't improve in the last ``early_stopping_round`` rounds
// desc = ``<= 0`` means disable
// desc = can be used to speed up training
int early_stopping_round = 0;
// check = >=0.0
// desc = when early stopping is used (i.e. ``early_stopping_round > 0``), require the early stopping metric to improve by at least this delta to be considered an improvement
// desc = *New in version 4.4.0*
double early_stopping_min_delta = 0.0;
// desc = LightGBM allows you to provide multiple evaluation metrics. Set this to ``true`` if you want to use only the first metric for early stopping
bool first_metric_only = false;
// alias = max_tree_output, max_leaf_output
// desc = used to limit the max output of tree leaves
// desc = ``<= 0`` means no constraint
// desc = the final max output of leaves is ``learning_rate * max_delta_step``
double max_delta_step = 0.0;
// alias = reg_alpha, l1_regularization
// check = >=0.0
// desc = L1 regularization
double lambda_l1 = 0.0;
// alias = reg_lambda, lambda, l2_regularization
// check = >=0.0
// desc = L2 regularization
double lambda_l2 = 0.0;
// check = >=0.0
// desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
double linear_lambda = 0.0;
// alias = min_split_gain
// check = >=0.0
// desc = the minimal gain to perform split
// desc = can be used to speed up training
double min_gain_to_split = 0.0;
// alias = rate_drop
// check = >=0.0
// check = <=1.0
// desc = used only in ``dart``
// desc = dropout rate: a fraction of previous trees to drop during the dropout
double drop_rate = 0.1;
// desc = used only in ``dart``
// desc = max number of dropped trees during one boosting iteration
// desc = ``<=0`` means no limit
int max_drop = 50;
// check = >=0.0
// check = <=1.0
// desc = used only in ``dart``
// desc = probability of skipping the dropout procedure during a boosting iteration
double skip_drop = 0.5;
// desc = used only in ``dart``
// desc = set this to ``true`` if you want to use XGBoost DART mode
bool xgboost_dart_mode = false;
// desc = used only in ``dart``
// desc = set this to ``true`` if you want to use uniform drop
bool uniform_drop = false;
// desc = used only in ``dart``
// desc = random seed to choose dropping models
int drop_seed = 4;
// check = >=0.0
// check = <=1.0
// desc = used only in ``goss``
// desc = the retain ratio of large gradient data
double top_rate = 0.2;
// check = >=0.0
// check = <=1.0
// desc = used only in ``goss``
// desc = the retain ratio of small gradient data
double other_rate = 0.1;
// check = >0
// desc = used for the categorical features
// desc = minimal number of data per categorical group
int min_data_per_group = 100;
// check = >0
// desc = used for the categorical features
// desc = limit number of split points considered for categorical features. See `the documentation on how LightGBM finds optimal splits for categorical features <./Features.rst#optimal-split-for-categorical-features>`_ for more details
// desc = can be used to speed up training
int max_cat_threshold = 32;
// check = >=0.0
// desc = used for the categorical features
// desc = L2 regularization in categorical split
double cat_l2 = 10.0;
// check = >=0.0
// desc = used for the categorical features
// desc = this can reduce the effect of noise in categorical features, especially for categories with few data points
double cat_smooth = 10.0;
// check = >0
// desc = used for the categorical features
// desc = when the number of categories of one feature is smaller than or equal to ``max_cat_to_onehot``, the one-vs-other split algorithm will be used
int max_cat_to_onehot = 4;
// alias = topk
// check = >0
// desc = used only in ``voting`` tree learner, refer to `Voting parallel <./Parallel-Learning-Guide.rst#choose-appropriate-parallel-algorithm>`__
// desc = set this to a larger value for a more accurate result, but it will slow down the training speed
int top_k = 20;
// type = multi-int
// alias = mc, monotone_constraint, monotonic_cst
// default = None
// desc = used for constraints of monotonic features
// desc = ``1`` means increasing, ``-1`` means decreasing, ``0`` means non-constraint
// desc = you need to specify all features in order. For example, ``mc=-1,0,1`` means decreasing for the 1st feature, non-constraint for the 2nd feature and increasing for the 3rd feature
std::vector<int8_t> monotone_constraints;
// type = enum
// alias = monotone_constraining_method, mc_method
// options = basic, intermediate, advanced
// desc = used only if ``monotone_constraints`` is set
// desc = monotone constraints method
// descl2 = ``basic``, the most basic monotone constraints method. It does not slow down the training speed at all, but over-constrains the predictions
// descl2 = ``intermediate``, a `more advanced method <https://hal.science/hal-02862802/document>`__, which may slow down the training speed very slightly. However, this method is much less constraining than the basic method and should significantly improve the results
// descl2 = ``advanced``, an `even more advanced method <https://hal.science/hal-02862802/document>`__, which may slow down the training speed. However, this method is even less constraining than the intermediate method and should again significantly improve the results
std::string monotone_constraints_method = "basic";
// alias = monotone_splits_penalty, ms_penalty, mc_penalty
// check = >=0.0
// desc = used only if ``monotone_constraints`` is set
// desc = `monotone penalty <https://hal.science/hal-02862802/document>`__: a penalization parameter X forbids any monotone splits on the first X (rounded down) level(s) of the tree. The penalty applied to monotone splits at a given depth is a continuous, increasing function of the penalization parameter
// desc = if ``0.0`` (the default), no penalization is applied
double monotone_penalty = 0.0;
// type = multi-double
// alias = feature_contrib, fc, fp, feature_penalty
// default = None
// desc = used to control each feature's split gain; ``gain[i] = max(0, feature_contri[i]) * gain[i]`` will be used to replace the split gain of the i-th feature
// desc = you need to specify all features in order
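// desc = for example, ``feature_contri=0.5,2.0,1.0`` will halve the split gain of the 1st feature, double that of the 2nd feature, and leave the 3rd feature unchanged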
std::vector<double> feature_contri;
// alias = fs, forced_splits_filename, forced_splits_file, forced_splits
// desc = path to a ``.json`` file that specifies splits to force at the top of every decision tree before best-first learning commences
// desc = the ``.json`` file can be arbitrarily nested, and each split contains ``feature`` and ``threshold`` fields, as well as ``left`` and ``right`` fields representing subsplits
// desc = categorical splits are forced in a one-hot fashion, with ``left`` representing the split containing the feature value and ``right`` representing other values
// desc = **Note**: the forced split logic will be ignored if the split makes the gain worse
// desc = see `this file <https://github.com/microsoft/LightGBM/blob/master/examples/binary_classification/forced_splits.json>`__ as an example
std::string forcedsplits_filename = "";
// check = >=0.0
// check = <=1.0
// desc = decay rate of ``refit`` task, will use ``leaf_output = refit_decay_rate * old_leaf_output + (1.0 - refit_decay_rate) * new_leaf_output`` to refit trees
// desc = used only in ``refit`` task in CLI version or as argument in ``refit`` function in language-specific package
double refit_decay_rate = 0.9;
// check = >=0.0
// desc = cost-effective gradient boosting multiplier for all penalties
double cegb_tradeoff = 1.0;
// check = >=0.0
// desc = cost-effective gradient-boosting penalty for splitting a node
double cegb_penalty_split = 0.0;
// type = multi-double
// default = 0,0,...,0
// desc = cost-effective gradient boosting penalty for using a feature
// desc = applied per data point
std::vector<double> cegb_penalty_feature_lazy;
// type = multi-double
// default = 0,0,...,0
// desc = cost-effective gradient boosting penalty for using a feature
// desc = applied once per forest
std::vector<double> cegb_penalty_feature_coupled;
// check = >= 0.0
// desc = controls smoothing applied to tree nodes
// desc = helps prevent overfitting on leaves with few samples
// desc = if ``0.0`` (the default), no smoothing is applied
// desc = if ``path_smooth > 0`` then ``min_data_in_leaf`` must be at least ``2``
// desc = larger values give stronger regularization
// descl2 = the weight of each node is ``w * (n / path_smooth) / (n / path_smooth + 1) + w_p / (n / path_smooth + 1)``, where ``n`` is the number of samples in the node, ``w`` is the optimal node weight to minimise the loss (approximately ``-sum_gradients / sum_hessians``), and ``w_p`` is the weight of the parent node
// descl2 = note that the parent output ``w_p`` itself has smoothing applied, unless it is the root node, so that the smoothing effect accumulates with the tree depth
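// descl2 = for example, with ``path_smooth=10`` and ``n=20`` data points in a node, the smoothed node weight is ``(2/3) * w + (1/3) * w_p``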
double path_smooth = 0;
// desc = controls which features can appear in the same branch
// desc = by default interaction constraints are disabled; to enable them you can specify
// descl2 = for CLI, lists separated by commas, e.g. ``[0,1,2],[2,3]``
// descl2 = for Python-package, list of lists, e.g. ``[[0, 1, 2], [2, 3]]``
// descl2 = for R-package, list of character or numeric vectors, e.g. ``list(c("var1", "var2", "var3"), c("var3", "var4"))`` or ``list(c(1L, 2L, 3L), c(3L, 4L))``. Numeric vectors should use 1-based indexing, where ``1L`` is the first feature, ``2L`` is the second feature, etc.
// desc = any two features can appear in the same branch only if there exists a constraint containing both features
std::string interaction_constraints = "";
// alias = verbose
// desc = controls the level of LightGBM's verbosity
// desc = ``< 0``: Fatal, ``= 0``: Error (Warning), ``= 1``: Info, ``> 1``: Debug
int verbosity = 1;
// [no-save]
// alias = model_input, model_in
// desc = filename of input model
// desc = for ``prediction`` task, this model will be applied to prediction data
// desc = for ``train`` task, training will be continued from this model
// desc = **Note**: can be used only in CLI version
std::string input_model = "";
// [no-save]
// alias = model_output, model_out
// desc = filename of output model in training
// desc = **Note**: can be used only in CLI version
std::string output_model = "LightGBM_model.txt";
// desc = the feature importance type in the saved model file
// desc = ``0``: count-based feature importance (numbers of splits are counted); ``1``: gain-based feature importance (values of gain are counted)
// desc = **Note**: can be used only in CLI version
int saved_feature_importance_type = 0;
// [no-save]
// alias = save_period
// desc = frequency of saving model file snapshot
// desc = set this to a positive value to enable this function. For example, the model file will be snapshotted at each iteration if ``snapshot_freq=1``
// desc = **Note**: can be used only in CLI version
int snapshot_freq = -1;
// desc = whether to use gradient quantization when training
// desc = enabling this will discretize (quantize) the gradients and hessians into bins of ``num_grad_quant_bins``
// desc = with quantized training, most arithmetic in the training process will use integer operations
// desc = gradient quantization can accelerate training, with little accuracy drop in most cases
// desc = **Note**: works only with ``cpu`` and ``cuda`` device type
// desc = *New in version 4.0.0*
bool use_quantized_grad = false;
// desc = used only if ``use_quantized_grad=true``
// desc = number of bins used to quantize gradients and hessians
// desc = with more bins, the quantized training will be closer to full precision training
// desc = **Note**: works only with ``cpu`` and ``cuda`` device type
// desc = *New in version 4.0.0*
int num_grad_quant_bins = 4;
// desc = used only if ``use_quantized_grad=true``
// desc = whether to renew the leaf values with the original gradients when using quantized training
// desc = renewing is very helpful for achieving good accuracy with quantized training on ranking objectives
// desc = **Note**: works only with ``cpu`` and ``cuda`` device type
// desc = *New in version 4.0.0*
bool quant_train_renew_leaf = false;
// desc = used only if ``use_quantized_grad=true``
// desc = whether to use stochastic rounding in gradient quantization
// desc = **Note**: works only with ``cpu`` and ``cuda`` device type
// desc = *New in version 4.0.0*
bool stochastic_rounding = true;
#ifndef __NVCC__
#pragma endregion
#pragma region IO Parameters
#pragma region Dataset Parameters
#endif // __NVCC__
// alias = linear_trees
// desc = fit piecewise linear gradient boosting tree
// desc = tree splits are chosen in the usual way, but the model at each leaf is linear instead of constant
// desc = the linear model at each leaf includes all the numerical features in that leaf's branch
// desc = the first tree has constant leaf values
// desc = categorical features are used for splits as normal but are not used in the linear models
// desc = missing values should not be encoded as ``0``. Use ``np.nan`` for Python, ``NA`` for the CLI, and ``NA``, ``NA_real_``, or ``NA_integer_`` for R
// desc = it is recommended to rescale data before training so that features have similar mean and standard deviation
// desc = **Note**: works only with ``cpu`` and ``gpu`` device types and the ``serial`` tree learner
// desc = **Note**: ``regression_l1`` objective is not supported with linear tree boosting
// desc = **Note**: setting ``linear_tree=true`` significantly increases the memory use of LightGBM
// desc = **Note**: if you specify ``monotone_constraints``, constraints will be enforced when choosing the split points, but not when fitting the linear models on leaves
bool linear_tree = false;
// alias = max_bins
// check = >1
// desc = max number of bins that feature values will be bucketed in
// desc = a small number of bins may reduce training accuracy but may increase generalization power (deal with over-fitting)
// desc = LightGBM will automatically compress memory according to ``max_bin``. For example, LightGBM will use ``uint8_t`` for feature values if ``max_bin=255``
int max_bin = 255;
// type = multi-int
// default = None
// desc = max number of bins for each feature
// desc = if not specified, will use ``max_bin`` for all features
std::vector<int32_t> max_bin_by_feature;
// check = >0
// desc = minimal number of data inside one bin
// desc = use this to avoid one-data-one-bin (potential over-fitting)
int min_data_in_bin = 3;
// alias = subsample_for_bin
// check = >0
// desc = number of data points sampled to construct the feature discretization bins
// desc = setting this to a larger value will give a better training result, but may increase data loading time
// desc = set this to a larger value if the data is very sparse
// desc = **Note**: don't set this to small values; otherwise, you may encounter unexpected errors and poor accuracy
int bin_construct_sample_cnt = 200000;
// alias = data_seed
// desc = random seed for sampling data to construct histogram bins
int data_random_seed = 1;
// alias = is_sparse, enable_sparse, sparse
// desc = used to enable/disable sparse optimization
bool is_enable_sparse = true;
// alias = is_enable_bundle, bundle
// desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper_files/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html>`__
// desc = **Note**: disabling this may cause slow training speed for sparse datasets
bool enable_bundle = true;
// desc = set this to ``false`` to disable the special handling of missing values
bool use_missing = true;
// desc = set this to ``true`` to treat all zeros as missing values (including the unshown values in LibSVM / sparse matrices)
// desc = set this to ``false`` to use ``na`` for representing missing values
bool zero_as_missing = false;
// desc = set this to ``true`` (the default) to tell LightGBM to ignore the features that are unsplittable based on ``min_data_in_leaf``
// desc = as the Dataset object is initialized only once and cannot be changed afterwards, you may need to set this to ``false`` when searching over ``min_data_in_leaf`` values; otherwise, features will have been filtered by the initial ``min_data_in_leaf`` value if you don't reconstruct the Dataset object
// desc = **Note**: setting this to ``false`` may slow down the training
bool feature_pre_filter = true;
// alias = is_pre_partition
// desc = used for distributed learning (excluding the ``feature_parallel`` mode)
// desc = ``true`` if training data are pre-partitioned, and different machines use different partitions
bool pre_partition = false;
// alias = two_round_loading, use_two_round_loading
// desc = set this to ``true`` if the data file is too big to fit in memory
// desc = by default, LightGBM will map the data file to memory and load features from memory. This provides faster data loading, but may cause an out-of-memory error when the data file is very big
// desc = **Note**: works only in case of loading data directly from text file
bool two_round = false;
// alias = has_header
// desc = set this to ``true`` if the input data has a header
// desc = **Note**: works only in case of loading data directly from text file
bool header = false;
// type = int or string
// alias = label
// desc = used to specify the label column
// desc = use number for index, e.g. ``label=0`` means column\_0 is the label
// desc = add a prefix ``name:`` for column name, e.g. ``label=name:is_click``
// desc = if omitted, the first column in the training data is used as the label
// desc = **Note**: works only in case of loading data directly from text file
std::string label_column = "";
// type = int or string
// alias = weight
// desc = used to specify the weight column
// desc = use number for index, e.g. ``weight=0`` means column\_0 is the weight
// desc = add a prefix ``name:`` for column name, e.g. ``weight=name:weight``
// desc = **Note**: works only in case of loading data directly from text file
// desc = **Note**: the index starts from ``0`` and does not count the label column when the passed type is ``int``, e.g. when the label is column\_0 and the weight is column\_1, the correct parameter is ``weight=0``
// desc = **Note**: weights should be non-negative
std::string weight_column = "";
// type = int or string
// alias = group, group_id, query_column, query, query_id
// desc = used to specify the query/group id column
// desc = use number for index, e.g. ``query=0`` means column\_0 is the query id
// desc = add a prefix ``name:`` for column name, e.g. ``query=name:query_id``
// desc = **Note**: works only in case of loading data directly from text file
// desc = **Note**: data should be grouped by query\_id, for more information, see `Query Data <#query-data>`__
// desc = **Note**: the index starts from ``0`` and does not count the label column when the passed type is ``int``, e.g. when the label is column\_0 and the query\_id is column\_1, the correct parameter is ``query=0``
std::string group_column = "";
// type = multi-int or string
// alias = ignore_feature, blacklist
// desc = used to specify some ignoring columns in training
// desc = use number for index, e.g. ``ignore_column=0,1,2`` means column\_0, column\_1 and column\_2 will be ignored
// desc = add a prefix ``name:`` for column name, e.g. ``ignore_column=name:c1,c2,c3`` means c1, c2 and c3 will be ignored
// desc = **Note**: works only in case of loading data directly from text file
// desc = **Note**: the index starts from ``0`` and does not count the label column when the passed type is ``int``
// desc = **Note**: despite the fact that specified columns will be completely ignored during training, they should still have a valid format allowing LightGBM to load the file successfully
std::string ignore_column = "";
// type = multi-int or string
// alias = cat_feature, categorical_column, cat_column, categorical_features
// desc = used to specify categorical features
// desc = use number for index, e.g. ``categorical_feature=0,1,2`` means column\_0, column\_1 and column\_2 are categorical features
// desc = add a prefix ``name:`` for column name, e.g. ``categorical_feature=name:c1,c2,c3`` means c1, c2 and c3 are categorical features
// desc = **Note**: all values will be cast to ``int32`` (integer codes will be extracted from pandas categoricals in the Python-package)
// desc = **Note**: the index starts from ``0`` and does not count the label column when the passed type is ``int``
// desc = **Note**: all values should be less than ``Int32.MaxValue`` (2147483647)
// desc = **Note**: using large values could be memory consuming. The tree decision rule works best when categorical features are represented by consecutive integers starting from zero
// desc = **Note**: all negative values will be treated as **missing values**
// desc = **Note**: the output cannot be monotonically constrained with respect to a categorical feature
// desc = **Note**: floating point numbers in categorical features will be rounded towards 0
std::string categorical_feature = "";
// desc = path to a ``.json`` file that specifies bin upper bounds for some or all features
// desc = ``.json`` file should contain an array of objects, each containing the word ``feature`` (integer feature index) and ``bin_upper_bound`` (array of thresholds for binning)
// desc = see `this file <https://github.com/microsoft/LightGBM/blob/master/examples/regression/forced_bins.json>`__ as an example
std::string forcedbins_filename = "";
// [no-save]
// alias = is_save_binary, is_save_binary_file
// desc = if ``true``, LightGBM will save the dataset (including validation data) to a binary file. This speeds up data loading the next time
// desc = **Note**: ``init_score`` is not saved in binary file
// desc = **Note**: can be used only in CLI version; for language-specific packages you can use the corresponding function
bool save_binary = false;
// desc = use precise floating point number parsing for text parser (e.g. CSV, TSV, LibSVM input)
// desc = **Note**: setting this to ``true`` may lead to much slower text parsing
bool precise_float_parser = false;
// desc = path to a ``.json`` file that specifies the customized parser initialization configuration
// desc = see `lightgbm-transform <https://github.com/microsoft/lightgbm-transform>`__ for usage examples
// desc = **Note**: ``lightgbm-transform`` is not maintained by LightGBM's maintainers. Bug reports or feature requests should go to `issues page <https://github.com/microsoft/lightgbm-transform/issues>`__
// desc = *New in version 4.0.0*
std::string parser_config_file = "";
#ifndef __NVCC__
#pragma endregion
#pragma region Predict Parameters
#endif // __NVCC__
// [no-save]
// desc = used only in ``prediction`` task
// desc = used to specify from which iteration to start the prediction
// desc = ``<= 0`` means from the first iteration
int start_iteration_predict = 0;
// [no-save]
// desc = used only in ``prediction`` task
// desc = used to specify how many trained iterations will be used in prediction
// desc = ``<= 0`` means no limit
int num_iteration_predict = -1;
// [no-save]
// alias = is_predict_raw_score, predict_rawscore, raw_score
// desc = used only in ``prediction`` task
// desc = set this to ``true`` to predict only the raw scores
// desc = set this to ``false`` to predict transformed scores
bool predict_raw_score = false;
// [no-save]
// alias = is_predict_leaf_index, leaf_index
// desc = used only in ``prediction`` task
// desc = set this to ``true`` to predict with leaf index of all trees
bool predict_leaf_index = false;
// [no-save]
// alias = is_predict_contrib, contrib
// desc = used only in ``prediction`` task
// desc = set this to ``true`` to estimate `SHAP values <https://arxiv.org/abs/1706.06060>`__, which represent how each feature contributes to each prediction
// desc = produces ``#features + 1`` values where the last value is the expected value of the model output over the training data
// desc = **Note**: if you want to get more explanation for your model's predictions using SHAP values like SHAP interaction values, you can install `shap package <https://github.com/shap>`__
// desc = **Note**: unlike the shap package, with ``predict_contrib`` we return a matrix with an extra column, where the last column is the expected value
// desc = **Note**: this feature is not implemented for linear trees
bool predict_contrib = false;
// [no-save]
// desc = used only in ``prediction`` task
// desc = control whether or not LightGBM raises an error when you try to predict on data with a different number of features than the training data
// desc = if ``false`` (the default), a fatal error will be raised if the number of features in the dataset you predict on differs from the number seen during training
// desc = if ``true``, LightGBM will attempt to predict on whatever data you provide. This is dangerous because you might get incorrect predictions, but you could use it in situations where it is difficult or expensive to generate some features and you are very confident that they were never chosen for splits in the model
// desc = **Note**: be very careful setting this parameter to ``true``
bool predict_disable_shape_check = false;
// [no-save]
// desc = used only in ``prediction`` task
// desc = used only in ``classification`` and ``ranking`` applications
// desc = used only for predicting normal or raw scores
// desc = if ``true``, will use early-stopping to speed up the prediction. May affect the accuracy
// desc = **Note**: cannot be used with ``rf`` boosting type or custom objective function
bool pred_early_stop = false;
// [no-save]
// desc = used only in ``prediction`` task and if ``pred_early_stop=true``
// desc = the frequency of checking early-stopping prediction
int pred_early_stop_freq = 10;
// [no-save]
// desc = used only in ``prediction`` task and if ``pred_early_stop=true``
// desc = the threshold of margin in early-stopping prediction
double pred_early_stop_margin = 10.0;
// [no-save]
// alias = predict_result, prediction_result, predict_name, prediction_name, pred_name, name_pred
// desc = used only in ``prediction`` task
// desc = filename of prediction result
// desc = **Note**: can be used only in CLI version
std::string output_result = "LightGBM_predict_result.txt";
#ifndef __NVCC__
#pragma endregion
#pragma region Convert Parameters
#endif // __NVCC__
// [no-save]
// desc = used only in ``convert_model`` task
// desc = only ``cpp`` is supported so far; for converting the model to other languages, consider using the `m2cgen <https://github.com/BayesWitnesses/m2cgen>`__ utility
// desc = if ``convert_model_language`` is set and ``task=train``, the model will also be converted
// desc = **Note**: can be used only in CLI version
std::string convert_model_language = "";
// [no-save]
// alias = convert_model_file
// desc = used only in ``convert_model`` task
// desc = output filename of converted model
// desc = **Note**: can be used only in CLI version
std::string convert_model = "gbdt_prediction.cpp";
#ifndef __NVCC__
#pragma endregion
#pragma endregion
#pragma region Objective Parameters
#endif // __NVCC__
// desc = used only in ``rank_xendcg`` objective
// desc = random seed for objectives, if random process is needed
int objective_seed = 5;
// check = >0
// alias = num_classes
// desc = used only in ``multi-class`` classification application
int num_class = 1;
// alias = unbalance, unbalanced_sets
// desc = used only in ``binary`` and ``multiclassova`` applications
// desc = set this to ``true`` if training data are unbalanced
// desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
// desc = **Note**: this parameter cannot be used at the same time with ``scale_pos_weight``, choose only **one** of them
bool is_unbalance = false;
// check = >0.0
// desc = used only in ``binary`` and ``multiclassova`` applications
// desc = weight of labels with positive class
// desc = **Note**: while enabling this should increase the overall performance metric of your model, it will also result in poor estimates of the individual class probabilities
// desc = **Note**: this parameter cannot be used at the same time with ``is_unbalance``, choose only **one** of them
double scale_pos_weight = 1.0;
// check = >0.0
// desc = used only in ``binary`` and ``multiclassova`` classification and in ``lambdarank`` applications
// desc = parameter for the sigmoid function
double sigmoid = 1.0;
// desc = used only in ``regression``, ``binary``, ``multiclassova`` and ``cross-entropy`` applications
// desc = adjusts initial score to the mean of labels for faster convergence
bool boost_from_average = true;
// desc = used only in ``regression`` application
// desc = used to fit ``sqrt(label)`` instead of the original values; the prediction result will also be automatically converted to ``prediction^2``
// desc = might be useful in case of large-range labels
bool reg_sqrt = false;
// check = >0.0
// desc = used only in ``huber`` and ``quantile`` ``regression`` applications
// desc = parameter for `Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`__ and `Quantile regression <https://en.wikipedia.org/wiki/Quantile_regression>`__
double alpha = 0.9;
// check = >0.0
// desc = used only in ``fair`` ``regression`` application
// desc = parameter for `Fair loss <https://www.kaggle.com/c/allstate-claims-severity/discussion/24520>`__
double fair_c = 1.0;
// check = >0.0
// desc = used only in ``poisson`` ``regression`` application
// desc = parameter for `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__ to safeguard optimization
double poisson_max_delta_step = 0.7;
// check = >=1.0
// check = <2.0
// desc = used only in ``tweedie`` ``regression`` application
// desc = used to control the variance of the tweedie distribution
// desc = set this closer to ``2`` to shift towards a **Gamma** distribution
// desc = set this closer to ``1`` to shift towards a **Poisson** distribution
double tweedie_variance_power = 1.5;
// check = >0
// desc = used only in ``lambdarank`` application
// desc = controls the number of top-results to focus on during training, refer to "truncation level" in the Sec. 3 of `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
// desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim at optimizing the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
int lambdarank_truncation_level = 30;
// desc = used only in ``lambdarank`` application
// desc = set this to ``true`` to normalize the lambdas for different queries, and improve the performance for unbalanced data
// desc = set this to ``false`` to enforce the original lambdarank algorithm
bool lambdarank_norm = true;
// type = multi-double
// default = 0,1,3,7,15,31,63,...,2^30-1
// desc = used only in ``lambdarank`` application
// desc = relevant gain for labels. For example, the gain of label ``2`` is ``3`` in case of default label gains
// desc = separate by ``,``
std::vector<double> label_gain;
// check = >=0.0
// desc = used only in ``lambdarank`` application when positional information is provided and position bias is modeled
// desc = larger values reduce the inferred position bias factors
// desc = *New in version 4.1.0*
double lambdarank_position_bias_regularization = 0.0;