# Collected examples of LGB and XGB params from Kaggle and elsewhere that worked well, kept here to get a "methodical" feel for tuning.
# XGB Example Params
# =============================
params = {'eta': 0.09,
          'max_depth': 4,
          'objective': 'binary:logistic',
          'subsample': 0.8,
          'colsample_bytree': 0.8,
          'min_child_weight': 0.77,
          'scale_pos_weight': 1.6,
          'gamma': 10,
          'reg_alpha': 8,
          'reg_lambda': 1.3,
          'eval_metric': 'auc',
          'seed': 99,
          'silent': True}
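# A minimal usage sketch (not from the original notes), assuming only the params
# dict above: one way to hand it to the native xgb.train API. The toy data is a
# placeholder so the snippet runs end to end, and num_boost_round /
# early_stopping_rounds are illustrative values. 'silent' is a legacy flag that
# newer XGBoost replaces with 'verbosity' and will only warn about.
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(99)
X = rng.normal(size=(2000, 20))
y = (rng.random(2000) < 0.3).astype(int)

dtrain = xgb.DMatrix(X[:1600], label=y[:1600])
dvalid = xgb.DMatrix(X[1600:], label=y[1600:])

booster = xgb.train(params, dtrain,
                    num_boost_round=5000,
                    evals=[(dtrain, 'train'), (dvalid, 'valid')],
                    early_stopping_rounds=100,
                    verbose_eval=200)
valid_pred = booster.predict(dvalid)  # probabilities, since objective is binary:logistic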
oliver = {'alpha': [0, 0.01, 100],
          'booster': ['gbtree'],
          'colsample_bytree': [0.8, 0.6],
          'early_stopping': [50],
          'eta': [0.1],
          'eval_metric': ['mlogloss'],
          'feval': [xgb_gini],
          'gamma': [1, 100],
          'lambda': [1, 100],
          'max_depth': [4, 2, 6, 8, 10, 12],
          'maximize_feval': [True],
          'min_child_weight': [8],
          'num_class': [2],
          'num_estimators': [1000000000],
          'objective': ['multi:softprob'],
          'scale_pos_weight': [26],
          'silent': [0],
          'subsample': [0.8, 0.6]}
# One concrete param set drawn from the grid above:
{'gamma': 1, 'max_depth': 6, 'subsample': 0.8, 'feval': xgb_gini,
 'maximize_feval': True, 'min_child_weight': 8, 'booster': 'gbtree',
 'num_class': 2, 'colsample_bytree': 0.6, 'alpha': 0,
 'objective': 'multi:softprob', 'num_estimators': 1000000000, 'eta': 0.1,
 'early_stopping': 50, 'lambda': 100, 'eval_metric': 'mlogloss',
 'scale_pos_weight': 26, 'silent': 0}
# Another example set of tuned XGB values:
{'max_depth': 3.0, 'gamma': 9.0, 'min_child_weight': 12.0, 'max_delta_step': 2.0,
 'subsample': 0.8, 'colsample_bytree': 0.8}
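# Grid-expansion sketch (an assumption, not the original workflow): each value
# in `oliver` is a list of candidates, so sklearn's ParameterGrid can expand it
# into concrete param sets like the one printed above. It assumes a custom
# `xgb_gini` feval and the dtrain/dvalid DMatrix objects from the sketch above
# are in scope. Keys such as num_estimators / early_stopping / feval /
# maximize_feval are arguments to xgb.train rather than booster params, so they
# are popped out first; newer XGBoost renamed feval to custom_metric.
from sklearn.model_selection import ParameterGrid
import xgboost as xgb

def run_xgb_grid(grid, dtrain, dvalid):
    best_score, best_params = float('-inf'), None
    for cand in ParameterGrid(grid):
        cand = dict(cand)
        n_rounds = cand.pop('num_estimators')  # huge on purpose; early stopping ends training
        stop = cand.pop('early_stopping')
        feval = cand.pop('feval')
        maximize = cand.pop('maximize_feval')
        booster = xgb.train(cand, dtrain,
                            num_boost_round=n_rounds,
                            evals=[(dvalid, 'valid')],
                            feval=feval, maximize=maximize,
                            early_stopping_rounds=stop,
                            verbose_eval=False)
        if booster.best_score > best_score:
            best_score, best_params = booster.best_score, cand
    return best_params, best_score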
# LGB Example Params
# =============================
params = {'learning_rate': 0.02,
          'max_depth': 4,
          'boosting': 'gbdt',
          'objective': 'binary',
          'max_bin': 10,
          'subsample': 0.8,
          'subsample_freq': 10,
          'colsample_bytree': 0.8,
          'min_child_samples': 500,
          'metric': 'auc',
          'is_training_metric': False,
          'seed': 99}
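# A minimal usage sketch for the LGB params dict above, assuming nothing from
# the original notes beyond that dict: the toy data, round count and stopping
# rounds are placeholders. Older LightGBM versions take
# early_stopping_rounds=100 directly instead of the callback used here.
import numpy as np
import lightgbm as lgb

rng = np.random.default_rng(99)
X = rng.normal(size=(5000, 20))
y = (rng.random(5000) < 0.1).astype(int)

dtrain = lgb.Dataset(X[:4000], label=y[:4000])
dvalid = lgb.Dataset(X[4000:], label=y[4000:], reference=dtrain)

model = lgb.train(params, dtrain,
                  num_boost_round=5000,
                  valid_sets=[dvalid],
                  callbacks=[lgb.early_stopping(100), lgb.log_evaluation(200)])
valid_pred = model.predict(X[4000:], num_iteration=model.best_iteration)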
lgbparam = {'boosting_type': ['gbdt'],
            'num_estimators': [5000],
            'is_unbalance': [False],
            'num_leaves': [8, 4, 12, 16, 32, 256, 512],  # number of leaves in one tree
            'early_stopping': [100],
            'learning_rate': [0.05],
            'min_data_in_leaf': [25, 500],   # min_child_samples
            'use_missing': [True],           # -1
            'num_threads': [3],
            'objective': ['binary'],
            'feature_fraction': [0.8, 0.3],  # colsample_bytree
            'predict_raw_score': [False],
            'bagging_freq': [5, 10, 2],      # subsample_freq
            'bagging_fraction': [0.8, 0.7],  # subsample
            'lambda_l2': [0.0],
            'max_bin': [255, 10, 20],        # def 255
            'feval': [None],
            'maximize_feval': [True],
            'num_class': [1],
            'max_depth': [-1, 4, 6],         # def -1
            'min_hessian': [1e-3, 0.05],     # min_child_weight def 1e-3
            'eval_metric': [['binary_logloss', 'auc']]}
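# The same grid-expansion idea sketched for lgbparam, under the same
# assumptions as above (it reuses the lgb import and the toy X/y from the
# previous sketch). Keys that drive lgb.train rather than the booster get
# popped out, and 'eval_metric' is renamed to LightGBM's 'metric'. Fresh
# Dataset objects are built per candidate because binning params such as
# max_bin are fixed once a Dataset has been constructed. The full grid is
# roughly 3000 combinations, so in practice it would be subsampled.
from sklearn.model_selection import ParameterGrid

results = []
for cand in ParameterGrid(lgbparam):
    cand = dict(cand)
    n_rounds = cand.pop('num_estimators')
    stop = cand.pop('early_stopping')
    feval = cand.pop('feval')                 # None in this grid
    cand.pop('maximize_feval')                # LightGBM infers direction from the metric itself
    cand['metric'] = cand.pop('eval_metric')  # ['binary_logloss', 'auc']
    dtr = lgb.Dataset(X[:4000], label=y[:4000])
    dva = lgb.Dataset(X[4000:], label=y[4000:], reference=dtr)
    booster = lgb.train(cand, dtr,
                        num_boost_round=n_rounds,
                        valid_sets=[dva],
                        feval=feval,
                        callbacks=[lgb.early_stopping(stop, verbose=False)])
    results.append((booster.best_score['valid_0']['auc'], cand))

best_auc, best_cand = max(results, key=lambda r: r[0])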