diff --git a/notebook/automl_classification.ipynb b/notebook/automl_classification.ipynb index 6ab5b8caee..bd7eb89c94 100644 --- a/notebook/automl_classification.ipynb +++ b/notebook/automl_classification.ipynb @@ -111,7 +111,7 @@ "outputs": [], "source": [ "settings = {\n", - " \"time_budget\": 120, # total running time in seconds\n", + " \"time_budget\": 600, # total running time in seconds\n", " \"metric\": 'accuracy', # can be: 'r2', 'rmse', 'mae', 'mse', 'accuracy', 'roc_auc', 'roc_auc_ovr',\n", " # 'roc_auc_ovo', 'log_loss', 'mape', 'f1', 'ap', 'ndcg', 'micro_f1', 'macro_f1'\n", " \"task\": 'classification', # task type\n", @@ -136,174 +136,331 @@ "name": "stderr", "output_type": "stream", "text": [ - "[flaml.automl: 03-25 15:14:37] {2092} INFO - task = classification\n", - "[flaml.automl: 03-25 15:14:37] {2094} INFO - Data split method: stratified\n", - "[flaml.automl: 03-25 15:14:37] {2098} INFO - Evaluation method: holdout\n", - "[flaml.automl: 03-25 15:14:38] {2175} INFO - Minimizing error metric: 1-accuracy\n", - "[flaml.automl: 03-25 15:14:38] {2268} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'lrl1']\n", - "[flaml.automl: 03-25 15:14:38] {2554} INFO - iteration 0, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:38] {2684} INFO - Estimated sufficient time budget=20195s. Estimated necessary time budget=496s.\n", - "[flaml.automl: 03-25 15:14:38] {2731} INFO - at 0.6s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", - "[flaml.automl: 03-25 15:14:38] {2554} INFO - iteration 1, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:38] {2731} INFO - at 0.6s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", - "[flaml.automl: 03-25 15:14:38] {2554} INFO - iteration 2, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:38] {2731} INFO - at 0.7s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:38] {2554} INFO - iteration 3, current learner xgboost\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:14:39] {2731} INFO - at 1.9s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:39] {2554} INFO - iteration 4, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:39] {2731} INFO - at 2.1s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:39] {2554} INFO - iteration 5, current learner extra_tree\n", - "[flaml.automl: 03-25 15:14:39] {2731} INFO - at 2.2s,\testimator extra_tree's best error=0.3784,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:39] {2554} INFO - iteration 6, current learner rf\n", - "[flaml.automl: 03-25 15:14:39] {2731} INFO - at 2.4s,\testimator rf's best error=0.3835,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:39] {2554} INFO - iteration 7, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 2.4s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 2.6s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 9, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 2.7s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 2.8s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 11, current learner rf\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 3.0s,\testimator rf's best error=0.3785,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 12, current learner rf\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 3.2s,\testimator rf's best error=0.3785,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 13, current learner extra_tree\n", - "[flaml.automl: 03-25 15:14:40] {2731} INFO - at 3.4s,\testimator extra_tree's best error=0.3784,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:40] {2554} INFO - iteration 14, current learner xgboost\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:14:42] {2731} INFO - at 4.7s,\testimator xgboost's best error=0.3765,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:42] {2554} INFO - iteration 15, current learner extra_tree\n", - "[flaml.automl: 03-25 15:14:42] {2731} INFO - at 4.9s,\testimator extra_tree's best error=0.3749,\tbest estimator lgbm's best error=0.3614\n", - "[flaml.automl: 03-25 15:14:42] {2554} INFO - iteration 16, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:42] {2731} INFO - at 5.1s,\testimator lgbm's best error=0.3550,\tbest estimator lgbm's best error=0.3550\n", - "[flaml.automl: 03-25 15:14:42] {2554} INFO - iteration 17, current learner extra_tree\n", - "[flaml.automl: 03-25 15:14:42] {2731} INFO - at 5.3s,\testimator extra_tree's best error=0.3749,\tbest estimator lgbm's best error=0.3550\n", - "[flaml.automl: 03-25 15:14:42] {2554} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:43] {2731} INFO - at 5.5s,\testimator lgbm's best error=0.3550,\tbest estimator lgbm's best error=0.3550\n", - "[flaml.automl: 03-25 15:14:43] {2554} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:43] {2731} INFO - at 5.9s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", - "[flaml.automl: 03-25 15:14:43] {2554} INFO - iteration 20, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:43] {2731} INFO - at 6.1s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", - "[flaml.automl: 03-25 15:14:43] {2554} INFO - iteration 21, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:44] {2731} INFO - at 6.8s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", - "[flaml.automl: 03-25 15:14:44] {2554} INFO - iteration 22, current learner catboost\n", - "[flaml.automl: 03-25 15:14:45] {2731} INFO - at 7.6s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3545\n", - "[flaml.automl: 03-25 15:14:45] {2554} INFO - iteration 23, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:45] {2731} INFO - at 7.9s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:45] {2554} INFO - iteration 24, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:45] {2731} INFO - at 8.3s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:45] {2554} INFO - iteration 25, current learner catboost\n", - "[flaml.automl: 03-25 15:14:46] {2731} INFO - at 8.5s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:46] {2554} INFO - iteration 26, current learner extra_tree\n", - "[flaml.automl: 03-25 15:14:46] {2731} INFO - at 8.8s,\testimator extra_tree's best error=0.3749,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:46] {2554} INFO - iteration 27, current learner lgbm\n", - "[flaml.automl: 03-25 15:14:47] {2731} INFO - at 9.6s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:47] {2554} INFO - iteration 28, current learner catboost\n", - "[flaml.automl: 03-25 15:14:48] {2731} INFO - at 10.6s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:48] {2554} INFO - iteration 29, current learner 
catboost\n", - "[flaml.automl: 03-25 15:14:49] {2731} INFO - at 12.1s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", - "[flaml.automl: 03-25 15:14:49] {2554} INFO - iteration 30, current learner catboost\n", - "[flaml.automl: 03-25 15:14:51] {2731} INFO - at 14.0s,\testimator catboost's best error=0.3487,\tbest estimator catboost's best error=0.3487\n", - "[flaml.automl: 03-25 15:14:51] {2554} INFO - iteration 31, current learner xgb_limitdepth\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:14:52] {2731} INFO - at 15.4s,\testimator xgb_limitdepth's best error=0.3667,\tbest estimator catboost's best error=0.3487\n", - "[flaml.automl: 03-25 15:14:52] {2554} INFO - iteration 32, current learner rf\n", - "[flaml.automl: 03-25 15:14:53] {2731} INFO - at 15.5s,\testimator rf's best error=0.3785,\tbest estimator catboost's best error=0.3487\n", - "[flaml.automl: 03-25 15:14:53] {2554} INFO - iteration 33, current learner rf\n", - "[flaml.automl: 03-25 15:14:53] {2731} INFO - at 15.7s,\testimator rf's best error=0.3771,\tbest estimator catboost's best error=0.3487\n", - "[flaml.automl: 03-25 15:14:53] {2554} INFO - iteration 34, current learner catboost\n", - "[flaml.automl: 03-25 15:14:54] {2731} INFO - at 16.8s,\testimator catboost's best error=0.3477,\tbest estimator catboost's best error=0.3477\n", - "[flaml.automl: 03-25 15:14:54] {2554} INFO - iteration 35, current learner xgb_limitdepth\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:14:55] {2731} INFO - at 18.2s,\testimator xgb_limitdepth's best error=0.3667,\tbest estimator catboost's best error=0.3477\n", - "[flaml.automl: 03-25 15:14:55] {2554} INFO - iteration 36, current learner catboost\n", - "[flaml.automl: 03-25 15:15:01] {2731} INFO - at 23.9s,\testimator catboost's best error=0.3477,\tbest estimator catboost's best error=0.3477\n", - "[flaml.automl: 03-25 15:15:01] {2554} INFO - iteration 37, current learner xgb_limitdepth\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:15:02] {2731} INFO - at 25.1s,\testimator xgb_limitdepth's best error=0.3667,\tbest estimator catboost's best error=0.3477\n", - "[flaml.automl: 03-25 15:15:02] {2554} INFO - iteration 38, current learner xgb_limitdepth\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:15:03] {2731} INFO - at 26.1s,\testimator xgb_limitdepth's best error=0.3654,\tbest estimator catboost's best error=0.3477\n", - "[flaml.automl: 03-25 15:15:03] {2554} INFO - iteration 39, current learner catboost\n", - "[flaml.automl: 03-25 15:15:06] {2731} INFO - at 28.7s,\testimator catboost's best error=0.3396,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:06] {2554} INFO - iteration 40, current learner xgboost\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:15:07] {2731} INFO - at 29.7s,\testimator xgboost's best error=0.3765,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:07] {2554} INFO - iteration 41, current learner catboost\n", - "[flaml.automl: 03-25 15:15:58] {2731} INFO - at 81.0s,\testimator catboost's best error=0.3396,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:58] {2554} INFO - iteration 42, current learner lrl1\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:352: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", - " warnings.warn(\n", - "[flaml.automl: 03-25 15:15:58] {2731} INFO - at 81.3s,\testimator lrl1's best error=0.4339,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:58] {2554} INFO - iteration 43, current learner lrl1\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:352: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", - " warnings.warn(\n", - "[flaml.automl: 03-25 15:15:59] {2731} INFO - at 81.6s,\testimator lrl1's best error=0.4338,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:59] {2554} INFO - iteration 44, current learner extra_tree\n", - "[flaml.automl: 03-25 15:15:59] {2731} INFO - at 81.8s,\testimator extra_tree's best error=0.3749,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:15:59] {2554} INFO - iteration 45, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:00] {2731} INFO - at 83.2s,\testimator lgbm's best error=0.3528,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:00] {2554} INFO - iteration 46, current learner extra_tree\n", - "[flaml.automl: 03-25 15:16:01] {2731} INFO - at 83.4s,\testimator extra_tree's best error=0.3749,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:01] {2554} INFO - iteration 47, current learner lrl1\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:352: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", - " warnings.warn(\n", - "[flaml.automl: 03-25 15:16:01] {2731} INFO - at 83.7s,\testimator lrl1's best error=0.4338,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:01] {2554} INFO - iteration 48, current learner extra_tree\n", - "[flaml.automl: 03-25 15:16:01] {2731} INFO - at 83.9s,\testimator extra_tree's best error=0.3749,\tbest estimator 
catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:01] {2554} INFO - iteration 49, current learner rf\n", - "[flaml.automl: 03-25 15:16:01] {2731} INFO - at 84.2s,\testimator rf's best error=0.3732,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:01] {2554} INFO - iteration 50, current learner catboost\n", - "[flaml.automl: 03-25 15:16:02] {2731} INFO - at 84.7s,\testimator catboost's best error=0.3396,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:02] {2554} INFO - iteration 51, current learner rf\n", - "[flaml.automl: 03-25 15:16:02] {2731} INFO - at 84.9s,\testimator rf's best error=0.3732,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:02] {2554} INFO - iteration 52, current learner rf\n", - "[flaml.automl: 03-25 15:16:02] {2731} INFO - at 85.2s,\testimator rf's best error=0.3715,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:02] {2554} INFO - iteration 53, current learner extra_tree\n", - "[flaml.automl: 03-25 15:16:03] {2731} INFO - at 85.5s,\testimator extra_tree's best error=0.3749,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:03] {2554} INFO - iteration 54, current learner rf\n", - "[flaml.automl: 03-25 15:16:03] {2731} INFO - at 85.7s,\testimator rf's best error=0.3715,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:03] {2554} INFO - iteration 55, current learner catboost\n", - "[flaml.automl: 03-25 15:16:03] {2731} INFO - at 86.2s,\testimator catboost's best error=0.3396,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:03] {2554} INFO - iteration 56, current learner rf\n", - "[flaml.automl: 03-25 15:16:04] {2731} INFO - at 86.5s,\testimator rf's best error=0.3715,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:04] {2554} INFO - iteration 57, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:08] {2731} INFO - at 91.2s,\testimator lgbm's best error=0.3405,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:08] {2554} INFO - iteration 58, current learner catboost\n", - "[flaml.automl: 03-25 15:16:09] {2731} INFO - at 92.1s,\testimator catboost's best error=0.3396,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:09] {2554} INFO - iteration 59, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:11] {2731} INFO - at 93.5s,\testimator lgbm's best error=0.3405,\tbest estimator catboost's best error=0.3396\n", - "[flaml.automl: 03-25 15:16:11] {2554} INFO - iteration 60, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:14] {2731} INFO - at 97.2s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", - "[flaml.automl: 03-25 15:16:14] {2554} INFO - iteration 61, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:18] {2731} INFO - at 100.9s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", - "[flaml.automl: 03-25 15:16:18] {2554} INFO - iteration 62, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:21] {2731} INFO - at 104.0s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", - "[flaml.automl: 03-25 15:16:21] {2554} INFO - iteration 63, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:25] {2731} INFO - at 108.1s,\testimator lgbm's best error=0.3318,\tbest estimator lgbm's best error=0.3318\n", - "[flaml.automl: 03-25 15:16:25] {2554} INFO - 
iteration 64, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:29] {2731} INFO - at 111.8s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", - "[flaml.automl: 03-25 15:16:29] {2554} INFO - iteration 65, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:33] {2731} INFO - at 116.0s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", - "[flaml.automl: 03-25 15:16:33] {2554} INFO - iteration 66, current learner lgbm\n", - "[flaml.automl: 03-25 15:16:37] {2731} INFO - at 119.8s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", - "[flaml.automl: 03-25 15:16:40] {2961} INFO - retrain lgbm for 3.5s\n", - "[flaml.automl: 03-25 15:16:40] {2968} INFO - retrained model: LGBMClassifier(learning_rate=0.2607939951456863, max_bin=255,\n", - " min_child_samples=62, n_estimators=150, num_leaves=176,\n", - " reg_alpha=0.015973158305354482, reg_lambda=1.1581244082992255,\n", + "[flaml.automl: 03-30 21:48:57] {2105} INFO - task = classification\n", + "[flaml.automl: 03-30 21:48:57] {2107} INFO - Data split method: stratified\n", + "[flaml.automl: 03-30 21:48:57] {2111} INFO - Evaluation method: holdout\n", + "[flaml.automl: 03-30 21:48:58] {2188} INFO - Minimizing error metric: 1-accuracy\n", + "[flaml.automl: 03-30 21:48:58] {2281} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'lrl1']\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 0, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2697} INFO - Estimated sufficient time budget=24546s. Estimated necessary time budget=603s.\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 0.7s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 1, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 0.8s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 2, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 0.9s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 3, current learner xgboost\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 1.0s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 4, current learner extra_tree\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 1.1s,\testimator extra_tree's best error=0.3892,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 1.3s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 6, current learner xgboost\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 1.3s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 7, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - at 1.4s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 8, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:58] {2744} INFO - 
at 1.6s,\testimator lgbm's best error=0.3614,\tbest estimator lgbm's best error=0.3614\n", + "[flaml.automl: 03-30 21:48:58] {2567} INFO - iteration 9, current learner xgboost\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 1.7s,\testimator xgboost's best error=0.3604,\tbest estimator xgboost's best error=0.3604\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 10, current learner xgboost\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 1.9s,\testimator xgboost's best error=0.3601,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 11, current learner extra_tree\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.0s,\testimator extra_tree's best error=0.3892,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 12, current learner extra_tree\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.1s,\testimator extra_tree's best error=0.3792,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 13, current learner rf\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.1s,\testimator rf's best error=0.3789,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 14, current learner rf\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.2s,\testimator rf's best error=0.3789,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 15, current learner rf\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.3s,\testimator rf's best error=0.3766,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 16, current learner lgbm\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.4s,\testimator lgbm's best error=0.3614,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 17, current learner extra_tree\n", + "[flaml.automl: 03-30 21:48:59] {2744} INFO - at 2.5s,\testimator extra_tree's best error=0.3792,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:48:59] {2567} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 2.6s,\testimator lgbm's best error=0.3614,\tbest estimator xgboost's best error=0.3601\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 19, current learner xgboost\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 2.7s,\testimator xgboost's best error=0.3594,\tbest estimator xgboost's best error=0.3594\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 20, current learner xgboost\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 2.8s,\testimator xgboost's best error=0.3594,\tbest estimator xgboost's best error=0.3594\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 21, current learner xgboost\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 2.9s,\testimator xgboost's best error=0.3594,\tbest estimator xgboost's best error=0.3594\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 3.1s,\testimator lgbm's best error=0.3614,\tbest estimator xgboost's best error=0.3594\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 3.3s,\testimator lgbm's best error=0.3550,\tbest estimator lgbm's 
best error=0.3550\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 24, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 3.4s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 25, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:00] {2744} INFO - at 3.5s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:00] {2567} INFO - iteration 26, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:01] {2744} INFO - at 3.7s,\testimator lgbm's best error=0.3550,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:01] {2567} INFO - iteration 27, current learner xgboost\n", + "[flaml.automl: 03-30 21:49:01] {2744} INFO - at 3.8s,\testimator xgboost's best error=0.3594,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:01] {2567} INFO - iteration 28, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:01] {2744} INFO - at 3.9s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:01] {2567} INFO - iteration 29, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:01] {2744} INFO - at 4.0s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3550\n", + "[flaml.automl: 03-30 21:49:01] {2567} INFO - iteration 30, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:01] {2744} INFO - at 4.5s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", + "[flaml.automl: 03-30 21:49:01] {2567} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:02] {2744} INFO - at 4.8s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", + "[flaml.automl: 03-30 21:49:02] {2567} INFO - iteration 32, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:03] {2744} INFO - at 6.1s,\testimator lgbm's best error=0.3545,\tbest estimator lgbm's best error=0.3545\n", + "[flaml.automl: 03-30 21:49:03] {2567} INFO - iteration 33, current learner catboost\n", + "[flaml.automl: 03-30 21:49:08] {2744} INFO - at 10.6s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3545\n", + "[flaml.automl: 03-30 21:49:08] {2567} INFO - iteration 34, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:09] {2744} INFO - at 11.8s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3545\n", + "[flaml.automl: 03-30 21:49:09] {2567} INFO - iteration 35, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:10] {2744} INFO - at 13.0s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:10] {2567} INFO - iteration 36, current learner rf\n", + "[flaml.automl: 03-30 21:49:10] {2744} INFO - at 13.5s,\testimator rf's best error=0.3766,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:10] {2567} INFO - iteration 37, current learner extra_tree\n", + "[flaml.automl: 03-30 21:49:15] {2744} INFO - at 18.6s,\testimator extra_tree's best error=0.3792,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:15] {2567} INFO - iteration 38, current learner catboost\n", + "[flaml.automl: 03-30 21:49:17] {2744} INFO - at 20.0s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:17] {2567} INFO 
- iteration 39, current learner catboost\n", + "[flaml.automl: 03-30 21:49:25] {2744} INFO - at 28.2s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:25] {2567} INFO - iteration 40, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:28] {2744} INFO - at 30.7s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:28] {2567} INFO - iteration 41, current learner catboost\n", + "[flaml.automl: 03-30 21:49:36] {2744} INFO - at 38.9s,\testimator catboost's best error=0.3587,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:36] {2567} INFO - iteration 42, current learner xgboost\n", + "[flaml.automl: 03-30 21:49:37] {2744} INFO - at 40.4s,\testimator xgboost's best error=0.3594,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:37] {2567} INFO - iteration 43, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:41] {2744} INFO - at 44.3s,\testimator lgbm's best error=0.3536,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:41] {2567} INFO - iteration 44, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:49:42] {2744} INFO - at 44.7s,\testimator xgb_limitdepth's best error=0.3630,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:42] {2567} INFO - iteration 45, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:49:43] {2744} INFO - at 46.1s,\testimator xgb_limitdepth's best error=0.3630,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:43] {2567} INFO - iteration 46, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:49:45] {2744} INFO - at 47.7s,\testimator xgb_limitdepth's best error=0.3630,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:45] {2567} INFO - iteration 47, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:49:46] {2744} INFO - at 49.4s,\testimator xgb_limitdepth's best error=0.3572,\tbest estimator lgbm's best error=0.3536\n", + "[flaml.automl: 03-30 21:49:46] {2567} INFO - iteration 48, current learner lgbm\n", + "[flaml.automl: 03-30 21:49:48] {2744} INFO - at 51.1s,\testimator lgbm's best error=0.3528,\tbest estimator lgbm's best error=0.3528\n", + "[flaml.automl: 03-30 21:49:48] {2567} INFO - iteration 49, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:49:49] {2744} INFO - at 51.9s,\testimator xgb_limitdepth's best error=0.3521,\tbest estimator xgb_limitdepth's best error=0.3521\n", + "[flaml.automl: 03-30 21:49:49] {2567} INFO - iteration 50, current learner catboost\n", + "[flaml.automl: 03-30 21:50:01] {2744} INFO - at 63.7s,\testimator catboost's best error=0.3499,\tbest estimator catboost's best error=0.3499\n", + "[flaml.automl: 03-30 21:50:01] {2567} INFO - iteration 51, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:50:01] {2744} INFO - at 64.0s,\testimator xgb_limitdepth's best error=0.3521,\tbest estimator catboost's best error=0.3499\n", + "[flaml.automl: 03-30 21:50:01] {2567} INFO - iteration 52, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:50:06] {2744} INFO - at 68.7s,\testimator xgb_limitdepth's best error=0.3521,\tbest estimator catboost's best error=0.3499\n", + "[flaml.automl: 03-30 21:50:06] {2567} INFO - iteration 53, current learner catboost\n", + "[flaml.automl: 03-30 21:50:13] {2744} INFO - at 75.6s,\testimator catboost's best error=0.3481,\tbest estimator catboost's best 
error=0.3481\n", + "[flaml.automl: 03-30 21:50:13] {2567} INFO - iteration 54, current learner catboost\n", + "[flaml.automl: 03-30 21:50:41] {2744} INFO - at 104.5s,\testimator catboost's best error=0.3481,\tbest estimator catboost's best error=0.3481\n", + "[flaml.automl: 03-30 21:50:41] {2567} INFO - iteration 55, current learner rf\n", + "[flaml.automl: 03-30 21:50:42] {2744} INFO - at 104.7s,\testimator rf's best error=0.3766,\tbest estimator catboost's best error=0.3481\n", + "[flaml.automl: 03-30 21:50:42] {2567} INFO - iteration 56, current learner lgbm\n", + "[flaml.automl: 03-30 21:50:54] {2744} INFO - at 117.2s,\testimator lgbm's best error=0.3405,\tbest estimator lgbm's best error=0.3405\n", + "[flaml.automl: 03-30 21:50:54] {2567} INFO - iteration 57, current learner lrl1\n", + "/home/ec2-user/miniconda3/envs/myflaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", + " warnings.warn(\"The max_iter was reached which means \"\n", + "[flaml.automl: 03-30 21:50:54] {2744} INFO - at 117.5s,\testimator lrl1's best error=0.4338,\tbest estimator lgbm's best error=0.3405\n", + "[flaml.automl: 03-30 21:50:54] {2567} INFO - iteration 58, current learner lrl1\n", + "/home/ec2-user/miniconda3/envs/myflaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", + " warnings.warn(\"The max_iter was reached which means \"\n", + "[flaml.automl: 03-30 21:50:55] {2744} INFO - at 117.7s,\testimator lrl1's best error=0.4337,\tbest estimator lgbm's best error=0.3405\n", + "[flaml.automl: 03-30 21:50:55] {2567} INFO - iteration 59, current learner lgbm\n", + "[flaml.automl: 03-30 21:50:56] {2744} INFO - at 119.6s,\testimator lgbm's best error=0.3405,\tbest estimator lgbm's best error=0.3405\n", + "[flaml.automl: 03-30 21:50:56] {2567} INFO - iteration 60, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:02] {2744} INFO - at 124.7s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", + "[flaml.automl: 03-30 21:51:02] {2567} INFO - iteration 61, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:07] {2744} INFO - at 130.3s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", + "[flaml.automl: 03-30 21:51:07] {2567} INFO - iteration 62, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:12] {2744} INFO - at 134.7s,\testimator lgbm's best error=0.3370,\tbest estimator lgbm's best error=0.3370\n", + "[flaml.automl: 03-30 21:51:12] {2567} INFO - iteration 63, current learner rf\n", + "[flaml.automl: 03-30 21:51:12] {2744} INFO - at 135.0s,\testimator rf's best error=0.3755,\tbest estimator lgbm's best error=0.3370\n", + "[flaml.automl: 03-30 21:51:12] {2567} INFO - iteration 64, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:18] {2744} INFO - at 141.5s,\testimator lgbm's best error=0.3318,\tbest estimator lgbm's best error=0.3318\n", + "[flaml.automl: 03-30 21:51:18] {2567} INFO - iteration 65, current learner rf\n", + "[flaml.automl: 03-30 21:51:19] {2744} INFO - at 141.8s,\testimator rf's best error=0.3755,\tbest estimator lgbm's best error=0.3318\n", + "[flaml.automl: 03-30 21:51:19] {2567} INFO - iteration 66, current learner extra_tree\n", + "[flaml.automl: 03-30 21:51:20] {2744} INFO - at 143.4s,\testimator extra_tree's best error=0.3777,\tbest estimator lgbm's best error=0.3318\n", + "[flaml.automl: 03-30 21:51:20] {2567} INFO 
- iteration 67, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:26] {2744} INFO - at 148.6s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:26] {2567} INFO - iteration 68, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:32] {2744} INFO - at 155.3s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:32] {2567} INFO - iteration 69, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:38] {2744} INFO - at 161.0s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:38] {2567} INFO - iteration 70, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:44] {2744} INFO - at 167.3s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:44] {2567} INFO - iteration 71, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:50] {2744} INFO - at 173.2s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:50] {2567} INFO - iteration 72, current learner rf\n", + "[flaml.automl: 03-30 21:51:50] {2744} INFO - at 173.4s,\testimator rf's best error=0.3755,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:50] {2567} INFO - iteration 73, current learner xgboost\n", + "[flaml.automl: 03-30 21:51:51] {2744} INFO - at 173.6s,\testimator xgboost's best error=0.3594,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:51] {2567} INFO - iteration 74, current learner lgbm\n", + "[flaml.automl: 03-30 21:51:56] {2744} INFO - at 178.7s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:51:56] {2567} INFO - iteration 75, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:52:04] {2744} INFO - at 186.8s,\testimator xgb_limitdepth's best error=0.3382,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:04] {2567} INFO - iteration 76, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:52:06] {2744} INFO - at 189.6s,\testimator xgb_limitdepth's best error=0.3382,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:06] {2567} INFO - iteration 77, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:52:29] {2744} INFO - at 212.3s,\testimator xgb_limitdepth's best error=0.3382,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:29] {2567} INFO - iteration 78, current learner lgbm\n", + "[flaml.automl: 03-30 21:52:33] {2744} INFO - at 215.8s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:33] {2567} INFO - iteration 79, current learner lgbm\n", + "[flaml.automl: 03-30 21:52:43] {2744} INFO - at 226.1s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:43] {2567} INFO - iteration 80, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:52:48] {2744} INFO - at 230.8s,\testimator xgb_limitdepth's best error=0.3382,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:48] {2567} INFO - iteration 81, current learner rf\n", + "[flaml.automl: 03-30 21:52:48] {2744} INFO - at 231.2s,\testimator rf's best error=0.3746,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:52:48] {2567} INFO - iteration 82, current learner lgbm\n", + 
"[flaml.automl: 03-30 21:53:12] {2744} INFO - at 254.6s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:12] {2567} INFO - iteration 83, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:53:24] {2744} INFO - at 266.9s,\testimator xgb_limitdepth's best error=0.3341,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:24] {2567} INFO - iteration 84, current learner rf\n", + "[flaml.automl: 03-30 21:53:24] {2744} INFO - at 267.2s,\testimator rf's best error=0.3746,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:24] {2567} INFO - iteration 85, current learner extra_tree\n", + "[flaml.automl: 03-30 21:53:25] {2744} INFO - at 268.3s,\testimator extra_tree's best error=0.3777,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:25] {2567} INFO - iteration 86, current learner extra_tree\n", + "[flaml.automl: 03-30 21:53:27] {2744} INFO - at 270.4s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:27] {2567} INFO - iteration 87, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:53:39] {2744} INFO - at 281.8s,\testimator xgb_limitdepth's best error=0.3341,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:39] {2567} INFO - iteration 88, current learner xgboost\n", + "[flaml.automl: 03-30 21:53:40] {2744} INFO - at 282.8s,\testimator xgboost's best error=0.3594,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:40] {2567} INFO - iteration 89, current learner extra_tree\n", + "[flaml.automl: 03-30 21:53:42] {2744} INFO - at 285.0s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:42] {2567} INFO - iteration 90, current learner lgbm\n", + "[flaml.automl: 03-30 21:53:44] {2744} INFO - at 286.9s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:53:44] {2567} INFO - iteration 91, current learner lgbm\n", + "[flaml.automl: 03-30 21:54:20] {2744} INFO - at 322.9s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:20] {2567} INFO - iteration 92, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:54:29] {2744} INFO - at 331.6s,\testimator xgb_limitdepth's best error=0.3316,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:29] {2567} INFO - iteration 93, current learner xgboost\n", + "[flaml.automl: 03-30 21:54:30] {2744} INFO - at 332.8s,\testimator xgboost's best error=0.3594,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:30] {2567} INFO - iteration 94, current learner lrl1\n", + "/home/ec2-user/miniconda3/envs/myflaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", + " warnings.warn(\"The max_iter was reached which means \"\n", + "[flaml.automl: 03-30 21:54:30] {2744} INFO - at 333.0s,\testimator lrl1's best error=0.4337,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:30] {2567} INFO - iteration 95, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:54:40] {2744} INFO - at 343.1s,\testimator xgb_limitdepth's best error=0.3316,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:40] {2567} INFO - iteration 96, current 
learner extra_tree\n", + "[flaml.automl: 03-30 21:54:42] {2744} INFO - at 344.6s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:42] {2567} INFO - iteration 97, current learner lgbm\n", + "[flaml.automl: 03-30 21:54:43] {2744} INFO - at 346.2s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:43] {2567} INFO - iteration 98, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:54:52] {2744} INFO - at 354.8s,\testimator xgb_limitdepth's best error=0.3316,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:52] {2567} INFO - iteration 99, current learner rf\n", + "[flaml.automl: 03-30 21:54:52] {2744} INFO - at 355.3s,\testimator rf's best error=0.3746,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:52] {2567} INFO - iteration 100, current learner extra_tree\n", + "[flaml.automl: 03-30 21:54:56] {2744} INFO - at 358.8s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:56] {2567} INFO - iteration 101, current learner rf\n", + "[flaml.automl: 03-30 21:54:56] {2744} INFO - at 359.2s,\testimator rf's best error=0.3746,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:54:56] {2567} INFO - iteration 102, current learner lgbm\n", + "[flaml.automl: 03-30 21:55:02] {2744} INFO - at 365.5s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:02] {2567} INFO - iteration 103, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:55:12] {2744} INFO - at 375.1s,\testimator xgb_limitdepth's best error=0.3306,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:12] {2567} INFO - iteration 104, current learner xgboost\n", + "[flaml.automl: 03-30 21:55:13] {2744} INFO - at 376.4s,\testimator xgboost's best error=0.3501,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:13] {2567} INFO - iteration 105, current learner lgbm\n", + "[flaml.automl: 03-30 21:55:18] {2744} INFO - at 381.5s,\testimator lgbm's best error=0.3282,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:18] {2567} INFO - iteration 106, current learner xgboost\n", + "[flaml.automl: 03-30 21:55:21] {2744} INFO - at 383.9s,\testimator xgboost's best error=0.3501,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:21] {2567} INFO - iteration 107, current learner xgboost\n", + "[flaml.automl: 03-30 21:55:22] {2744} INFO - at 385.1s,\testimator xgboost's best error=0.3392,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:22] {2567} INFO - iteration 108, current learner xgboost\n", + "[flaml.automl: 03-30 21:55:23] {2744} INFO - at 386.5s,\testimator xgboost's best error=0.3392,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:23] {2567} INFO - iteration 109, current learner xgboost\n", + "[flaml.automl: 03-30 21:55:25] {2744} INFO - at 387.7s,\testimator xgboost's best error=0.3391,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:25] {2567} INFO - iteration 110, current learner rf\n", + "[flaml.automl: 03-30 21:55:25] {2744} INFO - at 388.1s,\testimator rf's best error=0.3746,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:25] {2567} INFO - iteration 111, current learner xgb_limitdepth\n", + 
"[flaml.automl: 03-30 21:55:34] {2744} INFO - at 397.0s,\testimator xgb_limitdepth's best error=0.3306,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:34] {2567} INFO - iteration 112, current learner extra_tree\n", + "[flaml.automl: 03-30 21:55:38] {2744} INFO - at 400.7s,\testimator extra_tree's best error=0.3711,\tbest estimator lgbm's best error=0.3282\n", + "[flaml.automl: 03-30 21:55:38] {2567} INFO - iteration 113, current learner lgbm\n", + "[flaml.automl: 03-30 21:55:43] {2744} INFO - at 405.9s,\testimator lgbm's best error=0.3274,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:55:43] {2567} INFO - iteration 114, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:55:55] {2744} INFO - at 417.7s,\testimator xgb_limitdepth's best error=0.3306,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:55:55] {2567} INFO - iteration 115, current learner extra_tree\n", + "[flaml.automl: 03-30 21:55:57] {2744} INFO - at 419.9s,\testimator extra_tree's best error=0.3711,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:55:57] {2567} INFO - iteration 116, current learner lrl1\n", + "/home/ec2-user/miniconda3/envs/myflaml/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", + " warnings.warn(\"The max_iter was reached which means \"\n", + "[flaml.automl: 03-30 21:55:58] {2744} INFO - at 421.0s,\testimator lrl1's best error=0.4334,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:55:58] {2567} INFO - iteration 117, current learner lgbm\n", + "[flaml.automl: 03-30 21:56:03] {2744} INFO - at 426.5s,\testimator lgbm's best error=0.3274,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:56:03] {2567} INFO - iteration 118, current learner lgbm\n", + "[flaml.automl: 03-30 21:56:07] {2744} INFO - at 429.6s,\testimator lgbm's best error=0.3274,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:56:07] {2567} INFO - iteration 119, current learner xgb_limitdepth\n", + "[flaml.automl: 03-30 21:56:16] {2744} INFO - at 439.2s,\testimator xgb_limitdepth's best error=0.3306,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:56:16] {2567} INFO - iteration 120, current learner extra_tree\n", + "[flaml.automl: 03-30 21:56:18] {2744} INFO - at 440.7s,\testimator extra_tree's best error=0.3711,\tbest estimator lgbm's best error=0.3274\n", + "[flaml.automl: 03-30 21:56:18] {2567} INFO - iteration 121, current learner lgbm\n", + "[flaml.automl: 03-30 21:56:33] {2744} INFO - at 456.2s,\testimator lgbm's best error=0.3268,\tbest estimator lgbm's best error=0.3268\n", + "[flaml.automl: 03-30 21:56:33] {2567} INFO - iteration 122, current learner lgbm\n", + "[flaml.automl: 03-30 21:56:40] {2744} INFO - at 463.3s,\testimator lgbm's best error=0.3268,\tbest estimator lgbm's best error=0.3268\n", + "[flaml.automl: 03-30 21:56:40] {2567} INFO - iteration 123, current learner lgbm\n", + "[flaml.automl: 03-30 21:56:58] {2744} INFO - at 481.3s,\testimator lgbm's best error=0.3250,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:56:58] {2567} INFO - iteration 124, current learner extra_tree\n", + "[flaml.automl: 03-30 21:57:05] {2744} INFO - at 488.1s,\testimator extra_tree's best error=0.3623,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:57:05] {2567} INFO - iteration 125, current learner 
rf\n", + "[flaml.automl: 03-30 21:57:05] {2744} INFO - at 488.5s,\testimator rf's best error=0.3722,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:57:05] {2567} INFO - iteration 126, current learner lgbm\n", + "[flaml.automl: 03-30 21:57:15] {2744} INFO - at 498.5s,\testimator lgbm's best error=0.3250,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:57:15] {2567} INFO - iteration 127, current learner lgbm\n", + "[flaml.automl: 03-30 21:58:00] {2744} INFO - at 543.2s,\testimator lgbm's best error=0.3250,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:00] {2567} INFO - iteration 128, current learner lgbm\n", + "[flaml.automl: 03-30 21:58:24] {2744} INFO - at 566.8s,\testimator lgbm's best error=0.3250,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:24] {2567} INFO - iteration 129, current learner lgbm\n", + "[flaml.automl: 03-30 21:58:41] {2744} INFO - at 583.9s,\testimator lgbm's best error=0.3250,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:41] {2567} INFO - iteration 130, current learner rf\n", + "[flaml.automl: 03-30 21:58:41] {2744} INFO - at 584.2s,\testimator rf's best error=0.3722,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:41] {2567} INFO - iteration 131, current learner extra_tree\n", + "[flaml.automl: 03-30 21:58:48] {2744} INFO - at 590.7s,\testimator extra_tree's best error=0.3572,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:48] {2567} INFO - iteration 132, current learner extra_tree\n", + "[flaml.automl: 03-30 21:58:54] {2744} INFO - at 596.8s,\testimator extra_tree's best error=0.3572,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:54] {2567} INFO - iteration 133, current learner rf\n", + "[flaml.automl: 03-30 21:58:54] {2744} INFO - at 597.0s,\testimator rf's best error=0.3722,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:54] {2567} INFO - iteration 134, current learner rf\n", + "[flaml.automl: 03-30 21:58:54] {2744} INFO - at 597.2s,\testimator rf's best error=0.3701,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:54] {2567} INFO - iteration 135, current learner rf\n", + "[flaml.automl: 03-30 21:58:54] {2744} INFO - at 597.3s,\testimator rf's best error=0.3701,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:54] {2567} INFO - iteration 136, current learner rf\n", + "[flaml.automl: 03-30 21:58:54] {2744} INFO - at 597.5s,\testimator rf's best error=0.3701,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:54] {2567} INFO - iteration 137, current learner rf\n", + "[flaml.automl: 03-30 21:58:55] {2744} INFO - at 597.7s,\testimator rf's best error=0.3655,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:55] {2567} INFO - iteration 138, current learner rf\n", + "[flaml.automl: 03-30 21:58:55] {2744} INFO - at 597.8s,\testimator rf's best error=0.3655,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:55] {2567} INFO - iteration 139, current learner rf\n", + "[flaml.automl: 03-30 21:58:55] {2744} INFO - at 598.1s,\testimator rf's best error=0.3641,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:55] {2567} INFO - iteration 140, current learner rf\n", + "[flaml.automl: 03-30 21:58:55] {2744} INFO - at 598.3s,\testimator rf's best error=0.3604,\tbest estimator lgbm's best 
error=0.3250\n", + "[flaml.automl: 03-30 21:58:55] {2567} INFO - iteration 141, current learner rf\n", + "[flaml.automl: 03-30 21:58:55] {2744} INFO - at 598.5s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:55] {2567} INFO - iteration 142, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 598.7s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 143, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.0s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 144, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.1s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 145, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.3s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 146, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.4s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 147, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.5s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 148, current learner rf\n", + "[flaml.automl: 03-30 21:58:56] {2744} INFO - at 599.5s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:56] {2567} INFO - iteration 149, current learner rf\n", + "[flaml.automl: 03-30 21:58:57] {2744} INFO - at 599.6s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:58:57] {2567} INFO - iteration 150, current learner rf\n", + "[flaml.automl: 03-30 21:58:57] {2744} INFO - at 599.7s,\testimator rf's best error=0.3594,\tbest estimator lgbm's best error=0.3250\n", + "[flaml.automl: 03-30 21:59:13] {2974} INFO - retrain lgbm for 16.9s\n", + "[flaml.automl: 03-30 21:59:14] {2981} INFO - retrained model: LGBMClassifier(colsample_bytree=0.763983850698587,\n", + " learning_rate=0.08749366799403727, max_bin=127,\n", + " min_child_samples=128, n_estimators=302, num_leaves=466,\n", + " reg_alpha=0.09968008477303378, reg_lambda=23.22741934331899,\n", " verbose=-1)\n", - "[flaml.automl: 03-25 15:16:40] {2297} INFO - fit succeeded\n", - "[flaml.automl: 03-25 15:16:40] {2298} INFO - Time taken to find the best model: 111.8131034374237\n", - "[flaml.automl: 03-25 15:16:40] {2309} WARNING - Time taken to find the best model is 93% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" + "[flaml.automl: 03-30 21:59:14] {2310} INFO - fit succeeded\n", + "[flaml.automl: 03-30 21:59:14] {2311} INFO - Time taken to find the best model: 481.2624523639679\n", + "[flaml.automl: 03-30 21:59:14] {2322} WARNING - Time taken to find the best model is 80% of the provided time budget and not all estimators' hyperparameter search converged. 
Consider increasing the time budget.\n" ] } ], @@ -353,9 +510,10 @@ { "data": { "text/plain": [ - "LGBMClassifier(learning_rate=0.2607939951456863, max_bin=255,\n", - " min_child_samples=62, n_estimators=150, num_leaves=176,\n", - " reg_alpha=0.015973158305354482, reg_lambda=1.1581244082992255,\n", + "LGBMClassifier(colsample_bytree=0.763983850698587,\n", + " learning_rate=0.08749366799403727, max_bin=127,\n", + " min_child_samples=128, n_estimators=302, num_leaves=466,\n", + " reg_alpha=0.09968008477303378, reg_lambda=23.22741934331899,\n", " verbose=-1)" ] }, @@ -389,14 +547,35 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Predicted labels ['1' '0' '1' ... '1' '0' '0']\n", + "True labels 118331 0\n", + "328182 0\n", + "335454 0\n", + "520591 1\n", + "344651 0\n", + " ..\n", + "367080 0\n", + "203510 1\n", + "254894 0\n", + "296512 1\n", + "362444 0\n", + "Name: Delay, Length: 134846, dtype: category\n", + "Categories (2, object): ['0' < '1']\n" + ] + } + ], "source": [ "'''compute predictions of testing dataset''' \n", "y_pred = automl.predict(X_test)\n", @@ -451,16 +630,21 @@ "output_type": "stream", "text": [ "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 26, 'num_leaves': 4, 'min_child_samples': 18, 'learning_rate': 0.22930096764186372, 'log_max_bin': 9, 'colsample_bytree': 0.9086551727646448, 'reg_alpha': 0.0015561782752413472, 'reg_lambda': 0.33127416269768944, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 26, 'num_leaves': 4, 'min_child_samples': 18, 'learning_rate': 0.22930096764186372, 'log_max_bin': 9, 'colsample_bytree': 0.9086551727646448, 'reg_alpha': 0.0015561782752413472, 'reg_lambda': 0.33127416269768944, 'FLAML_sample_size': 10000}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 55, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.4365396221333287, 'log_max_bin': 10, 'colsample_bytree': 0.8048558760626646, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.23010605579846408, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 55, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.4365396221333287, 'log_max_bin': 10, 'colsample_bytree': 0.8048558760626646, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.23010605579846408, 'FLAML_sample_size': 40000}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 90, 'num_leaves': 18, 'min_child_samples': 34, 'learning_rate': 0.3572626620529719, 'log_max_bin': 10, 'colsample_bytree': 0.9295656128173544, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.19814636043056766, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 
'Best Hyper-parameters': {'n_estimators': 90, 'num_leaves': 18, 'min_child_samples': 34, 'learning_rate': 0.3572626620529719, 'log_max_bin': 10, 'colsample_bytree': 0.9295656128173544, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.19814636043056766, 'FLAML_sample_size': 40000}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405412, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643323, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405412, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643323, 'FLAML_sample_size': 40000}}\n", - "{'Current Learner': 'catboost', 'Current Sample': 40000, 'Current Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.09999999999999996, 'n_estimators': 110, 'FLAML_sample_size': 40000}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.09999999999999996, 'n_estimators': 110, 'FLAML_sample_size': 40000}}\n", - "{'Current Learner': 'catboost', 'Current Sample': 40000, 'Current Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.2, 'n_estimators': 61, 'FLAML_sample_size': 40000}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.2, 'n_estimators': 61, 'FLAML_sample_size': 40000}}\n", - "{'Current Learner': 'catboost', 'Current Sample': 364083, 'Current Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.2, 'n_estimators': 37, 'FLAML_sample_size': 364083}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.2, 'n_estimators': 37, 'FLAML_sample_size': 364083}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 180, 'num_leaves': 31, 'min_child_samples': 112, 'learning_rate': 0.14172261747380874, 'log_max_bin': 8, 'colsample_bytree': 0.9882716197099741, 'reg_alpha': 0.004676080321450302, 'reg_lambda': 2.704862827036818, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 180, 'num_leaves': 31, 'min_child_samples': 112, 'learning_rate': 0.14172261747380874, 'log_max_bin': 8, 'colsample_bytree': 0.9882716197099741, 'reg_alpha': 0.004676080321450302, 'reg_lambda': 2.704862827036818, 'FLAML_sample_size': 364083}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 284, 'num_leaves': 24, 'min_child_samples': 57, 'learning_rate': 0.3450637443178264, 'log_max_bin': 8, 'colsample_bytree': 0.9661606582789269, 'reg_alpha': 0.05708594148438568, 'reg_lambda': 3.0806435484123478, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 284, 'num_leaves': 24, 'min_child_samples': 57, 'learning_rate': 0.3450637443178264, 'log_max_bin': 8, 'colsample_bytree': 0.9661606582789269, 'reg_alpha': 0.05708594148438568, 'reg_lambda': 3.0806435484123478, 'FLAML_sample_size': 364083}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 150, 'num_leaves': 176, 'min_child_samples': 62, 'learning_rate': 0.2607939951456863, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 
0.015973158305354482, 'reg_lambda': 1.1581244082992255, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 150, 'num_leaves': 176, 'min_child_samples': 62, 'learning_rate': 0.2607939951456863, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.015973158305354482, 'reg_lambda': 1.1581244082992255, 'FLAML_sample_size': 364083}}\n" + "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 26, 'num_leaves': 4, 'min_child_samples': 18, 'learning_rate': 0.2293009676418639, 'log_max_bin': 9, 'colsample_bytree': 0.9086551727646448, 'reg_alpha': 0.0015561782752413472, 'reg_lambda': 0.33127416269768944, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 26, 'num_leaves': 4, 'min_child_samples': 18, 'learning_rate': 0.2293009676418639, 'log_max_bin': 9, 'colsample_bytree': 0.9086551727646448, 'reg_alpha': 0.0015561782752413472, 'reg_lambda': 0.33127416269768944, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 28, 'max_leaves': 4, 'min_child_weight': 0.7500252416342552, 'learning_rate': 0.23798984382572066, 'subsample': 1.0, 'colsample_bylevel': 0.9045613143846261, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.48864254576029176, 'FLAML_sample_size': 10000}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 28, 'max_leaves': 4, 'min_child_weight': 0.7500252416342552, 'learning_rate': 0.23798984382572066, 'subsample': 1.0, 'colsample_bylevel': 0.9045613143846261, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.48864254576029176, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 129, 'max_leaves': 4, 'min_child_weight': 1.2498964566809219, 'learning_rate': 0.3574837022388901, 'subsample': 0.9773266280674643, 'colsample_bylevel': 0.9705283362807284, 'colsample_bytree': 0.8561269216168275, 'reg_alpha': 0.0021694711024901254, 'reg_lambda': 4.620219690690227, 'FLAML_sample_size': 10000}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 129, 'max_leaves': 4, 'min_child_weight': 1.2498964566809219, 'learning_rate': 0.3574837022388901, 'subsample': 0.9773266280674643, 'colsample_bylevel': 0.9705283362807284, 'colsample_bytree': 0.8561269216168275, 'reg_alpha': 0.0021694711024901254, 'reg_lambda': 4.620219690690227, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 28, 'max_leaves': 5, 'min_child_weight': 0.7500252416342552, 'learning_rate': 0.23798984382572066, 'subsample': 1.0, 'colsample_bylevel': 0.9045613143846261, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.48864254576029176, 'FLAML_sample_size': 10000}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 28, 'max_leaves': 5, 'min_child_weight': 0.7500252416342552, 'learning_rate': 0.23798984382572066, 'subsample': 1.0, 'colsample_bylevel': 0.9045613143846261, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.48864254576029176, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 55, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.43653962213332903, 'log_max_bin': 10, 'colsample_bytree': 0.8048558760626646, 'reg_alpha': 
0.0009765625, 'reg_lambda': 0.23010605579846408, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 55, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.43653962213332903, 'log_max_bin': 10, 'colsample_bytree': 0.8048558760626646, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.23010605579846408, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 90, 'num_leaves': 18, 'min_child_samples': 34, 'learning_rate': 0.35726266205297247, 'log_max_bin': 10, 'colsample_bytree': 0.9295656128173544, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.1981463604305675, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 90, 'num_leaves': 18, 'min_child_samples': 34, 'learning_rate': 0.35726266205297247, 'log_max_bin': 10, 'colsample_bytree': 0.9295656128173544, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.1981463604305675, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405448, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643311, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405448, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643311, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405448, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643311, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 56, 'num_leaves': 7, 'min_child_samples': 92, 'learning_rate': 0.23536463281405448, 'log_max_bin': 10, 'colsample_bytree': 0.9898009552962395, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.14329426172643311, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'xgb', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 46, 'max_depth': 6, 'min_child_weight': 1.6664725229213329, 'learning_rate': 0.45062893839370016, 'subsample': 0.9773266280674643, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.8561269216168275, 'reg_alpha': 0.0021694711024901254, 'reg_lambda': 9.455213695118394, 'FLAML_sample_size': 40000}, 'Best Learner': 'xgb', 'Best Hyper-parameters': {'n_estimators': 46, 'max_depth': 6, 'min_child_weight': 1.6664725229213329, 'learning_rate': 0.45062893839370016, 'subsample': 0.9773266280674643, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.8561269216168275, 'reg_alpha': 0.0021694711024901254, 'reg_lambda': 9.455213695118394, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'catboost', 'Current Sample': 40000, 'Current Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.09999999999999996, 'n_estimators': 99, 'FLAML_sample_size': 40000}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.09999999999999996, 'n_estimators': 99, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'catboost', 'Current Sample': 40000, 'Current Hyper-parameters': {'early_stopping_rounds': 
10, 'learning_rate': 0.2, 'n_estimators': 52, 'FLAML_sample_size': 40000}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 10, 'learning_rate': 0.2, 'n_estimators': 52, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 179, 'num_leaves': 27, 'min_child_samples': 75, 'learning_rate': 0.09744966359309036, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.002826104794043855, 'reg_lambda': 0.1457318237156161, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 179, 'num_leaves': 27, 'min_child_samples': 75, 'learning_rate': 0.09744966359309036, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.002826104794043855, 'reg_lambda': 0.1457318237156161, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 180, 'num_leaves': 31, 'min_child_samples': 112, 'learning_rate': 0.14172261747380896, 'log_max_bin': 8, 'colsample_bytree': 0.9882716197099741, 'reg_alpha': 0.004676080321450302, 'reg_lambda': 2.704862827036818, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 180, 'num_leaves': 31, 'min_child_samples': 112, 'learning_rate': 0.14172261747380896, 'log_max_bin': 8, 'colsample_bytree': 0.9882716197099741, 'reg_alpha': 0.004676080321450302, 'reg_lambda': 2.704862827036818, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 284, 'num_leaves': 24, 'min_child_samples': 57, 'learning_rate': 0.34506374431782694, 'log_max_bin': 8, 'colsample_bytree': 0.9661606582789269, 'reg_alpha': 0.05708594148438563, 'reg_lambda': 3.0806435484123478, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 284, 'num_leaves': 24, 'min_child_samples': 57, 'learning_rate': 0.34506374431782694, 'log_max_bin': 8, 'colsample_bytree': 0.9661606582789269, 'reg_alpha': 0.05708594148438563, 'reg_lambda': 3.0806435484123478, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 150, 'num_leaves': 176, 'min_child_samples': 62, 'learning_rate': 0.2607939951456869, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.015973158305354472, 'reg_lambda': 1.1581244082992255, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 150, 'num_leaves': 176, 'min_child_samples': 62, 'learning_rate': 0.2607939951456869, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.015973158305354472, 'reg_lambda': 1.1581244082992255, 'FLAML_sample_size': 364083}}\n" ] } ], @@ -483,7 +667,7 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAAic0lEQVR4nO3dfZxdVX3v8c+XSYBBDAMm0GRCSCxJNIgmGEFEKlJpolUSKUagD4gt0bZaLTqUXOtD8VLxorb6MtUL1Ae8gkIaxqiRkQpCy2MCwYQMDcbwNBMkIWQEYSTJ5Hf/2PuEk5M9MzvJ7Dlzzvm+X6/zmrPXXvvs32GH+c1aa6+1FRGYmZlVOqDaAZiZ2cjkBGFmZpmcIMzMLJMThJmZZXKCMDOzTE4QZmaWyQnCbB9IOlXSumrHYVYkJwirOZIelfS2asYQEf8VEdOL+nxJcyTdLuk5SZsl3SbpzKLOZ5bFCcIsg6SmKp77bOAG4BpgInAU8CngXfvwWZLk/89tn/gfjtUNSQdIukTSryRtkXS9pCPK9t8g6deSfpP+dX5c2b5vSfqapOWSngfemrZUPi5pdXrM9yUdnNY/TVJX2fH91k33XyzpSUkbJf2VpJB0bMZ3EPAl4LMRcXVE/CYidkbEbRFxYVrnM5L+X9kxk9PPG5Vu/1zSZZLuAF4A2iStrDjP30talr4/SNIXJD0u6SlJX5fUvJ+Xw+qAE4TVkw8D84G3ABOArcDisv0/AaYCRwL3A9+tOP484DLg5cB/p2ULgLnAFOC1wPsGOH9mXUlzgYuAtwHHAqcN8BnTgaOBJQPUyePPgYUk3+XrwHRJU8v2nwdcm76/HJgGzEzjayVpsViDc4KwevJB4BMR0RURLwKfAc4u/WUdEd+IiOfK9r1O0mFlx/8gIu5I/2L/XVr2lYjYGBHPAD8k+SXan/7qLgC+GRFrI+KF9Nz9eUX688l8X7lf30rPtyMifgP8ADgXIE0UrwKWpS2WhcDfR8QzEfEc8M/AOft5fqsDThBWT44BbpTUI6kHeAjoA46S1CTp8rT76Vng0fSYsWXHP5Hxmb8ue/8CcOgA5++v7oSKz846T8mW9Of4AerkUXmOa0kTBEnroT1NVuOAQ4D7yv673ZSWW4NzgrB68gTw9ohoKXsdHBHdJL8U55F08xwGTE6PUdnxRS1t/CTJYHPJ0QPUXUfyPf5kgDrPk/xSL/m9jDqV3+VmYJykmSSJotS99DTQCxxX9t/ssIgYKBFag3CCsFo1WtLBZa9RJH3tl0k6BkDSOEnz0vovB14k+Qv9EJJulOFyPXCBpFdLOgT4ZH8VI1l//yLgk5IukDQmHXx/s6Qr02oPAH8gaVLaRbZosAAiYjvJnVFXAEeQJAwiYidwFfAvko4EkNQqac6+flmrH04QVquWk/zlW3p9BvgysAz4qaTngLuBk9L61wCPAd1AZ7pvWETET4CvALcC68vO/WI/9ZcA7wXeD2wEngL+N8k4AhFxM/B9YDVwH/CjnKFcS9KCuiEidpSV/0MprrT77T9JBsutwckPDDIbXpJeDTwIHFTxi9psRHELwmwYSHp3Ot/gcODzwA+dHGykc4IwGx4fADYBvyK5s+qvqxuO2eDcxWRmZpncgjAzs0yjqh3AUBk7dmxMnjy52mGYmdWU++677+mIyJwYWTcJYvLkyaxcuXLwimZmtoukx/rb5y4mMzPL5ARhZmaZnCDMzCyTE4SZmWVygjAzs0x1cxeTmVmjaV/VzRUd69jY08uElmba5kxn/qzWIft8JwgzsxrUvqqbRUvX0Lu9D4Dunl4WLV0DMGRJwl1MZmY16IqOdbuSQ0nv9j6u6Fg3ZOdwgjAzq0Ebe3r3qnxfOEGYmdWgCS3Ne1W+L5wgzMxqUNuc6TSPbtqtrHl0E21zhu5hgB6kNjOrQaWB6IuXrGZb305aC7iLqdAWhKS5ktZJWi/pkn7qLJDUKWmtpGvTsrdKeqDs9TtJ84uM1cys1syf1cqsSS2cNOUI7rjk9CFNDlBgC0JSE7AYOAPoAlZIWhYRnWV1pgKLgFMiYqukIwEi4lZgZlrnCJIHqv+0qFjNzGxPRbYgTgTWR8SGiNgGfA+YV1HnQmBxRGwFiIhNGZ9zNvCTiHihwFjNzKxCkQmiFXiibLsrLSs3DZgm6Q5Jd0uam/E55wDXFRSjmZn1o9qD1KOAqcBpwETgdknHR0QPgKTxwPFAR9bBkhYCCwEmTZo0DOGamTWOIlsQ3cDRZdsT07JyXcCyiNgeEY8AD5MkjJIFwI0RsT3rBBFxZUTMjojZ48ZlPjHPzMz2UZEJYgUwVdIUSQeSdBUtq6jTTtJ6QNJYki6nDWX7z8XdS2ZmVVFYgoiIHcCHSLqHHgKuj4i1ki6VdGZarQPYIqkTuBVoi4gtAJImk7RAbisqRjMz61+hYxARsRxYXlH2qbL3AVyUviqPfZQ9B7XNzOpe0ct451XtQWozMyszHMt45+UEYWY2gvS3jPfFS1Zz3b2P71G/88lnmTF+TCGxeLE+M7MRpL/lurf17cwsnzF+DPNmFtOycAvCzGwEmdDSTHdGkmhtaeb7Hzh5WGNxC8LMbAQZjmW883ILwsxsBBmOZbzzcoIwMxth5s9q3TUgPdzdSuXcxWRmZpmcIMzMLJMThJmZZXKCMDOzTE4QZmaWyQnCzMwyOUGYmVkmJwgzM8vkBGFmZpmcIMzMLJMThJmZZXKCMDOzTE4QZmaWyau5mpn1o31VN1d0rGNjTy8TqrjsdrU4QZiZZWhf1c2ipWt2PR+6u6eXRUvXADRMknCCMDPLcEXHul3JoaR3ex8XL1m961kNRep88llmjB9T+HkG4jEIM7MMGzOeCw2wrW/nsJx/xvgxzJtZ3ZaKWxBmZhkmtDTTnZEkWluaq/qUt+HkFoSZWYa2OdNpHt20W1nz6Cba5kyvUkTDzy0IM7MMpYHoi5esZlvfTlp9F5OZmZXMn9W6a0C6UbqVyrmLyczMMjlBmJlZJncx2V5r9NmlZo2i0BaEpLmS1klaL+mSfuoskNQpaa2ka8vKJ0n6qaSH0v2Ti4zV8inNLu3u6SV4aXZp+6ruaodmZkOssBaEpCZgMXAG0AWskLQsIjrL6kwFFgGnRMRWSUeWfcQ1wGURcbOkQ4HhmZ1iA6r27FKz4TYSZjRXS5EtiBOB9RGxISK2Ad8D5lXUuRBYHBFbASJiE4CkGcCoiLg5Lf9tRLxQYKyWU7Vnl5oNt5Ewo7laihyDaAWeKNvuAk6qqDMNQNIdQBPwmYi4KS3vkbQUmAL8J3BJRPRhVeXZpWaNo9p3MY0CpgKnAecCV0lqSctPBT4OvAF4JfC+yoMlLZS0UtLKzZs3D1PIjc2zS80aR5EJohs4umx7YlpWrgtYFhHbI+IR4GGShNEFPJB2T+0A2oETKk8QEVdGxOyImD1u3LgivoNVmD+rlc+ddTwHNiX/dFpbmvncWcf7LiazOlRkF9MKYKqkKSSJ4RzgvIo67SQth29KGkvStbQB6AFaJI2LiM3A6cDKAmO1vdDos0vNGsWgLQhJr9
iXD07/8v8Q0AE8BFwfEWslXSrpzLRaB7BFUidwK9AWEVvSsYaPAz+TtAYQcNW+xGFmZvsmTwvibkkPAN8EfhIRkffDI2I5sLyi7FNl7wO4KH1VHnsz8Nq85zIzs6GVZwxiGnAl8OfALyX9s6RpxYZlZmbVNmiCiMTNEXEuybyF84F7Jd0myR3QZmZ1atAupnQM4s9IWhBPAR8GlgEzgRtI5imYmVmdyTMGcRfwHWB+RHSVla+U9PViwjIzs2rLkyCm9zcwHRGfH+J4zMxshMgzSP3TdHYzAJIOl9RRXEhmZjYS5EkQ4yKip7SRLqx3ZP/VzcysHuRJEH2SJpU2JB0D5J4LYWZmtSnPGMQngP+WdBvJjOZTgYWFRmVmZlU3aIKIiJsknQC8MS36aEQ8XWxYZmZWbXkX6+sDNgEHAzMkERG3FxeWmZlVW56Jcn8FfIRkue4HSFoSd5GssGpmZnUqzyD1R0ge2vNYRLwVmEWyHLeZmdWxPAnidxHxOwBJB0XE/wB+fJiZWZ3LMwbRlU6UawdulrQVeKzIoMzMrPry3MX07vTtZyTdChwG3FRoVGZmVnUDJghJTcDaiHgVQETcNixRmZlZ1Q04BpE++nNd+UxqMzNrDHnGIA4H1kq6F3i+VBgRZ/Z/iJmZ1bo8CeKThUdhZmYjTp5Bao87mJk1oDwzqZ/jpdVbDwRGA89HxJgiAzMzs+rK04J4eem9JAHzeGnhPhtm7au6uaJjHRt7epnQ0kzbnOnMn9Va7bDMrA7lmUm9SyTagTnFhGMDaV/VzaKla+ju6SWA7p5eFi1dQ/uq7mqHZmZ1KE8X01llmwcAs4HfFRaR9euKjnX0bu/brax3ex8XL1nNdfc+PqyxdD75LDPGu5fRrJ7luYvpXWXvdwCPknQz2TDb2NObWb6tb+cwRwIzxo9h3kx3bZnVszxjEBcMRyD2kv7GGSa0NNOdkSRaW5r5/gdOrkKkZlbPBh2DkPTtdLG+0vbhkr5RaFQNbKBxhrY502ke3bRb/ebRTbTN8eK6Zjb08nQxvTYiekobEbFV0qziQmpsA40zzJrUwoSWg9mw+XmCpOXgu5jMrCh5EsQBkg6PiK0Ako7IeZztg8HGGcYeehBjDz2IeTNbOe8kL5FlZsXJ84v+i8Bdkm5It98DXFZcSI3N4wxmNlIMOgYREdcAZwFPpa+zIuI7eT5c0lxJ6yStl3RJP3UWSOqUtFbStWXlfZIeSF/L8n2d2udxBjMbKfLMg3gjyTMhvppuj5F0UkTcM8hxTcBi4AygC1ghaVlEdJbVmQosAk5JxzaOLPuI3oiYudffqMaVxhMuXrKabX07Pc5gZlWTp4vpa8AJZdu/zSjLciKwPiI2AEj6Hsn8ic6yOhcCi0vjGxGxKWfcdW3+rNZdE9/crWRm1ZJnqQ1FRGmxPiJiJ/kSSyvwRNl2V1pWbhowTdIdku6WNLds38GSVqbl8zMDkxamdVZu3rw5R0hmZpZXngSxQdLfSRqdvj4CbBii848CpgKnAecCV5XNuTgmImYD5wH/Kun3Kw+OiCsjYnZEzB43btwQhWRmZpAvQXwQeBPQTdIKOImka2gw3cDRZdsT07JyXcCyiNgeEY8AD5MkDCKiO/25Afg54LkXZmbDKM9dTJsi4pyIODIijgL+kuQv/sGsAKZKmiLpQOAcoPJupPbSZ0kaS9LltCGdrX1QWfkp7D52YWZmBcu13LekJknvkPQd4BHgvYMdExE7gA8BHcBDwPURsVbSpZJKz7PuALZI6gRuBdoiYgvwamClpF+k5ZeX3/1kZmbFG3CwWdJbSMYA3gHcS/KX/Csj4oU8Hx4Ry4HlFWWfKnsfwEXpq7zOncDxec5hZmbF6DdBSOoCHie5pfXjEfGcpEfyJgczM6ttA3UxLQEmkHQnvUvSy3jp2dRmZlbn+k0QEfFRYArJWkynAeuAcenSGIcOS3RmZlY1Aw5Sp8+gvjUiFpIki3NJZkM/OgyxmZlZFeVetjsitgM/An4kqbm4kMzMbCTIdZtrpYjIfmiBmZnVDT/4Z4j19zxpM7Na4wQxhErPky49MrT0PGnAScLMak6e50FMA9qAY8rrR8TpBcZVkwZ6nnRp+e68Op98lhnjxwxleGZmeyVPC+IG4OvAVUDfIHUbRlZX0mDPk94bM8aPYd5MtzrMrHryJIgdEfG1wiOpIf11JbUcMpqtL2zfo76fJ21mtShPgvihpL8BbgReLBVGxDOFRTWCta/q5mPX/4K+2H1See/2Prb17eQAwc6yXX6etJnVqjwJ4vz0Z1tZWQCvHPpwRrZSy6EyOZT07Qx+f9zLeOKZXj9P2sxq3qAJIiKmDEcgtSBrELpca0szP/vYacMXkJlZgfLcxTQa+GvgD9KinwP/N51Z3VD6G4QGdyWZWf3JM5P6a8DrgX9LX69PyxrOhJbsFUaaJD531vHuSjKzupInQbwhIs6PiFvS1wXAG4oObCRqmzOd5tFNu5U1j27iiwte5+RgZnUnT4Lok/T7pQ1Jr6RB50PMn9XK5846ngObkv9srS3NbjmYWd3KcxdTG3CrpA2ASGZUX1BoVCPY/Fmtu2ZFe26DmdWzPHcx/UzSVKA0ArsuIl4c6BgzM6t9Az2T+vSIuEXSWRW7jpVERCwtODYzM6uigVoQbwFuAd6VsS8AJwgzszrWb4KIiE+nby+NiEfK90ny5DkzszqX5y6m/8goWzLUgZiZ2cgy0BjEq4DjgMMqxiHGAAcXHZiZmVXXQGMQ04F3Ai3sPg7xHHBhgTGZmdkIMNAYxA+AH0g6OSLuGsaYzMxsBMgzUW6VpL8l6W7a1bUUEe8vLCozM6u6PIPU3wF+D5gD3AZMJOlmMjOzOpYnQRwbEZ8Eno+IbwN/DJxUbFhmZlZteRJE6bkPPZJeAxwGHFlcSGZmNhLkSRBXSjoc+CSwDOgE/k+eD5c0V9I6SeslXdJPnQWSOiWtlXRtxb4xkrokfTXP+czMbOjkWazv6vTtbezFc6glNQGLgTOALmCFpGUR0VlWZyqwCDglIrZKqmyZfBa4Pe85zcxs6Aw0Ue6igQ6MiC8N8tknAusjYkP6ed8D5pG0QEouBBZHxNb0MzeVnf/1wFHATcDsQc5lZmZDbKAuppenr9kkz6RuTV8fBE7I8dmtwBNl211pWblpwDRJd0i6W9JcAEkHAF8EPj7QCSQtlLRS0srNmzfnCMnMzPIaaKLcPwFIuh04ISKeS7c/A/x4CM8/FTiN5PbZ2yUdD/wZsDwiuiT1e3BEXAlcCTB79uwYopjMzIx8E+WOAraVbW9LywbTDRxdtj0xLSvXBdwTEduBRyQ9TJIwTgZOlfQ3wKHAgZJ+GxGZA91mZjb08iSIa4B7Jd2Ybs8HvpXjuBXA1HRp8G7gHOC8ijrtwLnANyWNJely2hARf1qqIOl9wOyikkP7qm6u6FjHxp5eJrQ00zZnup8xbWZGvruYLpP0E+DUtOiCiFiV47gdkj4EdABNwDciYq2kS4GVEbEs3fdHkjqBPqAtIrbs65fZW+2rulm0dA292/sA6O7pZ
dHSNQBOEmbW8BSR3XUvaUxEPCvpiKz9EfFMoZHtpdmzZ8fKlSv36phTLr+F7p7ePcoPbDqAWZNa+j2u88lnmTF+DN//wMl7G6aZ2Ygi6b6IyLxTdKAWxLUky33fR/KI0V2fl27nnhMxUm3MSA4A2/p2DnjcjPFjmDfTLQwzq28D3cX0zvRn3T5edEJLc2YLorWl2a0DM2t4A02UG3CuQ0TcP/ThDK+2OdN3G4MAaB7dRNuc6VWMysxsZBioi+mLA+wL4PQhjmXYlQaiL16ymm19O2n1XUxmZrsM1MX01uEMpFrmz2rlunsfB3C3kplZmTzzIEiX+Z7B7k+Uu6aooMzMrPoGTRCSPk2yFMYMYDnwduC/SSbQmZlZncrzPIizgT8Efh0RFwCvI3lokJmZ1bE8CaI3InYCOySNATax+xpLZmZWh/KMQayU1AJcRTJp7rfAXUUGZWZm1TfQPIjFwLUR8Tdp0dcl3QSMiYjVwxKdmZlVzUAtiIeBL0gaD1wPXJdnkT4zM6sP/Y5BRMSXI+Jk4C3AFuAbkv5H0qclTRu2CM3MrCoGHaSOiMci4vMRMYvk2Q3zgYeKDszMzKpr0AQhaZSkd0n6LvATYB1wVuGRmZlZVQ00SH0GSYvhHcC9wPeAhRHx/DDFZmZmVTTQIPUikmdCfCwitg5TPGZmNkIMtFhfza/WamZm+y7PTGozM2tAThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy1RogpA0V9I6SeslXdJPnQWSOiWtlXRtWnaMpPslPZCWf7DIOM3MbE8DPQ9iv0hqAhYDZwBdwApJyyKis6zOVJLnTpwSEVslHZnuehI4OSJelHQo8GB67Mai4jUzs90V2YI4EVgfERsiYhvJE+nmVdS5EFhceiBRRGxKf26LiBfTOgcVHKeZmWUo8hdvK/BE2XZXWlZuGjBN0h2S7pY0t7RD0tGSVqef8fms1oOkhZJWSlq5efPmAr6CmVnjqvZf5qOAqcBpJM+/vkpSC0BEPBERrwWOBc6XdFTlwRFxZUTMjojZ48aNG76ozcwaQJEJohs4umx7YlpWrgtYFhHbI+IR4GGShLFL2nJ4EDi1wFjNzKxCkQliBTBV0hRJBwLnAMsq6rSTtB6QNJaky2mDpImSmtPyw4E3A+sKjNXMzCoUliAiYgfwIaADeAi4PiLWSrpU0plptQ5gi6RO4FagLSK2AK8G7pH0C+A24AsRsaaoWM3MbE+F3eYKEBHLgeUVZZ8qex/ARemrvM7NwGuLjM3MzAZW7UFqMzMboZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI1fIJoX9XNqsd7uOeRZzjl8ltoX9Vd7ZDMzEaEhk4Q7au6WbR0Ddv6dgLQ3dPLoqVrnCTMzGjwBHFFxzp6t/ftVta7vY8rOtZVKSIzs5GjoRPExp7evSo3M2skDZ0gJrQ071W5mVkjaegE0TZnOs2jm3Yrax7dRNuc6VWKyMxs5BhV7QCqaf6sViAZi9jY08uElmba5kzfVW5m1sgaOkFAkiScEMzM9tTQXUxmZta/QhOEpLmS1klaL+mSfuoskNQpaa2ka9OymZLuSstWS3pvkXGamdmeCutiktQELAbOALqAFZKWRURnWZ2pwCLglIjYKunIdNcLwF9ExC8lTQDuk9QRET1FxWtmZrsrsgVxIrA+IjZExDbge8C8ijoXAosjYitARGxKfz4cEb9M328ENgHjCozVzMwqFJkgWoEnyra70rJy04Bpku6QdLekuZUfIulE4EDgVxn7FkpaKWnl5s2bhzB0MzOr9l1Mo4CpwGnAROB2SceXupIkjQe+A5wfETsrD46IK4Er07qbJT22DzGMBZ7ep+hHPn+32lOv3wv83UaqY/rbUWSC6AaOLtuemJaV6wLuiYjtwCOSHiZJGCskjQF+DHwiIu4e7GQRsU9dUJJWRsTsfTl2pPN3qz31+r3A360WFdnFtAKYKmmKpAOBc4BlFXXaSVoPSBpL0uW0Ia1/I3BNRCwpMEYzM+tHYQkiInYAHwI6gIeA6yNiraRLJZ2ZVusAtkjqBG4F2iJiC7AA+APgfZIeSF8zi4rVzMz2pIiodgxVJWlhOpZRd/zdak+9fi/wd6tFDZ8gzMwsm5faMDOzTE4QZmaWqaETRJ61omqBpKMl3Vq2ptVH0vIjJN0s6Zfpz8OrHeu+ktQkaZWkH6XbUyTdk16776d3vtUcSS2Slkj6H0kPSTq5Hq6bpL9P/y0+KOk6SQfX6jWT9A1JmyQ9WFaWeY2U+Er6HVdLOqF6ke+/hk0QZWtFvR2YAZwraUZ1o9pnO4CPRcQM4I3A36bf5RLgZxExFfhZul2rPkJyN1zJ54F/iYhjga3AX1Ylqv33ZeCmiHgV8DqS71jT101SK/B3wOyIeA3QRHKbe61es28Blas89HeN3k4yl2sqsBD42jDFWIiGTRDkWyuqJkTEkxFxf/r+OZJfMq0k3+fbabVvA/OrEuB+kjQR+GPg6nRbwOlAaY5MTX43SYeR3M797wARsS1dRaAertsooFnSKOAQ4Elq9JpFxO3AMxXF/V2jeSTztyKd4NuSrghRkxo5QeRZK6rmSJoMzALuAY6KiCfTXb8GjqpWXPvpX4GLgdJyK68AetK5NlC7124KsBn4Ztp9drWkl1Hj1y0iuoEvAI+TJIbfAPdRH9espL9rVFe/Vxo5QdQdSYcC/wF8NCKeLd8Xyf3MNXdPs6R3Apsi4r5qx1KAUcAJwNciYhbwPBXdSbV43dL++HkkCXAC8DL27KKpG7V4jfJq5ASRZ62omiFpNEly+G5ELE2Lnyo1b9Ofm6oV3344BThT0qMk3YCnk/Tbt6TdF1C7164L6IqIe9LtJSQJo9av29uARyJic7rO2lKS61gP16ykv2tUV79XGjlB5FkrqiakffL/DjwUEV8q27UMOD99fz7wg+GObX9FxKKImBgRk0mu0S0R8ackS7OcnVar1e/2a+AJSdPToj8EOqn96/Y48EZJh6T/Nkvfq+avWZn+rtEy4C/Su5neCPymrCuq5jT0TGpJ7yDp324CvhERl1U3on0j6c3AfwFreKmf/n+RjENcD0wCHgMWRETlYFvNkHQa8PGIeKekV5K0KI4AVgF/FhEvVjG8fZKuMXY1yTNPNgAXkPzhVtPXTdI/Ae8lucNuFfBXJH3xNXfNJF1HsqjoWOAp4NMkC43ucY3ShPhVki61F4ALImJlFcIeEg2dIMzMrH+N3MVkZmYDcIIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCKsJkv5F0kfLtjskXV22/UVJFw1w/LcknZ2+/7mkPR4wL2m0pMvTFTrvl3SXpLen+x5V8tz0vY1713n72b9YySN1
OyX16qVH7J4tabmklr09Z46YxpdWxe1n/4GSbi+b1GYNygnCasUdwJsAJB1Ack/6cWX73wTcuZ/n+CwwHnhNRJxAsgDby/fzMwcUEX8bETOBdwC/ioiZ6WtJRLwjXbxvqF0EXDVATNtIVih9bwHnthriBGG14k7g5PT9ccCDwHOSDpd0EPBq4H5Jn5K0In0OwZXpxKVBSToEuBD4cGnyVkQ8FRHXZ9S9KP38BytaNX+RPgPgF5K+k3HcZ9MWRVPOmB6VNFbSZCXPi/iWpIclfVfS2yTdkbZ2Tkzrv0zJswvuTRf/62914j8BbkqPOS6t/0Aa+9S0Tjvwp3nitPrlJqTVhIjYKGmHpEkkrYW7SGbmnkyyWuiaiNgm6asRcSlA+kv6ncAPc5ziWODxykUOK0l6Pcls55MAAfdIug3YBvwj8KaIeFrSERXHXUHSGrkg9m126rHAe4D3kywTcx7wZuBMklnz84FPkCxF8v60a+peSf8ZEc+XxTEF2Fo2g/mDwJcj4rvpkjOl5PUg8IZ9iNPqiFsQVkvuJEkOpQRxV9n2HWmdtyp5atkakoX9jsv6oP3wZuDGiHg+In5LshDdqem5boiIpwEqlsb4JHBYRHxwH5MDJIvfrYmIncBakofVBMnyKpPTOn8EXCLpAeDnwMEkS0GUG0+yxHjJXcD/kvQPwDER0ZvG3wdsk1RoF5uNbE4QVktK4xDHk/yFezdJC+JNwJ2SDgb+DTg7Io4n6Wc/OOdnrwcmSRoz5FEnf/G/vrJVsZfK1yzaWba9k5d6AgT8Sdk4xqSIKH8KH0AvZf9NIuJaklZIL7Bc0ulldQ8CfrcfMVuNc4KwWnInSZfRMxHRl/6V3kKSJO7kpV98Tyt5Nka/dw9ViogXSFbE/XLa1YKkcZLeU1H1v4D56UqlLwPenZbdArxH0ivSY8uTwU3A5cCPC/6LvAP4cGncRdKsjDoP81KLg3TRww0R8RWSFUlfm5a/Ang6Xa7bGpQThNWSNSR3L91dUfabiHg6vePnKpLWRQfJX+574x9Jul86lTyg/kdA5YOX7id5RvG9JKvlXh0RqyJiLXAZcJukXwBfqjjuhjS2ZZKa9zKuvD4LjAZWS1qbbu8mHY/4laRj06IFwINpt9RrgGvS8rcCPy4oTqsRXs3VrMFIejfw+oj4xwHqLAUuiYiHhy8yG2l8F5NZg4mIG0tdYVnSLrZ2JwdzC8LMzDJ5DMLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMws0/8HedcK/1gFVw8AAAAASUVORK5CYII=", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3dfZxdVX3v8c+XSYDBEicI2GQCJipEUYTAFEREeZAmPpGIXEVq9WIrUGuLehtKtFqsl0qbaotXMDdGfORBiWEI3pBAi4BigCROyCPBgBhmgiYpRjCMJJn87h97n+TkZJ+TnTB7zpk53/frNa+Zvfbae/8mMPObtdZeaykiMDMzq3RAvQMwM7PG5ARhZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwmw/SDpD0pp6x2FWJCcIG3QkPSnpbfWMISJ+EhHji7q/pImS7pf0nKSNku6TdF5RzzPL4gRhlkFSSx2ffQFwK/AdYAzwcuBzwLv3416S5J9z2y/+H8eGDEkHSLpS0uOS/lvSDyQdVnb+Vkm/lvS79K/z15Wd+5akr0maJ2kLcFbaUvk7ScvSa74v6eC0/pmSusuur1o3PX+FpKclrZf0l5JC0qszvgcBXwa+EBGzIuJ3EbEjIu6LiI+mda6S9L2ya8am9xuWHt8r6WpJDwDPA5+WtLjiOZ+UNDf9+iBJ/yZpnaTfSJohqfVF/uewIcAJwoaSvwWmAG8FRgO/Ba4rO38ncAxwJPBz4MaK6y8CrgYOBX6alr0PmASMA94A/M8az8+sK2kS8CngbcCr0/iqGQ8cBcyuUSePPwcuIfle/g8wXtIxZecvAm5Kv/4X4FjgxDS+dpIWizU5JwgbSi4FPhMR3RHxAnAVcEHpL+uIuCEinis7d4Kkl5Zdf3tEPJD+xf6HtOwrEbE+Ip4B7iD5JVpNtbrvA74ZESsj4nng8zXu8bL089O5v+ts30qftz0ifgfcDnwAIE0UrwHmpi2WjwKfjIhnIuI54J+BC1/k820IcIKwoeQVwG2SNkvaDKwG+oCXS2qRdE3a/fQs8GR6zeFl1z+Vcc9fl339PPBHNZ5fre7ointnPafkv9PPo2rUyaPyGTeRJgiS1kNnmqyOAA4BlpT9u81Py63JOUHYUPIU8PaIaCv7ODgiekh+KU4m6eZ5KTA2vUZl1xe1tPHTJIPNJUfVqLuG5Pt4b406W0h+qZf8cUadyu/lLuBwSSeSJIpS99ImoBd4Xdm/2UsjolYitCbhBGGD1XBJB5d9DANmAFdLegWApCMkTU7rHwq8QPIX+iEk3SgD5QfAxZJeK+kQavTvR7L+/qeAz0q6WNKIdPD9zZJmptWWAm+RdHTaRTZtbwFExHaScY3pwGHA3Wn5DuDrwL9LOhJAUrukifv93dqQ4QRhg9U8kr98Sx9XAdcCc4G7JD0HPAicmtb/DvAroAdYlZ4bEBFxJ/AV4MfAWmBheuqFKvVnA+8HPgKsB34D/G+ScQQi4m7g+8AyYAnwo5yh3ETSgro1TRglf5/G9WDa/fafJIPl1uTkDYPMBpak1wIrgIMqflGbNRS3IMwGgKT3SDpQ0kiS10rvcHKwRucEYTYwLgU2Ao+TvFn1V/UNx2zv3MVkZmaZ3IIwM7NMw+odQH86/PDDY+zYsfUOw8xs0FiyZMmmiMicGDmkEsTYsWNZvHjx3iuamRkAkn5V7Zy7mMzMLJMThJmZZXKCMDOzTE4QZmaWyQnCzMwyDam3mMzMmklnVw/TF6xh/eZeRre1MnXieKZMaO+3+ztBmJkNQp1dPUybs5zebX0A9GzuZdqc5QD9liTcxWRmNghNX7BmZ3Io6d3Wx/QFa/rtGU4QZmaD0PrNvftUvj+cIMzMBqHRba37VL4/nCDMzAahqRPH0zq8Zbey1uEtTJ3Yf5sBepDazGwQKg1EXzF7GVv7dtBewFtMhbYgJE2StEbSWklXVqlzpqSlklZKui8tG5+WlT6elfSJImM1MxtspkxoZ8LRbZw67jAeuPLsfk0OUGALQlILcB1wLtANLJI0NyJWldVpA64HJkXEOklHAkTEGuDEsvv0ALcVFauZme2pyBbEKcDaiHgiIrYCtwCTK+pcBMyJiHUAEbEh4z7nAI9HRNUlac3MrP8VmSDagafKjrvTsnLHAiMl3StpiaQPZdznQuDmgmI0M7MqihykVkZZ5QbYw4CTSVoJrcBCSQ9GxGMAkg4E
zgOmVX2IdAlwCcDRRx/dD2GbmRkU24LoBo4qOx4DrM+oMz8itkTEJuB+4ISy828Hfh4Rv6n2kIiYGREdEdFxxBGZu+aZmdl+KDJBLAKOkTQubQlcCMytqHM7cIakYZIOAU4FVped/wDuXjIzq4vCupgiYrukjwMLgBbghohYKemy9PyMiFgtaT6wDNgBzIqIFQBpwjgXuLSoGM3MrLpCJ8pFxDxgXkXZjIrj6cD0jGufB15WZHxmZo2g6GW795dnUpuZ1dFALNu9v5wgzMzqqNqy3VfMXsbND6/b6/Wrnn6W40aNKCQ2L9ZnZlZH1Zbn3tq3I9f1x40aweQTi2lpuAVhZlZHo9ta6clIEu1trXz/0tPqENEubkGYmdXRQCzbvb/cgjAzq6OBWLZ7fzlBmJnV2ZQJ7TsHpOvdrVTOXUxmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmbyaq5k1pc6uHqYvWMP6zb2MbqAlthuJE4SZNZ3Orh6mzVm+cy/ons29TJuzHMBJoowThJk1nekL1uxMDiW92/q4YvaynfsyDLRVTz/LcaNG1OXZ1XgMwsyazvqMPaABtvbtGOBIdjlu1Agmn9hYrRe3IMys6Yxua6UnI0m0t7U21I5u9eYWhJk1nakTx9M6vGW3stbhLUydOL5OETUmtyDMrOmUBqKvmL2MrX07aPdbTJmcIMysKU2Z0L5zQNrdStncxWRmZpmcIMzMLJMThJmZZSo0QUiaJGmNpLWSrqxS50xJSyWtlHRfWXmbpNmSHpW0WpI7Cc3MBlBhg9SSWoDrgHOBbmCRpLkRsaqsThtwPTApItZJOrLsFtcC8yPiAkkHAocUFauZme2pyBbEKcDaiHgiIrYCtwCTK+pcBMyJiHUAEbEBQNII4C3AN9LyrRGxucBYzcysQpGvubYDT5UddwOnVtQ5Fhgu6V7gUODaiPgO8EpgI/BNSScAS4DLI2JLgfGa2SDjFVmLVWQLQhllUXE8DDgZeCcwEfispGPT8pOAr0XEBGALUG0M4xJJiyUt3rhxY78Fb2aNrbQia8/mXoJdK7J2dvXUO7Qho8gWRDdwVNnxGGB9Rp1Nactgi6T7gROAnwDdEfFQWm82VRJERMwEZgJ0dHRUJiAzG6L6Y0XWRlxBtZEU2YJYBBwjaVw6yHwhMLeizu3AGZKGSTqEpAtqdUT8GnhKUmlhlHOAVZiZpfpjRdZGXEG1key1BSHpsIh4Zl9vHBHbJX0cWAC0ADdExEpJl6XnZ0TEaknzgWXADmBWRKxIb/E3wI1pcnkCuHhfYzCzocsrshZPEbV7ZST9AlgKfBO4M/Z2QR11dHTE4sWL6x2GmQ2Ayl3hIFmR9YvnH++B6n0gaUlEdGSdy9PFdCxJH/+fA2sl/XM6kGxmVjdTJrTzxfOP58CW5NdYe1urk0M/22sXU9piuBu4W9JZwPeAj0l6BLgyIhYWHKOZWSavyFqsPGMQLwM+SNKC+A3J2MBc4ETgVmBckQGamVl95HnNdSHwXWBKRHSXlS+WNKOYsMzMrN7yJIjx1QamI+Jf+jkeMzNrEHkGqe9KF9UDQNJISQsKjMnMzBpAngRxRPlCeRHxW+DIGvXNzGwIyJMg+iQdXTqQ9Ar2XFPJzMyGmDxjEJ8Bflq2mc9bgEuKC8nMzBpBnnkQ8yWdBLyRZIXWT0bEpsIjMzOzusq7mmsfsAE4GDhOEhFxf3FhmZlZveWZKPeXwOUky3UvJWlJLATOLjY0MzOrpzyD1JcDfwL8KiLOAiaQ7PZmZmZDWJ4E8YeI+AOApIMi4lFg/F6uMTOzQS7PGER3OlGuk2TBvt+y585wZmY2xOR5i+k96ZdXSfox8FJgfqFRmZlZ3dVMEJIOAJZFxOsBIuK+WvXNzGzoqDkGERE7gEfKZ1KbmVlzyDMGMQpYKelhYEupMCLOKywqMzOruzwJ4vOFR2FmZg0nzyC1xx3MzJpQnpnUz7Fr9dYDgeHAlogYUWRgZmZWX3laEIeWH0uaApxSWERmZtYQ8syk3k1EdOJ1mMzMhrw8XUznlx0eAHTgDYPM9klnVw/TF6xh/eZeRre1MnXieKZMaK93WGY15XmL6d1lX28HngQmFxKN2RDU2dXDtDnL6d3WB0DP5l6mzVkO4CRhDS3PGMTFAxGI2VA1fcGancmhpHdbH1fMXsbND6+rU1RDx6qnn+W4UX5npgh7HYOQ9O10sb7S8UhJNxQbltnQsX5zb2b51r4dAxzJ0HTcqBFMPtEtsSLk6WJ6Q0RsLh1ExG8lTSgwJrMhZXRbKz0ZSaK9rZXvX3paHSIyyyfPW0wHSBpZOpB0GPm3KjVrelMnjqd1eMtuZa3DW5g60duqWGPL84v+S8DPJM0meXvpfcDVhUZlNoSUBqKvmL2MrX07aPdbTDZI5Bmk/o6kxSRzHwScHxGr8txc0iTgWqAFmBUR12TUORP4D5IZ2psi4q1p+ZPAc0AfsD0iOvI802xfDcQrqFMmtO8ckHa3kg0WeeZBvBFYGRFfTY8PlXRqRDy0l+tagOuAc4FuYJGkueXJJR38vh6YFBHrJB1ZcZuzImLTvn1LZvn5FVSz6vJ0MX0NOKnseEtGWZZTgLUR8QSApFtI5k+Utz4uAuZExDqAiNiQM26zfjGQr6D6dUwbbPIMUisids6cTjcRypNY2oGnyo6707JyxwIjJd0raYmkD5WdC+CutPySqsFJl0haLGnxxo0bc4RltstAvoLq1zFtsMnzi/4JSX9L0moA+BjwRI7rlFFWuUTHMOBk4BygFVgo6cGIeAw4PSLWp91Od0t6NCLu3+OGETOBmQAdHR1eAsT2iV9BNasuTwviMuBNQA9JK+BU4KM5rusGjio7HgOsz6gzPyK2pGMN9wMnAETE+vTzBuA2vIKsFcCvoJpVt9cEEREbIuLCiDgyIl4O/AVwZo57LwKOkTRO0oHAhcDcijq3A2dIGibpEJLks1rSSyQdCiDpJcCfAityf1dmOU2Z0M4Xzz+eA1uSH4X2tla+eP7xHqA2I+eEt/SNpD8FPpB+/ilwa61rImK7pI8DC0hec70hIlZKuiw9PyMiVkuaDywDdpC8CrtC0iuB2ySVYrwpIubv13dothd+BdUsW80EIektJG8avRN4GDgdeGVEPJ/n5hExD5hXUTaj4ng6ML2i7AnSriYzM6uPqglCUjewjmRwempEPCfpl3mTg1l/8V4KZvVRawzihySvpb4feHc6FuC3hGxAlSay9WzuJdg1ka2zq6feoZkNeVVbEBFxuaRPAGeRjD1MB0ZIeh8wLyJ+P0AxWhMbqIlsnsRmtqeabzFF4p6I+CgwlmQ8YgrJrnJmhers6smcowD9P5HNk9jM9pR72e6I2AbcAdwhqbW4kMx2dS1V44lsZsXLM1FuDxGR/WedWT/J6loq8UQ2s4HhjX8GgN/C2XfV1kgCPJHNbIDsVwvC8vNbOPtndFt2L2Z7W6uTg9kAybMfxLHAVOAV5fU
j4uwC4xoyBnI56aHk4OEHcIBgR9mL1e5aMhtYebqYbgVmAF8n2d2tqbzY7qGBXE56KDn8jw4C4Klner1Np1md5EkQ2yPia3uvNvT0x25jXk7azAarPAniDkkfI1ly+4VSYUQ8U1hUdVZqNWT9Yt/X7iF3lZjZYJUnQXw4/Ty1rCyAV/Z/OPVX2WrIsi/dQ+4qMbPBaq8JIiLGDUQgjaLW+/cl7h4ys2aQ5y2m4cBfAW9Ji+4F/m86s3rIqfX+Pbh7yMyaR555EF8j2Tf6+vTjZHbtTz3kVHv/HrzbmJk1lzxjEH8SEeWb99wj6ZGiAqq3qRPH7zEG0Tq8xYnBzJpOnhZEn6RXlQ7S7UCH7HwI71FsZpbI04KYCvxY0hOASGZUX1xoVHXmPYrNzPK9xfRfko4BxpMkiEcj4oW9XGZmZoNcrT2pz46IeySdX3HqVZKIiDkFx2ZmZnVUqwXxVuAe4N0Z5wJwgjAzG8Jq7Un9j+mX/xQRvyw/J6mpJs+ZmTWjPG8x/TCjbHZ/B2JmZo2l1hjEa4DXAS+tGIcYARxcdGBmZlZftcYgxgPvAtrYfRziOeCjRQZlZmb1V2sM4nbgdkmnRcTCAYzJzMwaQJ6Jcl2S/pqku2ln11JEfKSwqMzMrO7yDFJ/F/hjYCJwHzCGpJvJzMyGsDwJ4tUR8VlgS0R8G3gncHyxYdVPZ1cPp19zDw/98hm61m2ms6un3iGZmdVFni6m0r4PmyW9Hvg1MLawiOqocje5rX079nkPajOzoSJPC2KmpJHAZ4G5wCrgX/PcXNIkSWskrZV0ZZU6Z0paKmmlpPsqzrVI6pL0ozzPe7GydpPr3dbH9AVrBuLxZmYNJc9ifbPSL+9jH/ahltQCXAecC3QDiyTNjYhVZXXaSDYhmhQR6yQdWXGby4HVJHMvCtXZ1UNPld3k9rbLnJnZUFRrotynal0YEV/ey71PAdZGxBPp/W4BJpO0QEouAuZExLr0nhvKnj+GZLzjaqBmLC9WZ1cPU2dX3wOp1i5zZmZDVa0upkPTjw6SPanb04/LgONy3LsdeKrsuDstK3csMFLSvZKWSPpQ2bn/AK4AdtR6iKRLJC2WtHjjxo05wtrT5+9Yyba+yDznPajNrFnVmij3eQBJdwEnRcRz6fFVwK057q2s22Y8/2TgHKAVWCjpQZLEsSEilkg6s9ZDImImMBOgo6Mj+7f8Xvz2+W1Vz3k3OTNrVnneYjoa2Fp2vJV8bzF1A0eVHY8B1mfU2RQRW4Atku4HTgBOAs6T9A6SyXkjJH0vIj6Y47n9ysnBzJpV3olyD0u6StI/Ag8B38lx3SLgGEnjJB0IXEjyFlS524EzJA2TdAhwKrA6IqZFxJiIGJted09RyaGzqyezqQPQ1jq8iEeamQ0Ked5iulrSncAZadHFEdGV47rtkj4OLABagBsiYqWky9LzMyJitaT5wDKSsYZZEbFif7+ZfVWa95DVLzX8AHHVea8bqFDMzBqOIrK77SWNiIhnJR2WdT4inik0sv3Q0dERixcvzl3/9GvuyXy1tUXiS+87wd1LZjbkSVoSER1Z52q1IG4iWe57CbsPLis9zj0nolFVm9+wI8LJwcyaXq23mN6Vfh6y24uObmvNbEF43oOZWe2JcifVujAift7/4QysqRPH77b2Enjeg5lZSa0upi/VOBfA2f0cy4ArdSNdMXsZW/t20N7WytSJ4929ZGZG7S6mswYykHqZMqGdmx9eB8D3Lz2tztGYmTWOPBPlSJf5Po7dd5TLMxfCzMwGqb0miHRy3JkkCWIe8Hbgp+SbLGdmZoNUnpnUF5CslfTriLiYZCmMgwqNyszM6i5PguiNiB3AdkkjgA0MgTkQZmZWW54xiMXpxj5fJ5k093vg4UKjMjOzuqs1D+KrwE0R8bG0aEa6btKIiFg2INGZmVnd1GpB/AL4kqRRwPeBmyNi6cCEZWZm9VZ1DCIiro2I04C3As8A35S0WtLnJB07YBGamVld7HWQOiJ+FRH/EhETSPaQfg+wuvDIzMysrvaaICQNl/RuSTcCdwKPAe8tPDIzM6urWoPU5wIfAN5J8tbSLcAl6fagZmY2xNUapP40yZ4Qf9eImwOZmVmxmn6xPjMzy5ZnJrWZmTUhJwgzM8vkBGFmZpmcIMzMLJMThJmZZXKCMDOzTE4QZmaWyQnCzMwyOUGYmVkmJwgzM8vkBGFmZpmcIMzMLJMThJmZZSo0QUiaJGmNpLWSrqxS50xJSyWtlHRfWnawpIclPZKWf77IOM3MbE+19oN4USS1ANcB5wLdwCJJcyNiVVmdNuB6YFJErJN0ZHrqBeDsiPi9pOHATyXdGREPFhWvmZntrsgWxCnA2oh4IiK2kuxIN7mizkXAnIhYBxARG9LPERG/T+sMTz+iwFjNzKxCkQmiHXiq7Lg7LSt3LDBS0r2Slkj6UOmEpBZJS4ENwN0R8VDWQyRdImmxpMUbN27s52/BzKx5FZkglFFW2QoYBpxMsu/1ROCzko4FiIi+iDgRGAOcIun1WQ+JiJkR0RERHUcccUT/RW9m1uSKTBDdwFFlx2OA9Rl15kfElojYBNwPnFBeISI2A/cCk4oL1czMKhWZIBYBx0gaJ+lA4EJgbkWd24EzJA2TdAhwKrBa0hHpADaSWoG3AY8WGKuZmVUo7C2miNgu6ePAAqAFuCEiVkq6LD0/IyJWS5oPLAN2ALMiYoWkNwDfTt+EOgD4QUT8qKhYzcxsT4UlCICImAfMqyibUXE8HZheUbYMmFBkbGZmVptnUpuZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMTZ8gOrt66Fq3mYd++QynX3MPnV099Q7JzKwhNHWC6OzqYdqc5Wzt2wFAz+Zeps1Z7iRhZkaTJ4jpC9bQu61vt7LebX1MX7CmThGZmTWOpk4Q6zf37lO5mVkzaeoEMbqtdZ/KzcyaSVMniKkTx9M6vGW3stbhLUydOL5OEZmZNY5h9Q6gnqZMaAeSsYj1m3sZ3dbK1Injd5abmTWzpk4QkCQJJwQzsz01dReTmZlVV2iCkDRJ0hpJayVdWaXOmZKWSlop6b607ChJP5a0Oi2/vMg4zcxsT4V1MUlqAa4DzgW6gUWS5kbEqrI6bcD1wKSIWCfpyPTUduB/RcTPJR0KLJF0d/m1ZmZWrCJbEKcAayPiiYjYCtwCTK6ocxEwJyLWAUTEhvTz0xHx8/Tr54DVgAcKzMwGUJEJoh14quy4mz1/yR8LjJR0r6Qlkj5UeRNJY4EJwENZD5F0iaTFkhZv3LixXwI3M7Ni32JSRllkPP9k4BygFVgo6cGIeAxA0h8BPwQ+ERHPZj0kImYCM9P6GyX9ah/jPBzYtI/XDLTBECMMjjgdY/9wjP2jEWJ8RbUTRSaIbuCosuMxwPqMOpsiYguwRdL9wAnAY5
KGkySHGyNiTp4HRsQR+xqkpMUR0bGv1w2kwRAjDI44HWP/cIz9o9FjLLKLaRFwjKRxkg4ELgTmVtS5HThD0jBJhwCnAqslCfgGsDoivlxgjGZmVkVhLYiI2C7p48ACoAW4ISJWSrosPT8jIlZLmg8sA3YAsyJihaQ3A38OLJe0NL3lpyNiXlHxmpnZ7gqdSZ3+Qp9XUTaj4ng6ML2i7Kdkj2EUYeYAPefFGAwxwuCI0zH2D8fYPxo6RkVUjhubmZl5qQ0zM6vCCcLMzDI1dYLIs1bUQKu2DpWkwyTdLekX6eeRDRBri6QuST9qxBgltUmaLenR9N/ztAaM8ZPpf+cVkm6WdHAjxCjpBkkbJK0oK6sal6Rp6c/RGkkT6xjj9PS/9zJJt6XL+TRUjGXn/k5SSDq8njHW0rQJomytqLcDxwEfkHRcfaMCdq1D9VrgjcBfp3FdCfxXRBwD/Fd6XG+XkyyDUtJoMV4LzI+I15DMr1lNA8UoqR34W6AjIl5P8rbfhQ0S47eASRVlmXGl/39eCLwuveb69OerHjHeDbw+It4APAZMa8AYkXQUyTp168rK6hVjVU2bIMi3VtSAq7EO1WTg22m1bwNT6hNhQtIY4J3ArLLiholR0gjgLSTzaYiIrRGxmQaKMTUMaJU0DDiEZDJp3WOMiPuBZyqKq8U1GbglIl6IiF8Ca0l+vgY8xoi4KyK2p4cPkkzQbagYU/8OXMHuq0vUJcZamjlB5Fkrqq4q1qF6eUQ8DUkSAY6sfuWA+A+S/8F3lJU1UoyvBDYC30y7wWZJekkjxRgRPcC/kfwV+TTwu4i4q5FirFAtrkb9WfoIcGf6dcPEKOk8oCciHqk41TAxljRzgsizVlTd5FmHql4kvQvYEBFL6h1LDcOAk4CvRcQEYAv17/LaTdqHPxkYB4wGXiLpg/WNar803M+SpM+QdNfeWCrKqDbgMaYrRnwG+FzW6Yyyuv47NnOCyLNWVF1UWYfqN5JGpedHARvqFR9wOnCepCdJuubOlvQ9GivGbqA7IkqrAM8mSRiNFOPbgF9GxMaI2AbMAd7UYDGWqxZXQ/0sSfow8C7gz2LXRK9GifFVJH8QPJL+/IwBfi7pj2mcGHdq5gSRZ62oAVdjHaq5wIfTrz9Mso5VXUTEtIgYExFjSf7d7omID9JYMf4aeErS+LToHGAVDRQjSdfSGyUdkv53P4dkzKmRYixXLa65wIWSDpI0DjgGeLgO8SFpEvD3wHkR8XzZqYaIMSKWR8SRETE2/fnpBk5K/39tiBh3ExFN+wG8g+RNh8eBz9Q7njSmN5M0K5cBS9OPdwAvI3lz5Bfp58PqHWsa75nAj9KvGypG4ERgcfpv2QmMbMAYPw88CqwAvgsc1AgxAjeTjItsI/kl9he14iLpNnkcWAO8vY4xriXpxy/97MxotBgrzj8JHF7PGGt9eKkNMzPL1MxdTGZmVoMThJmZZXKCMDOzTE4QZmaWyQnCzMwyOUHYoCHp3yV9oux4gaRZZcdfkvSpGtd/S9IF6df3Stpjs3hJwyVdk65YukLSw5Lenp57snzlzX2Ie+dzq5y/TtJSSask9aZfL5V0gaR55SuS9hdJo5Suwlvl/IGS7k/XiLIm5QRhg8nPSGYaI+kA4HCSlS9L3gQ88CKf8QVgFMmKoK8H3g0c+iLvWVNE/HVEnEgy3+XxiDgx/ZgdEe+IZJHB/vYp4Os1YtpKMtfh/QU82wYJJwgbTB4gTRAkiWEF8JykkZIOAl4LdFYQZOsAAAMuSURBVEn6nKRFaQtgZjpLea/SdXI+CvxNRLwAEBG/iYgfZNT9VHr/FRWtmg+lexE8Ium7Gdd9IW1R5PrZK7VaJI1Vss/BrPSZN0p6m6QH0tbOKWn9lyjZg2BRukhhtRWK3wvMT695XdpSWprGfkxapxP4szxx2tDk5qMNGhGxXtJ2SUeTJIqFJKtdngb8DlgWEVslfTUi/gkg/SX9LuCOHI94NbAu9rI4oqSTgYuBU0kWWHtI0n3AVpKZsKdHxCZJh1Vc96/AS4GLY/9mqL4a+B/AJSRLxVxEMvP+PODTJMtvf4Zk6ZOPpF1TD0v6z4jYUhbHOOC3pSQIXAZcGxE3psvOlPYgWAH8yX7EaUOEWxA22JRaEaUEsbDs+GdpnbMkPSRpOXA2u3dD9Yc3A7dFxJaI+D3JIntnpM+aHRGbACKifB+AzwJtEXHpfiYHSBb2Wx4RO4CVJJv3BLAcGJvW+VPgSklLgXuBg4GjK+4zimQp9JKFwKcl/T3wiojoTePvA7ZKKrSLzRqXE4QNNqVxiONJ/sJ9kKQF8SbgAUkHA9cDF0TE8ST97AfnvPda4OgcvxCrdVmJ6sszLwJOrmxV7KMXyr7eUXa8g129AQLeWzaOcXRElO/6B9BL2b9JRNxE0grpBRZIOrus7kHAH15EzDaIOUHYYPMASZfRMxHRl/6V3kaSJBay6xffJiV7alR9e6hSJKt/fgP4StrVUnrbp3KPhvuBKekqrC8B3gP8hGRQ932SXpZeW54M5gPXAP+v4L/IFwB/Uxp3kTQho85j7GpxIOmVwBMR8RWSFUXfkJa/DCgtRW5NyAnCBpvlJG8vPVhR9ruI2JS+8fP1tKyT5C/3ffEPJN0vq5RsNN/J7t0xRLIl7LdIlmJ+CJgVEV0RsRK4GrhP0iPAlyuuuzWNba6k1n2MK68vAMOBZWn8X6iskI5HPC7p1WnR+4EVabfUa4DvpOVnAfMKitMGAa/mataEJL0HODki/qFGnTnAtIhYM3CRWSPxW0xmTSgibit1hWVJu9g6nRyam1sQZmaWyWMQZmaWyQnCzMwyOUGYmVkmJwgzM8vkBGFmZpn+PwR1vy10nLEbAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -532,20 +716,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "LGBMClassifier()" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "lgbm.fit(X_train, y_train)" ] @@ -610,14 +783,14 @@ "text": [ "default xgboost accuracy = 0.6676060098186078\n", "default lgbm accuracy = 0.6602346380315323\n", - "flaml (2 min) accuracy = 0.670713258087003\n" + "flaml (10 min) accuracy = 0.6732939797991784\n" ] } ], "source": [ "print('default xgboost accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred_xgb, y_test))\n", "print('default lgbm accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred_lgbm, y_test))\n", - "print('flaml (2 min) accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred, y_test))" + "print('flaml (10 min) accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred, y_test))" ] }, { @@ -787,107 +960,86 @@ "name": "stderr", "output_type": "stream", "text": [ - "[flaml.automl: 03-25 15:17:41] {2092} INFO - task = classification\n", - "[flaml.automl: 03-25 15:17:41] {2094} INFO - Data split method: stratified\n", - "[flaml.automl: 03-25 15:17:41] {2098} INFO - Evaluation method: holdout\n", - "[flaml.automl: 03-25 15:17:41] {2175} INFO - Minimizing error metric: 1-accuracy\n", - "[flaml.automl: 03-25 15:17:41] {2268} INFO - List of ML learners in AutoML Run: ['RGF', 'lgbm', 'rf', 'xgboost']\n", - "[flaml.automl: 03-25 15:17:41] {2554} INFO - iteration 0, current learner RGF\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/rgf/utils.py:224: UserWarning: Cannot find FastRGF executable files. FastRGF estimators will be unavailable for usage.\n", - " warnings.warn(\"Cannot find FastRGF executable files. \"\n", - "[flaml.automl: 03-25 15:17:43] {2684} INFO - Estimated sufficient time budget=548920s. 
Estimated necessary time budget=549s.\n", - "[flaml.automl: 03-25 15:17:43] {2731} INFO - at 2.0s,\testimator RGF's best error=0.3840,\tbest estimator RGF's best error=0.3840\n", - "[flaml.automl: 03-25 15:17:43] {2554} INFO - iteration 1, current learner RGF\n", - "[flaml.automl: 03-25 15:17:43] {2731} INFO - at 2.6s,\testimator RGF's best error=0.3840,\tbest estimator RGF's best error=0.3840\n", - "[flaml.automl: 03-25 15:17:43] {2554} INFO - iteration 2, current learner RGF\n", - "[flaml.automl: 03-25 15:17:44] {2731} INFO - at 3.2s,\testimator RGF's best error=0.3840,\tbest estimator RGF's best error=0.3840\n", - "[flaml.automl: 03-25 15:17:44] {2554} INFO - iteration 3, current learner RGF\n", - "[flaml.automl: 03-25 15:17:44] {2731} INFO - at 3.9s,\testimator RGF's best error=0.3840,\tbest estimator RGF's best error=0.3840\n", - "[flaml.automl: 03-25 15:17:44] {2554} INFO - iteration 4, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:44] {2731} INFO - at 3.9s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", - "[flaml.automl: 03-25 15:17:44] {2554} INFO - iteration 5, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 3.9s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 6, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.0s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 7, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.0s,\testimator lgbm's best error=0.3661,\tbest estimator lgbm's best error=0.3661\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.1s,\testimator lgbm's best error=0.3661,\tbest estimator lgbm's best error=0.3661\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 9, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.1s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.2s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 11, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.2s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 12, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.3s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 13, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.4s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 14, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.6s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 15, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:45] {2731} INFO - at 4.8s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - 
"[flaml.automl: 03-25 15:17:45] {2554} INFO - iteration 16, current learner RGF\n", - "[flaml.automl: 03-25 15:17:46] {2731} INFO - at 5.4s,\testimator RGF's best error=0.3840,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:46] {2554} INFO - iteration 17, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:46] {2731} INFO - at 5.5s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:46] {2554} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:46] {2731} INFO - at 5.7s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:46] {2554} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:46] {2731} INFO - at 5.8s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:46] {2554} INFO - iteration 20, current learner RGF\n", - "[flaml.automl: 03-25 15:17:47] {2731} INFO - at 6.5s,\testimator RGF's best error=0.3766,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:47] {2554} INFO - iteration 21, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:47] {2731} INFO - at 6.5s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:47] {2554} INFO - iteration 22, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:47] {2731} INFO - at 6.6s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:47] {2554} INFO - iteration 23, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:47] {2731} INFO - at 6.7s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:47] {2554} INFO - iteration 24, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:47] {2731} INFO - at 6.8s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:47] {2554} INFO - iteration 25, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:48] {2731} INFO - at 7.0s,\testimator lgbm's best error=0.3590,\tbest estimator lgbm's best error=0.3590\n", - "[flaml.automl: 03-25 15:17:48] {2554} INFO - iteration 26, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:48] {2731} INFO - at 7.2s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:48] {2554} INFO - iteration 27, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:48] {2731} INFO - at 7.4s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:48] {2554} INFO - iteration 28, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:48] {2731} INFO - at 7.6s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:48] {2554} INFO - iteration 29, current learner RGF\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.2s,\testimator RGF's best error=0.3766,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 30, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.3s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 31, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.4s,\testimator lgbm's best 
error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 32, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.5s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 33, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.6s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 34, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.6s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 35, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.7s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 36, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.8s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 37, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:49] {2731} INFO - at 8.9s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:49] {2554} INFO - iteration 38, current learner lgbm\n", - "[flaml.automl: 03-25 15:17:51] {2731} INFO - at 10.0s,\testimator lgbm's best error=0.3580,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:51] {2554} INFO - iteration 39, current learner xgboost\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - " from pandas import MultiIndex, Int64Index\n", - "[flaml.automl: 03-25 15:17:51] {2731} INFO - at 10.5s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3580\n", - "[flaml.automl: 03-25 15:17:53] {2961} INFO - retrain lgbm for 2.2s\n", - "[flaml.automl: 03-25 15:17:53] {2968} INFO - retrained model: LGBMClassifier(colsample_bytree=0.8119653279413637,\n", - " learning_rate=0.20035468820761498, max_bin=1023,\n", - " min_child_samples=7, n_estimators=90, num_leaves=9,\n", - " reg_alpha=0.0015564673105246884, reg_lambda=0.003044645769210298,\n", + "[flaml.automl: 03-30 22:00:01] {2105} INFO - task = classification\n", + "[flaml.automl: 03-30 22:00:02] {2107} INFO - Data split method: stratified\n", + "[flaml.automl: 03-30 22:00:02] {2111} INFO - Evaluation method: holdout\n", + "[flaml.automl: 03-30 22:00:02] {2188} INFO - Minimizing error metric: 1-accuracy\n", + "[flaml.automl: 03-30 22:00:02] {2281} INFO - List of ML learners in AutoML Run: ['RGF', 'lgbm', 'rf', 'xgboost']\n", + "[flaml.automl: 03-30 22:00:02] {2567} INFO - iteration 0, current learner RGF\n", + "[flaml.automl: 03-30 22:00:02] {2697} INFO - Estimated sufficient time budget=255753s. 
Estimated necessary time budget=256s.\n",
+ "[flaml.automl: 03-30 22:00:02] {2744} INFO - at 1.3s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 03-30 22:00:02] {2567} INFO - iteration 1, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:03] {2744} INFO - at 1.9s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 03-30 22:00:03] {2567} INFO - iteration 2, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:04] {2744} INFO - at 2.6s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 03-30 22:00:04] {2567} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:04] {2744} INFO - at 2.7s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 03-30 22:00:04] {2567} INFO - iteration 4, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:04] {2744} INFO - at 3.2s,\testimator RGF's best error=0.3787,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 03-30 22:00:04] {2567} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:04] {2744} INFO - at 3.3s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 03-30 22:00:04] {2567} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.4s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.4s,\testimator lgbm's best error=0.3661,\tbest estimator lgbm's best error=0.3661\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.5s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 9, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.6s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 10, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.7s,\testimator lgbm's best error=0.3633,\tbest estimator lgbm's best error=0.3633\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 11, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.8s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 3.9s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 13, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 4.1s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 4.2s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:05] {2744} INFO - at 4.3s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:05] {2567} INFO - iteration 16, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:06] {2744} INFO - at 4.9s,\testimator RGF's best error=0.3787,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:06] {2567} INFO - iteration 17, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:06] {2744} INFO - at 5.0s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:06] {2567} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:06] {2744} INFO - at 5.1s,\testimator lgbm's best error=0.3613,\tbest estimator lgbm's best error=0.3613\n",
+ "[flaml.automl: 03-30 22:00:06] {2567} INFO - iteration 19, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:07] {2744} INFO - at 5.5s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 03-30 22:00:07] {2567} INFO - iteration 20, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:07] {2744} INFO - at 6.1s,\testimator RGF's best error=0.3669,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 03-30 22:00:07] {2567} INFO - iteration 21, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:08] {2744} INFO - at 6.7s,\testimator RGF's best error=0.3669,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 03-30 22:00:08] {2567} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:08] {2744} INFO - at 6.9s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:08] {2567} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:08] {2744} INFO - at 7.1s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:08] {2567} INFO - iteration 24, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:08] {2744} INFO - at 7.2s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:08] {2567} INFO - iteration 25, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:09] {2744} INFO - at 7.4s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:09] {2567} INFO - iteration 26, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:09] {2744} INFO - at 7.5s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:09] {2567} INFO - iteration 27, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:09] {2744} INFO - at 7.6s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:09] {2567} INFO - iteration 28, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:09] {2744} INFO - at 8.2s,\testimator RGF's best error=0.3669,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:09] {2567} INFO - iteration 29, current learner RGF\n",
+ "[flaml.automl: 03-30 22:00:10] {2744} INFO - at 9.3s,\testimator RGF's best error=0.3642,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:10] {2567} INFO - iteration 30, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:11] {2744} INFO - at 9.4s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:11] {2567} INFO - iteration 31, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:11] {2744} INFO - at 10.0s,\testimator lgbm's best error=0.3544,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:11] {2567} INFO - iteration 32, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:11] {2744} INFO - at 10.0s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3544\n",
+ "[flaml.automl: 03-30 22:00:13] {2974} INFO - retrain lgbm for 1.8s\n",
+ "[flaml.automl: 03-30 22:00:13] {2981} INFO - retrained model: LGBMClassifier(colsample_bytree=0.8485873378520249,\n",
+ " learning_rate=0.6205212209154768, max_bin=1023,\n",
+ " min_child_samples=6, n_estimators=46, num_leaves=16,\n",
+ " reg_alpha=0.0009765625, reg_lambda=0.0033009704647149916,\n",
" verbose=-1)\n",
- "[flaml.automl: 03-25 15:17:53] {2297} INFO - fit succeeded\n",
- "[flaml.automl: 03-25 15:17:53] {2298} INFO - Time taken to find the best model: 7.228140354156494\n",
- "[flaml.automl: 03-25 15:17:53] {2309} WARNING - Time taken to find the best model is 72% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 03-30 22:00:13] {2310} INFO - fit succeeded\n",
+ "[flaml.automl: 03-30 22:00:13] {2311} INFO - Time taken to find the best model: 6.87259840965271\n"
] } ],
@@ -957,120 +1109,139 @@
"name": "stderr", "output_type": "stream", "text": [
- "[flaml.automl: 03-25 15:17:54] {2092} INFO - task = classification\n",
- "[flaml.automl: 03-25 15:17:54] {2094} INFO - Data split method: stratified\n",
- "[flaml.automl: 03-25 15:17:54] {2098} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 03-25 15:17:54] {2175} INFO - Minimizing error metric: customized metric\n",
- "[flaml.automl: 03-25 15:17:54] {2268} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'lrl1']\n",
- "[flaml.automl: 03-25 15:17:54] {2554} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:54] {2684} INFO - Estimated sufficient time budget=15979s. Estimated necessary time budget=392s.\n",
- "[flaml.automl: 03-25 15:17:54] {2731} INFO - at 0.5s,\testimator lgbm's best error=0.6647,\tbest estimator lgbm's best error=0.6647\n",
- "[flaml.automl: 03-25 15:17:54] {2554} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:54] {2731} INFO - at 0.6s,\testimator lgbm's best error=0.6647,\tbest estimator lgbm's best error=0.6647\n",
- "[flaml.automl: 03-25 15:17:54] {2554} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:54] {2731} INFO - at 0.6s,\testimator lgbm's best error=0.6491,\tbest estimator lgbm's best error=0.6491\n",
- "[flaml.automl: 03-25 15:17:54] {2554} INFO - iteration 3, current learner xgboost\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.2s,\testimator xgboost's best error=0.6845,\tbest estimator lgbm's best error=0.6491\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 4, current learner extra_tree\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.3s,\testimator extra_tree's best error=0.6629,\tbest estimator lgbm's best error=0.6491\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 5, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.4s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 6, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.4s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.5s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 8, current learner rf\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.7s,\testimator rf's best error=0.6542,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 9, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.7s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 10, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 1.8s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 11, current learner rf\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 2.0s,\testimator rf's best error=0.6502,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 12, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:55] {2731} INFO - at 2.1s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:55] {2554} INFO - iteration 13, current learner xgboost\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:17:56] {2731} INFO - at 2.6s,\testimator xgboost's best error=0.6845,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:56] {2554} INFO - iteration 14, current learner rf\n",
- "[flaml.automl: 03-25 15:17:56] {2731} INFO - at 2.8s,\testimator rf's best error=0.6502,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:56] {2554} INFO - iteration 15, current learner extra_tree\n",
- "[flaml.automl: 03-25 15:17:56] {2731} INFO - at 3.0s,\testimator extra_tree's best error=0.6622,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:56] {2554} INFO - iteration 16, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:57] {2731} INFO - at 3.2s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:57] {2554} INFO - iteration 17, current learner xgboost\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:17:57] {2731} INFO - at 3.7s,\testimator xgboost's best error=0.6729,\tbest estimator lgbm's best error=0.6400\n",
- "[flaml.automl: 03-25 15:17:57] {2554} INFO - iteration 18, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:57] {2731} INFO - at 3.8s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:57] {2554} INFO - iteration 19, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:57] {2731} INFO - at 4.0s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:57] {2554} INFO - iteration 20, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.1s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 21, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.2s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.4s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.5s,\testimator lgbm's best error=0.6335,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 24, current learner rf\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.7s,\testimator rf's best error=0.6502,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 25, current learner extra_tree\n",
- "[flaml.automl: 03-25 15:17:58] {2731} INFO - at 4.8s,\testimator extra_tree's best error=0.6622,\tbest estimator lgbm's best error=0.6335\n",
- "[flaml.automl: 03-25 15:17:58] {2554} INFO - iteration 26, current learner lgbm\n",
- "[flaml.automl: 03-25 15:17:59] {2731} INFO - at 5.6s,\testimator lgbm's best error=0.6328,\tbest estimator lgbm's best error=0.6328\n",
- "[flaml.automl: 03-25 15:17:59] {2554} INFO - iteration 27, current learner catboost\n",
- "[flaml.automl: 03-25 15:17:59] {2731} INFO - at 5.7s,\testimator catboost's best error=0.6828,\tbest estimator lgbm's best error=0.6328\n",
- "[flaml.automl: 03-25 15:17:59] {2554} INFO - iteration 28, current learner catboost\n",
- "[flaml.automl: 03-25 15:17:59] {2731} INFO - at 5.8s,\testimator catboost's best error=0.6828,\tbest estimator lgbm's best error=0.6328\n",
- "[flaml.automl: 03-25 15:17:59] {2554} INFO - iteration 29, current learner catboost\n",
- "[flaml.automl: 03-25 15:17:59] {2731} INFO - at 5.9s,\testimator catboost's best error=0.6738,\tbest estimator lgbm's best error=0.6328\n",
- "[flaml.automl: 03-25 15:17:59] {2554} INFO - iteration 30, current learner xgboost\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:18:00] {2731} INFO - at 6.6s,\testimator xgboost's best error=0.6490,\tbest estimator lgbm's best error=0.6328\n",
- "[flaml.automl: 03-25 15:18:00] {2554} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 03-25 15:18:01] {2731} INFO - at 7.3s,\testimator lgbm's best error=0.6276,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:01] {2554} INFO - iteration 32, current learner lgbm\n",
- "[flaml.automl: 03-25 15:18:01] {2731} INFO - at 8.0s,\testimator lgbm's best error=0.6276,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:01] {2554} INFO - iteration 33, current learner lgbm\n",
- "[flaml.automl: 03-25 15:18:02] {2731} INFO - at 8.7s,\testimator lgbm's best error=0.6276,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:02] {2554} INFO - iteration 34, current learner xgboost\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:18:03] {2731} INFO - at 9.5s,\testimator xgboost's best error=0.6490,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:03] {2554} INFO - iteration 35, current learner rf\n",
- "[flaml.automl: 03-25 15:18:03] {2731} INFO - at 9.6s,\testimator rf's best error=0.6502,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:03] {2554} INFO - iteration 36, current learner catboost\n",
- "[flaml.automl: 03-25 15:18:03] {2731} INFO - at 9.7s,\testimator catboost's best error=0.6738,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:03] {2554} INFO - iteration 37, current learner xgb_limitdepth\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "/home/qxw5138/miniconda3/envs/ds440flaml/lib/python3.8/site-packages/xgboost/data.py:192: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
- " from pandas import MultiIndex, Int64Index\n",
- "[flaml.automl: 03-25 15:18:04] {2731} INFO - at 10.5s,\testimator xgb_limitdepth's best error=0.6682,\tbest estimator lgbm's best error=0.6276\n",
- "[flaml.automl: 03-25 15:18:04] {2961} INFO - retrain lgbm for 0.6s\n",
- "[flaml.automl: 03-25 15:18:04] {2968} INFO - retrained model: LGBMClassifier(learning_rate=0.6108586953417215, max_bin=1023,\n",
- " min_child_samples=11, n_estimators=6, num_leaves=17,\n",
- " reg_alpha=0.003932726553619989, reg_lambda=22.23452449721895,\n",
- " verbose=-1)\n",
- "[flaml.automl: 03-25 15:18:04] {2297} INFO - fit succeeded\n",
- "[flaml.automl: 03-25 15:18:04] {2298} INFO - Time taken to find the best model: 7.292369365692139\n",
- "[flaml.automl: 03-25 15:18:04] {2309} WARNING - Time taken to find the best model is 73% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 03-30 22:00:14] {2105} INFO - task = classification\n",
+ "[flaml.automl: 03-30 22:00:14] {2107} INFO - Data split method: stratified\n",
+ "[flaml.automl: 03-30 22:00:14] {2111} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 03-30 22:00:14] {2188} INFO - Minimizing error metric: customized metric\n",
+ "[flaml.automl: 03-30 22:00:14] {2281} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'lrl1']\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2697} INFO - Estimated sufficient time budget=48059s. Estimated necessary time budget=1180s.\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 0.8s,\testimator lgbm's best error=0.6796,\tbest estimator lgbm's best error=0.6796\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 0.9s,\testimator lgbm's best error=0.6796,\tbest estimator lgbm's best error=0.6796\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 0.9s,\testimator lgbm's best error=0.6491,\tbest estimator lgbm's best error=0.6491\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 1.0s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 1.1s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:14] {2744} INFO - at 1.2s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
+ "[flaml.automl: 03-30 22:00:14] {2567} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.3s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 7, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.3s,\testimator xgboost's best error=0.6672,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.4s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 9, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.5s,\testimator xgboost's best error=0.6672,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 10, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.5s,\testimator xgboost's best error=0.6500,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 11, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.6s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 12, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.6s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 13, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.7s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.8s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 15, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 1.9s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 16, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 2.0s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 17, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:15] {2744} INFO - at 2.0s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:15] {2567} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.3s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 19, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.4s,\testimator xgboost's best error=0.6393,\tbest estimator xgboost's best error=0.6393\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 20, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.4s,\testimator extra_tree's best error=0.6734,\tbest estimator xgboost's best error=0.6393\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 21, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.6s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 22, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.7s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 23, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.7s,\testimator extra_tree's best error=0.6617,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 24, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.8s,\testimator extra_tree's best error=0.6617,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 25, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 2.9s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 26, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 3.1s,\testimator xgboost's best error=0.6308,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 27, current learner rf\n",
+ "[flaml.automl: 03-30 22:00:16] {2744} INFO - at 3.1s,\testimator rf's best error=0.6531,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:16] {2567} INFO - iteration 28, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.3s,\testimator xgboost's best error=0.6308,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 29, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.5s,\testimator xgboost's best error=0.6308,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 30, current learner rf\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.6s,\testimator rf's best error=0.6471,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 31, current learner rf\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.6s,\testimator rf's best error=0.6471,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 32, current learner rf\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.8s,\testimator rf's best error=0.6471,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 33, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 3.9s,\testimator extra_tree's best error=0.6617,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 34, current learner rf\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 4.0s,\testimator rf's best error=0.6460,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 35, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 4.1s,\testimator xgboost's best error=0.6308,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 36, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:17] {2744} INFO - at 4.2s,\testimator extra_tree's best error=0.6527,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:17] {2567} INFO - iteration 37, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:18] {2744} INFO - at 4.3s,\testimator xgboost's best error=0.6308,\tbest estimator xgboost's best error=0.6308\n",
+ "[flaml.automl: 03-30 22:00:18] {2567} INFO - iteration 38, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:18] {2744} INFO - at 5.1s,\testimator xgboost's best error=0.6252,\tbest estimator xgboost's best error=0.6252\n",
+ "[flaml.automl: 03-30 22:00:18] {2567} INFO - iteration 39, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:19] {2744} INFO - at 5.6s,\testimator xgboost's best error=0.6252,\tbest estimator xgboost's best error=0.6252\n",
+ "[flaml.automl: 03-30 22:00:19] {2567} INFO - iteration 40, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:19] {2744} INFO - at 5.7s,\testimator extra_tree's best error=0.6527,\tbest estimator xgboost's best error=0.6252\n",
+ "[flaml.automl: 03-30 22:00:19] {2567} INFO - iteration 41, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:19] {2744} INFO - at 5.8s,\testimator extra_tree's best error=0.6527,\tbest estimator xgboost's best error=0.6252\n",
+ "[flaml.automl: 03-30 22:00:19] {2567} INFO - iteration 42, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:19] {2744} INFO - at 6.0s,\testimator lgbm's best error=0.6335,\tbest estimator xgboost's best error=0.6252\n",
+ "[flaml.automl: 03-30 22:00:19] {2567} INFO - iteration 43, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:21] {2744} INFO - at 7.7s,\testimator xgboost's best error=0.6237,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:21] {2567} INFO - iteration 44, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:21] {2744} INFO - at 7.9s,\testimator extra_tree's best error=0.6527,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:21] {2567} INFO - iteration 45, current learner xgboost\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 8.6s,\testimator xgboost's best error=0.6237,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 46, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 8.7s,\testimator lgbm's best error=0.6335,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 47, current learner catboost\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 8.8s,\testimator catboost's best error=0.6828,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 48, current learner catboost\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 8.9s,\testimator catboost's best error=0.6828,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 49, current learner catboost\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 9.0s,\testimator catboost's best error=0.6738,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 50, current learner catboost\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 9.1s,\testimator catboost's best error=0.6738,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 51, current learner extra_tree\n",
+ "[flaml.automl: 03-30 22:00:22] {2744} INFO - at 9.2s,\testimator extra_tree's best error=0.6527,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:22] {2567} INFO - iteration 52, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:23] {2744} INFO - at 9.3s,\testimator lgbm's best error=0.6335,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:23] {2567} INFO - iteration 53, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:23] {2744} INFO - at 9.5s,\testimator lgbm's best error=0.6335,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:23] {2567} INFO - iteration 54, current learner lgbm\n",
+ "[flaml.automl: 03-30 22:00:23] {2744} INFO - at 10.1s,\testimator lgbm's best error=0.6335,\tbest estimator xgboost's best error=0.6237\n",
+ "[flaml.automl: 03-30 22:00:32] {2974} INFO - retrain xgboost for 8.8s\n",
+ "[flaml.automl: 03-30 22:00:32] {2981} INFO - retrained model: XGBClassifier(base_score=0.5, booster='gbtree',\n",
+ " colsample_bylevel=0.847756342161632, colsample_bynode=1,\n",
+ " colsample_bytree=0.7597930580523548, gamma=0, gpu_id=-1,\n",
+ " grow_policy='lossguide', importance_type='gain',\n",
+ " interaction_constraints='', learning_rate=0.19997653978110663,\n",
+ " max_delta_step=0, max_depth=0, max_leaves=39,\n",
+ " min_child_weight=10.070493332676804, missing=nan,\n",
+ " monotone_constraints='()', n_estimators=13, n_jobs=-1,\n",
+ " num_parallel_tree=1, random_state=0,\n",
+ " reg_alpha=0.02609403888821573, reg_lambda=0.19745601532140325,\n",
+ " scale_pos_weight=1, subsample=0.8895588746662894,\n",
+ " tree_method='hist', use_label_encoder=False,\n",
+ " validate_parameters=1, verbosity=0)\n",
+ "[flaml.automl: 03-30 22:00:32] {2310} INFO - fit succeeded\n",
+ "[flaml.automl: 03-30 22:00:32] {2311} INFO - Time taken to find the best model: 7.734541177749634\n",
+ "[flaml.automl: 03-30 22:00:32] {2322} WARNING - Time taken to find the best model is 77% of the provided time budget and not all estimators' hyperparameter search converged.
Consider increasing the time budget.\n" ] } ], diff --git a/test/automl/test_notebook_example.py b/test/automl/test_notebook_example.py index 3b0ce4fdc1..e009120c4e 100644 --- a/test/automl/test_notebook_example.py +++ b/test/automl/test_notebook_example.py @@ -7,7 +7,7 @@ def test_automl(budget=5, dataset_format="dataframe", hpo_method=None): from flaml.data import load_openml_dataset import urllib3 - performance_check_budget = 240 + performance_check_budget = 600 if ( sys.platform == "darwin" and budget < performance_check_budget @@ -141,4 +141,4 @@ def test_mlflow(): if __name__ == "__main__": - test_automl(240) + test_automl(600)
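
For readers who want to exercise the raised 600-second budget locally, a minimal sketch follows. It is illustrative and not part of the patch: it assumes the FLAML repository root is the current working directory and that flaml and the notebook's optional dependencies are installed, and it simply runs the updated test module the same way its `__main__` block does.

```python
# Illustrative only -- not part of the patch.
# Execute test/automl/test_notebook_example.py as a script, which ends up calling
# test_automl(600) with the raised 600-second budget (expect several minutes).
import runpy

# Equivalent to: python test/automl/test_notebook_example.py
runpy.run_path("test/automl/test_notebook_example.py", run_name="__main__")
```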