From 68da8f7ede816c9fc1f779a19950a3042f9cffee Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Thu, 23 May 2024 17:21:09 +0200
Subject: [PATCH 01/12] including optuna

---
 moabb/evaluations/evaluations.py | 47 +++++++++++++++++++++++++++++++-
 moabb/evaluations/utils.py       | 46 +++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index 38fa874aa..0e00ecb0a 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -4,6 +4,7 @@
 from typing import Optional, Union
 
 import numpy as np
+import optuna
 from mne.epochs import BaseEpochs
 from sklearn.base import clone
 from sklearn.metrics import get_scorer
@@ -19,7 +20,12 @@
 from tqdm import tqdm
 
 from moabb.evaluations.base import BaseEvaluation
-from moabb.evaluations.utils import create_save_path, save_model_cv, save_model_list
+from moabb.evaluations.utils import (
+    create_deep_model,
+    create_save_path,
+    save_model_cv,
+    save_model_list,
+)
 
 
 try:
@@ -35,6 +41,17 @@
 Vector = Union[list, tuple, np.ndarray]
 
 
+def objective(trial, X, y, clf, scorer, epochs):
+    learning_rate = trial.suggest_float("learning_rate", 1e-4, 1e-2, log=True)
+    weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
+    drop_rate = trial.suggest_float("drop_rate", 0.0, 1.0)
+
+    model = create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs=epochs)
+    model.fit(X, y)
+
+    return scorer(model, X, y)
+
+
 class WithinSessionEvaluation(BaseEvaluation):
     """Performance evaluation within session (k-fold cross-validation)
@@ -220,6 +237,34 @@ def _evaluate(
             y_ = y[ix] if self.mne_labels else y_cv
             for cv_ind, (train, test) in enumerate(cv.split(X_, y_)):
                 cvclf = clone(grid_clf)
+                n_epochs = cvclf[-1].epochs
+
+                study = optuna.create_study(
+                    direction="maximize",
+                )
+                study.optimize(
+                    lambda trial: objective(
+                        trial,
+                        X=X_[train],
+                        y=y_[train],
+                        clf=cvclf,
+                        scorer=scorer,
+                        epochs=n_epochs,
+                    ),
+                    n_trials=25,
+                    timeout=60,  # one hour
+                    show_progress_bar=True,
+                    n_jobs=1,
+                )
+
+                cvclf = create_deep_model(
+                    clf=cvclf,
+                    learning_rate=study.best_params["learning_rate"],
+                    weight_decay=study.best_params["weight_decay"],
+                    drop_rate=study.best_params["drop_rate"],
+                    epochs=n_epochs,
+                )
+
                 cvclf.fit(X_[train], y_[train])
                 acc.append(scorer(cvclf, X_[test], y_[test]))

diff --git a/moabb/evaluations/utils.py b/moabb/evaluations/utils.py
index 85048e0cb..cdcf4892b 100644
--- a/moabb/evaluations/utils.py
+++ b/moabb/evaluations/utils.py
@@ -4,9 +4,29 @@
 from pickle import HIGHEST_PROTOCOL, dump
 from typing import Sequence
 
+import tensorflow as tf
 from numpy import argmax
 from sklearn.pipeline import Pipeline
 
+from moabb.pipelines.deep_learning import (
+    KerasDeepConvNet,
+    KerasEEGITNet,
+    KerasEEGNet_8_2,
+    KerasEEGNeX,
+    KerasEEGTCNet,
+    KerasShallowConvNet,
+)
+
+
+models_class = {
+    "KerasShallowConvNet": KerasShallowConvNet,
+    "KerasDeepConvNet": KerasDeepConvNet,
+    "KerasEEGNet_8_2": KerasEEGNet_8_2,
+    "KerasEEGITNet": KerasEEGITNet,
+    "KerasEEGTCNet": KerasEEGTCNet,
+    "KerasEEGNeX": KerasEEGNeX,
+}
+
 
 def _check_if_is_keras_model(model):
     """Check if the model is a Keras model.
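
Note on PATCH 01: this commit wires Optuna into the within-session evaluation. The `objective` samples `learning_rate` and `weight_decay` on a log scale plus a `drop_rate`, rebuilds the Keras pipeline via `create_deep_model`, and returns the score that `study.optimize` maximizes. One detail worth flagging: Optuna's `timeout` is measured in seconds, so `timeout=60` caps the search at one minute, not the "one hour" the inline comment claims. Below is a minimal, self-contained sketch of the same create-study/optimize pattern; the scikit-learn classifier and synthetic data are stand-ins, not MOABB code:

    import optuna
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import get_scorer
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=200, random_state=0)
    X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)
    scorer = get_scorer("accuracy")

    def objective(trial):
        # Sample on a log scale, as the patch does for learning_rate/weight_decay.
        c = trial.suggest_float("C", 1e-4, 1e2, log=True)
        model = LogisticRegression(C=c, max_iter=1000).fit(X_train, y_train)
        # Score held-out data; the patch initially scores the training data
        # itself, which later commits in this series correct.
        return scorer(model, X_val, y_val)

    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=25, timeout=60)  # timeout is in seconds
    print(study.best_params)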
@@ -212,3 +232,29 @@ def create_save_path(
         return str(path_save)
     else:
         print("No hdf5_path provided, models will not be saved.")
+
+
+def create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs):
+    keras_clf = clf[-1]
+    steps = list(clf.steps)
+
+    Adam = tf.keras.optimizers.Adam(
+        learning_rate=learning_rate, weight_decay=weight_decay
+    )
+    new_keras_clf = models_class[keras_clf.__class__.__name__](
+        loss="sparse_categorical_crossentropy",
+        optimizer=Adam,
+        drop_rate=drop_rate,
+        epochs=epochs,
+        verbose=0,
+        # rest of the parameters are the same as the original model
+        callbacks=keras_clf.callbacks,
+        random_state=keras_clf.random_state,
+        batch_size=keras_clf.batch_size,
+        validation_split=keras_clf.validation_split,
+        shuffle=keras_clf.shuffle,
+    )
+    steps[-1] = (steps[-1][0], new_keras_clf)
+
+    pipe = Pipeline(steps)
+    return pipe

From a07a11eb4759f5da4d896331bfe8de3f2a67131e Mon Sep 17 00:00:00 2001
From: Bru
Date: Thu, 23 May 2024 21:47:40 +0100
Subject: [PATCH 02/12] Update moabb/evaluations/evaluations.py

Co-authored-by: Thomas Moreau
---
 moabb/evaluations/evaluations.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index 0e00ecb0a..1dd906c37 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -259,9 +259,7 @@ def _evaluate(
 
                 cvclf = create_deep_model(
                     clf=cvclf,
-                    learning_rate=study.best_params["learning_rate"],
-                    weight_decay=study.best_params["weight_decay"],
-                    drop_rate=study.best_params["drop_rate"],
+                    **study.best_params,
                     epochs=n_epochs,
                 )
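
Note on PATCH 02: `study.best_params` is a plain dict keyed by the names given to `trial.suggest_*`, so `**study.best_params` is equivalent to the three explicit keyword arguments it replaces, provided those names match `create_deep_model`'s parameters exactly. A small sketch of the equivalence (the values and the stub function are hypothetical):

    best_params = {"learning_rate": 1e-3, "weight_decay": 1e-6, "drop_rate": 0.5}

    def create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs):
        return (clf, learning_rate, weight_decay, drop_rate, epochs)

    # The unpacked call and the explicit call produce the same result:
    assert create_deep_model(clf="pipe", **best_params, epochs=300) == create_deep_model(
        clf="pipe",
        learning_rate=best_params["learning_rate"],
        weight_decay=best_params["weight_decay"],
        drop_rate=best_params["drop_rate"],
        epochs=300,
    )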
From 0eca6cc8581d3cbb84fd0f4411ca41e882680b21 Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 00:02:56 +0200
Subject: [PATCH 03/12] super ugly solution

---
 moabb/evaluations/evaluations.py | 38 +++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 6 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index 1dd906c37..b10fc15eb 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -14,8 +14,10 @@
     StratifiedKFold,
     StratifiedShuffleSplit,
     cross_validate,
+    train_test_split,
 )
 from sklearn.model_selection._validation import _fit_and_score, _score
+from sklearn.pipeline import Pipeline
 from sklearn.preprocessing import LabelEncoder
 from tqdm import tqdm
 
@@ -44,12 +46,31 @@ def objective(trial, X, y, clf, scorer, epochs):
     learning_rate = trial.suggest_float("learning_rate", 1e-4, 1e-2, log=True)
     weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
-    drop_rate = trial.suggest_float("drop_rate", 0.0, 1.0)
+    drop_rate = trial.suggest_float("drop_rate", 0.3, 0.9)
+
+    pre_model, model = create_deep_model(
+        clf, learning_rate, weight_decay, drop_rate, epochs=epochs
+    )
+    n_epochs = list(range(len(y)))
+    try:
+        idx_X_train, idx_X_val, y_train, y_val = train_test_split(
+            n_epochs, y, test_size=0.2, stratify=True
+        )
+    except Exception as e:
+        print(e)
+        idx_X_train, idx_X_val, y_train, y_val = train_test_split(
+            n_epochs, y, test_size=0.2
+        )
 
+    X_train = X[idx_X_train]
+    X_val = X[idx_X_val]
+    pre_model.fit(X=X_train, y=y_train)
+    X_val = pre_model.transform(X_val)
-    model = create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs=epochs)
-    model.fit(X, y)
+    model = Pipeline([("preprocessing", pre_model), ("deep", model)])
+    model.fit(X, y, deep__validation_data=(X_val, y_val))
 
-    return scorer(model, X, y)
+    return scorer(model, X_val, y_val)
 
 
 class WithinSessionEvaluation(BaseEvaluation):
@@ -241,6 +262,7 @@ def _evaluate(
 
                 study = optuna.create_study(
                     direction="maximize",
+                    study_name=f"{name}_{subject}_{session}_{cv_ind}",
                 )
                 study.optimize(
                     lambda trial: objective(
@@ -251,17 +273,21 @@ def _evaluate(
                         scorer=scorer,
                         epochs=n_epochs,
                     ),
-                    n_trials=25,
+                    n_trials=100,
                     timeout=60,  # one hour
                     show_progress_bar=True,
                     n_jobs=1,
+                    gc_after_trial=True,
                 )
 
-                cvclf = create_deep_model(
+                pre_model, model = create_deep_model(
                     clf=cvclf,
                     **study.best_params,
                     epochs=n_epochs,
                 )
+                cvclf = Pipeline(
+                    [("preprocessing", pre_model), ("deep", model)]
+                )
 
                 cvclf.fit(X_[train], y_[train])
                 acc.append(scorer(cvclf, X_[test], y_[test]))

From 7c6afe6b0b1dbfedd743085ee3b56e256ef5e21c Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 00:06:48 +0200
Subject: [PATCH 04/12] adding parameters

---
 moabb/evaluations/evaluations.py |  8 ++++++--
 moabb/evaluations/utils.py       | 12 ++++++------
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index b10fc15eb..3dbf35b40 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -132,10 +132,14 @@ def __init__(
         self,
         n_perms: Optional[Union[int, Vector]] = None,
         data_size: Optional[dict] = None,
+        optuna_n_trials: int = 100,
+        optuna_timeout: int = 60,
         **kwargs,
     ):
         self.data_size = data_size
         self.n_perms = n_perms
+        self.optuna_n_trials = optuna_n_trials
+        self.optuna_timeout = optuna_timeout
         self.calculate_learning_curve = self.data_size is not None
         if self.calculate_learning_curve:
             # Check correct n_perms parameter
@@ -273,8 +277,8 @@ def _evaluate(
                         clf=cvclf,
                         scorer=scorer,
                         epochs=n_epochs,
                     ),
-                    n_trials=100,
-                    timeout=60,  # one hour
+                    n_trials=self.optuna_n_trials,
+                    timeout=self.optuna_timeout,  # one hour
                     show_progress_bar=True,
                     n_jobs=1,

diff --git a/moabb/evaluations/utils.py b/moabb/evaluations/utils.py
index cdcf4892b..076a5b2e1 100644
--- a/moabb/evaluations/utils.py
+++ b/moabb/evaluations/utils.py
@@ -246,15 +246,15 @@ def create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs):
         optimizer=Adam,
         drop_rate=drop_rate,
         epochs=epochs,
-        verbose=0,
+        verbose=True,
         # rest of the parameters are the same as the original model
         callbacks=keras_clf.callbacks,
         random_state=keras_clf.random_state,
         batch_size=keras_clf.batch_size,
-        validation_split=keras_clf.validation_split,
+        validation_split=0.0,
         shuffle=keras_clf.shuffle,
     )
-    steps[-1] = (steps[-1][0], new_keras_clf)
-
-    pipe = Pipeline(steps)
-    return pipe
+    steps[-1] = ("deep", new_keras_clf)
+    pre_pipe = Pipeline(steps[:-1])
+    # pipe = Pipeline(("deep", new_keras_clf))
+    return pre_pipe, new_keras_clf
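
Note on PATCHES 03-04: the objective now holds out 20% of the training fold, fits preprocessing on the sub-training split only, pre-transforms the validation split, and scores on held-out data; `optuna_n_trials`/`optuna_timeout` become evaluation parameters. Note that `stratify=True` is not a valid value here: scikit-learn expects the label array itself, so the call errors out and the `except` branch always falls back to an unstratified split (PATCH 07 changes it to `stratify=y`). A standalone sketch of the intended split-with-fallback, on stand-in data:

    import numpy as np
    from sklearn.model_selection import train_test_split

    y = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
    indices = list(range(len(y)))  # split indices, then slice X with them

    try:
        # stratify takes the label array, not a boolean flag
        idx_train, idx_val, y_train, y_val = train_test_split(
            indices, y, test_size=0.2, stratify=y, random_state=42
        )
    except ValueError:
        # fall back to an unstratified split, e.g. when a class has < 2 samples
        idx_train, idx_val, y_train, y_val = train_test_split(
            indices, y, test_size=0.2, random_state=42
        )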
From 1363fca62b52f3122c1b2952cbf0aa363ad6a53a Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 11:37:38 +0200
Subject: [PATCH 05/12] adding random_state

---
 moabb/evaluations/evaluations.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index 3dbf35b40..c9994a6d4 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -43,7 +43,7 @@
 Vector = Union[list, tuple, np.ndarray]
 
 
-def objective(trial, X, y, clf, scorer, epochs):
+def objective(trial, X, y, clf, scorer, epochs, random_state):
     learning_rate = trial.suggest_float("learning_rate", 1e-4, 1e-2, log=True)
     weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
     drop_rate = trial.suggest_float("drop_rate", 0.3, 0.9)
@@ -54,12 +54,12 @@ def objective(trial, X, y, clf, scorer, epochs):
     n_epochs = list(range(len(y)))
     try:
         idx_X_train, idx_X_val, y_train, y_val = train_test_split(
-            n_epochs, y, test_size=0.2, stratify=True
+            n_epochs, y, test_size=0.2, stratify=True, random_state=random_state
         )
     except Exception as e:
         print(e)
         idx_X_train, idx_X_val, y_train, y_val = train_test_split(
-            n_epochs, y, test_size=0.2
+            n_epochs, y, test_size=0.2, random_state=random_state
         )
 
     X_train = X[idx_X_train]
     X_val = X[idx_X_val]
@@ -276,6 +276,7 @@ def _evaluate(
                         clf=cvclf,
                         scorer=scorer,
                         epochs=n_epochs,
+                        random_state=self.random_state,
                     ),
                     n_trials=self.optuna_n_trials,
                     timeout=self.optuna_timeout,  # one hour

From a57d0cc93d09e5dfa7760d185e0cd6eb0b629fb2 Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 11:47:21 +0200
Subject: [PATCH 06/12] fixing names

---
 moabb/evaluations/evaluations.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index c9994a6d4..d4ed077b1 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -48,7 +48,7 @@ def objective(trial, X, y, clf, scorer, epochs, random_state):
     weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
     drop_rate = trial.suggest_float("drop_rate", 0.3, 0.9)
 
-    pre_model, model = create_deep_model(
+    pre_process_steps, model = create_deep_model(
         clf, learning_rate, weight_decay, drop_rate, epochs=epochs
     )
     n_epochs = list(range(len(y)))
@@ -64,11 +64,11 @@ def objective(trial, X, y, clf, scorer, epochs, random_state):
 
     X_train = X[idx_X_train]
     X_val = X[idx_X_val]
-    pre_model.fit(X=X_train, y=y_train)
-    X_val = pre_model.transform(X_val)
+    pre_process_steps.fit(X=X_train, y=y_train)
+    X_val_pre_processed = pre_process_steps.transform(X_val)
 
-    model = Pipeline([("preprocessing", pre_model), ("deep", model)])
-    model.fit(X, y, deep__validation_data=(X_val, y_val))
+    model = Pipeline([("preprocessing", pre_process_steps), ("deep", model)])
+    model.fit(X, y, deep__validation_data=(X_val_pre_processed, y_val))
 
     return scorer(model, X_val, y_val)
 
@@ -285,13 +285,13 @@ def _evaluate(
 
-                pre_model, model = create_deep_model(
+                pre_process_steps, model = create_deep_model(
                     clf=cvclf,
                     **study.best_params,
                     epochs=n_epochs,
                 )
                 cvclf = Pipeline(
-                    [("preprocessing", pre_model), ("deep", model)]
+                    [("preprocessing", pre_process_steps), ("deep", model)]
                 )
 
                 cvclf.fit(X_[train], y_[train])

From b6d51f787e04959a934bf4d2e5e03d82512eb19c Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 11:55:54 +0200
Subject: [PATCH 07/12] including the manual validation

---
 moabb/evaluations/evaluations.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index d4ed077b1..b521279f2 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -49,12 +49,12 @@ def objective(trial, X, y, clf, scorer, epochs, random_state):
     drop_rate = trial.suggest_float("drop_rate", 0.3, 0.9)
 
     pre_process_steps, model = create_deep_model(
-        clf, learning_rate, weight_decay, drop_rate, epochs=epochs
+        clf, learning_rate, weight_decay, drop_rate, epochs=epochs, manual_validation=True
     )
     n_epochs = list(range(len(y)))
     try:
         idx_X_train, idx_X_val, y_train, y_val = train_test_split(
-            n_epochs, y, test_size=0.2, stratify=True, random_state=random_state
+            n_epochs, y, test_size=0.2, stratify=y, random_state=random_state
        )
     except Exception as e:
         print(e)
@@ -132,7 +132,7 @@ def __init__(
         self,
         n_perms: Optional[Union[int, Vector]] = None,
         data_size: Optional[dict] = None,
-        optuna_n_trials: int = 100,
+        optuna_n_trials: int = 1,
         optuna_timeout: int = 60,
         **kwargs,
     ):
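
Note on PATCHES 05-07: these commits thread `random_state` through the split, rename `pre_model` to the clearer `pre_process_steps`, fix the stratification to `stratify=y`, and add `manual_validation` so the Keras estimator's internal `validation_split` is disabled when a validation set is supplied by hand. The `deep__validation_data=...` keyword relies on scikit-learn's fit-parameter routing: `Pipeline.fit` forwards any `<step>__<param>` keyword to that step's `fit`. Crucially, such fit parameters are not transformed by the earlier pipeline steps, which is exactly why the series pre-transforms `X_val` before passing it. A minimal sketch of the routing with a stand-in estimator (not the MOABB Keras wrapper):

    import numpy as np
    from sklearn.base import BaseEstimator, ClassifierMixin
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    class DummyDeepNet(BaseEstimator, ClassifierMixin):
        """Stand-in for the Keras classifier; records what fit was given."""

        def fit(self, X, y, validation_data=None):
            self.validation_data_ = validation_data
            self.classes_ = np.unique(y)
            return self

        def predict(self, X):
            return np.full(len(X), self.classes_[0])

    X, y = np.random.randn(20, 3), np.array([0, 1] * 10)
    pipe = Pipeline([("preprocessing", StandardScaler()), ("deep", DummyDeepNet())])

    # 'deep__validation_data' is routed to DummyDeepNet.fit(validation_data=...);
    # X is scaled by the pipeline, but the tuple below is passed through as-is.
    pipe.fit(X, y, deep__validation_data=(X[:4], y[:4]))
    print(pipe.named_steps["deep"].validation_data_ is not None)  # True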
From 968f7c8b7638477e2dd2d11c126db25dee09b551 Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 11:57:28 +0200
Subject: [PATCH 08/12] fixing the number of trials

---
 moabb/evaluations/evaluations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index b521279f2..ead797b10 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -132,7 +132,7 @@ def __init__(
         self,
         n_perms: Optional[Union[int, Vector]] = None,
         data_size: Optional[dict] = None,
-        optuna_n_trials: int = 1,
+        optuna_n_trials: int = 25,
         optuna_timeout: int = 60,
         **kwargs,
     ):

From 0f1d0703b2e2bab42dc3175ebb33b0162407b1b2 Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 11:58:06 +0200
Subject: [PATCH 09/12] updating the util

---
 moabb/evaluations/utils.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/moabb/evaluations/utils.py b/moabb/evaluations/utils.py
index 076a5b2e1..fcfdaf91b 100644
--- a/moabb/evaluations/utils.py
+++ b/moabb/evaluations/utils.py
@@ -234,10 +234,17 @@ def create_save_path(
         print("No hdf5_path provided, models will not be saved.")
 
 
-def create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs):
+def create_deep_model(
+    clf, learning_rate, weight_decay, drop_rate, epochs, manual_validation=False
+):
     keras_clf = clf[-1]
     steps = list(clf.steps)
 
+    if manual_validation:
+        validation_split = (0.0,)
+    else:
+        validation_split = keras_clf.validation_split
+
     Adam = tf.keras.optimizers.Adam(
         learning_rate=learning_rate, weight_decay=weight_decay
     )
@@ -246,15 +253,15 @@ def create_deep_model(clf, learning_rate, weight_decay, drop_rate, epochs):
         optimizer=Adam,
         drop_rate=drop_rate,
         epochs=epochs,
-        verbose=True,
+        verbose=keras_clf.verbose,
         # rest of the parameters are the same as the original model
         callbacks=keras_clf.callbacks,
         random_state=keras_clf.random_state,
         batch_size=keras_clf.batch_size,
-        validation_split=0.0,
+        validation_split=validation_split,
         shuffle=keras_clf.shuffle,
     )
     steps[-1] = ("deep", new_keras_clf)
-    pre_pipe = Pipeline(steps[:-1])
+    pre_process_steps = Pipeline(steps[:-1])
     # pipe = Pipeline(("deep", new_keras_clf))
-    return pre_pipe, new_keras_clf
+    return pre_process_steps, new_keras_clf

From eaaaa9d565966c346a7e4d83b89dab46d6aa0fbd Mon Sep 17 00:00:00 2001
From: bruAristimunha
Date: Fri, 24 May 2024 16:18:00 +0200
Subject: [PATCH 10/12] increasing the evaluations

---
 moabb/evaluations/evaluations.py | 74 +++++++++++++++++---------------
 1 file changed, 40 insertions(+), 34 deletions(-)

diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index ead797b10..e0dbe6057 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -4,7 +4,6 @@
 from typing import Optional, Union
 
 import numpy as np
-import optuna
 from mne.epochs import BaseEpochs
 from sklearn.base import clone
 from sklearn.metrics import get_scorer
@@ -262,40 +261,47 @@ def _evaluate(
             y_ = y[ix] if self.mne_labels else y_cv
             for cv_ind, (train, test) in enumerate(cv.split(X_, y_)):
                 cvclf = clone(grid_clf)
-                n_epochs = cvclf[-1].epochs
-
-                study = optuna.create_study(
-                    direction="maximize",
-                    study_name=f"{name}_{subject}_{session}_{cv_ind}",
-                )
-                study.optimize(
-                    lambda trial: objective(
-                        trial,
-                        X=X_[train],
-                        y=y_[train],
-                        clf=cvclf,
-                        scorer=scorer,
-                        epochs=n_epochs,
-                        random_state=self.random_state,
-                    ),
-                    n_trials=self.optuna_n_trials,
-                    timeout=self.optuna_timeout,  # one hour
-                    show_progress_bar=True,
-                    n_jobs=1,
-                    gc_after_trial=True,
-                )
-
-                pre_process_steps, model = create_deep_model(
-                    clf=cvclf,
-                    **study.best_params,
-                    epochs=n_epochs,
-                )
-                cvclf = Pipeline(
-                    [("preprocessing", pre_process_steps), ("deep", model)]
-                )
-
-                cvclf.fit(X_[train], y_[train])
+                # n_epochs = cvclf[-1].epochs
+                #
+                # study = optuna.create_study(
+                #     direction="maximize",
+                #     study_name=f"{name}_{subject}_{session}_{cv_ind}",
+                # )
+                # study.optimize(
+                #     lambda trial: objective(
+                #         trial,
+                #         X=X_[train],
+                #         y=y_[train],
+                #         clf=cvclf,
+                #         scorer=scorer,
+                #         epochs=n_epochs,
+                #         random_state=self.random_state,
+                #     ),
+                #     n_trials=self.optuna_n_trials,
+                #     timeout=self.optuna_timeout,  # one hour
+                #     show_progress_bar=True,
+                #     n_jobs=1,
+                #     gc_after_trial=True,
+                # )
+                #
+                # pre_process_steps, model = create_deep_model(
+                #     clf=cvclf,
+                #     **study.best_params,
+                #     epochs=n_epochs,
+                # )
+                # cvclf = Pipeline(
+                #     [("preprocessing", pre_process_steps), ("deep", model)]
+                # )
+
+                cvclf = cvclf.fit(X_[train], y_[train])
                 acc.append(scorer(cvclf, X_[test], y_[test]))
+                if hasattr(cvclf, "_final_estimator"):
+                    save_history_name = (
+                        f"{name}_{cv_ind}_fold_{session}_session"
+                    )
+                    history = cvclf._final_estimator.history_
+
+                    np.savez(f"{save_history_name}.npz", history)
 
                 if self.hdf5_path is not None and self.save_model:
                     save_model_cv(
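
Note on PATCHES 08-10: PATCH 08 restores `optuna_n_trials` to 25, PATCH 09 makes `verbose` and `validation_split` follow the original estimator unless `manual_validation` is set, and PATCH 10 temporarily comments out the search while adding per-fold saving of the Keras training history. Two details stand out. First, `validation_split = (0.0,)` in PATCH 09 is a one-element tuple because of the trailing comma, where the Keras wrapper presumably expects the float `0.0`. Second, passing the `History` object straight to `np.savez` makes NumPy store it as a pickled 0-d object array, which forces `allow_pickle=True` at load time; a hedged alternative is to save `history.history` (a plain dict of per-epoch metric lists) as named arrays. The dict below is a stand-in for that attribute:

    import numpy as np

    # Stand-in for keras History.history: per-epoch metric lists.
    history_dict = {"loss": [0.9, 0.6, 0.4], "val_loss": [1.0, 0.8, 0.7]}

    # One named array per metric keeps the archive plain-NumPy:
    np.savez("fold_history.npz", **{k: np.asarray(v) for k, v in history_dict.items()})

    loaded = np.load("fold_history.npz")
    print(loaded["val_loss"])  # [1.  0.8 0.7]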
study_name=f"{name}_{subject}_{session}_{cv_ind}", - ) - study.optimize( - lambda trial: objective( - trial, - X=X_[train], - y=y_[train], - clf=cvclf, - scorer=scorer, - epochs=n_epochs, - random_state=self.random_state, - ), - n_trials=self.optuna_n_trials, - timeout=self.optuna_timeout, # one hour - show_progress_bar=True, - n_jobs=1, - gc_after_trial=True, - ) - - pre_process_steps, model = create_deep_model( - clf=cvclf, - **study.best_params, - epochs=n_epochs, - ) - cvclf = Pipeline( - [("preprocessing", pre_process_steps), ("deep", model)] - ) - - cvclf.fit(X_[train], y_[train]) + # n_epochs = cvclf[-1].epochs + # + # study = optuna.create_study( + # direction="maximize", + # study_name=f"{name}_{subject}_{session}_{cv_ind}", + # ) + # study.optimize( + # lambda trial: objective( + # trial, + # X=X_[train], + # y=y_[train], + # clf=cvclf, + # scorer=scorer, + # epochs=n_epochs, + # random_state=self.random_state, + # ), + # n_trials=self.optuna_n_trials, + # timeout=self.optuna_timeout, # one hour + # show_progress_bar=True, + # n_jobs=1, + # gc_after_trial=True, + # ) + # + # pre_process_steps, model = create_deep_model( + # clf=cvclf, + # **study.best_params, + # epochs=n_epochs, + # ) + # cvclf = Pipeline( + # [("preprocessing", pre_process_steps), ("deep", model)] + # ) + + cvclf = cvclf.fit(X_[train], y_[train]) acc.append(scorer(cvclf, X_[test], y_[test])) + if hasattr(cvclf, "_final_estimator"): + save_history_name = ( + f"{name}_{cv_ind}_fold_{session}_session" + ) + history = cvclf._final_estimator.history_ + + np.savez(f"{save_history_name}.npz", history) if self.hdf5_path is not None and self.save_model: save_model_cv( From e0dc85bbe4e5f80af3b1b0ec6117428bc560b18a Mon Sep 17 00:00:00 2001 From: bruAristimunha Date: Tue, 28 May 2024 17:06:22 +0200 Subject: [PATCH 11/12] optuna for first fold --- moabb/evaluations/evaluations.py | 66 ++++++++++++++++---------------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py index e0dbe6057..cf3dd51ec 100644 --- a/moabb/evaluations/evaluations.py +++ b/moabb/evaluations/evaluations.py @@ -4,6 +4,7 @@ from typing import Optional, Union import numpy as np +import optuna from mne.epochs import BaseEpochs from sklearn.base import clone from sklearn.metrics import get_scorer @@ -132,7 +133,7 @@ def __init__( n_perms: Optional[Union[int, Vector]] = None, data_size: Optional[dict] = None, optuna_n_trials: int = 25, - optuna_timeout: int = 60, + optuna_timeout: int = 60 * 10, **kwargs, ): self.data_size = data_size @@ -261,37 +262,38 @@ def _evaluate( y_ = y[ix] if self.mne_labels else y_cv for cv_ind, (train, test) in enumerate(cv.split(X_, y_)): cvclf = clone(grid_clf) - # n_epochs = cvclf[-1].epochs - # - # study = optuna.create_study( - # direction="maximize", - # study_name=f"{name}_{subject}_{session}_{cv_ind}", - # ) - # study.optimize( - # lambda trial: objective( - # trial, - # X=X_[train], - # y=y_[train], - # clf=cvclf, - # scorer=scorer, - # epochs=n_epochs, - # random_state=self.random_state, - # ), - # n_trials=self.optuna_n_trials, - # timeout=self.optuna_timeout, # one hour - # show_progress_bar=True, - # n_jobs=1, - # gc_after_trial=True, - # ) - # - # pre_process_steps, model = create_deep_model( - # clf=cvclf, - # **study.best_params, - # epochs=n_epochs, - # ) - # cvclf = Pipeline( - # [("preprocessing", pre_process_steps), ("deep", model)] - # ) + n_epochs = cvclf[-1].epochs + if cv_ind == 0: + study = optuna.create_study( + 
direction="maximize", + study_name=f"{name}_{subject}_{session}_{cv_ind}", + ) + study.optimize( + lambda trial: objective( + trial, + X=X_[train], + y=y_[train], + clf=cvclf, + scorer=scorer, + epochs=n_epochs, + random_state=self.random_state, + ), + n_trials=self.optuna_n_trials, + timeout=self.optuna_timeout, # one hour + show_progress_bar=True, + n_jobs=1, + gc_after_trial=True, + ) + best_params = study.best_params + + pre_process_steps, model = create_deep_model( + clf=cvclf, + **best_params, + epochs=n_epochs, + ) + cvclf = Pipeline( + [("preprocessing", pre_process_steps), ("deep", model)] + ) cvclf = cvclf.fit(X_[train], y_[train]) acc.append(scorer(cvclf, X_[test], y_[test])) From 119dd2a7e8866583387de241ffc5bb40caff2443 Mon Sep 17 00:00:00 2001 From: bruAristimunha Date: Wed, 29 May 2024 15:52:18 +0200 Subject: [PATCH 12/12] returning all folds --- moabb/evaluations/evaluations.py | 44 ++++++++++++++++---------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py index cf3dd51ec..2d835b802 100644 --- a/moabb/evaluations/evaluations.py +++ b/moabb/evaluations/evaluations.py @@ -263,28 +263,28 @@ def _evaluate( for cv_ind, (train, test) in enumerate(cv.split(X_, y_)): cvclf = clone(grid_clf) n_epochs = cvclf[-1].epochs - if cv_ind == 0: - study = optuna.create_study( - direction="maximize", - study_name=f"{name}_{subject}_{session}_{cv_ind}", - ) - study.optimize( - lambda trial: objective( - trial, - X=X_[train], - y=y_[train], - clf=cvclf, - scorer=scorer, - epochs=n_epochs, - random_state=self.random_state, - ), - n_trials=self.optuna_n_trials, - timeout=self.optuna_timeout, # one hour - show_progress_bar=True, - n_jobs=1, - gc_after_trial=True, - ) - best_params = study.best_params + + study = optuna.create_study( + direction="maximize", + study_name=f"{name}_{subject}_{session}_{cv_ind}", + ) + study.optimize( + lambda trial: objective( + trial, + X=X_[train], + y=y_[train], + clf=cvclf, + scorer=scorer, + epochs=n_epochs, + random_state=self.random_state, + ), + n_trials=self.optuna_n_trials, + timeout=self.optuna_timeout, # one hour + show_progress_bar=True, + n_jobs=1, + gc_after_trial=True, + ) + best_params = study.best_params pre_process_steps, model = create_deep_model( clf=cvclf,