Merge pull request #236 from Alnusjaponica/update-black

Apply Black 2024 to codebase

HideakiImamura authored Feb 16, 2024
2 parents 01eaaed + 7b92e60 commit 5d87ead
Showing 17 changed files with 81 additions and 54 deletions.
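Most of the one-line additions below come from Black's 2024 stable style enforcing exactly one blank line after a module docstring (the single deletion in terminator/terminator_improvement_plot.py is the same rule collapsing a surplus blank line). The notebook diffs additionally pick up standard Black normalization: double-quoted strings, two blank lines before top-level definitions, and over-long calls split across multiple lines. A minimal sketch of the docstring rule via Black's Python API follows; it is illustrative only, not code from this repository, and assumes black>=24.1.0 is installed (formatting .ipynb files additionally requires the black[jupyter] extra):

import black

# Module source with no blank line between the docstring and the first import.
src = '"""Example module docstring."""\nimport sys\n'

# format_str() applies the current stable style to a source string.
formatted = black.format_str(src, mode=black.Mode())
print(formatted)
# Expected with Black >= 24: a single blank line is inserted after the docstring.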
4 changes: 1 addition & 3 deletions .github/workflows/checks.yml
@@ -23,9 +23,7 @@ jobs:
       - name: Install
         run: |
           python -m pip install -U pip
-          pip install --progress-bar off -U hacking isort
-          # TODO(Shinichi): Remove version specification after black 24.x becomes stable.
-          pip install --progress-bar off -U "black<24.0.0"
+          pip install --progress-bar off -U black hacking isort
       - name: black
         run: black . --check --diff
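This hunk resolves the TODO above: with Black 24.x now stable, the version pin is dropped and CI tracks the latest Black release. The same check the workflow runs (black . --check --diff) can be reproduced locally; a hedged sketch driving it from Python rather than the shell (assumes black is on PATH):

import subprocess

# `black --check --diff` prints the would-be changes and exits non-zero
# when any file would be reformatted.
result = subprocess.run(
    ["black", ".", "--check", "--diff"],
    capture_output=True,
    text=True,
)
print(result.stdout)
print("clean" if result.returncode == 0 else "needs reformatting")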
1 change: 1 addition & 0 deletions chainer/chainermn_simple.py
@@ -11,6 +11,7 @@
$ mpirun -n 2 -- python chainermn_simple.py $STUDY_NAME $STORAGE_URL
"""
+
import sys

import chainermn
23 changes: 14 additions & 9 deletions hiplot/plot_study.ipynb
@@ -43,7 +43,7 @@
"from sklearn.datasets import fetch_openml\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
-"mnist = fetch_openml(name='Fashion-MNIST', version=1)\n",
+"mnist = fetch_openml(name=\"Fashion-MNIST\", version=1)\n",
"classes = list(set(mnist.target))\n",
"\n",
"# For demonstrational purpose, only use a subset of the dataset.\n",
@@ -69,23 +69,26 @@
"source": [
"from sklearn.neural_network import MLPClassifier\n",
"\n",
+"\n",
"def objective(trial):\n",
-"    \n",
+"\n",
"    clf = MLPClassifier(\n",
-"        hidden_layer_sizes=tuple([trial.suggest_int('n_units_l{}'.format(i), 32, 64) for i in range(3)]),\n",
-"        learning_rate_init=trial.suggest_float('lr_init', 1e-5, 1e-1, log=True),\n",
+"        hidden_layer_sizes=tuple(\n",
+"            [trial.suggest_int(\"n_units_l{}\".format(i), 32, 64) for i in range(3)]\n",
+"        ),\n",
+"        learning_rate_init=trial.suggest_float(\"lr_init\", 1e-5, 1e-1, log=True),\n",
"    )\n",
"\n",
"    for step in range(100):\n",
"        clf.partial_fit(x_train, y_train, classes=classes)\n",
-"        value = clf.score(x_valid, y_valid) \n",
-"        \n",
+"        value = clf.score(x_valid, y_valid)\n",
+"\n",
"        # Report intermediate objective value.\n",
"        trial.report(value, step)\n",
"\n",
"        # Handle pruning based on the intermediate value.\n",
"        if trial.should_prune():\n",
-"            raise optuna.TrialPruned() \n",
+"            raise optuna.TrialPruned()\n",
"\n",
"    return value"
]
@@ -105,9 +108,11 @@
"source": [
"import optuna\n",
"\n",
-"optuna.logging.set_verbosity(optuna.logging.WARNING)  # This verbosity change is just to simplify the notebook output.\n",
+"optuna.logging.set_verbosity(\n",
+"    optuna.logging.WARNING\n",
+")  # This verbosity change is just to simplify the notebook output.\n",
"\n",
-"study = optuna.create_study(direction='maximize', pruner=optuna.pruners.MedianPruner())\n",
+"study = optuna.create_study(direction=\"maximize\", pruner=optuna.pruners.MedianPruner())\n",
"study.optimize(objective, n_trials=100)"
]
},
1 change: 1 addition & 0 deletions keras/keras_integration.py
@@ -14,6 +14,7 @@
https://github.com/optuna/optuna-examples/blob/main/mlflow/keras_mlflow.py
"""
+
import urllib
import warnings

1 change: 1 addition & 0 deletions keras/keras_simple.py
@@ -6,6 +6,7 @@
Keras. We optimize the filter and kernel size, kernel stride and layer activation.
"""
+
import urllib
import warnings

1 change: 1 addition & 0 deletions kubernetes/simple/sklearn_distributed.py
@@ -10,6 +10,7 @@
2 - It sets the storage address to the postgres pod deployed with the workers.
"""
+
import os

import optuna
1 change: 1 addition & 0 deletions lightgbm/lightgbm_integration.py
@@ -9,6 +9,7 @@
$ python lightgbm_integration.py
"""
+
import numpy as np
import optuna

1 change: 1 addition & 0 deletions lightgbm/lightgbm_tuner_cv.py
@@ -4,6 +4,7 @@
In this example, we optimize the cross-validated log loss of cancer detection.
"""
+
import optuna.integration.lightgbm as lgb

from lightgbm import early_stopping
1 change: 1 addition & 0 deletions pytorch/pytorch_distributed_spawn.py
@@ -21,6 +21,7 @@
If you wish to use optuna < 3.1.0, you would need to pass
`device=device_id` in TorchDistributedTrial.
"""
+
import argparse
from functools import partial
import os
1 change: 1 addition & 0 deletions pytorch/pytorch_lightning_ddp.py
@@ -11,6 +11,7 @@
$ python pytorch/pytorch_lightning_ddp.py [--pruning]
"""
+
import argparse
import os
from typing import List
1 change: 1 addition & 0 deletions pytorch/pytorch_lightning_simple.py
@@ -10,6 +10,7 @@
$ python pytorch_lightning_simple.py [--pruning]
"""
+
import argparse
import os
from typing import List
66 changes: 37 additions & 29 deletions quickstart.ipynb
@@ -67,16 +67,18 @@
"import sklearn.ensemble\n",
"import sklearn.model_selection\n",
"\n",
+"\n",
"def objective():\n",
"    iris = sklearn.datasets.load_iris()  # Prepare the data.\n",
-"    \n",
-"    clf = sklearn.ensemble.RandomForestClassifier( \n",
-"        n_estimators=5, max_depth=3)  # Define the model.\n",
-"    \n",
+"\n",
+"    clf = sklearn.ensemble.RandomForestClassifier(n_estimators=5, max_depth=3)  # Define the model.\n",
+"\n",
"    return sklearn.model_selection.cross_val_score(\n",
-"        clf, iris.data, iris.target, n_jobs=-1, cv=3).mean()  # Train and evaluate the model.\n",
+"        clf, iris.data, iris.target, n_jobs=-1, cv=3\n",
+"    ).mean()  # Train and evaluate the model.\n",
"\n",
-"print('Accuracy: {}'.format(objective()))"
+"\n",
+"print(\"Accuracy: {}\".format(objective()))"
]
},
{
@@ -96,24 +98,26 @@
"source": [
"import optuna\n",
"\n",
+"\n",
"def objective(trial):\n",
"    iris = sklearn.datasets.load_iris()\n",
-"    \n",
-"    n_estimators = trial.suggest_int('n_estimators', 2, 20)\n",
-"    max_depth = int(trial.suggest_float('max_depth', 1, 32, log=True))\n",
-"    \n",
-"    clf = sklearn.ensemble.RandomForestClassifier(\n",
-"        n_estimators=n_estimators, max_depth=max_depth)\n",
-"    \n",
+"\n",
+"    n_estimators = trial.suggest_int(\"n_estimators\", 2, 20)\n",
+"    max_depth = int(trial.suggest_float(\"max_depth\", 1, 32, log=True))\n",
+"\n",
+"    clf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)\n",
+"\n",
"    return sklearn.model_selection.cross_val_score(\n",
-"        clf, iris.data, iris.target, n_jobs=-1, cv=3).mean()\n",
+"        clf, iris.data, iris.target, n_jobs=-1, cv=3\n",
+"    ).mean()\n",
"\n",
-"study = optuna.create_study(direction='maximize')\n",
+"\n",
+"study = optuna.create_study(direction=\"maximize\")\n",
"study.optimize(objective, n_trials=100)\n",
"\n",
"trial = study.best_trial\n",
"\n",
-"print('Accuracy: {}'.format(trial.value))\n",
+"print(\"Accuracy: {}\".format(trial.value))\n",
"print(\"Best hyperparameters: {}\".format(trial.params))"
]
},
@@ -132,31 +136,35 @@
"source": [
"import sklearn.svm\n",
"\n",
+"\n",
"def objective(trial):\n",
"    iris = sklearn.datasets.load_iris()\n",
"\n",
-"    classifier = trial.suggest_categorical('classifier', ['RandomForest', 'SVC'])\n",
-"    \n",
-"    if classifier == 'RandomForest':\n",
-"        n_estimators = trial.suggest_int('n_estimators', 2, 20)\n",
-"        max_depth = int(trial.suggest_float('max_depth', 1, 32, log=True))\n",
+"    classifier = trial.suggest_categorical(\"classifier\", [\"RandomForest\", \"SVC\"])\n",
+"\n",
+"    if classifier == \"RandomForest\":\n",
+"        n_estimators = trial.suggest_int(\"n_estimators\", 2, 20)\n",
+"        max_depth = int(trial.suggest_float(\"max_depth\", 1, 32, log=True))\n",
"\n",
"        clf = sklearn.ensemble.RandomForestClassifier(\n",
-"            n_estimators=n_estimators, max_depth=max_depth)\n",
+"            n_estimators=n_estimators, max_depth=max_depth\n",
+"        )\n",
"    else:\n",
-"        c = trial.suggest_float('svc_c', 1e-10, 1e10, log=True)\n",
-"        \n",
-"        clf = sklearn.svm.SVC(C=c, gamma='auto')\n",
+"        c = trial.suggest_float(\"svc_c\", 1e-10, 1e10, log=True)\n",
+"\n",
+"        clf = sklearn.svm.SVC(C=c, gamma=\"auto\")\n",
"\n",
"    return sklearn.model_selection.cross_val_score(\n",
-"        clf, iris.data, iris.target, n_jobs=-1, cv=3).mean()\n",
+"        clf, iris.data, iris.target, n_jobs=-1, cv=3\n",
+"    ).mean()\n",
"\n",
-"study = optuna.create_study(direction='maximize')\n",
+"\n",
+"study = optuna.create_study(direction=\"maximize\")\n",
"study.optimize(objective, n_trials=100)\n",
"\n",
"trial = study.best_trial\n",
"\n",
-"print('Accuracy: {}'.format(trial.value))\n",
+"print(\"Accuracy: {}\".format(trial.value))\n",
"print(\"Best hyperparameters: {}\".format(trial.params))"
]
},
@@ -207,7 +215,7 @@
"metadata": {},
"outputs": [],
"source": [
-"optuna.visualization.plot_contour(study, params=['n_estimators', 'max_depth'])"
+"optuna.visualization.plot_contour(study, params=[\"n_estimators\", \"max_depth\"])"
]
}
],
1 change: 1 addition & 0 deletions rapids_simple.py
@@ -11,6 +11,7 @@
Learn more about rapids: https://rapids.ai/
"""
+
import cudf
from cuml.ensemble import RandomForestClassifier
from cuml.metrics import accuracy_score
1 change: 1 addition & 0 deletions ray/ray_joblib.py
@@ -10,6 +10,7 @@
$ python ray_joblib.py
"""
+
import joblib
import optuna

1 change: 1 addition & 0 deletions rl/sb3_simple.py
@@ -8,6 +8,7 @@
$ python sb3_simple.py
"""
+
from typing import Any
from typing import Dict

1 change: 0 additions & 1 deletion terminator/terminator_improvement_plot.py
@@ -14,7 +14,6 @@
"""

-
import optuna
from optuna.terminator.erroreval import report_cross_validation_scores
from optuna.visualization import plot_terminator_improvement
29 changes: 17 additions & 12 deletions visualization/plot_study.ipynb
@@ -45,7 +45,7 @@
"from sklearn.datasets import fetch_openml\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
-"mnist = fetch_openml(name='Fashion-MNIST', version=1)\n",
+"mnist = fetch_openml(name=\"Fashion-MNIST\", version=1)\n",
"classes = list(set(mnist.target))\n",
"\n",
"# For demonstrational purpose, only use a subset of the dataset.\n",
@@ -71,23 +71,26 @@
"source": [
"from sklearn.neural_network import MLPClassifier\n",
"\n",
+"\n",
"def objective(trial):\n",
-"    \n",
+"\n",
"    clf = MLPClassifier(\n",
-"        hidden_layer_sizes=tuple([trial.suggest_int('n_units_l{}'.format(i), 32, 64) for i in range(3)]),\n",
-"        learning_rate_init=trial.suggest_float('lr_init', 1e-5, 1e-1, log=True),\n",
+"        hidden_layer_sizes=tuple(\n",
+"            [trial.suggest_int(\"n_units_l{}\".format(i), 32, 64) for i in range(3)]\n",
+"        ),\n",
+"        learning_rate_init=trial.suggest_float(\"lr_init\", 1e-5, 1e-1, log=True),\n",
"    )\n",
"\n",
"    for step in range(100):\n",
"        clf.partial_fit(x_train, y_train, classes=classes)\n",
-"        value = clf.score(x_valid, y_valid) \n",
-"        \n",
+"        value = clf.score(x_valid, y_valid)\n",
+"\n",
"        # Report intermediate objective value.\n",
"        trial.report(value, step)\n",
"\n",
"        # Handle pruning based on the intermediate value.\n",
"        if trial.should_prune():\n",
-"            raise optuna.TrialPruned() \n",
+"            raise optuna.TrialPruned()\n",
"\n",
"    return value"
]
@@ -107,9 +110,11 @@
"source": [
"import optuna\n",
"\n",
-"optuna.logging.set_verbosity(optuna.logging.WARNING)  # This verbosity change is just to simplify the notebook output.\n",
+"optuna.logging.set_verbosity(\n",
+"    optuna.logging.WARNING\n",
+")  # This verbosity change is just to simplify the notebook output.\n",
"\n",
-"study = optuna.create_study(direction='maximize', pruner=optuna.pruners.MedianPruner())\n",
+"study = optuna.create_study(direction=\"maximize\", pruner=optuna.pruners.MedianPruner())\n",
"study.optimize(objective, n_trials=100)"
]
},
@@ -180,7 +185,7 @@
"metadata": {},
"outputs": [],
"source": [
-"plot_parallel_coordinate(study, params=['lr_init', 'n_units_l0'])"
+"plot_parallel_coordinate(study, params=[\"lr_init\", \"n_units_l0\"])"
]
},
{
@@ -214,7 +219,7 @@
"metadata": {},
"outputs": [],
"source": [
-"plot_contour(study, params=['n_units_l0', 'n_units_l1'])"
+"plot_contour(study, params=[\"n_units_l0\", \"n_units_l1\"])"
]
},
{
@@ -248,7 +253,7 @@
"metadata": {},
"outputs": [],
"source": [
-"plot_slice(study, params=['n_units_l0', 'n_units_l1'])"
+"plot_slice(study, params=[\"n_units_l0\", \"n_units_l1\"])"
]
},
{