feat(ruff): enable Pylint
hongbo-miao committed Jan 6, 2025
1 parent 69bf420 commit a5beec9
Showing 16 changed files with 80 additions and 121 deletions.
8 changes: 7 additions & 1 deletion .ruff.toml
@@ -175,6 +175,12 @@ select = [
"F", # Pyflakes
"I", # isort
"PGH", # pygrep-hooks
"PL", # Pylint
"UP", # pyupgrade
]
ignore = ["E501"]
ignore = [
"E501",
"PLR0913",
"PLR0915",
"PLR2004",
]
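
Besides enabling the "PL" selector, the commit widens `ignore` to suppress three Pylint refactor rules: PLR0913 (too many function arguments), PLR0915 (too many statements), and PLR2004 (magic value used in comparison). As a rough sketch of the kind of code PLR2004 flags — a hypothetical snippet, not from this repository:

    MAX_RETRIES = 3  # naming the constant satisfies PLR2004


    def should_retry(attempt: int) -> bool:
        # return attempt < 3  # flagged: magic value used in comparison
        return attempt < MAX_RETRIES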
25 changes: 0 additions & 25 deletions api-python/routers/seed.py

This file was deleted.

21 changes: 0 additions & 21 deletions api-python/routers/seed_test.py

This file was deleted.

2 changes: 1 addition & 1 deletion api-rust/scripts/download_model.py
@@ -3,7 +3,7 @@

 import httpx
 import torch
-import torchvision.models as models
+from torchvision import models

logger = logging.getLogger(__name__)

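This `torchvision.models` rewrite — and the matching `torch.nn`, `torch.optim`, `torch.utils.data`, and `nvidia.dali` import changes in the files below — appears to come from PLR0402 (manual from-import), which asks that `import a.b as b` be written as a from-import. A minimal sketch of the rule:

    # import torch.nn as nn  # flagged by PLR0402 (manual from-import)
    from torch import nn  # preferred, equivalent binding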
@@ -1,5 +1,5 @@
-import torch.nn as nn
 import torch.nn.functional as F
+from torch import nn


class Net(nn.Module):
@@ -14,13 +14,13 @@ def test(model, test_loader, device):
     correct = 0
     with torch.no_grad():
         for data, target in test_loader:
-            data, target = data.to(device), target.to(device)
-            output = model(data)
+            device_data, device_target = data.to(device), target.to(device)
+            output = model(device_data)
             # sum up batch loss
-            test_loss += F.nll_loss(output, target, size_average=False).item()
+            test_loss += F.nll_loss(output, device_target, size_average=False).item()
             # get the index of the max log-probability
             pred = output.max(1, keepdim=True)[1]
-            correct += pred.eq(target.view_as(pred)).sum().item()
+            correct += pred.eq(device_target.view_as(pred)).sum().item()
 
     test_loss /= len(test_loader.dataset)
     logger.info(
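Renaming `data`/`target` to `device_data`/`device_target` here (and `batch` to `device_batch` in the graph-neural-network file below) looks like a fix for PLW2901 (redefined loop name), which flags reassigning a loop variable inside the loop body. A minimal sketch, assuming hypothetical `loader` and `device` objects:

    for data, target in loader:
        # data = data.to(device)  # PLW2901: overwrites the loop variable
        device_data = data.to(device)  # a new name keeps both values distinct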
@@ -4,10 +4,10 @@
 import torch
 import torch.distributed as dist
 import torch.nn.functional as F
-import torch.optim as optim
 import torch.utils.data
 import torch.utils.data.distributed
 from models.net import Net
+from torch import optim
 from utils.average_gradients import average_gradients
 from utils.get_test_data_loader import get_test_data_loader
 from utils.get_train_data_loader import get_train_data_loader
@@ -19,10 +19,10 @@

 def train(args):
     is_distributed = len(args.hosts) > 1 and args.backend is not None
-    logger.info("Distributed training:", is_distributed)
+    logger.info(f"Distributed training: {is_distributed}")
 
     use_cuda = args.num_gpus > 0
-    logger.info("Number of gpus available:", args.num_gpus)
+    logger.info(f"Number of gpus available: {args.num_gpus}")
     kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
     device = torch.device("cuda" if use_cuda else "cpu")
@@ -71,18 +71,18 @@ def train(args):
     for epoch in range(1, args.epochs + 1):
         model.train()
         for batch_idx, (data, target) in enumerate(train_loader, 1):
-            data, target = data.to(device), target.to(device)
+            device_data, device_target = data.to(device), target.to(device)
             optimizer.zero_grad()
-            output = model(data)
-            loss = F.nll_loss(output, target)
+            output = model(device_data)
+            loss = F.nll_loss(output, device_target)
             loss.backward()
             if is_distributed and not use_cuda:
                 # average gradients manually for a multi-machine cpu case only
                 average_gradients(model)
             optimizer.step()
             if batch_idx % args.log_interval == 0:
                 logger.info(
-                    f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.sampler)} ({100.0 * batch_idx / len(train_loader):.0f}%)] Loss: {loss.item():.6f}"
+                    f"Train Epoch: {epoch} [{batch_idx * len(device_data)}/{len(train_loader.sampler)} ({100.0 * batch_idx / len(train_loader):.0f}%)] Loss: {loss.item():.6f}"
                 )
     test(model, test_loader, device)
     save_model(model, args.model_dir)
@@ -101,7 +101,7 @@ def export_to_parquet(
         str(parquet_file_path),
     ]
     logger.info(f"Executing command: {' '.join(cmd)}")
-    result = subprocess.run(cmd, capture_output=True, text=True)
+    result = subprocess.run(cmd, capture_output=True, text=True, check=False)
     if result.returncode == 0:
         return parquet_file_path
     else:
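The explicit `check=False` satisfies PLW1510 (`subprocess.run` without an explicit `check` argument): the default silently ignores non-zero exit codes, so the rule forces callers to state their intent. A minimal sketch with a hypothetical command:

    import subprocess

    # check=False keeps the old behavior but makes the choice visible;
    # the caller then inspects returncode itself, as the code above does.
    result = subprocess.run(
        ["ls", "no-such-path"], capture_output=True, text=True, check=False
    )
    if result.returncode != 0:
        print(result.stderr)  # handle the failure explicitly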
@@ -1,7 +1,7 @@
 import torch
 import torchvision
-import torchvision.transforms as transforms
 from args import get_args
+from torchvision import transforms

args = get_args()
transform = transforms.Compose(
@@ -1,6 +1,6 @@
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
+from torch import nn


class Net(nn.Module):
4 changes: 1 addition & 3 deletions machine-learning/dali/src/main.py
@@ -2,10 +2,8 @@
 import urllib.request
 from pathlib import Path
 
-import nvidia.dali.fn as fn
-import nvidia.dali.types as types
 import torch
-from nvidia.dali import pipeline_def
+from nvidia.dali import fn, pipeline_def, types
 from nvidia.dali.plugin.pytorch import DALIGenericIterator

logger = logging.getLogger(__name__)
99 changes: 50 additions & 49 deletions machine-learning/graph-neural-network/src/main.py
@@ -1,11 +1,11 @@
 import numpy as np
 import torch
-import torch.optim as optim
 import wandb
 from args import get_args
 from model.data_loader import fetch_dataset, get_dataloaders
 from model.gnn import GNN
 from ogb.graphproppred import Evaluator
+from torch import optim
 from tqdm import tqdm

cls_criterion = torch.nn.BCEWithLogitsLoss()
@@ -17,24 +17,24 @@ def train(model, device, loader, optimizer, task_type):
     total_loss = 0
 
     for step, batch in enumerate(tqdm(loader, desc="Iteration")):
-        batch = batch.to(device)
+        device_batch = batch.to(device)
 
-        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
+        if device_batch.x.shape[0] == 1 or device_batch.batch[-1] == 0:
             pass
         else:
-            pred = model(batch)
+            pred = model(device_batch)
             optimizer.zero_grad()
             # ignore nan targets (unlabeled) when computing training loss.
-            is_labeled = batch.y == batch.y
+            is_labeled = device_batch.y == device_batch.y
             if "classification" in task_type:
                 loss = cls_criterion(
                     pred.to(torch.float32)[is_labeled],
-                    batch.y.to(torch.float32)[is_labeled],
+                    device_batch.y.to(torch.float32)[is_labeled],
                 )
             else:
                 loss = reg_criterion(
                     pred.to(torch.float32)[is_labeled],
-                    batch.y.to(torch.float32)[is_labeled],
+                    device_batch.y.to(torch.float32)[is_labeled],
                 )
             loss.backward()
             total_loss += loss.item()
@@ -49,15 +49,15 @@ def evaluate(model, device, loader, evaluator):
     y_pred = []
 
     for step, batch in enumerate(tqdm(loader, desc="Iteration")):
-        batch = batch.to(device)
+        device_batch = batch.to(device)
 
-        if batch.x.shape[0] == 1:
+        if device_batch.x.shape[0] == 1:
             pass
         else:
            with torch.no_grad():
-                pred = model(batch)
+                pred = model(device_batch)
 
-            y_true.append(batch.y.view(pred.shape).detach().cpu())
+            y_true.append(device_batch.y.view(pred.shape).detach().cpu())
             y_pred.append(pred.detach().cpu())
 
     y_true = torch.cat(y_true, dim=0).numpy()
@@ -93,44 +93,45 @@ def main():
     val_loader = dataloaders["val"]
     test_loader = dataloaders["test"]
 
-    if config.gnn == "gin":
-        model = GNN(
-            gnn_type="gin",
-            num_tasks=dataset.num_tasks,
-            num_layer=config.num_layer,
-            emb_dim=config.emb_dim,
-            drop_ratio=config.drop_ratio,
-            virtual_node=False,
-        ).to(device)
-    elif config.gnn == "gin-virtual":
-        model = GNN(
-            gnn_type="gin",
-            num_tasks=dataset.num_tasks,
-            num_layer=config.num_layer,
-            emb_dim=config.emb_dim,
-            drop_ratio=config.drop_ratio,
-            virtual_node=True,
-        ).to(device)
-    elif config.gnn == "gcn":
-        model = GNN(
-            gnn_type="gcn",
-            num_tasks=dataset.num_tasks,
-            num_layer=config.num_layer,
-            emb_dim=config.emb_dim,
-            drop_ratio=config.drop_ratio,
-            virtual_node=False,
-        ).to(device)
-    elif config.gnn == "gcn-virtual":
-        model = GNN(
-            gnn_type="gcn",
-            num_tasks=dataset.num_tasks,
-            num_layer=config.num_layer,
-            emb_dim=config.emb_dim,
-            drop_ratio=config.drop_ratio,
-            virtual_node=True,
-        ).to(device)
-    else:
-        raise ValueError("Invalid GNN type")
+    match config.gnn:
+        case "gin":
+            model = GNN(
+                gnn_type="gin",
+                num_tasks=dataset.num_tasks,
+                num_layer=config.num_layer,
+                emb_dim=config.emb_dim,
+                drop_ratio=config.drop_ratio,
+                virtual_node=False,
+            ).to(device)
+        case "gin-virtual":
+            model = GNN(
+                gnn_type="gin",
+                num_tasks=dataset.num_tasks,
+                num_layer=config.num_layer,
+                emb_dim=config.emb_dim,
+                drop_ratio=config.drop_ratio,
+                virtual_node=True,
+            ).to(device)
+        case "gcn":
+            model = GNN(
+                gnn_type="gcn",
+                num_tasks=dataset.num_tasks,
+                num_layer=config.num_layer,
+                emb_dim=config.emb_dim,
+                drop_ratio=config.drop_ratio,
+                virtual_node=False,
+            ).to(device)
+        case "gcn-virtual":
+            model = GNN(
+                gnn_type="gcn",
+                num_tasks=dataset.num_tasks,
+                num_layer=config.num_layer,
+                emb_dim=config.emb_dim,
+                drop_ratio=config.drop_ratio,
+                virtual_node=True,
+            ).to(device)
+        case _:
+            raise ValueError("Invalid GNN type")

wb.watch(model)

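The new `match` statement (Python 3.10+ structural pattern matching) is a mechanical rewrite of the `if`/`elif` chain. Since the four branches differ only in `gnn_type` and `virtual_node`, a table-driven variant would also work — a sketch reusing the surrounding `config`, `dataset`, `GNN`, and `device` names, not what the commit actually does:

    # Map each config string to the two parameters that vary per variant.
    GNN_VARIANTS = {
        "gin": ("gin", False),
        "gin-virtual": ("gin", True),
        "gcn": ("gcn", False),
        "gcn-virtual": ("gcn", True),
    }
    try:
        gnn_type, virtual_node = GNN_VARIANTS[config.gnn]
    except KeyError:
        raise ValueError("Invalid GNN type") from None
    model = GNN(
        gnn_type=gnn_type,
        num_tasks=dataset.num_tasks,
        num_layer=config.num_layer,
        emb_dim=config.emb_dim,
        drop_ratio=config.drop_ratio,
        virtual_node=virtual_node,
    ).to(device)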
@@ -8,10 +8,10 @@
 def train():
     import lightning as L
     import torch
-    import torch.nn as nn
     import torch.nn.functional as F
-    import torch.utils.data as data
     import torchvision
+    from torch import nn
+    from torch.utils import data

class LitAutoEncoder(L.LightningModule):
def __init__(self):
@@ -1,12 +1,12 @@
 import lightning as L
 import mlflow
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
-import torch.utils.data as data
 import torchvision
 from args import get_args
 from lightning.pytorch.loggers.wandb import WandbLogger
+from torch import nn
+from torch.utils import data


class LitAutoEncoder(L.LightningModule):
@@ -1,5 +1,5 @@
 import torch
-import torchvision.models as models
+from torchvision import models


def main() -> None:
@@ -1,5 +1,5 @@
 import torch
-import torchvision.models as models
+from torchvision import models


def main() -> None:
