-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #16 from igorastashov/hw_3
Hw 3
- Loading branch information
Showing
11 changed files
with
349 additions
and
190 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
File renamed without changes.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
import torch | ||
import torch.nn as nn | ||
|
||
|
||
class ConvNet(nn.Module):
    """Small two-stage CNN for 3 x 224 x 224 inputs producing 150 class logits."""

    def __init__(self):
        super().__init__()

        # Both conv stages keep the spatial size (3x3 kernel, padding 1) and
        # are each followed by batch-norm and ReLU.
        feature_layers = [
            nn.Conv2d(
                in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1
            ),  # 16 x 224 x 224
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1
            ),  # 32 x 224 x 224
            nn.BatchNorm2d(32),
            nn.ReLU(),
        ]
        # 28x28 average pooling shrinks 224 -> 8 per side; flattening yields a
        # 2048-dim vector that feeds the final linear classifier.
        head_layers = [
            nn.AvgPool2d(kernel_size=28),  # 32 x 8 x 8
            nn.Flatten(),  # 32 * 8 * 8 = 2048
            nn.Linear(2048, 150),
        ]
        self.network = nn.Sequential(*feature_layers, *head_layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map a (batch, 3, 224, 224) tensor to (batch, 150) logits."""
        return self.network(x)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,101 @@ | ||
import torch | ||
import torch.nn as nn | ||
from torch.optim import Optimizer | ||
from torch.utils.data import DataLoader | ||
from tqdm.notebook import tqdm | ||
|
||
from ds.tracking import plot_losses | ||
|
||
|
||
def training_epoch(
    model: nn.Module,
    optimizer: Optimizer,
    criterion: nn.Module,
    train_loader: DataLoader,
    device: torch.device,
    tqdm_desc: str,
):
    """Run one full training pass over ``train_loader``.

    Performs forward/backward/optimizer-step for every batch and accumulates
    dataset-averaged loss and accuracy.

    Args:
        model: network to train (switched to ``train()`` mode).
        optimizer: optimizer updating ``model``'s parameters.
        criterion: loss function called as ``criterion(logits, labels)``.
        train_loader: loader yielding ``(images, labels)`` batches.
        device: device batches are moved to before the forward pass.
        tqdm_desc: description shown on the progress bar.

    Returns:
        Tuple ``(train_loss, train_accuracy)``, both averaged over the
        whole dataset.
    """
    train_loss, train_accuracy = 0.0, 0.0
    model.train()

    # The previous enumerate(..., 1) produced an index that was never used.
    for images, labels in tqdm(train_loader, desc=tqdm_desc):
        images = images.to(device)  # images: batch_size x num_channels x height x width
        labels = labels.to(device)  # labels: batch_size

        optimizer.zero_grad()
        logits = model(images)  # logits: batch_size x num_classes
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        # Weight the mean batch loss by the batch size so the final division
        # gives a true dataset mean even when the last batch is smaller.
        train_loss += loss.item() * images.shape[0]
        # Accumulate the number of correct predictions (a count, not a rate).
        train_accuracy += (logits.argmax(dim=1) == labels).sum().item()

    train_loss /= len(train_loader.dataset)
    train_accuracy /= len(train_loader.dataset)
    return train_loss, train_accuracy
|
||
|
||
@torch.no_grad()
def validation_epoch(
    model: nn.Module, criterion, test_loader: DataLoader, device: torch.device, tqdm_desc
):
    """Evaluate ``model`` on ``test_loader``; return dataset-mean loss and accuracy."""
    total_loss = 0.0
    total_correct = 0.0
    model.eval()
    for images, labels in tqdm(test_loader, desc=tqdm_desc):
        # Move the batch to the target device before the forward pass.
        images, labels = images.to(device), labels.to(device)
        logits = model(images)  # logits: batch_size x num_classes
        batch_loss = criterion(logits, labels)

        # Scale the mean batch loss by batch size so the final division is a
        # true dataset mean; accumulate correct-prediction counts alongside.
        total_loss += batch_loss.item() * images.shape[0]
        total_correct += (logits.argmax(dim=1) == labels).sum().item()

    num_samples = len(test_loader.dataset)
    return total_loss / num_samples, total_correct / num_samples
|
||
|
||
def train(
    model: nn.Module,
    optimizer: Optimizer,
    scheduler,
    criterion,
    train_loader: DataLoader,
    test_loader: DataLoader,
    num_epochs: int,
    device: torch.device,
    title: str,
):
    """Train ``model`` for ``num_epochs`` epochs, validating after each one.

    After the loop, plots and saves the loss/accuracy curves via
    ``plot_losses``.

    Args:
        model: network to train.
        optimizer: optimizer for ``model``'s parameters.
        scheduler: LR scheduler stepped once per epoch, or ``None`` to skip.
        criterion: loss function called as ``criterion(logits, labels)``.
        train_loader: training data loader.
        test_loader: validation data loader.
        num_epochs: number of epochs to run.
        device: device batches are moved to.
        title: plot title, also used as the saved figure's file name.

    Returns:
        Tuple ``(train_losses, test_losses, train_accuracies,
        test_accuracies)`` — one per-epoch list each.
    """
    train_losses, train_accuracies = [], []
    test_losses, test_accuracies = [], []

    for epoch in range(1, num_epochs + 1):
        train_loss, train_accuracy = training_epoch(
            model,
            optimizer,
            criterion,
            train_loader,
            device,
            tqdm_desc=f'Training {epoch}/{num_epochs}',
        )
        test_loss, test_accuracy = validation_epoch(
            model,
            criterion,
            test_loader,
            device,
            tqdm_desc=f'Validating {epoch}/{num_epochs}',
        )

        # The scheduler is optional; step it once per epoch when provided.
        if scheduler is not None:
            scheduler.step()

        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)

    plot_losses(train_losses, test_losses, train_accuracies, test_accuracies, title)

    return train_losses, test_losses, train_accuracies, test_accuracies
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
import matplotlib.pyplot as plt | ||
import seaborn as sns | ||
from IPython.display import clear_output | ||
|
||
|
||
# Global plotting style applied once for every figure this module produces.
sns.set_style('darkgrid')
plt.rcParams.update({'font.size': 15})

# Directory the loss/accuracy figures are written into by plot_losses.
LOG_PATH = "runs"
|
||
|
||
def plot_losses(
    train_losses: list[float],
    test_losses: list[float],
    train_accuracies: list[float],
    test_accuracies: list[float],
    title: str,
):
    """Plot per-epoch train/test loss and accuracy curves, save and show them.

    The figure is written to ``{LOG_PATH}/{title}.png`` and displayed inline.

    Args:
        train_losses: per-epoch training losses.
        test_losses: per-epoch test losses.
        train_accuracies: per-epoch training accuracies.
        test_accuracies: per-epoch test accuracies.
        title: plot title, also used as the output file name.
    """
    from pathlib import Path

    # Redraw in place inside a notebook instead of stacking figures.
    clear_output()
    _, axs = plt.subplots(1, 2, figsize=(13, 4))
    axs[0].plot(range(1, len(train_losses) + 1), train_losses, label='train')
    axs[0].plot(range(1, len(test_losses) + 1), test_losses, label='test')
    axs[0].set_ylabel('loss')
    axs[0].set_title(title + ' loss')

    axs[1].plot(range(1, len(train_accuracies) + 1), train_accuracies, label='train')
    axs[1].plot(range(1, len(test_accuracies) + 1), test_accuracies, label='test')
    axs[1].set_ylabel('accuracy')
    axs[1].set_title(title + ' accuracy')

    for ax in axs:
        ax.set_xlabel('epoch')
        ax.legend()

    # Ensure the output directory exists so savefig does not fail on a fresh
    # checkout where LOG_PATH has not been created yet.
    Path(LOG_PATH).mkdir(parents=True, exist_ok=True)
    plt.savefig(f'{LOG_PATH}/{title}.png')
    plt.show()
Oops, something went wrong.