Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Week 4 #27

Open
wants to merge 11 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 23 additions & 5 deletions course/week2/monitor_project/monitor/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ def get_ks_score(tr_probs, te_probs):
# te_probs: torch.Tensor
# predicted probabilities from test set
# score: float - between 0 and 1
pass # remove me
tr_probs_np = tr_probs.numpy()
te_probs_np = te_probs.numpy()
_, score = ks_2samp(tr_probs_np, te_probs_np)
# ============================
return score

Expand Down Expand Up @@ -68,7 +70,16 @@ def get_hist_score(tr_probs, te_probs, bins=10):
#
# Read the documentation for `np.histogram` carefully, in
# particular what `bin_edges` represent.
pass # remove me
tr_heights, bin_edges = np.histogram(tr_probs.numpy(), bins=bins, density=True)
te_heights, _ = np.histogram(te_probs.numpy(), bins=bin_edges, density=True)

score = 0
for i in range(len(bin_edges) - 1):
bin_diff = bin_edges[i+1] - bin_edges[i]
tr_area = bin_diff * tr_heights[i]
te_area = bin_diff * te_heights[i]
intersect = min(tr_area, te_area)
score += intersect
# ============================
return score

Expand Down Expand Up @@ -97,7 +108,12 @@ def get_vocab_outlier(tr_vocab, te_vocab):
# te_vocab: dict[str, int]
# Map from word to count for test examples
# score: float (between 0 and 1)
pass # remove me
num_seen = sum(1 for word in te_vocab if word in tr_vocab)
num_total = len(te_vocab)
if num_total == 0:
score = 0 # If test vocab is empty, we consider no outliers
else:
score = 1 - (num_seen / num_total)
# ============================
return score

Expand Down Expand Up @@ -132,7 +148,9 @@ def calibrate(self, tr_probs, tr_labels, te_probs):
# it to a torch.Tensor.
#
# `te_probs_cal`: torch.Tensor
pass # remove me
iso_reg = IsotonicRegression(out_of_bounds='clip')
tr_probs_cal = torch.tensor(iso_reg.fit_transform(tr_probs.numpy(), tr_labels.numpy()))
te_probs_cal = torch.tensor(iso_reg.predict(te_probs.numpy()))
# ============================
return tr_probs_cal, te_probs_cal

Expand All @@ -149,4 +167,4 @@ def monitor(self, te_vocab, te_probs):
'hist_score': hist_score,
'outlier_score': outlier_score,
}
return metrics
return metrics
2 changes: 1 addition & 1 deletion course/week2/testing_project/configs/test.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
{
"data": "mnist",
"model": "linear.ckpt"
"model": "mlp.ckpt"
}
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
11 changes: 11 additions & 0 deletions course/week2/testing_project/images/integration/labels.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
path,label
zero.png,0
one.png,1
two.png,2
three.png,3
four.png,4
five.png,5
six.png,6
seven.png,7
eight.png,8
nine.png,9
Binary file not shown.
5 changes: 3 additions & 2 deletions course/week2/testing_project/testing/directionality.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def test(self, trainer, system):
preds_raw = torch.argmax(logits_raw, dim=1)
preds_transformed = torch.argmax(logits_transformed, dim=1)

batch_metric = 0 # store metric here
#batch_metric = 0 # store metric here
# ================================
# FILL ME OUT
#
Expand All @@ -166,7 +166,8 @@ def test(self, trainer, system):
# --
# batch_metric: float (not torch.Tensor!)
# Metric computed on a minibatch
pass # remove me

batch_metric = (preds_raw == preds_transformed).float().mean().item()
# ================================
metric.append(batch_metric)
pbar.update()
Expand Down
3 changes: 2 additions & 1 deletion course/week2/testing_project/testing/integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,8 @@ def test(self, trainer, system):
# Notes:
# --
# Nothing to return here
pass # remove me
loader = self.get_dataloader()
trainer.test(system, dataloaders=loader)
# ================================


Expand Down
4 changes: 3 additions & 1 deletion course/week2/testing_project/testing/regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,9 @@ def build_regression_test(system, loader):
# batch_is_correct: List[int] (not a torch.Tensor!)
# List of integers - 1 if the model got that element correct
# - 0 if the model got that element incorrect
pass # remove me
batch_is_correct = (preds == labels).long().numpy().tolist()
batch_loss = F.cross_entropy(logits, labels, reduction='none')
batch_loss = batch_loss.numpy().tolist()
# ================================
losses.extend(batch_loss)
is_correct.extend(batch_is_correct)
Expand Down