Merge pull request #95 from eth-cscs/release-0.6.3
Release 0.6.3
statrita2004 authored Aug 27, 2021
2 parents 3ee0aee + 3ee238d commit 48c4715
Showing 31 changed files with 4,362 additions and 582 deletions.
5 changes: 5 additions & 0 deletions Makefile
@@ -28,6 +28,7 @@ unittest:
python3 -m unittest discover -s tests -v -p "*_tests.py" || (echo "Error in standard unit tests."; exit 1)
@# remove temporary files created during testing
@if test -f net.pth; then rm net.pth; fi
+ @if test -f net_with_discard_wrapper.pth; then rm net_with_discard_wrapper.pth; fi
@if test -f scaler.pkl; then rm scaler.pkl; fi
@if test -f tmp.jnl; then rm tmp.jnl; fi
@if test -f journal_tests_testfile.pkl; then rm journal_tests_testfile.pkl; fi
@@ -40,6 +41,10 @@ unittest_mpi:
exampletest: $(MAKEDIRS)
@echo "Testing standard examples.."
python3 -m unittest -v tests/test_examples.py || (echo "Error in example tests."; exit 1)
+ @if test -f scaler.pkl; then rm scaler.pkl; fi
+ @if test -f seminn_net.pth; then rm seminn_net.pth; fi
+ @if test -f triplet_net.pth; then rm triplet_net.pth; fi
+ @if test -f tmp.jnl; then rm tmp.jnl; fi

exampletest_mpi:
@echo "Testing MPI backend examples.."
5 changes: 3 additions & 2 deletions README.md
@@ -44,6 +44,7 @@ Additional **features** are:
* several methods for summary selection:
* [Semi-automatic summary selection (with Neural networks)](http://proceedings.mlr.press/v97/wiqvist19a/wiqvist19a.pdf)
* [Summary selection using distance learning (with Neural networks)](https://link.springer.com/article/10.1007/s13571-019-00208-8)
+ * [Sufficient statistics of exponential family approximating the likelihood (with Neural networks)](https://arxiv.org/abs/2012.10903)
* [Random Forest Model Selection Scheme](https://academic.oup.com/bioinformatics/article/32/6/859/1744513)


@@ -130,9 +131,9 @@ Publications in which ABCpy was applied:

* R. Dutta, K. Zouaoui-Boudjeltia, C. Kotsalos, A. Rousseau, D. Ribeiro de Sousa, J. M. Desmet, A. Van Meerhaeghe, A. Mira, and B. Chopard. "Interpretable pathological test for Cardio-vascular disease: Approximate Bayesian computation with distance learning.", 2020, arXiv:2010.06465.

- * R. Dutta, S. Gomes, D. Kalise, L. Pacchiardi. "Using mobility data in the design of optimal lockdown strategies for the COVID-19 pandemic in England.", 2020, arXiv:2006.16059.
+ * R. Dutta, S. Gomes, D. Kalise, L. Pacchiardi. "Using mobility data in the design of optimal lockdown strategies for the COVID-19 pandemic in England.", 2021, PLOS Computational Biology, 17(8), e1009236.

- * L. Pacchiardi, P. Künzli, M. Schöngens, B. Chopard, R. Dutta, "Distance-Learning for Approximate Bayesian Computation to Model a Volcanic Eruption", 2020, Sankhya B, 1-30.
+ * L. Pacchiardi, P. Künzli, M. Schöngens, B. Chopard, R. Dutta, "Distance-Learning for Approximate Bayesian Computation to Model a Volcanic Eruption", 2021, Sankhya B, 83(1), 288-317.

* R. Dutta, J. P. Onnela, A. Mira, "Bayesian Inference of Spreading Processes on Networks", 2018, Proceedings of Royal Society A, 474(2215), 20180129.

2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
- 0.6.2
+ 0.6.3
12 changes: 6 additions & 6 deletions abcpy/NN_utilities/algorithms.py
@@ -81,13 +81,13 @@ def contrastive_training(samples, similarity_set, embedding_net, cuda, batch_siz
scheduler = scheduler(optimizer, **scheduler_kwargs)

# now train:
- fit(pairs_train_loader, model_contrastive, loss_fn, optimizer, scheduler, n_epochs, cuda,
+ train_losses, test_losses = fit(pairs_train_loader, model_contrastive, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=pairs_train_loader_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training,
use_tqdm=use_tqdm)

- return embedding_net
+ return embedding_net, train_losses, test_losses


def triplet_training(samples, similarity_set, embedding_net, cuda, batch_size=16, n_epochs=400,
@@ -155,12 +155,12 @@ def triplet_training(samples, similarity_set, embedding_net, cuda, batch_size=16
scheduler = scheduler(optimizer, **scheduler_kwargs)

# now train:
- fit(triplets_train_loader, model_triplet, loss_fn, optimizer, scheduler, n_epochs, cuda,
+ train_losses, test_losses = fit(triplets_train_loader, model_triplet, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=triplets_train_loader_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training, use_tqdm=use_tqdm)

- return embedding_net
+ return embedding_net, train_losses, test_losses


def FP_nn_training(samples, target, embedding_net, cuda, batch_size=1, n_epochs=50, samples_val=None, target_val=None,
@@ -222,9 +222,9 @@ def FP_nn_training(samples, target, embedding_net, cuda, batch_size=1, n_epochs=
scheduler = scheduler(optimizer, **scheduler_kwargs)

# now train:
- fit(data_loader_FP_nn, embedding_net, loss_fn, optimizer, scheduler, n_epochs, cuda,
+ train_losses, test_losses = fit(data_loader_FP_nn, embedding_net, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=data_loader_FP_nn_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training, use_tqdm=use_tqdm)

- return embedding_net
+ return embedding_net, train_losses, test_losses
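
(Not part of the diff: with this change the three training helpers return the per-epoch loss histories alongside the trained network, so callers can monitor convergence. The sketch below uses stand-in values in place of a real call such as triplet_training(...), and assumes matplotlib is available purely for illustration.)

import matplotlib.pyplot as plt

# Illustrative sketch, not part of the commit: the training helpers above now
# return (embedding_net, train_losses, test_losses). Stand-in loss histories
# are used here instead of a real training run.
train_losses = [0.92, 0.61, 0.48, 0.41]
test_losses = [0.95, 0.70, 0.57, 0.54]

plt.plot(train_losses, label="train")
plt.plot(test_losses, label="validation")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.savefig("training_curves.png")
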
22 changes: 22 additions & 0 deletions abcpy/NN_utilities/losses.py
@@ -1,3 +1,4 @@
+ import torch
import torch.nn as nn
import torch.nn.functional as F

@@ -37,3 +38,24 @@ def forward(self, anchor, positive, negative, size_average=True):
distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()


def Fisher_divergence_loss(first_der_t, second_der_t, eta, lam=0):
    """lam is the strength of the Kingma & LeCun (2010) regularization term."""
    inner_prod_second_der_eta = torch.bmm(second_der_t, eta.unsqueeze(-1))  # this is used twice

    if lam == 0:
        return sum(
            (0.5 * torch.bmm(first_der_t, eta.unsqueeze(-1)) ** 2 + inner_prod_second_der_eta).view(-1))
    else:
        return sum(
            (0.5 * torch.bmm(first_der_t, eta.unsqueeze(-1)) ** 2 +
             inner_prod_second_der_eta + lam * inner_prod_second_der_eta ** 2).view(-1))


def Fisher_divergence_loss_with_c_x(first_der_t, second_der_t, eta, lam=0):
    # this enables use of the term c(x) in the approximating family, i.e. a term that depends only on x and not on theta.
    new_eta = torch.cat((eta, torch.ones(eta.shape[0], 1).to(eta)),
                        dim=1)  # the ones tensor needs to be on the same device as eta.
    # then call the other loss function with this new_eta:
    return Fisher_divergence_loss(first_der_t, second_der_t, new_eta, lam=lam)
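
(Not part of the diff: a minimal sketch of calling the new losses with dummy tensors to show the shapes torch.bmm expects. The interpretation of the dimensions, and the assumption that the module is importable as abcpy.NN_utilities.losses once this version is installed, are our reading of the code, not stated in the commit.)

import torch

from abcpy.NN_utilities.losses import (Fisher_divergence_loss,
                                        Fisher_divergence_loss_with_c_x)

# Illustrative only: derivative tensors of shape (batch, m, d) and eta of
# shape (batch, d), so torch.bmm computes (batch, m, d) @ (batch, d, 1)
# -> (batch, m, 1) inside the loss.
B, m, d = 8, 10, 3
first_der_t = torch.randn(B, m, d)
second_der_t = torch.randn(B, m, d)
eta = torch.randn(B, d)

loss = Fisher_divergence_loss(first_der_t, second_der_t, eta, lam=0.1)

# The c(x) variant appends a constant column to eta internally, so the
# derivative tensors need one extra slot in their last dimension.
loss_c = Fisher_divergence_loss_with_c_x(
    torch.randn(B, m, d + 1), torch.randn(B, m, d + 1), eta, lam=0.1)

print(loss.item(), loss_c.item())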