diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 00000000..3f107357
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,30 @@
+cff-version: "1.2.0"
+message: "If you use this software, please cite our article."
+authors:
+- family-names: Batzner
+  given-names: Simon
+- family-names: Musaelian
+  given-names: Albert
+- family-names: Sun
+  given-names: Lixin
+- family-names: Geiger
+  given-names: Mario
+- family-names: Mailoa
+  given-names: Jonathan P.
+- family-names: Kornbluth
+  given-names: Mordechai
+- family-names: Molinari
+  given-names: Nicola
+- family-names: Smidt
+  given-names: Tess E.
+- family-names: Kozinsky
+  given-names: Boris
+doi: 10.1038/s41467-022-29939-5
+date-published: 2022-05-04
+issn: 2041-1723
+journal: Nature Communications
+start: 2453
+title: "E(3)-equivariant graph neural networks for data-efficient and accurate interatomic potentials"
+type: article
+url: "https://www.nature.com/articles/s41467-022-29939-5"
+volume: 13
diff --git a/README.md b/README.md
index 9c983f9f..e8340018 100644
--- a/README.md
+++ b/README.md
@@ -141,7 +141,9 @@ Details on writing and using plugins can be found in the [Allegro tutorial](http
 
 ## References & citing
 
-The theory behind NequIP is described in our preprint (1). NequIP's backend builds on e3nn, a general framework for building E(3)-equivariant neural networks (2). If you use this repository in your work, please consider citing NequIP (1) and e3nn (3):
+The theory behind NequIP is described in our [article](https://www.nature.com/articles/s41467-022-29939-5) (1).
+NequIP's backend builds on [`e3nn`](https://e3nn.org), a general framework for building E(3)-equivariant
+neural networks (2). If you use this repository in your work, please consider citing `NequIP` (1) and `e3nn` (3):
 
 1. https://www.nature.com/articles/s41467-022-29939-5
 2. https://e3nn.org
diff --git a/docs/cite.rst b/docs/cite.rst
index 9f8296cc..f266a59a 100644
--- a/docs/cite.rst
+++ b/docs/cite.rst
@@ -1,3 +1,25 @@
-Citing Nequip
+Citing NequIP
 =============
 
+If you use ``NequIP`` in your research, please cite our `article <https://www.nature.com/articles/s41467-022-29939-5>`_:
+
+.. code-block:: bibtex
+
+   @article{batzner_e3-equivariant_2022,
+     title = {E(3)-Equivariant Graph Neural Networks for Data-Efficient and Accurate Interatomic Potentials},
+     author = {Batzner, Simon and Musaelian, Albert and Sun, Lixin and Geiger, Mario and Mailoa, Jonathan P. and Kornbluth, Mordechai and Molinari, Nicola and Smidt, Tess E. and Kozinsky, Boris},
+     year = {2022},
+     month = may,
+     journal = {Nature Communications},
+     volume = {13},
+     number = {1},
+     pages = {2453},
+     issn = {2041-1723},
+     doi = {10.1038/s41467-022-29939-5},
+   }
+
+The theory behind ``NequIP`` is described in the `article <https://www.nature.com/articles/s41467-022-29939-5>`_ above.
+``NequIP``'s backend builds on `e3nn <https://e3nn.org>`_, a general framework for building E(3)-equivariant neural networks (1). If you use this repository in your work, please consider citing ``NequIP`` and ``e3nn`` (2):
+
+1. https://e3nn.org
+2. https://doi.org/10.5281/zenodo.3724963
diff --git a/examples/plot_dimers.py b/examples/plot_dimers.py
index bafac7ac..a13b9e15 100644
--- a/examples/plot_dimers.py
+++ b/examples/plot_dimers.py
@@ -39,19 +39,20 @@
 print("Computing dimers...")
 potential = {}
 N_sample = args.n_samples
-N_combs = len(list(itertools.combinations_with_replacement(range(num_types), 2)))
-r = torch.zeros(N_sample * N_combs, 2, 3, device=args.device)
-rs_one = torch.linspace(args.r_min, model_r_max, 500, device=args.device)
-rs = rs_one.repeat([N_combs])
-assert rs.shape == (N_combs * N_sample,)
+type_combos = [
+    list(e) for e in itertools.combinations_with_replacement(range(num_types), 2)
+]
+N_combos = len(type_combos)
+r = torch.zeros(N_sample * N_combos, 2, 3, device=args.device)
+rs_one = torch.linspace(args.r_min, model_r_max, N_sample, device=args.device)
+rs = rs_one.repeat([N_combos])
+assert rs.shape == (N_combos * N_sample,)
 r[:, 1, 0] += rs  # offset second atom along x axis
-types = torch.as_tensor(
-    [list(e) for e in itertools.combinations_with_replacement(range(num_types), 2)]
-)
-types = types.reshape(N_combs, 1, 2).expand(N_combs, N_sample, 2).reshape(-1)
+types = torch.as_tensor(type_combos)
+types = types.reshape(N_combos, 1, 2).expand(N_combos, N_sample, 2).reshape(-1)
 r = r.reshape(-1, 3)
 assert types.shape == r.shape[:1]
-N_at_total = N_sample * N_combs * 2
+N_at_total = N_sample * N_combos * 2
 assert len(types) == N_at_total
 edge_index = torch.vstack(
     (
@@ -61,14 +62,14 @@
     )
 )
 data = AtomicData(pos=r, atom_types=types, edge_index=edge_index)
-data.batch = torch.arange(N_sample * N_combs, device=args.device).repeat_interleave(2)
-data.ptr = torch.arange(0, 2 * N_sample * N_combs + 1, 2, device=args.device)
+data.batch = torch.arange(N_sample * N_combos, device=args.device).repeat_interleave(2)
+data.ptr = torch.arange(0, 2 * N_sample * N_combos + 1, 2, device=args.device)
 result = model(AtomicData.to_AtomicDataDict(data.to(device=args.device)))
 
 print("Plotting...")
 energies = (
     result[AtomicDataDict.TOTAL_ENERGY_KEY]
-    .reshape(N_combs, N_sample)
+    .reshape(N_combos, N_sample)
     .cpu()
     .detach()
     .numpy()
@@ -83,9 +84,7 @@
     dpi=120,
 )
 
-for i, (type1, type2) in enumerate(
-    itertools.combinations_with_replacement(range(num_types), 2)
-):
+for i, (type1, type2) in enumerate(type_combos):
     ax = axs[i]
     ax.set_ylabel(f"{type_names[type1]}-{type_names[type2]}")
     ax.plot(rs_one, energies[i])
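
A note on the `plot_dimers.py` refactor above: it relies on a combo-major layout, where type pair `p` of `type_combos` owns the contiguous block of dimers `[p * N_sample, (p + 1) * N_sample)`, with two atoms per dimer. The sketch below replays that bookkeeping standalone; `num_types`, `N_sample`, and the linspace bounds are made-up stand-ins for the values the script reads from the model and its CLI args.

```python
import itertools

import torch

# Hypothetical sizes; the real script takes these from the model and args.
num_types, N_sample = 3, 5

# All unordered pairs of atom types: (0, 0), (0, 1), (0, 2), (1, 1), ...
type_combos = [
    list(e) for e in itertools.combinations_with_replacement(range(num_types), 2)
]
N_combos = len(type_combos)

# Distances: one sweep of N_sample values, tiled once per type pair,
# so pair p occupies flat entries [p * N_sample, (p + 1) * N_sample).
rs_one = torch.linspace(1.0, 5.0, N_sample)
rs = rs_one.repeat([N_combos])
assert rs.shape == (N_combos * N_sample,)

# Per-atom types: broadcast each pair over its N_sample dimers, then
# flatten to one entry per atom (two atoms per dimer).
types = torch.as_tensor(type_combos)  # (N_combos, 2)
types = types.reshape(N_combos, 1, 2).expand(N_combos, N_sample, 2).reshape(-1)
assert types.shape == (N_combos * N_sample * 2,)
```

Because `rs` is built with `repeat` and `types` with `expand` over the same combo-major order, the later `energies.reshape(N_combos, N_sample)` recovers one row of the dimer curve per type pair, which is what the plotting loop indexes with `energies[i]`.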