From d573d62981695ab9bd81c5c22c770f62f1d8ba50 Mon Sep 17 00:00:00 2001
From: knc6
Date: Sun, 22 Sep 2024 00:23:31 -0500
Subject: [PATCH] Make global executable.

---
 README.md                                | 4 ++--
 atomgpt/__init__.py                      | 2 +-
 atomgpt/inverse_models/inverse_models.py | 6 ++++--
 setup.py                                 | 2 +-
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index d890709..ae79684 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ pip install atomgpt
 Forward models are used to develop surrogate models for atomic structure to property predictions. They require text input, which can be either raw POSCAR-type files or a text description of the material. We can then use Google-T5, OpenAI GPT-2, and similar models with a custom language head to accomplish this task. The description of a material is generated with the [ChemNLP/describer](https://github.com/usnistgov/jarvis/blob/master/jarvis/core/atoms.py#L1567) function. If you set [`convert`](https://github.com/usnistgov/atomgpt/blob/develop/atomgpt/forward_models/forward_models.py#L277) to `False`, you can also train on bare POSCAR files.
 
 ```
-python atomgpt/forward_models/forward_models.py --config_name atomgpt/examples/forward_model/config.json
+atomgpt_forward --config_name atomgpt/examples/forward_model/config.json
 ```
 
 ## Inverse model example (property to structure)
@@ -51,7 +51,7 @@ python atomgpt/forward_models/forward_models.py --config_name atomgpt/examples/f
 Inverse models are used for generating materials given a property and a description such as a chemical formula. Currently, we use the Mistral model, but other models such as Gemma, Llama, etc. can also be used easily. After structure generation, we can optimize the structure with the ALIGNN-FF model (example [here](https://colab.research.google.com/github/knc6/jarvis-tools-notebooks/blob/master/jarvis-tools-notebooks/ALIGNN_Structure_Relaxation_Phonons_Interface.ipynb)) and then subject a few selected candidates to density functional theory calculations using JARVIS-DFT or a similar workflow (tutorial [here](https://pages.nist.gov/jarvis/tutorials/)). Note that currently, both inverse model training and inference require GPUs.
 
 ```
-python atomgpt/inverse_models/inverse_models.py --config_name atomgpt/examples/inverse_model/config.json
+atomgpt_inverse --config_name atomgpt/examples/inverse_model/config.json
 ```
 
 More detailed examples/case-studies would be added here soon.
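For the new `atomgpt_forward` and `atomgpt_inverse` commands referenced above to resolve on the command line, the package needs console-script entry points. Those are not shown in this patch, so the sketch below is only an assumption about how they might be registered in `setup.py`; the target module paths and `main` function names are illustrative, not confirmed by the diff.

```
# setup.py (sketch, not part of this patch)
import setuptools

setuptools.setup(
    name="atomgpt",
    version="2024.9.18",
    entry_points={
        "console_scripts": [
            # Assumed targets: each module would need a callable `main`.
            "atomgpt_forward=atomgpt.forward_models.forward_models:main",
            "atomgpt_inverse=atomgpt.inverse_models.inverse_models:main",
        ]
    },
)
```

After `pip install .`, setuptools would place `atomgpt_forward` and `atomgpt_inverse` wrappers on `PATH`, which is what lets the README drop the explicit `python path/to/script.py` invocations.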
diff --git a/atomgpt/__init__.py b/atomgpt/__init__.py
index b60c2a2..f10342f 100644
--- a/atomgpt/__init__.py
+++ b/atomgpt/__init__.py
@@ -1,3 +1,3 @@
 """Version number."""
-__version__ = "2024.9.8"
+__version__ = "2024.9.18"
 
diff --git a/atomgpt/inverse_models/inverse_models.py b/atomgpt/inverse_models/inverse_models.py
index 5ee4f41..8479939 100644
--- a/atomgpt/inverse_models/inverse_models.py
+++ b/atomgpt/inverse_models/inverse_models.py
@@ -38,7 +38,7 @@ class TrainingPropConfig(BaseSettings):
     prefix: str = "atomgpt_run"
     model_name: str = "unsloth/mistral-7b-bnb-4bit"
     batch_size: int = 2
-    num_epochs: int = 2
+    num_epochs: int = 5
     seed_val: int = 42
     num_train: Optional[int] = 2
     num_val: Optional[int] = 2
@@ -179,7 +179,9 @@ def gen_atoms(prompt="", max_new_tokens=512, model="", tokenizer=""):
     outputs = model.generate(
         **inputs, max_new_tokens=max_new_tokens, use_cache=True
     )
-    response = tokenizer.batch_decode(outputs)[0].split("# Output:")[1]
+    response = tokenizer.batch_decode(outputs)
+    print("response", response)
+    response = response[0].split("# Output:")[1]
     atoms = None
     try:
         atoms = text2atoms(response)
diff --git a/setup.py b/setup.py
index 615b180..806e715 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 
 setuptools.setup(
     name="atomgpt",
-    version="2024.9.8",
+    version="2024.9.18",
     author="Kamal Choudhary",
     author_email="kamal.choudhary@nist.gov",
     description="atomgpt",
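The `gen_atoms` change above decodes the full batch, prints it for inspection, and then keeps the text after the `# Output:` marker before handing it to `text2atoms`. The standalone sketch below illustrates only that string-parsing step, with an added guard for a missing marker; the helper name, the guard, and the mock response string are illustrative additions, not part of the patch.

```
# Illustrative parsing helper (not part of the patch).
def extract_generated_structure(decoded: str, marker: str = "# Output:") -> str:
    """Return the text following the first occurrence of `marker`."""
    if marker not in decoded:
        # The patched code indexes split(...)[1] directly and would raise an
        # IndexError here; an explicit error message is easier to debug.
        raise ValueError(f"marker {marker!r} not found in model response")
    return decoded.split(marker, 1)[1]


# Mock decoded response, mimicking the prompt/response layout implied above.
decoded = "# Input: generate a structure for Si2\n# Output:\nSi2\n1.0\n..."
print(extract_generated_structure(decoded))
```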