diff --git a/atomgpt/forward_models/forward_models.py b/atomgpt/forward_models/forward_models.py
index 6597583..817cdb5 100644
--- a/atomgpt/forward_models/forward_models.py
+++ b/atomgpt/forward_models/forward_models.py
@@ -30,6 +30,8 @@ import sys
 import argparse
 from alignn.pretrained import get_figshare_model
+from atomgpt.inverse_models.utils import get_figlet
+
 
 parser = argparse.ArgumentParser(
     description="Atomistic Generative Pre-trained Transformer."
@@ -279,6 +281,8 @@ def __getitem__(self, idx):
 
 
 def main(config_file="config.json"):
+    figlet = get_figlet()
+    print(figlet)
     print("Running AtomGPT prop predictor.")
     # run_path = os.path.abspath(config_file).split("config.json")[0]
     config = loadjson(config_file)
@@ -772,6 +776,6 @@ def main(config_file="config.json"):
     # output_dir = make_id_prop()
     # output_dir="."
     args = parser.parse_args(sys.argv[1:])
-    run_atomgpt(config_file=args.config_name)
+    main(config_file=args.config_name)
     # config_file="config.json"
     # )
diff --git a/atomgpt/inverse_models/inverse_models.py b/atomgpt/inverse_models/inverse_models.py
index d523288..8617b65 100644
--- a/atomgpt/inverse_models/inverse_models.py
+++ b/atomgpt/inverse_models/inverse_models.py
@@ -258,6 +258,8 @@ def batch_evaluate(
 
 
 def main(config_file="config.json"):
+    if not torch.cuda.is_available():
+        raise ValueError("Currently model training is possible with GPU only.")
     figlet = get_figlet()
     print(figlet)
     t1 = time.time()
@@ -464,6 +466,6 @@ def main(config_file="config.json"):
     # output_dir = make_id_prop()
     # output_dir="."
     args = parser.parse_args(sys.argv[1:])
-    run_atomgpt_inverse(config_file=args.config_name)
+    main(config_file=args.config_name)
     # config_file="config.json"
     # )