
Commit

new version with minor bug fixes
AutoViML committed Jan 16, 2024
1 parent 74fa45e commit e558848
Showing 6 changed files with 12 additions and 7 deletions.
2 changes: 1 addition & 1 deletion deep_autoviml/__init__.py
@@ -45,7 +45,7 @@
model_options=model_options, save_model_flag=True, use_my_model='',
model_use_case='', verbose=0)
- predictions = deepauto.predict(model, project_name, test_dataset=test,
+ predictions = deepauto.predict(model, project_name="deep_autoviml", test_dataset=test,
keras_model_type=keras_model_type,
cat_vocab_dict=cat_vocab_dict)
""" %(module_type, version_number))
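The docstring fix above passes project_name as a keyword argument to deepauto.predict. For context, a minimal end-to-end sketch of the documented fit/predict flow follows; the file names, target column, and the unpacked return values are illustrative assumptions, not taken verbatim from this commit.

    # Hedged sketch of the documented usage; "train.csv", "test.csv" and the
    # target column "label" are placeholders, not part of this repository.
    from deep_autoviml import deep_autoviml as deepauto

    model, cat_vocab_dict = deepauto.fit("train.csv", target="label",
                keras_model_type="basic", project_name="deep_autoviml",
                keras_options={}, model_options={}, save_model_flag=True,
                use_my_model='', model_use_case='', verbose=0)

    predictions = deepauto.predict(model, project_name="deep_autoviml",
                test_dataset="test.csv", keras_model_type="basic",
                cat_vocab_dict=cat_vocab_dict)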
2 changes: 1 addition & 1 deletion deep_autoviml/__version__.py
@@ -20,6 +20,6 @@
__author__ = "Ram Seshadri"
__description__ = "deep_autoviml - build and test multiple Tensorflow 2.0 models and pipelines"
__url__ = "https://github.com/Auto_ViML/deep_autoviml.git"
__version__ = "0.0.82"
__version__ = "0.0.84"
__license__ = "Apache License 2.0"
__copyright__ = "2020-21 Google"
6 changes: 3 additions & 3 deletions deep_autoviml/deep_autoviml.py
@@ -212,7 +212,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
"compression": None => you can set it to zip or other file compression formats if your data is compressed
"csv_encoding": default 'utf-8'. But you can set it to any other csv encoding format your data is in
"label_encode_flag": False. But you can set it to True if you want it encoded.
"max_trials": default = 30 ## number of Storm Tuner trials ### Lower this for faster processing.
"max_trials": default = 5 ## number of Storm Tuner trials ### Lower this for faster processing.
"tuner": default = 'storm' ## Storm Tuner is the default tuner. Optuna is the other option.
"embedding_size": default = 50 ## this is the NLP embedding size minimum
"tf_hub_model": default "" (empty string). If you want to supply TF hub model, provide URL here.
@@ -361,7 +361,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
"patience", "epochs", "steps_per_epoch", "optimizer",
"kernel_initializer", "num_layers", "class_weight",
"loss", "metrics", "monitor","mode", "lr_scheduler","early_stopping",
"class_weight"]
]

keras_options = copy.deepcopy(keras_options_defaults)
if len(keras_options_copy) > 0:
@@ -389,7 +389,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
model_options_defaults['compression'] = None ## is is needed in case to read Zip files
model_options_defaults["label_encode_flag"] = '' ## User can set it to True or False depending on their need.
model_options_defaults["header"] = 0 ### this is the header row for pandas to read
model_options_defaults["max_trials"] = 30 ## number of Storm Tuner trials ###
model_options_defaults["max_trials"] = 5 ## The number of Storm Tuner trials - make it small ###
model_options_defaults['tuner'] = 'storm' ## Storm Tuner is the default tuner. Optuna is the other option.
model_options_defaults["embedding_size"] = "" ## this is the NLP embedding size minimum
model_options_defaults["tf_hub_model"] = "" ## If you want to use a pretrained Hub model, provide URL here.
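Taken together, the changes to deep_autoviml.py lower the default number of Storm Tuner trials from 30 to 5 and drop the duplicated "class_weight" entry from the list of recognized keras_options keys. A minimal sketch of overriding these options is shown below; the specific values and the "train.csv"/"label" placeholders are illustrative assumptions, not recommendations from this commit.

    # Illustrative overrides only; every value here is an assumption.
    from deep_autoviml import deep_autoviml as deepauto

    keras_options = {
        "early_stopping": True,     # one of the recognized keys listed above
        "patience": 10,
        "monitor": "val_loss",
    }
    model_options = {
        "max_trials": 5,            # the new, smaller default number of Storm Tuner trials
        "tuner": "storm",           # 'storm' (default) or 'optuna'
        "label_encode_flag": True,
    }

    model, cat_vocab_dict = deepauto.fit("train.csv", target="label",
                keras_model_type="basic", project_name="deep_autoviml",
                keras_options=keras_options, model_options=model_options,
                save_model_flag=False, use_my_model='', model_use_case='', verbose=0)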
1 change: 1 addition & 0 deletions deep_autoviml/modeling/train_model.py
@@ -157,6 +157,7 @@ def train_model(deep_model, full_ds, target, keras_model_type, keras_options,
      callbacks_dict, tb_logpath = get_callbacks(val_mode, val_monitor, patience, learning_rate,
                                  save_weights_only, onecycle_steps, save_model_path)

+     early_stopping = check_keras_options(keras_options, "early_stopping", False)
      if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
          if early_stopping:
              callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['print']]
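The one-line addition above looks up the user's early_stopping setting (defaulting to False) before the learning-rate-scheduler branch consults it. A rough sketch of that lookup pattern follows; the body of check_keras_options here is a stand-in guess, not the library's actual implementation.

    # Hedged sketch only: check_keras_options lives in deep_autoviml and may
    # differ in detail from this stand-in.
    def check_keras_options(keras_options, key, default):
        """Return the user-supplied option if present and non-empty, else the default."""
        value = keras_options.get(key, default)
        return default if value == '' else value

    keras_options = {"lr_scheduler": "expo", "early_stopping": True}
    early_stopping = check_keras_options(keras_options, "early_stopping", False)
    print(early_stopping)   # True -> the early-stop callback is included in callbacks_list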
6 changes: 5 additions & 1 deletion deep_autoviml/utilities/utilities.py
@@ -820,7 +820,11 @@ def add_outputs_to_model_body(model_body, meta_outputs):
      ##### This is the simplest way to convert a sequential model to functional!
      for num, each_layer in enumerate(model_body.layers):
          if num == 0:
-             final_outputs = each_layer(meta_outputs)
+             if isinstance(meta_outputs,list):
+                 combined_input = layers.concatenate(meta_outputs, name='auto_combined_layer')
+                 final_outputs = each_layer(combined_input)
+             else:
+                 final_outputs = each_layer(meta_outputs)
          else:
              final_outputs = each_layer(final_outputs)
      return final_outputs
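The fix above lets add_outputs_to_model_body accept either a single tensor or a list of tensors: a list is first merged with layers.concatenate before the Sequential body's first layer is applied. A self-contained sketch of that pattern is below; the input shapes, layer sizes, and input names are made-up assumptions for illustration.

    # Standalone illustration of the list-handling fix; shapes and layer sizes
    # are invented for the example.
    import tensorflow as tf
    from tensorflow.keras import layers

    inp_a = tf.keras.Input(shape=(8,), name="numeric_branch")
    inp_b = tf.keras.Input(shape=(4,), name="categorical_branch")
    meta_outputs = [inp_a, inp_b]        # a multi-branch preprocessor can return a list

    model_body = tf.keras.Sequential([layers.Dense(16, activation="relu"),
                                      layers.Dense(1)])

    if isinstance(meta_outputs, list):
        combined_input = layers.concatenate(meta_outputs, name="auto_combined_layer")
    else:
        combined_input = meta_outputs

    # Apply the Sequential layers one by one, turning the body into a functional graph
    final_outputs = combined_input
    for each_layer in model_body.layers:
        final_outputs = each_layer(final_outputs)

    model = tf.keras.Model(inputs=[inp_a, inp_b], outputs=final_outputs)
    model.summary()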
2 changes: 1 addition & 1 deletion setup.py
@@ -20,7 +20,7 @@

setuptools.setup(
name="deep_autoviml",
version="0.0.82",
version="0.0.84",
author="Ram Seshadri",
# author_email="[email protected]",
description="Automatically Build Deep Learning Models and Pipelines fast!",
