From 25536e3b47f913c0cafcd3423f60c05170aae835 Mon Sep 17 00:00:00 2001
From: xpai
Date: Wed, 6 Nov 2024 20:06:37 +0800
Subject: [PATCH] Fix multi-task models (#115, #125)

---
 CHANGELOG.md                                  |   3 +
 experiment/run_expid.py                       |   1 -
 fuxictr/autotuner.py                          |   2 +-
 fuxictr/pytorch/models/multitask_model.py     |  37 ++++---
 fuxictr/version.py                            |   2 +-
 .../multitask/MMoE/config/model_config.yaml   |  33 +++++-
 model_zoo/multitask/MMoE/fuxictr_version.py   |   2 +-
 .../multitask/PLE/config/model_config.yaml    |  35 +++++-
 model_zoo/multitask/PLE/fuxictr_version.py    |   2 +-
 model_zoo/multitask/PLE/src/PLE.py            |  71 +++++++-----
 .../ShareBottom/config/model_config.yaml      |  29 ++++-
 .../multitask/ShareBottom/fuxictr_version.py  |   2 +-
 model_zoo/multitask/ShareBottom/run_expid.py  |   5 +-
 .../multitask/ShareBottom/src/__init__.py     |   2 +-
 setup.py                                      |   2 +-
 tests/test_torch.sh                           | 103 +++++++++---------
 16 files changed, 219 insertions(+), 112 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ae3a27..ca7959c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,9 @@
 [Doing] Add support for saving pb file, exporting embeddings
 [Doing] Add support of multi-gpu training
 
+**FuxiCTR v2.3.5, 2024-11-06**
++ [Fix] Fix get_inputs() bug ([#115](https://github.com/reczoo/FuxiCTR/issues/115))
+
 **FuxiCTR v2.3.4, 2024-11-05**
 + [Feature] Add WuKong model
 + [Fix] Fix OOV token update ([#119](https://github.com/reczoo/FuxiCTR/issues/119))
diff --git a/experiment/run_expid.py b/experiment/run_expid.py
index f4ce70b..49ffcf5 100644
--- a/experiment/run_expid.py
+++ b/experiment/run_expid.py
@@ -20,7 +20,6 @@
 import sys
 import logging
 import fuxictr_version
-from fuxictr import datasets
 from datetime import datetime
 from fuxictr.utils import load_config, set_logger, print_to_json, print_to_list
 from fuxictr.features import FeatureMap
diff --git a/fuxictr/autotuner.py b/fuxictr/autotuner.py
index 74483bd..3a6e391 100644
--- a/fuxictr/autotuner.py
+++ b/fuxictr/autotuner.py
@@ -72,7 +72,7 @@ def enumerate_params(config_file, exclude_expid=[]):
         dataset_params = dict(zip(dataset_para_keys, values))
         if (dataset_params["data_format"] == "npz" or
             (dataset_params["data_format"] == "parquet" and
-             dataset_params["rebuild_dataset"] == False)):
+             dataset_params.get("rebuild_dataset") == False)):
             dataset_para_combs[dataset_id] = dataset_params
         else:
             hash_id = hashlib.md5("".join(sorted(print_to_json(dataset_params))).encode("utf-8")).hexdigest()[0:8]
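A note on the autotuner hunk above: `rebuild_dataset` is an optional key in tuner configs, so direct indexing raised a KeyError whenever a parquet config omitted it. A minimal sketch of the behavioral difference (illustrative only, not FuxiCTR code):

```python
# A parquet tuner config that omits the optional "rebuild_dataset" key.
dataset_params = {"data_format": "parquet"}

# Old behavior: dataset_params["rebuild_dataset"] raises KeyError.
# New behavior: .get() returns None for the missing key, so the
# comparison simply evaluates to False and enumeration continues.
print(dataset_params.get("rebuild_dataset") == False)  # False, no exception
```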
diff --git a/fuxictr/pytorch/models/multitask_model.py b/fuxictr/pytorch/models/multitask_model.py
index aa37ae2..231102a 100644
--- a/fuxictr/pytorch/models/multitask_model.py
+++ b/fuxictr/pytorch/models/multitask_model.py
@@ -44,19 +44,19 @@ def __init__(self,
                  reduce_lr_on_plateau=True,
                  **kwargs):
         super(MultiTaskModel, self).__init__(feature_map=feature_map,
-                                                  model_id=model_id,
-                                                  task="binary_classification",
-                                                  gpu=gpu,
-                                                  loss_weight=loss_weight,
-                                                  monitor=monitor,
-                                                  save_best_only=save_best_only,
-                                                  monitor_mode=monitor_mode,
-                                                  early_stop_patience=early_stop_patience,
-                                                  eval_steps=eval_steps,
-                                                  embedding_regularizer=embedding_regularizer,
-                                                  net_regularizer=net_regularizer,
-                                                  reduce_lr_on_plateau=reduce_lr_on_plateau,
-                                                  **kwargs)
+                                             model_id=model_id,
+                                             task="binary_classification",
+                                             gpu=gpu,
+                                             loss_weight=loss_weight,
+                                             monitor=monitor,
+                                             save_best_only=save_best_only,
+                                             monitor_mode=monitor_mode,
+                                             early_stop_patience=early_stop_patience,
+                                             eval_steps=eval_steps,
+                                             embedding_regularizer=embedding_regularizer,
+                                             net_regularizer=net_regularizer,
+                                             reduce_lr_on_plateau=reduce_lr_on_plateau,
+                                             **kwargs)
         self.device = get_device(gpu)
         self.num_tasks = num_tasks
         self.loss_weight = loss_weight
@@ -64,7 +64,9 @@ def __init__(self,
             assert len(task) == num_tasks, "the number of tasks must equal the length of \"task\""
             self.output_activation = nn.ModuleList([self.get_output_activation(str(t)) for t in task])
         else:
-            self.output_activation = nn.ModuleList([self.get_output_activation(task) for _ in range(num_tasks)])
+            self.output_activation = nn.ModuleList(
+                [self.get_output_activation(task) for _ in range(num_tasks)]
+            )
 
     def compile(self, optimizer, loss, lr):
         self.optimizer = get_optimizer(optimizer, self.parameters(), lr)
@@ -74,8 +76,9 @@ def compile(self, optimizer, loss, lr):
             self.loss_fn = [get_loss(loss) for _ in range(self.num_tasks)]
 
     def get_labels(self, inputs):
+        """ Override get_labels() to use multiple labels """
         labels = self.feature_map.labels
-        y = [inputs[:, self.feature_map.get_column_index(labels[i])].to(self.device).float().view(-1, 1)
+        y = [inputs[labels[i]].to(self.device).float().view(-1, 1)
             for i in range(len(labels))]
         return y
 
@@ -140,7 +143,7 @@ def evaluate(self, data_generator, metrics=None):
                 val_logs = self.evaluate_metrics(y_true, y_pred, metrics, group_id)
             else:
                 val_logs = self.evaluate_metrics(y_true, y_pred, self.validation_metrics, group_id)
-            logging.info('[Metrics] [Task: {}] '.format(labels[i]) + ' - '.join(
+            logging.info('[Task: {}][Metrics] '.format(labels[i]) + ' - '.join(
                 '{}: {:.6f}'.format(k, v) for k, v in val_logs.items()))
             for k, v in val_logs.items():
                 all_val_logs['{}_{}'.format(labels[i], k)] = v
@@ -162,4 +165,4 @@ def predict(self, data_generator):
             for i in range(len(labels)):
                 y_pred_all[labels[i]].extend(
                     return_dict["{}_pred".format(labels[i])].data.cpu().numpy().reshape(-1))
-        return y_pred_all
\ No newline at end of file
+        return y_pred_all
diff --git a/fuxictr/version.py b/fuxictr/version.py
index 90288fc..9cbcfb8 100644
--- a/fuxictr/version.py
+++ b/fuxictr/version.py
@@ -1 +1 @@
-__version__="2.3.4"
+__version__="2.3.5"
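The `get_labels()` change above reflects the data pipeline handing batches to the model as name-keyed mappings rather than as a single dense tensor indexed by column position. A small sketch of the patched contract (the toy `batch` and label names below are assumptions for illustration):

```python
import torch

# Assumed dict-style batch keyed by column name, with two label columns.
batch = {
    "click": torch.tensor([1.0, 0.0, 1.0]),
    "like": torch.tensor([0.0, 0.0, 1.0]),
}
labels = ["click", "like"]  # plays the role of feature_map.labels
device = "cpu"

# One (batch_size, 1) float tensor per task, mirroring the patched loop.
y = [batch[name].to(device).float().view(-1, 1) for name in labels]
print([t.shape for t in y])  # [torch.Size([3, 1]), torch.Size([3, 1])]
```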
diff --git a/model_zoo/multitask/MMoE/config/model_config.yaml b/model_zoo/multitask/MMoE/config/model_config.yaml
index 8bef3aa..7a181e5 100644
--- a/model_zoo/multitask/MMoE/config/model_config.yaml
+++ b/model_zoo/multitask/MMoE/config/model_config.yaml
@@ -1,3 +1,4 @@
+### Base: This base setting will be inherited by all the expid configs.
 Base:
     model_root: './checkpoints/'
     num_workers: 3
@@ -12,6 +13,34 @@ Base:
     feature_specs: null
     feature_config: null
 
+### ModelName_default: This is a config template for hyper-tuning use
+MMoE_default:
+    model: MMoE
+    dataset_id: TBD
+    loss: ['binary_crossentropy','binary_crossentropy']
+    metrics: ['logloss', 'AUC']
+    task: ['binary_classification','binary_classification']
+    num_tasks: 2
+    optimizer: adam
+    learning_rate: 1.e-3
+    num_experts: 8
+    expert_hidden_units: [512,256,128]
+    gate_hidden_units: [128, 64]
+    tower_hidden_units: [128, 64]
+    hidden_activations: relu
+    net_regularizer: 0
+    embedding_regularizer: 1.e-6
+    batch_norm: False
+    net_dropout: 0
+    batch_size: 128
+    embedding_dim: 128
+    epochs: 100
+    shuffle: True
+    seed: 2023
+    monitor: 'AUC'
+    monitor_mode: 'max'
+
+### ModelName_test: This is a config for test only
 MMoE_test:
     model: MMoE
     dataset_id: tiny_mtl
@@ -32,8 +61,8 @@ MMoE_test:
     net_dropout: 0
     batch_size: 128
     embedding_dim: 128
-    epochs: 50
+    epochs: 1
     shuffle: True
     seed: 2023
     monitor: 'AUC'
-    monitor_mode: 'max'
\ No newline at end of file
+    monitor_mode: 'max'
diff --git a/model_zoo/multitask/MMoE/fuxictr_version.py b/model_zoo/multitask/MMoE/fuxictr_version.py
index be6a36a..b35d295 100644
--- a/model_zoo/multitask/MMoE/fuxictr_version.py
+++ b/model_zoo/multitask/MMoE/fuxictr_version.py
@@ -1,3 +1,3 @@
 # pip install -U fuxictr
 import fuxictr
-assert fuxictr.__version__ >= "2.2.0"
+assert fuxictr.__version__ >= "2.3.4"
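The MMoE changes above are config-only. For orientation, a toy sketch of the expert/gate computation that `num_experts` and `gate_hidden_units` parameterize (illustrative shapes, not the model_zoo implementation):

```python
import torch
import torch.nn as nn

batch_size, dim, num_experts, num_tasks = 4, 16, 8, 2
experts = nn.ModuleList([nn.Linear(dim, dim) for _ in range(num_experts)])
gates = nn.ModuleList([nn.Linear(dim, num_experts) for _ in range(num_tasks)])

x = torch.randn(batch_size, dim)
# All experts are shared; each task mixes them with its own softmax gate.
expert_out = torch.stack([expert(x) for expert in experts], dim=1)  # (B, E, dim)
task_outputs = []
for gate in gates:
    weights = torch.softmax(gate(x), dim=-1).unsqueeze(-1)          # (B, E, 1)
    task_outputs.append((weights * expert_out).sum(dim=1))          # (B, dim)
print([out.shape for out in task_outputs])
```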
diff --git a/model_zoo/multitask/PLE/config/model_config.yaml b/model_zoo/multitask/PLE/config/model_config.yaml
index fcb765a..f67b8f7 100644
--- a/model_zoo/multitask/PLE/config/model_config.yaml
+++ b/model_zoo/multitask/PLE/config/model_config.yaml
@@ -1,3 +1,4 @@
+### Base: This base setting will be inherited by all the expid configs.
 Base:
     model_root: './checkpoints/'
     num_workers: 3
@@ -12,6 +13,36 @@ Base:
     feature_specs: null
     feature_config: null
 
+### ModelName_default: This is a config template for hyper-tuning use
+PLE_default:
+    model: PLE
+    dataset_id: TBD
+    loss: ['binary_crossentropy','binary_crossentropy']
+    metrics: ['logloss', 'AUC']
+    task: ['binary_classification','binary_classification']
+    num_tasks: 2
+    optimizer: adam
+    learning_rate: 1.e-3
+    num_layers: 1
+    num_shared_experts: 8
+    num_specific_experts: 1
+    expert_hidden_units: [512,256,128]
+    gate_hidden_units: [128, 64]
+    tower_hidden_units: [128, 64]
+    hidden_activations: relu
+    net_regularizer: 0
+    embedding_regularizer: 1.e-6
+    batch_norm: False
+    net_dropout: 0
+    batch_size: 128
+    embedding_dim: 128
+    epochs: 50
+    shuffle: True
+    seed: 2023
+    monitor: 'AUC'
+    monitor_mode: 'max'
+
+### ModelName_test: This is a config for test only
 PLE_test:
     model: PLE
     dataset_id: tiny_mtl
@@ -34,8 +65,8 @@ PLE_test:
     net_dropout: 0
     batch_size: 128
     embedding_dim: 128
-    epochs: 50
+    epochs: 1
     shuffle: True
     seed: 2023
     monitor: 'AUC'
-    monitor_mode: 'max'
\ No newline at end of file
+    monitor_mode: 'max'
diff --git a/model_zoo/multitask/PLE/fuxictr_version.py b/model_zoo/multitask/PLE/fuxictr_version.py
index be6a36a..b35d295 100644
--- a/model_zoo/multitask/PLE/fuxictr_version.py
+++ b/model_zoo/multitask/PLE/fuxictr_version.py
@@ -1,3 +1,3 @@
 # pip install -U fuxictr
 import fuxictr
-assert fuxictr.__version__ >= "2.2.0"
+assert fuxictr.__version__ >= "2.3.4"
diff --git a/model_zoo/multitask/PLE/src/PLE.py b/model_zoo/multitask/PLE/src/PLE.py
index 6d63b81..c0ce1a0 100644
--- a/model_zoo/multitask/PLE/src/PLE.py
+++ b/model_zoo/multitask/PLE/src/PLE.py
@@ -22,31 +22,38 @@
 
 class CGC_Layer(nn.Module):
-    def __init__(self, num_shared_experts, num_specific_experts, num_tasks, input_dim, expert_hidden_units, gate_hidden_units, hidden_activations,
+    def __init__(self, num_shared_experts, num_specific_experts, num_tasks, input_dim,
+                 expert_hidden_units, gate_hidden_units, hidden_activations,
                  net_dropout, batch_norm):
         super(CGC_Layer, self).__init__()
         self.num_shared_experts = num_shared_experts
         self.num_specific_experts = num_specific_experts
         self.num_tasks = num_tasks
-        self.shared_experts = nn.ModuleList([MLP_Block(input_dim=input_dim,
-                                                       hidden_units=expert_hidden_units,
-                                                       hidden_activations=hidden_activations,
-                                                       output_activation=None,
-                                                       dropout_rates=net_dropout,
-                                                       batch_norm=batch_norm) for _ in range(self.num_shared_experts)])
-        self.specific_experts = nn.ModuleList([nn.ModuleList([MLP_Block(input_dim=input_dim,
-                                                                        hidden_units=expert_hidden_units,
-                                                                        hidden_activations=hidden_activations,
-                                                                        output_activation=None,
-                                                                        dropout_rates=net_dropout,
-                                                                        batch_norm=batch_norm) for _ in range(self.num_specific_experts)]) for _ in range(num_tasks)])
-        self.gate = nn.ModuleList([MLP_Block(input_dim=input_dim,
-                                             output_dim=num_specific_experts+num_shared_experts if i < num_tasks else num_shared_experts,
-                                             hidden_units=gate_hidden_units,
-                                             hidden_activations=hidden_activations,
-                                             output_activation=None,
-                                             dropout_rates=net_dropout,
-                                             batch_norm=batch_norm) for i in range(self.num_tasks+1)])
+        self.shared_experts = nn.ModuleList(
+            [MLP_Block(input_dim=input_dim,
+                       hidden_units=expert_hidden_units,
+                       hidden_activations=hidden_activations,
+                       output_activation=None,
+                       dropout_rates=net_dropout,
+                       batch_norm=batch_norm) for _ in range(self.num_shared_experts)]
+        )
+        self.specific_experts = nn.ModuleList(
+            [nn.ModuleList([MLP_Block(input_dim=input_dim,
+                                      hidden_units=expert_hidden_units,
+                                      hidden_activations=hidden_activations,
+                                      output_activation=None,
+                                      dropout_rates=net_dropout,
+                                      batch_norm=batch_norm) for _ in range(self.num_specific_experts)]) for _ in range(num_tasks)]
+        )
+        self.gate = nn.ModuleList(
+            [MLP_Block(input_dim=input_dim,
+                       output_dim=num_specific_experts+num_shared_experts if i < num_tasks else num_shared_experts,
+                       hidden_units=gate_hidden_units,
+                       hidden_activations=hidden_activations,
+                       output_activation=None,
+                       dropout_rates=net_dropout,
+                       batch_norm=batch_norm) for i in range(self.num_tasks+1)]
+        )
         self.gate_activation = get_activation('softmax')
 
     def forward(self, x, require_gate=False):
         """
@@ -69,7 +76,8 @@ def forward(self, x, require_gate=False):
         for i in range(self.num_tasks+1):
             if i < self.num_tasks:
                 # for specific experts
-                gate_input = torch.stack(specific_expert_outputs[i] + shared_expert_outputs, dim=1) # (?, num_specific_experts+num_shared_experts, dim)
+                # gate_input: (?, num_specific_experts+num_shared_experts, dim)
+                gate_input = torch.stack(specific_expert_outputs[i] + shared_expert_outputs, dim=1)
                 gate = self.gate_activation(self.gate[i](x[i])) # (?, num_specific_experts+num_shared_experts)
                 gates.append(gate.mean(0))
                 cgc_output = torch.sum(gate.unsqueeze(-1) * gate_input, dim=1) # (?, dim)
@@ -86,6 +94,7 @@ def forward(self, x, require_gate=False):
         else:
             return cgc_outputs
 
+
 class PLE(MultiTaskModel):
     def __init__(self,
                  feature_map,
@@ -117,15 +126,17 @@ def __init__(self,
                  **kwargs)
         self.embedding_layer = FeatureEmbedding(feature_map, embedding_dim)
         self.num_layers = num_layers
-        self.cgc_layers = nn.ModuleList([CGC_Layer(num_shared_experts,
-                                                   num_specific_experts,
-                                                   num_tasks,
-                                                   input_dim= embedding_dim * feature_map.num_fields if i==0 else expert_hidden_units[-1],
-                                                   expert_hidden_units= expert_hidden_units,
-                                                   gate_hidden_units=gate_hidden_units,
-                                                   hidden_activations=hidden_activations,
-                                                   net_dropout=net_dropout,
-                                                   batch_norm=batch_norm) for i in range(self.num_layers)])
+        self.cgc_layers = nn.ModuleList(
+            [CGC_Layer(num_shared_experts,
+                       num_specific_experts,
+                       num_tasks,
+                       input_dim= embedding_dim * feature_map.num_fields if i==0 else expert_hidden_units[-1],
+                       expert_hidden_units= expert_hidden_units,
+                       gate_hidden_units=gate_hidden_units,
+                       hidden_activations=hidden_activations,
+                       net_dropout=net_dropout,
+                       batch_norm=batch_norm) for i in range(self.num_layers)]
+        )
         self.tower = nn.ModuleList([MLP_Block(input_dim=expert_hidden_units[-1],
                                               output_dim=1,
                                               hidden_units=tower_hidden_units,
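The reformatted `CGC_Layer` above builds `num_tasks + 1` gates: one per task plus a shared gate, whose output width follows the `num_specific_experts + num_shared_experts if i < num_tasks else num_shared_experts` rule. A toy sketch of that sizing rule and of the softmax-weighted sum in `forward()` (illustrative numbers only):

```python
import torch

num_tasks, num_specific, num_shared = 2, 1, 8
for i in range(num_tasks + 1):
    # Task gates mix that task's specific experts plus the shared experts;
    # the extra gate (index num_tasks) feeds the next layer's shared path
    # and only mixes shared experts.
    out_dim = num_specific + num_shared if i < num_tasks else num_shared
    print(f"gate {i}: softmax over {out_dim} expert outputs")

# The weighted sum itself, with the shapes noted in the forward() comments.
batch_size, dim = 4, 8
gate_input = torch.randn(batch_size, num_specific + num_shared, dim)
gate = torch.softmax(torch.randn(batch_size, num_specific + num_shared), dim=-1)
cgc_output = torch.sum(gate.unsqueeze(-1) * gate_input, dim=1)  # (batch_size, dim)
print(cgc_output.shape)
```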
diff --git a/model_zoo/multitask/ShareBottom/config/model_config.yaml b/model_zoo/multitask/ShareBottom/config/model_config.yaml
index 7325476..c5ec57b 100644
--- a/model_zoo/multitask/ShareBottom/config/model_config.yaml
+++ b/model_zoo/multitask/ShareBottom/config/model_config.yaml
@@ -1,3 +1,4 @@
+### Base: This base setting will be inherited by all the expid configs.
 Base:
     model_root: './checkpoints/'
     num_workers: 3
@@ -12,8 +13,34 @@ Base:
     feature_specs: null
     feature_config: null
 
+### ModelName_default: This is a config template for hyper-tuning use
+ShareBottom_default:
+    model: ShareBottom
+    dataset_id: TBD
+    loss: ['binary_crossentropy', 'binary_crossentropy']
+    metrics: ['logloss', 'AUC']
+    task: ['binary_classification', 'binary_classification']
+    num_tasks: 2
+    optimizer: adam
+    learning_rate: 1.e-3
+    bottom_hidden_units: [512, 256, 128]
+    tower_hidden_units: [128, 64]
+    hidden_activations: relu
+    net_regularizer: 0
+    embedding_regularizer: 1.e-6
+    batch_norm: False
+    net_dropout: 0
+    batch_size: 128
+    embedding_dim: 128
+    epochs: 100
+    shuffle: True
+    seed: 2023
+    monitor: 'AUC'
+    monitor_mode: 'max'
+
+### ModelName_test: This is a config for test only
 ShareBottom_test:
-    model: SharedBottom
+    model: ShareBottom
     dataset_id: tiny_mtl
     loss: ['binary_crossentropy', 'binary_crossentropy']
     metrics: ['logloss', 'AUC']
diff --git a/model_zoo/multitask/ShareBottom/fuxictr_version.py b/model_zoo/multitask/ShareBottom/fuxictr_version.py
index be6a36a..b35d295 100644
--- a/model_zoo/multitask/ShareBottom/fuxictr_version.py
+++ b/model_zoo/multitask/ShareBottom/fuxictr_version.py
@@ -1,3 +1,3 @@
 # pip install -U fuxictr
 import fuxictr
-assert fuxictr.__version__ >= "2.2.0"
+assert fuxictr.__version__ >= "2.3.4"
diff --git a/model_zoo/multitask/ShareBottom/run_expid.py b/model_zoo/multitask/ShareBottom/run_expid.py
index 3a74f94..6fa97f4 100644
--- a/model_zoo/multitask/ShareBottom/run_expid.py
+++ b/model_zoo/multitask/ShareBottom/run_expid.py
@@ -27,7 +27,7 @@
 from fuxictr.pytorch.torch_utils import seed_everything
 from fuxictr.pytorch.dataloaders import RankDataLoader
 from fuxictr.preprocess import FeatureProcessor, build_dataset
-import src as model_zoo
+import src
 import gc
 import argparse
 import os
@@ -61,7 +61,7 @@
     feature_map.load(feature_map_json, params)
     logging.info("Feature specs: " + print_to_json(feature_map.features))
 
-    model_class = getattr(model_zoo, params['model'])
+    model_class = getattr(src, params['model'])
     model = model_class(feature_map, **params)
     model.count_parameters() # print number of parameters used in model
@@ -85,4 +85,3 @@
             .format(datetime.now().strftime('%Y%m%d-%H%M%S'), ' '.join(sys.argv), experiment_id,
                     params['dataset_id'], "N.A.", print_to_list(valid_result), print_to_list(test_result)))
-
diff --git a/model_zoo/multitask/ShareBottom/src/__init__.py b/model_zoo/multitask/ShareBottom/src/__init__.py
index 901e562..ae86b63 100644
--- a/model_zoo/multitask/ShareBottom/src/__init__.py
+++ b/model_zoo/multitask/ShareBottom/src/__init__.py
@@ -1 +1 @@
-from .ShareBottom import *
+from .ShareBottom import ShareBottom
diff --git a/setup.py b/setup.py
index 4786327..fd20d58 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 setuptools.setup(
     name="fuxictr",
-    version="2.3.4",
+    version="2.3.5",
     author="RECZOO",
     author_email="reczoo@users.noreply.github.com",
     description="A configurable, tunable, and reproducible library for CTR prediction",
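The `__init__.py` change above swaps a star-import for an explicit re-export, which keeps `getattr(src, params['model'])` deterministic: when a submodule and a class share the name `ShareBottom`, which object the package attribute refers to can depend on import order. A toy reconstruction of the pitfall (not FuxiCTR code):

```python
import sys
import types

# A tiny package "pkg" with submodule "Foo" defining class Foo, mimicking
# src/ShareBottom.py defining class ShareBottom.
pkg = types.ModuleType("pkg")
sub = types.ModuleType("pkg.Foo")
exec("class Foo:\n    pass", sub.__dict__)
sys.modules["pkg"], sys.modules["pkg.Foo"] = pkg, sub

pkg.Foo = sub  # importing the submodule binds the *module* onto the package
print(type(getattr(pkg, "Foo")))  # <class 'module'> -- not constructible

pkg.Foo = sub.Foo  # an explicit re-export rebinds the name to the class
print(type(getattr(pkg, "Foo")))  # <class 'type'>
```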
diff --git a/tests/test_torch.sh b/tests/test_torch.sh
index 5e809ce..4272c1e 100644
--- a/tests/test_torch.sh
+++ b/tests/test_torch.sh
@@ -1,56 +1,61 @@
 #! /bin/sh
 home="$(pwd)/../model_zoo"
 
-echo "=== Testing AFM ===" && cd $home/AFM && python run_expid.py --expid AFM_test && \
-echo "=== Testing AFN ===" && cd $home/AFN && python run_expid.py --expid AFN_test && \
-echo "=== Testing AOANet ===" && cd $home/AOANet && python run_expid.py --expid AOANet_test && \
-echo "=== Testing APG ===" && cd $home/APG && python run_expid.py --expid APG_DeepFM_test && \
-echo "=== Testing AutoInt ===" && cd $home/AutoInt && python run_expid.py --expid AutoInt_test && \
-echo "=== Testing BST ===" && cd $home/BST && python run_expid.py --expid BST_test && \
-echo "=== Testing CCPM ===" && cd $home/CCPM && python run_expid.py --expid CCPM_test && \
-echo "=== Testing DCN ===" && cd $home/DCN/DCN_torch && python run_expid.py --expid DCN_test && \
-echo "=== Testing DCNv2 ===" && cd $home/DCNv2 && python run_expid.py --expid DCNv2_test && \
-echo "=== Testing DCNv3 ===" && cd $home/DCNv3 && python run_expid.py --expid DCNv3_test && \
-echo "=== Testing DeepCrossing ===" && cd $home/DeepCrossing && python run_expid.py --expid DeepCrossing_test && \
-echo "=== Testing DeepFM ===" && cd $home/DeepFM/DeepFM_torch && python run_expid.py --expid DeepFM_test && \
-echo "=== Testing DeepIM ===" && cd $home/DeepIM && python run_expid.py --expid DeepIM_test && \
-echo "=== Testing DESTINE ===" && cd $home/DESTINE && python run_expid.py --expid DESTINE_test && \
-echo "=== Testing DIEN ===" && cd $home/DIEN && python run_expid.py --expid DIEN_test && \
-echo "=== Testing DIN ===" && cd $home/DIN && python run_expid.py --expid DIN_test && \
-echo "=== Testing DLRM ===" && cd $home/DLRM && python run_expid.py --expid DLRM_test && \
-echo "=== Testing DMIN ===" && cd $home/DMIN && python run_expid.py --expid DMIN_test && \
-echo "=== Testing DMR ===" && cd $home/DMR && python run_expid.py --expid DMR_test && \
-echo "=== Testing DNN ===" && cd $home/DNN/DNN_torch && python run_expid.py --expid DNN_test && \
-echo "=== Testing DSSM ===" && cd $home/DSSM && python run_expid.py --expid DSSM_test && \
-echo "=== Testing EDCN ===" && cd $home/EDCN && python run_expid.py --expid EDCN_test && \
-echo "=== Testing EulerNet ===" && cd $home/EulerNet && python run_expid.py --expid EulerNet_test && \
-echo "=== Testing FFM ===" && cd $home/FFM && python run_expid.py --expid FFM_test && \
-echo "=== Testing FFMv2 ===" && python run_expid.py --expid FFMv2_test
-echo "=== Testing FGCNN ===" && cd $home/FGCNN && python run_expid.py --expid FGCNN_test && \
-echo "=== Testing FiBiNET ===" && cd $home/FiBiNET && python run_expid.py --expid FiBiNET_test && \
-echo "=== Testing FiGNN ===" && cd $home/FiGNN && python run_expid.py --expid FiGNN_test && \
-echo "=== Testing FinalMLP ===" && cd $home/FinalMLP && python run_expid.py --expid FinalMLP_test && \
-echo "=== Testing FinalNet ===" && cd $home/FinalNet && python run_expid.py --expid FinalNet_test && \
-echo "=== Testing FLEN ===" && cd $home/FLEN && python run_expid.py --expid FLEN_test && \
-echo "=== Testing FM ===" && cd $home/FM && python run_expid.py --expid FM_test && \
-echo "=== Testing FmFM ===" && cd $home/FmFM && python run_expid.py --expid FmFM_test && \
-echo "=== Testing FwFM ===" && cd $home/FwFM && python run_expid.py --expid FwFM_test && \
-echo "=== Testing GDCN ===" && cd $home/GDCN && python run_expid.py --expid GDCN_test && \
-echo "=== Testing HFM ===" && cd $home/HFM && python run_expid.py --expid HFM_test && \
-echo "=== Testing HOFM ===" && cd $home/HOFM && python run_expid.py --expid HOFM_test && \
-echo "=== Testing InterHAt ===" && cd $home/InterHAt && python run_expid.py --expid InterHAt_test && \
-echo "=== Testing LorentzFM ===" && cd $home/LorentzFM && python run_expid.py --expid LorentzFM_test && \
-echo "=== Testing LR ===" && cd $home/LR && python run_expid.py --expid LR_test && \
-echo "=== Testing MaskNet ===" && cd $home/MaskNet && python run_expid.py --expid MaskNet_test && \
-echo "=== Testing NFM ===" && cd $home/NFM && python run_expid.py --expid NFM_test && \
-echo "=== Testing ONN ===" && cd $home/ONN/ONN_torch && python run_expid.py --expid ONN_test && \
-echo "=== Testing ONNv2 ===" && cd $home/ONN/ONN_torch && python run_expid.py --expid ONNv2_test && \
-echo "=== Testing PPNet ===" && cd $home/PEPNet && python run_expid.py --expid PPNet_test && \
-echo "=== Testing PNN ===" && cd $home/PNN && python run_expid.py --expid PNN_test && \
-echo "=== Testing SAM ===" && cd $home/SAM && python run_expid.py --expid SAM_test && \
-echo "=== Testing TransAct ===" && cd $home/TransAct && python run_expid.py --expid TransAct_test && \
-echo "=== Testing WideDeep ===" && cd $home/WideDeep/WideDeep_torch && python run_expid.py --expid WideDeep_test && \
+# echo "=== Testing AFM ===" && cd $home/AFM && python run_expid.py --expid AFM_test && \
+# echo "=== Testing AFN ===" && cd $home/AFN && python run_expid.py --expid AFN_test && \
+# echo "=== Testing AOANet ===" && cd $home/AOANet && python run_expid.py --expid AOANet_test && \
+# echo "=== Testing APG ===" && cd $home/APG && python run_expid.py --expid APG_DeepFM_test && \
+# echo "=== Testing AutoInt ===" && cd $home/AutoInt && python run_expid.py --expid AutoInt_test && \
+# echo "=== Testing BST ===" && cd $home/BST && python run_expid.py --expid BST_test && \
+# echo "=== Testing CCPM ===" && cd $home/CCPM && python run_expid.py --expid CCPM_test && \
+# echo "=== Testing DCN ===" && cd $home/DCN/DCN_torch && python run_expid.py --expid DCN_test && \
+# echo "=== Testing DCNv2 ===" && cd $home/DCNv2 && python run_expid.py --expid DCNv2_test && \
+# echo "=== Testing DCNv3 ===" && cd $home/DCNv3 && python run_expid.py --expid DCNv3_test && \
+# echo "=== Testing DeepCrossing ===" && cd $home/DeepCrossing && python run_expid.py --expid DeepCrossing_test && \
+# echo "=== Testing DeepFM ===" && cd $home/DeepFM/DeepFM_torch && python run_expid.py --expid DeepFM_test && \
+# echo "=== Testing DeepIM ===" && cd $home/DeepIM && python run_expid.py --expid DeepIM_test && \
+# echo "=== Testing DESTINE ===" && cd $home/DESTINE && python run_expid.py --expid DESTINE_test && \
+# echo "=== Testing DIEN ===" && cd $home/DIEN && python run_expid.py --expid DIEN_test && \
+# echo "=== Testing DIN ===" && cd $home/DIN && python run_expid.py --expid DIN_test && \
+# echo "=== Testing DLRM ===" && cd $home/DLRM && python run_expid.py --expid DLRM_test && \
+# echo "=== Testing DMIN ===" && cd $home/DMIN && python run_expid.py --expid DMIN_test && \
+# echo "=== Testing DMR ===" && cd $home/DMR && python run_expid.py --expid DMR_test && \
+# echo "=== Testing DNN ===" && cd $home/DNN/DNN_torch && python run_expid.py --expid DNN_test && \
+# echo "=== Testing DSSM ===" && cd $home/DSSM && python run_expid.py --expid DSSM_test && \
+# echo "=== Testing EDCN ===" && cd $home/EDCN && python run_expid.py --expid EDCN_test && \
+# echo "=== Testing EulerNet ===" && cd $home/EulerNet && python run_expid.py --expid EulerNet_test && \
+# echo "=== Testing FFM ===" && cd $home/FFM && python run_expid.py --expid FFM_test && \
+# echo "=== Testing FFMv2 ===" && python run_expid.py --expid FFMv2_test
+# echo "=== Testing FGCNN ===" && cd $home/FGCNN && python run_expid.py --expid FGCNN_test && \
+# echo "=== Testing FiBiNET ===" && cd $home/FiBiNET && python run_expid.py --expid FiBiNET_test && \
+# echo "=== Testing FiGNN ===" && cd $home/FiGNN && python run_expid.py --expid FiGNN_test && \
+# echo "=== Testing FinalMLP ===" && cd $home/FinalMLP && python run_expid.py --expid FinalMLP_test && \
+# echo "=== Testing FinalNet ===" && cd $home/FinalNet && python run_expid.py --expid FinalNet_test && \
+# echo "=== Testing FLEN ===" && cd $home/FLEN && python run_expid.py --expid FLEN_test && \
+# echo "=== Testing FM ===" && cd $home/FM && python run_expid.py --expid FM_test && \
+# echo "=== Testing FmFM ===" && cd $home/FmFM && python run_expid.py --expid FmFM_test && \
+# echo "=== Testing FwFM ===" && cd $home/FwFM && python run_expid.py --expid FwFM_test && \
+# echo "=== Testing GDCN ===" && cd $home/GDCN && python run_expid.py --expid GDCN_test && \
+# echo "=== Testing HFM ===" && cd $home/HFM && python run_expid.py --expid HFM_test && \
+# echo "=== Testing HOFM ===" && cd $home/HOFM && python run_expid.py --expid HOFM_test && \
+# echo "=== Testing InterHAt ===" && cd $home/InterHAt && python run_expid.py --expid InterHAt_test && \
+# echo "=== Testing LorentzFM ===" && cd $home/LorentzFM && python run_expid.py --expid LorentzFM_test && \
+# echo "=== Testing LR ===" && cd $home/LR && python run_expid.py --expid LR_test && \
+# echo "=== Testing MaskNet ===" && cd $home/MaskNet && python run_expid.py --expid MaskNet_test && \
+# echo "=== Testing NFM ===" && cd $home/NFM && python run_expid.py --expid NFM_test && \
+# echo "=== Testing ONN ===" && cd $home/ONN/ONN_torch && python run_expid.py --expid ONN_test && \
+# echo "=== Testing ONNv2 ===" && cd $home/ONN/ONN_torch && python run_expid.py --expid ONNv2_test && \
+# echo "=== Testing PPNet ===" && cd $home/PEPNet && python run_expid.py --expid PPNet_test && \
+# echo "=== Testing PNN ===" && cd $home/PNN && python run_expid.py --expid PNN_test && \
+# echo "=== Testing SAM ===" && cd $home/SAM && python run_expid.py --expid SAM_test && \
+# echo "=== Testing TransAct ===" && cd $home/TransAct && python run_expid.py --expid TransAct_test && \
+# echo "=== Testing WideDeep ===" && cd $home/WideDeep/WideDeep_torch && python run_expid.py --expid WideDeep_test && \
 echo "=== Testing WuKong ===" && cd $home/WuKong && python run_expid.py --expid WuKong_test && \
 echo "=== Testing xDeepFM ===" && cd $home/xDeepFM && python run_expid.py --expid xDeepFM_test && \
+# Multi-task recommendation
+echo "=== Testing ShareBottom ===" && cd $home/multitask/ShareBottom && python run_expid.py --expid ShareBottom_test && \
+echo "=== Testing MMoE ===" && cd $home/multitask/MMoE && python run_expid.py --expid MMoE_test && \
+echo "=== Testing PLE ===" && cd $home/multitask/PLE && python run_expid.py --expid PLE_test && \
+
 echo "All tests done."
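A closing aside on the `fuxictr_version.py` guards bumped throughout this patch: `>=` on version strings compares lexicographically, which can misorder multi-digit components. A short illustration, with a more robust variant that assumes the third-party `packaging` module is available (not part of this patch):

```python
# Lexicographic comparison: "1" sorts before "3", so 2.10.0 looks "older".
print("2.10.0" >= "2.3.4")  # False

from packaging.version import Version
print(Version("2.10.0") >= Version("2.3.4"))  # True
```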