diff --git a/baselines/DeepAR/ETTh1.py b/baselines/DeepAR/ETTh1.py new file mode 100644 index 00000000..94f336f5 --- /dev/null +++ b/baselines/DeepAR/ETTh1.py @@ -0,0 +1,103 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "ETTh1" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 7 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 50 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/ETTm1.py b/baselines/DeepAR/ETTm1.py new file mode 100644 index 00000000..00533a5c --- /dev/null +++ b/baselines/DeepAR/ETTm1.py @@ -0,0 +1,103 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model 
configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "ETTm1" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 7 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 50 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/Electricity.py b/baselines/DeepAR/Electricity.py new file mode 100644 index 00000000..b4eed750 --- /dev/null +++ b/baselines/DeepAR/Electricity.py @@ -0,0 +1,103 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "Electricity" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + 
"hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 321 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 15 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.USE_GPU = False diff --git a/baselines/DeepAR/ExchangeRate.py b/baselines/DeepAR/ExchangeRate.py new file mode 100644 index 00000000..2c174d3f --- /dev/null +++ b/baselines/DeepAR/ExchangeRate.py @@ -0,0 +1,103 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "ExchangeRate" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 8 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 15 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train 
data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/PEMS-BAY.py b/baselines/DeepAR/PEMS-BAY.py new file mode 100644 index 00000000..22c44e4d --- /dev/null +++ b/baselines/DeepAR/PEMS-BAY.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 325 +} +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 
+CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/PEMS03.py b/baselines/DeepAR/PEMS03.py new file mode 100644 index 00000000..b97b0f1e --- /dev/null +++ b/baselines/DeepAR/PEMS03.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 358 +} +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() 
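+# horizons (in time steps) at which point-wise metrics are reported; PEMS03 is sampled
+# every 5 minutes, so 3/6/12 steps correspond to 15/30/60-minute-ahead evaluation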
+CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/PEMS04.py b/baselines/DeepAR/PEMS04.py new file mode 100644 index 00000000..a74cc390 --- /dev/null +++ b/baselines/DeepAR/PEMS04.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 307 +} +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/PEMS04_LTSF.py b/baselines/DeepAR/PEMS04_LTSF.py new file mode 100644 index 00000000..51e015a0 --- /dev/null +++ b/baselines/DeepAR/PEMS04_LTSF.py @@ -0,0 +1,100 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# 
================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 307 +} +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 50 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 24 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 16 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/baselines/DeepAR/PEMS07.py b/baselines/DeepAR/PEMS07.py new file mode 100644 index 00000000..be72c230 --- /dev/null +++ b/baselines/DeepAR/PEMS07.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 
64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 883 +} +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/PEMS08_LTSF.py b/baselines/DeepAR/PEMS08_LTSF.py new file mode 100644 index 00000000..37776690 --- /dev/null +++ b/baselines/DeepAR/PEMS08_LTSF.py @@ -0,0 +1,100 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 +CFG.NULL_VAL = 0.0 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 170 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 50 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# 
train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/baselines/DeepAR/Weather.py b/baselines/DeepAR/Weather.py new file mode 100644 index 00000000..65bf134a --- /dev/null +++ b/baselines/DeepAR/Weather.py @@ -0,0 +1,103 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .arch import DeepAR +from .runner import DeepARRunner +from .loss import gaussian_loss + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DeepAR model configuration" +CFG.RUNNER = DeepARRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "Weather" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 96 +CFG.DATASET_OUTPUT_LEN = 336 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DeepAR" +CFG.MODEL.ARCH = DeepAR +CFG.MODEL.PARAM = { + "cov_feat_size" : 2, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : True, + "id_feat_size": 32, + "num_nodes": 21 + } +CFG.MODEL.FORWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gaussian_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 15 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= 
test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False + +# ================= evaluate ================= # +CFG.EVAL = EasyDict() +CFG.EVAL.HORIZONS = [3, 6, 12] diff --git a/baselines/DeepAR/run.sh b/baselines/DeepAR/run.sh new file mode 100644 index 00000000..d8c1b259 --- /dev/null +++ b/baselines/DeepAR/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# python experiments/train.py -c baselines/DeepAR/METR-LA.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/PEMS-BAY.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/PEMS03.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/PEMS04.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/PEMS07.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/PEMS08.py --gpus '0' + +# python experiments/train.py -c baselines/DeepAR/ETTh1.py --gpus '0' +# python experiments/train.py -c baselines/DeepAR/ETTm1.py --gpus '0' +python experiments/train.py -c baselines/DeepAR/Electricity.py --gpus '0' +python experiments/train.py -c baselines/DeepAR/Weather.py --gpus '0' +python experiments/train.py -c baselines/DeepAR/ExchangeRate.py --gpus '0' +python experiments/train.py -c baselines/DeepAR/PEMS04_LTSF.py --gpus '0' +python experiments/train.py -c baselines/DeepAR/PEMS08_LTSF.py --gpus '0' \ No newline at end of file diff --git a/baselines/DeepAR_M4/M4.py b/baselines/DeepAR_M4/M4.py new file mode 100644 index 00000000..8ce59b29 --- /dev/null +++ b/baselines/DeepAR_M4/M4.py @@ -0,0 +1,108 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import M4ForecastingDataset + +from .arch import DeepAR +from .loss import gaussian_loss +from .runner import DeepARRunner + +def get_cfg(seasonal_pattern): + assert seasonal_pattern in ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"] + prediction_len = {"Yearly": 6, "Quarterly": 8, "Monthly": 18, "Weekly": 13, "Daily": 14, "Hourly": 48}[seasonal_pattern] + num_nodes = {"Yearly": 23000, "Quarterly": 24000, "Monthly": 48000, "Weekly": 359, "Daily": 4227, "Hourly": 414}[seasonal_pattern] + history_size = 2 + history_len = history_size * prediction_len + + CFG = EasyDict() + + # ================= general ================= # + CFG.DESCRIPTION = "DeepAR M4" + CFG.RUNNER = DeepARRunner + CFG.DATASET_CLS = M4ForecastingDataset + CFG.DATASET_NAME = "M4_" + seasonal_pattern + CFG.DATASET_INPUT_LEN = history_len + CFG.DATASET_OUTPUT_LEN = prediction_len + CFG.GPU_NUM = 1 + + # ================= environment ================= # + CFG.ENV = EasyDict() + CFG.ENV.SEED = 1 + CFG.ENV.CUDNN = EasyDict() + CFG.ENV.CUDNN.ENABLED = True + + # ================= model ================= # + CFG.MODEL = EasyDict() + CFG.MODEL.NAME = "DeepAR" + CFG.MODEL.ARCH = DeepAR + CFG.MODEL.PARAM = { + "cov_feat_size" : 0, + "embedding_size" : 32, + "hidden_size" : 64, + "num_layers": 3, + "use_ts_id" : False, + "id_feat_size": None, + "num_nodes": None + } + CFG.MODEL.FORWARD_FEATURES = [0] # values, node id + CFG.MODEL.TARGET_FEATURES = [0] + + # ================= optim ================= # + CFG.TRAIN = EasyDict() + CFG.TRAIN.LOSS = gaussian_loss + 
CFG.TRAIN.OPTIM = EasyDict() + CFG.TRAIN.OPTIM.TYPE = "Adam" + CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.0005, + "weight_decay": 0.0001, + } + CFG.TRAIN.LR_SCHEDULER = EasyDict() + CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" + CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 + } + + # ================= train ================= # + CFG.TRAIN.CLIP_GRAD_PARAM = { + 'max_norm': 5.0 + } + CFG.TRAIN.NUM_EPOCHS = 101 + CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + ) + # train data + CFG.TRAIN.DATA = EasyDict() + # read data + CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME + # dataloader args, optional + CFG.TRAIN.DATA.BATCH_SIZE = 64 + CFG.TRAIN.DATA.PREFETCH = False + CFG.TRAIN.DATA.SHUFFLE = True + CFG.TRAIN.DATA.NUM_WORKERS = 2 + CFG.TRAIN.DATA.PIN_MEMORY = False + + # ================= test ================= # + CFG.TEST = EasyDict() + CFG.TEST.INTERVAL = 1 + # test data + CFG.TEST.DATA = EasyDict() + # read data + CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME + # dataloader args, optional + CFG.TEST.DATA.BATCH_SIZE = 64 + CFG.TEST.DATA.PREFETCH = False + CFG.TEST.DATA.SHUFFLE = False + CFG.TEST.DATA.NUM_WORKERS = 2 + CFG.TEST.DATA.PIN_MEMORY = False + + # ================= evaluate ================= # + CFG.EVAL = EasyDict() + CFG.EVAL.HORIZONS = [] + CFG.EVAL.SAVE_PATH = os.path.abspath(__file__ + "/..") + + return CFG diff --git a/baselines/DeepAR_M4/arch/__init__.py b/baselines/DeepAR_M4/arch/__init__.py new file mode 100644 index 00000000..6ec10582 --- /dev/null +++ b/baselines/DeepAR_M4/arch/__init__.py @@ -0,0 +1 @@ +from .deepar import DeepAR \ No newline at end of file diff --git a/baselines/DeepAR_M4/arch/deepar.py b/baselines/DeepAR_M4/arch/deepar.py new file mode 100644 index 00000000..d18c7a66 --- /dev/null +++ b/baselines/DeepAR_M4/arch/deepar.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .distributions import Gaussian + + +class DeepAR(nn.Module): + """ + Paper: DeepAR: Probabilistic Forecasting with Autoregressive Recurrent Networks; Link: https://arxiv.org/abs/1704.04110; Ref Code: https://github.com/jingw2/demand_forecast, https://github.com/husnejahan/DeepAR-pytorch, https://github.com/arrigonialberto86/deepar. + """ + + def __init__(self, cov_feat_size, embedding_size, hidden_size, num_layers, use_ts_id, id_feat_size=0, num_nodes=0) -> None: + """Init DeepAR. + + Args: + cov_feat_size (int): covariate feature size (e.g. time in day, day in week, etc.). + embedding_size (int): output size of the input embedding layer. + hidden_size (int): hidden size of the LSTM. + num_layers (int): number of LSTM layers. + use_ts_id (bool): whether to use time series id to construct spatial id embedding as additional features. + id_feat_size (int, optional): size of the spatial id embedding. Defaults to 0. + num_nodes (int, optional): number of nodes. Defaults to 0. 
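+
+        Note:
+            The LSTM input size is embedding_size + cov_feat_size + id_feat_size.
+
+        Example (illustrative sketch only; shapes assume the M4 config in this PR,
+        i.e. cov_feat_size=0 and use_ts_id=False)::
+
+            model = DeepAR(cov_feat_size=0, embedding_size=32, hidden_size=64,
+                           num_layers=3, use_ts_id=False)
+            out = model(history_data=torch.rand(4, 24, 1, 1),
+                        future_data=torch.rand(4, 12, 1, 1), train=True,
+                        history_mask=torch.ones(4, 24, 1),
+                        future_mask=torch.ones(4, 12, 1))
+            # out["prediction"], out["mus"], out["sigmas"]: [4, 35, 1, 1]  (L_in + L_out - 1 steps)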
+ """ + super().__init__() + self.use_ts_id = use_ts_id + # input embedding layer + self.input_embed = nn.Linear(1, embedding_size) + # spatial id embedding layer + if use_ts_id: + assert id_feat_size > 0, "id_feat_size must be greater than 0 if use_ts_id is True" + assert num_nodes > 0, "num_nodes must be greater than 0 if use_ts_id is True" + self.id_feat = nn.Parameter(torch.empty(num_nodes, id_feat_size)) + nn.init.xavier_uniform_(self.id_feat) + else: + id_feat_size = 0 + # the LSTM layer + self.encoder = nn.LSTM(embedding_size+cov_feat_size+id_feat_size, hidden_size, num_layers, bias=True, batch_first=True) + # the likelihood function + self.likelihood_layer = Gaussian(hidden_size, 1) + + def gaussian_sample(self, mu, sigma): + """Sampling. + + Args: + mu (torch.Tensor): mean values of distributions. + sigma (torch.Tensor): std values of distributions. + """ + mu = mu.squeeze(1) + sigma = sigma.squeeze(1) + gaussian = torch.distributions.Normal(mu, sigma) + ypred = gaussian.sample([1]).squeeze(0) + return ypred + + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, train: bool, history_mask: torch.Tensor, future_mask: torch.Tensor, **kwargs) -> torch.Tensor: + """Feed forward of DeepAR. + Reference code: https://github.com/jingw2/demand_forecast/blob/master/deepar.py + + Args: + history_data (torch.Tensor): history data. [B, L, N, C]. + future_data (torch.Tensor): future data. [B, L, N, C]. + train (bool): is training or not. + """ + mask = torch.cat([history_mask, future_mask], dim=1).unsqueeze(-1)[:, 1:, ...] + # mask = torch.where(mask == 0, torch.ones_like(mask) * 1e-5, mask) + # mask = torch.ones_like(mask) + # nornalization + means = history_data.mean(1, keepdim=True).detach() + stdev = torch.sqrt(torch.var(history_data, dim=1, keepdim=True, unbiased=False) + 1e-5) + history_data_normed = history_data - means + history_data_normed /= stdev + future_data_normed = future_data - means + future_data_normed /= stdev + + history_next = None + preds = [] + mus = [] + sigmas = [] + len_in, len_out = history_data.shape[1], future_data.shape[1] + B, _, N, C = history_data.shape + input_feat_full_normed = torch.cat([history_data_normed[:, :, :, 0:1], future_data_normed[:, :, :, 0:1]], dim=1) # B, L_in+L_out, N, 1 + input_feat_full = torch.cat([history_data[:, :, :, 0:1], future_data[:, :, :, 0:1]], dim=1) # B, L_in+L_out, N, 1 + + for t in range(1, len_in + len_out): + if not (t > len_in and not train): # not in the decoding stage when inferecing + history_next = input_feat_full_normed[:, t-1:t, :, 0:1] + embed_feat = self.input_embed(history_next) + # 检查nan + assert not torch.isnan(history_next).any(), "history_next中存在nan" + assert not torch.isnan(self.input_embed.weight).any(), "embed_feat中存在nan" + assert not torch.isnan(self.input_embed.bias).any(), "embed_feat中存在nan" + assert not torch.isnan(embed_feat).any(), "embed_feat中存在nan" + encoder_input = embed_feat + # lstm + B, _, N, C = encoder_input.shape # _ is 1 + encoder_input = encoder_input.transpose(1, 2).reshape(B * N, -1, C) + _, (h, c) = self.encoder(encoder_input) if t == 1 else self.encoder(encoder_input, (h, c)) + # distribution proj + mu, sigma = self.likelihood_layer(h[-1, :, :]) + history_next = self.gaussian_sample(mu, sigma).view(B, N).view(B, 1, N, 1) + mus.append(mu.view(B, N, 1).unsqueeze(1)) + sigmas.append(sigma.view(B, N, 1).unsqueeze(1)) + preds.append(history_next) + assert not torch.isnan(history_next).any() + + preds = torch.concat(preds, dim=1) + mus = torch.concat(mus, dim=1) + sigmas = 
torch.concat(sigmas, dim=1) + reals = input_feat_full[:, -preds.shape[1]:, :, :] + + # 检查mus和sigmas中是否存在nan + assert not torch.isnan(mus).any(), "mus中存在nan" + assert not torch.isnan(sigmas).any(), "sigmas中存在nan" + + # denormalization + preds = preds * stdev + means + mus = mus * stdev + means + sigmas = sigmas * stdev + means + + return {"prediction": preds * mask, "target": reals * mask, "mus": mus, "sigmas": sigmas, "mask_prior": mask} diff --git a/baselines/DeepAR_M4/arch/distributions.py b/baselines/DeepAR_M4/arch/distributions.py new file mode 100644 index 00000000..0c84d512 --- /dev/null +++ b/baselines/DeepAR_M4/arch/distributions.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn + + +class Gaussian(nn.Module): + + def __init__(self, hidden_size, output_size): + """ + Gaussian Likelihood Supports Continuous Data + Args: + input_size (int): hidden h_{i,t} column size + output_size (int): embedding size + """ + super(Gaussian, self).__init__() + self.mu_layer = nn.Linear(hidden_size, output_size) + self.sigma_layer = nn.Linear(hidden_size, output_size) + + def forward(self, h): + sigma_t = torch.log(1 + torch.exp(self.sigma_layer(h))) + 1e-6 + sigma_t = sigma_t.squeeze(0) + mu_t = self.mu_layer(h).squeeze(0) + return mu_t, sigma_t diff --git a/baselines/DeepAR_M4/loss/__init__.py b/baselines/DeepAR_M4/loss/__init__.py new file mode 100644 index 00000000..9b08b8a3 --- /dev/null +++ b/baselines/DeepAR_M4/loss/__init__.py @@ -0,0 +1 @@ +from .gaussian import gaussian_loss \ No newline at end of file diff --git a/baselines/DeepAR_M4/loss/gaussian.py b/baselines/DeepAR_M4/loss/gaussian.py new file mode 100644 index 00000000..9cf53631 --- /dev/null +++ b/baselines/DeepAR_M4/loss/gaussian.py @@ -0,0 +1,36 @@ +import torch +import numpy as np +from basicts.metrics import masked_mae + +def masked_mae_loss(prediction, target, pred_len, null_val = np.nan): + prediction = prediction[:, -pred_len:, :, :] + target = target[:, -pred_len:, :, :] + return masked_mae(prediction, target, null_val) + +def gaussian_loss(prediction, target, mus, sigmas, mask_prior, null_val = np.nan): + """Masked gaussian loss. Kindly note that the gaussian loss is calculated based on mu, sigma, and target. The prediction is sampled from N(mu, sigma), and is not used in the loss calculation (it will be used in the metrics calculation). + + Args: + prediction (torch.Tensor): prediction of model. [B, L, N, 1]. + target (torch.Tensor): ground truth. [B, L, N, 1]. + mus (torch.Tensor): the mean of gaussian distribution. [B, L, N, 1]. + sigmas (torch.Tensor): the std of gaussian distribution. [B, L, N, 1] + null_val (optional): null value. Defaults to np.nan. + """ + # mask + if np.isnan(null_val): + mask = ~torch.isnan(target) + else: + eps = 5e-5 + mask = ~torch.isclose(target, torch.tensor(null_val).expand_as(target).to(target.device), atol=eps, rtol=0.) 
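+    # the mask is normalized below so that its mean is 1, and NaNs from a potential
+    # division by zero are replaced with 0 (the same convention as BasicTS' masked metrics)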
+ mask = mask.float() + mask /= torch.mean((mask)) + mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) + + distribution = torch.distributions.Normal(mus, sigmas) + likelihood = distribution.log_prob(target) + likelihood = likelihood * mask + likelihood = likelihood * mask_prior + assert not torch.isnan(likelihood).any(), "likelihood中存在nan" + loss_g = -torch.mean(likelihood) + return loss_g diff --git a/baselines/DeepAR_M4/runner/__init__.py b/baselines/DeepAR_M4/runner/__init__.py new file mode 100644 index 00000000..1e41b855 --- /dev/null +++ b/baselines/DeepAR_M4/runner/__init__.py @@ -0,0 +1 @@ +from .deepar_runner import DeepARRunner diff --git a/baselines/DeepAR_M4/runner/deepar_runner.py b/baselines/DeepAR_M4/runner/deepar_runner.py new file mode 100644 index 00000000..ad5425f5 --- /dev/null +++ b/baselines/DeepAR_M4/runner/deepar_runner.py @@ -0,0 +1,141 @@ +from typing import Dict +import torch +from basicts.data.registry import SCALER_REGISTRY +from easytorch.utils.dist import master_only + +from basicts.runners.base_m4_runner import BaseM4Runner +from basicts.metrics import masked_mae +from basicts.utils import partial +from ..loss.gaussian import masked_mae_loss + +class DeepARRunner(BaseM4Runner): + def __init__(self, cfg: dict): + super().__init__(cfg) + self.forward_features = cfg["MODEL"].get("FORWARD_FEATURES", None) + self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None) + self.output_seq_len = cfg["DATASET_OUTPUT_LEN"] + self.metrics = cfg.get("METRICS", {"loss": self.loss, "real_mae": partial(masked_mae_loss, pred_len=self.output_seq_len), "full_mae": masked_mae}) + + def select_input_features(self, data: torch.Tensor) -> torch.Tensor: + """Select input features and reshape data to fit the target model. + + Args: + data (torch.Tensor): input history data, shape [B, L, N, C]. + + Returns: + torch.Tensor: reshaped data + """ + + # select feature using self.forward_features + if self.forward_features is not None: + data = data[:, :, :, self.forward_features] + return data + + def select_target_features(self, data: torch.Tensor) -> torch.Tensor: + """Select target features and reshape data back to the BasicTS framework + + Args: + data (torch.Tensor): prediction of the model with arbitrary shape. + + Returns: + torch.Tensor: reshaped data with shape [B, L, N, C] + """ + + # select feature using self.target_features + data = data[:, :, :, self.target_features] + return data + + def rescale_data(self, input_data: Dict) -> Dict: + """Rescale data. + + Args: + data (Dict): Dict of data to be re-scaled. + + Returns: + Dict: Dict re-scaled data. + """ + + if self.if_rescale: + input_data["inputs"] = SCALER_REGISTRY.get(self.scaler["func"])(input_data["inputs"], **self.scaler["args"]) + input_data["prediction"] = SCALER_REGISTRY.get(self.scaler["func"])(input_data["prediction"], **self.scaler["args"]) + input_data["target"] = SCALER_REGISTRY.get(self.scaler["func"])(input_data["target"], **self.scaler["args"]) + if "mus" in input_data.keys(): + input_data["mus"] = SCALER_REGISTRY.get(self.scaler["func"])(input_data["mus"], **self.scaler["args"]) + if "sigmas" in input_data.keys(): + input_data["sigmas"] = SCALER_REGISTRY.get(self.scaler["func"])(input_data["sigmas"], **self.scaler["args"]) + return input_data + + def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True, **kwargs) -> tuple: + """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. 
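+        When rescaling is enabled, it is applied separately via rescale_data().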
+ + Args: + data (tuple): (future_data, history_data, future_mask, history_mask). + epoch (int, optional): epoch number. Defaults to None. + iter_num (int, optional): iteration number. Defaults to None. + train (bool, optional): if in the training process. Defaults to True. + + Returns: + tuple: (prediction, real_value) + """ + + # preprocess + future_data, history_data, future_mask, history_mask = data + history_data = self.to_running_device(history_data) # B, L, 1, C + future_data = self.to_running_device(future_data) # B, L, 1, C + history_mask = self.to_running_device(history_mask) # B, L, 1 + future_mask = self.to_running_device(future_mask) # B, L, 1 + + batch_size, length, num_nodes, _ = future_data.shape + + history_data = self.select_input_features(history_data) + future_data_4_dec = self.select_input_features(future_data) + + # model forward + model_return = self.model(history_data=history_data, future_data=future_data_4_dec, history_mask=history_mask, future_mask=future_mask, batch_seen=iter_num, epoch=epoch, train=train) + if isinstance(model_return, torch.Tensor): model_return = {"prediction": model_return * future_mask.unsqueeze(-1)} + if "inputs" not in model_return: model_return["inputs"] = self.select_target_features(history_data) + if "target" not in model_return: model_return["target"] = self.select_target_features(future_data * future_mask.unsqueeze(-1)) + return model_return + + @torch.no_grad() + @master_only + def test(self): + """Evaluate the model. + + Args: + train_epoch (int, optional): current epoch if in training process. + """ + + # TODO: fix OOM: especially when inputs, targets, and predictions are saved at the same time. + # test loop + prediction =[] + target = [] + inputs = [] + mus = [] + sigmas = [] + mask_priors = [] + for _, data in enumerate(self.test_data_loader): + forward_return = self.forward(data, epoch=None, iter_num=None, train=False) + if not self.if_evaluate_on_gpu: + forward_return["prediction"] = forward_return["prediction"].detach().cpu() + forward_return["target"] = forward_return["target"].detach().cpu() + forward_return["inputs"] = forward_return["inputs"].detach().cpu() + forward_return["mus"] = forward_return["mus"].detach().cpu() + forward_return["sigmas"] = forward_return["sigmas"].detach().cpu() + forward_return["mask_prior"] = forward_return["mask_prior"].detach().cpu() + prediction.append(forward_return["prediction"]) + target.append(forward_return["target"]) + inputs.append(forward_return["inputs"]) + mus.append(forward_return["mus"]) + sigmas.append(forward_return["sigmas"]) + mask_priors.append(forward_return["mask_prior"]) + prediction = torch.cat(prediction, dim=0) + target = torch.cat(target, dim=0) + inputs = torch.cat(inputs, dim=0) + mus = torch.cat(mus, dim=0) + sigmas = torch.cat(sigmas, dim=0) + mask_priors = torch.cat(mask_priors, dim=0) + # re-scale data + returns_all = self.rescale_data({"prediction": prediction[:, -self.output_seq_len:, :, :], "target": target[:, -self.output_seq_len:, :, :], "inputs": inputs, "mus": mus[:, -self.output_seq_len:, :, :], "sigmas": sigmas[:, -self.output_seq_len:, :, :], "mask_prior": mask_priors[:, -self.output_seq_len:, :, :]}) + # evaluate + self.save_prediction(returns_all) diff --git a/baselines/LightGBM/PEMS08.ipynb b/baselines/LightGBM/PEMS08.ipynb deleted file mode 100644 index 6f8a1f67..00000000 --- a/baselines/LightGBM/PEMS08.ipynb +++ /dev/null @@ -1,198 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - 
"source": [ - "import torch\n", - "import lightgbm as lgb\n", - "\n", - "import os\n", - "import sys\n", - "PROJECT_DIR = os.path.abspath(\"../..\")\n", - "sys.path.append(PROJECT_DIR)\n", - "\n", - "from basicts.utils import load_pkl\n", - "from basicts.data import TimeSeriesForecastingDataset\n", - "from basicts.metrics import masked_mae, masked_rmse, masked_mape\n", - "from basicts.data import SCALER_REGISTRY\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Construct hyper parameters" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# construct configs\n", - "dataset_name = \"PEMS08\"\n", - "input_len = 12\n", - "output_len = 12\n", - "gpu_num = 1\n", - "null_val = 0.0\n", - "train_data_dir = \"datasets/\" + dataset_name\n", - "rescale = True\n", - "batch_size = 128 # only used for collecting data\n", - "\n", - "# lgm params\n", - "params = {\n", - " 'boosting_type': 'gbdt',\n", - " 'objective': 'regression',\n", - " 'metric': 'l2',\n", - " 'num_leaves': 31,\n", - " 'learning_rate': 0.05,\n", - " 'feature_fraction': 0.9,\n", - " 'bagging_fraction': 0.8,\n", - " 'bagging_freq': 5,\n", - " 'verbose': 0\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Construct datasets" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# construct dataset\n", - "data_file_path = PROJECT_DIR + \"/{0}/data_in_{1}_out_{2}_rescale_{3}.pkl\".format(train_data_dir, input_len, output_len, rescale)\n", - "index_file_path = PROJECT_DIR + \"/{0}/index_in_{1}_out_{2}_rescale_{3}.pkl\".format(train_data_dir, input_len, output_len, rescale)\n", - "\n", - "train_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode=\"train\")\n", - "train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n", - "\n", - "valid_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode=\"valid\")\n", - "valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)\n", - "\n", - "test_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode=\"test\")\n", - "test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# training & validation\n", - "Xs_train = []\n", - "Ys_train = []\n", - "Xs_valid = []\n", - "Ys_valid = []\n", - "Xs_test = []\n", - "Ys_test = []\n", - "\n", - "for i, (data, target) in enumerate(train_loader):\n", - " B, L, N, C = data.shape\n", - " data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " Xs_train.append(data)\n", - " Ys_train.append(target)\n", - "\n", - "for i, (data, target) in enumerate(valid_loader):\n", - " B, L, N, C = data.shape\n", - " data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " Xs_valid.append(data)\n", - " Ys_valid.append(target)\n", - "\n", - "for i, (data, target) in enumerate(test_loader):\n", - " B, L, N, C = data.shape\n", - " data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0]\n", - " Xs_test.append(data)\n", - " Ys_test.append(target)\n", - "\n", - "Xs_train = torch.cat(Xs_train, dim=0).numpy()\n", - "Ys_train = 
torch.cat(Ys_train, dim=0).numpy()\n", - "Xs_valid = torch.cat(Xs_valid, dim=0).numpy()\n", - "Ys_valid = torch.cat(Ys_valid, dim=0).numpy()\n", - "Xs_test = torch.cat(Xs_test, dim=0).numpy()\n", - "Ys_test = torch.cat(Ys_test, dim=0).numpy()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Direct Multi-step Forecasting (Train)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# direct forecasting\n", - "from sklearn.multioutput import MultiOutputRegressor\n", - "model = MultiOutputRegressor(lgb.LGBMRegressor(), n_jobs = -1)\n", - "model.fit(Xs_train, Ys_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Direct Multi-step Forecasting (Test)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# inference\n", - "preds_test = model.predict(Xs_test)\n", - "# rescale\n", - "scaler = load_pkl(PROJECT_DIR + \"/{0}/scaler_in_{1}_out_{2}_rescale_{3}.pkl\".format(train_data_dir, input_len, output_len, rescale))\n", - "preds_test = torch.Tensor(preds_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1)\n", - "Ys_test = torch.Tensor(Ys_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1)\n", - "prediction = SCALER_REGISTRY.get(scaler[\"func\"])(preds_test, **scaler[\"args\"])\n", - "real_value = SCALER_REGISTRY.get(scaler[\"func\"])(Ys_test, **scaler[\"args\"])\n", - "# print results\n", - "print(\"MAE: \", masked_mae(prediction, real_value, null_val).item())\n", - "print(\"RMSE: \", masked_rmse(prediction, real_value, null_val).item())\n", - "print(\"MAPE: {:.2f}%\".format(masked_mape(prediction, real_value, null_val) * 100))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "BasicTS", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.15" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/baselines/LightGBM/Weather.py b/baselines/LightGBM/Weather.py new file mode 100644 index 00000000..a7931ec2 --- /dev/null +++ b/baselines/LightGBM/Weather.py @@ -0,0 +1,21 @@ +import os +import sys +sys.path.append(os.path.abspath(__file__ + "/..")) + +# from evaluate_ar import evaluate +from evaluate import evaluate + +import numpy as np + +# construct configs +dataset_name = "Weather" +input_len = 336 +output_len = 336 +gpu_num = 1 +null_val = np.nan +train_data_dir = "datasets/" + dataset_name +rescale = True +batch_size = 128 # only used for collecting data +project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +# print(evaluate(project_dir, train_data_dir, input_len, output_len, rescale, null_val, batch_size, patch_len=1)) +print(evaluate(project_dir, train_data_dir, input_len, output_len, rescale, null_val, batch_size)) diff --git a/baselines/LightGBM/evaluate.py b/baselines/LightGBM/evaluate.py new file mode 100644 index 00000000..73cdc87c --- /dev/null +++ b/baselines/LightGBM/evaluate.py @@ -0,0 +1,80 @@ +import torch +import lightgbm as lgb +import os +import sys +sys.path.append("/workspace/S22/BasicTS") +from basicts.utils import load_pkl +from basicts.data import TimeSeriesForecastingDataset +from basicts.metrics import masked_mae, masked_rmse, masked_mape, masked_wape +from basicts.data 
import SCALER_REGISTRY + + +def evaluate(project_dir, train_data_dir, input_len, output_len, rescale, null_val, batch_size): + + # construct dataset + data_file_path = project_dir + "/{0}/data_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + index_file_path = project_dir + "/{0}/index_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + + train_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="train") + train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True) + + valid_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="valid") + valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False) + + test_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="test") + test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False) + + # training & validation + Xs_train = [] + Ys_train = [] + Xs_valid = [] + Ys_valid = [] + Xs_test = [] + Ys_test = [] + + for i, (target, data) in enumerate(train_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_train.append(data) + Ys_train.append(target) + + for i, (target, data) in enumerate(valid_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_valid.append(data) + Ys_valid.append(target) + + for i, (target, data) in enumerate(test_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_test.append(data) + Ys_test.append(target) + + Xs_train = torch.cat(Xs_train, dim=0).numpy() + Ys_train = torch.cat(Ys_train, dim=0).numpy() + Xs_valid = torch.cat(Xs_valid, dim=0).numpy() + Ys_valid = torch.cat(Ys_valid, dim=0).numpy() + Xs_test = torch.cat(Xs_test, dim=0).numpy() + Ys_test = torch.cat(Ys_test, dim=0).numpy() + + # direct forecasting + from sklearn.multioutput import MultiOutputRegressor + model = MultiOutputRegressor(lgb.LGBMRegressor(), n_jobs = -1) + model.fit(Xs_train, Ys_train) + # inference + preds_test = model.predict(Xs_test) + print(preds_test.shape) + # rescale + scaler = load_pkl(project_dir + "/{0}/scaler_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale)) + preds_test = torch.Tensor(preds_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1) + Ys_test = torch.Tensor(Ys_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1) + prediction = SCALER_REGISTRY.get(scaler["func"])(preds_test, **scaler["args"]) + real_value = SCALER_REGISTRY.get(scaler["func"])(Ys_test, **scaler["args"]) + # print results + print("MAE: ", masked_mae(prediction, real_value, null_val).item()) + print("RMSE: ", masked_rmse(prediction, real_value, null_val).item()) + print("MAPE: {:.2f}%".format(masked_mape(prediction, real_value, null_val) * 100)) + print("WAPE: {:.2f}%".format(masked_wape(prediction, real_value, null_val) * 100)) diff --git a/baselines/LightGBM/evaluate_ar.py b/baselines/LightGBM/evaluate_ar.py new file mode 100644 index 00000000..8042a847 --- /dev/null +++ b/baselines/LightGBM/evaluate_ar.py @@ -0,0 +1,96 @@ +import torch +import lightgbm as lgb +import os +import sys +sys.path.append("/workspace/S22/BasicTS") +import numpy as np +from tqdm import tqdm +from 
basicts.utils import load_pkl +from basicts.data import TimeSeriesForecastingDataset +from basicts.metrics import masked_mae, masked_rmse, masked_mape, masked_wape +from basicts.data import SCALER_REGISTRY + + +def evaluate(project_dir, train_data_dir, input_len, output_len, rescale, null_val, batch_size, patch_len, down_sampling=1): + assert output_len % patch_len == 0 + num_steps = output_len // patch_len + # construct dataset + data_file_path = project_dir + "/{0}/data_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + index_file_path = project_dir + "/{0}/index_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + + train_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="train") + train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True) + + valid_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="valid") + valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False) + + test_set = TimeSeriesForecastingDataset(data_file_path, index_file_path, mode="test") + test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False) + + # training & validation + Xs_train = [] + Ys_train = [] + Xs_valid = [] + Ys_valid = [] + Xs_test = [] + Ys_test = [] + + for i, (target, data) in enumerate(train_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + B, L, N, C = target.shape + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_train.append(data) + Ys_train.append(target) + + Xs_train = torch.cat(Xs_train, dim=0).numpy()[::down_sampling, :] + Ys_train = torch.cat(Ys_train, dim=0).numpy()[::down_sampling, :][:, :patch_len] + print("Xs_train: ", Xs_train.shape) + + # direct forecasting + from sklearn.multioutput import MultiOutputRegressor + model = MultiOutputRegressor(lgb.LGBMRegressor(), n_jobs = -1) + model.fit(Xs_train, Ys_train) + + import pickle + # save model + with open("model.pkl", "wb") as f: + pickle.dump(model, f) + + for i, (target, data) in enumerate(test_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + B, L, N, C = target.shape + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_test.append(data) + Ys_test.append(target) + + Xs_test = torch.cat(Xs_test, dim=0).numpy() + Ys_test = torch.cat(Ys_test, dim=0).numpy() + print("Xs_test: ", Xs_test.shape) + + # inference + preds_test = [] + input_data = Xs_test + + for i in tqdm(range(num_steps)): + # Predict the next step + pred_step = model.predict(input_data) + preds_test.append(pred_step) + # Update input_data to include predicted step for next prediction + input_data = np.concatenate([input_data[:, patch_len:], pred_step[:, :]], axis=1) + # concat preds_test + # preds_test = np.vstack(preds_test).T + preds_test = np.concatenate(preds_test, axis=1) + + # rescale + scaler = load_pkl(project_dir + "/{0}/scaler_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale)) + preds_test = torch.Tensor(preds_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1) + Ys_test = torch.Tensor(Ys_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1) + prediction = SCALER_REGISTRY.get(scaler["func"])(preds_test, **scaler["args"]) + real_value = SCALER_REGISTRY.get(scaler["func"])(Ys_test, **scaler["args"]) + # print results + print("MAE: ", masked_mae(prediction, real_value, null_val).item()) + 
print("RMSE: ", masked_rmse(prediction, real_value, null_val).item()) + print("MAPE: {:.2f}%".format(masked_mape(prediction, real_value, null_val) * 100)) + print("WAPE: {:.2f}%".format(masked_wape(prediction, real_value, null_val) * 100)) diff --git a/baselines/LightGBM/evaluate_m4_ar.py b/baselines/LightGBM/evaluate_m4_ar.py new file mode 100644 index 00000000..03768745 --- /dev/null +++ b/baselines/LightGBM/evaluate_m4_ar.py @@ -0,0 +1,90 @@ +import torch +import lightgbm as lgb +import os +import sys +sys.path.append("/workspace/S22/BasicTS") +import numpy as np +from tqdm import tqdm +from basicts.utils import load_pkl +from basicts.data import M4ForecastingDataset +from basicts.metrics import masked_mae, masked_rmse, masked_mape, masked_wape +from basicts.data import SCALER_REGISTRY + + +def evaluate(project_dir, train_data_dir, input_len, output_len, rescale, null_val, batch_size, patch_len, down_sampling=1, seasonal_pattern=None): + assert output_len % patch_len == 0 + num_steps = output_len // patch_len + # construct dataset + data_file_path = project_dir + "/{0}/data_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + mask_file_path = project_dir + "/{0}/mask_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + index_file_path = project_dir + "/{0}/index_in_{1}_out_{2}_rescale_{3}.pkl".format(train_data_dir, input_len, output_len, rescale) + + train_set = M4ForecastingDataset(data_file_path, index_file_path, mask_file_path, mode="train") + train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True) + + test_set = M4ForecastingDataset(data_file_path, index_file_path, mask_file_path, mode="test") + test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False) + + # training & validation + Xs_train = [] + Ys_train = [] + Xs_test = [] + Ys_test = [] + + for i, (target, data, future_mask, history_mask) in enumerate(train_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + B, L, N, C = target.shape + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_train.append(data) + Ys_train.append(target) + + Xs_train = torch.cat(Xs_train, dim=0).numpy()[::down_sampling, :] + Ys_train = torch.cat(Ys_train, dim=0).numpy()[::down_sampling, :][:, :patch_len] + print("Xs_train: ", Xs_train.shape) + + # direct forecasting + from sklearn.multioutput import MultiOutputRegressor + model = MultiOutputRegressor(lgb.LGBMRegressor(), n_jobs = -1) + model.fit(Xs_train, Ys_train) + + for i, (target, data, future_mask, history_mask) in enumerate(test_loader): + B, L, N, C = data.shape + data = data.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + B, L, N, C = target.shape + target = target.transpose(1, 2).reshape(B*N, L, C)[:, :, 0] + Xs_test.append(data) + Ys_test.append(target) + + Xs_test = torch.cat(Xs_test, dim=0).numpy() + Ys_test = torch.cat(Ys_test, dim=0).numpy() + print("Xs_test: ", Xs_test.shape) + + # inference + preds_test = [] + input_data = Xs_test + + for i in tqdm(range(num_steps)): + # Predict the next step + pred_step = model.predict(input_data) + preds_test.append(pred_step) + # Update input_data to include predicted step for next prediction + input_data = np.concatenate([input_data[:, patch_len:], pred_step[:, :]], axis=1) + # concat preds_test + # preds_test = np.vstack(preds_test).T + preds_test = np.concatenate(preds_test, axis=1) + + # rescale + preds_test = torch.Tensor(preds_test).view(-1, N, 
output_len).transpose(1, 2).unsqueeze(-1) + Ys_test = torch.Tensor(Ys_test).view(-1, N, output_len).transpose(1, 2).unsqueeze(-1) + prediction = preds_test + real_value = Ys_test + np.save("/workspace/S22/BasicTS/baselines/LightGBM/M4_{0}.npy".format(seasonal_pattern), prediction.unsqueeze(-1).unsqueeze(-1).numpy()) + + # print results + print("MAE: ", masked_mae(prediction, real_value, null_val).item()) + print("RMSE: ", masked_rmse(prediction, real_value, null_val).item()) + print("MAPE: {:.2f}%".format(masked_mape(prediction, real_value, null_val) * 100)) + print("WAPE: {:.2f}%".format(masked_wape(prediction, real_value, null_val) * 100)) + # save + \ No newline at end of file diff --git a/baselines/LightGBM/run.sh b/baselines/LightGBM/run.sh new file mode 100644 index 00000000..a0724000 --- /dev/null +++ b/baselines/LightGBM/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# python baselines/LightGBM/METR-LA.py +# python baselines/LightGBM/PEMS-BAY.py +# python baselines/LightGBM/PEMS03.py +# python baselines/LightGBM/PEMS04.py +# python baselines/LightGBM/PEMS07.py +# python baselines/LightGBM/PEMS08.py + +# python baselines/LightGBM/ETTh1.py +# python baselines/LightGBM/ETTm1.py +# python baselines/LightGBM/Weather.py +# python baselines/LightGBM/PEMS08_ltsf.py +# python baselines/LightGBM/PEMS04_ltsf.py + +python baselines/LightGBM/Electricity.py +python baselines/LightGBM/ExchangeRate.py diff --git a/baselines/NBeats_M4/M4.py b/baselines/NBeats_M4/M4.py index 907cfacb..f347957b 100644 --- a/baselines/NBeats_M4/M4.py +++ b/baselines/NBeats_M4/M4.py @@ -82,7 +82,7 @@ def get_cfg(seasonal_pattern): CFG.TRAIN.CLIP_GRAD_PARAM = { 'max_norm': 5.0 } - CFG.TRAIN.NUM_EPOCHS = 100 + CFG.TRAIN.NUM_EPOCHS = 52 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( "checkpoints", "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) @@ -100,7 +100,7 @@ def get_cfg(seasonal_pattern): # ================= test ================= # CFG.TEST = EasyDict() - CFG.TEST.INTERVAL = CFG.TRAIN.NUM_EPOCHS + CFG.TEST.INTERVAL = 52 # test data CFG.TEST.DATA = EasyDict() # read data diff --git a/baselines/PatchTST/M4.py b/baselines/PatchTST/M4.py new file mode 100644 index 00000000..edf115a9 --- /dev/null +++ b/baselines/PatchTST/M4.py @@ -0,0 +1,119 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import M4ForecastingDataset +from basicts.runners import M4ForecastingRunner + +from .arch import PatchTST + +def get_cfg(seasonal_pattern): + assert seasonal_pattern in ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"] + prediction_len = {"Yearly": 6, "Quarterly": 8, "Monthly": 18, "Weekly": 13, "Daily": 14, "Hourly": 48}[seasonal_pattern] + history_size = 2 + history_len = history_size * prediction_len + + CFG = EasyDict() + + # ================= general ================= # + CFG.DESCRIPTION = "PatchTST model configuration" + CFG.RUNNER = M4ForecastingRunner + CFG.DATASET_CLS = M4ForecastingDataset + CFG.DATASET_NAME = "M4_" + seasonal_pattern + CFG.DATASET_INPUT_LEN = history_len + CFG.DATASET_OUTPUT_LEN = prediction_len + CFG.GPU_NUM = 1 + + # ================= environment ================= # + CFG.ENV = EasyDict() + CFG.ENV.SEED = 1 + CFG.ENV.CUDNN = EasyDict() + CFG.ENV.CUDNN.ENABLED = True + + # ================= model ================= # + CFG.MODEL = EasyDict() + CFG.MODEL.NAME = "PatchTST" + CFG.MODEL.ARCH = PatchTST + 
CFG.MODEL.PARAM = { + "enc_in": 1, # num nodes + "seq_len": CFG.DATASET_INPUT_LEN, # input sequence length + "pred_len": CFG.DATASET_OUTPUT_LEN, # prediction sequence length + "e_layers": 3, # num of encoder layers + "n_heads": 4, + "d_model": 16, + "d_ff": 128, + "dropout": 0.3, + "fc_dropout": 0.3, + "head_dropout": 0.0, + "patch_len": 2, + "stride": 1, + "individual": 0, # individual head; True 1 False 0 + "padding_patch": "end", # None: None; end: padding on the end + "revin": 1, # RevIN; True 1 False 0 + "affine": 0, # RevIN-affine; True 1 False 0 + "subtract_last": 0, # 0: subtract mean; 1: subtract last + "decomposition": 0, # decomposition; True 1 False 0 + "kernel_size": 2, # decomposition-kernel + } + CFG.MODEL.FORWARD_FEATURES = [0] + CFG.MODEL.TARGET_FEATURES = [0] + + # ================= optim ================= # + CFG.TRAIN = EasyDict() + CFG.TRAIN.LOSS = masked_mae + CFG.TRAIN.OPTIM = EasyDict() + CFG.TRAIN.OPTIM.TYPE = "Adam" + CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, + } + CFG.TRAIN.LR_SCHEDULER = EasyDict() + CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" + CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 + } + + # ================= train ================= # + CFG.TRAIN.CLIP_GRAD_PARAM = { + 'max_norm': 5.0 + } + CFG.TRAIN.NUM_EPOCHS = 100 + CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + ) + # train data + CFG.TRAIN.DATA = EasyDict() + # read data + CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME + # dataloader args, optional + CFG.TRAIN.DATA.BATCH_SIZE = 64 + CFG.TRAIN.DATA.PREFETCH = False + CFG.TRAIN.DATA.SHUFFLE = True + CFG.TRAIN.DATA.NUM_WORKERS = 2 + CFG.TRAIN.DATA.PIN_MEMORY = False + + # ================= test ================= # + CFG.TEST = EasyDict() + CFG.TEST.INTERVAL = 5 + # test data + CFG.TEST.DATA = EasyDict() + # read data + CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME + # dataloader args, optional + CFG.TEST.DATA.BATCH_SIZE = 64 + CFG.TEST.DATA.PREFETCH = False + CFG.TEST.DATA.SHUFFLE = False + CFG.TEST.DATA.NUM_WORKERS = 2 + CFG.TEST.DATA.PIN_MEMORY = False + + # ================= evaluate ================= # + CFG.EVAL = EasyDict() + CFG.EVAL.HORIZONS = [] + CFG.EVAL.SAVE_PATH = os.path.abspath(__file__ + "/..") + + return CFG diff --git a/baselines/STID_M4/M4.py b/baselines/STID_M4/M4.py index d7a04e5e..d845540d 100644 --- a/baselines/STID_M4/M4.py +++ b/baselines/STID_M4/M4.py @@ -77,7 +77,7 @@ def get_cfg(seasonal_pattern): CFG.TRAIN.CLIP_GRAD_PARAM = { 'max_norm': 5.0 } - CFG.TRAIN.NUM_EPOCHS = 100 + CFG.TRAIN.NUM_EPOCHS = 99 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( "checkpoints", "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])