Commit

Add GRU/LSTM/Mamba/Transformer to the model list.

zhengbi-yong committed Oct 24, 2024
1 parent 83b7940 commit 1b77fe9
Showing 16 changed files with 949 additions and 37 deletions.
16 changes: 13 additions & 3 deletions COMMANDS.md
@@ -2,10 +2,22 @@

## Training

### WINDOWS+GPU

```bash
python src/train.py experiment=PPG_FieldStudy_Base callbacks=PPG_FieldStudy logger=tensorboard

python src/train.py experiment=PPG_FieldStudy_Base callbacks=PPG_FieldStudy logger=tensorboard trainer=gpu
python src/train.py experiment=PPG_FieldStudy_Base logger=tensorboard callbacks=PPG_FieldStudy trainer=gpu debug=fdr
```

### WINDOWS+CPU

```powershell
python src/train.py experiment=PPG_FieldStudy_Base callbacks=PPG_FieldStudy logger=tensorboard trainer.accelerator=cpu
python src/train.py experiment=PPG_FieldStudy_LSTM callbacks=PPG_FieldStudy logger=tensorboard trainer.accelerator=cpu
```
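With the new experiment configs in place, all four models can also be trained in a single sweep using Hydra's multirun flag (a sketch; assumes the stock Hydra launcher and the experiment names added in this commit):

```bash
python src/train.py -m experiment=PPG_FieldStudy_GRU,PPG_FieldStudy_LSTM,PPG_FieldStudy_Mamba,PPG_FieldStudy_Transformer callbacks=PPG_FieldStudy logger=tensorboard trainer=gpu
```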

## Viewing Experiment Results

@@ -21,5 +33,3 @@ tensorboard --logdir=./logs/train/runs/
```bash
python src/eval.py ckpt_path="D:\lightning-hydra-template\logs\train\runs\2024-10-06_11-35-20\checkpoints\epoch_005.ckpt"
```


6 changes: 6 additions & 0 deletions README.md
@@ -9,3 +9,9 @@ Related models:
3. Transformer
4. Mamba
5. KAN

## Datasets

### PPG_FieldStudy

https://www.kaggle.com/datasets/dishantvishwakarma/ppg-dataset-shared/data
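For orientation, each subject in this dataset ships as a Python pickle. A minimal loading sketch follows; the file path, the `latin1` encoding, and the `signal`/`label` keys are assumptions based on common distributions of this dataset, so adjust them to the Kaggle copy:

```python
import pickle

# Load one subject's recording (hypothetical path; match it to the Kaggle layout).
with open("data/PPG_FieldStudy/S1/S1.pkl", "rb") as f:
    record = pickle.load(f, encoding="latin1")  # files were pickled under Python 2

signal = record["signal"]  # nested dict with "chest" and "wrist" sensor arrays
labels = record["label"]   # ground-truth heart rate per window
print(list(signal["chest"]), list(signal["wrist"]))
```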
46 changes: 46 additions & 0 deletions configs/experiment/PPG_FieldStudy_GRU.yaml
@@ -0,0 +1,46 @@
# @package _global_

# to execute this experiment run:
# python src/train.py experiment=PPG_FieldStudy_GRU

defaults:
- override /data: PPG_FieldStudy
- override /model: PPGGRU
- override /callbacks: default
- override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["PPG_FieldStudy", "PPGGRU"]

seed: 12345

trainer:
  min_epochs: 10
  max_epochs: 50
  gradient_clip_val: 0.5
  accelerator: "gpu"
  devices: 1
  callbacks: [progress_bar]
  enable_checkpointing: True

model:
  input_dim: 14
  hidden_dim: 128
  num_layers: 2
  bidirectional: True
  dropout: 0.3
  lr: 1e-3
  scheduler_step_size: 10
  scheduler_gamma: 0.1

data:
  batch_size: 64

logger:
  wandb:
    tags: ${tags}
    group: "PPG_FieldStudy"
  aim:
    experiment: "PPG_FieldStudy"
46 changes: 46 additions & 0 deletions configs/experiment/PPG_FieldStudy_LSTM.yaml
@@ -0,0 +1,46 @@
# @package _global_

# to execute this experiment run:
# python src/train.py experiment=PPG_FieldStudy_LSTM

defaults:
- override /data: PPG_FieldStudy
- override /model: PPGLSTM
- override /callbacks: default
- override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["PPG_FieldStudy", "PPGLSTM"]

seed: 12345

trainer:
  min_epochs: 10
  max_epochs: 50
  gradient_clip_val: 0.5
  accelerator: "gpu"
  devices: 1
  callbacks: [progress_bar]
  enable_checkpointing: True

model:
  input_dim: 14
  hidden_dim: 128
  num_layers: 2
  bidirectional: True
  dropout: 0.3
  lr: 1e-3
  scheduler_step_size: 10
  scheduler_gamma: 0.1

data:
  batch_size: 64

logger:
  wandb:
    tags: ${tags}
    group: "PPG_FieldStudy"
  aim:
    experiment: "PPG_FieldStudy"
47 changes: 47 additions & 0 deletions configs/experiment/PPG_FieldStudy_Mamba.yaml
@@ -0,0 +1,47 @@
# @package _global_

# to execute this experiment run:
# python src/train.py experiment=PPG_FieldStudy_Mamba

defaults:
- override /data: PPG_FieldStudy
- override /model: PPGMamba
- override /callbacks: default
- override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["PPG_FieldStudy", "PPGMamba"]

seed: 12345

trainer:
  min_epochs: 10
  max_epochs: 50
  gradient_clip_val: 0.5
  accelerator: "gpu"
  devices: 1
  callbacks: [progress_bar]
  enable_checkpointing: True

model:
  input_dim: 14
  d_model: 128
  d_state: 64
  d_conv: 4
  expand: 2
  dropout: 0.3
  lr: 1e-3
  scheduler_step_size: 10
  scheduler_gamma: 0.1

data:
  batch_size: 64

logger:
  wandb:
    tags: ${tags}
    group: "PPG_FieldStudy"
  aim:
    experiment: "PPG_FieldStudy"
47 changes: 47 additions & 0 deletions configs/experiment/PPG_FieldStudy_Transformer.yaml
@@ -0,0 +1,47 @@
# @package _global_

# to execute this experiment run:
# python src/train.py experiment=PPG_FieldStudy_Transformer

defaults:
- override /data: PPG_FieldStudy
- override /model: PPGTransformer
- override /callbacks: default
- override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["PPG_FieldStudy", "PPGTransformer"]

seed: 12345

trainer:
  min_epochs: 10
  max_epochs: 50
  gradient_clip_val: 0.5
  accelerator: "gpu"
  devices: 1
  callbacks: [progress_bar]
  enable_checkpointing: True

model:
  input_dim: 14
  d_model: 128
  nhead: 8
  num_encoder_layers: 4
  dim_feedforward: 256
  dropout: 0.1
  lr: 1e-3
  scheduler_step_size: 10
  scheduler_gamma: 0.1

data:
  batch_size: 64

logger:
  wandb:
    tags: ${tags}
    group: "PPG_FieldStudy"
  aim:
    experiment: "PPG_FieldStudy"
10 changes: 10 additions & 0 deletions configs/model/PPGGRU.yaml
@@ -0,0 +1,10 @@
_target_: src.models.PPGGRUModule.PPGGRUModule

input_dim: 14
hidden_dim: 128
num_layers: 2
bidirectional: True
dropout: 0.3
lr: 1e-3
scheduler_step_size: 10
scheduler_gamma: 0.1
10 changes: 10 additions & 0 deletions configs/model/PPGLSTM.yaml
@@ -0,0 +1,10 @@
_target_: src.models.PPGLSTMModule.PPGLSTMModule

input_dim: 14
hidden_dim: 128
num_layers: 2
bidirectional: True
dropout: 0.3
lr: 1e-3
scheduler_step_size: 10
scheduler_gamma: 0.1
11 changes: 11 additions & 0 deletions configs/model/PPGMamba.yaml
@@ -0,0 +1,11 @@
_target_: src.models.PPGMambaModule.PPGMambaModule

input_dim: 14
d_model: 128
d_state: 64
d_conv: 4
expand: 2
dropout: 0.3
lr: 1e-3
scheduler_step_size: 10
scheduler_gamma: 0.1
11 changes: 11 additions & 0 deletions configs/model/PPGTransformer.yaml
@@ -0,0 +1,11 @@
_target_: src.models.PPGTransformerModule.PPGTransformerModule

input_dim: 14
d_model: 128
nhead: 8
num_encoder_layers: 4
dim_feedforward: 256
dropout: 0.1
lr: 1e-3
scheduler_step_size: 10
scheduler_gamma: 0.1
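All four model configs are plain Hydra target configs, so each resolves to a module instance via `hydra.utils.instantiate`; a quick sketch:

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/model/PPGTransformer.yaml")
model = instantiate(cfg)  # calls src.models.PPGTransformerModule.PPGTransformerModule(**cfg)
```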
18 changes: 9 additions & 9 deletions src/data/PPGFieldStudyDatamodule.py
@@ -44,8 +44,8 @@ def __init__(

         # Validate labels length
         assert (
-            len(labels) == self.num_windows
-        ), f"Number of labels ({len(labels)}) does not match number of windows ({self.num_windows})"
+            len(labels) >= self.num_windows
+        ), f"Number of labels ({len(labels)}) is less than number of windows ({self.num_windows})."

# Normalize signals using StandardScaler
self.scalers = {}
Expand All @@ -61,7 +61,7 @@ def __init__(
self.scalers[location][sensor] = scaler

         self.data = data
-        self.labels = labels
+        self.labels = labels[: self.num_windows]  # Ensure labels match the number of windows

def __len__(self):
return self.num_windows
@@ -93,21 +93,21 @@ def __getitem__(self, idx):
         # Concatenate all sensor data along the feature dimension
         chest_features = np.concatenate(
             [features["chest"][sensor] for sensor in features["chest"]], axis=1
-        )  # Shape: [256, 8]
+        )  # Shape: [window_size, 8]
         wrist_features = np.concatenate(
             [features["wrist"][sensor] for sensor in features["wrist"]], axis=1
-        )  # Shape: [256, 6]
+        )  # Shape: [window_size, 6]
         combined_features = np.concatenate(
             [chest_features, wrist_features], axis=1
-        )  # Shape: [256, 14]
+        )  # Shape: [window_size, 14]

         if self.transform:
             combined_features = self.transform(combined_features)

-        # Aggregate window into single vector by computing the mean across the window_size dimension
-        aggregated_features = combined_features.mean(axis=0)  # Shape: [14]
+        # Keep the features for the whole window; do not aggregate
+        aggregated_features = combined_features  # Shape: [window_size, 14]

-        # Ensure labels are aligned with windows
+        # Fetch the corresponding label
         label = self.labels[idx]

         return torch.tensor(aggregated_features, dtype=torch.float32), torch.tensor(
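The practical effect of this change is that each dataset item is now a full window rather than a mean-pooled vector, so batches arrive as `[batch_size, window_size, 14]` — the shape the new GRU/LSTM/Mamba/Transformer modules expect. A quick check (sketch, with `dataset` being an instance of the class above):

```python
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=64, shuffle=True)
features, label = next(iter(loader))
print(features.shape)  # torch.Size([64, window_size, 14]) instead of [64, 14]
```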