example_ParticleTransformer.py
import torch
import sys

# make the local Transformer_MET code (loss.py, transformer.py) importable
sys.path.insert(1, '/home/jovyan/Transformer_MET')
from loss import *  # provides custom_loss, returned by get_loss below
from transformer import ParticleTransformer
from weaver.utils.logger import _logger

'''
Link to the full model implementation:
https://github.com/hqucms/weaver-core/blob/main/weaver/nn/model/ParticleTransformer.py
'''

class ParticleTransformerWrapper(torch.nn.Module):
    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.mod = ParticleTransformer(**kwargs)

    @torch.jit.ignore
    def no_weight_decay(self):
        # exclude the class token from weight decay during optimization
        return {'mod.cls_token', }

    def forward(self, points, features, lorentz_vectors, mask):
        # `points` is accepted for interface compatibility but not used;
        # ParticleTransformer builds its pairwise features from the Lorentz vectors
        return self.mod(features, v=lorentz_vectors, mask=mask)

def get_model(data_config, **kwargs):
    cfg = dict(
        input_dim=len(data_config.input_dicts['pf_features']),
        num_classes=len(data_config.label_value),
        # network configurations
        pair_input_dim=4,
        use_pre_activation_pair=False,
        embed_dims=[128, 512, 100],
        pair_embed_dims=[64, 64, 64],
        num_heads=5,
        num_layers=8,
        num_cls_layers=2,
        block_params=None,
        cls_block_params={'dropout': 0, 'attn_dropout': 0, 'activation_dropout': 0},
        fc_params=[],
        activation='gelu',
        # misc
        trim=True,
        for_inference=False,
    )
    cfg.update(**kwargs)
    _logger.info('Model config: %s' % str(cfg))

    model = ParticleTransformerWrapper(**cfg)

    # metadata consumed by the weaver framework (input/output names, shapes
    # and dynamic axes, e.g. for ONNX export)
    model_info = {
        'input_names': list(data_config.input_names),
        'input_shapes': {k: ((1,) + s[1:]) for k, s in data_config.input_shapes.items()},
        'output_names': ['softmax'],
        'dynamic_axes': {**{k: {0: 'N', 2: 'n_' + k.split('_')[0]} for k in data_config.input_names},
                         **{'softmax': {0: 'N'}}},
    }
    return model, model_info

def get_loss(data_config, **kwargs):
    # custom_loss comes from `from loss import *` above
    return custom_loss
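
# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the weaver
# training pipeline). `_DummyConfig`, the feature count, particle multiplicity
# and input names below are hypothetical placeholders standing in for the real
# data_config that weaver builds from the data card; tensor shapes follow the
# (batch, channels, particles) layout expected by ParticleTransformer.
# ---------------------------------------------------------------------------
if __name__ == '__main__':

    class _DummyConfig:
        # assumed: 17 per-particle features, up to 128 particles per event
        input_dicts = {'pf_features': ['f%d' % i for i in range(17)]}
        label_value = ['label_a', 'label_b']
        input_names = ['pf_points', 'pf_features', 'pf_vectors', 'pf_mask']
        input_shapes = {
            'pf_points': (-1, 2, 128),
            'pf_features': (-1, 17, 128),
            'pf_vectors': (-1, 4, 128),
            'pf_mask': (-1, 1, 128),
        }

    model, model_info = get_model(_DummyConfig())

    # random inputs with the expected layout: features (N, C, P),
    # Lorentz vectors (N, 4, P), mask (N, 1, P)
    batch_size, n_particles = 2, 128
    points = torch.rand(batch_size, 2, n_particles)
    features = torch.rand(batch_size, 17, n_particles)
    vectors = torch.rand(batch_size, 4, n_particles)
    mask = torch.ones(batch_size, 1, n_particles)

    with torch.no_grad():
        out = model(points, features, vectors, mask)
    print(out.shape)  # (batch_size, num_classes) with this configuration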