From 770eb8e24a080b7d86fa7443e43879238875b8d9 Mon Sep 17 00:00:00 2001
From: Yixiao Fang <36138628+fangyixiao18@users.noreply.github.com>
Date: Wed, 17 May 2023 17:32:10 +0800
Subject: [PATCH] [Fix] Fix ddp bugs caused by `out_type`. (#1570)

* set out_type to be 'raw'

* update test
---
 mmpretrain/models/selfsup/beit.py               | 2 +-
 mmpretrain/models/selfsup/cae.py                | 2 +-
 mmpretrain/models/selfsup/mae.py                | 2 +-
 mmpretrain/models/selfsup/maskfeat.py           | 2 +-
 tests/test_models/test_selfsup/test_beit.py     | 2 +-
 tests/test_models/test_selfsup/test_cae.py      | 2 +-
 tests/test_models/test_selfsup/test_mae.py      | 2 +-
 tests/test_models/test_selfsup/test_maskfeat.py | 2 +-
 tests/test_models/test_selfsup/test_milan.py    | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mmpretrain/models/selfsup/beit.py b/mmpretrain/models/selfsup/beit.py
index 13b39bdf7ff..c301f7d5cae 100644
--- a/mmpretrain/models/selfsup/beit.py
+++ b/mmpretrain/models/selfsup/beit.py
@@ -182,7 +182,7 @@ def __init__(self,
                  drop_path_rate: float = 0,
                  norm_cfg: dict = dict(type='LN', eps=1e-6),
                  final_norm: bool = True,
-                 out_type: str = 'avg_featmap',
+                 out_type: str = 'raw',
                  frozen_stages: int = -1,
                  use_abs_pos_emb: bool = False,
                  use_rel_pos_bias: bool = False,
diff --git a/mmpretrain/models/selfsup/cae.py b/mmpretrain/models/selfsup/cae.py
index 2c7cfeae05a..67ac09188e9 100644
--- a/mmpretrain/models/selfsup/cae.py
+++ b/mmpretrain/models/selfsup/cae.py
@@ -251,7 +251,7 @@ def __init__(
         bias: bool = 'qv_bias',
         norm_cfg: dict = dict(type='LN', eps=1e-6),
         final_norm: bool = True,
-        out_type: str = 'avg_featmap',
+        out_type: str = 'raw',
         frozen_stages: int = -1,
         use_abs_pos_emb: bool = True,
         use_rel_pos_bias: bool = False,
diff --git a/mmpretrain/models/selfsup/mae.py b/mmpretrain/models/selfsup/mae.py
index 178acff4527..d631860711c 100644
--- a/mmpretrain/models/selfsup/mae.py
+++ b/mmpretrain/models/selfsup/mae.py
@@ -64,7 +64,7 @@ def __init__(self,
                  drop_path_rate: float = 0,
                  norm_cfg: dict = dict(type='LN', eps=1e-6),
                  final_norm: bool = True,
-                 out_type: str = 'avg_featmap',
+                 out_type: str = 'raw',
                  interpolate_mode: str = 'bicubic',
                  patch_cfg: dict = dict(),
                  layer_cfgs: dict = dict(),
diff --git a/mmpretrain/models/selfsup/maskfeat.py b/mmpretrain/models/selfsup/maskfeat.py
index c765051c4e2..fd9f0b296c4 100644
--- a/mmpretrain/models/selfsup/maskfeat.py
+++ b/mmpretrain/models/selfsup/maskfeat.py
@@ -207,7 +207,7 @@ def __init__(self,
                  drop_path_rate: float = 0,
                  norm_cfg: dict = dict(type='LN', eps=1e-6),
                  final_norm: bool = True,
-                 out_type: str = 'avg_featmap',
+                 out_type: str = 'raw',
                  interpolate_mode: str = 'bicubic',
                  patch_cfg: dict = dict(),
                  layer_cfgs: dict = dict(),
diff --git a/tests/test_models/test_selfsup/test_beit.py b/tests/test_models/test_selfsup/test_beit.py
index 4066a78eac7..7fbd5a7019f 100644
--- a/tests/test_models/test_selfsup/test_beit.py
+++ b/tests/test_models/test_selfsup/test_beit.py
@@ -35,7 +35,7 @@ def test_beit_pretrain_vit(self):
 
         # test without mask
         fake_outputs = beit_backbone(fake_inputs, None)
-        assert fake_outputs[0].shape == torch.Size([2, 768])
+        assert fake_outputs[0].shape == torch.Size([2, 197, 768])
 
     @pytest.mark.skipif(
         platform.system() == 'Windows', reason='Windows mem limit')
diff --git a/tests/test_models/test_selfsup/test_cae.py b/tests/test_models/test_selfsup/test_cae.py
index fb5f5e59845..3c9127d403c 100644
--- a/tests/test_models/test_selfsup/test_cae.py
+++ b/tests/test_models/test_selfsup/test_cae.py
@@ -25,7 +25,7 @@ def test_cae_vit():
 
     # test without mask
     fake_outputs = cae_backbone(fake_inputs, None)
-    assert fake_outputs[0].shape == torch.Size([1, 192])
+    assert fake_outputs[0].shape == torch.Size([1, 197, 192])
 
 
 @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
diff --git a/tests/test_models/test_selfsup/test_mae.py b/tests/test_models/test_selfsup/test_mae.py
index 8201d5f3c0f..48fb88c60cb 100644
--- a/tests/test_models/test_selfsup/test_mae.py
+++ b/tests/test_models/test_selfsup/test_mae.py
@@ -21,7 +21,7 @@ def test_mae_vit():
 
     # test without mask
     fake_outputs = mae_backbone(fake_inputs, None)
-    assert fake_outputs[0].shape == torch.Size([2, 768])
+    assert fake_outputs[0].shape == torch.Size([2, 197, 768])
 
 
 @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
diff --git a/tests/test_models/test_selfsup/test_maskfeat.py b/tests/test_models/test_selfsup/test_maskfeat.py
index 5feaa2a394f..75909c1fd77 100644
--- a/tests/test_models/test_selfsup/test_maskfeat.py
+++ b/tests/test_models/test_selfsup/test_maskfeat.py
@@ -22,7 +22,7 @@ def test_maskfeat_vit():
 
     # test without mask
     fake_outputs = maskfeat_backbone(fake_inputs, None)
-    assert fake_outputs[0].shape == torch.Size([2, 768])
+    assert fake_outputs[0].shape == torch.Size([2, 197, 768])
 
 
 @pytest.mark.skipif(
diff --git a/tests/test_models/test_selfsup/test_milan.py b/tests/test_models/test_selfsup/test_milan.py
index 12ad9aee907..f45f766dc33 100644
--- a/tests/test_models/test_selfsup/test_milan.py
+++ b/tests/test_models/test_selfsup/test_milan.py
@@ -24,7 +24,7 @@ def test_milan_vit():
 
     # test without mask
     fake_outputs = milan_backbone(fake_inputs, None)
-    assert fake_outputs[0].shape == torch.Size([2, 768])
+    assert fake_outputs[0].shape == torch.Size([2, 197, 768])
 
 
 @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
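Background on the fix: `out_type` controls how the `VisionTransformer` base class formats its output. With `'avg_featmap'` the patch tokens are pooled into a `(B, C)` vector, while `'raw'` returns the full `(B, N, C)` token sequence; that is why the test assertions move from `[2, 768]` to `[2, 197, 768]` (196 patch tokens for a 224x224 input with 16x16 patches, plus the cls token). The DDP failure plausibly comes from parameters that the `'avg_featmap'` setting causes to be constructed but that the self-supervised forward path never exercises: parameters that receive no gradient make `DistributedDataParallel` raise its unused-parameters error unless `find_unused_parameters=True` is set. The sketch below reproduces that failure mode with a toy module; `ToyBackbone` and `ln2` are illustrative names, not mmpretrain's actual implementation.

```python
# Minimal, self-contained sketch in plain PyTorch of the presumed failure
# mode: a config value causes a sub-module to be built that the training
# forward never uses, so its parameters get no gradient and DDP flags them.
import torch
import torch.nn as nn


class ToyBackbone(nn.Module):
    """Toy backbone that builds an extra norm layer for pooled output."""

    def __init__(self, dim: int = 8, out_type: str = 'raw'):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
        self.out_type = out_type
        if out_type == 'avg_featmap':
            # Only a pooled (inference-style) output path would use this.
            self.ln2 = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The self-supervised training path returns raw tokens and never
        # touches self.ln2, so its parameters stay out of the autograd graph.
        return self.proj(x)


for out_type in ('avg_featmap', 'raw'):
    model = ToyBackbone(out_type=out_type)
    model(torch.randn(2, 4, 8)).sum().backward()
    unused = [n for n, p in model.named_parameters() if p.grad is None]
    print(out_type, '-> parameters with no gradient:', unused)
# avg_featmap -> ['ln2.weight', 'ln2.bias']: wrapped in DDP, such parameters
#   trigger the "Expected to have finished reduction in the prior
#   iteration..." error unless find_unused_parameters=True is passed.
# raw         -> []: every parameter takes part in the backward pass.
```

Setting `find_unused_parameters=True` would also silence the error, but it adds per-iteration overhead; defaulting the pretraining backbones to `out_type='raw'`, as this patch does, avoids creating the dangling parameters in the first place.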