diff --git a/.dev_scripts/generate_readme.py b/.dev_scripts/generate_readme.py index e80d691a19c..27a14e407c2 100644 --- a/.dev_scripts/generate_readme.py +++ b/.dev_scripts/generate_readme.py @@ -301,7 +301,7 @@ def generate_model_table(models, if any('Converted From' in model.data for model in models): table_string += ( f"\n*Models with \* are converted from the [official repo]({converted_from['Code']}). " - "The config files of these models are only for inference. We haven't reprodcue the training results.*\n" + "The config files of these models are only for inference. We haven't reproduced the training results.*\n" ) return table_string diff --git a/configs/beit/README.md b/configs/beit/README.md index 8116bd78e00..404e6524a4d 100644 --- a/configs/beit/README.md +++ b/configs/beit/README.md @@ -74,7 +74,7 @@ python tools/test.py configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1 | `beit-base-p16_beit-pre_8xb128-coslr-100e_in1k` | [BEIT](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221128-ab79e626.pth) | 86.53 | 17.58 | 83.10 | N/A | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.json) | | `beit-base-p16_beit-in21k-pre_3rdparty_in1k`\* | BEIT ImageNet-21k | 86.53 | 17.58 | 85.28 | 97.59 | [config](benchmarks/beit-base-p16_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/beit/beit-base_3rdparty_in1k_20221114-c0a4df23.pth) | -*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/beitv2/README.md b/configs/beitv2/README.md index 3a7cb4f79ff..5447e2d3a36 100644 --- a/configs/beitv2/README.md +++ b/configs/beitv2/README.md @@ -74,7 +74,7 @@ python tools/test.py configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_i | `beit-base-p16_beitv2-pre_8xb128-coslr-100e_in1k` | [BEITV2](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221212-a157be30.pth) | 86.53 | 17.58 | 85.00 | N/A | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.json) | | `beit-base-p16_beitv2-in21k-pre_3rdparty_in1k`\* | BEITV2 ImageNet-21k | 86.53 | 17.58 | 86.47 | 97.99 | [config](benchmarks/beit-base-p16_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/beit/beitv2-base_3rdparty_in1k_20221114-73e11905.pth) | -*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit2). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit2). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/blip/README.md b/configs/blip/README.md index d1ec3df0850..1a8dce392cb 100644 --- a/configs/blip/README.md +++ b/configs/blip/README.md @@ -112,7 +112,7 @@ python tools/test.py configs/blip/blip-base_8xb32_caption.py https://download.op | :-------------------------- | :--------: | :-------: | :---------------------------------: | :------------------------------------------------------------------------------------------------------------: | | `blip-base_3rdparty_nlvr`\* | 259.37 | 82.33 | [config](./blip-base_8xb32_nlvr.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_nlvr_20230427-3b14d33f.pth) | -*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reproduced the training results.* *Results with # denote zero-shot evaluation.
The corresponding model hasn't been finetuned on that dataset.* diff --git a/configs/blip2/README.md b/configs/blip2/README.md index 8851ddf57b0..68ce679d704 100644 --- a/configs/blip2/README.md +++ b/configs/blip2/README.md @@ -58,7 +58,7 @@ python tools/test.py configs/blip2/blip2_8xb32_retrieval.py https://download.ope | :--------------------------- | :--------: | :------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------: | | `blip2_3rdparty_retrieval`\* | 1173.19 | 85.40 | [config](./blip2_8xb32_retrieval.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth) | -*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/chinese_clip/README.md b/configs/chinese_clip/README.md index b7f027cc002..acb37e7a2ad 100644 --- a/configs/chinese_clip/README.md +++ b/configs/chinese_clip/README.md @@ -55,7 +55,7 @@ python tools/test.py configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100 | `cn-clip_vit-large-p14_zeroshot-cls_cifar100`\* | 406.00 | 74.80 | [config](cn-clip_vit-large-p14_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-large-p14_3rdparty_20230519-3f844503.pth) | | `cn-clip_vit-huge-p14_zeroshot-cls_cifar100`\* | 958.00 | 79.10 | [config](cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-huge-p14_3rdparty_20230519-e4f49b00.pth) | -*Models with * are converted from the [official repo](https://github.com/OFA-Sys/Chinese-CLIP). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/Chinese-CLIP). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/clip/README.md b/configs/clip/README.md index 116107b2be3..7a14be4d8e0 100644 --- a/configs/clip/README.md +++ b/configs/clip/README.md @@ -74,7 +74,7 @@ python tools/test.py configs/clip/vit-base-p32_pt-64xb64_in1k.py https://downloa | `vit-base-p16_clip-openai-in12k-pre_3rdparty_in1k-384px`\* | CLIP OPENAI ImageNet-12k | 86.57 | 49.37 | 86.87 | 98.05 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-in12k-pre_3rdparty_in1k-384px_20221220-8df86b74.pth) | | `vit-base-p16_clip-openai-pre_3rdparty_in1k-384px`\* | CLIP OPENAI | 86.57 | 49.37 | 86.25 | 97.90 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-pre_3rdparty_in1k-384px_20221220-eb012e87.pth) | -*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/conformer/README.md b/configs/conformer/README.md index 6b149a8abcf..04b5d4770b2 100644 --- a/configs/conformer/README.md +++ b/configs/conformer/README.md @@ -70,7 +70,7 @@ python tools/test.py configs/conformer/conformer-tiny-p16_8xb128_in1k.py https:/ | `conformer-small-p32_8xb128_in1k` | From scratch | 38.85 | 7.09 | 81.96 | 96.02 | [config](conformer-small-p32_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth) | | `conformer-base-p16_3rdparty_in1k`\* | From scratch | 83.29 | 22.89 | 83.82 | 96.59 | [config](conformer-base-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth) | -*Models with * are converted from the [official repo](https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/convmixer/README.md b/configs/convmixer/README.md index 597c57814ca..a87d27ffb8e 100644 --- a/configs/convmixer/README.md +++ b/configs/convmixer/README.md @@ -63,7 +63,7 @@ python tools/test.py configs/convmixer/convmixer-768-32_10xb64_in1k.py https://d | `convmixer-1024-20_3rdparty_in1k`\* | From scratch | 24.38 | 5.55 | 76.94 | 93.36 | [config](convmixer-1024-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth) | | `convmixer-1536-20_3rdparty_in1k`\* | From scratch | 51.63 | 48.71 | 81.37 | 95.61 | [config](convmixer-1536-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth) | -*Models with * are converted from the [official repo](https://github.com/locuslab/convmixer). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/locuslab/convmixer). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/convnext/README.md b/configs/convnext/README.md index 79c09c49cea..2e6e14c2f2e 100644 --- a/configs/convnext/README.md +++ b/configs/convnext/README.md @@ -81,7 +81,7 @@ python tools/test.py configs/convnext/convnext-tiny_32xb128_in1k.py https://down | `convnext-large_3rdparty_in21k`\* | 197.77 | 34.37 | [config](convnext-large_64xb64_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth) | | `convnext-xlarge_3rdparty_in21k`\* | 350.20 | 60.93 | [config](convnext-xlarge_64xb64_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt).
The config files of these models are only for inference. We haven't reproduced the training results.* ### Image Classification on ImageNet-1k @@ -109,7 +109,7 @@ python tools/test.py configs/convnext/convnext-tiny_32xb128_in1k.py https://down | `convnext-xlarge_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 350.20 | 60.93 | 86.97 | 98.20 | [config](convnext-xlarge_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth) | | `convnext-xlarge_in21k-pre-3rdparty_in1k-384px`\* | From scratch | 350.20 | 179.20 | 87.76 | 98.55 | [config](convnext-xlarge_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_in1k-384px_20221219-b161bc14.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/convnext_v2/README.md b/configs/convnext_v2/README.md index 614fb7fa530..e561387412a 100644 --- a/configs/convnext_v2/README.md +++ b/configs/convnext_v2/README.md @@ -68,7 +68,7 @@ python tools/test.py configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py https:/ | `convnext-v2-large_3rdparty-fcmae_in1k`\* | 197.96 | 34.40 | [config](convnext-v2-large_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_3rdparty-fcmae_in1k_20230104-bf38df92.pth) | | `convnext-v2-huge_3rdparty-fcmae_in1k`\* | 660.29 | 115.00 | [config](convnext-v2-huge_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_3rdparty-fcmae_in1k_20230104-fe43ae6c.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference. We haven't reproduced the training results.* ### Image Classification on ImageNet-1k @@ -93,7 +93,7 @@ python tools/test.py configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py https:/ | `convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 660.29 | 337.96 | 88.68 | 98.73 | [config](convnext-v2-huge_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-02a4eb35.pth) | | `convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px`\* | FCMAE ImageNet-21k | 660.29 | 600.81 | 88.86 | 98.74 | [config](convnext-v2-huge_32xb32_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px_20230104-ce32e63c.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/cspnet/README.md b/configs/cspnet/README.md index 41b314472ce..f3b145ba039 100644 --- a/configs/cspnet/README.md +++ b/configs/cspnet/README.md @@ -63,7 +63,7 @@ python tools/test.py configs/cspnet/cspdarknet50_8xb32_in1k.py https://download. | `cspresnet50_3rdparty_8xb32_in1k`\* | From scratch | 21.62 | 3.48 | 79.55 | 94.68 | [config](cspresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth) | | `cspresnext50_3rdparty_8xb32_in1k`\* | From scratch | 20.57 | 3.11 | 79.96 | 94.96 | [config](cspresnext50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth) | -*Models with * are converted from the [official repo](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/davit/README.md b/configs/davit/README.md index 7f4a47772c4..1be19d98e37 100644 --- a/configs/davit/README.md +++ b/configs/davit/README.md @@ -63,7 +63,7 @@ python tools/test.py configs/davit/davit-tiny_4xb256_in1k.py https://download.op | `davit-small_3rdparty_in1k`\* | From scratch | 49.75 | 8.80 | 83.61 | 96.75 | [config](davit-small_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/davit/davit-small_3rdparty_in1k_20221116-51a849a6.pth) | | `davit-base_3rdparty_in1k`\* | From scratch | 87.95 | 15.51 | 84.09 | 96.82 | [config](davit-base_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/davit/davit-base_3rdparty_in1k_20221116-19e0d956.pth) | -*Models with * are converted from the [official repo](https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/deit/README.md b/configs/deit/README.md index 7b2f58a5383..ee434140a43 100644 --- a/configs/deit/README.md +++ b/configs/deit/README.md @@ -75,7 +75,7 @@ python tools/test.py configs/deit/deit-tiny_4xb256_in1k.py https://download.open | `deit-base_224px-pre_3rdparty_in1k-384px`\* | 224px | 86.86 | 55.54 | 83.04 | 96.31 | [config](deit-base_16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth) | | `deit-base-distilled_224px-pre_3rdparty_in1k-384px`\* | 224px | 87.63 | 55.65 | 85.55 | 97.35 | [config](deit-base-distilled_16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168). The config files of these models are only for inference.
We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168). The config files of these models are only for inference. We haven't reproduced the training results.* ```{warning} MMPretrain doesn't support training the distilled version DeiT. diff --git a/configs/deit3/README.md b/configs/deit3/README.md index 18f678e256f..18694b7eb9b 100644 --- a/configs/deit3/README.md +++ b/configs/deit3/README.md @@ -76,7 +76,7 @@ python tools/test.py configs/deit3/deit3-small-p16_64xb64_in1k.py https://downlo | `deit3-huge-p14_3rdparty_in1k`\* | From scratch | 632.13 | 167.40 | 85.21 | 97.36 | [config](deit3-huge-p14_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_3rdparty_in1k_20221009-e107bcb7.pth) | | `deit3-huge-p14_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 632.13 | 167.40 | 87.19 | 98.26 | [config](deit3-huge-p14_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_in21k-pre_3rdparty_in1k_20221009-19b8a535.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/densenet/README.md b/configs/densenet/README.md index 774ba574db5..fe40fdd99cf 100644 --- a/configs/densenet/README.md +++ b/configs/densenet/README.md @@ -64,7 +64,7 @@ python tools/test.py configs/densenet/densenet121_4xb256_in1k.py https://downloa | `densenet201_3rdparty_in1k`\* | From scratch | 20.01 | 4.37 | 77.32 | 93.64 | [config](densenet201_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth) | | `densenet161_3rdparty_in1k`\* | From scratch | 28.68 | 7.82 | 77.61 | 93.83 | [config](densenet161_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth) | -*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/dinov2/README.md b/configs/dinov2/README.md index cdd56f08cd3..aa79d6b43c6 100644 --- a/configs/dinov2/README.md +++ b/configs/dinov2/README.md @@ -44,7 +44,7 @@ print(type(feats)) | `vit-large-p14_dinov2-pre_3rdparty`\* | 304.00 | 507.00 | [config](vit-large-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-large-p14_dinov2-pre_3rdparty_20230426-f3302d9e.pth) | | `vit-giant-p14_dinov2-pre_3rdparty`\* | 1136.00 | 1784.00 | [config](vit-giant-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-giant-p14_dinov2-pre_3rdparty_20230426-2934a630.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/dinov2). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/dinov2). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/edgenext/README.md b/configs/edgenext/README.md index 2906a171443..1c9686f7d96 100644 --- a/configs/edgenext/README.md +++ b/configs/edgenext/README.md @@ -66,7 +66,7 @@ python tools/test.py configs/edgenext/edgenext-xxsmall_8xb256_in1k.py https://do | `edgenext-base_3rdparty_in1k`\* | From scratch | 18.51 | 3.81 | 82.48 | 96.20 | [config](edgenext-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty_in1k_20220801-9ade408b.pth) | | `edgenext-base_3rdparty-usi_in1k`\* | From scratch | 18.51 | 3.81 | 83.67 | 96.70 | [config](edgenext-base_8xb256-usi_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty-usi_in1k_20220801-909e8939.pth) | -*Models with * are converted from the [official repo](https://github.com/mmaaz60/EdgeNeXt). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/mmaaz60/EdgeNeXt). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/efficientformer/README.md b/configs/efficientformer/README.md index 4ef79225c44..537777efc0d 100644 --- a/configs/efficientformer/README.md +++ b/configs/efficientformer/README.md @@ -63,7 +63,7 @@ python tools/test.py configs/efficientformer/efficientformer-l1_8xb128_in1k.py h | `efficientformer-l3_3rdparty_8xb128_in1k`\* | From scratch | 31.41 | 3.74 | 82.45 | 96.18 | [config](efficientformer-l3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220915-466793d6.pth) | | `efficientformer-l7_3rdparty_8xb128_in1k`\* | From scratch | 82.23 | 10.16 | 83.40 | 96.60 | [config](efficientformer-l7_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220915-185e30af.pth) | -*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/efficientnet/README.md b/configs/efficientnet/README.md index e44174035dd..c7b7b76ab5d 100644 --- a/configs/efficientnet/README.md +++ b/configs/efficientnet/README.md @@ -106,7 +106,7 @@ python tools/test.py configs/efficientnet/efficientnet-b0_8xb32_in1k.py https:// | `efficientnet-l2_3rdparty-ra-noisystudent_in1k-800px`\* | From scratch | 480.31 | 174.20 | 88.33 | 98.65 | [config](efficientnet-l2_8xb8_in1k-800px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k_20221103-be73be13.pth) | | `efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px`\* | From scratch | 480.31 | 484.98 | 88.18 | 98.55 | [config](efficientnet-l2_8xb32_in1k-475px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px_20221103-5a0d8058.pth) | -*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/efficientnet_v2/README.md b/configs/efficientnet_v2/README.md index 4b8ccee4baa..965421823e7 100644 --- a/configs/efficientnet_v2/README.md +++ b/configs/efficientnet_v2/README.md @@ -64,7 +64,7 @@ python tools/test.py configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py htt | `efficientnetv2-l_3rdparty_in21k`\* | 145.22 | 13.11 | [config](efficientnetv2-l_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_3rdparty_in21k_20221220-f28f91e1.pth) | | `efficientnetv2-xl_3rdparty_in21k`\* | 234.82 | 18.86 | [config](efficientnetv2-xl_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_3rdparty_in21k_20221220-b2c9329c.pth) | -*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py). The config files of these models are only for inference. We haven't reproduced the training results.* ### Image Classification on ImageNet-1k @@ -82,7 +82,7 @@ python tools/test.py configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py htt | `efficientnetv2-l_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 118.52 | 60.14 | 86.31 | 97.99 | [config](efficientnetv2-l_8xb32_in1k-480px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_in21k-pre-3rdparty_in1k_20221220-63df0efd.pth) | | `efficientnetv2-xl_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 208.12 | 98.34 | 86.39 | 97.83 | [config](efficientnetv2-xl_8xb32_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_in21k-pre-3rdparty_in1k_20221220-583ac18b.pth) | -*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py).
The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/eva/README.md b/configs/eva/README.md index 5d2820ff5fb..6e49c8abe8e 100644 --- a/configs/eva/README.md +++ b/configs/eva/README.md @@ -72,7 +72,7 @@ python tools/test.py configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k. | `beit-g-p14_3rdparty-eva_30m`\* | 1011.60 | 267.17 | [config](eva-g-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_3rdparty_30m_20221213-3b7aca97.pth) | | `beit-g-p14_eva-30m-pre_3rdparty_in21k`\* | 1011.60 | 267.17 | [config](eva-g-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | -*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference. We haven't reproduced the training results.* ### Image Classification on ImageNet-1k @@ -87,7 +87,7 @@ python tools/test.py configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k. | `beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-336px`\* | [EVA 30M ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | 1013.01 | 620.64 | 89.61 | 98.93 | [config](eva-g-p14_8xb16_in1k-336px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-336px_20221213-210f9071.pth) | | `beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-560px`\* | [EVA 30M ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | 1014.45 | 1906.76 | 89.71 | 98.96 | [config](eva-g-p14_8xb16_in1k-560px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-560px_20221213-fa1c3652.pth) | -*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/eva02/README.md b/configs/eva02/README.md index bf0cea780fd..bc8f64e76d1 100644 --- a/configs/eva02/README.md +++ b/configs/eva02/README.md @@ -85,7 +85,7 @@ python tools/test.py configs/eva02/eva02-tiny-p14_in1k.py /path/to/eva02-tiny-p1 | `vit-small-p14_eva02-in21k-pre_3rdparty_in1k-336px`\* | EVA02 ImageNet-21k | 22.13 | 15.48 | 85.78 | 97.60 | [config](./eva02-small-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-small-p14_in21k-pre_3rdparty_in1k-336px_20230505-9c5b0e85.pth) | | `vit-base-p14_eva02-in21k-pre_3rdparty_in1k-448px`\* | EVA02 ImageNet-21k | 87.13 | 107.11 | 88.29 | 98.53 | [config](./eva02-base-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_in21k-pre_3rdparty_in1k-448px_20230505-8ad211c5.pth) | -*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. We haven't reproduced the training results.* #### (*w* IN-21K intermediate fine-tuning) @@ -95,7 +95,7 @@ python tools/test.py configs/eva02/eva02-tiny-p14_in1k.py /path/to/eva02-tiny-p1 | `vit-large-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px`\* | EVA02 ImageNet-21k | 305.08 | 362.33 | 89.65 | 98.95 | [config](./eva02-large-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_in21k-pre_in21k-medft_3rdparty_in1k-448px_20230505-926d1599.pth) | | `vit-large-p14_eva02_m38m-pre_in21k-medft_3rdparty_in1k-448px`\* | EVA02 Merged-38M | 305.10 | 362.33 | 89.83 | 99.00 | [config](./eva02-large-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_m38m-pre_in21k-medft_3rdparty_in1k-448px_20230505-150dc5ed.pth) | -*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/flamingo/README.md b/configs/flamingo/README.md index 33816c23f95..60c6af0f50e 100644 --- a/configs/flamingo/README.md +++ b/configs/flamingo/README.md @@ -46,7 +46,7 @@ python tools/test.py configs/flamingo/flamingo_zeroshot_caption.py https://downl | :------------------------------------- | :--------: | :---: | :------------------------------------: | :-----------------------------------------------------------------------------------------------------------: | | `flamingo_3rdparty-zeroshot_caption`\* | 8.220 | 65.50 | [config](flamingo_zeroshot_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth) | -*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference.
We haven't reproduced the training results.* ### Visual Question Answering on VQAv2 @@ -54,7 +54,7 @@ python tools/test.py configs/flamingo/flamingo_zeroshot_caption.py https://downl | :--------------------------------- | :--------: | :------: | :--------------------------------: | :----------------------------------------------------------------------------------------------------------------: | | `flamingo_3rdparty-zeroshot_vqa`\* | 8.22 | 43.50 | [config](flamingo_zeroshot_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth) | -*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/hornet/README.md b/configs/hornet/README.md index 20367c18785..b4dbf05bd35 100644 --- a/configs/hornet/README.md +++ b/configs/hornet/README.md @@ -66,7 +66,7 @@ python tools/test.py configs/hornet/hornet-tiny_8xb128_in1k.py https://download. | `hornet-base_3rdparty_in1k`\* | From scratch | 87.26 | 15.58 | 84.24 | 96.94 | [config](hornet-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth) | | `hornet-base-gf_3rdparty_in1k`\* | From scratch | 88.42 | 15.42 | 84.32 | 96.95 | [config](hornet-base-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth) | -*Models with * are converted from the [official repo](https://github.com/raoyongming/HorNet). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/raoyongming/HorNet). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md index 4ada781039e..31725cf8a4e 100644 --- a/configs/hrnet/README.md +++ b/configs/hrnet/README.md @@ -69,7 +69,7 @@ python tools/test.py configs/hrnet/hrnet-w18_4xb32_in1k.py https://download.open | `hrnet-w18_3rdparty_8xb32-ssld_in1k`\* | From scratch | 21.30 | 4.33 | 81.06 | 95.70 | [config](hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth) | | `hrnet-w48_3rdparty_8xb32-ssld_in1k`\* | From scratch | 77.47 | 17.36 | 83.63 | 96.79 | [config](hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth) | -*Models with * are converted from the [official repo](https://github.com/HRNet/HRNet-Image-Classification). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/HRNet/HRNet-Image-Classification). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/inception_v3/README.md b/configs/inception_v3/README.md index 4fea38c959d..24fde38118d 100644 --- a/configs/inception_v3/README.md +++ b/configs/inception_v3/README.md @@ -61,7 +61,7 @@ python tools/test.py configs/inception_v3/inception-v3_8xb32_in1k.py https://dow | :----------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :-----------------------------------------------------------------------------: | | `inception-v3_3rdparty_8xb32_in1k`\* | From scratch | 23.83 | 5.75 | 77.57 | 93.58 | [config](inception-v3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/inception-v3/inception-v3_3rdparty_8xb32_in1k_20220615-dcd4d910.pth) | -*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py#L28). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py#L28). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/levit/README.md b/configs/levit/README.md index 9586ac2817d..234edb60618 100644 --- a/configs/levit/README.md +++ b/configs/levit/README.md @@ -65,7 +65,7 @@ python tools/test.py configs/levit/levit-128s_8xb256_in1k.py https://download.op | `levit-256_3rdparty_in1k`\* | From scratch | 18.38 | 1.14 | 81.59 | 95.46 | [config](levit-256_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-256_3rdparty_in1k_20230117-5ae2ce7d.pth) | | `levit-384_3rdparty_in1k`\* | From scratch | 38.36 | 2.37 | 82.59 | 95.95 | [config](levit-384_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-384_3rdparty_in1k_20230117-f3539cce.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/LeViT). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/LeViT). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/mlp_mixer/README.md b/configs/mlp_mixer/README.md index 3f10a5836c9..f0bb4ce0984 100644 --- a/configs/mlp_mixer/README.md +++ b/configs/mlp_mixer/README.md @@ -62,7 +62,7 @@ python tools/test.py configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py https:/ | `mlp-mixer-base-p16_3rdparty_64xb64_in1k`\* | From scratch | 59.88 | 12.61 | 76.68 | 92.25 | [config](mlp-mixer-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth) | | `mlp-mixer-large-p16_3rdparty_64xb64_in1k`\* | From scratch | 208.20 | 44.57 | 72.34 | 88.02 | [config](mlp-mixer-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth) | -*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py). The config files of these models are only for inference.
We haven't reprodcue the training results.* +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/mobilenet_v3/README.md b/configs/mobilenet_v3/README.md index dcf7f4c4932..833de5b25aa 100644 --- a/configs/mobilenet_v3/README.md +++ b/configs/mobilenet_v3/README.md @@ -84,7 +84,7 @@ python tools/test.py configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py | `mobilenet-v3-large_8xb128_in1k` | From scratch | 5.48 | 0.23 | 73.49 | 91.31 | [config](mobilenet-v3-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.json) | | `mobilenet-v3-large_3rdparty_in1k`\* | From scratch | 5.48 | 0.23 | 74.04 | 91.34 | [config](mobilenet-v3-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth) | -*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/mobilevit/README.md b/configs/mobilevit/README.md index 28f6c05088b..fa0960d123a 100644 --- a/configs/mobilevit/README.md +++ b/configs/mobilevit/README.md @@ -82,7 +82,7 @@ python tools/test.py configs/mobilevit/mobilevit-small_8xb128_in1k.py https://do | `mobilevit-xsmall_3rdparty_in1k`\* | From scratch | 2.32 | 1.05 | 74.75 | 92.32 | [config](mobilevit-xsmall_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xsmall_3rdparty_in1k_20221018-be39a6e7.pth) | | `mobilevit-xxsmall_3rdparty_in1k`\* | From scratch | 1.27 | 0.42 | 69.02 | 88.91 | [config](mobilevit-xxsmall_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xxsmall_3rdparty_in1k_20221018-77835605.pth) | -*Models with * are converted from the [official repo](https://github.com/apple/ml-cvnets). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/apple/ml-cvnets). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/mvit/README.md b/configs/mvit/README.md index 8428e14f029..1bf72e5e4cb 100644 --- a/configs/mvit/README.md +++ b/configs/mvit/README.md @@ -71,7 +71,7 @@ python tools/test.py configs/mvit/mvitv2-tiny_8xb256_in1k.py https://download.op | `mvitv2-base_3rdparty_in1k`\* | From scratch | 51.47 | 10.16 | 84.34 | 96.86 | [config](mvitv2-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth) | | `mvitv2-large_3rdparty_in1k`\* | From scratch | 217.99 | 43.87 | 85.25 | 97.14 | [config](mvitv2-large_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/mvit). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/mvit). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/ofa/README.md b/configs/ofa/README.md index 9c0edca783c..22e20f8bd85 100644 --- a/configs/ofa/README.md +++ b/configs/ofa/README.md @@ -46,7 +46,7 @@ python tools/test.py configs/ofa/ofa-base_finetuned_refcoco.py https://download. | :-------------------------------------- | :--------: | :----: | :----: | :-------------------------------------: | :--------------------------------------------------------------------------------------------------: | | `ofa-base_3rdparty-finetuned_caption`\* | 182.24 | 42.64 | 144.50 | [config](ofa-base_finetuned_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-caption_20230418-de18914e.pth) | -*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reproduced the training results.* ### Visual Grounding on RefCOCO @@ -54,7 +54,7 @@ python tools/test.py configs/ofa/ofa-base_finetuned_refcoco.py https://download. | :-------------------------------------- | :--------: | :--------------: | :--------------: | :-------------------------------------: | :------------------------------------------------------------------------------: | | `ofa-base_3rdparty-finetuned_refcoco`\* | 182.24 | 90.49 | 83.63 | [config](ofa-base_finetuned_refcoco.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_refcoco_20230418-2797d3ab.pth) | -*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reproduced the training results.* ### Visual Question Answering on VQAv2 @@ -63,7 +63,7 @@ python tools/test.py configs/ofa/ofa-base_finetuned_refcoco.py https://download.
| `ofa-base_3rdparty-finetuned_vqa`\* | 182.24 | 78.00 | [config](ofa-base_finetuned_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-vqa_20230418-f38539a5.pth) | | `ofa-base_3rdparty-zeroshot_vqa`\* | 182.24 | 58.32 | [config](ofa-base_zeroshot_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_pretrain_20230418-dccfc07f.pth) | -*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/poolformer/README.md b/configs/poolformer/README.md index 75c9e74d0aa..2c4b249329e 100644 --- a/configs/poolformer/README.md +++ b/configs/poolformer/README.md @@ -65,7 +65,7 @@ python tools/test.py configs/poolformer/poolformer-s12_32xb128_in1k.py https://d | `poolformer-m36_3rdparty_32xb128_in1k`\* | From scratch | 56.17 | 8.96 | 82.14 | 95.71 | [config](poolformer-m36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth) | | `poolformer-m48_3rdparty_32xb128_in1k`\* | From scratch | 73.47 | 11.80 | 82.51 | 95.95 | [config](poolformer-m48_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth) | -*Models with * are converted from the [official repo](https://github.com/sail-sg/poolformer). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/sail-sg/poolformer). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/replknet/README.md b/configs/replknet/README.md index 49aaa77b6d3..3d312f24aa9 100644 --- a/configs/replknet/README.md +++ b/configs/replknet/README.md @@ -93,7 +93,7 @@ backbone.switch_to_deploy() | `replknet-31L_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 172.67 | 97.24 | 86.63 | 98.00 | [config](replknet-31L_32xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31L_in21k-pre_3rdparty_in1k-384px_20221118-dc3fc07c.pth) | | `replknet-XL_meg73m-pre_3rdparty_in1k-320px`\* | MEG73M | 335.44 | 129.57 | 87.57 | 98.39 | [config](replknet-XL_32xb64_in1k-320px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-XL_meg73m-pre_3rdparty_in1k-320px_20221118-88259b1d.pth) | -*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/repmlp/README.md b/configs/repmlp/README.md index 73cb6123f9d..41dfa234bd0 100644 --- a/configs/repmlp/README.md +++ b/configs/repmlp/README.md @@ -89,7 +89,7 @@ backbone.switch_to_deploy() | `repmlp-base_3rdparty_8xb64_in1k`\* | From scratch | 68.24 | 6.71 | 80.41 | 95.14 | [config](repmlp-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth) | | `repmlp-base_3rdparty_8xb64_in1k-256px`\* | From scratch | 96.45 | 9.69 | 81.11 | 95.50 | [config](repmlp-base_8xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth) | -*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/repvgg/README.md b/configs/repvgg/README.md index c4b73c693aa..9a47f9d1e0a 100644 --- a/configs/repvgg/README.md +++ b/configs/repvgg/README.md @@ -127,7 +127,7 @@ backbone.switch_to_deploy() | `repvgg-B3g4_8xb32_in1k` | From scratch | 75.63 | 16.06 | 80.26 | 95.15 | [config](repvgg-B3g4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_8xb32_in1k_20221213-e01cb280.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_8xb32_in1k_20221213-e01cb280.log) | | `repvgg-D2se_3rdparty_in1k`\* | From scratch | 120.39 | 32.84 | 81.81 | 95.94 | [config](repvgg-D2se_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | -*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250). The config files of these models are only for inference.
We haven't reproduced the training results.* ## Citation diff --git a/configs/res2net/README.md b/configs/res2net/README.md index bb8b5f1beff..68b1acce79c 100644 --- a/configs/res2net/README.md +++ b/configs/res2net/README.md @@ -63,7 +63,7 @@ python tools/test.py configs/res2net/res2net50-w14-s8_8xb32_in1k.py https://down | `res2net50-w26-s8_3rdparty_8xb32_in1k`\* | From scratch | 48.40 | 8.39 | 79.20 | 94.36 | [config](res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) | | `res2net101-w26-s4_3rdparty_8xb32_in1k`\* | From scratch | 45.21 | 8.12 | 79.19 | 94.44 | [config](res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) | -*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/revvit/README.md b/configs/revvit/README.md index ac2415cc474..0439b22ac9d 100644 --- a/configs/revvit/README.md +++ b/configs/revvit/README.md @@ -76,7 +76,7 @@ python tools/test.py configs/revvit/revvit-small_8xb256_in1k.py https://download | `revvit-small_3rdparty_in1k`\* | From scratch | 22.44 | 4.58 | 79.87 | 94.90 | [config](revvit-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/revvit/revvit-base_3rdparty_in1k_20221213-87a7b0a5.pth) | | `revvit-base_3rdparty_in1k`\* | From scratch | 87.34 | 17.49 | 81.81 | 95.56 | [config](revvit-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/revvit/revvit-small_3rdparty_in1k_20221213-a3a34f5c.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/SlowFast). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/SlowFast). The config files of these models are only for inference. We haven't reproduced the training results.* ## Citation diff --git a/configs/sam/README.md b/configs/sam/README.md index baf8895ba8f..1a5668a3d0b 100644 --- a/configs/sam/README.md +++ b/configs/sam/README.md @@ -43,7 +43,7 @@ print(type(feats)) | `vit-large-p16_sam-pre_3rdparty_sa1b-1024px`\* | 308.00 | 1494.00 | [config](vit-large-p16_sam_headless.py) | [model](https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-large-p16_sam-pre_3rdparty_sa1b-1024px_20230411-595feafd.pth) | | `vit-huge-p16_sam-pre_3rdparty_sa1b-1024px`\* | 637.00 | 2982.00 | [config](vit-huge-p16_sam_headless.py) | [model](https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-huge-p16_sam-pre_3rdparty_sa1b-1024px_20230411-3f13c653.pth) | -*Models with * are converted from the [official repo](https://github.com/facebookresearch/segment-anything/). The config files of these models are only for inference. We haven't reprodcue the training results.* +*Models with * are converted from the [official repo](https://github.com/facebookresearch/segment-anything/).
diff --git a/configs/swin_transformer/README.md b/configs/swin_transformer/README.md
index 7e00b13c96a..1d41f13a525 100644
--- a/configs/swin_transformer/README.md
+++ b/configs/swin_transformer/README.md
@@ -91,7 +91,7 @@ python tools/test.py configs/swin_transformer/swin-tiny_16xb64_in1k.py https://d
| `swin-large_in21k-pre-3rdparty_in1k`\* | From scratch | 196.53 | 34.04 | 86.24 | 97.88 | [config](swin-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth) |
| `swin-large_in21k-pre-3rdparty_in1k-384`\* | From scratch | 196.74 | 100.04 | 87.25 | 98.25 | [config](swin-large_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth) |
-*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458). The config files of these models are only for inference. We haven't reproduced the training results.*
### Image Classification on CUB-200-2011
diff --git a/configs/swin_transformer_v2/README.md b/configs/swin_transformer_v2/README.md
index b1bfd30e1d9..dd20548ae78 100644
--- a/configs/swin_transformer_v2/README.md
+++ b/configs/swin_transformer_v2/README.md
@@ -86,7 +86,7 @@ python tools/test.py configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256p
| `swinv2-base-w12_3rdparty_in21k-192px`\* | 87.92 | 8.51 | [config](swinv2-base-w12_8xb128_in21k-192px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth) |
| `swinv2-large-w12_3rdparty_in21k-192px`\* | 196.74 | 19.04 | [config](swinv2-large-w12_8xb128_in21k-192px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth) |
-*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. We haven't reproduced the training results.*
### Image Classification on ImageNet-1k
@@ -103,7 +103,7 @@ python tools/test.py configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256p
| `swinv2-large-w16_in21k-pre_3rdparty_in1k-256px`\* | ImageNet-21k | 196.75 | 33.86 | 86.93 | 98.06 | [config](swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth) |
| `swinv2-large-w24_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 196.75 | 76.20 | 87.59 | 98.27 | [config](swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth) |
-*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/tinyvit/README.md b/configs/tinyvit/README.md
index 3354788f926..58ceb5779b4 100644
--- a/configs/tinyvit/README.md
+++ b/configs/tinyvit/README.md
@@ -68,7 +68,7 @@ python tools/test.py configs/tinyvit/tinyvit-5m_8xb256_in1k.py https://download.
| `tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px`\* | ImageNet-21k DISTILL | 21.23 | 13.85 | 86.21 | 97.77 | [config](tinyvit-21m-distill_8xb256_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px_20221021-65be6b3f.pth) |
| `tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px`\* | ImageNet-21k DISTILL | 21.27 | 27.15 | 86.44 | 97.89 | [config](tinyvit-21m-distill_8xb256_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px_20221021-e42a9bea.pth) |
-*Models with * are converted from the [official repo](https://github.com/microsoft/Cream/tree/main/TinyViT). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/microsoft/Cream/tree/main/TinyViT). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/tnt/README.md b/configs/tnt/README.md
index bb21f7913cc..e86da0b4a8d 100644
--- a/configs/tnt/README.md
+++ b/configs/tnt/README.md
@@ -61,7 +61,7 @@ python tools/test.py configs/tnt/tnt-s-p16_16xb64_in1k.py https://download.openm
| :------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------: | :------------------------------------------------------------------------------------: |
| `tnt-small-p16_3rdparty_in1k`\* | From scratch | 23.76 | 3.36 | 81.52 | 95.73 | [config](tnt-s-p16_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) |
-*Models with * are converted from the [official repo](https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/twins/README.md b/configs/twins/README.md
index f8a972022f3..9e97b7842d9 100644
--- a/configs/twins/README.md
+++ b/configs/twins/README.md
@@ -66,7 +66,7 @@ python tools/test.py configs/twins/twins-pcpvt-small_8xb128_in1k.py https://down
| `twins-svt-base_8xb128_3rdparty_in1k`\* | From scratch | 56.07 | 8.35 | 83.13 | 96.29 | [config](twins-svt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth) |
| `twins-svt-large_3rdparty_16xb64_in1k`\* | From scratch | 99.27 | 14.82 | 83.60 | 96.50 | [config](twins-svt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth) |
-*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/van/README.md b/configs/van/README.md
index b02720229fc..7e548b6b800 100644
--- a/configs/van/README.md
+++ b/configs/van/README.md
@@ -64,7 +64,7 @@ python tools/test.py configs/van/van-tiny_8xb128_in1k.py https://download.openmm
| `van-base_3rdparty_in1k`\* | From scratch | 26.58 | 5.03 | 82.80 | 96.21 | [config](van-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth) |
| `van-large_3rdparty_in1k`\* | From scratch | 44.77 | 8.99 | 83.86 | 96.73 | [config](van-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth) |
-*Models with * are converted from the [official repo](https://github.com/Visual-Attention-Network/VAN-Classification). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/Visual-Attention-Network/VAN-Classification). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
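As in the other README hunks, the "*" entries above are converted checkpoints meant for evaluation rather than training. A small sketch of single-image inference with one of the VAN names from the table; the `inference_model` helper, the result key, and the demo image path are assumptions of this example (running it downloads the checkpoint):

```python
# Sketch only: converted "*" checkpoints are used for inference/evaluation.
from mmpretrain import inference_model

# Model name from the VAN table above; 'demo/demo.JPEG' is assumed to be the
# sample image shipped with the repository.
result = inference_model('van-base_3rdparty_in1k', 'demo/demo.JPEG')
print(result['pred_class'])
```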
diff --git a/configs/vig/README.md b/configs/vig/README.md
index ae6cf566b51..624e387ac37 100644
--- a/configs/vig/README.md
+++ b/configs/vig/README.md
@@ -67,7 +67,7 @@ python tools/test.py configs/vig/vig-tiny_8xb128_in1k.py https://download.openmm
| `pvig-medium_3rdparty_in1k`\* | From scratch | 51.68 | 8.89 | 83.12 | 96.35 | [config](pvig-medium_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-medium_3rdparty_in1k_20230117-21057a6d.pth) |
| `pvig-base_3rdparty_in1k`\* | From scratch | 95.21 | 16.86 | 83.59 | 96.52 | [config](pvig-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-base_3rdparty_in1k_20230117-dbab3c85.pth) |
-*Models with * are converted from the [official repo](https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/vision_transformer/README.md b/configs/vision_transformer/README.md
index b97336ecccd..66bd3f529dd 100644
--- a/configs/vision_transformer/README.md
+++ b/configs/vision_transformer/README.md
@@ -85,7 +85,7 @@ python tools/test.py configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.p
| `vit-base-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 86.86 | 55.54 | 85.43 | 97.77 | [config](vit-base-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) |
| `vit-large-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 304.72 | 191.21 | 85.63 | 97.63 | [config](vit-large-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) |
-*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/wrn/README.md b/configs/wrn/README.md
index 28e178f230f..2753307b066 100644
--- a/configs/wrn/README.md
+++ b/configs/wrn/README.md
@@ -63,7 +63,7 @@ python tools/test.py configs/wrn/wide-resnet50_8xb32_in1k.py https://download.op
| `wide-resnet101_3rdparty_8xb32_in1k`\* | From scratch | 126.89 | 22.81 | 78.84 | 94.28 | [config](wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) |
| `wide-resnet50_3rdparty-timm_8xb32_in1k`\* | From scratch | 68.88 | 11.44 | 81.45 | 95.53 | [config](wide-resnet50_timm_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth) |
-*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/configs/xcit/README.md b/configs/xcit/README.md
index 9008c1fc5cf..ab2cd7a3634 100644
--- a/configs/xcit/README.md
+++ b/configs/xcit/README.md
@@ -92,7 +92,7 @@ python tools/test.py configs/xcit/xcit-nano-12-p16_8xb128_in1k.py https://downlo
| `xcit-medium-24-p8_3rdparty-dist_in1k-384px`\* | 84.32 | 186.67 | [config](xcit-medium-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty-dist_in1k-384px_20230214-5db925e0.pth) |
| `xcit-large-24-p8_3rdparty-dist_in1k-384px`\* | 188.93 | 415.00 | [config](xcit-large-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty-dist_in1k-384px_20230214-9f718b1a.pth) |
-*Models with * are converted from the [official repo](https://github.com/facebookresearch/xcit). The config files of these models are only for inference. We haven't reprodcue the training results.*
+*Models with * are converted from the [official repo](https://github.com/facebookresearch/xcit). The config files of these models are only for inference. We haven't reproduced the training results.*
## Citation
diff --git a/mmpretrain/models/backbones/deit3.py b/mmpretrain/models/backbones/deit3.py
index 9be3627915f..acedabe42d6 100644
--- a/mmpretrain/models/backbones/deit3.py
+++ b/mmpretrain/models/backbones/deit3.py
@@ -126,7 +126,7 @@ class DeiT3TransformerEncoderLayer(BaseModule):
        use_layer_scale (bool): Whether to use layer_scale in DeiT3TransformerEncoderLayer. Defaults to True.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        init_cfg (dict, optional): Initialization config dict.
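The docstring hunks above and below only correct "Defaluts" to "Defaults" for the `act_cfg` entries; the documented defaults themselves are unchanged. A small sketch of how those defaults read when one of these layers is built explicitly; the `embed_dims`/`num_heads`/`feedforward_channels` arguments are assumed from the usual constructor and are not shown in these hunks:

```python
# Sketch only: illustrates the documented defaults touched by these docstring fixes.
from mmpretrain.models.backbones.vision_transformer import TransformerEncoderLayer

layer = TransformerEncoderLayer(
    embed_dims=768,              # assumed constructor arguments, not part of the excerpt
    num_heads=12,
    feedforward_channels=3072,
    qkv_bias=True,               # documented default
    act_cfg=dict(type='GELU'),   # documented default: ``dict(type='GELU')``
    norm_cfg=dict(type='LN'),    # documented default: ``dict(type='LN')``
)
```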
diff --git a/mmpretrain/models/backbones/mlp_mixer.py b/mmpretrain/models/backbones/mlp_mixer.py
index af714fea48a..26fb8ce0186 100644
--- a/mmpretrain/models/backbones/mlp_mixer.py
+++ b/mmpretrain/models/backbones/mlp_mixer.py
@@ -28,7 +28,7 @@ class MixerBlock(BaseModule):
        num_fcs (int): The number of fully-connected layers for FFNs. Defaults to 2.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        init_cfg (dict, optional): Initialization config dict.
diff --git a/mmpretrain/models/backbones/t2t_vit.py b/mmpretrain/models/backbones/t2t_vit.py
index 288ef0dc257..a57b95e1fb0 100644
--- a/mmpretrain/models/backbones/t2t_vit.py
+++ b/mmpretrain/models/backbones/t2t_vit.py
@@ -38,7 +38,7 @@ class T2TTransformerLayer(BaseModule):
        qk_scale (float, optional): Override default qk scale of ``(input_dims // num_heads) ** -0.5`` if set. Defaults to None.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        init_cfg (dict, optional): Initialization config dict.
diff --git a/mmpretrain/models/backbones/vision_transformer.py b/mmpretrain/models/backbones/vision_transformer.py
index 2f10d43f081..21572f36b5a 100644
--- a/mmpretrain/models/backbones/vision_transformer.py
+++ b/mmpretrain/models/backbones/vision_transformer.py
@@ -33,7 +33,7 @@ class TransformerEncoderLayer(BaseModule):
        qkv_bias (bool): enable bias for qkv if True. Defaults to True.
        ffn_type (str): Select the type of ffn layers. Defaults to 'origin'.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        init_cfg (dict, optional): Initialization config dict.
diff --git a/mmpretrain/models/backbones/vit_sam.py b/mmpretrain/models/backbones/vit_sam.py
index be1ccf055a3..0eb46a72adf 100644
--- a/mmpretrain/models/backbones/vit_sam.py
+++ b/mmpretrain/models/backbones/vit_sam.py
@@ -235,7 +235,7 @@ class TransformerEncoderLayer(BaseModule):
        Defaults to 2.
        qkv_bias (bool): enable bias for qkv if True. Defaults to True.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        use_rel_pos (bool):Whether to use relative position embedding.
diff --git a/mmpretrain/models/classifiers/image.py b/mmpretrain/models/classifiers/image.py
index a4e9c9bc77e..6d0edd7aed8 100644
--- a/mmpretrain/models/classifiers/image.py
+++ b/mmpretrain/models/classifiers/image.py
@@ -261,5 +261,5 @@ def get_layer_depth(self, param_name: str):
            return self.backbone.get_layer_depth(param_name, 'backbone.')
        else:
            raise NotImplementedError(
-                f"The babckone {type(self.backbone)} doesn't "
+                f"The backbone {type(self.backbone)} doesn't "
                'support `get_layer_depth` by now.')
diff --git a/mmpretrain/models/necks/cae_neck.py b/mmpretrain/models/necks/cae_neck.py
index 2cd2e9271ef..81fc3011136 100644
--- a/mmpretrain/models/necks/cae_neck.py
+++ b/mmpretrain/models/necks/cae_neck.py
@@ -39,7 +39,7 @@ class CAETransformerRegressorLayer(BaseModule):
        layer_scale_init_value (float): The init value of gamma. Defaults to 0.0.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
    """
diff --git a/mmpretrain/models/necks/milan_neck.py b/mmpretrain/models/necks/milan_neck.py
index c142d2007e7..b48b7678723 100644
--- a/mmpretrain/models/necks/milan_neck.py
+++ b/mmpretrain/models/necks/milan_neck.py
@@ -29,7 +29,7 @@ class PromptTransformerEncoderLayer(TransformerEncoderLayer):
        Defaults to 2.
        qkv_bias (bool): Enable bias for qkv if True. Defaults to True.
        act_cfg (dict): The activation config for FFNs.
-            Defaluts to ``dict(type='GELU')``.
+            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='LN')``.
        batch_first (bool): Key, Query and Value are shape of
diff --git a/mmpretrain/models/selfsup/base.py b/mmpretrain/models/selfsup/base.py
index 1dd6e59aa17..9d53a72871d 100644
--- a/mmpretrain/models/selfsup/base.py
+++ b/mmpretrain/models/selfsup/base.py
@@ -175,5 +175,5 @@ def get_layer_depth(self, param_name: str):
            return self.backbone.get_layer_depth(param_name, 'backbone.')
        else:
            raise NotImplementedError(
-                f"The babckone {type(self.backbone)} doesn't "
+                f"The backbone {type(self.backbone)} doesn't "
                'support `get_layer_depth` by now.')