Skip to content

Commit

Permalink
[Enhancement] Change readme style and Update metafiles. (open-mmlab#895)
Browse files Browse the repository at this point in the history
* [Enhancement] Change readme style and prepare for metafiles update.

* Update apcnet github repo url.

* add code snippet.

* split code snippet & official repo.

* update md2yml hook.

* Update metafiles.

* Add converted from attribute.

* process conflict.

* Put default variable value.

* update bisenet v2 metafile.

* checkout to ubuntu environment.

* pop empty attribute & make task attribute list.

* update readme style

* update readme style

* update metafiles

Co-authored-by: Junjun2016 <[email protected]>
  • Loading branch information
clownrat6 and Junjun2016 authored Sep 28, 2021
1 parent d96937a commit 2800d43
Show file tree
Hide file tree
Showing 72 changed files with 3,262 additions and 2,573 deletions.
112 changes: 95 additions & 17 deletions .dev/md2yml.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,25 +9,28 @@
import glob
import os
import os.path as osp
import re
import sys

import mmcv
from lxml import etree

MMSEG_ROOT = osp.dirname(osp.dirname((osp.dirname(__file__))))


def dump_yaml_and_check_difference(obj, filename):
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
"""Dump object to a yaml file, and check if the file content is different
from the original.
Args:
obj (any): The python object to be dumped.
filename (str): YAML filename to dump the object to.
sort_keys (bool): Whether to sort keys in dictionary order.
Returns:
Bool: If the target YAML file is different from the original.
"""

str_dump = mmcv.dump(obj, None, file_format='yaml', sort_keys=True)
str_dump = mmcv.dump(obj, None, file_format='yaml', sort_keys=sort_keys)
if osp.isfile(filename):
file_exists = True
with open(filename, 'r', encoding='utf-8') as f:
Expand All @@ -54,12 +57,29 @@ def parse_md(md_file):
Returns:
Bool: If the target YAML file is different from the original.
"""
collection_name = osp.dirname(md_file).split('/')[-1]
collection_name = osp.split(osp.dirname(md_file))[1]
configs = os.listdir(osp.dirname(md_file))

collection = dict(Name=collection_name, Metadata={'Training Data': []})
collection = dict(
Name=collection_name,
Metadata={'Training Data': []},
Paper={
'URL': '',
'Title': ''
},
README=md_file,
Code={
'URL': '',
'Version': ''
})
collection.update({'Converted From': {'Weights': '', 'Code': ''}})
models = []
datasets = []
paper_url = None
paper_title = None
code_url = None
code_version = None
repo_url = None

with open(md_file, 'r') as md:
lines = md.readlines()
Expand All @@ -70,7 +90,36 @@ def parse_md(md_file):
if len(line) == 0:
i += 1
continue
if line[:3] == '###':
if line[:2] == '# ':
paper_title = line.replace('# ', '')
i += 1
elif line[:3] == '<a ':
content = etree.HTML(line)
node = content.xpath('//a')[0]
if node.text == 'Code Snippet':
code_url = node.get('href', None)
assert code_url is not None, (
f'{collection_name} hasn\'t code snippet url.')
# version extraction
filter_str = r'blob/(.*)/mm'
pattern = re.compile(filter_str)
code_version = pattern.findall(code_url)
assert len(code_version) == 1, (
f'false regular expression ({filter_str}) use.')
code_version = code_version[0]
elif node.text == 'Official Repo':
repo_url = node.get('href', None)
assert repo_url is not None, (
f'{collection_name} hasn\'t official repo url.')
i += 1
elif line[:9] == '<summary ':
content = etree.HTML(line)
nodes = content.xpath('//a')
assert len(nodes) == 1, (
'summary tag should only have single a tag.')
paper_url = nodes[0].get('href', None)
i += 1
elif line[:4] == '### ':
datasets.append(line[4:])
current_dataset = line[4:]
i += 2
Expand Down Expand Up @@ -113,22 +162,28 @@ def parse_md(md_file):
crop_size = els[crop_size_id].split('x')
assert len(crop_size) == 2
model = {
'Name': model_name,
'In Collection': collection_name,
'Name':
model_name,
'In Collection':
collection_name,
'Metadata': {
'backbone': els[backbone_id],
'crop size': f'({crop_size[0]},{crop_size[1]})',
'lr schd': int(els[lr_schd_id]),
},
'Results': {
'Task': 'Semantic Segmentation',
'Dataset': current_dataset,
'Metrics': {
'mIoU': float(els[ss_id]),
'Results': [
{
'Task': 'Semantic Segmentation',
'Dataset': current_dataset,
'Metrics': {
'mIoU': float(els[ss_id]),
},
},
},
'Config': config,
'Weights': weight,
],
'Config':
config,
'Weights':
weight,
}
if fps != -1:
try:
Expand All @@ -152,15 +207,38 @@ def parse_md(md_file):
}]
if mem != -1:
model['Metadata']['memory (GB)'] = float(mem)
# Only have semantic segmentation now
if ms_id and els[ms_id] != '-' and els[ms_id] != '':
model['Results']['Metrics']['mIoU(ms+flip)'] = float(
els[ms_id])
model['Results'][0]['Metrics'][
'mIoU(ms+flip)'] = float(els[ms_id])
models.append(model)
j += 1
i = j
else:
i += 1
flag = (code_url is not None) and (paper_url is not None) and (repo_url
is not None)
assert flag, f'{collection_name} readme error'
collection['Metadata']['Training Data'] = datasets
collection['Code']['URL'] = code_url
collection['Code']['Version'] = code_version
collection['Paper']['URL'] = paper_url
collection['Paper']['Title'] = paper_title
collection['Converted From']['Code'] = repo_url
# ['Converted From']['Weights'] is missing here
# remove empty attribute
check_key_list = ['Code', 'Paper', 'Converted From']
for check_key in check_key_list:
key_list = list(collection[check_key].keys())
for key in key_list:
if check_key not in collection:
break
if collection[check_key][key] == '':
if len(collection[check_key].keys()) == 1:
collection.pop(check_key)
else:
collection[check_key].pop(key)

result = {'Collections': [collection], 'Models': models}
yml_file = f'{md_file[:-9]}{collection_name}.yml'
return dump_yaml_and_check_difference(result, yml_file)
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ repos:
name: update-model-index
description: Collect model information and update model-index.yml
entry: .dev/md2yml.py
additional_dependencies: [mmcv]
additional_dependencies: [mmcv, lxml]
language: python
files: ^configs/.*\.md$
require_serial: true
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ Supported backbones:
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
- [x] [Swin Transformer (ArXiv'2021)](configs/swin)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)

Supported methods:

Expand Down Expand Up @@ -94,7 +95,7 @@ Supported methods:
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CGNet (TIP'2020)](configs/cgnet)
- [x] [SETR (CVPR'2021)](configs/setr)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
- [x] [DPT (ArXiv'2021)](configs/dpt)
- [x] [SegFormer (ArXiv'2021)](configs/segformer)

Supported datasets:
Expand Down
3 changes: 2 additions & 1 deletion README_zh-CN.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 O
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
- [x] [Swin Transformer (ArXiv'2021)](configs/swin)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)

已支持的算法:

Expand Down Expand Up @@ -93,7 +94,7 @@ MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 O
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CGNet (TIP'2020)](configs/cgnet)
- [x] [SETR (CVPR'2021)](configs/setr)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
- [x] [DPT (ArXiv'2021)](configs/dpt)
- [x] [SegFormer (ArXiv'2021)](configs/segformer)

已支持的数据集:
Expand Down
9 changes: 9 additions & 0 deletions configs/ann/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,13 @@

<!-- [ALGORITHM] -->

<a href="https://github.com/MendelXu/ANN">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ann_head.py#L185">Code Snippet</a>

<details>
<summary align="right"><a href="https://arxiv.org/abs/1908.07678">ANN (ICCV'2019)</a></summary>

```latex
@inproceedings{zhu2019asymmetric,
title={Asymmetric non-local neural networks for semantic segmentation},
Expand All @@ -14,6 +21,8 @@
}
```

</details>

## Results and models

### Cityscapes
Expand Down
Loading

0 comments on commit 2800d43

Please sign in to comment.