diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..84bcc52 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,70 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '43 4 * * 5' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
+ # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹī¸ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/README.md b/README.md index 94a4dfa..121ead4 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ This repo is the official implementation of "*[ActiveMLP: An MLP-like Architectu **ActiveMLP** is a general MLP-like visual backbone, which is applicable to image classification, object detection and semantic segmentation tasks. The core operator, Active Token Mixer (`ATM`), actively incorporates contextual information from other tokens in the global scope. It adaptively predicts where to capture useful contexts and learns how to fuse the captured contexts with the origianl information at channel levels. -The ActiveMLP variants achieves `79.7% ~ 83.8%` acc@top1 with model scaled from `15M ~ 76M` on ImageNet-1K. It also shows its superiority on downstream dense prediction tasks. `ActiveMLP-Large` achieves `51.1% mIoU` with UperNet on ADE20K semantic segmentation dataset. +The ActiveMLP variants achieve `79.7% ~ 83.8%` acc@top1 with the models scaled from `15M ~ 76M` on ImageNet-1K. It also shows its superiority on downstream dense prediction tasks. `ActiveMLP-Large` achieves `51.1% mIoU` with UperNet on ADE20K semantic segmentation dataset. 
![ActiveMLP](assets/teaser.png) @@ -21,7 +21,7 @@ The ActiveMLP variants achieves `79.7% ~ 83.8%` acc@top1 with model scaled from ## Usage -The following is the guideline for ActiveMLP on image classification task, the usage on semantic segmentation can be found [here](segmentation/README.md). +The following guideline of ActiveMLP is for image classification; the guideline for semantic segmentation can be found [here](segmentation/README.md). ### Install @@ -38,7 +38,7 @@ pip install -r requirements.txt ### Data preparation -Download the standard ImageNet dataset from [http://image-net.org](http://image-net.org), and construct the data like: +Download the standard ImageNet-1K dataset from [http://image-net.org](http://image-net.org), and construct the data like: ```bash ImageNet_Root ├── train