
chore(deps-dev): bump eslint-plugin-n from 17.11.1 to 17.12.0 #14112

Workflow file for this run

name: Performance

on:
  pull_request:
    paths:
      - client/src/**
      - .github/workflows/performance.yml
      - package.json
      - yarn.lock

jobs:
  lighthouse:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Needed because of
          # https://github.com/GoogleChrome/lighthouse-ci/issues/172
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/checkout@v4
        with:
          repository: mdn/content
          path: mdn/content
      - name: Setup Node.js environment
        uses: actions/setup-node@v4
        with:
          node-version-file: ".nvmrc"
          cache: yarn
      - name: Cache @vscode/ripgrep bin
        uses: actions/cache@v4
        with:
          key: vscode-ripgrep-bin-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }}
          path: node_modules/@vscode/ripgrep/bin/
      - name: Install all yarn packages
        run: yarn --frozen-lockfile
        env:
          # https://github.com/microsoft/vscode-ripgrep#github-api-limit-note
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build select important pages
        env:
          # Remember, the mdn/content repo got cloned into a sub-folder
          # of `pwd` called "mdn/content".
          CONTENT_ROOT: "${{ github.workspace }}/mdn/content/files"
          # Make sure it's set to something so that the build uses the
          # Google Analytics tag, which is most realistic.
          BUILD_GOOGLE_ANALYTICS_MEASUREMENT_ID: G-XXXXXXXX
        run: |
          yarn build:prepare
          # BUILD_FOLDERSEARCH=mdn/kitchensink yarn build:docs
          BUILD_FOLDERSEARCH=web/javascript/reference/global_objects/array/foreach yarn build:docs
          yarn render:html
      - name: Serve and lhci
        env:
          LHCI_GITHUB_APP_TOKEN: ${{ secrets.LHCI_GITHUB_APP_TOKEN }}
        run: |
          yarn start:static-server &
          sleep 1
          curl --retry-connrefused --retry 5 \
            http://localhost:5042/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/forEach/ > /dev/null
          npm install -g @lhci/cli@next
          # Note: we can supply multiple `--collect.url=...` to have lhci test
          # a variety of URLs and then assume an average (or median?) across them.
          # All options for "collect" are listed here:
          # https://github.com/GoogleChrome/lighthouse-ci/blob/master/docs/configuration.md#collect
          lhci autorun \
            --upload.target=temporary-public-storage \
            --collect.url="http://localhost:5042/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/forEach/"
          # TODO (as of Oct 2020)
          # Once our Lighthouse score starts to not be so terrible, we can start
          # adding assertions here.
          # See https://github.com/GoogleChrome/lighthouse-ci/blob/master/docs/configuration.md#categories
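
For the TODO above, a minimal sketch of what such assertions could look like, following the lighthouse-ci configuration docs linked in the workflow. The score thresholds are placeholder values (not proposed by this PR), and `lighthouserc.json` is one of the config file locations lhci reads:

    {
      "ci": {
        "assert": {
          "assertions": {
            "categories:performance": ["warn", { "minScore": 0.5 }],
            "categories:accessibility": ["error", { "minScore": 0.9 }]
          }
        }
      }
    }

Alternatively, lhci ships assertion presets, e.g. `lhci autorun --assert.preset=lighthouse:recommended`, which could serve as a starting point before tuning per-category thresholds.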