fleet/.github/workflows/deploy-vulnerability-dashboard.yml

name: Deploy vulnerability dashboard to Heroku
on:
  push:
    branches: [ main ]
    paths:
      - 'ee/vulnerability-dashboard/**'
permissions:
  contents: read
jobs:
  build:
    permissions:
      contents: write # needed so the deploy steps below can `git push`
    if: ${{ github.repository == 'fleetdm/fleet' }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        node-version: [14.x]
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
        with:
          egress-policy: audit
      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
      # Configure our access credentials for the Heroku CLI
      - uses: akhileshns/heroku-deploy@79ef2ae4ff9b897010907016b268fd0f88561820 # v3.6.8
        with:
          heroku_api_key: ${{ secrets.HEROKU_API_TOKEN_FOR_BOT_USER }}
          heroku_app_name: "" # this has to be blank or it doesn't work
          heroku_email: ${{ secrets.HEROKU_EMAIL_FOR_BOT_USER }}
          justlogin: true
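      # > With `justlogin: true`, the step above only authenticates the Heroku CLI
      # > (hence the blank `heroku_app_name`); the actual deploy happens via the
      # > `git push` further down. The `auth:whoami` that follows is just a sanity
      # > check that the login actually took.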
      - run: heroku auth:whoami
      # Set the Node.js version
      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
        with:
          node-version: ${{ matrix.node-version }}
      # Now start building!
      # > …but first, get a little crazy for a sec and delete the top-level package.json file,
      # > i.e. the one used by the Fleet server. This is because require() in Node will go
      # > hunting in ancestral directories for missing dependencies, and since some of the
      # > bundled transpiler tasks sniff for package availability using require(), this trips
      # > up when it encounters another Node universe in the parent directory.
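      # > (For example, if require('some-package') misses in
      # > ee/vulnerability-dashboard/node_modules/, Node keeps walking up the tree
      # > and could resolve it against the Fleet server's dependencies at the repo
      # > root instead. The module name is illustrative; it's the upward-walking
      # > resolution order that bites us.)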
      - run: rm -rf package.json package-lock.json node_modules/
      # > Turns out there's a similar issue with how ESLint plugins are looked up, so we
      # > delete the top-level .eslintrc.js file too.
      - run: rm -f .eslintrc.js
      # > And, as a change to the top-level fleetdm/fleet .gitignore on May 2, 2022 revealed,
      # > we need to delete the top-level .gitignore file too, so that its rules don't
      # > interfere with the committing and force-pushing we're doing as part of our deploy
      # > script here. For more info, see: https://github.com/fleetdm/fleet/pull/5549
      - run: rm -f .gitignore
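      # > (Ignore rules apply to `git add`, so a root-level rule could silently
      # > exclude the freshly-built assets we commit below.)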
      # Get dependencies (including dev deps)
      - run: cd ee/vulnerability-dashboard/ && npm install
      # Run sanity checks
      - run: cd ee/vulnerability-dashboard/ && npm test
      # Compile assets
      - run: cd ee/vulnerability-dashboard/ && npm run build-for-prod
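      # > `build-for-prod` writes the minified assets into ee/vulnerability-dashboard/.www,
      # > which is what we commit and push to Heroku below.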
      # Commit newly-built assets locally so we can push them to Heroku below.
      # (This commit will never be pushed to GitHub, only to Heroku.)
      # > The local config flags make this work in GitHub's environment.
      - run: git add ee/vulnerability-dashboard/.www
      - run: git -c "user.name=GitHub" -c "user.email=github@example.com" commit -am 'AUTOMATED COMMIT - Deployed the latest, including modified HTML layouts and .sailsrc file that reference minified assets.'
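      # > Note: `commit -am` only stages files git already tracks, so the separate
      # > `git add` above is what picks up any brand-new build artifacts in .www.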
      # Configure the Heroku app we'll be deploying to
      - run: heroku git:remote -a vulnerability-dashboard
      - run: git remote -v
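      # > `heroku git:remote -a vulnerability-dashboard` added a git remote named
      # > "heroku" pointing at that app's git URL; `git remote -v` just prints the
      # > remotes so the deploy target is visible in the logs.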
      # Deploy to Heroku (by pushing)
      # > Since a shallow clone was grabbed, we have to "unshallow" it before force-pushing.
      - run: echo "Unshallowing local repository…"
      - run: git fetch --prune --unshallow
      - run: echo "Deploying branch '${GITHUB_REF##*/}' to Heroku…"
      - run: git push heroku +${GITHUB_REF##*/}:master
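      # > `${GITHUB_REF##*/}` strips everything up to the last "/", so e.g.
      # > "refs/heads/main" becomes "main". The "+" in the refspec makes this a
      # > force push, overwriting whatever is on Heroku's master branch.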
      - name: 🌐 The dashboard has been deployed
        run: echo '' && echo '--' && echo 'OK, done. It should be live momentarily.' && echo '(if you get impatient, check the Heroku dashboard for status)'