Mirror of https://github.com/luau-lang/luau.git, synced 2024-12-12 13:00:38 +00:00
Remove old benchmark-dev workflows
Nobody is maintaining this, we haven't really used it, and we are unlikely to start, given the high degree of noise and the lack of dedicated machines for this setup. Callgrind has worked well enough for us, with additional profiling performed locally by engineers; this is not perfect, but it doesn't look like we have a path to making it better.
Parent: 3f478bb439
Commit: bd229816c0
2 changed files with 0 additions and 248 deletions
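For context on the Callgrind approach mentioned in the commit message: instruction-count profiling is far less sensitive to noisy shared runners than the wall-clock timings this workflow collected, which is why no dedicated machines are needed. The step below is only an illustrative sketch, not taken from the repository; the `./luau` binary name follows the CLI target built in the deleted workflow, and `bench/tests/example.lua` is a placeholder path.

  - name: Measure a benchmark under Callgrind (illustrative sketch, not from this repo)
    run: |
      # Record executed instruction counts instead of wall-clock time; counts are
      # largely independent of machine load.
      valgrind --tool=callgrind --callgrind-out-file=callgrind.out \
        ./luau bench/tests/example.lua   # placeholder benchmark script
      # Summarize the hottest functions from the recorded profile.
      callgrind_annotate callgrind.out | head -n 30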
.github/workflows/benchmark-dev.yml (vendored): 185 lines deleted
@@ -1,185 +0,0 @@
name: benchmark-dev

on:
  push:
    branches:
      - master

    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"

jobs:
  windows:
    name: windows-${{matrix.arch}}
    strategy:
      fail-fast: false
      matrix:
        os: [windows-latest]
        arch: [Win32, x64]
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        shell: bash # necessary for fail-fast
        run: |
          mkdir build && cd build
          cmake .. -DCMAKE_BUILD_TYPE=Release
          cmake --build . --target Luau.Repl.CLI --config Release
          cmake --build . --target Luau.Analyze.CLI --config Release

      - name: Move build files to root
        run: |
          move build/Release/* .

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 2)
        id: pushBenchmarkAttempt2
        continue-on-error: true
        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 3)
        id: pushBenchmarkAttempt3
        continue-on-error: true
        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

  unix:
    name: ${{matrix.os}}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04, macos-latest]
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 2)
        id: pushBenchmarkAttempt2
        continue-on-error: true
        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 3)
        id: pushBenchmarkAttempt3
        continue-on-error: true
        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
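The three "Push benchmark results" steps in each job above form a small retry chain: every attempt sets continue-on-error so a failed push does not fail the job, and each later attempt runs only when the previous step's outcome was 'failure', presumably to tolerate transient errors such as the Windows and Unix jobs racing to push to the same results branch. A stripped-down sketch of the same pattern, with hypothetical step ids and the inputs omitted:

  - id: attempt1
    continue-on-error: true                  # a failed push does not fail the job
    uses: ./.github/workflows/push-results   # inputs as shown in the steps above
  - id: attempt2
    if: steps.attempt1.outcome == 'failure'  # retry only if the first attempt failed
    continue-on-error: true
    uses: ./.github/workflows/push-results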
.github/workflows/push-results/action.yml (vendored): 63 lines deleted
@@ -1,63 +0,0 @@
name: Checkout & push results
description: Checkout a given repo and push results to GitHub
inputs:
  repository:
    required: true
    type: string
    description: The benchmark results repository to check out
  branch:
    required: true
    type: string
    description: The benchmark results repository's branch to check out
  token:
    required: true
    type: string
    description: The GitHub token to use for pushing results
  path:
    required: true
    type: string
    description: The path to check out the results repository to
  bench_name:
    required: true
    type: string
  bench_tool:
    required: true
    type: string
  bench_output_file_path:
    required: true
    type: string
  bench_external_data_json_path:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Checkout repository
      uses: actions/checkout@v3
      with:
        repository: ${{ inputs.repository }}
        ref: ${{ inputs.branch }}
        token: ${{ inputs.token }}
        path: ${{ inputs.path }}

    - name: Store results
      uses: Roblox/rhysd-github-action-benchmark@v-luau
      with:
        name: ${{ inputs.bench_name }}
        tool: ${{ inputs.bench_tool }}
        gh-pages-branch: ${{ inputs.branch }}
        output-file-path: ${{ inputs.bench_output_file_path }}
        external-data-json-path: ${{ inputs.bench_external_data_json_path }}

    - name: Push benchmark results
      shell: bash
      run: |
        echo "Pushing benchmark results..."
        cd gh-pages
        git config user.name github-actions
        git config user.email github@users.noreply.github.com
        git add *.json
        git commit -m "Add benchmarks results for ${{ github.sha }}"
        git push
        cd ..
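Because this composite action is referenced by a local path, the calling workflow has to check out the Luau repository before the `uses: ./.github/workflows/push-results` step can resolve; benchmark-dev.yml above does that in its first step. A minimal consumer sketch follows, with input values that mirror the matrix defaults above rather than anything prescribed here (the JSON path in particular is a placeholder):

  steps:
    - uses: actions/checkout@v3                  # local actions resolve against the checked-out tree
    - uses: ./.github/workflows/push-results
      with:
        repository: luau-lang/benchmark-data
        branch: main
        token: ${{ secrets.BENCH_GITHUB_TOKEN }}
        path: "./gh-pages"
        bench_name: "Luau Benchmarks"
        bench_tool: "benchmarkluau"
        bench_output_file_path: "./run-benchmarks-output.txt"
        bench_external_data_json_path: "./gh-pages/dev/bench/data-example.json"   # placeholder file name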