mirror of https://github.com/luau-lang/luau.git
synced 2025-05-04 10:33:46 +01:00

build: added retry attempts to benchmark workflow

This commit is contained in:
parent eafeb3a340
commit 8d1a6b5427

1 changed file with 292 additions and 170 deletions:
.github/workflows/benchmark-dev.yml (462 lines changed)
@@ -64,200 +64,322 @@ jobs:

Context (unchanged, windows job):

          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

Removed: the previous push step, which wrapped the local composite action in nick-fields/retry@v2.7.0:

      - name: Push benchmark results
        if: github.event_name == 'push'
        uses: nick-fields/retry@v2.7.0
        with:
          max_attempts: 5
          retry_on: error
          retry_wait_seconds: 60
          timeout_minutes: 5
          command:
            uses: ./.github/workflows/push-results
            with:
              repository: ${{ matrix.benchResultsRepo.name }}
              branch: ${{ matrix.benchResultsRepo.branch }}
              token: ${{ secrets.BENCH_GITHUB_TOKEN }}
              path: "./gh-pages"
              bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
              bench_tool: "benchmarkluau"
              bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
              bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

Added: three explicit attempts that call the composite action directly. Attempt 1:

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
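For reference, nick-fields/retry executes the string given in its command input as a shell command, so nesting a uses:/with: action invocation under command: (as the removed step did) presumably never worked as intended, which would explain the switch to explicit attempts. A minimal sketch of how that retry action is normally used; the push-results.sh script name is a made-up placeholder, not part of this repository:

      - name: Push benchmark results (shell-command retry, illustrative only)
        uses: nick-fields/retry@v2.7.0
        with:
          max_attempts: 5
          retry_on: error
          retry_wait_seconds: 60
          timeout_minutes: 5
          # the retried work must be expressible as a shell command;
          # push-results.sh is a hypothetical script, not a file in this repo
          command: bash ./scripts/push-results.sh "${{ secrets.BENCH_GITHUB_TOKEN }}"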
Attempts 2 and 3 repeat the same with: block and only run if the previous attempt failed:

      - name: Push benchmark results (Attempt 2)
        id: pushBenchmarkAttempt2
        continue-on-error: true
        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 3)
        id: pushBenchmarkAttempt3
        continue-on-error: true
        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

(The removed side of this part of the hunk is the start of the previously commented-out unix job definition; that job is re-added in uncommented form below.)
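The chaining relies on two GitHub Actions behaviors: continue-on-error: true keeps a failed attempt from failing the job, while steps.<id>.outcome still reports 'failure' for that step (outcome is evaluated before continue-on-error is applied), so the next attempt's if: condition fires only when the previous one failed. A minimal, self-contained sketch of the same pattern with placeholder commands, not part of this workflow:

    name: retry-pattern-sketch
    on: workflow_dispatch

    jobs:
      demo:
        runs-on: ubuntu-latest
        steps:
          - name: Do the thing
            id: attempt1
            continue-on-error: true             # a failure here does not fail the job
            run: ./do-the-thing.sh              # hypothetical command

          - name: Do the thing (Attempt 2)
            id: attempt2
            continue-on-error: true
            if: steps.attempt1.outcome == 'failure'   # outcome is the pre-continue-on-error result
            run: ./do-the-thing.sh

          - name: Do the thing (Attempt 3)
            continue-on-error: true
            if: steps.attempt2.outcome == 'failure'
            run: ./do-the-thing.sh

One consequence worth noting: because the final attempt also sets continue-on-error: true, the job would still report success even if every attempt fails; dropping that key on the last attempt would surface a total failure instead.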
Added: the unix job is re-enabled. It previously existed only as commented-out lines on the removed side of this hunk, together with the old "Checkout Benchmark Results repository" step and the rhysd-github-action-benchmark "Store result" steps, whose work is now done by calls to ./.github/workflows/push-results:

  unix:
    name: ${{matrix.os}}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

      - name: Install valgrind
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get install valgrind

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        if: matrix.os == 'ubuntu-latest'
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
        if: matrix.os == 'ubuntu-latest'
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
The benchmark results are then pushed with the same three-attempt pattern (bench_name is ${{ matrix.bench.title }} here, without the platform suffix):

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

Attempts 2 and 3 ("Push benchmark results (Attempt 2)" / "(Attempt 3)", ids pushBenchmarkAttempt2 / pushBenchmarkAttempt3) pass the same inputs and are guarded by if: steps.pushBenchmarkAttempt1.outcome == 'failure' and if: steps.pushBenchmarkAttempt2.outcome == 'failure' respectively.

(The removed side in this part of the hunk is the remainder of the commented-out block: the old "Store result (CacheGrind)" step using Roblox/rhysd-github-action-benchmark, the shell-based git add/commit/push step, and the start of the commented-out static-analysis job.)
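The composite action itself is not part of this diff. Judging from the inputs passed above and from the commented-out steps it replaces (a checkout of the results repository followed by Roblox/rhysd-github-action-benchmark), its action.yml plausibly looks something like the sketch below; the file layout, descriptions, and step list are assumptions, not the actual contents of ./.github/workflows/push-results:

    # .github/workflows/push-results/action.yml -- hypothetical sketch, not from this commit
    name: Push benchmark results
    inputs:
      repository: { description: "Results repository", required: true }
      branch: { description: "Results branch", required: true }
      token: { description: "Token with push access", required: true }
      path: { description: "Checkout path for the results repo", required: true }
      bench_name: { description: "Benchmark display name", required: true }
      bench_tool: { description: "Benchmark output format", required: true }
      bench_output_file_path: { description: "Raw benchmark output", required: true }
      bench_external_data_json_path: { description: "Aggregated data file", required: true }
    runs:
      using: composite
      steps:
        # check out the benchmark-data repository into the requested path
        - uses: actions/checkout@v3
          with:
            repository: ${{ inputs.repository }}
            ref: ${{ inputs.branch }}
            token: ${{ inputs.token }}
            path: ${{ inputs.path }}
        # store the new results; the real action may also commit and push the updated data file
        - uses: Roblox/rhysd-github-action-benchmark@v-luau
          with:
            name: ${{ inputs.bench_name }}
            tool: ${{ inputs.bench_tool }}
            output-file-path: ${{ inputs.bench_output_file_path }}
            external-data-json-path: ${{ inputs.bench_external_data_json_path }}
            github-token: ${{ inputs.token }}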
Cachegrind results are pushed the same way, but only on Linux and with the "roblox" tool:

      - name: Push Cachegrind benchmark results
        if: matrix.os == 'ubuntu-latest'
        id: pushBenchmarkCachegrindAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }} (CacheGrind)
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

Attempts 2 and 3 ("Push Cachegrind benchmark results (Attempt 2)" / "(Attempt 3)", ids pushBenchmarkCachegrindAttempt2 / pushBenchmarkCachegrindAttempt3) pass the same inputs and are guarded by if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt1.outcome == 'failure' and if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt2.outcome == 'failure' respectively.
Added: the static-analysis job is likewise re-enabled:

  static-analysis:
    name: luau-analyze
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        bench:
          - {
              script: "run-analyze",
              timeout: 12,
              title: "Luau Analyze",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
        with:
          token: "${{ secrets.BENCH_GITHUB_TOKEN }}"

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v4
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Install valgrind
        run: |
          sudo apt-get install valgrind
      - name: Run Luau Analyze on static file
        run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

(The removed side in this part of the hunk is the rest of the commented-out static-analysis job, including its old Store/Push steps with gh-pages-branch: "main".)
Added: the analysis results are pushed with the same three-attempt pattern, using the "roblox" tool:

      - name: Push static analysis results
        id: pushStaticAnalysisAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

Attempts 2 and 3 ("Push static analysis results (Attempt 2)" / "(Attempt 3)", ids pushStaticAnalysisAttempt2 / pushStaticAnalysisAttempt3) pass the same inputs and are guarded by if: steps.pushStaticAnalysisAttempt1.outcome == 'failure' and if: steps.pushStaticAnalysisAttempt2.outcome == 'failure' respectively.
Added: the Cachegrind variant of the analysis results gets the same treatment, gated on ubuntu-latest. Note that the third step is mislabeled "(Attempt 2)" in the commit, although its id and guard make it the third attempt:

      - name: Push static analysis Cachegrind results
        if: matrix.os == 'ubuntu-latest'
        id: pushStaticAnalysisCachegrindAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

The two follow-up steps (ids pushStaticAnalysisCachegrindAttempt2 and pushStaticAnalysisCachegrindAttempt3) pass the same inputs and are guarded by if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt1.outcome == 'failure' and if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt2.outcome == 'failure' respectively.