Mirror of https://github.com/luau-lang/luau.git, synced 2025-05-04 10:33:46 +01:00
build: added retry attempts to benchmark workflow
commit 8d1a6b5427 (parent eafeb3a340)
1 changed file with 292 additions and 170 deletions
.github/workflows/benchmark-dev.yml (vendored): 462 changes
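
The retry scheme used throughout the hunk below is the same in every job: each push step gets an id and continue-on-error: true, and the next attempt only runs when the previous step's outcome is 'failure'. A minimal sketch of that pattern, with a hypothetical local action ./.github/actions/do-push standing in for the real push step, looks like this:

steps:
  # Attempt 1 never blocks the job; its raw result is recorded under its id.
  - name: Push results
    id: pushAttempt1
    continue-on-error: true
    uses: ./.github/actions/do-push   # hypothetical placeholder action

  # Attempt 2 runs only if attempt 1 failed ('outcome' is the result before
  # continue-on-error is applied, so it still reads 'failure' here).
  - name: Push results (Attempt 2)
    id: pushAttempt2
    continue-on-error: true
    if: steps.pushAttempt1.outcome == 'failure'
    uses: ./.github/actions/do-push   # hypothetical placeholder action

  # Attempt 3 is the last try; with continue-on-error the job succeeds either way.
  - name: Push results (Attempt 3)
    continue-on-error: true
    if: steps.pushAttempt2.outcome == 'failure'
    uses: ./.github/actions/do-push   # hypothetical placeholder action

Because every attempt sets continue-on-error: true, the job is reported green even if all attempts fail; the commit appears to accept that trade-off so that transient push errors do not fail the benchmark run.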
@@ -64,200 +64,322 @@ jobs:
           python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
 
       - name: Push benchmark results
-        if: github.event_name == 'push'
-        uses: nick-fields/retry@v2.7.0
-        with:
-          max_attempts: 5
-          retry_on: error
-          retry_wait_seconds: 60
-          timeout_minutes: 5
-          command:
-            uses: ./.github/workflows/push-results
-            with:
-              repository: ${{ matrix.benchResultsRepo.name }}
-              branch: ${{ matrix.benchResultsRepo.branch }}
-              token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-              path: "./gh-pages"
-              bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
-              bench_tool: "benchmarkluau"
-              bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
-              bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+        id: pushBenchmarkAttempt1
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push benchmark results (Attempt 2)
+        id: pushBenchmarkAttempt2
+        continue-on-error: true
+        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push benchmark results (Attempt 3)
+        id: pushBenchmarkAttempt3
+        continue-on-error: true
+        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
 
-# unix:
-# name: ${{matrix.os}}
-# strategy:
-# fail-fast: false
-# matrix:
-# os: [ubuntu-latest, macos-latest]
-# bench:
-# - {
-# script: "run-benchmarks",
-# timeout: 12,
-# title: "Luau Benchmarks",
-# cachegrindTitle: "Performance",
-# cachegrindIterCount: 20,
-# }
-# benchResultsRepo:
-# - { name: "luau-lang/benchmark-data", branch: "main" }
-# runs-on: ${{ matrix.os }}
-# steps:
-# - name: Checkout Luau repository
-# uses: actions/checkout@v3
-# - name: Build Luau
-# run: make config=release luau luau-analyze
-# - uses: actions/setup-python@v3
-# with:
-# python-version: "3.9"
-# architecture: "x64"
-# - name: Install python dependencies
-# run: |
-# python -m pip install requests
-# python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
-# - name: Run benchmark
-# run: |
-# python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
-# - name: Install valgrind
-# if: matrix.os == 'ubuntu-latest'
-# run: |
-# sudo apt-get install valgrind
-# - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
-# if: matrix.os == 'ubuntu-latest'
-# run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt
-# - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
-# if: matrix.os == 'ubuntu-latest'
-# run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
-# - name: Checkout Benchmark Results repository
-# uses: actions/checkout@v3
-# with:
-# repository: ${{ matrix.benchResultsRepo.name }}
-# ref: ${{ matrix.benchResultsRepo.branch }}
-# token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# path: "./gh-pages"
-# - name: Store ${{ matrix.bench.title }} result
-# uses: Roblox/rhysd-github-action-benchmark@v-luau
-# with:
-# name: ${{ matrix.bench.title }}
-# tool: "benchmarkluau"
-# output-file-path: ./${{ matrix.bench.script }}-output.txt
-# external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
-# github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# - name: Store ${{ matrix.bench.title }} result (CacheGrind)
-# if: matrix.os == 'ubuntu-latest'
-# uses: Roblox/rhysd-github-action-benchmark@v-luau
-# with:
-# name: ${{ matrix.bench.title }} (CacheGrind)
-# tool: "roblox"
-# output-file-path: ./${{ matrix.bench.script }}-output.txt
-# external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
-# github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# - name: Push benchmark results
-# if: github.event_name == 'push'
-# run: |
-# echo "Pushing benchmark results..."
-# cd gh-pages
-# git config user.name github-actions
-# git config user.email github@users.noreply.github.com
-# git add ./dev/bench/data-${{ matrix.os }}.json
-# git commit -m "Add benchmarks results for ${{ github.sha }}"
-# git push
-# cd ..
+  unix:
+    name: ${{matrix.os}}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest]
+        bench:
+          - {
+              script: "run-benchmarks",
+              timeout: 12,
+              title: "Luau Benchmarks",
+              cachegrindTitle: "Performance",
+              cachegrindIterCount: 20,
+            }
+        benchResultsRepo:
+          - { name: "luau-lang/benchmark-data", branch: "main" }
+
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: Checkout Luau repository
+        uses: actions/checkout@v3
+
+      - name: Build Luau
+        run: make config=release luau luau-analyze
+
+      - uses: actions/setup-python@v3
+        with:
+          python-version: "3.9"
+          architecture: "x64"
+
+      - name: Install python dependencies
+        run: |
+          python -m pip install requests
+          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
+
+      - name: Run benchmark
+        run: |
+          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
+
+      - name: Install valgrind
+        if: matrix.os == 'ubuntu-latest'
+        run: |
+          sudo apt-get install valgrind
+
+      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
+        if: matrix.os == 'ubuntu-latest'
+        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt
+
+      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
+        if: matrix.os == 'ubuntu-latest'
+        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
+
+      - name: Push benchmark results
+        id: pushBenchmarkAttempt1
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push benchmark results (Attempt 2)
+        id: pushBenchmarkAttempt2
+        continue-on-error: true
+        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push benchmark results (Attempt 3)
+        id: pushBenchmarkAttempt3
+        continue-on-error: true
+        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "benchmarkluau"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push Cachegrind benchmark results
+        if: matrix.os == 'ubuntu-latest'
+        id: pushBenchmarkCachegrindAttempt1
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }} (CacheGrind)
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push Cachegrind benchmark results (Attempt 2)
+        if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt1.outcome == 'failure'
+        id: pushBenchmarkCachegrindAttempt2
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }} (CacheGrind)
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push Cachegrind benchmark results (Attempt 3)
+        if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt2.outcome == 'failure'
+        id: pushBenchmarkCachegrindAttempt3
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }} (CacheGrind)
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
 
-# static-analysis:
-# name: luau-analyze
-# strategy:
-# fail-fast: false
-# matrix:
-# os: [ubuntu-latest]
-# bench:
-# - {
-# script: "run-analyze",
-# timeout: 12,
-# title: "Luau Analyze",
-# cachegrindTitle: "Performance",
-# cachegrindIterCount: 20,
-# }
-# benchResultsRepo:
-# - { name: "luau-lang/benchmark-data", branch: "main" }
-# runs-on: ${{ matrix.os }}
-# steps:
-# - uses: actions/checkout@v3
-# with:
-# token: "${{ secrets.BENCH_GITHUB_TOKEN }}"
-# - name: Build Luau
-# run: make config=release luau luau-analyze
-# - uses: actions/setup-python@v4
-# with:
-# python-version: "3.9"
-# architecture: "x64"
-# - name: Install python dependencies
-# run: |
-# sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
-# - name: Install valgrind
-# run: |
-# sudo apt-get install valgrind
-# - name: Run Luau Analyze on static file
-# run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt
-# - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
-# run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
-# - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
-# run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
-# - name: Checkout Benchmark Results repository
-# uses: actions/checkout@v3
-# with:
-# repository: ${{ matrix.benchResultsRepo.name }}
-# ref: ${{ matrix.benchResultsRepo.branch }}
-# token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# path: "./gh-pages"
-# - name: Store ${{ matrix.bench.title }} result
-# uses: Roblox/rhysd-github-action-benchmark@v-luau
-# with:
-# name: ${{ matrix.bench.title }}
-# tool: "benchmarkluau"
-# gh-pages-branch: "main"
-# output-file-path: ./${{ matrix.bench.script }}-output.txt
-# external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
-# github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# - name: Store ${{ matrix.bench.title }} result (CacheGrind)
-# uses: Roblox/rhysd-github-action-benchmark@v-luau
-# with:
-# name: ${{ matrix.bench.title }}
-# tool: "roblox"
-# gh-pages-branch: "main"
-# output-file-path: ./${{ matrix.bench.script }}-output.txt
-# external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
-# github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
-# - name: Push benchmark results
-# if: github.event_name == 'push'
-# run: |
-# echo "Pushing benchmark results..."
-# cd gh-pages
-# git config user.name github-actions
-# git config user.email github@users.noreply.github.com
-# git add ./dev/bench/data-${{ matrix.os }}.json
-# git commit -m "Add benchmarks results for ${{ github.sha }}"
-# git push
-# cd ..
+  static-analysis:
+    name: luau-analyze
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+        bench:
+          - {
+              script: "run-analyze",
+              timeout: 12,
+              title: "Luau Analyze",
+              cachegrindTitle: "Performance",
+              cachegrindIterCount: 20,
+            }
+        benchResultsRepo:
+          - { name: "luau-lang/benchmark-data", branch: "main" }
+
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          token: "${{ secrets.BENCH_GITHUB_TOKEN }}"
+
+      - name: Build Luau
+        run: make config=release luau luau-analyze
+
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.9"
+          architecture: "x64"
+
+      - name: Install python dependencies
+        run: |
+          sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
+
+      - name: Install valgrind
+        run: |
+          sudo apt-get install valgrind
+
+      - name: Run Luau Analyze on static file
+        run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt
+
+      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
+        run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
+
+      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
+        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
+
+      - name: Push static analysis results
+        id: pushStaticAnalysisAttempt1
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push static analysis results (Attempt 2)
+        if: steps.pushStaticAnalysisAttempt1.outcome == 'failure'
+        id: pushStaticAnalysisAttempt2
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push static analysis results (Attempt 3)
+        if: steps.pushStaticAnalysisAttempt2.outcome == 'failure'
+        id: pushStaticAnalysisAttempt3
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push static analysis Cachegrind results
+        if: matrix.os == 'ubuntu-latest'
+        id: pushStaticAnalysisCachegrindAttempt1
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push static analysis Cachegrind results (Attempt 2)
+        if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt1.outcome == 'failure'
+        id: pushStaticAnalysisCachegrindAttempt2
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
+
+      - name: Push static analysis Cachegrind results (Attempt 2)
+        if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt2.outcome == 'failure'
+        id: pushStaticAnalysisCachegrindAttempt3
+        continue-on-error: true
+        uses: ./.github/workflows/push-results
+        with:
+          repository: ${{ matrix.benchResultsRepo.name }}
+          branch: ${{ matrix.benchResultsRepo.branch }}
+          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
+          path: "./gh-pages"
+          bench_name: ${{ matrix.bench.title }}
+          bench_tool: "roblox"
+          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
+          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
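
The ./.github/workflows/push-results reference is a local composite action whose definition is not touched by this commit. Judging only from the parameters the steps above pass to it, its action.yml presumably declares inputs along these lines (a sketch under that assumption, not the actual file):

name: push-results   # assumed metadata, not taken from this commit
inputs:
  repository:                    { description: "Results repository", required: true }
  branch:                        { description: "Results branch", required: true }
  token:                         { description: "Token used to push", required: true }
  path:                          { description: "Checkout path for the results repo", required: true }
  bench_name:                    { description: "Benchmark display name", required: true }
  bench_tool:                    { description: "Benchmark output format", required: true }
  bench_output_file_path:        { description: "Raw benchmark output file", required: true }
  bench_external_data_json_path: { description: "Aggregated results JSON", required: true }
runs:
  using: "composite"
  steps:
    - shell: bash
      run: echo "placeholder; the real action checks out and pushes ${{ inputs.bench_name }} results"  # hypothetical body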