|
11 | 11 | iterations:
|
12 | 12 | description: 'number of iterations in the benchmark'
|
13 | 13 | type: number
|
14 |
| - default: 3 |
| 14 | + default: 10 |
15 | 15 | required: false
|
16 | 16 | iteration-time:
|
17 | 17 | description: 'duration of individual iteration in benchmark'
|
|
25 | 25 | required: false
|
26 | 26 | schedule:
|
27 | 27 | - cron: "0 2 * * 1"
|
| 28 | + push: |
| 29 | + branches: |
| 30 | + - main |
| 31 | + pull_request: |
| 32 | + |
| 33 | +env: |
| 34 | + REPORT_FORMAT: ${{ (github.event_name == 'push' || github.event_name == 'pull_request') && 'json' || 'csv' }} |
| 35 | + |
| 36 | +concurrency: |
| 37 | + cancel-in-progress: true |
| 38 | + group: bench-${{ github.event_name }}-${{ github.event.pull_request.number || github.event.after }} |
28 | 39 |
|
29 | 40 | jobs:
|
30 |
| - check-linux: |
31 |
| - uses: ./.github/workflows/platform-benchmark.yml |
32 |
| - with: |
33 |
| - run-on: ubuntu-latest |
34 |
| - warmups: ${{ inputs.warmups }} |
35 |
| - iterations: ${{ inputs.iterations }} |
36 |
| - iteration-time: ${{ inputs.iteration-time }} |
37 |
| - iteration-time-unit: ${{ inputs.iteration-time-unit }} |
38 |
| - check-macos: |
39 |
| - uses: ./.github/workflows/platform-benchmark.yml |
40 |
| - with: |
41 |
| - run-on: macos-latest |
42 |
| - additional-task: "-x :benchmark:jvmBenchmark" |
43 |
| - warmups: ${{ inputs.warmups }} |
44 |
| - iterations: ${{ inputs.iterations }} |
45 |
| - iteration-time: ${{ inputs.iteration-time }} |
46 |
| - iteration-time-unit: ${{ inputs.iteration-time-unit }} |
47 |
| - check-windows: |
48 |
| - uses: ./.github/workflows/platform-benchmark.yml |
49 |
| - with: |
50 |
| - run-on: windows-latest |
51 |
| - additional-task: "-x :benchmark:jvmBenchmark" |
52 |
| - warmups: ${{ inputs.warmups }} |
53 |
| - iterations: ${{ inputs.iterations }} |
54 |
| - iteration-time: ${{ inputs.iteration-time }} |
55 |
| - iteration-time-unit: ${{ inputs.iteration-time-unit }} |
| 41 | + benchmark-matrix: |
| 42 | + strategy: |
| 43 | + matrix: |
| 44 | + include: |
| 45 | + # - os: ubuntu-latest |
| 46 | + # additional-task: '' |
| 47 | + - os: macos-latest |
| 48 | + additional-task: '-x :benchmark:jvmBenchmark' |
| 49 | + - os: macos-13 # for macosX64 |
| 50 | + additional-task: '-x :benchmark:jvmBenchmark' |
| 51 | + - os: windows-latest |
| 52 | + additional-task: '-x :benchmark:jvmBenchmark' |
| 53 | + runs-on: ${{ matrix.os }} |
| 54 | + name: Run benchmarks on ${{ matrix.os }} |
| 55 | + steps: |
| 56 | + - name: 'Install native dependencies' |
| 57 | + run: sudo apt-get install -y libunistring-dev |
| 58 | + if: matrix.os == 'ubuntu-latest' |
| 59 | + - name: 'Checkout Repository' |
| 60 | + uses: actions/checkout@v4 |
| 61 | + - uses: actions/setup-java@v4 |
| 62 | + with: |
| 63 | + distribution: temurin |
| 64 | + java-version-file: .java-version |
| 65 | + - uses: actions/setup-python@v5 |
| 66 | + with: |
| 67 | + python-version-file: .python-version |
| 68 | + - name: Validate Gradle Wrapper |
| 69 | + uses: gradle/actions/wrapper-validation@v3 |
| 70 | + - name: Cache konan |
| 71 | + uses: actions/cache@v4 |
| 72 | + with: |
| 73 | + path: ~/.konan |
| 74 | + key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }} |
| 75 | + restore-keys: | |
| 76 | + ${{ runner.os }}-gradle- |
| 77 | + - name: Cache unicode data |
| 78 | + uses: actions/cache@v4 |
| 79 | + with: |
| 80 | + path: unicode_dump |
| 81 | + key: unicode-dump-${{ hashFiles('unicode_dump/*') }} |
| 82 | + restore-keys: | |
| 83 | + unicode-dump- |
| 84 | + - name: Setup Gradle |
| 85 | + uses: gradle/actions/setup-gradle@v3 |
| 86 | + with: |
| 87 | + gradle-version: wrapper |
| 88 | + - name: Run benchmarks |
| 89 | + run: > |
| 90 | + ./gradlew --no-daemon :benchmark:benchmark ${{ matrix.additional-task }} |
| 91 | + -Pbenchmark_warmups=${{ inputs.warmups }} |
| 92 | + -Pbenchmark_iterations=${{ inputs.iterations }} |
| 93 | + -Pbenchmark_iteration_time=${{ inputs.iteration-time }} |
| 94 | + -Pbenchmark_iteration_time_unit=${{ inputs.iteration-time-unit }} |
| 95 | + -Pbenchmark_report_format=${{ env.REPORT_FORMAT }} |
| 96 | + - name: Install CSV to MD converter |
| 97 | + if: env.REPORT_FORMAT == 'csv' |
| 98 | + run: pip install csv2md |
| 99 | + - name: Add benchmark results to summary |
| 100 | + shell: bash |
| 101 | + if: env.REPORT_FORMAT == 'csv' |
| 102 | + run: | |
| 103 | + for report in $(find ./benchmark/build/reports/benchmarks/main -type f -name "*.csv") |
| 104 | + do |
| 105 | + file_name=$(basename "$report") |
| 106 | + platform="${file_name%.*}" |
| 107 | + echo "File $file_name" |
| 108 | + # remove empty lines |
| 109 | + sed -i -e '/^[[:space:]]*$/d' "$report" |
| 110 | + echo "::group::Report CSV" |
| 111 | + cat "$report" |
| 112 | + echo "::endgroup::" |
| 113 | + markdown_table=$(csv2md "$report") |
| 114 | + echo "::group::Report Markdown" |
| 115 | + echo "$markdown_table" |
| 116 | + echo "::endgroup::" |
| 117 | + echo "# Platform ${platform}" >> $GITHUB_STEP_SUMMARY |
| 118 | + echo "$markdown_table" >> $GITHUB_STEP_SUMMARY |
| 119 | + done |
| 120 | + - name: Store results as artifact |
| 121 | + if: env.REPORT_FORMAT == 'json' |
| 122 | + uses: actions/upload-artifact@v4 |
| 123 | + with: |
| 124 | + name: bench-result-${{ matrix.os }} |
| 125 | + path: benchmark/build/reports/benchmarks/main/**/*.json |
| 126 | + |
| 127 | + upload-benchmark-results: |
| 128 | + if: (github.event_name == 'push' || github.event_name == 'pull_request') && github.repository == 'OptimumCode/json-schema-validator' |
| 129 | + needs: |
| 130 | + - benchmark-matrix |
| 131 | + runs-on: ubuntu-latest |
| 132 | + env: |
| 133 | + RESULTS_DIR: bench-results |
| 134 | + permissions: |
| 135 | + # deployments permission to deploy GitHub pages website |
| 136 | + deployments: write |
| 137 | + # contents permission to update benchmark contents in gh-pages branch |
| 138 | + contents: write |
| 139 | + # pull-requests permission to create comments on PR in case of alert |
| 140 | + pull-requests: write |
| 141 | + steps: |
| 142 | + - name: 'Checkout Repository' |
| 143 | + uses: actions/checkout@v4 |
| 144 | + - name: Download benchmark results |
| 145 | + uses: actions/download-artifact@v4 |
| 146 | + with: |
| 147 | + path: ${{ env.RESULTS_DIR }} |
| 148 | + merge-multiple: true |
| 149 | + - name: Show downloaded artifacts |
| 150 | + run: tree ${{ env.RESULTS_DIR }} |
| 151 | + - name: Prepare and join benchmark reports |
| 152 | + id: prep |
| 153 | + run: | |
| 154 | + for report in $(find ./${{ env.RESULTS_DIR }} -type f -name "*.json") |
| 155 | + do |
| 156 | + file_name=$(basename "$report") |
| 157 | + platform="${file_name%.*}" |
| 158 | + jq "[ .[] | .benchmark |= \"${platform}.\" + ltrimstr(\"io.github.optimumcode.json.schema.benchmark.\") ]" $report > ${{ env.RESULTS_DIR }}/$platform.json |
| 159 | + done |
| 160 | + AGGREGATED_REPORT=aggregated.json |
| 161 | + # Joined reports looks like this: [[{},{}], [{},{}]] |
| 162 | + # We need to transform them into this: [{},{}] |
| 163 | + ls ${{ env.RESULTS_DIR }}/*.json |
| 164 | + jq -s '[ .[] | .[] ]' ${{ env.RESULTS_DIR }}/*.json > $AGGREGATED_REPORT |
| 165 | + echo "report=$AGGREGATED_REPORT" >> $GITHUB_OUTPUT |
| 166 | + - name: Store benchmark result |
| 167 | + uses: benchmark-action/github-action-benchmark@v1 |
| 168 | + with: |
| 169 | + name: KMP JSON schema validator |
| 170 | + tool: 'jmh' |
| 171 | + output-file-path: ${{ steps.prep.outputs.report }} |
| 172 | + alert-comment-cc-users: "@OptimumCode" |
| 173 | + comment-on-alert: true |
| 174 | + summary-always: true |
| 175 | + alert-threshold: '50%' |
| 176 | + fail-threshold: '100%' |
| 177 | + github-token: ${{ secrets.GITHUB_TOKEN }} |
| 178 | + # Push and deploy GitHub pages branch automatically only if run in main repo and not in PR |
| 179 | + auto-push: ${{ github.event_name != 'pull_request' }} |
0 commit comments