From 92788a19d337a900fad8062ebb5415c233b88cbc Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:21:43 +0100
Subject: [PATCH 01/12] ci: Add performance impact step to CI (#9916)
---
.github/workflows/ci-performance.yml | 291 ++++++++++++++++++++++
CONTRIBUTING.md | 58 ++++-
benchmark/performance.js | 354 +++++++++++++++++++++++++++
package.json | 5 +-
4 files changed, 706 insertions(+), 2 deletions(-)
create mode 100644 .github/workflows/ci-performance.yml
create mode 100644 benchmark/performance.js
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
new file mode 100644
index 0000000000..4cde4d97b0
--- /dev/null
+++ b/.github/workflows/ci-performance.yml
@@ -0,0 +1,291 @@
+name: ci-performance
+on:
+ pull_request:
+ branches:
+ - alpha
+ - beta
+ - release
+ - 'release-[0-9]+.x.x'
+ - next-major
+ paths-ignore:
+ - '**.md'
+ - 'docs/**'
+
+env:
+ NODE_VERSION: 24.11.0
+ MONGODB_VERSION: 8.0.4
+
+permissions:
+ contents: read
+ pull-requests: write
+ issues: write
+
+jobs:
+ performance-check:
+ name: Benchmarks
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+
+ steps:
+ - name: Checkout base branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.base_ref }}
+ fetch-depth: 1
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: 'npm'
+
+ - name: Install dependencies (base)
+ run: npm ci
+
+ - name: Build Parse Server (base)
+ run: npm run build
+
+ - name: Run baseline benchmarks
+ id: baseline
+ run: |
+ echo "Checking if benchmark script exists..."
+ if [ ! -f "benchmark/performance.js" ]; then
+ echo "⚠️ Benchmark script not found in base branch - this is expected for new features"
+ echo "Skipping baseline benchmark"
+ echo '[]' > baseline.json
+ echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt
+ exit 0
+ fi
+ echo "Running baseline benchmarks..."
+ npm run benchmark > baseline-output.txt 2>&1 || true
+ echo "Benchmark command completed with exit code: $?"
+ echo "Output file size: $(wc -c < baseline-output.txt) bytes"
+ echo "--- Begin baseline-output.txt ---"
+ cat baseline-output.txt
+ echo "--- End baseline-output.txt ---"
+ # Extract JSON from output (everything between first [ and last ])
+ sed -n '/^\[/,/^\]/p' baseline-output.txt > baseline.json || echo '[]' > baseline.json
+ echo "Extracted JSON size: $(wc -c < baseline.json) bytes"
+ echo "Baseline benchmark results:"
+ cat baseline.json
+ continue-on-error: true
+
+ - name: Upload baseline results
+ uses: actions/upload-artifact@v4
+ with:
+ name: baseline-benchmark
+ path: |
+ baseline.json
+ baseline-output.txt
+ retention-days: 7
+
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: 1
+ clean: true
+
+ - name: Setup Node.js (PR)
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: 'npm'
+
+ - name: Install dependencies (PR)
+ run: npm ci
+
+ - name: Build Parse Server (PR)
+ run: npm run build
+
+ - name: Run PR benchmarks
+ id: pr-bench
+ run: |
+ echo "Running PR benchmarks..."
+ npm run benchmark > pr-output.txt 2>&1 || true
+ echo "Benchmark command completed with exit code: $?"
+ echo "Output file size: $(wc -c < pr-output.txt) bytes"
+ echo "--- Begin pr-output.txt ---"
+ cat pr-output.txt
+ echo "--- End pr-output.txt ---"
+ # Extract JSON from output (everything between first [ and last ])
+ sed -n '/^\[/,/^\]/p' pr-output.txt > pr.json || echo '[]' > pr.json
+ echo "Extracted JSON size: $(wc -c < pr.json) bytes"
+ echo "PR benchmark results:"
+ cat pr.json
+ continue-on-error: true
+
+ - name: Upload PR results
+ uses: actions/upload-artifact@v4
+ with:
+ name: pr-benchmark
+ path: |
+ pr.json
+ pr-output.txt
+ retention-days: 7
+
+ - name: Verify benchmark files exist
+ run: |
+ echo "Checking for benchmark result files..."
+ if [ ! -f baseline.json ] || [ ! -s baseline.json ]; then
+ echo "⚠️ baseline.json is missing or empty, creating empty array"
+ echo '[]' > baseline.json
+ fi
+ if [ ! -f pr.json ] || [ ! -s pr.json ]; then
+ echo "⚠️ pr.json is missing or empty, creating empty array"
+ echo '[]' > pr.json
+ fi
+ echo "baseline.json size: $(wc -c < baseline.json) bytes"
+ echo "pr.json size: $(wc -c < pr.json) bytes"
+
+ - name: Store benchmark result (PR)
+ uses: benchmark-action/github-action-benchmark@v1
+ if: github.event_name == 'pull_request' && hashFiles('pr.json') != ''
+ continue-on-error: true
+ with:
+ name: Parse Server Performance
+ tool: 'customSmallerIsBetter'
+ output-file-path: pr.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: false
+ save-data-file: false
+ alert-threshold: '110%'
+ comment-on-alert: true
+ fail-on-alert: false
+ alert-comment-cc-users: '@parse-community/maintainers'
+ summary-always: true
+
+ - name: Compare benchmark results
+ id: compare
+ run: |
+ node -e "
+ const fs = require('fs');
+
+ let baseline, pr;
+ try {
+ baseline = JSON.parse(fs.readFileSync('baseline.json', 'utf8'));
+ pr = JSON.parse(fs.readFileSync('pr.json', 'utf8'));
+ } catch (e) {
+ console.log('⚠️ Could not parse benchmark results');
+ process.exit(0);
+ }
+
+ // Handle case where baseline doesn't exist (new feature)
+ if (!Array.isArray(baseline) || baseline.length === 0) {
+ if (!Array.isArray(pr) || pr.length === 0) {
+ console.log('⚠️ Benchmark results are empty or invalid');
+ process.exit(0);
+ }
+ console.log('# Performance Benchmark Results\n');
+ console.log('> ℹ️ Baseline not available - this appears to be a new feature\n');
+ console.log('| Benchmark | Value | Details |');
+ console.log('|-----------|-------|---------|');
+ pr.forEach(result => {
+ console.log(\`| \${result.name} | \${result.value.toFixed(2)} ms | \${result.extra} |\`);
+ });
+ console.log('');
+ console.log('✅ **New benchmarks established for this feature.**');
+ process.exit(0);
+ }
+
+ if (!Array.isArray(pr) || pr.length === 0) {
+ console.log('⚠️ PR benchmark results are empty or invalid');
+ process.exit(0);
+ }
+
+ console.log('# Performance Comparison\n');
+ console.log('| Benchmark | Baseline | PR | Change | Status |');
+ console.log('|-----------|----------|----|---------| ------ |');
+
+ let hasRegression = false;
+ let hasImprovement = false;
+
+ baseline.forEach(baseResult => {
+ const prResult = pr.find(p => p.name === baseResult.name);
+ if (!prResult) {
+ console.log(\`| \${baseResult.name} | \${baseResult.value.toFixed(2)} ms | N/A | - | ⚠️ Missing |\`);
+ return;
+ }
+
+ const baseValue = parseFloat(baseResult.value);
+ const prValue = parseFloat(prResult.value);
+ const change = ((prValue - baseValue) / baseValue * 100);
+ const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
+
+ let status = '✅';
+ if (change > 20) {
+ status = '❌ Much Slower';
+ hasRegression = true;
+ } else if (change > 10) {
+ status = '⚠️ Slower';
+ hasRegression = true;
+ } else if (change < -10) {
+ status = '🚀 Faster';
+ hasImprovement = true;
+ }
+
+ console.log(\`| \${baseResult.name} | \${baseValue.toFixed(2)} ms | \${prValue.toFixed(2)} ms | \${changeStr} | \${status} |\`);
+ });
+
+ console.log('');
+ if (hasRegression) {
+ console.log('⚠️ **Performance regressions detected.** Please review the changes.');
+ } else if (hasImprovement) {
+ console.log('🚀 **Performance improvements detected!** Great work!');
+ } else {
+ console.log('✅ **No significant performance changes.**');
+ }
+ " | tee comparison.md
+
+ - name: Upload comparison
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-comparison
+ path: comparison.md
+ retention-days: 30
+
+ - name: Prepare comment body
+ if: github.event_name == 'pull_request'
+ run: |
+ echo "## Performance Impact Report" > comment.md
+ echo "" >> comment.md
+ if [ -f comparison.md ]; then
+ cat comparison.md >> comment.md
+ else
+ echo "⚠️ Could not generate performance comparison." >> comment.md
+ fi
+ echo "" >> comment.md
+ echo "" >> comment.md
+          echo "<details><summary>📊 View detailed results</summary>" >> comment.md
+ echo "" >> comment.md
+ echo "### Baseline Results" >> comment.md
+ echo "\`\`\`json" >> comment.md
+ cat baseline.json >> comment.md
+ echo "\`\`\`" >> comment.md
+ echo "" >> comment.md
+ echo "### PR Results" >> comment.md
+ echo "\`\`\`json" >> comment.md
+ cat pr.json >> comment.md
+ echo "\`\`\`" >> comment.md
+ echo "" >> comment.md
+          echo "</details>" >> comment.md
+ echo "" >> comment.md
+ echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md
+
+ - name: Comment PR with results
+ if: github.event_name == 'pull_request'
+ uses: thollander/actions-comment-pull-request@v2
+ continue-on-error: true
+ with:
+ filePath: comment.md
+ comment_tag: performance-benchmark
+ mode: recreate
+
+ - name: Generate job summary
+ if: always()
+ run: |
+ if [ -f comparison.md ]; then
+ cat comparison.md >> $GITHUB_STEP_SUMMARY
+ else
+ echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
+ fi
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 01c88df10c..f79caa4236 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -21,9 +21,13 @@
- [Good to Know](#good-to-know)
- [Troubleshooting](#troubleshooting)
- [Please Do's](#please-dos)
- - [TypeScript Tests](#typescript-tests)
+ - [TypeScript Tests](#typescript-tests)
- [Test against Postgres](#test-against-postgres)
- [Postgres with Docker](#postgres-with-docker)
+ - [Performance Testing](#performance-testing)
+ - [Adding Tests](#adding-tests)
+ - [Adding Benchmarks](#adding-benchmarks)
+ - [Benchmark Guidelines](#benchmark-guidelines)
- [Breaking Changes](#breaking-changes)
- [Deprecation Policy](#deprecation-policy)
- [Feature Considerations](#feature-considerations)
@@ -298,6 +302,58 @@ RUN chmod +x /docker-entrypoint-initdb.d/setup-dbs.sh
Note that the script above will ONLY be executed during initialization of the container with no data in the database, see the official [Postgres image](https://hub.docker.com/_/postgres) for details. If you want to use the script to run again be sure there is no data in the /var/lib/postgresql/data of the container.
+### Performance Testing
+
+Parse Server includes an automated performance benchmarking system that runs on every pull request to detect performance regressions and track improvements over time.
+
+#### Adding Tests
+
+You should consider adding performance benchmarks if your contribution:
+
+- **Introduces a performance-critical feature**: Features that will be frequently used in production environments, such as new query operations, authentication methods, or data processing functions.
+- **Modifies existing critical paths**: Changes to core functionality like object CRUD operations, query execution, user authentication, file operations, or Cloud Code execution.
+- **Has potential performance impact**: Any change that affects database operations, network requests, data parsing, caching mechanisms, or algorithmic complexity.
+- **Optimizes performance**: If your PR specifically aims to improve performance, adding benchmarks helps verify the improvement and prevents future regressions.
+
+#### Adding Benchmarks
+
+Performance benchmarks are located in [`benchmark/performance.js`](benchmark/performance.js). To add a new benchmark:
+
+1. **Identify the operation to benchmark**: Determine the specific operation you want to measure (e.g., a new query type, a new API endpoint).
+
+2. **Create a benchmark function**: Follow the existing patterns in `benchmark/performance.js`:
+ ```javascript
+ async function benchmarkNewFeature() {
+ return measureOperation('Feature Name', async () => {
+ // Your operation to benchmark
+ const result = await someOperation();
+ }, ITERATIONS);
+ }
+ ```
+
+3. **Add to benchmark suite**: Register your benchmark in the `runBenchmarks()` function:
+ ```javascript
+ console.error('Running New Feature benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkNewFeature());
+ ```
+
+4. **Test locally**: Run the benchmarks locally to verify they work:
+ ```bash
+ npm run benchmark:quick # Quick test with 10 iterations
+ npm run benchmark # Full test with 100 iterations
+ ```
+
+For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
+
+#### Benchmark Guidelines
+
+- **Keep benchmarks focused**: Each benchmark should test a single, well-defined operation.
+- **Use realistic data**: Test with data that reflects real-world usage patterns.
+- **Clean up between runs**: Use `cleanupDatabase()` to ensure consistent test conditions.
+- **Consider iteration count**: Use fewer iterations for expensive operations (see `ITERATIONS` environment variable).
+- **Document what you're testing**: Add clear comments explaining what the benchmark measures and why it's important.
+
## Breaking Changes
Breaking changes should be avoided whenever possible. For a breaking change to be accepted, the benefits of the change have to clearly outweigh the costs of developers having to adapt their deployments. If a breaking change is only cosmetic it will likely be rejected and preferred to become obsolete organically during the course of further development, unless it is required as part of a larger change. Breaking changes should follow the [Deprecation Policy](#deprecation-policy).
diff --git a/benchmark/performance.js b/benchmark/performance.js
new file mode 100644
index 0000000000..831a57db37
--- /dev/null
+++ b/benchmark/performance.js
@@ -0,0 +1,354 @@
+/**
+ * Performance Benchmark Suite for Parse Server
+ *
+ * This suite measures the performance of critical Parse Server operations
+ * using the Node.js Performance API. Results are output in a format
+ * compatible with github-action-benchmark.
+ *
+ * Run with: npm run benchmark
+ */
+
+const Parse = require('parse/node');
+const { performance, PerformanceObserver } = require('perf_hooks');
+const { MongoClient } = require('mongodb');
+
+// Configuration
+const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_benchmark_test';
+const SERVER_URL = 'http://localhost:1337/parse';
+const APP_ID = 'benchmark-app-id';
+const MASTER_KEY = 'benchmark-master-key';
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100', 10);
+
+// Parse Server instance
+let parseServer;
+let mongoClient;
+
+/**
+ * Initialize Parse Server for benchmarking
+ */
+async function initializeParseServer() {
+ const express = require('express');
+ const { default: ParseServer } = require('../lib/index.js');
+
+ const app = express();
+
+ parseServer = new ParseServer({
+ databaseURI: MONGODB_URI,
+ appId: APP_ID,
+ masterKey: MASTER_KEY,
+ serverURL: SERVER_URL,
+ silent: true,
+ allowClientClassCreation: true,
+ });
+
+ app.use('/parse', parseServer.app);
+
+ return new Promise((resolve, reject) => {
+ const server = app.listen(1337, (err) => {
+ if (err) {
+ reject(new Error(`Failed to start server: ${err.message}`));
+ return;
+ }
+ Parse.initialize(APP_ID);
+ Parse.masterKey = MASTER_KEY;
+ Parse.serverURL = SERVER_URL;
+ resolve(server);
+ });
+
+ server.on('error', (err) => {
+ reject(new Error(`Server error: ${err.message}`));
+ });
+ });
+}
+
+/**
+ * Clean up database between benchmarks
+ */
+async function cleanupDatabase() {
+ try {
+ if (!mongoClient) {
+ mongoClient = await MongoClient.connect(MONGODB_URI);
+ }
+ const db = mongoClient.db();
+ const collections = await db.listCollections().toArray();
+
+ for (const collection of collections) {
+ if (!collection.name.startsWith('system.')) {
+ await db.collection(collection.name).deleteMany({});
+ }
+ }
+ } catch (error) {
+ throw new Error(`Failed to cleanup database: ${error.message}`);
+ }
+}
+
+/**
+ * Measure average time for an async operation over multiple iterations
+ */
+async function measureOperation(name, operation, iterations = ITERATIONS) {
+ const times = [];
+
+ for (let i = 0; i < iterations; i++) {
+ const start = performance.now();
+ await operation();
+ const end = performance.now();
+ times.push(end - start);
+ }
+
+ // Calculate statistics
+ times.sort((a, b) => a - b);
+ const sum = times.reduce((acc, val) => acc + val, 0);
+ const mean = sum / times.length;
+ const p50 = times[Math.floor(times.length * 0.5)];
+ const p95 = times[Math.floor(times.length * 0.95)];
+ const p99 = times[Math.floor(times.length * 0.99)];
+ const min = times[0];
+ const max = times[times.length - 1];
+
+ return {
+ name,
+ value: mean,
+ unit: 'ms',
+ range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
+ extra: `p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms`,
+ };
+}
+
+/**
+ * Benchmark: Object Create
+ */
+async function benchmarkObjectCreate() {
+ let counter = 0;
+
+ return measureOperation('Object Create', async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const obj = new TestObject();
+ obj.set('testField', `test-value-${counter++}`);
+ obj.set('number', counter);
+ obj.set('boolean', true);
+ await obj.save();
+ });
+}
+
+/**
+ * Benchmark: Object Read (by ID)
+ */
+async function benchmarkObjectRead() {
+ // Setup: Create test objects
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < ITERATIONS; i++) {
+ const obj = new TestObject();
+ obj.set('testField', `read-test-${i}`);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Object Read', async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ await query.get(objects[counter++ % objects.length].id);
+ });
+}
+
+/**
+ * Benchmark: Object Update
+ */
+async function benchmarkObjectUpdate() {
+ // Setup: Create test objects
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < ITERATIONS; i++) {
+ const obj = new TestObject();
+ obj.set('testField', `update-test-${i}`);
+ obj.set('counter', 0);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Object Update', async () => {
+ const obj = objects[counter++ % objects.length];
+ obj.increment('counter');
+ obj.set('lastUpdated', new Date());
+ await obj.save();
+ });
+}
+
+/**
+ * Benchmark: Simple Query
+ */
+async function benchmarkSimpleQuery() {
+ // Setup: Create test data
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < 100; i++) {
+ const obj = new TestObject();
+ obj.set('category', i % 10);
+ obj.set('value', i);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Simple Query', async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ query.equalTo('category', counter++ % 10);
+ await query.find();
+ });
+}
+
+/**
+ * Benchmark: Batch Save (saveAll)
+ */
+async function benchmarkBatchSave() {
+ const BATCH_SIZE = 10;
+
+ return measureOperation('Batch Save (10 objects)', async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < BATCH_SIZE; i++) {
+ const obj = new TestObject();
+ obj.set('batchField', `batch-${i}`);
+ obj.set('timestamp', new Date());
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+ }, Math.floor(ITERATIONS / BATCH_SIZE)); // Fewer iterations for batch operations
+}
+
+/**
+ * Benchmark: User Signup
+ */
+async function benchmarkUserSignup() {
+ let counter = 0;
+
+ return measureOperation('User Signup', async () => {
+ counter++;
+ const user = new Parse.User();
+ user.set('username', `benchmark_user_${Date.now()}_${counter}`);
+ user.set('password', 'benchmark_password');
+ user.set('email', `benchmark${counter}@example.com`);
+ await user.signUp();
+ }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+}
+
+/**
+ * Benchmark: User Login
+ */
+async function benchmarkUserLogin() {
+ // Setup: Create test users
+ const users = [];
+
+ for (let i = 0; i < 10; i++) {
+ const user = new Parse.User();
+ user.set('username', `benchmark_login_user_${i}`);
+ user.set('password', 'benchmark_password');
+ user.set('email', `login${i}@example.com`);
+ await user.signUp();
+ users.push({ username: user.get('username'), password: 'benchmark_password' });
+ await Parse.User.logOut();
+ }
+
+ let counter = 0;
+
+ return measureOperation('User Login', async () => {
+ const userCreds = users[counter++ % users.length];
+ await Parse.User.logIn(userCreds.username, userCreds.password);
+ await Parse.User.logOut();
+ }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+}
+
+/**
+ * Run all benchmarks
+ */
+async function runBenchmarks() {
+ console.error('Starting Parse Server Performance Benchmarks...');
+ console.error(`Iterations per benchmark: ${ITERATIONS}`);
+ console.error('');
+
+ let server;
+
+ try {
+ // Initialize Parse Server
+ console.error('Initializing Parse Server...');
+ server = await initializeParseServer();
+
+ // Wait for server to be ready
+ await new Promise(resolve => setTimeout(resolve, 2000));
+
+ const results = [];
+
+ // Run each benchmark with database cleanup
+ console.error('Running Object Create benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectCreate());
+
+ console.error('Running Object Read benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectRead());
+
+ console.error('Running Object Update benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectUpdate());
+
+ console.error('Running Simple Query benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkSimpleQuery());
+
+ console.error('Running Batch Save benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkBatchSave());
+
+ console.error('Running User Signup benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkUserSignup());
+
+ console.error('Running User Login benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkUserLogin());
+
+ // Output results in github-action-benchmark format
+ console.log(JSON.stringify(results, null, 2));
+
+ console.error('');
+ console.error('Benchmarks completed successfully!');
+ console.error('');
+ console.error('Summary:');
+ results.forEach(result => {
+ console.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
+ });
+
+ } catch (error) {
+ console.error('Error running benchmarks:', error);
+ process.exit(1);
+ } finally {
+ // Cleanup
+ if (mongoClient) {
+ await mongoClient.close();
+ }
+ if (server) {
+ server.close();
+ }
+ // Give some time for cleanup
+ setTimeout(() => process.exit(0), 1000);
+ }
+}
+
+// Run benchmarks if executed directly
+if (require.main === module) {
+ runBenchmarks();
+}
+
+module.exports = { runBenchmarks };
diff --git a/package.json b/package.json
index add0d15339..fe043e8ee4 100644
--- a/package.json
+++ b/package.json
@@ -138,7 +138,10 @@
"prettier": "prettier --write {src,spec}/{**/*,*}.js",
"prepare": "npm run build",
"postinstall": "node -p 'require(\"./postinstall.js\")()'",
- "madge:circular": "node_modules/.bin/madge ./src --circular"
+ "madge:circular": "node_modules/.bin/madge ./src --circular",
+ "benchmark": "cross-env MONGODB_VERSION=8.0.4 MONGODB_TOPOLOGY=standalone mongodb-runner exec -t standalone --version 8.0.4 -- --port 27017 -- npm run benchmark:only",
+ "benchmark:only": "node benchmark/performance.js",
+ "benchmark:quick": "cross-env BENCHMARK_ITERATIONS=10 npm run benchmark:only"
},
"types": "types/index.d.ts",
"engines": {
From 06f25ff31a5cc66f9cf3921f1d18de90228cdec1 Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:28:07 +0100
Subject: [PATCH 02/12] ci: Fix performance step in CI missing permissions
(#9917)
---
.github/workflows/ci-performance.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 4cde4d97b0..c65dd23dd2 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -1,6 +1,6 @@
name: ci-performance
on:
- pull_request:
+ pull_request_target:
branches:
- alpha
- beta
From a85ba199be1d42ce77e9ef6165cc367286fc8c9a Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:34:07 +0100
Subject: [PATCH 03/12] ci: Fix performance step in CI missing permissions
(#9918)
---
.github/workflows/ci-performance.yml | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index c65dd23dd2..4ae8c80d67 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -70,13 +70,19 @@ jobs:
cat baseline.json
continue-on-error: true
+ - name: Save baseline results to temp location
+ run: |
+ mkdir -p /tmp/benchmark-results
+ cp baseline.json /tmp/benchmark-results/ || echo '[]' > /tmp/benchmark-results/baseline.json
+ cp baseline-output.txt /tmp/benchmark-results/ || echo 'No baseline output' > /tmp/benchmark-results/baseline-output.txt
+
- name: Upload baseline results
uses: actions/upload-artifact@v4
with:
name: baseline-benchmark
path: |
- baseline.json
- baseline-output.txt
+ /tmp/benchmark-results/baseline.json
+ /tmp/benchmark-results/baseline-output.txt
retention-days: 7
- name: Checkout PR branch
@@ -86,6 +92,11 @@ jobs:
fetch-depth: 1
clean: true
+ - name: Restore baseline results
+ run: |
+ cp /tmp/benchmark-results/baseline.json ./ || echo '[]' > baseline.json
+ cp /tmp/benchmark-results/baseline-output.txt ./ || echo 'No baseline output' > baseline-output.txt
+
- name: Setup Node.js (PR)
uses: actions/setup-node@v4
with:
From b73ebac5c9b6edc375d19c48eebfb836a0a6057f Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 02:02:17 +0100
Subject: [PATCH 04/12] ci: Fix performance step in CI (#9921)
---
.github/workflows/ci-performance.yml | 47 +++++++++++++----
CONTRIBUTING.md | 2 +-
benchmark/performance.js | 79 ++++++++++++++++++----------
3 files changed, 87 insertions(+), 41 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 4ae8c80d67..3fd2200d02 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -27,11 +27,31 @@ jobs:
timeout-minutes: 30
steps:
+ - name: Checkout PR branch (for benchmark script)
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: 1
+
+ - name: Save PR benchmark script
+ run: |
+ mkdir -p /tmp/pr-benchmark
+ cp -r benchmark /tmp/pr-benchmark/ || echo "No benchmark directory"
+ cp package.json /tmp/pr-benchmark/ || true
+
- name: Checkout base branch
uses: actions/checkout@v4
with:
ref: ${{ github.base_ref }}
fetch-depth: 1
+ clean: true
+
+ - name: Restore PR benchmark script
+ run: |
+ if [ -d "/tmp/pr-benchmark/benchmark" ]; then
+ rm -rf benchmark
+ cp -r /tmp/pr-benchmark/benchmark .
+ fi
- name: Setup Node.js
uses: actions/setup-node@v4
@@ -47,17 +67,18 @@ jobs:
- name: Run baseline benchmarks
id: baseline
+ env:
+ NODE_ENV: production
run: |
- echo "Checking if benchmark script exists..."
+ echo "Running baseline benchmarks with CPU affinity (using PR's benchmark script)..."
if [ ! -f "benchmark/performance.js" ]; then
- echo "⚠️ Benchmark script not found in base branch - this is expected for new features"
+ echo "⚠️ Benchmark script not found - this is expected for new features"
echo "Skipping baseline benchmark"
echo '[]' > baseline.json
- echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt
+ echo "Baseline: N/A (no benchmark script)" > baseline-output.txt
exit 0
fi
- echo "Running baseline benchmarks..."
- npm run benchmark > baseline-output.txt 2>&1 || true
+ taskset -c 0 npm run benchmark > baseline-output.txt 2>&1 || npm run benchmark > baseline-output.txt 2>&1 || true
echo "Benchmark command completed with exit code: $?"
echo "Output file size: $(wc -c < baseline-output.txt) bytes"
echo "--- Begin baseline-output.txt ---"
@@ -111,9 +132,11 @@ jobs:
- name: Run PR benchmarks
id: pr-bench
+ env:
+ NODE_ENV: production
run: |
- echo "Running PR benchmarks..."
- npm run benchmark > pr-output.txt 2>&1 || true
+ echo "Running PR benchmarks with CPU affinity..."
+ taskset -c 0 npm run benchmark > pr-output.txt 2>&1 || npm run benchmark > pr-output.txt 2>&1 || true
echo "Benchmark command completed with exit code: $?"
echo "Output file size: $(wc -c < pr-output.txt) bytes"
echo "--- Begin pr-output.txt ---"
@@ -224,13 +247,13 @@ jobs:
const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
let status = '✅';
- if (change > 20) {
+ if (change > 100) {
status = '❌ Much Slower';
hasRegression = true;
- } else if (change > 10) {
+ } else if (change > 50) {
status = '⚠️ Slower';
hasRegression = true;
- } else if (change < -10) {
+ } else if (change < -50) {
status = '🚀 Faster';
hasImprovement = true;
}
@@ -281,7 +304,9 @@ jobs:
echo "" >> comment.md
echo "" >> comment.md
echo "" >> comment.md
- echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md
+ echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-10000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
+ echo "" >> comment.md
+ echo "> **Note:** Using 10k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md
- name: Comment PR with results
if: github.event_name == 'pull_request'
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f79caa4236..30050f87a6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per
4. **Test locally**: Run the benchmarks locally to verify they work:
```bash
npm run benchmark:quick # Quick test with 10 iterations
- npm run benchmark # Full test with 100 iterations
+ npm run benchmark # Full test with 10,000 iterations
```
For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 831a57db37..7021ed35b3 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -8,6 +8,8 @@
* Run with: npm run benchmark
*/
+/* eslint-disable no-console */
+
const Parse = require('parse/node');
const { performance, PerformanceObserver } = require('perf_hooks');
const { MongoClient } = require('mongodb');
@@ -17,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
-const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100', 10);
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '10000', 10);
// Parse Server instance
let parseServer;
@@ -39,6 +41,8 @@ async function initializeParseServer() {
serverURL: SERVER_URL,
silent: true,
allowClientClassCreation: true,
+ logLevel: 'error', // Minimal logging for performance
+ verbose: false,
});
app.use('/parse', parseServer.app);
@@ -84,10 +88,18 @@ async function cleanupDatabase() {
/**
* Measure average time for an async operation over multiple iterations
+ * Uses warmup iterations, median metric, and outlier filtering for robustness
*/
async function measureOperation(name, operation, iterations = ITERATIONS) {
+ const warmupCount = Math.floor(iterations * 0.2); // 20% warmup iterations
const times = [];
+ // Warmup phase - stabilize JIT compilation and caches
+ for (let i = 0; i < warmupCount; i++) {
+ await operation();
+ }
+
+ // Measurement phase
for (let i = 0; i < iterations; i++) {
const start = performance.now();
await operation();
@@ -95,22 +107,33 @@ async function measureOperation(name, operation, iterations = ITERATIONS) {
times.push(end - start);
}
- // Calculate statistics
+ // Sort times for percentile calculations
times.sort((a, b) => a - b);
- const sum = times.reduce((acc, val) => acc + val, 0);
- const mean = sum / times.length;
- const p50 = times[Math.floor(times.length * 0.5)];
- const p95 = times[Math.floor(times.length * 0.95)];
- const p99 = times[Math.floor(times.length * 0.99)];
- const min = times[0];
- const max = times[times.length - 1];
+
+ // Filter outliers using Interquartile Range (IQR) method
+ const q1Index = Math.floor(times.length * 0.25);
+ const q3Index = Math.floor(times.length * 0.75);
+ const q1 = times[q1Index];
+ const q3 = times[q3Index];
+ const iqr = q3 - q1;
+ const lowerBound = q1 - 1.5 * iqr;
+ const upperBound = q3 + 1.5 * iqr;
+
+ const filtered = times.filter(t => t >= lowerBound && t <= upperBound);
+
+ // Calculate statistics on filtered data
+ const median = filtered[Math.floor(filtered.length * 0.5)];
+ const p95 = filtered[Math.floor(filtered.length * 0.95)];
+ const p99 = filtered[Math.floor(filtered.length * 0.99)];
+ const min = filtered[0];
+ const max = filtered[filtered.length - 1];
return {
name,
- value: mean,
+ value: median, // Use median (p50) as primary metric for stability in CI
unit: 'ms',
range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
- extra: `p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms`,
+ extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`,
};
}
@@ -274,15 +297,14 @@ async function benchmarkUserLogin() {
* Run all benchmarks
*/
async function runBenchmarks() {
- console.error('Starting Parse Server Performance Benchmarks...');
- console.error(`Iterations per benchmark: ${ITERATIONS}`);
- console.error('');
+ console.log('Starting Parse Server Performance Benchmarks...');
+ console.log(`Iterations per benchmark: ${ITERATIONS}`);
let server;
try {
// Initialize Parse Server
- console.error('Initializing Parse Server...');
+ console.log('Initializing Parse Server...');
server = await initializeParseServer();
// Wait for server to be ready
@@ -291,43 +313,42 @@ async function runBenchmarks() {
const results = [];
// Run each benchmark with database cleanup
- console.error('Running Object Create benchmark...');
+ console.log('Running Object Create benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectCreate());
- console.error('Running Object Read benchmark...');
+ console.log('Running Object Read benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectRead());
- console.error('Running Object Update benchmark...');
+ console.log('Running Object Update benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectUpdate());
- console.error('Running Simple Query benchmark...');
+ console.log('Running Simple Query benchmark...');
await cleanupDatabase();
results.push(await benchmarkSimpleQuery());
- console.error('Running Batch Save benchmark...');
+ console.log('Running Batch Save benchmark...');
await cleanupDatabase();
results.push(await benchmarkBatchSave());
- console.error('Running User Signup benchmark...');
+ console.log('Running User Signup benchmark...');
await cleanupDatabase();
results.push(await benchmarkUserSignup());
- console.error('Running User Login benchmark...');
+ console.log('Running User Login benchmark...');
await cleanupDatabase();
results.push(await benchmarkUserLogin());
- // Output results in github-action-benchmark format
+ // Output results in github-action-benchmark format (stdout)
console.log(JSON.stringify(results, null, 2));
- console.error('');
- console.error('Benchmarks completed successfully!');
- console.error('');
- console.error('Summary:');
+ // Output summary to stdout for visibility
+ console.log('Benchmarks completed successfully!');
+ console.log('Summary:');
results.forEach(result => {
- console.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
+ console.log(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
});
} catch (error) {
@@ -342,7 +363,7 @@ async function runBenchmarks() {
server.close();
}
// Give some time for cleanup
- setTimeout(() => process.exit(0), 1000);
+ setTimeout(() => process.exit(0), 10000);
}
}
From 818824f6fc86ef61d5fa78e8765c21c76d6feba8 Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 02:18:20 +0100
Subject: [PATCH 05/12] ci: Fix performance step in CI (#9922)
---
.github/workflows/ci-performance.yml | 4 ++--
CONTRIBUTING.md | 2 +-
benchmark/performance.js | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 3fd2200d02..438f5b1233 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -304,9 +304,9 @@ jobs:
echo "" >> comment.md
echo "" >> comment.md
echo "" >> comment.md
- echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-10000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
+ echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
echo "" >> comment.md
- echo "> **Note:** Using 10k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md
+ echo "> **Note:** Using 1k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md
- name: Comment PR with results
if: github.event_name == 'pull_request'
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 30050f87a6..8e3db29efa 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per
4. **Test locally**: Run the benchmarks locally to verify they work:
```bash
npm run benchmark:quick # Quick test with 10 iterations
- npm run benchmark # Full test with 10,000 iterations
+ npm run benchmark # Full test with 1,000 iterations
```
For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 7021ed35b3..d6984560bb 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
-const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '10000', 10);
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10);
// Parse Server instance
let parseServer;
From d94f348d86f998e3f7daf74128076f3892edf40b Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 02:21:58 +0100
Subject: [PATCH 06/12] ci: Cancel obsolete performance benchmark jobs (#9923)
---
.github/workflows/ci-performance.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 438f5b1233..782f4a3cd5 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -325,3 +325,6 @@ jobs:
else
echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
fi
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
From 36e166cc813fbd68f1c195649c5c166f599215be Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 13:30:23 +0100
Subject: [PATCH 07/12] ci: Fix performance step in CI (#9925)
---
.github/workflows/ci-performance.yml | 10 +++++-----
CONTRIBUTING.md | 2 +-
benchmark/performance.js | 8 ++++----
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 782f4a3cd5..78df157e89 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -247,13 +247,13 @@ jobs:
const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
let status = '✅';
- if (change > 100) {
+ if (change > 50) {
status = '❌ Much Slower';
hasRegression = true;
- } else if (change > 50) {
+ } else if (change > 25) {
status = '⚠️ Slower';
hasRegression = true;
- } else if (change < -50) {
+ } else if (change < -25) {
status = '🚀 Faster';
hasImprovement = true;
}
@@ -304,9 +304,9 @@ jobs:
echo "" >> comment.md
echo "" >> comment.md
echo "" >> comment.md
- echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
+ echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-10000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
echo "" >> comment.md
- echo "> **Note:** Using 1k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md
+ echo "> **Note:** Using 10k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >25%, ❌ >50%." >> comment.md
- name: Comment PR with results
if: github.event_name == 'pull_request'
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8e3db29efa..30050f87a6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per
4. **Test locally**: Run the benchmarks locally to verify they work:
```bash
npm run benchmark:quick # Quick test with 10 iterations
- npm run benchmark # Full test with 1,000 iterations
+ npm run benchmark # Full test with 10,000 iterations
```
For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
diff --git a/benchmark/performance.js b/benchmark/performance.js
index d6984560bb..77e7f175ad 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
-const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10);
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '10000', 10);
// Parse Server instance
let parseServer;
@@ -248,7 +248,7 @@ async function benchmarkBatchSave() {
}
await Parse.Object.saveAll(objects);
- }, Math.floor(ITERATIONS / BATCH_SIZE)); // Fewer iterations for batch operations
+ });
}
/**
@@ -264,7 +264,7 @@ async function benchmarkUserSignup() {
user.set('password', 'benchmark_password');
user.set('email', `benchmark${counter}@example.com`);
await user.signUp();
- }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+ });
}
/**
@@ -290,7 +290,7 @@ async function benchmarkUserLogin() {
const userCreds = users[counter++ % users.length];
await Parse.User.logIn(userCreds.username, userCreds.password);
await Parse.User.logOut();
- }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+ });
}
/**
From 3cc8c1ae3e57c7e8640b3d4a973f9682b152fd2a Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 13:36:10 +0100
Subject: [PATCH 08/12] ci: Fix performance step in CI (#9926)
---
.github/workflows/ci-performance.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 78df157e89..e3afe3626f 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -326,5 +326,5 @@ jobs:
echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
fi
concurrency:
- group: ${{ github.workflow }}-${{ github.ref }}
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
From 133660fb5289d5837f82ad1319e6010e0373e9ad Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Sun, 9 Nov 2025 18:57:23 +0100
Subject: [PATCH 09/12] ci: Fix performance step in CI (#9927)
---
.github/workflows/ci-performance.yml | 4 +---
benchmark/performance.js | 4 ++--
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index e3afe3626f..b080b668aa 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -304,9 +304,7 @@ jobs:
echo "" >> comment.md
echo "" >> comment.md
echo "" >> comment.md
- echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-10000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md
- echo "" >> comment.md
- echo "> **Note:** Using 10k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >25%, ❌ >50%." >> comment.md
+ echo "> **Note:** Thresholds: ⚠️ >25%, ❌ >50%." >> comment.md
- name: Comment PR with results
if: github.event_name == 'pull_request'
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 77e7f175ad..64aa016df3 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
-const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '10000', 10);
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10);
// Parse Server instance
let parseServer;
@@ -363,7 +363,7 @@ async function runBenchmarks() {
server.close();
}
// Give some time for cleanup
- setTimeout(() => process.exit(0), 10000);
+ setTimeout(() => process.exit(0), 1000);
}
}
From 52f7c89f3fa67ed2c8cbe41651cd11a9a7ef0a28 Mon Sep 17 00:00:00 2001
From: Manuel <5673677+mtrezza@users.noreply.github.com>
Date: Mon, 17 Nov 2025 01:14:29 +0100
Subject: [PATCH 10/12] ci: Fix performance step in CI (#9931)
---
benchmark/MongoLatencyWrapper.js | 137 ++++++++++++
benchmark/performance.js | 372 +++++++++++++++++++++----------
2 files changed, 388 insertions(+), 121 deletions(-)
create mode 100644 benchmark/MongoLatencyWrapper.js
diff --git a/benchmark/MongoLatencyWrapper.js b/benchmark/MongoLatencyWrapper.js
new file mode 100644
index 0000000000..2b0480c1bc
--- /dev/null
+++ b/benchmark/MongoLatencyWrapper.js
@@ -0,0 +1,137 @@
+/**
+ * MongoDB Latency Wrapper
+ *
+ * Utility to inject artificial latency into MongoDB operations for performance testing.
+ * This wrapper temporarily wraps MongoDB Collection methods to add delays to
+ * database operations (the delay is applied after each operation settles).
+ *
+ * Usage:
+ * const { wrapMongoDBWithLatency } = require('./MongoLatencyWrapper');
+ *
+ * // Before initializing Parse Server
+ * const unwrap = wrapMongoDBWithLatency(10); // 10ms delay
+ *
+ * // ... run benchmarks ...
+ *
+ * // Cleanup when done
+ * unwrap();
+ */
+
+const { Collection } = require('mongodb');
+
+// Store original methods for restoration
+const originalMethods = new Map();
+
+/**
+ * Wrap a Collection method to add artificial latency
+ * @param {string} methodName - Name of the method to wrap
+ * @param {number} latencyMs - Delay in milliseconds
+ */
+function wrapMethod(methodName, latencyMs) {
+ if (!originalMethods.has(methodName)) {
+ originalMethods.set(methodName, Collection.prototype[methodName]);
+ }
+
+ const originalMethod = originalMethods.get(methodName);
+
+ Collection.prototype[methodName] = function (...args) {
+ // For methods that return cursors (like find, aggregate), we need to delay the
+ // result delivery while still returning a cursor-like object
+ const result = originalMethod.apply(this, args);
+
+ // Check if result has cursor methods (toArray, forEach, etc.)
+ if (result && typeof result.toArray === 'function') {
+ // Wrap cursor methods that actually execute the query
+ const originalToArray = result.toArray.bind(result);
+ result.toArray = function() {
+ // Wait for the original promise to settle, then delay the result
+ return originalToArray().then(
+ value => new Promise(resolve => setTimeout(() => resolve(value), latencyMs)),
+ error => new Promise((_, reject) => setTimeout(() => reject(error), latencyMs))
+ );
+ };
+ return result;
+ }
+
+ // For promise-returning methods, wrap the promise with delay
+ if (result && typeof result.then === 'function') {
+ // Wait for the original promise to settle, then delay the result
+ return result.then(
+ value => new Promise(resolve => setTimeout(() => resolve(value), latencyMs)),
+ error => new Promise((_, reject) => setTimeout(() => reject(error), latencyMs))
+ );
+ }
+
+ // For synchronous methods, just add delay
+ return new Promise((resolve) => {
+ setTimeout(() => {
+ resolve(result);
+ }, latencyMs);
+ });
+ };
+}
+
+/**
+ * Wrap MongoDB Collection methods with artificial latency
+ * @param {number} latencyMs - Delay in milliseconds to inject before each operation
+ * @returns {Function} unwrap - Function to restore original methods
+ */
+function wrapMongoDBWithLatency(latencyMs) {
+ if (typeof latencyMs !== 'number' || latencyMs < 0) {
+ throw new Error('latencyMs must be a non-negative number');
+ }
+
+ if (latencyMs === 0) {
+ // eslint-disable-next-line no-console
+ console.log('Latency is 0ms, skipping MongoDB wrapping');
+ return () => {}; // No-op unwrap function
+ }
+
+ // eslint-disable-next-line no-console
+ console.log(`Wrapping MongoDB operations with ${latencyMs}ms artificial latency`);
+
+ // List of MongoDB Collection methods to wrap
+ const methodsToWrap = [
+ 'find',
+ 'findOne',
+ 'countDocuments',
+ 'estimatedDocumentCount',
+ 'distinct',
+ 'aggregate',
+ 'insertOne',
+ 'insertMany',
+ 'updateOne',
+ 'updateMany',
+ 'replaceOne',
+ 'deleteOne',
+ 'deleteMany',
+ 'findOneAndUpdate',
+ 'findOneAndReplace',
+ 'findOneAndDelete',
+ 'createIndex',
+ 'createIndexes',
+ 'dropIndex',
+ 'dropIndexes',
+ 'drop',
+ ];
+
+ methodsToWrap.forEach(methodName => {
+ wrapMethod(methodName, latencyMs);
+ });
+
+ // Return unwrap function to restore original methods
+ return function unwrap() {
+ // eslint-disable-next-line no-console
+ console.log('Removing MongoDB latency wrapper, restoring original methods');
+
+ originalMethods.forEach((originalMethod, methodName) => {
+ Collection.prototype[methodName] = originalMethod;
+ });
+
+ originalMethods.clear();
+ };
+}
+
+module.exports = {
+ wrapMongoDBWithLatency,
+};
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 64aa016df3..4983d2bc7d 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -10,21 +10,28 @@
/* eslint-disable no-console */
+const core = require('@actions/core');
const Parse = require('parse/node');
const { performance, PerformanceObserver } = require('perf_hooks');
const { MongoClient } = require('mongodb');
+const { wrapMongoDBWithLatency } = require('./MongoLatencyWrapper');
// Configuration
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_benchmark_test';
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
-const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10);
+const ITERATIONS = process.env.BENCHMARK_ITERATIONS ? parseInt(process.env.BENCHMARK_ITERATIONS, 10) : undefined;
+const LOG_ITERATIONS = false;
// Parse Server instance
let parseServer;
let mongoClient;
+// Logging helpers
+const logInfo = message => core.info(message);
+const logError = message => core.error(message);
+
/**
* Initialize Parse Server for benchmarking
*/
@@ -87,54 +94,107 @@ async function cleanupDatabase() {
}
/**
- * Measure average time for an async operation over multiple iterations
- * Uses warmup iterations, median metric, and outlier filtering for robustness
+ * Reset Parse SDK to use the default server
+ */
+function resetParseServer() {
+ Parse.serverURL = SERVER_URL;
+}
+
+/**
+ * Measure average time for an async operation over multiple iterations.
+ * @param {Object} options Measurement options.
+ * @param {string} options.name Name of the operation being measured.
+ * @param {Function} options.operation Async function to measure.
+ * @param {number} options.iterations Number of iterations to run; choose a value that is high
+ * enough to create reliable benchmark metrics with low variance but low enough to keep test
+ * duration reasonable (around 10 seconds or less).
+ * @param {boolean} [options.skipWarmup=false] Skip warmup phase.
+ * @param {number} [options.dbLatency] Artificial DB latency in milliseconds to apply during
+ * this benchmark.
*/
-async function measureOperation(name, operation, iterations = ITERATIONS) {
- const warmupCount = Math.floor(iterations * 0.2); // 20% warmup iterations
+async function measureOperation({ name, operation, iterations, skipWarmup = false, dbLatency }) {
+ // Override iterations if global ITERATIONS is set
+ iterations = ITERATIONS || iterations;
+
+ // Determine warmup count (20% of iterations)
+ const warmupCount = skipWarmup ? 0 : Math.floor(iterations * 0.2);
const times = [];
- // Warmup phase - stabilize JIT compilation and caches
- for (let i = 0; i < warmupCount; i++) {
- await operation();
+ // Apply artificial latency if specified
+ let unwrapLatency = null;
+ if (dbLatency !== undefined && dbLatency > 0) {
+ logInfo(`Applying ${dbLatency}ms artificial DB latency for this benchmark`);
+ unwrapLatency = wrapMongoDBWithLatency(dbLatency);
}
- // Measurement phase
- for (let i = 0; i < iterations; i++) {
- const start = performance.now();
- await operation();
- const end = performance.now();
- times.push(end - start);
- }
+ try {
+ if (warmupCount > 0) {
+ logInfo(`Starting warmup phase of ${warmupCount} iterations...`);
+ const warmupStart = performance.now();
+ for (let i = 0; i < warmupCount; i++) {
+ await operation();
+ }
+ logInfo(`Warmup took: ${(performance.now() - warmupStart).toFixed(2)}ms`);
+ }
+
+ // Measurement phase
+ logInfo(`Starting measurement phase of ${iterations} iterations...`);
+ const progressInterval = Math.ceil(iterations / 10); // Log every 10%
+ const measurementStart = performance.now();
+
+ for (let i = 0; i < iterations; i++) {
+ const start = performance.now();
+ await operation();
+ const end = performance.now();
+ const duration = end - start;
+ times.push(duration);
+
+ // Log progress every 10% or individual iterations if LOG_ITERATIONS is enabled
+ if (LOG_ITERATIONS) {
+ logInfo(`Iteration ${i + 1}: ${duration.toFixed(2)}ms`);
+ } else if ((i + 1) % progressInterval === 0 || i + 1 === iterations) {
+ const progress = Math.round(((i + 1) / iterations) * 100);
+ logInfo(`Progress: ${progress}%`);
+ }
+ }
- // Sort times for percentile calculations
- times.sort((a, b) => a - b);
-
- // Filter outliers using Interquartile Range (IQR) method
- const q1Index = Math.floor(times.length * 0.25);
- const q3Index = Math.floor(times.length * 0.75);
- const q1 = times[q1Index];
- const q3 = times[q3Index];
- const iqr = q3 - q1;
- const lowerBound = q1 - 1.5 * iqr;
- const upperBound = q3 + 1.5 * iqr;
-
- const filtered = times.filter(t => t >= lowerBound && t <= upperBound);
-
- // Calculate statistics on filtered data
- const median = filtered[Math.floor(filtered.length * 0.5)];
- const p95 = filtered[Math.floor(filtered.length * 0.95)];
- const p99 = filtered[Math.floor(filtered.length * 0.99)];
- const min = filtered[0];
- const max = filtered[filtered.length - 1];
-
- return {
- name,
- value: median, // Use median (p50) as primary metric for stability in CI
- unit: 'ms',
- range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
- extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`,
- };
+ logInfo(`Measurement took: ${(performance.now() - measurementStart).toFixed(2)}ms`);
+
+ // Sort times for percentile calculations
+ times.sort((a, b) => a - b);
+
+ // Filter outliers using Interquartile Range (IQR) method
+ const q1Index = Math.floor(times.length * 0.25);
+ const q3Index = Math.floor(times.length * 0.75);
+ const q1 = times[q1Index];
+ const q3 = times[q3Index];
+ const iqr = q3 - q1;
+ const lowerBound = q1 - 1.5 * iqr;
+ const upperBound = q3 + 1.5 * iqr;
+
+ const filtered = times.filter(t => t >= lowerBound && t <= upperBound);
+
+ // Calculate statistics on filtered data
+ const median = filtered[Math.floor(filtered.length * 0.5)];
+ const p95 = filtered[Math.floor(filtered.length * 0.95)];
+ const p99 = filtered[Math.floor(filtered.length * 0.99)];
+ const min = filtered[0];
+ const max = filtered[filtered.length - 1];
+
+ return {
+ name,
+ value: median, // Use median (p50) as primary metric for stability in CI
+ unit: 'ms',
+ range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
+ extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`,
+ };
+ } finally {
+ // Remove latency wrapper if it was applied
+ if (unwrapLatency) {
+ unwrapLatency();
+ logInfo('Removed artificial DB latency');
+ }
+ }
}
/**
@@ -143,13 +203,17 @@ async function measureOperation(name, operation, iterations = ITERATIONS) {
async function benchmarkObjectCreate() {
let counter = 0;
- return measureOperation('Object Create', async () => {
- const TestObject = Parse.Object.extend('BenchmarkTest');
- const obj = new TestObject();
- obj.set('testField', `test-value-${counter++}`);
- obj.set('number', counter);
- obj.set('boolean', true);
- await obj.save();
+ return measureOperation({
+ name: 'Object Create',
+ iterations: 1_000,
+ operation: async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const obj = new TestObject();
+ obj.set('testField', `test-value-${counter++}`);
+ obj.set('number', counter);
+ obj.set('boolean', true);
+ await obj.save();
+ },
});
}
@@ -161,7 +225,7 @@ async function benchmarkObjectRead() {
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
- for (let i = 0; i < ITERATIONS; i++) {
+ for (let i = 0; i < 1_000; i++) {
const obj = new TestObject();
obj.set('testField', `read-test-${i}`);
objects.push(obj);
@@ -171,9 +235,13 @@ async function benchmarkObjectRead() {
let counter = 0;
- return measureOperation('Object Read', async () => {
- const query = new Parse.Query('BenchmarkTest');
- await query.get(objects[counter++ % objects.length].id);
+ return measureOperation({
+ name: 'Object Read',
+ iterations: 1_000,
+ operation: async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ await query.get(objects[counter++ % objects.length].id);
+ },
});
}
@@ -185,7 +253,7 @@ async function benchmarkObjectUpdate() {
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
- for (let i = 0; i < ITERATIONS; i++) {
+ for (let i = 0; i < 1_000; i++) {
const obj = new TestObject();
obj.set('testField', `update-test-${i}`);
obj.set('counter', 0);
@@ -196,11 +264,15 @@ async function benchmarkObjectUpdate() {
let counter = 0;
- return measureOperation('Object Update', async () => {
- const obj = objects[counter++ % objects.length];
- obj.increment('counter');
- obj.set('lastUpdated', new Date());
- await obj.save();
+ return measureOperation({
+ name: 'Object Update',
+ iterations: 1_000,
+ operation: async () => {
+ const obj = objects[counter++ % objects.length];
+ obj.increment('counter');
+ obj.set('lastUpdated', new Date());
+ await obj.save();
+ },
});
}
@@ -223,10 +295,14 @@ async function benchmarkSimpleQuery() {
let counter = 0;
- return measureOperation('Simple Query', async () => {
- const query = new Parse.Query('BenchmarkTest');
- query.equalTo('category', counter++ % 10);
- await query.find();
+ return measureOperation({
+ name: 'Simple Query',
+ iterations: 1_000,
+ operation: async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ query.equalTo('category', counter++ % 10);
+ await query.find();
+ },
});
}
@@ -236,18 +312,22 @@ async function benchmarkSimpleQuery() {
async function benchmarkBatchSave() {
const BATCH_SIZE = 10;
- return measureOperation('Batch Save (10 objects)', async () => {
- const TestObject = Parse.Object.extend('BenchmarkTest');
- const objects = [];
-
- for (let i = 0; i < BATCH_SIZE; i++) {
- const obj = new TestObject();
- obj.set('batchField', `batch-${i}`);
- obj.set('timestamp', new Date());
- objects.push(obj);
- }
+ return measureOperation({
+ name: 'Batch Save (10 objects)',
+ iterations: 1_000,
+ operation: async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < BATCH_SIZE; i++) {
+ const obj = new TestObject();
+ obj.set('batchField', `batch-${i}`);
+ obj.set('timestamp', new Date());
+ objects.push(obj);
+ }
- await Parse.Object.saveAll(objects);
+ await Parse.Object.saveAll(objects);
+ },
});
}
@@ -257,13 +337,17 @@ async function benchmarkBatchSave() {
async function benchmarkUserSignup() {
let counter = 0;
- return measureOperation('User Signup', async () => {
- counter++;
- const user = new Parse.User();
- user.set('username', `benchmark_user_${Date.now()}_${counter}`);
- user.set('password', 'benchmark_password');
- user.set('email', `benchmark${counter}@example.com`);
- await user.signUp();
+ return measureOperation({
+ name: 'User Signup',
+ iterations: 500,
+ operation: async () => {
+ counter++;
+ const user = new Parse.User();
+ user.set('username', `benchmark_user_${Date.now()}_${counter}`);
+ user.set('password', 'benchmark_password');
+ user.set('email', `benchmark${counter}@example.com`);
+ await user.signUp();
+ },
});
}
@@ -286,10 +370,66 @@ async function benchmarkUserLogin() {
let counter = 0;
- return measureOperation('User Login', async () => {
- const userCreds = users[counter++ % users.length];
- await Parse.User.logIn(userCreds.username, userCreds.password);
- await Parse.User.logOut();
+ return measureOperation({
+ name: 'User Login',
+ iterations: 500,
+ operation: async () => {
+ const userCreds = users[counter++ % users.length];
+ await Parse.User.logIn(userCreds.username, userCreds.password);
+ await Parse.User.logOut();
+ },
+ });
+}
+
+/**
+ * Benchmark: Query with Include (Parallel Include Pointers)
+ */
+async function benchmarkQueryWithInclude() {
+ // Setup: Create nested object hierarchy
+ const Level2Class = Parse.Object.extend('Level2');
+ const Level1Class = Parse.Object.extend('Level1');
+ const RootClass = Parse.Object.extend('Root');
+
+ return measureOperation({
+ name: 'Query with Include (2 levels)',
+ skipWarmup: true,
+ dbLatency: 50,
+ iterations: 100,
+ operation: async () => {
+ // Create 10 Level2 objects
+ const level2Objects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level2Class();
+ obj.set('name', `level2-${i}`);
+ obj.set('value', i);
+ level2Objects.push(obj);
+ }
+ await Parse.Object.saveAll(level2Objects);
+
+ // Create 10 Level1 objects, each pointing to a Level2 object
+ const level1Objects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level1Class();
+ obj.set('name', `level1-${i}`);
+ obj.set('level2', level2Objects[i % level2Objects.length]);
+ level1Objects.push(obj);
+ }
+ await Parse.Object.saveAll(level1Objects);
+
+ // Create 10 Root objects, each pointing to a Level1 object
+ const rootObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new RootClass();
+ obj.set('name', `root-${i}`);
+ obj.set('level1', level1Objects[i % level1Objects.length]);
+ rootObjects.push(obj);
+ }
+ await Parse.Object.saveAll(rootObjects);
+
+ const query = new Parse.Query('Root');
+ query.include('level1.level2');
+ await query.find();
+ },
});
}
@@ -297,14 +437,13 @@ async function benchmarkUserLogin() {
* Run all benchmarks
*/
async function runBenchmarks() {
- console.log('Starting Parse Server Performance Benchmarks...');
- console.log(`Iterations per benchmark: ${ITERATIONS}`);
+ logInfo('Starting Parse Server Performance Benchmarks...');
let server;
try {
// Initialize Parse Server
- console.log('Initializing Parse Server...');
+ logInfo('Initializing Parse Server...');
server = await initializeParseServer();
// Wait for server to be ready
@@ -312,47 +451,38 @@ async function runBenchmarks() {
const results = [];
- // Run each benchmark with database cleanup
- console.log('Running Object Create benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkObjectCreate());
-
- console.log('Running Object Read benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkObjectRead());
-
- console.log('Running Object Update benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkObjectUpdate());
+ // Define all benchmarks to run
+ const benchmarks = [
+ { name: 'Object Create', fn: benchmarkObjectCreate },
+ { name: 'Object Read', fn: benchmarkObjectRead },
+ { name: 'Object Update', fn: benchmarkObjectUpdate },
+ { name: 'Simple Query', fn: benchmarkSimpleQuery },
+ { name: 'Batch Save', fn: benchmarkBatchSave },
+ { name: 'User Signup', fn: benchmarkUserSignup },
+ { name: 'User Login', fn: benchmarkUserLogin },
+ { name: 'Query with Include', fn: benchmarkQueryWithInclude },
+ ];
- console.log('Running Simple Query benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkSimpleQuery());
-
- console.log('Running Batch Save benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkBatchSave());
-
- console.log('Running User Signup benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkUserSignup());
-
- console.log('Running User Login benchmark...');
- await cleanupDatabase();
- results.push(await benchmarkUserLogin());
+ // Run each benchmark with database cleanup
+ for (const benchmark of benchmarks) {
+ logInfo(`\nRunning benchmark '${benchmark.name}'...`);
+ resetParseServer();
+ await cleanupDatabase();
+ results.push(await benchmark.fn());
+ }
// Output results in github-action-benchmark format (stdout)
- console.log(JSON.stringify(results, null, 2));
+ logInfo(JSON.stringify(results, null, 2));
// Output summary to stderr for visibility
- console.log('Benchmarks completed successfully!');
- console.log('Summary:');
+ logInfo('Benchmarks completed successfully!');
+ logInfo('Summary:');
results.forEach(result => {
- console.log(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
+ logInfo(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
});
} catch (error) {
- console.error('Error running benchmarks:', error);
+ logError('Error running benchmarks:', error);
process.exit(1);
} finally {
// Cleanup
From dafea21eb39b0fdc2b52bb8a14f7b61e3f2b8d13 Mon Sep 17 00:00:00 2001
From: Antoine Cormouls
Date: Mon, 17 Nov 2025 15:42:49 +0100
Subject: [PATCH 11/12] perf: `Parse.Query.include` now fetches pointers at
same level in parallel (#9861)
---
.github/workflows/ci-performance.yml | 4 +-
benchmark/performance.js | 213 +++++++++++++++++++--------
spec/RestQuery.spec.js | 82 +++++++++++
src/RestQuery.js | 61 +++++---
4 files changed, 279 insertions(+), 81 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index b080b668aa..c9cb055e13 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -70,7 +70,7 @@ jobs:
env:
NODE_ENV: production
run: |
- echo "Running baseline benchmarks with CPU affinity (using PR's benchmark script)..."
+ echo "Running baseline benchmarks..."
if [ ! -f "benchmark/performance.js" ]; then
echo "⚠️ Benchmark script not found - this is expected for new features"
echo "Skipping baseline benchmark"
@@ -135,7 +135,7 @@ jobs:
env:
NODE_ENV: production
run: |
- echo "Running PR benchmarks with CPU affinity..."
+ echo "Running PR benchmarks..."
taskset -c 0 npm run benchmark > pr-output.txt 2>&1 || npm run benchmark > pr-output.txt 2>&1 || true
echo "Benchmark command completed with exit code: $?"
echo "Output file size: $(wc -c < pr-output.txt) bytes"
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 4983d2bc7d..3518052434 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -200,11 +200,11 @@ async function measureOperation({ name, operation, iterations, skipWarmup = fals
/**
* Benchmark: Object Create
*/
-async function benchmarkObjectCreate() {
+async function benchmarkObjectCreate(name) {
let counter = 0;
return measureOperation({
- name: 'Object Create',
+ name,
iterations: 1_000,
operation: async () => {
const TestObject = Parse.Object.extend('BenchmarkTest');
@@ -220,7 +220,7 @@ async function benchmarkObjectCreate() {
/**
* Benchmark: Object Read (by ID)
*/
-async function benchmarkObjectRead() {
+async function benchmarkObjectRead(name) {
// Setup: Create test objects
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
@@ -236,7 +236,7 @@ async function benchmarkObjectRead() {
let counter = 0;
return measureOperation({
- name: 'Object Read',
+ name,
iterations: 1_000,
operation: async () => {
const query = new Parse.Query('BenchmarkTest');
@@ -248,7 +248,7 @@ async function benchmarkObjectRead() {
/**
* Benchmark: Object Update
*/
-async function benchmarkObjectUpdate() {
+async function benchmarkObjectUpdate(name) {
// Setup: Create test objects
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
@@ -265,7 +265,7 @@ async function benchmarkObjectUpdate() {
let counter = 0;
return measureOperation({
- name: 'Object Update',
+ name,
iterations: 1_000,
operation: async () => {
const obj = objects[counter++ % objects.length];
@@ -279,7 +279,7 @@ async function benchmarkObjectUpdate() {
/**
* Benchmark: Simple Query
*/
-async function benchmarkSimpleQuery() {
+async function benchmarkSimpleQuery(name) {
// Setup: Create test data
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
@@ -296,7 +296,7 @@ async function benchmarkSimpleQuery() {
let counter = 0;
return measureOperation({
- name: 'Simple Query',
+ name,
iterations: 1_000,
operation: async () => {
const query = new Parse.Query('BenchmarkTest');
@@ -309,11 +309,11 @@ async function benchmarkSimpleQuery() {
/**
* Benchmark: Batch Save (saveAll)
*/
-async function benchmarkBatchSave() {
+async function benchmarkBatchSave(name) {
const BATCH_SIZE = 10;
return measureOperation({
- name: 'Batch Save (10 objects)',
+ name,
iterations: 1_000,
operation: async () => {
const TestObject = Parse.Object.extend('BenchmarkTest');
@@ -334,11 +334,11 @@ async function benchmarkBatchSave() {
/**
* Benchmark: User Signup
*/
-async function benchmarkUserSignup() {
+async function benchmarkUserSignup(name) {
let counter = 0;
return measureOperation({
- name: 'User Signup',
+ name,
iterations: 500,
operation: async () => {
counter++;
@@ -354,7 +354,7 @@ async function benchmarkUserSignup() {
/**
* Benchmark: User Login
*/
-async function benchmarkUserLogin() {
+async function benchmarkUserLogin(name) {
// Setup: Create test users
const users = [];
@@ -371,7 +371,7 @@ async function benchmarkUserLogin() {
let counter = 0;
return measureOperation({
- name: 'User Login',
+ name,
iterations: 500,
operation: async () => {
const userCreds = users[counter++ % users.length];
@@ -382,52 +382,146 @@ async function benchmarkUserLogin() {
}
/**
- * Benchmark: Query with Include (Parallel Include Pointers)
+ * Benchmark: Query with Include (Parallel Pointers)
+ * Tests the performance improvement when fetching multiple pointers at the same level.
*/
-async function benchmarkQueryWithInclude() {
- // Setup: Create nested object hierarchy
+async function benchmarkQueryWithIncludeParallel(name) {
+ const PointerAClass = Parse.Object.extend('PointerA');
+ const PointerBClass = Parse.Object.extend('PointerB');
+ const PointerCClass = Parse.Object.extend('PointerC');
+ const RootClass = Parse.Object.extend('Root');
+
+ // Create pointer objects
+ const pointerAObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new PointerAClass();
+ obj.set('name', `pointerA-${i}`);
+ pointerAObjects.push(obj);
+ }
+ await Parse.Object.saveAll(pointerAObjects);
+
+ const pointerBObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new PointerBClass();
+ obj.set('name', `pointerB-${i}`);
+ pointerBObjects.push(obj);
+ }
+ await Parse.Object.saveAll(pointerBObjects);
+
+ const pointerCObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new PointerCClass();
+ obj.set('name', `pointerC-${i}`);
+ pointerCObjects.push(obj);
+ }
+ await Parse.Object.saveAll(pointerCObjects);
+
+ // Create Root objects with multiple pointers at the same level
+ const rootObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new RootClass();
+ obj.set('name', `root-${i}`);
+ obj.set('pointerA', pointerAObjects[i % pointerAObjects.length]);
+ obj.set('pointerB', pointerBObjects[i % pointerBObjects.length]);
+ obj.set('pointerC', pointerCObjects[i % pointerCObjects.length]);
+ rootObjects.push(obj);
+ }
+ await Parse.Object.saveAll(rootObjects);
+
+ return measureOperation({
+ name,
+ skipWarmup: true,
+ dbLatency: 100,
+ iterations: 100,
+ operation: async () => {
+ const query = new Parse.Query('Root');
+ // Include multiple pointers at the same level - should fetch in parallel
+ query.include(['pointerA', 'pointerB', 'pointerC']);
+ await query.find();
+ },
+ });
+}
+
+/**
+ * Benchmark: Query with Include (Nested Pointers with Parallel Leaf Nodes)
+ * Tests the PR's optimization for parallel fetching at each nested level.
+ * Pattern: p1.p2.p3, p1.p2.p4, p1.p2.p5
+ * After fetching p2, we know the objectIds and can fetch p3, p4, p5 in parallel.
+ */
+async function benchmarkQueryWithIncludeNested(name) {
+ const Level3AClass = Parse.Object.extend('Level3A');
+ const Level3BClass = Parse.Object.extend('Level3B');
+ const Level3CClass = Parse.Object.extend('Level3C');
const Level2Class = Parse.Object.extend('Level2');
const Level1Class = Parse.Object.extend('Level1');
const RootClass = Parse.Object.extend('Root');
+ // Create Level3 objects (leaf nodes)
+ const level3AObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level3AClass();
+ obj.set('name', `level3A-${i}`);
+ level3AObjects.push(obj);
+ }
+ await Parse.Object.saveAll(level3AObjects);
+
+ const level3BObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level3BClass();
+ obj.set('name', `level3B-${i}`);
+ level3BObjects.push(obj);
+ }
+ await Parse.Object.saveAll(level3BObjects);
+
+ const level3CObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level3CClass();
+ obj.set('name', `level3C-${i}`);
+ level3CObjects.push(obj);
+ }
+ await Parse.Object.saveAll(level3CObjects);
+
+ // Create Level2 objects pointing to multiple Level3 objects
+ const level2Objects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level2Class();
+ obj.set('name', `level2-${i}`);
+ obj.set('level3A', level3AObjects[i % level3AObjects.length]);
+ obj.set('level3B', level3BObjects[i % level3BObjects.length]);
+ obj.set('level3C', level3CObjects[i % level3CObjects.length]);
+ level2Objects.push(obj);
+ }
+ await Parse.Object.saveAll(level2Objects);
+
+ // Create Level1 objects pointing to Level2
+ const level1Objects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new Level1Class();
+ obj.set('name', `level1-${i}`);
+ obj.set('level2', level2Objects[i % level2Objects.length]);
+ level1Objects.push(obj);
+ }
+ await Parse.Object.saveAll(level1Objects);
+
+ // Create Root objects pointing to Level1
+ const rootObjects = [];
+ for (let i = 0; i < 10; i++) {
+ const obj = new RootClass();
+ obj.set('name', `root-${i}`);
+ obj.set('level1', level1Objects[i % level1Objects.length]);
+ rootObjects.push(obj);
+ }
+ await Parse.Object.saveAll(rootObjects);
+
return measureOperation({
- name: 'Query with Include (2 levels)',
+ name,
skipWarmup: true,
- dbLatency: 50,
+ dbLatency: 100,
iterations: 100,
operation: async () => {
- // Create 10 Level2 objects
- const level2Objects = [];
- for (let i = 0; i < 10; i++) {
- const obj = new Level2Class();
- obj.set('name', `level2-${i}`);
- obj.set('value', i);
- level2Objects.push(obj);
- }
- await Parse.Object.saveAll(level2Objects);
-
- // Create 10 Level1 objects, each pointing to a Level2 object
- const level1Objects = [];
- for (let i = 0; i < 10; i++) {
- const obj = new Level1Class();
- obj.set('name', `level1-${i}`);
- obj.set('level2', level2Objects[i % level2Objects.length]);
- level1Objects.push(obj);
- }
- await Parse.Object.saveAll(level1Objects);
-
- // Create 10 Root objects, each pointing to a Level1 object
- const rootObjects = [];
- for (let i = 0; i < 10; i++) {
- const obj = new RootClass();
- obj.set('name', `root-${i}`);
- obj.set('level1', level1Objects[i % level1Objects.length]);
- rootObjects.push(obj);
- }
- await Parse.Object.saveAll(rootObjects);
-
const query = new Parse.Query('Root');
- query.include('level1.level2');
+ // After fetching level1.level2, the PR should fetch level3A, level3B, level3C in parallel
+ query.include(['level1.level2.level3A', 'level1.level2.level3B', 'level1.level2.level3C']);
await query.find();
},
});
@@ -453,14 +547,15 @@ async function runBenchmarks() {
// Define all benchmarks to run
const benchmarks = [
- { name: 'Object Create', fn: benchmarkObjectCreate },
- { name: 'Object Read', fn: benchmarkObjectRead },
- { name: 'Object Update', fn: benchmarkObjectUpdate },
- { name: 'Simple Query', fn: benchmarkSimpleQuery },
- { name: 'Batch Save', fn: benchmarkBatchSave },
- { name: 'User Signup', fn: benchmarkUserSignup },
- { name: 'User Login', fn: benchmarkUserLogin },
- { name: 'Query with Include', fn: benchmarkQueryWithInclude },
+ { name: 'Object.save (create)', fn: benchmarkObjectCreate },
+ { name: 'Object.save (update)', fn: benchmarkObjectUpdate },
+ { name: 'Object.saveAll (batch save)', fn: benchmarkBatchSave },
+ { name: 'Query.get (by objectId)', fn: benchmarkObjectRead },
+ { name: 'Query.find (simple query)', fn: benchmarkSimpleQuery },
+ { name: 'User.signUp', fn: benchmarkUserSignup },
+ { name: 'User.login', fn: benchmarkUserLogin },
+ { name: 'Query.include (parallel pointers)', fn: benchmarkQueryWithIncludeParallel },
+ { name: 'Query.include (nested pointers)', fn: benchmarkQueryWithIncludeNested },
];
// Run each benchmark with database cleanup
@@ -468,7 +563,7 @@ async function runBenchmarks() {
logInfo(`\nRunning benchmark '${benchmark.name}'...`);
resetParseServer();
await cleanupDatabase();
- results.push(await benchmark.fn());
+ results.push(await benchmark.fn(benchmark.name));
}
// Output results in github-action-benchmark format (stdout)
diff --git a/spec/RestQuery.spec.js b/spec/RestQuery.spec.js
index 6fe3c0fa18..7b676da1ea 100644
--- a/spec/RestQuery.spec.js
+++ b/spec/RestQuery.spec.js
@@ -386,6 +386,88 @@ describe('rest query', () => {
}
);
});
+
+ it('battle test parallel include with 100 nested includes', async () => {
+ const RootObject = Parse.Object.extend('RootObject');
+ const Level1Object = Parse.Object.extend('Level1Object');
+ const Level2Object = Parse.Object.extend('Level2Object');
+
+ // Create 100 level2 objects (10 per level1 object)
+ const level2Objects = [];
+ for (let i = 0; i < 100; i++) {
+ const level2 = new Level2Object({
+ index: i,
+ value: `level2_${i}`,
+ });
+ level2Objects.push(level2);
+ }
+ await Parse.Object.saveAll(level2Objects);
+
+ // Create 10 level1 objects, each with 10 pointers to level2 objects
+ const level1Objects = [];
+ for (let i = 0; i < 10; i++) {
+ const level1 = new Level1Object({
+ index: i,
+ value: `level1_${i}`,
+ });
+ // Set 10 pointer fields (level2_0 through level2_9)
+ for (let j = 0; j < 10; j++) {
+ level1.set(`level2_${j}`, level2Objects[i * 10 + j]);
+ }
+ level1Objects.push(level1);
+ }
+ await Parse.Object.saveAll(level1Objects);
+
+ // Create 1 root object with 10 pointers to level1 objects
+ const rootObject = new RootObject({
+ value: 'root',
+ });
+ for (let i = 0; i < 10; i++) {
+ rootObject.set(`level1_${i}`, level1Objects[i]);
+ }
+ await rootObject.save();
+
+ // Build include paths: level1_0 through level1_9, and level1_0.level2_0 through level1_9.level2_9
+ const includePaths = [];
+ for (let i = 0; i < 10; i++) {
+ includePaths.push(`level1_${i}`);
+ for (let j = 0; j < 10; j++) {
+ includePaths.push(`level1_${i}.level2_${j}`);
+ }
+ }
+
+ // Query with all includes
+ const query = new Parse.Query(RootObject);
+ query.equalTo('objectId', rootObject.id);
+ for (const path of includePaths) {
+ query.include(path);
+ }
+ console.time('query.find');
+ const results = await query.find();
+ console.timeEnd('query.find');
+ expect(results.length).toBe(1);
+
+ const result = results[0];
+ expect(result.id).toBe(rootObject.id);
+
+ // Verify all 10 level1 objects are included
+ for (let i = 0; i < 10; i++) {
+ const level1Field = result.get(`level1_${i}`);
+ expect(level1Field).toBeDefined();
+ expect(level1Field instanceof Parse.Object).toBe(true);
+ expect(level1Field.get('index')).toBe(i);
+ expect(level1Field.get('value')).toBe(`level1_${i}`);
+
+ // Verify all 10 level2 objects are included for each level1 object
+ for (let j = 0; j < 10; j++) {
+ const level2Field = level1Field.get(`level2_${j}`);
+ expect(level2Field).toBeDefined();
+ expect(level2Field instanceof Parse.Object).toBe(true);
+ expect(level2Field.get('index')).toBe(i * 10 + j);
+ expect(level2Field.get('value')).toBe(`level2_${i * 10 + j}`);
+ }
+ }
+ });
});
describe('RestQuery.each', () => {
diff --git a/src/RestQuery.js b/src/RestQuery.js
index dd226f249c..c48cecdb6f 100644
--- a/src/RestQuery.js
+++ b/src/RestQuery.js
@@ -856,31 +856,54 @@ _UnsafeRestQuery.prototype.handleExcludeKeys = function () {
};
// Augments this.response with data at the paths provided in this.include.
-_UnsafeRestQuery.prototype.handleInclude = function () {
+_UnsafeRestQuery.prototype.handleInclude = async function () {
if (this.include.length == 0) {
return;
}
- var pathResponse = includePath(
- this.config,
- this.auth,
- this.response,
- this.include[0],
- this.context,
- this.restOptions
- );
- if (pathResponse.then) {
- return pathResponse.then(newResponse => {
- this.response = newResponse;
- this.include = this.include.slice(1);
- return this.handleInclude();
+ const indexedResults = this.response.results.reduce((indexed, result, i) => {
+ indexed[result.objectId] = i;
+ return indexed;
+ }, {});
+
+ // Build the execution tree
+ const executionTree = {}
+ this.include.forEach(path => {
+ let current = executionTree;
+ path.forEach((node) => {
+ if (!current[node]) {
+ current[node] = {
+ path,
+ children: {}
+ };
+ }
+ current = current[node].children
});
- } else if (this.include.length > 0) {
- this.include = this.include.slice(1);
- return this.handleInclude();
+ });
+
+ const recursiveExecutionTree = async (treeNode) => {
+ const { path, children } = treeNode;
+ const pathResponse = includePath(
+ this.config,
+ this.auth,
+ this.response,
+ path,
+ this.context,
+ this.restOptions,
+ this,
+ );
+ if (pathResponse.then) {
+ const newResponse = await pathResponse
+ newResponse.results.forEach(newObject => {
+ // Hydrate the root of each result with its sub-results
+ this.response.results[indexedResults[newObject.objectId]][path[0]] = newObject[path[0]];
+ })
+ }
+ return Promise.all(Object.values(children).map(recursiveExecutionTree));
}
- return pathResponse;
+ await Promise.all(Object.values(executionTree).map(recursiveExecutionTree));
+ this.include = []
};
//Returns a promise of a processed set of results
@@ -1018,7 +1041,6 @@ function includePath(config, auth, response, path, context, restOptions = {}) {
} else if (restOptions.readPreference) {
includeRestOptions.readPreference = restOptions.readPreference;
}
-
const queryPromises = Object.keys(pointersHash).map(async className => {
const objectIds = Array.from(pointersHash[className]);
let where;
@@ -1057,7 +1079,6 @@ function includePath(config, auth, response, path, context, restOptions = {}) {
}
return replace;
}, {});
-
var resp = {
results: replacePointers(response.results, path, replace),
};
From 306c5fd8309d655e118bae54ce708db64a6eb86a Mon Sep 17 00:00:00 2001
From: semantic-release-bot
Date: Mon, 17 Nov 2025 14:43:50 +0000
Subject: [PATCH 12/12] chore(release): 8.5.0-alpha.8 [skip ci]
# [8.5.0-alpha.8](https://github.com/parse-community/parse-server/compare/8.5.0-alpha.7...8.5.0-alpha.8) (2025-11-17)
### Performance Improvements
* `Parse.Query.include` now fetches pointers at same level in parallel ([#9861](https://github.com/parse-community/parse-server/issues/9861)) ([dafea21](https://github.com/parse-community/parse-server/commit/dafea21eb39b0fdc2b52bb8a14f7b61e3f2b8d13))
---
changelogs/CHANGELOG_alpha.md | 7 +++++++
package-lock.json | 4 ++--
package.json | 2 +-
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/changelogs/CHANGELOG_alpha.md b/changelogs/CHANGELOG_alpha.md
index 045b3ae5ab..5f436295f9 100644
--- a/changelogs/CHANGELOG_alpha.md
+++ b/changelogs/CHANGELOG_alpha.md
@@ -1,3 +1,10 @@
+# [8.5.0-alpha.8](https://github.com/parse-community/parse-server/compare/8.5.0-alpha.7...8.5.0-alpha.8) (2025-11-17)
+
+
+### Performance Improvements
+
+* `Parse.Query.include` now fetches pointers at same level in parallel ([#9861](https://github.com/parse-community/parse-server/issues/9861)) ([dafea21](https://github.com/parse-community/parse-server/commit/dafea21eb39b0fdc2b52bb8a14f7b61e3f2b8d13))
+
# [8.5.0-alpha.7](https://github.com/parse-community/parse-server/compare/8.5.0-alpha.6...8.5.0-alpha.7) (2025-11-08)
diff --git a/package-lock.json b/package-lock.json
index 70067284a4..cb058f8179 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "parse-server",
- "version": "8.5.0-alpha.7",
+ "version": "8.5.0-alpha.8",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "parse-server",
- "version": "8.5.0-alpha.7",
+ "version": "8.5.0-alpha.8",
"hasInstallScript": true,
"license": "Apache-2.0",
"dependencies": {
diff --git a/package.json b/package.json
index fe043e8ee4..b289aa6796 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "parse-server",
- "version": "8.5.0-alpha.7",
+ "version": "8.5.0-alpha.8",
"description": "An express module providing a Parse-compatible API server",
"main": "lib/index.js",
"repository": {