---
# CI/CD pipeline: build + test on every push/PR, then run gRPC benchmarks
# and (on PRs) post results and check for performance regressions.
name: CI/CD with Benchmarks

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
8+
jobs:
  # Compile the project, run the test suite, and verify the benchmark
  # target builds. Gatekeeper for the benchmark job below.
  build-and-test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Setup Zig
        uses: goto-bus-stop/setup-zig@v2
        with:
          # Quoted so version strings are never re-typed as numbers.
          version: "0.11.0"

      - name: Restore Zig cache
        # cache@v3 is deprecated; v4 is the supported release.
        uses: actions/cache@v4
        with:
          path: |
            ~/.cache/zig
            zig-cache
          key: ${{ runner.os }}-zig-${{ hashFiles('**/*.zig', '**/build.zig.zon') }}
          restore-keys: |
            ${{ runner.os }}-zig-

      - name: Build project
        run: zig build

      - name: Run tests
        run: zig build test

      - name: Build benchmark tool
        run: zig build benchmark
39+
40+ benchmark :
41+ runs-on : ubuntu-latest
42+ needs : build-and-test
43+ if : github.event_name == 'push' || github.event_name == 'pull_request'
44+
45+ steps :
46+ - uses : actions/checkout@v4
47+
48+ - name : Setup Zig
49+ uses : goto-bus-stop/setup-zig@v2
50+ with :
51+ version : 0.11.0
52+
53+ - name : Restore Zig cache
54+ uses : actions/cache@v3
55+ with :
56+ path : |
57+ ~/.cache/zig
58+ zig-cache
59+ key : ${{ runner.os }}-zig-${{ hashFiles('**/*.zig', '**/build.zig.zon') }}
60+ restore-keys : |
61+ ${{ runner.os }}-zig-
62+
63+ - name : Build benchmark tool
64+ run : zig build
65+
66+ - name : Start gRPC server in background
67+ run : |
68+ # Start the server in the background
69+ timeout 30 ./zig-out/bin/grpc-server-example &
70+ SERVER_PID=$!
71+ echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
72+
73+ # Wait for server to start
74+ sleep 5
75+
76+ # Check if server is running
77+ if ! kill -0 $SERVER_PID 2>/dev/null; then
78+ echo "Server failed to start"
79+ exit 1
80+ fi
81+
82+ - name : Run benchmarks
83+ run : |
84+ # Run benchmark with reduced load for CI
85+ timeout 60 ./zig-out/bin/grpc-benchmark \
86+ --host localhost \
87+ --port 50051 \
88+ --requests 100 \
89+ --clients 5 \
90+ --size 512 \
91+ --output json > benchmark_results.json
92+
93+ # Also output in text format for logs
94+ timeout 60 ./zig-out/bin/grpc-benchmark \
95+ --host localhost \
96+ --port 50051 \
97+ --requests 100 \
98+ --clients 5 \
99+ --size 512 \
100+ --output text
101+ continue-on-error : true
102+
103+ - name : Stop server
104+ run : |
105+ if [ ! -z "$SERVER_PID" ]; then
106+ kill $SERVER_PID || true
107+ fi
108+
109+ - name : Upload benchmark results
110+ uses : actions/upload-artifact@v3
111+ with :
112+ name : benchmark-results-${{ github.sha }}
113+ path : benchmark_results.json
114+ retention-days : 30
115+ if : always()
116+
117+ - name : Post benchmark results to PR
118+ uses : actions/github-script@v6
119+ if : github.event_name == 'pull_request' && always()
120+ with :
121+ script : |
122+ const fs = require('fs');
123+
124+ try {
125+ const results = JSON.parse(fs.readFileSync('benchmark_results.json', 'utf8'));
126+
127+ const comment = `## 🚀 Benchmark Results
128+
129+ **Performance Summary:**
130+ - **Requests/sec:** ${results.requests_per_second.toFixed(2)}
131+ - **Total Requests:** ${results.total_requests}
132+ - **Success Rate:** ${((results.successful_requests / results.total_requests) * 100).toFixed(1)}%
133+ - **Error Rate:** ${(results.error_rate * 100).toFixed(2)}%
134+
135+ **Latency Statistics:**
136+ - **Average:** ${results.latency_stats.avg_ms.toFixed(2)}ms
137+ - **95th percentile:** ${results.latency_stats.p95_ms.toFixed(2)}ms
138+ - **99th percentile:** ${results.latency_stats.p99_ms.toFixed(2)}ms
139+ - **Min/Max:** ${results.latency_stats.min_ms.toFixed(2)}ms / ${results.latency_stats.max_ms.toFixed(2)}ms
140+
141+ **Configuration:**
142+ - Concurrent clients: 5
143+ - Requests per client: 100
144+ - Payload size: 512 bytes
145+
146+ ---
147+ *Benchmark run on: ${new Date(results.timestamp * 1000).toISOString()}*
148+ `;
149+
150+ github.rest.issues.createComment({
151+ issue_number: context.issue.number,
152+ owner: context.repo.owner,
153+ repo: context.repo.repo,
154+ body: comment
155+ });
156+ } catch (error) {
157+ console.log('Failed to post benchmark results:', error);
158+
159+ // Post a fallback comment
160+ github.rest.issues.createComment({
161+ issue_number: context.issue.number,
162+ owner: context.repo.owner,
163+ repo: context.repo.repo,
164+ body: '## 🚀 Benchmark Results\n\n❌ Benchmark failed to complete or results could not be parsed. Check the CI logs for details.'
165+ });
166+ }
167+
168+ performance-regression-check :
169+ runs-on : ubuntu-latest
170+ needs : benchmark
171+ if : github.event_name == 'pull_request'
172+
173+ steps :
174+ - uses : actions/checkout@v4
175+ with :
176+ fetch-depth : 0
177+
178+ - name : Download benchmark results
179+ uses : actions/download-artifact@v3
180+ with :
181+ name : benchmark-results-${{ github.sha }}
182+ path : ./current
183+ continue-on-error : true
184+
185+ - name : Check for performance regression
186+ uses : actions/github-script@v6
187+ with :
188+ script : |
189+ const fs = require('fs');
190+
191+ try {
192+ const currentResults = JSON.parse(fs.readFileSync('./current/benchmark_results.json', 'utf8'));
193+
194+ // Define performance thresholds
195+ const thresholds = {
196+ requests_per_second_min: 1000, // Minimum acceptable RPS
197+ latency_p95_max: 100, // Maximum acceptable P95 latency (ms)
198+ error_rate_max: 0.05 // Maximum acceptable error rate (5%)
199+ };
200+
201+ let warnings = [];
202+ let errors = [];
203+
204+ // Check performance against thresholds
205+ if (currentResults.requests_per_second < thresholds.requests_per_second_min) {
206+ warnings.push(`⚠️ Low throughput: ${currentResults.requests_per_second.toFixed(2)} RPS (threshold: ${thresholds.requests_per_second_min} RPS)`);
207+ }
208+
209+ if (currentResults.latency_stats.p95_ms > thresholds.latency_p95_max) {
210+ warnings.push(`⚠️ High latency: P95 ${currentResults.latency_stats.p95_ms.toFixed(2)}ms (threshold: ${thresholds.latency_p95_max}ms)`);
211+ }
212+
213+ if (currentResults.error_rate > thresholds.error_rate_max) {
214+ errors.push(`❌ High error rate: ${(currentResults.error_rate * 100).toFixed(2)}% (threshold: ${(thresholds.error_rate_max * 100).toFixed(1)}%)`);
215+ }
216+
217+ if (errors.length > 0 || warnings.length > 0) {
218+ let comment = '## 🚨 Performance Analysis\n\n';
219+
220+ if (errors.length > 0) {
221+ comment += '**Critical Issues:**\n';
222+ errors.forEach(error => comment += `${error}\n`);
223+ comment += '\n';
224+ }
225+
226+ if (warnings.length > 0) {
227+ comment += '**Warnings:**\n';
228+ warnings.forEach(warning => comment += `${warning}\n`);
229+ comment += '\n';
230+ }
231+
232+ comment += '**Recommendations:**\n';
233+ comment += '- Review recent changes for performance impact\n';
234+ comment += '- Run local benchmarks to confirm results\n';
235+ comment += '- Consider optimizing critical code paths\n';
236+
237+ github.rest.issues.createComment({
238+ issue_number: context.issue.number,
239+ owner: context.repo.owner,
240+ repo: context.repo.repo,
241+ body: comment
242+ });
243+
244+ if (errors.length > 0) {
245+ core.setFailed('Performance regression detected');
246+ }
247+ } else {
248+ github.rest.issues.createComment({
249+ issue_number: context.issue.number,
250+ owner: context.repo.owner,
251+ repo: context.repo.repo,
252+ body: '## ✅ Performance Analysis\n\nAll performance metrics are within acceptable thresholds.'
253+ });
254+ }
255+ } catch (error) {
256+ console.log('Performance regression check failed:', error);
257+ }