# Workflow file captured from a GitHub Actions run page.
# Context: PR #627 — fix(tasks): add Assign Tasks button for stuck pending tasks
name: Test Suite (Unit + E2E)

# NOTE: `on:` is parsed as boolean `true` by generic YAML 1.1 loaders; GitHub's
# own loader handles it correctly, so keep it unquoted here.
on:
  push:
    branches: [main, develop, '0*']
  pull_request:
    branches: [main, develop]
  workflow_call: # Allow this workflow to be called by other workflows
  # schedule:
  #   # Run E2E tests nightly at 2 AM UTC
  #   - cron: '0 2 * * *'

env:
  # Quoted so YAML does not coerce version numbers to floats (3.11 -> 3.11 ok,
  # but e.g. 3.10 would become 3.1).
  PYTHON_VERSION: '3.11'
  NODE_VERSION: '20'
jobs:
  # ============================================
  # Code Quality Checks (Run First - Fast Fail)
  # ============================================
  code-quality:
    name: Code Quality (Lint + Type Check)
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Create virtual environment
        run: uv venv

      - name: Install dependencies
        run: uv sync --extra dev

      - name: Run ruff (linting)
        run: uv run ruff check .

      - name: Run mypy (type checking)
        run: uv run mypy codeframe/

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'
          cache-dependency-path: 'web-ui/package-lock.json'

      - name: Install frontend dependencies
        working-directory: web-ui
        run: npm ci

      - name: Run ESLint
        working-directory: web-ui
        run: npm run lint -- --strict

      - name: Run TypeScript type checking
        working-directory: web-ui
        run: npm run type-check

  # ============================================
  # Static Analysis - Check for Hardcoded URLs
  # ============================================
  check-hardcoded-urls:
    name: Check for Hardcoded URLs
    runs-on: ubuntu-latest
    needs: code-quality
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Check for hardcoded localhost URLs in frontend
        run: |
          echo "🔍 Checking for hardcoded localhost URLs..."

          # Check for hardcoded localhost URLs with common ports (3000-9999)
          # We ONLY allow: process.env.NEXT_PUBLIC_* || 'http://localhost:...' (or ws://, wss://, https://)
          # We reject: 'http://localhost:...' (without env var)
          # We reject: 'http://localhost:...' || process.env.NEXT_PUBLIC_* (reversed)
          HARDCODED=$(grep -rn -E "localhost:[0-9]{2,5}" web-ui/src \
            --include="*.ts" \
            --include="*.tsx" \
            --include="*.js" \
            --include="*.jsx" \
            | grep -v "\.test\." \
            | grep -v "\.spec\." \
            | grep -v "// " \
            | grep -v "/\*" \
            | grep -vE "process\.env\.NEXT_PUBLIC_[A-Z0-9_]+\s*\|\|\s*['\"](https?|wss?)://" \
            || true)

          if [ -n "$HARDCODED" ]; then
            echo "❌ Found hardcoded localhost URLs:"
            echo "$HARDCODED"
            echo ""
            echo "💡 Fix: Use process.env.NEXT_PUBLIC_API_URL instead"
            echo " Example: const API_URL = process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8080';"
            echo " Note: The env var MUST come BEFORE the fallback (not reversed)"
            exit 1
          fi

          echo "✅ No hardcoded localhost URLs found"

      - name: Check for Vite-style env vars in Next.js code
        run: |
          echo "🔍 Checking for Vite-style environment variables..."

          # Check for window.VITE_ or import.meta.env patterns
          VITE_VARS=$(grep -rn "window\.VITE_\|import\.meta\.env" web-ui/src \
            --include="*.ts" \
            --include="*.tsx" \
            --include="*.js" \
            --include="*.jsx" \
            | grep -v "// " \
            | grep -v "/\*" \
            || true)

          if [ -n "$VITE_VARS" ]; then
            echo "❌ Found Vite-style environment variables in Next.js code:"
            echo "$VITE_VARS"
            echo ""
            echo "💡 Fix: Use process.env.NEXT_PUBLIC_* for Next.js"
            echo " Next.js uses NEXT_PUBLIC_ prefix for client-side env vars"
            exit 1
          fi

          echo "✅ No Vite-style environment variables found"

      - name: Verify API files use consistent env vars
        run: |
          echo "🔍 Verifying API files use process.env.NEXT_PUBLIC_API_URL..."

          # This check is a heuristic to catch files making API calls to localhost
          # without using environment variables. The primary hardcoded URL check above
          # is more precise; this is a secondary sanity check.
          #
          # Note: Files that have BOTH a fetch/axios localhost call AND a process.env
          # reference somewhere in the file are considered OK. This may miss edge cases
          # where the env var is for something unrelated to the localhost URL.
          ISSUES=""

          # Check common API-related files for hardcoded URLs
          for file in $(find web-ui/src -type f \( -name "*.ts" -o -name "*.tsx" \) \
            -not -path "*/.next/*" \
            -not -path "*/node_modules/*"); do
            # Skip test files
            if [[ $file == *".test."* ]] || [[ $file == *".spec."* ]]; then
              continue
            fi

            # Check if file has fetch or axios calls with hardcoded localhost
            # AND does not have any NEXT_PUBLIC env var reference
            if grep -qE "fetch\(.*localhost|axios.*localhost" "$file" 2>/dev/null; then
              if ! grep -q "process.env.NEXT_PUBLIC" "$file" 2>/dev/null; then
                ISSUES="${ISSUES}\n - ${file}: Contains localhost URL without env var reference"
              fi
            fi
          done

          if [ -n "$ISSUES" ]; then
            echo "⚠️ API files with potential issues (heuristic check):"
            echo -e "$ISSUES"
            echo ""
            echo "💡 These files appear to use localhost URLs without environment variable configuration"
            echo " This is a heuristic check - verify manually if these are intentional."
            # Changed from exit 1 to warning - the primary check above is more reliable
            # exit 1
          else
            echo "✅ All API files properly configured"
          fi

  # ============================================
  # Backend Unit Tests (After Code Quality)
  # ============================================
  backend-tests:
    name: Backend Unit Tests
    runs-on: ubuntu-latest
    needs: code-quality
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Create virtual environment
        run: uv venv

      - name: Install dependencies
        run: uv sync --extra dev

      - name: Configure git for tests
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "[email protected]"

      - name: Install codeframe package
        run: |
          uv pip install -e .
          echo "✅ Package installed in editable mode"

      - name: Run pytest with coverage
        timeout-minutes: 15
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
        run: |
          uv run pytest tests/ \
            --ignore=tests/e2e \
            --cov=codeframe \
            --cov-report=term \
            --cov-report=xml \
            --cov-report=html \
            -v

      - name: Check coverage threshold (65%)
        run: |
          COVERAGE=$(uv run coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
          echo "Coverage: ${COVERAGE}%"
          if (( $(echo "$COVERAGE < 65" | bc -l) )); then
            echo "❌ Coverage ${COVERAGE}% is below 65% threshold"
            exit 1
          else
            echo "✅ Coverage ${COVERAGE}% meets 65% threshold"
          fi

      - name: Upload coverage reports
        uses: codecov/codecov-action@v4
        with:
          # NOTE(review): `file:` is deprecated in codecov-action v4 in favor of
          # `files:` — still accepted, but consider migrating.
          file: ./coverage.xml
          flags: backend
          name: backend-coverage

  # ============================================
  # Frontend Unit Tests (After Code Quality)
  # ============================================
  frontend-tests:
    name: Frontend Unit Tests
    runs-on: ubuntu-latest
    needs: code-quality
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'
          cache-dependency-path: 'web-ui/package-lock.json'

      - name: Install dependencies
        working-directory: web-ui
        run: npm ci

      - name: Run Jest tests with coverage
        working-directory: web-ui
        run: npm run test:coverage

      - name: Check coverage threshold (65%)
        working-directory: web-ui
        run: |
          COVERAGE=$(cat coverage/coverage-summary.json | jq '.total.statements.pct')
          echo "Coverage: ${COVERAGE}%"
          if (( $(echo "$COVERAGE < 65" | bc -l) )); then
            echo "❌ Coverage ${COVERAGE}% is below 65% threshold"
            exit 1
          else
            echo "✅ Coverage ${COVERAGE}% meets 65% threshold"
          fi

      - name: Upload coverage reports
        uses: codecov/codecov-action@v4
        with:
          directory: web-ui/coverage
          flags: frontend
          name: frontend-coverage

  # ============================================
  # E2E Backend Tests (Pytest)
  # ============================================
  e2e-backend-tests:
    name: E2E Backend Tests
    runs-on: ubuntu-latest
    # Only run on main branch or scheduled runs (not every PR)
    if: github.ref == 'refs/heads/main' || github.event_name == 'schedule'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Create virtual environment
        run: uv venv

      - name: Install dependencies
        run: uv sync --extra dev

      - name: Initialize git for E2E tests
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "[email protected]"

      - name: Install codeframe package
        run: |
          uv pip install -e .
          echo "✅ Package installed in editable mode"

      - name: Initialize database
        run: |
          mkdir -p .codeframe
          source .venv/bin/activate
          python -c "from codeframe.persistence.database import Database; db = Database('.codeframe/state.db'); db.initialize(); db.close()"
          echo "✅ Database initialized"

      - name: Start FastAPI server in background
        env:
          DATABASE_PATH: ${{ github.workspace }}/.codeframe/state.db
          WORKSPACE_ROOT: ${{ github.workspace }}
          CODEFRAME_DEPLOYMENT_MODE: self_hosted
        run: |
          source .venv/bin/activate
          python -m uvicorn codeframe.ui.server:app --port 8080 > /tmp/server.log 2>&1 &
          echo "BACKEND_PID=$!" >> $GITHUB_ENV
          echo "Server started with PID: $!"

      - name: Verify server startup
        run: |
          sleep 5
          if ! ps -p $BACKEND_PID > /dev/null; then
            echo "❌ Server process died immediately"
            cat /tmp/server.log
            exit 1
          fi
          echo "✅ Server process is running (PID: $BACKEND_PID)"

      - name: Wait for backend to be ready
        run: |
          echo "Waiting for server to start..."
          for i in {1..120}; do
            if curl -s http://localhost:8080/health > /dev/null; then
              echo "✅ Server is ready!"
              curl -s http://localhost:8080/health | jq .
              exit 0
            fi
            echo "Attempt $i/120: Server not ready yet..."
            sleep 1
          done
          echo "❌ Server failed to start within 120 seconds"
          echo "=== Server Logs ==="
          cat /tmp/server.log
          exit 1

      - name: Run E2E backend tests
        run: |
          uv run pytest tests/e2e/ \
            -v \
            --tb=short \
            -m "e2e"

      - name: Stop FastAPI server
        if: always()
        run: |
          if [ -n "$BACKEND_PID" ]; then
            kill $BACKEND_PID || true
          fi

      - name: Upload E2E test reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: e2e-backend-reports
          path: |
            tests/e2e/fixtures/
            .pytest_cache/

      - name: Upload server logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: e2e-backend-server-logs
          path: /tmp/server.log
          retention-days: 7

  # ============================================
  # E2E Smoke Tests (Playwright - Chromium only)
  # ============================================
  e2e-smoke-tests:
    name: E2E Smoke Tests (Chromium)
    runs-on: ubuntu-latest
    # Run on PRs for fast feedback
    if: github.event_name == 'pull_request'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'
          cache-dependency-path: 'web-ui/package-lock.json'

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Install Python dependencies
        run: |
          uv venv
          uv sync

      - name: Install frontend dependencies
        working-directory: web-ui
        run: npm ci

      - name: Install E2E test dependencies
        working-directory: tests/e2e
        run: |
          npm ci
          # Rebuild native modules for current Node.js version
          # (better-sqlite3 requires matching NODE_MODULE_VERSION)
          npm rebuild better-sqlite3

      - name: Install Playwright browsers (Chromium only)
        working-directory: tests/e2e
        run: npx playwright install chromium --with-deps

      - name: Install codeframe package
        run: |
          uv pip install -e .
          echo "✅ Package installed in editable mode"

      - name: Initialize database
        run: |
          # E2E tests expect database at tests/e2e/.codeframe/state.db
          mkdir -p tests/e2e/.codeframe
          source .venv/bin/activate
          python -c "from codeframe.persistence.database import Database; db = Database('tests/e2e/.codeframe/state.db'); db.initialize(); db.close()"
          echo "✅ Database initialized at tests/e2e/.codeframe/state.db"

      - name: Start backend server
        env:
          # Must match TEST_DB_PATH in tests/e2e/e2e-config.ts
          DATABASE_PATH: ${{ github.workspace }}/tests/e2e/.codeframe/state.db
          WORKSPACE_ROOT: ${{ github.workspace }}
          CODEFRAME_DEPLOYMENT_MODE: self_hosted
        run: |
          source .venv/bin/activate
          python -m uvicorn codeframe.ui.server:app --port 8080 > /tmp/backend.log 2>&1 &
          echo "BACKEND_PID=$!" >> $GITHUB_ENV
          echo "Backend started with PID: $!"

      - name: Wait for backend to be ready
        run: |
          echo "Waiting for backend to start..."
          for i in {1..120}; do
            if curl -s http://localhost:8080/health > /dev/null; then
              echo "✅ Backend is ready!"
              curl -s http://localhost:8080/health | jq .
              break
            fi
            echo "Attempt $i/120: Backend not ready yet..."
            sleep 1
            if [ $i -eq 120 ]; then
              echo "❌ Backend failed to start within 120 seconds"
              cat /tmp/backend.log
              exit 1
            fi
          done

      - name: Start frontend dev server
        working-directory: web-ui
        env:
          PORT: 3001
          NEXT_PUBLIC_API_URL: http://localhost:8080
        run: |
          npm run dev > /tmp/frontend.log 2>&1 &
          echo "FRONTEND_PID=$!" >> $GITHUB_ENV
          echo "Frontend started with PID: $! on port 3001"

      - name: Wait for frontend to be ready
        run: |
          echo "Waiting for frontend to start on port 3001..."
          for i in {1..60}; do
            if curl -s http://localhost:3001 > /dev/null; then
              echo "✅ Frontend is ready on port 3001!"
              break
            fi
            echo "Attempt $i/60: Frontend not ready yet..."
            sleep 1
            if [ $i -eq 60 ]; then
              echo "❌ Frontend failed to start within 60 seconds"
              cat /tmp/frontend.log
              exit 1
            fi
          done

      - name: Run Playwright smoke tests (Chromium only)
        working-directory: tests/e2e
        run: npm run test:smoke

      - name: Upload Playwright report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-smoke-report
          path: tests/e2e/playwright-report/
          retention-days: 7

      - name: Upload backend logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: e2e-smoke-backend-logs
          path: /tmp/backend.log
          retention-days: 7

      - name: Upload frontend logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: e2e-smoke-frontend-logs
          path: /tmp/frontend.log
          retention-days: 7

      - name: Stop servers
        if: always()
        run: |
          if [ -n "$BACKEND_PID" ]; then
            kill $BACKEND_PID || true
          fi
          if [ -n "$FRONTEND_PID" ]; then
            kill $FRONTEND_PID || true
          fi

  # ============================================
  # E2E Frontend Tests (Playwright - All Browsers)
  # ============================================
  # COMMENTED OUT: Enable when smoke tests are passing consistently
  # e2e-frontend-tests:
  #   name: E2E Frontend Tests (All Browsers)
  #   runs-on: ubuntu-latest
  #   # Only run on main branch or scheduled runs
  #   if: github.ref == 'refs/heads/main' || github.event_name == 'schedule'
  #
  #   steps:
  #     - name: Checkout code
  #       uses: actions/checkout@v4
  #
  #     - name: Set up Node.js
  #       uses: actions/setup-node@v4
  #       with:
  #         node-version: ${{ env.NODE_VERSION }}
  #         cache: 'npm'
  #         cache-dependency-path: 'web-ui/package-lock.json'
  #
  #     - name: Set up Python
  #       uses: actions/setup-python@v5
  #       with:
  #         python-version: ${{ env.PYTHON_VERSION }}
  #
  #     - name: Install uv
  #       uses: astral-sh/setup-uv@v4
  #       with:
  #         enable-cache: true
  #
  #     - name: Install Python dependencies
  #       run: |
  #         uv venv
  #         uv sync
  #
  #     - name: Install frontend dependencies
  #       working-directory: web-ui
  #       run: npm ci
  #
  #     - name: Install E2E test dependencies
  #       working-directory: tests/e2e
  #       run: npm ci
  #
  #     - name: Install Playwright browsers
  #       working-directory: tests/e2e
  #       run: npx playwright install --with-deps
  #
  #     - name: Install codeframe package
  #       run: |
  #         uv pip install -e .
  #         echo "✅ Package installed in editable mode"
  #
  #     - name: Initialize database
  #       run: |
  #         mkdir -p .codeframe
  #         source .venv/bin/activate
  #         python -c "from codeframe.persistence.database import Database; db = Database('.codeframe/state.db'); db.initialize(); db.close()"
  #         echo "✅ Database initialized"
  #
  #     - name: Start backend server
  #       env:
  #         DATABASE_PATH: ${{ github.workspace }}/.codeframe/state.db
  #         WORKSPACE_ROOT: ${{ github.workspace }}
  #         CODEFRAME_DEPLOYMENT_MODE: self_hosted
  #       run: |
  #         source .venv/bin/activate
  #         python -m uvicorn codeframe.ui.server:app --port 8080 > /tmp/backend.log 2>&1 &
  #         echo "BACKEND_PID=$!" >> $GITHUB_ENV
  #         echo "Backend started with PID: $!"
  #
  #     - name: Verify backend startup
  #       run: |
  #         sleep 5
  #         if ! ps -p $BACKEND_PID > /dev/null; then
  #           echo "❌ Backend process died immediately"
  #           cat /tmp/backend.log
  #           exit 1
  #         fi
  #         echo "✅ Backend process is running (PID: $BACKEND_PID)"
  #
  #     - name: Wait for backend to be ready
  #       run: |
  #         echo "Waiting for backend to start..."
  #         for i in {1..120}; do
  #           if curl -s http://localhost:8080/health > /dev/null; then
  #             echo "✅ Backend is ready!"
  #             curl -s http://localhost:8080/health | jq .
  #             break
  #           fi
  #           echo "Attempt $i/120: Backend not ready yet..."
  #           sleep 1
  #           if [ $i -eq 120 ]; then
  #             echo "❌ Backend failed to start within 120 seconds"
  #             echo "=== Backend Logs ==="
  #             cat /tmp/backend.log
  #             exit 1
  #           fi
  #         done
  #
  #     - name: Start frontend dev server
  #       working-directory: web-ui
  #       run: |
  #         npm run dev > /tmp/frontend.log 2>&1 &
  #         echo "FRONTEND_PID=$!" >> $GITHUB_ENV
  #         echo "Frontend started with PID: $!"
  #
  #     - name: Wait for frontend to be ready
  #       run: |
  #         echo "Waiting for frontend to start..."
  #         for i in {1..60}; do
  #           if curl -s http://localhost:3000 > /dev/null; then
  #             echo "✅ Frontend is ready!"
  #             break
  #           fi
  #           echo "Attempt $i/60: Frontend not ready yet..."
  #           sleep 1
  #           if [ $i -eq 60 ]; then
  #             echo "❌ Frontend failed to start within 60 seconds"
  #             echo "=== Frontend Logs ==="
  #             cat /tmp/frontend.log
  #             exit 1
  #           fi
  #         done
  #
  #     - name: Run Playwright E2E tests
  #       working-directory: tests/e2e
  #       run: |
  #         npx playwright test \
  #           --config=playwright.config.ts \
  #           *.spec.ts
  #
  #     - name: Upload Playwright report
  #       if: always()
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: playwright-report
  #         path: tests/e2e/playwright-report/
  #         retention-days: 30
  #
  #     - name: Upload backend logs
  #       if: always()
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: e2e-frontend-backend-logs
  #         path: /tmp/backend.log
  #         retention-days: 7
  #
  #     - name: Upload frontend logs
  #       if: always()
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: e2e-frontend-frontend-logs
  #         path: /tmp/frontend.log
  #         retention-days: 7
  #
  #     - name: Stop servers
  #       if: always()
  #       run: |
  #         if [ -n "$BACKEND_PID" ]; then
  #           kill $BACKEND_PID || true
  #         fi
  #         if [ -n "$FRONTEND_PID" ]; then
  #           kill $FRONTEND_PID || true
  #         fi

  # ============================================
  # TestSprite E2E Tests (Optional)
  # ============================================
  testsprite-e2e:
    name: TestSprite E2E Tests
    runs-on: ubuntu-latest
    # Only run on scheduled builds (nightly)
    if: github.event_name == 'schedule'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install frontend dependencies
        working-directory: web-ui
        run: npm ci

      - name: Start frontend dev server
        working-directory: web-ui
        run: |
          npm run dev &
          echo "FRONTEND_PID=$!" >> $GITHUB_ENV

      - name: Wait for frontend
        run: npx wait-on http://localhost:3000 --timeout 60000

      - name: Run TestSprite E2E tests
        env:
          TESTSPRITE_API_KEY: ${{ secrets.TESTSPRITE_API_KEY }}
        run: |
          npx @testsprite/testsprite-mcp@latest generateCodeAndExecute

      - name: Upload TestSprite results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: testsprite-results
          path: testsprite_tests/

      - name: Stop frontend server
        if: always()
        run: |
          if [ -n "$FRONTEND_PID" ]; then
            kill $FRONTEND_PID || true
          fi

  # ============================================
  # Test Summary
  # ============================================
  test-summary:
    name: Test Summary
    runs-on: ubuntu-latest
    needs: [backend-tests, frontend-tests, code-quality, check-hardcoded-urls]
    if: always()
    steps:
      - name: Report test results
        run: |
          echo "## Test Suite Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "| Check | Status |" >> $GITHUB_STEP_SUMMARY
          echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY
          echo "| Code Quality | ${{ needs.code-quality.result }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Hardcoded URLs | ${{ needs.check-hardcoded-urls.result }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Backend Tests | ${{ needs.backend-tests.result }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Frontend Tests | ${{ needs.frontend-tests.result }} |" >> $GITHUB_STEP_SUMMARY

          if [ "${{ needs.code-quality.result }}" == "failure" ] || \
             [ "${{ needs.check-hardcoded-urls.result }}" == "failure" ] || \
             [ "${{ needs.backend-tests.result }}" == "failure" ] || \
             [ "${{ needs.frontend-tests.result }}" == "failure" ]; then
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "❌ Some checks failed. Please review the logs above." >> $GITHUB_STEP_SUMMARY
            exit 1
          else
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "✅ All checks passed!" >> $GITHUB_STEP_SUMMARY
          fi