first commit

This commit is contained in:
Nicholai 2025-11-02 01:38:30 -06:00
commit 06e41023c3
36 changed files with 3791 additions and 0 deletions

77
.env.example Normal file
View File

@ -0,0 +1,77 @@
# ---------------------------------------------------------------------------
# LOCAL DEVELOPMENT TEMPLATE
# Copy this file to `.env` and fill only the sections that apply to your app.
# Treat blanks as intentional—remove unused sections after bootstrapping.
# ---------------------------------------------------------------------------
# === Core application ===
APP_NAME="my-app"
NODE_ENV="development"
APP_URL="http://localhost:3000"
# === Database (PostgreSQL recommended) ===
# Example: postgres://user:password@host:5432/db?sslmode=require
DATABASE_URL=""
DIRECT_URL="" # Optional: connection string for migration tooling
# === Authentication (Optional) ===
# Generate with `openssl rand -base64 32`
AUTH_SECRET=""
# Provider toggles: set to "true" or leave blank.
AUTH_ENABLE_EMAIL=""
AUTH_ENABLE_GOOGLE=""
AUTH_ENABLE_GITHUB=""
# Email magic links / passwordless
EMAIL_SERVER_HOST=""
EMAIL_SERVER_PORT=""
EMAIL_SERVER_USER=""
EMAIL_SERVER_PASSWORD=""
EMAIL_FROM=""
# Google OAuth
GOOGLE_CLIENT_ID=""
GOOGLE_CLIENT_SECRET=""
# GitHub OAuth
GITHUB_CLIENT_ID=""
GITHUB_CLIENT_SECRET=""
# === Storage (Optional) ===
# Works for AWS S3, Cloudflare R2, or any S3-compatible service.
STORAGE_DRIVER="s3" # s3 | r2 | minio | filesystem
STORAGE_ACCESS_KEY_ID=""
STORAGE_SECRET_ACCESS_KEY=""
STORAGE_REGION="us-east-1"
STORAGE_BUCKET=""
STORAGE_ENDPOINT="" # Required for R2 / MinIO. Leave blank for AWS.
# === Scheduling & Calendars (Optional) ===
CALENDAR_PROVIDER="" # nextcloud | google | none
CALENDAR_BASE_URL=""
CALENDAR_USERNAME=""
CALENDAR_PASSWORD=""
CALENDAR_CLIENT_ID=""
CALENDAR_CLIENT_SECRET=""
CALENDAR_DEFAULT_TIMEZONE="America/Denver"
# === Feature Flags (Optional) ===
FLAGS_PROVIDER="" # launchdarkly | growthbook | configcat | none
FLAGS_CLIENT_KEY=""
FLAGS_SERVER_KEY=""
# === Analytics & Observability (Optional) ===
ANALYTICS_PROVIDER="" # plausible | umami | vercel | none
ANALYTICS_SITE_ID=""
SENTRY_DSN=""
LOG_LEVEL="info"
# === Edge / Worker deploys (Optional) ===
CLOUDFLARE_ACCOUNT_ID=""
CLOUDFLARE_API_TOKEN=""
WRANGLER_PROFILE=""
# === Miscellaneous ===
NEXT_PUBLIC_APP_NAME="${APP_NAME}"
NEXT_PUBLIC_FEATURE_FLAGS=""

View File

@ -0,0 +1,26 @@
---
name: "🐞 Bug report"
about: Report a reproducible problem
labels: ["type:fix"]
---
### What happened?
<!-- clear, minimal description -->
### Expected behavior
### Repro steps
1.
2.
3.
### Logs / screenshots
<!-- attach JSONL snippet or console output -->
### Environment
- Browser:
- Node: `node -v`
- pnpm: `pnpm -v`
- App commit SHA:
### Extra context

View File

@ -0,0 +1,15 @@
---
name: "✨ Enhancement"
about: Improve an existing capability
labels: ["type:feat"]
---
### Current behavior
### Desired behavior
### Acceptance criteria
- [ ] AC1
- [ ] AC2
### Notes / risks

View File

@ -0,0 +1,23 @@
---
name: "🚀 Feature request"
about: Propose a net-new capability
labels: ["type:feat"]
---
### Problem this solves
### Proposed solution (what & why)
### Non-goals / constraints
### Acceptance criteria
- [ ] AC1
- [ ] AC2
### Alternatives considered
### Impacted areas
- [ ] UI
- [ ] RunCoordinator DO
- [ ] Scoring/validators
- [ ] Storage (D1/R2)

View File

@ -0,0 +1,29 @@
## Summary
<!-- What this PR changes and why -->
## Type
- [ ] feat
- [ ] fix
- [ ] docs
- [ ] chore
- [ ] refactor
- [ ] test
## Screenshots / logs
<!-- If UI or logs changed, include before/after or snippets -->
## How to test
1.
2.
3.
## Checklist
- [ ] Tests added/updated (unit or integration)
- [ ] Typecheck & lint pass (`pnpm check` or equivalent)
- [ ] Builds locally (`pnpm build` or equivalent)
- [ ] Docs/README/ADR updated if needed
- [ ] No secrets committed
## Linked issues
Fixes #
Refs #

99
.gitea/workflows/ci.yaml Normal file
View File

@ -0,0 +1,99 @@
name: CI
on:
push:
branches:
- main
- master
pull_request:
branches:
- main
- master
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Use Node.js 20
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Cache npm
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: npm install --no-audit --no-fund
- name: Ensure CI dev deps present (fallback)
run: |
# Workaround when package-lock is stale vs package.json
npm i --no-save eslint@^8.57.0 eslint-config-next@14.2.16 @vitest/coverage-v8@^3.2.4 || true
- name: Lint
run: npm run ci:lint
- name: Typecheck
run: npm run ci:typecheck
- name: Unit tests (coverage)
run: npm run ci:test
- name: Build (OpenNext)
run: npm run ci:build
- name: Preview smoke check
shell: bash
run: |
set -euo pipefail
# Start preview via local CLI in background and verify it doesn't crash immediately
npm run preview > preview.log 2>&1 &
PREVIEW_PID=$!
# Give it a moment to start
sleep 5
if ! kill -0 "$PREVIEW_PID" 2>/dev/null; then
echo "Preview process exited prematurely. Logs:" >&2
sed -n '1,200p' preview.log >&2 || true
exit 1
fi
# Cleanly stop the preview
kill "$PREVIEW_PID" || true
wait "$PREVIEW_PID" || true
echo "Preview started successfully (smoke check passed)."
- name: Budgets check
run: npm run ci:budgets
env:
TOTAL_STATIC_MAX_BYTES: ${{ vars.TOTAL_STATIC_MAX_BYTES }}
MAX_ASSET_BYTES: ${{ vars.MAX_ASSET_BYTES }}
- name: Upload budgets report
if: always()
uses: actions/upload-artifact@v4
with:
name: budgets-report
path: .vercel/output/static-budgets-report.txt
- name: D1 migration dry-run (best-effort)
shell: bash
continue-on-error: true
run: |
set -euo pipefail
if [ -f sql/schema.sql ]; then
echo "Attempting D1 migration dry-run (local mode)..."
if npx wrangler d1 execute united-tattoo --local --file=./sql/schema.sql; then
echo "D1 migration dry-run completed successfully."
else
echo "D1 dry-run skipped or failed due to missing local bindings. This is expected until CI bindings are configured." >&2
fi
else
echo "No sql/schema.sql found; skipping D1 dry-run."
fi

42
.gitea/workflows/ci.yml Normal file
View File

@ -0,0 +1,42 @@
name: CI
on:
push:
branches: ["main"]
pull_request:
branches: ["main"]
jobs:
build-test:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./bandit-runner-app
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "pnpm"
cache-dependency-path: bandit-runner-app/pnpm-lock.yaml
- name: Install deps
run: pnpm install --frozen-lockfile
- name: Lint
run: pnpm lint
- name: Typecheck
run: npx tsc --noEmit
- name: Build (OpenNext)
run: pnpm build

View File

@ -0,0 +1,201 @@
name: Deployment Pipeline
on:
workflow_dispatch:
inputs:
environment:
description: 'Target environment'
required: true
default: 'preview'
type: choice
options:
- preview
- production
skip_tests:
description: 'Skip tests (emergency deployment)'
required: false
default: false
type: boolean
force_deploy:
description: 'Force deployment even if checks fail'
required: false
default: false
type: boolean
schedule:
# Deploy to preview every Sunday at 2 AM UTC
- cron: '0 2 * * 0'
env:
NODE_VERSION: '20'
CLOUDFLARE_ACCOUNT_ID: ${{ vars.CLOUDFLARE_ACCOUNT_ID }}
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
jobs:
pre-deployment-checks:
name: Pre-Deployment Checks
runs-on: ubuntu-latest
timeout-minutes: 10
if: ${{ !inputs.skip_tests }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Quick lint check
run: npm run ci:lint
- name: TypeScript check
run: npm run ci:typecheck
- name: Run tests
run: npm run ci:test
build-and-deploy:
name: Build and Deploy
runs-on: ubuntu-latest
timeout-minutes: 20
needs: [pre-deployment-checks]
if: always() && (needs.pre-deployment-checks.result == 'success' || inputs.skip_tests || inputs.force_deploy)
environment: ${{ inputs.environment || 'preview' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Build application
run: |
echo "Building application for ${{ inputs.environment || 'preview' }} environment..."
npm run ci:build
# Generate deployment ID
DEPLOY_ID=$(date +%Y%m%d-%H%M%S)-${GITHUB_SHA::8}
echo "DEPLOY_ID=$DEPLOY_ID" >> $GITHUB_ENV
echo "Deployment ID: $DEPLOY_ID"
- name: Database migration (Production only)
if: inputs.environment == 'production'
run: |
echo "Running database migrations for production..."
# In a real scenario, this would run actual migrations
echo "Database migrations completed (simulated)"
- name: Deploy to Cloudflare
run: |
echo "Deploying to Cloudflare ${{ inputs.environment || 'preview' }} environment..."
CLOUDFLARE_ACCOUNT_ID=${{ env.CLOUDFLARE_ACCOUNT_ID }} npx @opennextjs/cloudflare deploy
env:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
- name: Wait for deployment
run: |
echo "Waiting for deployment to propagate..."
sleep 15
- name: Health check
run: |
echo "Performing health check..."
MAX_RETRIES=5
RETRY_COUNT=0
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if curl -f -s https://united-tattoo.christyl116.workers.dev > /dev/null; then
echo "✅ Health check passed!"
break
else
RETRY_COUNT=$((RETRY_COUNT + 1))
echo "Health check failed, retrying... ($RETRY_COUNT/$MAX_RETRIES)"
sleep 10
fi
done
if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
echo "❌ Health check failed after $MAX_RETRIES attempts"
exit 1
fi
- name: Performance check
run: |
echo "Running performance check..."
# Basic performance check
RESPONSE_TIME=$(curl -o /dev/null -s -w '%{time_total}' https://united-tattoo.christyl116.workers.dev)
echo "Response time: ${RESPONSE_TIME}s"
# Check if response time is acceptable (less than 2 seconds)
if (( $(echo "$RESPONSE_TIME < 2.0" | bc -l) )); then
echo "✅ Performance check passed"
else
echo "⚠️ Performance check warning: Response time is ${RESPONSE_TIME}s"
fi
- name: SEO check
run: |
echo "Checking SEO metadata..."
curl -s https://united-tattoo.christyl116.workers.dev | grep -q "application/ld+json" && echo "✅ JSON-LD found" || echo "⚠️ JSON-LD not found"
curl -s https://united-tattoo.christyl116.workers.dev | grep -q "og:title" && echo "✅ Open Graph tags found" || echo "⚠️ Open Graph tags not found"
- name: Create deployment record
run: |
echo "Creating deployment record..."
# In a real scenario, this would create a record in your database or logging system
echo "Deployment ID: $DEPLOY_ID" > deployment-info.txt
echo "Environment: ${{ inputs.environment || 'preview' }}" >> deployment-info.txt
echo "Commit: $GITHUB_SHA" >> deployment-info.txt
echo "Timestamp: $(date -u)" >> deployment-info.txt
echo "URL: https://united-tattoo.christyl116.workers.dev" >> deployment-info.txt
- name: Upload deployment info
uses: actions/upload-artifact@v4
with:
name: deployment-info-${{ inputs.environment || 'preview' }}-${{ env.DEPLOY_ID }}
path: deployment-info.txt
retention-days: 90
- name: Notify success
if: success()
run: |
echo "🎉 Deployment to ${{ inputs.environment || 'preview' }} completed successfully!"
echo "Deployment ID: $DEPLOY_ID"
echo "URL: https://united-tattoo.christyl116.workers.dev"
- name: Notify failure
if: failure()
run: |
echo "❌ Deployment to ${{ inputs.environment || 'preview' }} failed!"
echo "Deployment ID: $DEPLOY_ID"
echo "Please check the logs for details."
rollback:
name: Rollback (if needed)
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [build-and-deploy]
if: failure() && inputs.environment == 'production'
environment: production
steps:
- name: Rollback deployment
run: |
echo "Rolling back production deployment..."
# In a real scenario, this would implement actual rollback logic
echo "Rollback completed (simulated)"
- name: Verify rollback
run: |
echo "Verifying rollback..."
curl -f https://united-tattoo.christyl116.workers.dev || exit 1
echo "✅ Rollback verification successful"

View File

@ -0,0 +1,382 @@
name: Enhanced CI/CD Pipeline
on:
push:
branches:
- main
- master
- 'ci-run-*'
pull_request:
branches:
- main
- master
workflow_dispatch:
inputs:
environment:
description: 'Deployment environment'
required: true
default: 'preview'
type: choice
options:
- preview
- production
env:
NODE_VERSION: '20'
CLOUDFLARE_ACCOUNT_ID: ${{ vars.CLOUDFLARE_ACCOUNT_ID }}
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
jobs:
# ===========================================
# QUALITY GATES
# ===========================================
lint-and-format:
name: Code Quality
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: ESLint
run: npm run ci:lint
continue-on-error: false
- name: TypeScript check
run: npm run ci:typecheck
continue-on-error: false
- name: Format check
run: |
echo "Checking code formatting..."
if ! npm run format:check 2>/dev/null; then
echo "Code formatting issues found. Run 'npm run format' to fix."
exit 1
fi
- name: Upload lint results
if: always()
uses: actions/upload-artifact@v4
with:
name: lint-results
path: |
.next/
eslint-results.json
retention-days: 7
security-scan:
name: Security Scan
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Audit dependencies
run: |
echo "Running security audit..."
npm audit --audit-level=moderate --json > audit-results.json || true
# Check for high/critical vulnerabilities
if npm audit --audit-level=high; then
echo "No high/critical vulnerabilities found"
else
echo "High/critical vulnerabilities detected!"
echo "Audit results:"
cat audit-results.json | jq '.metadata.vulnerabilities'
exit 1
fi
- name: License check
run: |
echo "Checking for problematic licenses..."
npx license-checker --summary --onlyAllow 'MIT;Apache-2.0;BSD-2-Clause;BSD-3-Clause;ISC;Unlicense'
- name: Upload security results
if: always()
uses: actions/upload-artifact@v4
with:
name: security-results
path: audit-results.json
retention-days: 30
test:
name: Tests
runs-on: ubuntu-latest
timeout-minutes: 20
needs: [lint-and-format]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Run unit tests
run: npm run ci:test
env:
CI: true
- name: Upload coverage reports
if: always()
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: |
coverage/
vitest-results.xml
retention-days: 30
- name: Comment coverage on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const path = require('path');
try {
const coveragePath = path.join(process.cwd(), 'coverage', 'lcov-report', 'index.html');
if (fs.existsSync(coveragePath)) {
const coverage = fs.readFileSync(coveragePath, 'utf8');
const match = coverage.match(/(\d+\.?\d*)%/);
if (match) {
const percentage = match[1];
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 📊 Test Coverage: ${percentage}%
Coverage report generated successfully.`
});
}
}
} catch (error) {
console.log('Could not generate coverage comment:', error.message);
}
# ===========================================
# BUILD AND DEPLOY
# ===========================================
build:
name: Build Application
runs-on: ubuntu-latest
timeout-minutes: 15
needs: [lint-and-format, security-scan, test]
if: github.event_name == 'push' || github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
outputs:
build-id: ${{ steps.build.outputs.build-id }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Build application
id: build
run: |
echo "Building Next.js application..."
npm run ci:build
# Generate build ID for tracking
BUILD_ID=$(date +%Y%m%d-%H%M%S)-${GITHUB_SHA::8}
echo "build-id=$BUILD_ID" >> $GITHUB_OUTPUT
echo "Build ID: $BUILD_ID"
- name: Budget check
run: npm run ci:budgets
env:
TOTAL_STATIC_MAX_BYTES: ${{ vars.TOTAL_STATIC_MAX_BYTES || '3000000' }}
MAX_ASSET_BYTES: ${{ vars.MAX_ASSET_BYTES || '1500000' }}
- name: Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: build-artifacts-${{ steps.build.outputs.build-id }}
path: |
.vercel/output/
.open-next/
retention-days: 7
- name: Upload budgets report
if: always()
uses: actions/upload-artifact@v4
with:
name: budgets-report-${{ steps.build.outputs.build-id }}
path: .vercel/output/static-budgets-report.txt
retention-days: 30
deploy-preview:
name: Deploy to Preview
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [build]
if: github.event_name == 'pull_request' || (github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'preview')
environment: preview
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: build-artifacts-${{ needs.build.outputs.build-id }}
path: .
- name: Deploy to Cloudflare (Preview)
run: |
echo "Deploying to Cloudflare preview environment..."
CLOUDFLARE_ACCOUNT_ID=${{ env.CLOUDFLARE_ACCOUNT_ID }} npx @opennextjs/cloudflare deploy
env:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
- name: Update PR comment
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 🚀 Preview Deployment Complete
**Build ID:** ${{ needs.build.outputs.build-id }}
**Environment:** Preview
**Status:** ✅ Deployed successfully
Preview URL: https://united-tattoo.christyl116.workers.dev
---
*This is an automated deployment for PR #${{ github.event.number }}*`
});
deploy-production:
name: Deploy to Production
runs-on: ubuntu-latest
timeout-minutes: 15
needs: [build]
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master' || (github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'production')
environment: production
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: build-artifacts-${{ needs.build.outputs.build-id }}
path: .
- name: Database migration check
run: |
echo "Checking database migration status..."
# This would run actual migrations in a real scenario
echo "Migration check completed (dry-run mode)"
- name: Deploy to Cloudflare (Production)
run: |
echo "Deploying to Cloudflare production environment..."
CLOUDFLARE_ACCOUNT_ID=${{ env.CLOUDFLARE_ACCOUNT_ID }} npx @opennextjs/cloudflare deploy
env:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
- name: Health check
run: |
echo "Performing health check..."
sleep 10
curl -f https://united-tattoo.christyl116.workers.dev || exit 1
echo "Health check passed!"
- name: Notify deployment success
if: success()
run: |
echo "✅ Production deployment successful!"
echo "Build ID: ${{ needs.build.outputs.build-id }}"
echo "URL: https://united-tattoo.christyl116.workers.dev"
# ===========================================
# POST-DEPLOYMENT CHECKS
# ===========================================
post-deployment:
name: Post-Deployment Checks
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [deploy-production]
if: always() && needs.deploy-production.result == 'success'
steps:
- name: Lighthouse CI
run: |
echo "Running Lighthouse performance audit..."
npx @lhci/cli@0.12.x autorun
env:
LHCI_GITHUB_APP_TOKEN: ${{ secrets.LHCI_GITHUB_APP_TOKEN }}
- name: SEO Check
run: |
echo "Checking SEO metadata..."
curl -s https://united-tattoo.christyl116.workers.dev | grep -E "(og:|twitter:|application/ld\+json)" && echo "SEO metadata found" || echo "SEO metadata not found"
- name: Security Headers Check
run: |
echo "Checking security headers..."
curl -I https://united-tattoo.christyl116.workers.dev | grep -E "(X-Frame-Options|X-Content-Type-Options|X-XSS-Protection)" || echo "Security headers check completed"
# ===========================================
# CLEANUP
# ===========================================
cleanup:
name: Cleanup
runs-on: ubuntu-latest
timeout-minutes: 5
needs: [deploy-production, post-deployment]
if: always()
steps:
- name: Cleanup old artifacts
run: |
echo "Cleaning up old build artifacts..."
# This would clean up old deployments in a real scenario
echo "Cleanup completed"
- name: Update deployment status
run: |
echo "Deployment pipeline completed"
echo "Final status: ${{ needs.deploy-production.result }}"

View File

@ -0,0 +1,267 @@
name: Performance Monitoring
on:
push:
branches:
- main
- master
pull_request:
branches:
- main
- master
schedule:
# Run performance check daily at 4 AM UTC
- cron: '0 4 * * *'
workflow_dispatch:
env:
NODE_VERSION: '20'
SITE_URL: 'https://united-tattoo.christyl116.workers.dev'
jobs:
lighthouse-audit:
name: Lighthouse Performance Audit
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Install Lighthouse CI
run: npm install -g @lhci/cli@0.12.x
- name: Run Lighthouse CI
run: |
echo "Running Lighthouse performance audit..."
# Create lighthouse config
cat > lighthouserc.js << EOF
module.exports = {
ci: {
collect: {
url: ['${{ env.SITE_URL }}'],
numberOfRuns: 3,
settings: {
chromeFlags: '--no-sandbox --headless',
},
},
assert: {
assertions: {
'categories:performance': ['warn', {minScore: 0.8}],
'categories:accessibility': ['error', {minScore: 0.9}],
'categories:best-practices': ['warn', {minScore: 0.8}],
'categories:seo': ['error', {minScore: 0.9}],
},
},
upload: {
target: 'filesystem',
outputDir: './lighthouse-results',
},
},
};
EOF
# Run Lighthouse
lhci autorun
- name: Upload Lighthouse results
uses: actions/upload-artifact@v4
with:
name: lighthouse-results
path: lighthouse-results/
retention-days: 30
bundle-analysis:
name: Bundle Size Analysis
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Build application
run: npm run ci:build
- name: Analyze bundle size
run: |
echo "Analyzing bundle sizes..."
# Check total build size
BUILD_SIZE=$(du -sh .vercel/output/static | cut -f1)
echo "Total build size: $BUILD_SIZE"
# Check individual chunk sizes
echo "Largest chunks:"
find .vercel/output/static/_next/static/chunks -name "*.js" -exec du -h {} \; | sort -hr | head -10
# Check for large files
echo "Large files (>500KB):"
find .vercel/output/static -type f -size +500k -exec ls -lh {} \;
- name: Run budget check
run: npm run ci:budgets
env:
TOTAL_STATIC_MAX_BYTES: ${{ vars.TOTAL_STATIC_MAX_BYTES || '3000000' }}
MAX_ASSET_BYTES: ${{ vars.MAX_ASSET_BYTES || '1500000' }}
- name: Upload bundle analysis
uses: actions/upload-artifact@v4
with:
name: bundle-analysis
path: |
.vercel/output/static-budgets-report.txt
.vercel/output/static/
retention-days: 30
core-web-vitals:
name: Core Web Vitals Check
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Check Core Web Vitals
run: |
echo "Checking Core Web Vitals..."
# Basic performance check
RESPONSE_TIME=$(curl -o /dev/null -s -w '%{time_total}' ${{ env.SITE_URL }})
echo "Response time: ${RESPONSE_TIME}s"
# Check if response time is acceptable
if (( $(echo "$RESPONSE_TIME < 2.0" | bc -l) )); then
echo "✅ Response time is good (< 2s)"
else
echo "⚠️ Response time is slow (> 2s)"
fi
# Check for gzip compression
COMPRESSED_SIZE=$(curl -H "Accept-Encoding: gzip" -s -w '%{size_download}' -o /dev/null ${{ env.SITE_URL }})
UNCOMPRESSED_SIZE=$(curl -s -w '%{size_download}' -o /dev/null ${{ env.SITE_URL }})
if [ "$COMPRESSED_SIZE" -lt "$UNCOMPRESSED_SIZE" ]; then
echo "✅ Gzip compression is working"
else
echo "⚠️ Gzip compression may not be working"
fi
- name: Check SEO performance
run: |
echo "Checking SEO performance..."
# Check for meta tags
curl -s ${{ env.SITE_URL }} | grep -q "og:title" && echo "✅ Open Graph tags present" || echo "❌ Open Graph tags missing"
curl -s ${{ env.SITE_URL }} | grep -q "twitter:card" && echo "✅ Twitter Card tags present" || echo "❌ Twitter Card tags missing"
curl -s ${{ env.SITE_URL }} | grep -q "application/ld+json" && echo "✅ JSON-LD structured data present" || echo "❌ JSON-LD structured data missing"
# Check for canonical URL
curl -s ${{ env.SITE_URL }} | grep -q "canonical" && echo "✅ Canonical URL present" || echo "❌ Canonical URL missing"
- name: Check security headers
run: |
echo "Checking security headers..."
# Check for security headers
curl -I ${{ env.SITE_URL }} | grep -q "X-Frame-Options" && echo "✅ X-Frame-Options present" || echo "⚠️ X-Frame-Options missing"
curl -I ${{ env.SITE_URL }} | grep -q "X-Content-Type-Options" && echo "✅ X-Content-Type-Options present" || echo "⚠️ X-Content-Type-Options missing"
curl -I ${{ env.SITE_URL }} | grep -q "X-XSS-Protection" && echo "✅ X-XSS-Protection present" || echo "⚠️ X-XSS-Protection missing"
performance-report:
name: Generate Performance Report
runs-on: ubuntu-latest
timeout-minutes: 5
needs: [lighthouse-audit, bundle-analysis, core-web-vitals]
if: always()
steps:
- name: Download performance results
uses: actions/download-artifact@v4
with:
name: lighthouse-results
path: lighthouse-results/
- name: Generate performance report
run: |
echo "# Performance Report" > performance-report.md
echo "Generated: $(date -u)" >> performance-report.md
echo "Site URL: ${{ env.SITE_URL }}" >> performance-report.md
echo "" >> performance-report.md
# Add Lighthouse results
if [ -d "lighthouse-results" ]; then
echo "## Lighthouse Scores" >> performance-report.md
echo "" >> performance-report.md
# Extract scores from Lighthouse results
if [ -f "lighthouse-results/manifest.json" ]; then
echo "Lighthouse audit completed successfully" >> performance-report.md
else
echo "Lighthouse audit results not found" >> performance-report.md
fi
echo "" >> performance-report.md
fi
echo "## Performance Checks" >> performance-report.md
echo "" >> performance-report.md
echo "- Lighthouse Audit: ${{ needs.lighthouse-audit.result }}" >> performance-report.md
echo "- Bundle Analysis: ${{ needs.bundle-analysis.result }}" >> performance-report.md
echo "- Core Web Vitals: ${{ needs.core-web-vitals.result }}" >> performance-report.md
echo "" >> performance-report.md
echo "## Recommendations" >> performance-report.md
echo "" >> performance-report.md
echo "1. Monitor Core Web Vitals regularly" >> performance-report.md
echo "2. Keep bundle sizes under budget limits" >> performance-report.md
echo "3. Ensure Lighthouse scores remain above thresholds" >> performance-report.md
echo "4. Check for performance regressions in PRs" >> performance-report.md
- name: Upload performance report
uses: actions/upload-artifact@v4
with:
name: performance-report
path: performance-report.md
retention-days: 90
- name: Comment on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const report = fs.readFileSync('performance-report.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 📊 Performance Report
${report}`
});

View File

@ -0,0 +1,17 @@
name: PR Title Lint
on:
pull_request:
types: [opened, edited, synchronize]
jobs:
pr-title:
runs-on: ubuntu-latest
steps:
- name: Check PR title
run: |
title="$(jq -r '.pull_request.title' < "$GITHUB_EVENT_PATH")"
if ! grep -Eq '^(feat|fix|docs|chore|refactor|test)(\(.+\))?: .+' <<<"$title"; then
echo "PR title must follow Conventional Commits. Got: $title"
exit 1
fi

View File

@ -0,0 +1,261 @@
name: Security and Dependency Scanning
on:
push:
branches:
- main
- master
pull_request:
branches:
- main
- master
schedule:
# Run security scan daily at 3 AM UTC
- cron: '0 3 * * *'
workflow_dispatch:
env:
NODE_VERSION: '20'
jobs:
dependency-scan:
name: Dependency Security Scan
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Run npm audit
run: |
echo "Running npm security audit..."
npm audit --audit-level=moderate --json > audit-results.json || true
# Extract vulnerability counts
HIGH_VULNS=$(cat audit-results.json | jq '.metadata.vulnerabilities.high // 0')
CRITICAL_VULNS=$(cat audit-results.json | jq '.metadata.vulnerabilities.critical // 0')
echo "High vulnerabilities: $HIGH_VULNS"
echo "Critical vulnerabilities: $CRITICAL_VULNS"
# Fail if critical vulnerabilities found
if [ "$CRITICAL_VULNS" -gt 0 ]; then
echo "❌ Critical vulnerabilities found!"
cat audit-results.json | jq '.vulnerabilities[] | select(.severity == "critical")'
exit 1
fi
# Warn if high vulnerabilities found
if [ "$HIGH_VULNS" -gt 0 ]; then
echo "⚠️ High vulnerabilities found!"
cat audit-results.json | jq '.vulnerabilities[] | select(.severity == "high")'
fi
- name: License check
run: |
echo "Checking package licenses..."
npx license-checker --summary --onlyAllow 'MIT;Apache-2.0;BSD-2-Clause;BSD-3-Clause;ISC;Unlicense;CC0-1.0' || {
echo "⚠️ Some packages have non-approved licenses"
echo "Run 'npx license-checker --summary' to see details"
}
- name: Check for outdated packages
run: |
echo "Checking for outdated packages..."
npm outdated --json > outdated-packages.json || true
# Count outdated packages
OUTDATED_COUNT=$(cat outdated-packages.json | jq 'length')
echo "Outdated packages: $OUTDATED_COUNT"
if [ "$OUTDATED_COUNT" -gt 0 ]; then
echo "⚠️ Found $OUTDATED_COUNT outdated packages"
cat outdated-packages.json | jq 'keys[]'
fi
- name: Upload security results
if: always()
uses: actions/upload-artifact@v4
with:
name: security-scan-results
path: |
audit-results.json
outdated-packages.json
retention-days: 30
code-security-scan:
name: Code Security Scan
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci --no-audit --no-fund
- name: Install security tools
run: |
npm install -g @eslint/eslintrc
npm install -g eslint-plugin-security
- name: Security linting
run: |
echo "Running security-focused linting..."
# Check for common security issues
if grep -r "eval(" --include="*.js" --include="*.ts" --include="*.tsx" .; then
echo "❌ Found eval() usage - potential security risk"
exit 1
fi
if grep -r "innerHTML" --include="*.js" --include="*.ts" --include="*.tsx" .; then
echo "⚠️ Found innerHTML usage - review for XSS risks"
fi
if grep -r "dangerouslySetInnerHTML" --include="*.js" --include="*.ts" --include="*.tsx" .; then
echo "⚠️ Found dangerouslySetInnerHTML usage - review for XSS risks"
fi
- name: Check for hardcoded secrets
run: |
echo "Checking for potential hardcoded secrets..."
# Check for common secret patterns
if grep -rE "(password|secret|key|token).*=.*['\"][^'\"]{8,}['\"]" --include="*.js" --include="*.ts" --include="*.tsx" --exclude-dir=node_modules .; then
echo "⚠️ Potential hardcoded secrets found - review manually"
fi
# Check for API keys
if grep -rE "(api[_-]?key|apikey)" --include="*.js" --include="*.ts" --include="*.tsx" --exclude-dir=node_modules .; then
echo "⚠️ Potential API key references found - ensure no hardcoded keys"
fi
- name: Check environment variable usage
run: |
echo "Checking environment variable usage..."
# Ensure sensitive data uses environment variables
if grep -r "process\.env\." --include="*.js" --include="*.ts" --include="*.tsx" .; then
echo "✅ Environment variables are being used"
fi
container-security:
name: Container Security Scan
runs-on: ubuntu-latest
timeout-minutes: 10
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Check Dockerfile security
run: |
if [ -f "Dockerfile" ]; then
echo "Checking Dockerfile security..."
# Check for root user
if grep -q "USER root" Dockerfile; then
echo "⚠️ Dockerfile runs as root - consider using non-root user"
fi
# Check for latest tags
if grep -q ":latest" Dockerfile; then
echo "⚠️ Dockerfile uses 'latest' tag - consider pinning versions"
fi
# Check for security updates
if grep -q "apt-get update" Dockerfile; then
echo "✅ Dockerfile includes package updates"
fi
else
echo "No Dockerfile found - skipping container security check"
fi
security-report:
name: Generate Security Report
runs-on: ubuntu-latest
timeout-minutes: 5
needs: [dependency-scan, code-security-scan, container-security]
if: always()
steps:
- name: Download security results
uses: actions/download-artifact@v4
with:
name: security-scan-results
path: security-results/
- name: Generate security report
run: |
echo "# Security Scan Report" > security-report.md
echo "Generated: $(date -u)" >> security-report.md
echo "" >> security-report.md
# Add dependency scan results
if [ -f "security-results/audit-results.json" ]; then
echo "## Dependency Security" >> security-report.md
echo "" >> security-report.md
CRITICAL=$(cat security-results/audit-results.json | jq '.metadata.vulnerabilities.critical // 0')
HIGH=$(cat security-results/audit-results.json | jq '.metadata.vulnerabilities.high // 0')
MODERATE=$(cat security-results/audit-results.json | jq '.metadata.vulnerabilities.moderate // 0')
LOW=$(cat security-results/audit-results.json | jq '.metadata.vulnerabilities.low // 0')
echo "- Critical: $CRITICAL" >> security-report.md
echo "- High: $HIGH" >> security-report.md
echo "- Moderate: $MODERATE" >> security-report.md
echo "- Low: $LOW" >> security-report.md
echo "" >> security-report.md
fi
# Add outdated packages
if [ -f "security-results/outdated-packages.json" ]; then
echo "## Outdated Packages" >> security-report.md
echo "" >> security-report.md
OUTDATED_COUNT=$(cat security-results/outdated-packages.json | jq 'length')
echo "Total outdated packages: $OUTDATED_COUNT" >> security-report.md
echo "" >> security-report.md
fi
echo "## Scan Status" >> security-report.md
echo "" >> security-report.md
echo "- Dependency Scan: ${{ needs.dependency-scan.result }}" >> security-report.md
echo "- Code Security Scan: ${{ needs.code-security-scan.result }}" >> security-report.md
echo "- Container Security: ${{ needs.container-security.result }}" >> security-report.md
- name: Upload security report
uses: actions/upload-artifact@v4
with:
name: security-report
path: security-report.md
retention-days: 90
- name: Comment on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const report = fs.readFileSync('security-report.md', 'utf8');
// Await the API call: github-script wraps the body in an async function,
// and an un-awaited rejection would be dropped instead of failing the step.
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 🔒 Security Scan Results
${report}`
});

153
.gitignore vendored Normal file
View File

@ -0,0 +1,153 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
node_modules/
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz
# testing
/coverage
coverage/
.nyc_output/
# next.js
/.next/
/out/
.next/
next-env.d.ts
*.tsbuildinfo
# vercel
.vercel/
.vercel
# production
/build
dist/
build/
# misc
.DS_Store
*.pem
.vscode/
.idea/
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
lerna-debug.log*
# local env files
.env
.env*.local
.env.local
.env.development.local
.env.test.local
.env.production.local
.env.production
# typescript
*.log
# IDEs
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.swp
*.swo
*~
.idea/
*.sublime-project
*.sublime-workspace
# OS
.DS_Store
Thumbs.db
# project temp and large binary assets (avoid committing raw media dumps)
temp/
temp/**
*.mp4
*.mov
*.avi
*.mkv
*.psd
*.ai
*.zip
*.7z
*.rar
# BMAD (local only)
.bmad-core/
.bmad-*/
# database backups (local exports)
backups/
*.sql.bak
*.db-backup
# wrangler/cloudflare local state (do not commit)
.wrangler/
.wrangler/**
.dev.vars
.mf/
# opennext build files (NEVER commit these - they are build artifacts)
.open-next/
.open-next/**
.open-next/
.vercel/output/
.vercel/output/**
# Cache directories
.cache/
.parcel-cache/
.turbo/
.swc/
# Lock files (keep only one)
# Uncomment the ones you don't use
# package-lock.json
# yarn.lock
# pnpm-lock.yaml
# Vitest
.vitest/
# Logs
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Runtime data
pids/
*.pid
*.seed
*.pid.lock
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Sentry
.sentryclirc
# React Query
.react-query/
# Supabase
.supabase/
supabase/.temp/
# Tanstack Query Devtools
.tanstack/

46
CONTRIBUTING.md Normal file
View File

@ -0,0 +1,46 @@
Contributing Workflow
=====================
Use this guidance when you spin up a new project from the template or accept contributions from collaborators.
Branching model
---------------
- Work from short-lived topic branches cut from `main`.
- Prefix branches with the work type: `feat/`, `fix/`, `docs/`, `chore/`, `refactor/`, `test/`.
- Keep branch names descriptive but short (e.g. `feat/billing-invoices`, `fix/auth-timeout`).
Commit messages
---------------
Follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/).
```
<type>(<scope>): <subject>
```
Examples:
- `feat(api): add artist listing endpoint`
- `fix(infra): handle wrangler env missing`
- `docs(adr): record storage strategy`
Pull request checklist
----------------------
1. Rebase onto `main` before opening the PR.
2. Fill out `.gitea/pull_request_template.md` so reviewers know how to test.
3. Ensure automated checks pass locally:
```bash
pnpm install
pnpm lint
pnpm test
pnpm build
```
Adjust the commands if your project uses a different toolchain.
4. Link issues with `Fixes #id` or `Refs #id` as appropriate.
5. Squash-merge once approved to keep history clean (use the Conventional Commit format for the squash message).
Quality expectations
--------------------
- Keep docs current. Update the README, edge-case catalogue, or stack decisions when behaviour changes.
- Add or update tests alongside your changes—tests are treated as executable documentation.
- Avoid committing secrets or large binaries; rely on `.env` files, secret managers, or storage buckets instead.
Questions?
----------
Open an issue or start a draft PR and document what you are unsure about. Future readers will thank you for the breadcrumbs.

240
README.md Normal file
View File

@ -0,0 +1,240 @@
<div align="center">
<!-- DEPLOYMENT COMMAND -->
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 20px; border-radius: 10px; margin-bottom: 30px;">
<h3 style="color: white; margin: 0;">STARTER COMMAND</h3>
<code style="color: #ffd700; font-size: 16px; font-weight: bold;">./scripts/bootstrap-template.sh</code>
</div>
<a id="readme-top"></a>
<!-- PROJECT SHIELDS -->
[![Contributors][contributors-shield]][contributors-url]
[![Forks][forks-shield]][forks-url]
[![Stargazers][stars-shield]][stars-url]
[![Issues][issues-shield]][issues-url]
[![LinkedIn][linkedin-shield]][linkedin-url]
<!-- PROJECT LOGO -->
<br />
<div align="center">
<a href="https://git.biohazardvfx.com/nicholai/template">
<img src="public/template-logo.png" alt="Template Logo" width="400">
</a>
<h1 align="center" style="font-size: 48px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent;">Development Project Template</h1>
<p align="center" style="font-size: 18px; max-width: 680px;">
Opinionated starter kit for new projects deployed through my self-hosted Gitea.<br />
<strong>Documentation-first • Edge-case aware • Automation ready</strong>
<br />
<br />
<a href="#getting-started"><strong>Quick Start »</strong></a>
·
<a href="https://git.biohazardvfx.com/nicholai/template/issues/new?labels=enhancement">Suggest Improvement</a>
</p>
</div>
---
<!-- TABLE OF CONTENTS -->
<details open>
<summary><h2 style="display: inline-block;">Table of Contents</h2></summary>
<ol>
<li><a href="#about-the-template">About The Template</a>
<ul>
<li><a href="#why-this-exists">Why This Exists</a></li>
<li><a href="#core-principles">Core Principles</a></li>
</ul>
</li>
<li><a href="#tech-stack">Tech Stack</a></li>
<li><a href="#architecture">Architecture</a></li>
<li><a href="#getting-started">Getting Started</a>
<ul>
<li><a href="#prerequisites">Prerequisites</a></li>
<li><a href="#installation">Installation</a></li>
<li><a href="#environment-variables">Environment Variables</a></li>
</ul>
</li>
<li><a href="#development">Development</a>
<ul>
<li><a href="#common-commands">Common Commands</a></li>
<li><a href="#docs--checklists">Docs & Checklists</a></li>
</ul>
</li>
<li><a href="#edge-cases">Edge Cases</a></li>
<li><a href="#testing">Testing</a></li>
<li><a href="#contributing">Contributing</a></li>
<li><a href="#license">License</a></li>
<li><a href="#contact">Contact</a></li>
</ol>
</details>
---
</div>
## About The Template
<div align="center">
<img src="public/template-dashboard.png" alt="Template Dashboard Mock" width="800" style="border-radius: 10px; box-shadow: 0 10px 30px rgba(0,0,0,0.3);">
</div>
<br />
This repository is the baseline I use when starting a new product or service. It keeps the process consistent, reduces the time spent wiring boilerplate, and reminds me to account for the edge cases that usually appear late in a project.
### Why This Exists
- **Primed documentation:** Every project starts with a README, stack decision log, bootstrapping checklist, and edge-case catalogue.
- **Automation on day one:** `scripts/` holds helpers to rename the project, configure remotes, and clean example assets.
- **Testing blueprints:** Example Vitest suites (`__tests__/`) demonstrate how to structure API, component, flag, hook, and library tests.
- **Gitea ready:** Pull request templates, Conventional Commit guidance, and workflows match my self-hosted setup.
### Core Principles
| Principle | What it means |
| --- | --- |
| Documentation-first | Write down intent and constraints before diving into code. |
| Edge-case aware | Capture the failure scenarios that repeatedly cause incidents. |
| Reproducible setup | Every project can be re-created from scratch via scripts and docs. |
| Automation ready | Scripts and CI pipelines are easy to adapt or extend. |
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Tech Stack
- **Framework**: Next.js + TypeScript (adjust as needed)
- **Testing**: Vitest + Testing Library
- **Styling**: Tailwind CSS or CSS Modules (pick one per project)
- **Database**: PostgreSQL (Supabase/Neon friendly)
- **Storage**: S3-compatible providers (AWS S3, Cloudflare R2)
- **Auth**: NextAuth.js or custom token flows
- **Deployment**: Wrangler + Cloudflare Pages/Workers (swap for your platform)
Document any deviations in `docs/stack-decisions.md`.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Architecture
```mermaid
flowchart TD
A[Client] -->|HTTP| B[Next.js App]
B -->|API Routes| C[(PostgreSQL)]
B -->|Edge Functions| D[Cloudflare Workers]
B -->|Auth| E[Identity Provider]
B -->|Storage SDK| F[(S3/R2 Bucket)]
D -->|Feature Flags| G[Config Service]
```
- Keep infrastructure definitions under `infra/` once you create them.
- Capture architectural decisions and trade-offs in `docs/stack-decisions.md`.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Getting Started
### Prerequisites
- Node.js 20+
- pnpm (preferred) or your package manager of choice
- `jq` (optional, used by bootstrap script)
- Git & access to your Gitea instance
### Installation
1. **Clone / duplicate the template**
```bash
git clone git@git.biohazardvfx.com:nicholai/template.git my-new-project
cd my-new-project
```
2. **Bootstrap**
```bash
./scripts/bootstrap-template.sh
```
3. **Install dependencies**
```bash
pnpm install
```
4. **Follow the checklist**
- Open `docs/bootstrapping.md` and complete each item.
### Environment Variables
Copy `.env.example` to `.env` and fill only the sections you need. The file is structured by concern (database, auth, storage, observability) so you can strip unused parts.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Development
### Common Commands
| Command | Description |
| --- | --- |
| `pnpm dev` | Start the Next.js dev server. |
| `pnpm lint` | Run ESLint / formatting checks. |
| `pnpm test` | Execute the Vitest suites. |
| `pnpm build` | Generate a production build. |
### Docs & Checklists
- `docs/bootstrapping.md` — tasks to run through when spinning up a new project.
- `docs/edge-cases.md` — prompts for the weird scenarios that usually break things.
- `docs/stack-decisions.md` — record “why” for each notable tech choice.
- `docs/testing-blueprints.md` — guidance for adapting the example tests.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Edge Cases
Edge-case awareness is built into the template:
- Feature flags default to safe behaviour when providers fail.
- Auth, storage, scheduling, and third-party integrations each have dedicated prompts.
- The example tests in `__tests__/flags/` and `__tests__/lib/` show how to assert defensive behaviour.
Add new lessons learned back into `docs/edge-cases.md` so the template evolves with every incident.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Testing
- Tests are organised by domain: `api/`, `components/`, `hooks/`, `flags/`, `lib/`.
- Each suite mocks external dependencies and asserts on both happy-path and failure scenarios.
- See `docs/testing-blueprints.md` for tips on customising them to your project.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Contributing
See [`CONTRIBUTING.md`](CONTRIBUTING.md) for branching conventions, commit style, and review expectations.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## License
Use, remix, or extract any portion of this template for your own projects. Attribution is appreciated but not required.
<p align="right"><a href="#readme-top">back to top ↑</a></p>
## Contact
Nicholai — [@biohazardvfx](https://linkedin.com/in/biohazardvfx) — nicholai@biohazardvfx.com
Project Link: [https://git.biohazardvfx.com/nicholai/template](https://git.biohazardvfx.com/nicholai/template)
<p align="right"><a href="#readme-top">back to top ↑</a></p>
<!-- MARKDOWN LINKS & IMAGES -->
<!-- shields -->
[contributors-shield]: https://img.shields.io/gitea/contributors/nicholai/template?style=for-the-badge
[contributors-url]: https://git.biohazardvfx.com/nicholai/template/graphs/contributors
[forks-shield]: https://img.shields.io/gitea/forks/nicholai/template?style=for-the-badge
[forks-url]: https://git.biohazardvfx.com/nicholai/template/network/members
[stars-shield]: https://img.shields.io/gitea/stars/nicholai/template?style=for-the-badge
[stars-url]: https://git.biohazardvfx.com/nicholai/template/stars
[issues-shield]: https://img.shields.io/gitea/issues/nicholai/template?style=for-the-badge
[issues-url]: https://git.biohazardvfx.com/nicholai/template/issues
[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://linkedin.com/in/biohazardvfx

11
__tests__/README.md Normal file
View File

@ -0,0 +1,11 @@
# Test Suite Overview
The tests in this directory act as executable specifications. They were copied from real projects and are meant to be adapted, not run verbatim.
How to use them:
- Rename folders to match the first features you build.
- Replace imports from `@/...` with your actual modules once they exist.
- Trim scenarios that do not apply and add new ones that cover risky behaviours or integrations you care about.
- Keep the error-handling and edge-case checks—they are the reason these suites exist.
Once your implementation is in place, run `pnpm test` (or your preferred command) and fix failing specs until everything passes. The goal is to evolve these tests into living documentation for the application you are building off this template.

View File

@ -0,0 +1,119 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { GET } from '@/app/api/artists/route'
import { NextRequest } from 'next/server'
// Stub the data layer so every spec fully controls what the route handler sees.
vi.mock('@/lib/db', () => ({
  getPublicArtists: vi.fn(),
}))
import { getPublicArtists } from '@/lib/db'

describe('GET /api/artists', () => {
  // Helper: build a request against the artists endpoint with an optional query string.
  const artistsRequest = (query = '') =>
    new NextRequest(`http://localhost:3000/api/artists${query}`)

  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('should return artists successfully', async () => {
    vi.mocked(getPublicArtists).mockResolvedValue([
      {
        id: '1',
        slug: 'test-artist',
        name: 'Test Artist',
        bio: 'Test bio',
        specialties: ['Traditional', 'Realism'],
        instagramHandle: '@testartist',
        portfolioImages: [],
        isActive: true,
        hourlyRate: 150,
      },
    ])

    const response = await GET(artistsRequest())
    const payload = await response.json()

    expect(response.status).toBe(200)
    expect(payload.artists).toHaveLength(1)
    expect(payload.artists[0].name).toBe('Test Artist')
  })

  it('should apply specialty filter', async () => {
    vi.mocked(getPublicArtists).mockResolvedValue([
      {
        id: '1',
        slug: 'traditional-artist',
        name: 'Traditional Artist',
        bio: 'Test bio',
        specialties: ['Traditional'],
        portfolioImages: [],
        isActive: true,
      },
    ])

    await GET(artistsRequest('?specialty=Traditional'))

    // The query-string filter must be forwarded to the data layer untouched.
    expect(getPublicArtists).toHaveBeenCalledWith(
      expect.objectContaining({ specialty: 'Traditional' }),
      undefined
    )
  })

  it('should apply search filter', async () => {
    vi.mocked(getPublicArtists).mockResolvedValue([])

    await GET(artistsRequest('?search=John'))

    expect(getPublicArtists).toHaveBeenCalledWith(
      expect.objectContaining({ search: 'John' }),
      undefined
    )
  })

  it('should apply pagination', async () => {
    vi.mocked(getPublicArtists).mockResolvedValue([])

    await GET(artistsRequest('?limit=10&page=2'))

    // page 2 with limit 10 translates to offset 10.
    expect(getPublicArtists).toHaveBeenCalledWith(
      expect.objectContaining({ limit: 10, offset: 10 }),
      undefined
    )
  })

  it('should handle database errors gracefully', async () => {
    vi.mocked(getPublicArtists).mockRejectedValue(new Error('Database error'))

    const response = await GET(artistsRequest())

    expect(response.status).toBe(500)
    expect(await response.json()).toHaveProperty('error')
  })

  it('should return empty array when no artists found', async () => {
    vi.mocked(getPublicArtists).mockResolvedValue([])

    const response = await GET(artistsRequest())
    const payload = await response.json()

    expect(response.status).toBe(200)
    expect(payload.artists).toEqual([])
  })
})

View File

@ -0,0 +1,82 @@
import React from 'react'
import { render, screen } from '@testing-library/react'
import { describe, expect, it } from 'vitest'
import { AftercarePage } from '@/components/aftercare-page'
describe('AftercarePage ShadCN UI Consistency', () => {
  // Renders the page and returns the full serialized markup for token scans.
  const renderAndSerialize = () => {
    render(<AftercarePage />)
    return document.documentElement.innerHTML
  }

  it('uses ShadCN design tokens and primitives correctly', () => {
    const markup = renderAndSerialize()

    // Root container must rely on theme tokens, not hard-coded colors.
    expect(document.querySelector('.min-h-screen')).toHaveClass('bg-background', 'text-foreground')

    // Tabs primitives expose accessible roles for both aftercare variants.
    expect(screen.getByRole('tablist')).toBeInTheDocument()
    expect(screen.getByRole('tab', { name: /general tattoo aftercare/i })).toBeInTheDocument()
    expect(screen.getByRole('tab', { name: /transparent bandage aftercare/i })).toBeInTheDocument()

    // At least one Alert and one Card primitive must be rendered.
    expect(screen.getAllByRole('alert').length).toBeGreaterThan(0)
    expect(document.querySelectorAll('[data-slot="card"]').length).toBeGreaterThan(0)

    // No ad-hoc color classes (specifically no text-white); tokens only.
    expect(markup).not.toContain('text-white')
    expect(markup).toContain('text-muted-foreground')
    expect(markup).toContain('bg-background')
    expect(markup).toContain('text-foreground')
  })

  it('uses consistent ShadCN component structure', () => {
    renderAndSerialize()

    // TabsList carries the standard ShadCN layout classes.
    expect(screen.getByRole('tablist')).toHaveClass('grid', 'w-full', 'grid-cols-2', 'bg-muted', 'border')

    // Alert and Card primitives expose their data-slot markers.
    expect(screen.getAllByRole('alert')[0]).toHaveAttribute('data-slot', 'alert')
    expect(document.querySelectorAll('[data-slot="card-header"]').length).toBeGreaterThan(0)
    expect(document.querySelectorAll('[data-slot="card-content"]').length).toBeGreaterThan(0)
  })

  it('maintains consistent typography and spacing scales', () => {
    const markup = renderAndSerialize()

    expect(screen.getByText('Tattoo Aftercare')).toHaveClass('font-playfair')
    expect(document.querySelectorAll('.text-muted-foreground').length).toBeGreaterThan(0)

    // Spacing utilities come from the shared scale; cards use py-6, not py-8.
    expect(markup).toContain('space-y-')
    expect(markup).toContain('gap-')
    expect(markup).toContain('px-8')
    expect(markup).toContain('py-6')
  })

  it('applies motion classes with reduced-motion safeguard', () => {
    const markup = renderAndSerialize()
    expect(markup).toContain('animate-in')
    expect(markup).toContain('motion-reduce:animate-none')
  })
})

View File

@ -0,0 +1,99 @@
import React from 'react'
import { render } from '@testing-library/react'
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { ArtistPortfolio } from '@/components/artist-portfolio'
// Mock requestAnimationFrame / cancel
// setTimeout-backed shims let the component's rAF-driven parallax loop run in jsdom.
global.requestAnimationFrame = vi.fn((cb) => setTimeout(cb, 0) as unknown as number)
global.cancelAnimationFrame = vi.fn((id) => clearTimeout(id as unknown as number))
// Default matchMedia mock (no reduced motion)
// Factory so individual specs can flip the prefers-reduced-motion result.
const createMatchMedia = (matches: boolean) =>
vi.fn().mockImplementation((query) => ({
matches,
media: query,
onchange: null,
addListener: vi.fn(),
removeListener: vi.fn(),
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
dispatchEvent: vi.fn(),
}))
// Basic getBoundingClientRect mock for panels
// Fixed 1200x800 viewport-like rect; jsdom has no layout engine.
const defaultRect = {
top: 0,
bottom: 800,
left: 0,
right: 1200,
width: 1200,
height: 800,
x: 0,
y: 0,
toJSON: () => {},
}
// Covers the split-hero parallax behavior: CSS-var initialization on mount and
// the reduced-motion opt-out.
describe('ArtistPortfolio Split Hero', () => {
beforeEach(() => {
vi.clearAllMocks()
// default to no reduced-motion preference
Object.defineProperty(window, 'matchMedia', {
writable: true,
value: createMatchMedia(false),
})
// Mock IntersectionObserver (class-like mock to satisfy TS typings)
class MockIntersectionObserver {
constructor(private cb?: IntersectionObserverCallback, private options?: IntersectionObserverInit) {}
observe = vi.fn()
unobserve = vi.fn()
disconnect = vi.fn()
takeRecords() { return [] }
}
// Assign the mock class for the test environment
// eslint-disable-next-line @typescript-eslint/no-explicit-any
;(global as any).IntersectionObserver = MockIntersectionObserver
// Mock getBoundingClientRect for all elements
// NOTE(review): patching Element.prototype leaks across suites unless the
// runner isolates environments per file — confirm test-runner config.
Element.prototype.getBoundingClientRect = vi.fn(() => defaultRect)
})
it('initializes left/right panels with CSS var of 0 and transform style when motion allowed', () => {
const { getByTestId } = render(<ArtistPortfolio artistId="1" />)
const left = getByTestId('artist-left-panel')
const right = getByTestId('artist-right-panel')
expect(left).toBeInTheDocument()
expect(right).toBeInTheDocument()
// CSS var should be initialized to 0px on mount
expect(left.style.getPropertyValue('--parallax-offset')).toBe('0px')
expect(right.style.getPropertyValue('--parallax-offset')).toBe('0px')
// When motion is allowed, the element should expose the translateY style (uses CSS var)
expect(left).toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
expect(right).toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
})
it('does not apply parallax transform when prefers-reduced-motion is true', () => {
// Mock reduced motion preference
Object.defineProperty(window, 'matchMedia', {
writable: true,
value: createMatchMedia(true),
})
const { getByTestId } = render(<ArtistPortfolio artistId="1" />)
const left = getByTestId('artist-left-panel')
const right = getByTestId('artist-right-panel')
// With reduced motion, the hook should not add transform/willChange styles
expect(left).not.toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
expect(left).not.toHaveStyle({ willChange: 'transform' })
expect(right).not.toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
expect(right).not.toHaveStyle({ willChange: 'transform' })
})
})

View File

@ -0,0 +1,202 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { render, screen, waitFor } from '@testing-library/react'
import { ArtistsGrid } from '@/components/artists-grid'
import '@testing-library/jest-dom'
// Mock the custom hook
// The grid's only data dependency; each spec drives loading/error/data states through it.
vi.mock('@/hooks/use-artist-data', () => ({
useArtists: vi.fn(),
}))
import { useArtists } from '@/hooks/use-artist-data'
// Covers the four render states of the grid (loading, loaded, error, empty)
// plus card details: links, rate, specialty badges, and availability badge.
describe('ArtistsGrid Component', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// Loading indicator is exposed via role="status" for assistive tech.
it('should display loading state', () => {
vi.mocked(useArtists).mockReturnValue({
data: undefined,
isLoading: true,
error: null,
} as any)
render(<ArtistsGrid />)
expect(screen.getByRole('status')).toBeInTheDocument()
})
it('should display artists when loaded', async () => {
const mockArtists = [
{
id: '1',
slug: 'test-artist',
name: 'Test Artist',
bio: 'Test bio',
specialties: ['Traditional', 'Realism'],
instagramHandle: '@testartist',
portfolioImages: [
{
id: '1',
artistId: '1',
url: 'https://example.com/image.jpg',
caption: 'Test image',
tags: ['Traditional'],
isPublic: true,
orderIndex: 0,
createdAt: new Date(),
},
],
isActive: true,
hourlyRate: 150,
},
]
vi.mocked(useArtists).mockReturnValue({
data: mockArtists,
isLoading: false,
error: null,
} as any)
render(<ArtistsGrid />)
await waitFor(() => {
expect(screen.getByText('Test Artist')).toBeInTheDocument()
})
// Specialties render as a comma-joined list; active artists show "Available".
expect(screen.getByText(/Traditional, Realism/i)).toBeInTheDocument()
expect(screen.getByText('Available')).toBeInTheDocument()
})
// Hook errors surface as a message plus a retry button.
it('should display error state', () => {
vi.mocked(useArtists).mockReturnValue({
data: undefined,
isLoading: false,
error: new Error('Failed to fetch'),
} as any)
render(<ArtistsGrid />)
expect(screen.getByText(/Failed to load artists/i)).toBeInTheDocument()
expect(screen.getByRole('button', { name: /retry/i })).toBeInTheDocument()
})
it('should display empty state when no artists match filter', async () => {
vi.mocked(useArtists).mockReturnValue({
data: [],
isLoading: false,
error: null,
} as any)
render(<ArtistsGrid />)
await waitFor(() => {
expect(screen.getByText(/No artists found/i)).toBeInTheDocument()
})
})
// Card links are derived from the artist slug; rate renders as $<n>/hr.
it('should display artist cards with portfolio images', async () => {
const mockArtists = [
{
id: '1',
slug: 'artist-one',
name: 'Artist One',
bio: 'Bio one',
specialties: ['Traditional'],
portfolioImages: [
{
id: '1',
artistId: '1',
url: 'https://example.com/img1.jpg',
tags: ['profile'],
isPublic: true,
orderIndex: 0,
createdAt: new Date(),
},
],
isActive: true,
hourlyRate: 100,
},
]
vi.mocked(useArtists).mockReturnValue({
data: mockArtists,
isLoading: false,
error: null,
} as any)
render(<ArtistsGrid />)
await waitFor(() => {
// Check for View Portfolio link
const portfolioLink = screen.getByRole('link', { name: /View Portfolio/i })
expect(portfolioLink).toHaveAttribute('href', '/artists/artist-one')
// Check for Book Now link
const bookLink = screen.getByRole('link', { name: /Book Now/i })
expect(bookLink).toHaveAttribute('href', '/book?artist=artist-one')
// Check for hourly rate display
expect(screen.getByText(/\$100\/hr/i)).toBeInTheDocument()
})
})
// Only the first three specialties render as badges; the rest collapse to "+N more".
it('should display specialties as badges', async () => {
const mockArtists = [
{
id: '1',
slug: 'multi-specialty-artist',
name: 'Multi Specialty Artist',
bio: 'Expert in multiple styles',
specialties: ['Traditional', 'Realism', 'Fine Line', 'Japanese'],
portfolioImages: [],
isActive: true,
},
]
vi.mocked(useArtists).mockReturnValue({
data: mockArtists,
isLoading: false,
error: null,
} as any)
render(<ArtistsGrid />)
await waitFor(() => {
// Should show first 3 specialties
expect(screen.getByText('Traditional')).toBeInTheDocument()
expect(screen.getByText('Realism')).toBeInTheDocument()
expect(screen.getByText('Fine Line')).toBeInTheDocument()
// Should show "+1 more" badge for the 4th specialty
expect(screen.getByText('+1 more')).toBeInTheDocument()
})
})
it('should show inactive badge for inactive artists', async () => {
const mockArtists = [
{
id: '1',
slug: 'inactive-artist',
name: 'Inactive Artist',
bio: 'Currently unavailable',
specialties: ['Traditional'],
portfolioImages: [],
isActive: false,
},
]
vi.mocked(useArtists).mockReturnValue({
data: mockArtists,
isLoading: false,
error: null,
} as any)
render(<ArtistsGrid />)
await waitFor(() => {
expect(screen.getByText('Unavailable')).toBeInTheDocument()
})
})
})

View File

@ -0,0 +1,132 @@
import React from 'react'
import { render, screen } from '@testing-library/react'
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { HeroSection } from '@/components/hero-section'
// Mock the feature flags provider
// Default: every flag reads as enabled; individual specs override below.
vi.mock('@/components/feature-flags-provider', () => ({
useFeatureFlag: vi.fn(() => true),
}))
// Mock the parallax hooks
// Each layer starts at translateY(0px) so mount-state assertions are deterministic.
vi.mock('@/hooks/use-parallax', () => ({
useMultiLayerParallax: vi.fn(() => ({
background: {
ref: { current: null },
style: { transform: 'translateY(0px)' },
},
midground: {
ref: { current: null },
style: { transform: 'translateY(0px)' },
},
foreground: {
ref: { current: null },
style: { transform: 'translateY(0px)' },
},
})),
useReducedMotion: vi.fn(() => false),
}))
// Covers rendering, accessibility, semantics, performance hints, feature
// flagging, and reduced-motion handling of the hero's parallax layers.
describe('HeroSection Parallax Implementation', () => {
beforeEach(() => {
// Reset mocks
vi.clearAllMocks()
})
it("renders hero section with all layers", () => {
render(<HeroSection />)
// Check for main heading
expect(screen.getByRole("heading", { name: /united tattoo/i })).toBeInTheDocument()
// Check for tagline
expect(screen.getByText(/where artistry meets precision/i)).toBeInTheDocument()
// Check for CTA button
expect(screen.getByRole("button", { name: /book consultation/i })).toBeInTheDocument()
})
it('applies reduced motion data attribute when reduced motion is preferred', async () => {
// Dynamic import returns the mocked module so the same vi.fn can be overridden.
const { useReducedMotion } = await import('@/hooks/use-parallax')
vi.mocked(useReducedMotion).mockReturnValue(true)
render(<HeroSection />)
const section = document.querySelector('section')
expect(section).toHaveAttribute('data-reduced-motion', 'true')
})
it("has proper accessibility attributes for decorative images", () => {
render(<HeroSection />)
// Background and midground layers should be aria-hidden
const decorativeElements = document.querySelectorAll('[aria-hidden="true"]')
expect(decorativeElements.length).toBeGreaterThan(0)
})
it("uses proper semantic structure", () => {
render(<HeroSection />)
// Should have proper heading hierarchy
const heading = screen.getByRole("heading", { name: /united tattoo/i })
expect(heading.tagName).toBe("H1")
// Should have proper section structure
const section = document.querySelector("section")
expect(section).toHaveAttribute("id", "home")
})
it("applies will-change-transform for performance optimization", () => {
render(<HeroSection />)
const transformElements = document.querySelectorAll(".will-change-transform")
expect(transformElements.length).toBeGreaterThan(0)
})
it('respects feature flag for advanced animations', async () => {
const { useFeatureFlag } = await import('@/components/feature-flags-provider')
const { useMultiLayerParallax } = await import('@/hooks/use-parallax')
// Test with feature flag disabled
vi.mocked(useFeatureFlag).mockReturnValue(false)
render(<HeroSection />)
// Should pass disabled=true to parallax hook when feature flag is off
// NOTE(review): the assertion checks a bare `true` argument — presumably the
// hook's "disabled" parameter; confirm against useMultiLayerParallax's signature.
expect(useMultiLayerParallax).toHaveBeenCalledWith(true)
})
it("has responsive design classes", () => {
render(<HeroSection />)
const heading = screen.getByRole("heading", { name: /united tattoo/i })
expect(heading).toHaveClass("text-5xl", "lg:text-7xl")
const tagline = screen.getByText(/where artistry meets precision/i)
expect(tagline).toHaveClass("text-xl", "lg:text-2xl")
})
it("initializes parallax transforms to 0 at mount", () => {
render(<HeroSection />)
// All parallax layers should initialize with 0px transform
// NOTE(review): index-based querySelectorAll lookups assume DOM order
// background → midground → foreground; brittle if the markup reorders.
const backgroundLayer = document.querySelector('[style*="translateY(0px)"]')
const midgroundLayer = document.querySelectorAll('[style*="translateY(0px)"]')[1]
const foregroundLayer = document.querySelectorAll('[style*="translateY(0px)"]')[2]
expect(backgroundLayer).toBeInTheDocument()
expect(midgroundLayer).toBeInTheDocument()
expect(foregroundLayer).toBeInTheDocument()
})
it("disables parallax transforms when reduced motion is preferred", async () => {
const { useReducedMotion } = await import('@/hooks/use-parallax')
vi.mocked(useReducedMotion).mockReturnValue(true)
render(<HeroSection />)
// When reduced motion is preferred, parallax should be disabled
const section = document.querySelector('section')
expect(section).toHaveAttribute('data-reduced-motion', 'true')
})
})

View File

@ -0,0 +1,109 @@
import React from 'react'
import { render, screen } from '@testing-library/react'
import { describe, expect, it } from 'vitest'
import { PrivacyPage } from '@/components/privacy-page'
// Snapshot-style assertions that the privacy page uses ShadCN design tokens
// and primitives consistently, instead of ad-hoc Tailwind colour utilities.
describe('PrivacyPage ShadCN UI Consistency', () => {
  it('uses standardized heading and body scales with ShadCN primitives', () => {
    render(<PrivacyPage />)
    // Verify main container uses ShadCN background tokens
    const mainContainer = document.querySelector('.min-h-screen')
    expect(mainContainer).toHaveClass('bg-background', 'text-foreground')
    // Verify heading uses consistent font classes and scale
    const mainHeading = screen.getByText('Privacy Policy')
    expect(mainHeading).toHaveClass('font-playfair', 'text-5xl', 'lg:text-7xl')
    // Verify body text uses consistent muted foreground token
    const bodyText = screen.getByText(/We respect your privacy/)
    expect(bodyText).toHaveClass('text-muted-foreground')
    // Verify no ad-hoc color classes are used
    // (raw-HTML substring scan: coarse, but catches classes on any element)
    const htmlContent = document.documentElement.innerHTML
    expect(htmlContent).not.toContain('text-white')
    expect(htmlContent).not.toContain('text-gray-300')
    expect(htmlContent).not.toContain('bg-white/5')
    expect(htmlContent).not.toContain('border-white/10')
    // Verify ShadCN design tokens are consistently used
    expect(htmlContent).toContain('text-muted-foreground')
    expect(htmlContent).toContain('bg-background')
    expect(htmlContent).toContain('text-foreground')
  })
  it('uses ShadCN primitives correctly throughout the page', () => {
    render(<PrivacyPage />)
    // Verify Alert primitive is present and properly structured
    const alert = screen.getByRole('alert')
    expect(alert).toHaveAttribute('data-slot', 'alert')
    // Verify Badge primitive is present
    const badge = screen.getByText('Last updated: 2025-09-16')
    expect(badge).toBeInTheDocument()
    // Verify Card primitives are present (multiple cards should exist)
    const cards = document.querySelectorAll('[data-slot="card"]')
    expect(cards.length).toBeGreaterThan(0)
    // Verify Card headers and content use proper ShadCN structure
    const cardHeaders = document.querySelectorAll('[data-slot="card-header"]')
    expect(cardHeaders.length).toBeGreaterThan(0)
    const cardContents = document.querySelectorAll('[data-slot="card-content"]')
    expect(cardContents.length).toBeGreaterThan(0)
    // Verify all CardContent uses muted foreground token
    const cardContentElements = document.querySelectorAll('[data-slot="card-content"]')
    cardContentElements.forEach(element => {
      expect(element).toHaveClass('text-muted-foreground')
    })
  })
  it('maintains consistent spacing and typography patterns', () => {
    render(<PrivacyPage />)
    // Verify consistent spacing classes are used
    const htmlContent = document.documentElement.innerHTML
    expect(htmlContent).toContain('space-y-3')
    expect(htmlContent).toContain('gap-6')
    expect(htmlContent).toContain('px-8')
    expect(htmlContent).toContain('lg:px-16')
    // Verify consistent text sizing
    expect(htmlContent).toContain('text-xl')
    expect(htmlContent).toContain('leading-relaxed')
    // Verify grid layout consistency
    expect(htmlContent).toContain('grid-cols-1')
    expect(htmlContent).toContain('lg:grid-cols-2')
    // Verify responsive design patterns
    expect(htmlContent).toContain('max-w-4xl')
    expect(htmlContent).toContain('max-w-6xl')
  })
  it('uses proper icon integration with ShadCN components', () => {
    render(<PrivacyPage />)
    // Verify icons are properly integrated without ad-hoc color classes
    const infoIcon = document.querySelector('.lucide-info')
    expect(infoIcon).toBeInTheDocument()
    // Verify icons use consistent sizing
    const htmlContent = document.documentElement.innerHTML
    expect(htmlContent).toContain('w-5 h-5')
    // Verify icons don't have ad-hoc color overrides
    expect(htmlContent).not.toContain('text-white')
  })
  it('applies motion classes with reduced-motion safeguard', () => {
    render(<PrivacyPage />)
    // Animations must be paired with the motion-reduce escape hatch.
    const html = document.documentElement.innerHTML
    expect(html).toContain('animate-in')
    expect(html).toContain('motion-reduce:animate-none')
  })
})

View File

@ -0,0 +1,34 @@
import { describe, expect, it, vi } from 'vitest'
// Force the booking feature off and stub auth so the route module can be
// imported in isolation, without a real session or flag service.
vi.mock('@/lib/flags', () => ({
  Flags: { BOOKING_ENABLED: false },
}))
vi.mock('@/lib/auth', () => ({
  authOptions: {},
}))
vi.mock('next-auth', () => ({
  getServerSession: vi.fn(),
}))

describe('Booking appointments mutations with BOOKING_ENABLED=false', () => {
  // Every mutating verb must short-circuit with the same 503 payload,
  // so generate one identical test per handler.
  const methods = ['POST', 'PUT', 'DELETE'] as const

  for (const method of methods) {
    it(`${method} returns 503 without invoking booking logic`, async () => {
      // Dynamic import so the vi.mock factories above are in effect.
      const routeModule = await import('../../app/api/appointments/route')
      const handler = routeModule[method]
      const response = await handler({} as any)
      expect(response.status).toBe(503)
      await expect(response.json()).resolves.toEqual({ error: 'Booking disabled' })
    })
  }
})

View File

@ -0,0 +1,23 @@
import { describe, it, expect, vi } from 'vitest'
// Disable the uploads-admin flag and neutralise auth/session lookups so the
// route handler can be exercised without a signed-in user.
vi.mock('@/lib/flags', () => ({
  Flags: { UPLOADS_ADMIN_ENABLED: false },
}))
vi.mock('@/lib/auth', () => ({
  authOptions: {},
  requireAuth: vi.fn(),
}))
vi.mock('next-auth', () => ({
  getServerSession: vi.fn(async () => null),
}))

describe('Uploads admin disabled', () => {
  it('returns 503 for files bulk-delete when UPLOADS_ADMIN_ENABLED=false', async () => {
    // Import after mocks so the flag is already off at module-eval time.
    const { POST } = await import('../../app/api/files/bulk-delete/route')
    const request: any = { json: async () => ({ fileIds: ['1'] }) }
    const response = await POST(request as any)
    const payload = await response.json()
    expect(response.status).toBe(503)
    expect(payload).toHaveProperty('error')
  })
})

View File

@ -0,0 +1,25 @@
import React from 'react'
import { renderToString } from 'react-dom/server'
import { describe, expect, it } from 'vitest'
import { ArtistsSection } from '@/components/artists-section'
import { FeatureFlagsProvider } from '@/components/feature-flags-provider'
import { FLAG_DEFAULTS } from '@/lib/flags'
// Flag snapshot with scroll animations switched off; all other flags default.
const disabledAnimationFlags = {
  ...FLAG_DEFAULTS,
  ADVANCED_NAV_SCROLL_ANIMATIONS_ENABLED: false,
} as typeof FLAG_DEFAULTS

describe('ArtistsSection static fallback when animations disabled', () => {
  it('renders cards visible without animation classes', () => {
    const markup = renderToString(
      <FeatureFlagsProvider value={disabledAnimationFlags}>
        <ArtistsSection />
      </FeatureFlagsProvider>,
    )
    // Hidden/pre-animation utility classes must be absent from SSR output…
    expect(markup).not.toContain('opacity-0 translate-y-8')
    // …and the fully-visible classes must be present instead.
    expect(markup).toContain('opacity-100 translate-y-0')
  })
})

View File

@ -0,0 +1,22 @@
import React from "react"
import { renderToString } from "react-dom/server"
import { describe, expect, it } from "vitest"
import { BookingForm } from "@/components/booking-form"
import { FeatureFlagsProvider } from "@/components/feature-flags-provider"
import { FLAG_DEFAULTS } from "@/lib/flags"
// All defaults except booking, which is forced off for this suite.
const disabledFlags = { ...FLAG_DEFAULTS, BOOKING_ENABLED: false } as typeof FLAG_DEFAULTS

describe("BookingForm disabled mode (SSR string)", () => {
  it("includes disabled notice when BOOKING_ENABLED=false", () => {
    const markup = renderToString(
      <FeatureFlagsProvider value={disabledFlags}>
        <BookingForm />
      </FeatureFlagsProvider>,
    )
    // The static fallback copy must be rendered server-side.
    expect(markup).toContain("Online booking is temporarily unavailable")
    expect(markup).toContain("contact the studio")
  })
})

View File

@ -0,0 +1,199 @@
import React from 'react'
import { render, act } from '@testing-library/react'
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { useParallax, useReducedMotion } from '@/hooks/use-parallax'
// ---------------------------------------------------------------------------
// Global browser-API mocks shared by every test in this file. jsdom lacks (or
// only partially implements) these APIs, so the hooks under test would
// otherwise throw at import/mount time.
// ---------------------------------------------------------------------------

// matchMedia: default stub reports "no preference" for every media query.
Object.defineProperty(window, 'matchMedia', {
  writable: true,
  value: vi.fn().mockImplementation(query => ({
    matches: false,
    media: query,
    onchange: null,
    addListener: vi.fn(),
    removeListener: vi.fn(),
    addEventListener: vi.fn(),
    removeEventListener: vi.fn(),
    dispatchEvent: vi.fn(),
  })),
})

// Scroll position and viewport height used by the parallax math.
Object.defineProperty(window, 'pageYOffset', {
  writable: true,
  value: 0,
})
Object.defineProperty(window, 'innerHeight', {
  writable: true,
  value: 800,
})

// requestAnimationFrame: run the callback asynchronously via setTimeout.
// Per the rAF contract the callback receives a high-resolution timestamp and
// the returned handle must be usable with cancelAnimationFrame — the previous
// mock passed no timestamp and leaked a NodeJS.Timeout where a number is
// expected.
global.requestAnimationFrame = vi.fn(
  (callback: FrameRequestCallback) =>
    setTimeout(() => callback(performance.now()), 0) as unknown as number,
)
global.cancelAnimationFrame = vi.fn((id: number) =>
  clearTimeout(id as unknown as ReturnType<typeof setTimeout>),
)

// IntersectionObserver: inert stub — these tests never trigger intersections.
global.IntersectionObserver = vi.fn(() => ({
  observe: vi.fn(),
  unobserve: vi.fn(),
  disconnect: vi.fn(),
})) as unknown as typeof IntersectionObserver

// getBoundingClientRect: fixed 100×100 box at the viewport origin so the
// parallax hook always sees the element as on-screen.
Element.prototype.getBoundingClientRect = vi.fn(() => ({
  top: 0,
  bottom: 100,
  left: 0,
  right: 100,
  width: 100,
  height: 100,
  x: 0,
  y: 0,
  toJSON: () => {},
}))
// Minimal harness component: wires useParallax onto a single <div> so the
// specs can inspect the ref and style object the hook produces.
interface TestComponentProps {
  depth?: number
  disabled?: boolean
}

const TestComponent: React.FC<TestComponentProps> = ({ depth = 0.1, disabled = false }) => {
  const { ref, style } = useParallax({ depth, disabled })
  return (
    <div ref={ref} style={style} data-testid="parallax-element">
      Test Element
    </div>
  )
}
// Unit tests for useParallax: verifies the hook drives its translation through
// a CSS custom property (--parallax-offset) rather than rewriting the inline
// transform string, and that `disabled` suppresses all transform styling.
describe('useParallax Hook', () => {
  beforeEach(() => {
    // Reset mocks
    vi.clearAllMocks()
    // Reset window properties
    Object.defineProperty(window, 'pageYOffset', {
      writable: true,
      value: 0,
    })
    // Reset mock implementations
    // (clearAllMocks wipes the getBoundingClientRect stub, so reinstall it)
    Element.prototype.getBoundingClientRect = vi.fn(() => ({
      top: 0,
      bottom: 100,
      left: 0,
      right: 100,
      width: 100,
      height: 100,
      x: 0,
      y: 0,
      toJSON: () => {},
    }))
  })

  it('initializes CSS transform to 0 at mount', () => {
    render(<TestComponent />)
    const element = document.querySelector('[data-testid="parallax-element"]')
    expect(element).toBeInTheDocument()
    // Initially should have 0px transform via CSS variable
    expect(element).toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
  })

  it('does not apply translation until scroll occurs', () => {
    render(<TestComponent depth={0.1} />)
    const element = document.querySelector('[data-testid="parallax-element"]')
    expect(element).toBeInTheDocument()
    // Initially should have 0px transform via CSS variable
    expect(element).toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
    // Simulate scroll
    act(() => {
      Object.defineProperty(window, 'pageYOffset', {
        writable: true,
        value: 100,
      })
      window.dispatchEvent(new Event('scroll'))
    })
    // After scroll, transform should still use CSS variable — the hook is
    // expected to update the custom property, not the transform string itself.
    expect(element).toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
  })

  it('respects disabled prop and does not apply transforms', () => {
    render(<TestComponent depth={0.1} disabled={true} />)
    const element = document.querySelector('[data-testid="parallax-element"]')
    expect(element).toBeInTheDocument()
    // With disabled=true, should have no transform styles
    expect(element).not.toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
    expect(element).not.toHaveStyle({ willChange: 'transform' })
  })
})
// Tests for useReducedMotion: the hook must surface the OS-level
// prefers-reduced-motion preference as a plain boolean, and the parallax
// styling must be suppressed when that preference is set.
describe('useReducedMotion Hook', () => {
  // Install a matchMedia stub that matches only the reduced-motion query.
  // Extracted helper — the two tests previously duplicated this verbatim.
  const mockReducedMotionPreferred = () => {
    Object.defineProperty(window, 'matchMedia', {
      writable: true,
      value: vi.fn().mockImplementation(query => ({
        matches: query === '(prefers-reduced-motion: reduce)',
        media: query,
        onchange: null,
        addListener: vi.fn(),
        removeListener: vi.fn(),
        addEventListener: vi.fn(),
        removeEventListener: vi.fn(),
        dispatchEvent: vi.fn(),
      })),
    })
  }

  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('initializes with correct boolean value from prefersReducedMotion()', () => {
    mockReducedMotionPreferred()
    // Starting as `undefined` fixes the strict-mode TS2454 ("used before
    // being assigned") error the bare `let x: boolean` declaration caused.
    let reducedMotionValue: boolean | undefined
    const TestReducedMotionComponent = () => {
      reducedMotionValue = useReducedMotion()
      return <div>Test</div>
    }
    render(<TestReducedMotionComponent />)
    // Should be a boolean value, not a function reference
    expect(typeof reducedMotionValue).toBe('boolean')
    expect(reducedMotionValue).toBe(true)
  })

  it('disables parallax transforms when reduced motion is preferred', () => {
    mockReducedMotionPreferred()
    render(<TestComponent depth={0.1} />)
    const element = document.querySelector('[data-testid="parallax-element"]')
    expect(element).toBeInTheDocument()
    // With reduced motion, should have no transform styles
    expect(element).not.toHaveStyle({ transform: 'translateY(var(--parallax-offset, 0px))' })
    expect(element).not.toHaveStyle({ willChange: 'transform' })
  })
})

View File

@ -0,0 +1,144 @@
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'
// Mock the database using proper Vitest patterns
// NOTE(review): these variables are deliberately named with the `mock` prefix —
// vi.mock() factories are hoisted above imports, and Vitest only allows a
// factory to close over outer variables whose names begin with "mock".
const mockStmt = {
  bind: vi.fn().mockReturnThis(), // enables prepare(...).bind(...).run() chaining
  run: vi.fn().mockResolvedValue({ success: true, changes: 1 }),
  get: vi.fn(),
  all: vi.fn().mockResolvedValue({ results: [] }),
  first: vi.fn().mockResolvedValue(null),
}
const mockDB = {
  prepare: vi.fn().mockReturnValue(mockStmt),
  exec: vi.fn(),
}
// Mock the entire lib/db module
vi.mock('@/lib/db', () => ({
  getDB: vi.fn(() => mockDB),
}))
// Mock the artists data with proper structure
// (two fixtures: one with work images, one without, to cover both branches)
vi.mock('@/data/artists', () => ({
  artists: [
    {
      id: '1',
      name: 'Test Artist',
      bio: 'Test bio',
      styles: ['Traditional', 'Realism'],
      instagram: 'https://instagram.com/testartist',
      experience: '5 years',
      workImages: ['/test-image.jpg'],
      faceImage: '/test-face.jpg',
    },
    {
      id: '2',
      name: 'Another Artist',
      bio: 'Another bio',
      styles: ['Japanese', 'Blackwork'],
      instagram: 'https://instagram.com/anotherartist',
      experience: '8 years',
      workImages: [],
      faceImage: '/another-face.jpg',
    },
  ],
}))
// Behavioural tests for DataMigrator. The class is imported dynamically inside
// beforeEach so the vi.mock factories above are guaranteed to be in effect.
describe('DataMigrator', () => {
  // Typed `any` because the module is resolved at runtime, after mocking.
  let DataMigrator: any
  let migrator: any

  beforeEach(async () => {
    vi.clearAllMocks()
    // Reset mock implementations
    mockDB.prepare.mockReturnValue(mockStmt)
    mockStmt.first.mockResolvedValue(null)
    mockStmt.run.mockResolvedValue({ success: true, changes: 1 })
    // Import the DataMigrator class after mocks are set up
    const module = await import('@/lib/data-migration')
    DataMigrator = module.DataMigrator
    migrator = new DataMigrator()
  })

  afterEach(() => {
    vi.resetAllMocks()
  })

  describe('isMigrationCompleted', () => {
    it('should return false when no artists exist', async () => {
      // A zero row count means the migration has not run yet.
      mockStmt.first.mockResolvedValueOnce({ count: 0 })
      const isCompleted = await migrator.isMigrationCompleted()
      expect(isCompleted).toBe(false)
    })
    it('should return true when artists exist', async () => {
      mockStmt.first.mockResolvedValueOnce({ count: 2 })
      const isCompleted = await migrator.isMigrationCompleted()
      expect(isCompleted).toBe(true)
    })
  })

  describe('migrateArtistData', () => {
    it('should migrate all artists successfully', async () => {
      await migrator.migrateArtistData()
      // Verify user creation calls
      expect(mockDB.prepare).toHaveBeenCalledWith(
        expect.stringContaining('INSERT OR IGNORE INTO users')
      )
      // Verify artist creation calls
      expect(mockDB.prepare).toHaveBeenCalledWith(
        expect.stringContaining('INSERT OR IGNORE INTO artists')
      )
      // Verify portfolio image creation calls
      expect(mockDB.prepare).toHaveBeenCalledWith(
        expect.stringContaining('INSERT OR IGNORE INTO portfolio_images')
      )
    })
    it('should handle errors gracefully', async () => {
      // First statement execution fails → the migration must surface it.
      mockStmt.run.mockRejectedValueOnce(new Error('Database error'))
      await expect(migrator.migrateArtistData()).rejects.toThrow('Database error')
    })
  })

  describe('clearMigratedData', () => {
    it('should clear all data successfully', async () => {
      await migrator.clearMigratedData()
      expect(mockDB.prepare).toHaveBeenCalledWith('DELETE FROM portfolio_images')
      expect(mockDB.prepare).toHaveBeenCalledWith('DELETE FROM artists')
      expect(mockDB.prepare).toHaveBeenCalledWith('DELETE FROM users WHERE role = "ARTIST"')
    })
    it('should handle clear data errors', async () => {
      mockStmt.run.mockRejectedValueOnce(new Error('Clear error'))
      await expect(migrator.clearMigratedData()).rejects.toThrow('Clear error')
    })
  })

  describe('getMigrationStats', () => {
    it('should return correct migration statistics', async () => {
      // Three sequential COUNT queries, mocked in call order.
      mockStmt.first
        .mockResolvedValueOnce({ count: 3 }) // total users
        .mockResolvedValueOnce({ count: 2 }) // total artists
        .mockResolvedValueOnce({ count: 1 }) // total portfolio images
      const stats = await migrator.getMigrationStats()
      expect(stats.totalUsers).toBe(3)
      expect(stats.totalArtists).toBe(2)
      expect(stats.totalPortfolioImages).toBe(1)
    })
  })
})

269
__tests__/lib/db.test.ts Normal file
View File

@ -0,0 +1,269 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import {
getArtists,
getArtistWithPortfolio,
getPublicArtists,
getArtistBySlug,
updateArtist,
addPortfolioImage,
updatePortfolioImage,
deletePortfolioImage,
} from '@/lib/db'
// Factory for a chainable D1 stand-in: prepare() and bind() return the object
// itself so `db.prepare(sql).bind(...).first()` resolves on the same mock.
function createMockD1() {
  return {
    prepare: vi.fn().mockReturnThis(),
    bind: vi.fn().mockReturnThis(),
    first: vi.fn(),
    all: vi.fn(),
    run: vi.fn(),
  }
}
// Contract tests for the D1-backed data-access helpers in lib/db. Each test
// wires the chainable mock from createMockD1() into a fake env object and
// asserts that JSON text columns are parsed/stringified and that SQLite
// integer flags are surfaced as booleans.
describe('Database Functions', () => {
  let mockEnv: { DB: ReturnType<typeof createMockD1> }

  beforeEach(() => {
    // Fresh mock per test so call counters never leak between specs.
    mockEnv = {
      DB: createMockD1(),
    }
    vi.clearAllMocks()
  })

  describe('getArtists', () => {
    it('should fetch all artists and parse JSON fields', async () => {
      const mockArtists = [
        {
          id: '1',
          name: 'Test Artist',
          bio: 'Test bio',
          specialties: '["Traditional","Realism"]',
          isActive: 1,
        },
      ]
      mockEnv.DB.all.mockResolvedValue({
        results: mockArtists,
        success: true,
      })
      const result = await getArtists(mockEnv)
      expect(result).toHaveLength(1)
      // JSON text column is parsed into a real array…
      expect(result[0].specialties).toEqual(['Traditional', 'Realism'])
      // …and the integer flag becomes a boolean.
      expect(result[0].isActive).toBe(true)
    })
    it('should handle empty results', async () => {
      mockEnv.DB.all.mockResolvedValue({
        results: [],
        success: true,
      })
      const result = await getArtists(mockEnv)
      expect(result).toEqual([])
    })
    it('should handle database errors', async () => {
      mockEnv.DB.all.mockRejectedValue(new Error('Database error'))
      await expect(getArtists(mockEnv)).rejects.toThrow('Database error')
    })
  })

  describe('getArtistWithPortfolio', () => {
    it('should fetch artist with portfolio images', async () => {
      const mockArtist = {
        id: '1',
        name: 'Test Artist',
        bio: 'Test bio',
        specialties: '["Traditional"]',
        isActive: 1,
      }
      const mockImages = [
        {
          id: '1',
          artistId: '1',
          url: 'https://example.com/image.jpg',
          caption: 'Test image',
          tags: '["Traditional","Portrait"]',
          isPublic: 1,
          orderIndex: 0,
        },
      ]
      // first() resolves the artist row; all() resolves its images.
      mockEnv.DB.first.mockResolvedValueOnce(mockArtist)
      mockEnv.DB.all.mockResolvedValueOnce({
        results: mockImages,
        success: true,
      })
      const result = await getArtistWithPortfolio('1', mockEnv)
      expect(result).toBeDefined()
      expect(result?.name).toBe('Test Artist')
      expect(result?.portfolioImages).toHaveLength(1)
      expect(result?.portfolioImages[0].tags).toEqual(['Traditional', 'Portrait'])
    })
    it('should return null for non-existent artist', async () => {
      mockEnv.DB.first.mockResolvedValue(null)
      const result = await getArtistWithPortfolio('999', mockEnv)
      expect(result).toBeNull()
    })
  })

  describe('getPublicArtists', () => {
    it('should return only active artists with public images', async () => {
      const mockArtists = [
        {
          id: '1',
          name: 'Active Artist',
          specialties: '["Traditional"]',
          isActive: 1,
        },
        {
          id: '2',
          name: 'Inactive Artist',
          specialties: '["Realism"]',
          isActive: 0,
        },
      ]
      // The SQL-level WHERE filtering is simulated by pre-filtering the mock
      // results; the assertion only checks the shape of what comes back.
      mockEnv.DB.all.mockResolvedValue({
        results: mockArtists.filter(a => a.isActive),
        success: true,
      })
      const result = await getPublicArtists({}, mockEnv)
      expect(result).toHaveLength(1)
      expect(result[0].name).toBe('Active Artist')
    })
    it('should filter by specialty', async () => {
      const mockArtists = [
        {
          id: '1',
          name: 'Traditional Artist',
          specialties: '["Traditional"]',
          isActive: 1,
        },
      ]
      mockEnv.DB.all.mockResolvedValue({
        results: mockArtists,
        success: true,
      })
      await getPublicArtists({ specialty: 'Traditional' }, mockEnv)
      // Verify the bind was called (specialty filter applied)
      expect(mockEnv.DB.bind).toHaveBeenCalled()
    })
  })

  describe('getArtistBySlug', () => {
    it('should fetch artist by slug', async () => {
      const mockArtist = {
        id: '1',
        slug: 'test-artist',
        name: 'Test Artist',
        specialties: '["Traditional"]',
      }
      mockEnv.DB.first.mockResolvedValue(mockArtist)
      mockEnv.DB.all.mockResolvedValue({
        results: [],
        success: true,
      })
      const result = await getArtistBySlug('test-artist', mockEnv)
      expect(result).toBeDefined()
      expect(result?.slug).toBe('test-artist')
      // The slug must be passed as the bound query parameter.
      expect(mockEnv.DB.bind).toHaveBeenCalledWith('test-artist')
    })
  })

  describe('updateArtist', () => {
    it('should update artist and stringify JSON fields', async () => {
      const updateData = {
        id: '1',
        name: 'Updated Name',
        bio: 'Updated bio',
        specialties: ['Traditional', 'Realism'],
        hourlyRate: 150,
      }
      mockEnv.DB.run.mockResolvedValue({
        success: true,
        meta: { changes: 1 },
      })
      await updateArtist('1', updateData, mockEnv)
      // Verify the update was called
      expect(mockEnv.DB.run).toHaveBeenCalled()
      expect(mockEnv.DB.bind).toHaveBeenCalled()
    })
  })

  describe('Portfolio Image Operations', () => {
    it('should add portfolio image', async () => {
      const imageData = {
        url: 'https://example.com/image.jpg',
        caption: 'Test caption',
        tags: ['Traditional'],
        isPublic: true,
        orderIndex: 0,
      }
      mockEnv.DB.run.mockResolvedValue({
        success: true,
        meta: { last_row_id: 1 },
      })
      // The helper re-reads the inserted row, so first() returns it with the
      // tags column serialised the way D1 stores it.
      mockEnv.DB.first.mockResolvedValue({
        id: '1',
        ...imageData,
        artistId: '1',
        tags: JSON.stringify(imageData.tags),
      })
      const result = await addPortfolioImage('1', imageData, mockEnv)
      expect(result).toBeDefined()
      expect(result.caption).toBe('Test caption')
    })
    it('should update portfolio image', async () => {
      const updateData = {
        caption: 'Updated caption',
        tags: ['Traditional', 'Portrait'],
        isPublic: false,
      }
      mockEnv.DB.run.mockResolvedValue({
        success: true,
        meta: { changes: 1 },
      })
      await updatePortfolioImage('1', updateData, mockEnv)
      expect(mockEnv.DB.run).toHaveBeenCalled()
    })
    it('should delete portfolio image', async () => {
      mockEnv.DB.run.mockResolvedValue({
        success: true,
        meta: { changes: 1 },
      })
      await deletePortfolioImage('1', mockEnv)
      expect(mockEnv.DB.run).toHaveBeenCalled()
    })
  })
})

View File

@ -0,0 +1,92 @@
import { beforeEach, afterEach, describe, expect, it, vi } from "vitest"
import {
FLAG_DEFAULTS,
Flags,
getFlags,
registerRuntimeFlags,
resetFlagsCache,
parseBool,
} from "@/lib/flags"
// All known flag names, derived from the defaults object.
type FlagName = keyof typeof FLAG_DEFAULTS
const flagKeys = Object.keys(FLAG_DEFAULTS) as FlagName[]
// Snapshot of the real process.env values, restored after each test so this
// suite never leaks flag state into other test files.
const originalEnv: Partial<Record<FlagName, string | undefined>> = {}

beforeEach(() => {
  resetFlagsCache()
  for (const key of flagKeys) {
    // Capture each env var only once — the first time it is touched.
    if (!(key in originalEnv)) {
      originalEnv[key] = process.env[key]
    }
    delete process.env[key]
  }
  // Drop any runtime override registered by a previous test.
  delete (globalThis as Record<string, unknown>).__UNITED_TATTOO_RUNTIME_FLAGS__
})

afterEach(() => {
  resetFlagsCache()
  for (const key of flagKeys) {
    const value = originalEnv[key]
    if (value === undefined) {
      delete process.env[key]
    } else {
      process.env[key] = value
    }
  }
  delete (globalThis as Record<string, unknown>).__UNITED_TATTOO_RUNTIME_FLAGS__
})

describe("parseBool", () => {
  it("handles string coercion and defaults", () => {
    expect(parseBool("true", false)).toBe(true)
    // Whitespace and case are normalised before comparison.
    expect(parseBool(" FALSE ", true)).toBe(false)
    expect(parseBool("1", false)).toBe(true)
    // Undefined input falls back to the provided default.
    expect(parseBool(undefined, true)).toBe(true)
  })
})

describe("getFlags", () => {
  it("falls back to defaults and logs missing keys", () => {
    // Silence the expected warning while still asserting that it fired.
    const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
    const snapshot = getFlags({ refresh: true })
    expect(snapshot).toMatchObject(FLAG_DEFAULTS)
    expect(warnSpy).toHaveBeenCalled()
    warnSpy.mockRestore()
  })
  it("honours environment overrides", () => {
    process.env.BOOKING_ENABLED = "false"
    process.env.PUBLIC_APPOINTMENT_REQUESTS_ENABLED = "true"
    const snapshot = getFlags({ refresh: true })
    expect(snapshot.BOOKING_ENABLED).toBe(false)
    expect(snapshot.PUBLIC_APPOINTMENT_REQUESTS_ENABLED).toBe(true)
  })
})

describe("registerRuntimeFlags", () => {
  it("allows runtime overrides to take precedence", () => {
    process.env.BOOKING_ENABLED = "true"
    const override = { ...FLAG_DEFAULTS, BOOKING_ENABLED: false } as typeof FLAG_DEFAULTS
    registerRuntimeFlags(override)
    // Runtime registration wins over the conflicting env var.
    const snapshot = getFlags()
    expect(snapshot.BOOKING_ENABLED).toBe(false)
  })
})

describe("Flags proxy", () => {
  it("reflects current snapshot values", () => {
    process.env.ADMIN_ENABLED = "false"
    const snapshot = getFlags({ refresh: true })
    expect(snapshot.ADMIN_ENABLED).toBe(false)
    // The proxy reads the same snapshot without an explicit getFlags call.
    expect(Flags.ADMIN_ENABLED).toBe(false)
  })
})

View File

@ -0,0 +1,92 @@
import { describe, it, expect } from 'vitest'
import { createArtistSchema, createAppointmentSchema } from '@/lib/validations'
// Schema-validation specs: a known-good fixture per schema, with invalid
// variants derived from it, so each rejection test states only what differs.
describe('Validation Schemas', () => {
  describe('createArtistSchema', () => {
    const validArtist = {
      name: 'John Doe',
      bio: 'Experienced tattoo artist',
      specialties: ['Traditional', 'Realism'],
      instagramHandle: 'johndoe',
      hourlyRate: 150,
      isActive: true,
    }

    it('should validate a valid artist object', () => {
      const result = createArtistSchema.safeParse(validArtist)
      expect(result.success).toBe(true)
    })

    it('should reject artist with invalid data', () => {
      const invalidArtist = {
        name: '', // empty name must fail
        bio: 'Bio',
        specialties: [],
        hourlyRate: -50, // negative rate must fail
      }
      const result = createArtistSchema.safeParse(invalidArtist)
      expect(result.success).toBe(false)
    })

    it('should require name field', () => {
      const { name: _omitted, ...artistWithoutName } = validArtist
      const result = createArtistSchema.safeParse(artistWithoutName)
      expect(result.success).toBe(false)
    })
  })

  describe('createAppointmentSchema', () => {
    const validAppointment = {
      clientName: 'Jane Smith',
      clientEmail: 'jane@example.com',
      clientPhone: '+1234567890',
      artistId: 'artist-123',
      startTime: new Date('2024-12-01T10:00:00Z'),
      endTime: new Date('2024-12-01T12:00:00Z'),
      description: 'Traditional rose tattoo',
      estimatedPrice: 300,
      status: 'PENDING' as const,
    }

    it('should validate a valid appointment object', () => {
      const result = createAppointmentSchema.safeParse(validAppointment)
      expect(result.success).toBe(true)
    })

    it('should reject appointment with invalid email', () => {
      const withBadEmail = { ...validAppointment, clientEmail: 'invalid-email' }
      const result = createAppointmentSchema.safeParse(withBadEmail)
      expect(result.success).toBe(false)
    })

    it('should reject appointment with end time before start time', () => {
      const inverted = {
        ...validAppointment,
        startTime: new Date('2024-12-01T12:00:00Z'),
        endTime: new Date('2024-12-01T10:00:00Z'), // ends before it starts
      }
      const result = createAppointmentSchema.safeParse(inverted)
      expect(result.success).toBe(false)
    })
  })
})

46
docs/bootstrapping.md Normal file
View File

@ -0,0 +1,46 @@
# Bootstrapping Checklist
This checklist walks you from cloning the template to a runnable project configured with sensible defaults. Keep it open while you initialise a new repo.
## 1. Template hygiene
- [ ] Create a fresh repository (local or Gitea) and copy the template into it.
- [ ] Run `./scripts/bootstrap-template.sh` to update the package name, git remotes, and README badges.
- [ ] Remove example images or assets you do not plan to ship (`public/`, `docs/img/`, etc.).
- [ ] Delete unused test suites so the CI noise floor stays low.
## 2. Runtime setup
- [ ] Review `.env.example` and duplicate it to `.env` for local development.
- [ ] Fill only the sections that match the integrations you intend to use (auth, storage, calendar, analytics).
- [ ] Create secrets in your chosen manager (doppler, sops, 1Password CLI, environment repository) and document where they live.
- [ ] Configure feature flags or toggles that gate optional modules; default to safe fallbacks.
## 3. Dependencies & tooling
- [ ] Decide on a package manager (pnpm, npm, yarn) and lock it in the README + CI.
- [ ] Install linting and formatting tools (`eslint`, `prettier`, `biome`, etc.) and wire them into `package.json` scripts.
- [ ] Add base Git hooks (Husky, Lefthook, or pre-commit) if you rely on pre-push validation.
- [ ] Configure TypeScript paths/aliases so the example tests resolve once you create real modules.
## 4. Infrastructure & services
- [ ] Document deployment tooling (Wrangler, Vercel, Fly.io, Docker) in `docs/stack-decisions.md`.
- [ ] Provision staging and production environments or capture the manual steps.
- [ ] Outline database migration flow (Prisma, Drizzle, Kysely) and how to run it locally.
- [ ] For third-party integrations (OAuth, storage, calendar), confirm rate limits and timeout behaviour.
## 5. CI/CD wiring
- [ ] Choose a pipeline runner (Gitea Actions, Woodpecker, GitHub Actions, etc.).
- [ ] Add jobs for lint, typecheck, unit tests, and build (if applicable).
- [ ] Cache dependencies to keep pipelines fast.
- [ ] Gate deployments on green checks and review status.
## 6. Documentation & knowledge
- [ ] Update `README.md` with product-specific copy, screenshots, and deployment commands.
- [ ] Record architectural decisions in `docs/stack-decisions.md` (lean ADRs are ideal).
- [ ] Extend `docs/edge-cases.md` with anything unique to this project.
- [ ] Share workflow instructions (branching, PR labels, release cadence) in `CONTRIBUTING.md`.
## 7. Launch readiness
- [ ] Smoke-test the bootstrap by running `pnpm test` (or your equivalent) and confirming the example specs fail because their target modules do not exist yet—a deliberate reminder to replace them with real tests.
- [ ] Create a tracking issue or project board with tasks generated from the checklist.
- [ ] Archive or export the checklist with completed items for future reference.
Repeat this ritual for every new project so you ship with fewer unknowns and more confidence.

50
docs/edge-cases.md Normal file
View File

@ -0,0 +1,50 @@
# Edge Case Catalogue
Capture every non-happy-path scenario that has bitten you in past projects. Use these prompts when planning features, writing tests, and updating documentation.
## Authentication & Authorization
- What happens when the identity provider is unavailable or rate-limited?
- Can users authenticate with multiple providers? How do you reconcile identities?
- Do access tokens expire mid-session? Plan silent refresh and forced logout flows.
- Are admin-only routes guarded on the server, not just the client?
- How do you rotate keys or secrets without forcing every user to re-authenticate?
## Feature Flags & Configuration
- Can new features be disabled quickly without redeploying?
- Are default values safe when the config service is unreachable?
- What is logged when a flag evaluation fails?
## Data & Persistence
- Are migrations idempotent? Can you roll them back?
- Do background jobs tolerate partial failure or duplicate delivery?
- What size assumptions exist for JSON payloads, binary blobs, or text fields?
- How do you seed development data without leaking production secrets?
## Scheduling & Calendars
- Do you store timestamps in UTC and render them with the user's offset?
- How do you handle daylight saving transitions and leap seconds?
- Can overlapping events be created? If not, validate and surface clear errors.
- What is the source of truth when multiple calendars sync into one timeline?
## File & Asset Management
- Maximum file size? Enforce both client and server-side.
- Are uploads scanned, transcoded, or resized? Where is the queue?
- How do you serve private files? Signed URLs, download proxies, expiring tokens?
- What is the retention policy and deletion workflow?
## External Services
- Plan for timeouts, retries, and rate limits on each integration.
- If a vendor returns partial data, does your UI still render something helpful?
- Document SLAs and fallbacks in `docs/stack-decisions.md`.
## Observability & Recovery
- Which metrics, logs, and traces are mandatory before launch?
- Do alerts route to a real person with enough context to act?
- After an incident, what automated reports or scripts help recreate the scenario?
## Compliance & Privacy
- How do you handle data export, erasure, and consent?
- What environments carry production data? Are they encrypted at rest?
- Which audit logs must be preserved, and where?
When a new surprise occurs, write the story here, then open a PR to harden the template so the next project benefits immediately.

32
docs/stack-decisions.md Normal file
View File

@ -0,0 +1,32 @@
# Stack Decisions Log
Use this document to capture the "why" behind platform and tooling choices. Treat each entry as a lightweight ADR (Architecture Decision Record).
## Template baseline
- **Framework**: Next.js + TypeScript (edge-friendly, hybrid rendering, great DX)
- **Testing**: Vitest (fast unit/integration runner with React ecosystem support)
- **Styling**: Tailwind CSS or CSS Modules (choose one per project)
- **Deployment**: Cloudflare Pages + Workers (immutable deploys, global edge)
- **Database**: PostgreSQL (Supabase/Neon friendly), accessed via ORM of choice
- **Storage**: S3-compatible buckets (AWS S3, Cloudflare R2)
## Recording a decision
1. Title — short phrase (`Adopt Drizzle ORM`, `Switch CI to Woodpecker`)
2. Context — what problem are you solving? Mention constraints, stakeholders, and trade-offs.
3. Decision — what did you pick and why?
4. Status — proposed, accepted, deprecated, superseded.
5. Consequences — positive and negative effects, migrations required, follow-up work.
## Example entry
```
## Adopt Drizzle ORM
Status: Accepted (2024-02-12)
Context: Need a type-safe query builder that works in serverless environments without generating heavyweight clients.
Decision: Replace Prisma with Drizzle ORM because it offers SQL-first migrations, small runtime footprint, and better edge compatibility.
Consequences:
- Rewrite existing Prisma migrations → Drizzle SQL migrations.
- Update CI to run `drizzle-kit push` instead of `prisma migrate deploy`.
- Developers need to learn the new query builder API.
```
Keep this log close to the code. When you revisit a project months later, these notes will save hours of rediscovery.

View File

@ -0,0 +1,41 @@
# Testing Blueprints
The template ships with full Vitest suites under `__tests__/`. They currently reference the `@/` module alias from a Next.js project. Replace those imports with your actual modules as you build the app. Use this guide to adapt the patterns.
## API routes
- File: `__tests__/api/*`
- Focus: HTTP status codes, query parsing, pagination, error handling.
- Mocks: database adapters, third-party SDKs, feature flags.
- Tips: assert on both response body and the parameters passed into mocked dependencies.
## Components
- File: `__tests__/components/*`
- Focus: accessibility, copy, conditional rendering, navigation flows.
- Mocks: Next.js router, contexts, external hooks.
- Tips: render minimal UI tree, interact with `@testing-library/react` utilities, assert on semantics not implementation.
## Hooks
- File: `__tests__/hooks/*`
- Focus: lifecycle, browser APIs (scroll, resize), async behaviour.
- Mocks: `window`, `document`, timers, intersection observers.
- Tips: wrap `act` around updates, reset mocks between tests, include teardown coverage.
## Flags & configuration
- File: `__tests__/flags/*`
- Focus: toggling features on/off, server-side overrides, fallbacks.
- Mocks: flag evaluation client, configuration store.
- Tips: include “flag service offline” scenarios to enforce safe defaults.
## Libraries
- File: `__tests__/lib/*`
- Focus: data migration guards, validation, persistence abstractions.
- Mocks: filesystem, database clients, clock.
- Tips: write table-driven tests so new edge cases are easy to add.
### Making them yours
1. Rename folders to match real domains (`users`, `billing`, `cms`).
2. Swap module imports from `@/lib/...` to wherever your implementation lives.
3. Keep the error-handling tests, even if you simplify the happy path—they are cheap insurance.
4. Run `pnpm test` (or your equivalent) often; treat failures as documentation gaps.
These suites double as onboarding material. New contributors can read the tests to understand intent before diving into production code.

90
scripts/bootstrap-template.sh Executable file
View File

@ -0,0 +1,90 @@
#!/usr/bin/env bash
# Bootstrap a fresh project from this template.
# - Updates the README heading with the new project name (only while the
#   untouched template title is still present).
# - Sets the package.json "name" field when package.json exists (needs jq).
# - Optionally rewires the git "origin" remote.
# - Stamps .project-name so scripts/tools can read the canonical name.
set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
repo_root="$(cd "${script_dir}/.." && pwd)"
readonly script_dir repo_root

#######################################
# slugify NAME — derive a machine-friendly slug:
# lowercase, spaces → dashes, drop anything that is not alphanumeric or '-'.
# Outputs the slug on stdout.
#######################################
slugify() {
  tr '[:upper:]' '[:lower:]' <<<"$1" | tr ' ' '-' | tr -cd '[:alnum:]-'
}

#######################################
# update_readme_title NAME — replace the first two lines of README.md with
# the project name and a matching '=' underline, but only while the file
# still carries the untouched template title. Guards against a missing
# README so `grep` does not abort the script under `set -e`.
#######################################
update_readme_title() {
  local project_name=$1
  local readme="${repo_root}/README.md"
  if [[ ! -f "${readme}" ]]; then
    echo "No README.md found; skipping title update."
    return 0
  fi
  if grep -q "^Development Project Template" "${readme}"; then
    echo "Updating README title..."
    local tmp_readme
    tmp_readme="$(mktemp)"
    {
      echo "${project_name}"
      echo "${project_name//?/=}"   # '=' underline, same length as the title
      tail -n +3 "${readme}"
    } > "${tmp_readme}"
    mv "${tmp_readme}" "${readme}"
  else
    echo "README already customised; skipping title update."
  fi
}

#######################################
# update_package_name SLUG — set the "name" field in package.json via jq.
# Skips quietly (with a note) when package.json or jq is missing.
#######################################
update_package_name() {
  local project_slug=$1
  local package_json="${repo_root}/package.json"
  if [[ ! -f "${package_json}" ]]; then
    echo "No package.json found; skipping package rename."
    return 0
  fi
  if ! command -v jq >/dev/null 2>&1; then
    echo "jq is not installed; skipping package.json update. Install jq and rerun if needed."
    return 0
  fi
  echo "Updating package.json name → ${project_slug}"
  local tmp_package
  tmp_package="$(mktemp)"
  jq --arg name "${project_slug}" '.name = $name' "${package_json}" > "${tmp_package}"
  mv "${tmp_package}" "${package_json}"
}

#######################################
# maybe_update_remote — interactively offer to point "origin" at a new URL.
# No-op outside a git work tree; refuses an empty URL instead of passing it
# to git (which would fail with a confusing error).
#######################################
maybe_update_remote() {
  if ! git -C "${repo_root}" rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    return 0
  fi
  local current_remote
  current_remote="$(git -C "${repo_root}" remote get-url origin 2>/dev/null || true)"
  echo "Current git remote: ${current_remote:-<none>}"
  local change_remote
  read -r -p "Update git remote? (y/N): " change_remote
  if [[ "${change_remote,,}" == "y" ]]; then
    local remote_url
    read -r -p "New remote URL: " remote_url
    if [[ -z "${remote_url}" ]]; then
      echo "No URL supplied; leaving remote unchanged." >&2
      return 0
    fi
    if git -C "${repo_root}" remote | grep -q "^origin$"; then
      git -C "${repo_root}" remote set-url origin "${remote_url}"
    else
      git -C "${repo_root}" remote add origin "${remote_url}"
    fi
    echo "Origin remote updated."
  else
    echo "Skipping remote update."
  fi
}

main() {
  echo "🚀 Template bootstrap"

  local project_name
  read -r -p "Project name (e.g. \"Atlas Console\"): " project_name
  if [[ -z "${project_name}" ]]; then
    echo "Project name cannot be empty. Aborting." >&2
    exit 1
  fi

  local default_slug project_slug
  default_slug="$(slugify "${project_name}")"
  read -r -p "Project slug [${default_slug}]: " project_slug
  project_slug="${project_slug:-$default_slug}"

  update_readme_title "${project_name}"
  update_package_name "${project_slug}"
  maybe_update_remote

  # Stamp a .project-name file so scripts/tools can read the canonical name.
  printf '%s\n' "${project_name}" > "${repo_root}/.project-name"

  cat <<EOT
✅ Bootstrap complete!
- README updated (if the template title was untouched).
- package.json name set to ${project_slug} (if package.json exists).
- Project name written to .project-name.
Next steps:
1. Review docs/bootstrapping.md and keep working through the checklist.
2. Remove or adapt example tests under __tests__/.
3. Replace template copy and assets with your project's branding.
Happy building!
EOT
}

main "$@"