style: standardize quotes and formatting across YAML and JS files

manjaroblack 2025-08-15 22:00:23 -05:00
parent b9968f312e
commit 5bfca9138c
No known key found for this signature in database
GPG Key ID: 02FD4111DA5560B4
12 changed files with 207 additions and 177 deletions

View File

@@ -4,9 +4,9 @@ on:
workflow_dispatch:
inputs:
version_bump:
description: 'Version bump type'
description: "Version bump type"
required: true
default: 'minor'
default: "minor"
type: choice
options:
- patch
@@ -19,7 +19,7 @@ jobs:
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -30,8 +30,8 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
registry-url: 'https://registry.npmjs.org'
node-version: "20"
registry-url: "https://registry.npmjs.org"
- name: Configure Git
run: |
@@ -57,17 +57,17 @@ jobs:
# Get current version from package.json
CURRENT_VERSION=$(node -p "require('./package.json').version")
echo "current_version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
# Remove beta suffix if present
BASE_VERSION=$(echo $CURRENT_VERSION | sed 's/-beta\.[0-9]\+//')
echo "base_version=$BASE_VERSION" >> $GITHUB_OUTPUT
# Calculate new version based on bump type
IFS='.' read -ra VERSION_PARTS <<< "$BASE_VERSION"
MAJOR=${VERSION_PARTS[0]}
MINOR=${VERSION_PARTS[1]}
PATCH=${VERSION_PARTS[2]}
case "${{ github.event.inputs.version_bump }}" in
"major")
NEW_VERSION="$((MAJOR + 1)).0.0"
@@ -82,7 +82,7 @@ jobs:
NEW_VERSION="$BASE_VERSION"
;;
esac
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "Promoting from $CURRENT_VERSION to $NEW_VERSION"
@@ -90,7 +90,7 @@ jobs:
run: |
# Update main package.json
npm version ${{ steps.version.outputs.new_version }} --no-git-tag-version
# Update installer package.json
sed -i 's/"version": ".*"/"version": "${{ steps.version.outputs.new_version }}"/' tools/installer/package.json
@@ -119,4 +119,4 @@ jobs:
echo "🎉 Successfully promoted to stable!"
echo "📦 Version: ${{ steps.version.outputs.new_version }}"
echo "🚀 The stable release will be automatically published to NPM via semantic-release"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"
echo "✅ Users running 'npx bmad-method install' will now get version ${{ steps.version.outputs.new_version }}"

View File

@@ -25,10 +25,10 @@ Comprehensive guide for determining appropriate test levels (unit, integration,
```yaml
unit_test:
component: "PriceCalculator"
scenario: "Calculate discount with multiple rules"
justification: "Complex business logic with multiple branches"
mock_requirements: "None - pure function"
component: 'PriceCalculator'
scenario: 'Calculate discount with multiple rules'
justification: 'Complex business logic with multiple branches'
mock_requirements: 'None - pure function'
```
### Integration Tests
@@ -52,10 +52,10 @@ unit_test:
```yaml
integration_test:
components: ["UserService", "AuthRepository"]
scenario: "Create user with role assignment"
justification: "Critical data flow between service and persistence"
test_environment: "In-memory database"
components: ['UserService', 'AuthRepository']
scenario: 'Create user with role assignment'
justification: 'Critical data flow between service and persistence'
test_environment: 'In-memory database'
```
### End-to-End Tests
@@ -79,10 +79,10 @@ integration_test:
```yaml
e2e_test:
journey: "Complete checkout process"
scenario: "User purchases with saved payment method"
justification: "Revenue-critical path requiring full validation"
environment: "Staging with test payment gateway"
journey: 'Complete checkout process'
scenario: 'User purchases with saved payment method'
justification: 'Revenue-critical path requiring full validation'
environment: 'Staging with test payment gateway'
```
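To make the unit-level example concrete, here is a minimal sketch of such a test using Node's built-in test runner. The `calculateDiscount` function is a hypothetical stand-in for the `PriceCalculator` scenario above, not a real API from this repo; the point is that pure branching logic needs no mocks.

```js
const test = require('node:test');
const assert = require('node:assert/strict');

// Hypothetical stand-in for the PriceCalculator scenario above:
// pure discount logic with several branches and no I/O, so no mocks are needed.
function calculateDiscount(order) {
  let discountPercent = 0;
  if (order.coupon === 'SAVE10') discountPercent += 10;
  if (order.loyaltyTier === 'gold') discountPercent += 5;
  return (order.subtotal * (100 - discountPercent)) / 100;
}

test('applies multiple discount rules to the subtotal', () => {
  const order = { subtotal: 200, coupon: 'SAVE10', loyaltyTier: 'gold' };
  // 10% coupon + 5% loyalty tier -> 15% off 200 = 170
  assert.equal(calculateDiscount(order), 170);
});
```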
## Test Level Selection Rules

View File

@@ -6,18 +6,19 @@ Quick NFR validation focused on the core four: security, performance, reliabilit
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "docs/stories/{epic}.{story}.*.md"
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
optional:
- architecture_refs: "docs/architecture/*.md"
- technical_preferences: "docs/technical-preferences.md"
- architecture_refs: 'docs/architecture/*.md'
- technical_preferences: 'docs/technical-preferences.md'
- acceptance_criteria: From story file
```
## Purpose
Assess non-functional requirements for a story and generate:
1. YAML block for the gate file's `nfr_validation` section
2. Brief markdown assessment saved to `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md`
@@ -26,6 +27,7 @@ Assess non-functional requirements for a story and generate:
### 0. Fail-safe for Missing Inputs
If story_path or story file can't be found:
- Still create assessment file with note: "Source story not found"
- Set all selected NFRs to CONCERNS with notes: "Target unknown / evidence missing"
- Continue with assessment to provide value
@@ -38,7 +40,7 @@ If story_path or story file can't be found:
```text
Which NFRs should I assess? (Enter numbers or press Enter for default)
[1] Security (default)
[2] Performance (default)
[2] Performance (default)
[3] Reliability (default)
[4] Maintainability (default)
[5] Usability
@@ -52,6 +54,7 @@ Which NFRs should I assess? (Enter numbers or press Enter for default)
### 2. Check for Thresholds
Look for NFR requirements in:
- Story acceptance criteria
- `docs/architecture/*.md` files
- `docs/technical-preferences.md`
@@ -72,6 +75,7 @@ No security requirements found. Required auth method?
### 3. Quick Assessment
For each selected NFR, check:
- Is there evidence it's implemented?
- Can we validate it?
- Are there obvious gaps?
@@ -86,24 +90,24 @@ Generate ONLY for NFRs actually assessed (no placeholders):
# Gate YAML (copy/paste):
nfr_validation:
_assessed: [security, performance, reliability, maintainability]
security:
security:
status: CONCERNS
notes: "No rate limiting on auth endpoints"
notes: 'No rate limiting on auth endpoints'
performance:
status: PASS
notes: "Response times < 200ms verified"
notes: 'Response times < 200ms verified'
reliability:
status: PASS
notes: "Error handling and retries implemented"
notes: 'Error handling and retries implemented'
maintainability:
status: CONCERNS
notes: "Test coverage at 65%, target is 80%"
notes: 'Test coverage at 65%, target is 80%'
```
## Deterministic Status Rules
- **FAIL**: Any selected NFR has critical gap or target clearly not met
- **CONCERNS**: No FAILs, but any NFR is unknown/partial/missing evidence
- **CONCERNS**: No FAILs, but any NFR is unknown/partial/missing evidence
- **PASS**: All selected NFRs meet targets with evidence
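A minimal sketch of these deterministic rules as a function over the assessed NFR statuses; the object shape mirrors the gate YAML block above, while the function itself is illustrative rather than part of the task.

```js
// Sketch: apply the status rules above to an nfr_validation block.
// Each assessed entry looks like { status: 'PASS' | 'CONCERNS' | 'FAIL', notes: '...' }.
function overallNfrStatus(nfrValidation) {
  const assessed = nfrValidation._assessed.map((name) => nfrValidation[name]);
  if (assessed.some((nfr) => nfr.status === 'FAIL')) return 'FAIL';
  if (assessed.some((nfr) => nfr.status !== 'PASS')) return 'CONCERNS';
  return 'PASS'; // all selected NFRs meet their targets with evidence
}
```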
## Quality Score Calculation
@@ -123,18 +127,21 @@ If `technical-preferences.md` defines custom weights, use those instead.
```markdown
# NFR Assessment: {epic}.{story}
Date: {date}
Reviewer: Quinn
<!-- Note: Source story not found (if applicable) -->
## Summary
- Security: CONCERNS - Missing rate limiting
- Performance: PASS - Meets <200ms requirement
- Reliability: PASS - Proper error handling
- Maintainability: CONCERNS - Test coverage below target
## Critical Issues
1. **No rate limiting** (Security)
- Risk: Brute force attacks possible
- Fix: Add rate limiting middleware to auth endpoints
@@ -144,6 +151,7 @@ Reviewer: Quinn
- Fix: Add tests for uncovered branches
## Quick Wins
- Add rate limiting: ~2 hours
- Increase test coverage: ~4 hours
- Add performance monitoring: ~1 hour
@@ -152,6 +160,7 @@ Reviewer: Quinn
## Output 3: Story Update Line
**End with this line for the review task to quote:**
```
NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
```
@@ -159,6 +168,7 @@ NFR assessment: docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md
## Output 4: Gate Integration Line
**Always print at the end:**
```
Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml under nfr_validation
```
@@ -166,66 +176,82 @@ Gate NFR block ready → paste into docs/qa/gates/{epic}.{story}-{slug}.yml unde
## Assessment Criteria
### Security
**PASS if:**
- Authentication implemented
- Authorization enforced
- Input validation present
- No hardcoded secrets
**CONCERNS if:**
- Missing rate limiting
- Weak encryption
- Incomplete authorization
**FAIL if:**
- No authentication
- Hardcoded credentials
- SQL injection vulnerabilities
### Performance
**PASS if:**
- Meets response time targets
- No obvious bottlenecks
- Reasonable resource usage
**CONCERNS if:**
- Close to limits
- Missing indexes
- No caching strategy
**FAIL if:**
- Exceeds response time limits
- Memory leaks
- Unoptimized queries
### Reliability
**PASS if:**
- Error handling present
- Graceful degradation
- Retry logic where needed
**CONCERNS if:**
- Some error cases unhandled
- No circuit breakers
- Missing health checks
**FAIL if:**
- No error handling
- Crashes on errors
- No recovery mechanisms
### Maintainability
**PASS if:**
- Test coverage meets target
- Code well-structured
- Documentation present
**CONCERNS if:**
- Test coverage below target
- Some code duplication
- Missing documentation
**FAIL if:**
- No tests
- Highly coupled code
- No documentation
@@ -283,7 +309,7 @@ maintainability:
1. **Functional Suitability**: Completeness, correctness, appropriateness
2. **Performance Efficiency**: Time behavior, resource use, capacity
3. **Compatibility**: Co-existence, interoperability
3. **Compatibility**: Co-existence, interoperability
4. **Usability**: Learnability, operability, accessibility
5. **Reliability**: Maturity, availability, fault tolerance
6. **Security**: Confidentiality, integrity, authenticity
@@ -291,6 +317,7 @@ maintainability:
8. **Portability**: Adaptability, installability
Use these when assessing beyond the core four.
</details>
<details>
@@ -304,12 +331,13 @@ performance_deep_dive:
p99: 350ms
database:
slow_queries: 2
missing_indexes: ["users.email", "orders.user_id"]
missing_indexes: ['users.email', 'orders.user_id']
caching:
hit_rate: 0%
recommendation: "Add Redis for session data"
recommendation: 'Add Redis for session data'
load_test:
max_rps: 150
breaking_point: 200 rps
```
</details>
</details>

View File

@@ -27,11 +27,11 @@ Slug rules:
```yaml
schema: 1
story: "{epic}.{story}"
story: '{epic}.{story}'
gate: PASS|CONCERNS|FAIL|WAIVED
status_reason: "1-2 sentence explanation of gate decision"
reviewer: "Quinn"
updated: "{ISO-8601 timestamp}"
status_reason: '1-2 sentence explanation of gate decision'
reviewer: 'Quinn'
updated: '{ISO-8601 timestamp}'
top_issues: [] # Empty array if no issues
waiver: { active: false } # Only set active: true if WAIVED
```
@@ -40,20 +40,20 @@ waiver: { active: false } # Only set active: true if WAIVED
```yaml
schema: 1
story: "1.3"
story: '1.3'
gate: CONCERNS
status_reason: "Missing rate limiting on auth endpoints poses security risk."
reviewer: "Quinn"
updated: "2025-01-12T10:15:00Z"
status_reason: 'Missing rate limiting on auth endpoints poses security risk.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: "SEC-001"
- id: 'SEC-001'
severity: high # ONLY: low|medium|high
finding: "No rate limiting on login endpoint"
suggested_action: "Add rate limiting middleware before production"
- id: "TEST-001"
finding: 'No rate limiting on login endpoint'
suggested_action: 'Add rate limiting middleware before production'
- id: 'TEST-001'
severity: medium
finding: "No integration tests for auth flow"
suggested_action: "Add integration test coverage"
finding: 'No integration tests for auth flow'
suggested_action: 'Add integration test coverage'
waiver: { active: false }
```
@@ -61,20 +61,20 @@ waiver: { active: false }
```yaml
schema: 1
story: "1.3"
story: '1.3'
gate: WAIVED
status_reason: "Known issues accepted for MVP release."
reviewer: "Quinn"
updated: "2025-01-12T10:15:00Z"
status_reason: 'Known issues accepted for MVP release.'
reviewer: 'Quinn'
updated: '2025-01-12T10:15:00Z'
top_issues:
- id: "PERF-001"
- id: 'PERF-001'
severity: low
finding: "Dashboard loads slowly with 1000+ items"
suggested_action: "Implement pagination in next sprint"
finding: 'Dashboard loads slowly with 1000+ items'
suggested_action: 'Implement pagination in next sprint'
waiver:
active: true
reason: "MVP release - performance optimization deferred"
approved_by: "Product Owner"
reason: 'MVP release - performance optimization deferred'
approved_by: 'Product Owner'
```
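As a rough illustration of consuming these files, here is a sketch that loads a gate and sanity-checks the fields shown in the schema. It uses `js-yaml`, which this repo's tooling already requires; the file path and any validation beyond "waived gates need an active waiver" are assumptions, not part of the schema text.

```js
const fs = require('node:fs');
const yaml = require('js-yaml');

const GATE_VALUES = ['PASS', 'CONCERNS', 'FAIL', 'WAIVED'];

// Sketch: load a gate file and sanity-check the fields shown in the schema above.
function loadGate(gatePath) {
  const gate = yaml.load(fs.readFileSync(gatePath, 'utf8'));
  if (!GATE_VALUES.includes(gate.gate)) {
    throw new Error(`Unexpected gate value: ${gate.gate}`);
  }
  if (gate.gate === 'WAIVED' && !(gate.waiver && gate.waiver.active)) {
    throw new Error('WAIVED gates must set waiver.active: true');
  }
  return gate;
}

// Example (hypothetical path): loadGate('docs/qa/gates/1.3-user-auth.yml');
```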
## Gate Decision Criteria

View File

@@ -6,10 +6,10 @@ Generate a comprehensive risk assessment matrix for a story implementation using
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "docs/stories/{epic}.{story}.*.md"
- story_title: "{title}" # If missing, derive from story file H1
- story_slug: "{slug}" # If missing, derive from title (lowercase, hyphenated)
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: 'docs/stories/{epic}.{story}.*.md'
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
@@ -79,14 +79,14 @@ For each category, identify specific risks:
```yaml
risk:
id: "SEC-001" # Use prefixes: SEC, PERF, DATA, BUS, OPS, TECH
id: 'SEC-001' # Use prefixes: SEC, PERF, DATA, BUS, OPS, TECH
category: security
title: "Insufficient input validation on user forms"
description: "Form inputs not properly sanitized could lead to XSS attacks"
title: 'Insufficient input validation on user forms'
description: 'Form inputs not properly sanitized could lead to XSS attacks'
affected_components:
- "UserRegistrationForm"
- "ProfileUpdateForm"
detection_method: "Code review revealed missing validation"
- 'UserRegistrationForm'
- 'ProfileUpdateForm'
detection_method: 'Code review revealed missing validation'
```
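A small helper sketch for generating IDs in the prefixed form used above (`SEC-001`, `PERF-001`, and so on). The prefixes come from the comment in the YAML; the spelled-out category names and the zero-padded sequence are assumptions made for illustration.

```js
// Sketch: sequential risk IDs per category prefix, e.g. SEC-001, SEC-002, PERF-001.
const PREFIXES = {
  security: 'SEC',
  performance: 'PERF',
  data: 'DATA',
  business: 'BUS',
  operational: 'OPS',
  technical: 'TECH',
};

function makeRiskIdFactory() {
  const counters = {};
  return (category) => {
    const prefix = PREFIXES[category];
    if (!prefix) throw new Error(`Unknown risk category: ${category}`);
    counters[prefix] = (counters[prefix] || 0) + 1;
    return `${prefix}-${String(counters[prefix]).padStart(3, '0')}`;
  };
}

// Example: const nextRiskId = makeRiskIdFactory(); nextRiskId('security'); // 'SEC-001'
```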
### 2. Risk Assessment
@@ -133,20 +133,20 @@ For each identified risk, provide mitigation:
```yaml
mitigation:
risk_id: "SEC-001"
strategy: "preventive" # preventive|detective|corrective
risk_id: 'SEC-001'
strategy: 'preventive' # preventive|detective|corrective
actions:
- "Implement input validation library (e.g., validator.js)"
- "Add CSP headers to prevent XSS execution"
- "Sanitize all user inputs before storage"
- "Escape all outputs in templates"
- 'Implement input validation library (e.g., validator.js)'
- 'Add CSP headers to prevent XSS execution'
- 'Sanitize all user inputs before storage'
- 'Escape all outputs in templates'
testing_requirements:
- "Security testing with OWASP ZAP"
- "Manual penetration testing of forms"
- "Unit tests for validation functions"
residual_risk: "Low - Some zero-day vulnerabilities may remain"
owner: "dev"
timeline: "Before deployment"
- 'Security testing with OWASP ZAP'
- 'Manual penetration testing of forms'
- 'Unit tests for validation functions'
residual_risk: 'Low - Some zero-day vulnerabilities may remain'
owner: 'dev'
timeline: 'Before deployment'
```
## Outputs
@@ -172,12 +172,12 @@ risk_summary:
highest:
id: SEC-001
score: 9
title: "XSS on profile form"
title: 'XSS on profile form'
recommendations:
must_fix:
- "Add input sanitization & CSP"
- 'Add input sanitization & CSP'
monitor:
- "Add security alerts for auth endpoints"
- 'Add security alerts for auth endpoints'
```
### Output 2: Markdown Report

View File

@@ -6,10 +6,10 @@ Create comprehensive test scenarios with appropriate test level recommendations
```yaml
required:
- story_id: "{epic}.{story}" # e.g., "1.3"
- story_path: "{devStoryLocation}/{epic}.{story}.*.md" # Path from core-config.yaml
- story_title: "{title}" # If missing, derive from story file H1
- story_slug: "{slug}" # If missing, derive from title (lowercase, hyphenated)
- story_id: '{epic}.{story}' # e.g., "1.3"
- story_path: '{devStoryLocation}/{epic}.{story}.*.md' # Path from core-config.yaml
- story_title: '{title}' # If missing, derive from story file H1
- story_slug: '{slug}' # If missing, derive from title (lowercase, hyphenated)
```
## Purpose
@@ -62,13 +62,13 @@ For each identified test need, create:
```yaml
test_scenario:
id: "{epic}.{story}-{LEVEL}-{SEQ}"
requirement: "AC reference"
id: '{epic}.{story}-{LEVEL}-{SEQ}'
requirement: 'AC reference'
priority: P0|P1|P2|P3
level: unit|integration|e2e
description: "What is being tested"
justification: "Why this level was chosen"
mitigates_risks: ["RISK-001"] # If risk profile exists
description: 'What is being tested'
justification: 'Why this level was chosen'
mitigates_risks: ['RISK-001'] # If risk profile exists
```
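A companion sketch that assembles a scenario record in this shape, including the `{epic}.{story}-{LEVEL}-{SEQ}` ID format. Field names follow the YAML above; the zero-padded sequence and the helper itself are assumptions for illustration.

```js
// Sketch: build a test scenario record in the shape shown above.
function makeTestScenario({ epic, story, level, seq, requirement, priority, description, justification, risks = [] }) {
  return {
    id: `${epic}.${story}-${level.toUpperCase()}-${String(seq).padStart(3, '0')}`,
    requirement, // e.g. 'AC1'
    priority, // P0 | P1 | P2 | P3
    level, // unit | integration | e2e
    description,
    justification,
    mitigates_risks: risks, // e.g. ['RISK-001'] if a risk profile exists
  };
}

// Example: makeTestScenario({ epic: 1, story: 3, level: 'unit', seq: 1, requirement: 'AC1', priority: 'P0', description: 'Discount rules', justification: 'Pure logic' })
```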
### 5. Validate Coverage

View File

@@ -31,21 +31,21 @@ Identify all testable requirements from:
For each requirement, document which tests validate it. Use Given-When-Then to describe what the test validates (not how it's written):
```yaml
requirement: "AC1: User can login with valid credentials"
requirement: 'AC1: User can login with valid credentials'
test_mappings:
- test_file: "auth/login.test.ts"
test_case: "should successfully login with valid email and password"
- test_file: 'auth/login.test.ts'
test_case: 'should successfully login with valid email and password'
# Given-When-Then describes WHAT the test validates, not HOW it's coded
given: "A registered user with valid credentials"
when: "They submit the login form"
then: "They are redirected to dashboard and session is created"
given: 'A registered user with valid credentials'
when: 'They submit the login form'
then: 'They are redirected to dashboard and session is created'
coverage: full
- test_file: "e2e/auth-flow.test.ts"
test_case: "complete login flow"
given: "User on login page"
when: "Entering valid credentials and submitting"
then: "Dashboard loads with user data"
- test_file: 'e2e/auth-flow.test.ts'
test_case: 'complete login flow'
given: 'User on login page'
when: 'Entering valid credentials and submitting'
then: 'Dashboard loads with user data'
coverage: integration
```
@@ -67,19 +67,19 @@ Document any gaps found:
```yaml
coverage_gaps:
- requirement: "AC3: Password reset email sent within 60 seconds"
gap: "No test for email delivery timing"
- requirement: 'AC3: Password reset email sent within 60 seconds'
gap: 'No test for email delivery timing'
severity: medium
suggested_test:
type: integration
description: "Test email service SLA compliance"
description: 'Test email service SLA compliance'
- requirement: "AC5: Support 1000 concurrent users"
gap: "No load testing implemented"
- requirement: 'AC5: Support 1000 concurrent users'
gap: 'No load testing implemented'
severity: high
suggested_test:
type: performance
description: "Load test with 1000 concurrent connections"
description: 'Load test with 1000 concurrent connections'
```
## Outputs
@@ -95,11 +95,11 @@ trace:
full: Y
partial: Z
none: W
planning_ref: "docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md"
planning_ref: 'docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md'
uncovered:
- ac: "AC3"
reason: "No test found for password reset timing"
notes: "See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md"
- ac: 'AC3'
reason: 'No test found for password reset timing'
notes: 'See docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md'
```
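A brief sketch of how the totals above could be tallied from per-requirement coverage results. Field names mirror the YAML blocks in this task; the helper and the restriction to `full | partial | none` at the requirement level are assumptions.

```js
// Sketch: roll per-requirement coverage (full | partial | none) up into the summary above.
function summarizeTrace(requirements) {
  const totals = { full: 0, partial: 0, none: 0 };
  const uncovered = [];
  for (const req of requirements) {
    // req: { ac: 'AC3', coverage: 'full' | 'partial' | 'none', reason?: string }
    totals[req.coverage] += 1;
    if (req.coverage === 'none') {
      uncovered.push({ ac: req.ac, reason: req.reason || 'No test found' });
    }
  }
  return { totals: { requirements: requirements.length, ...totals }, uncovered };
}

// Example: summarizeTrace([{ ac: 'AC1', coverage: 'full' }, { ac: 'AC3', coverage: 'none', reason: 'No test for reset timing' }])
```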
### Output 2: Traceability Report

View File

@@ -11,8 +11,8 @@ template:
schema: 1
story: "{{epic_num}}.{{story_num}}"
story_title: "{{story_title}}"
gate: "{{gate_status}}" # PASS|CONCERNS|FAIL|WAIVED
status_reason: "{{status_reason}}" # 1-2 sentence summary of why this gate decision
gate: "{{gate_status}}" # PASS|CONCERNS|FAIL|WAIVED
status_reason: "{{status_reason}}" # 1-2 sentence summary of why this gate decision
reviewer: "Quinn (Test Architect)"
updated: "{{iso_timestamp}}"
@@ -41,7 +41,7 @@ examples:
severity: medium
finding: "Missing integration tests for auth flow"
suggested_action: "Add test coverage for critical paths"
when_waived: |
waiver:
active: true
@@ -55,7 +55,7 @@ optional_fields_examples:
quality_and_expiry: |
quality_score: 75 # 0-100 (optional scoring)
expires: "2025-01-26T00:00:00Z" # Optional gate freshness window
evidence: |
evidence:
tests_reviewed: 15
@@ -63,14 +63,14 @@ optional_fields_examples:
trace:
ac_covered: [1, 2, 3] # AC numbers with test coverage
ac_gaps: [4] # AC numbers lacking coverage
nfr_validation: |
nfr_validation:
security: { status: CONCERNS, notes: "Rate limiting missing" }
performance: { status: PASS, notes: "" }
reliability: { status: PASS, notes: "" }
maintainability: { status: PASS, notes: "" }
history: |
history: # Append-only audit trail
- at: "2025-01-12T10:00:00Z"
@@ -79,7 +79,7 @@ optional_fields_examples:
- at: "2025-01-12T15:00:00Z"
gate: CONCERNS
note: "Tests added but rate limiting still missing"
risk_summary: |
risk_summary: # From risk-profile task
totals:
@@ -91,7 +91,7 @@ optional_fields_examples:
recommendations:
must_fix: []
monitor: []
recommendations: |
recommendations:
immediate: # Must fix before production
@@ -99,4 +99,4 @@ optional_fields_examples:
refs: ["api/auth/login.ts:42-68"]
future: # Can be addressed later
- action: "Consider caching for better performance"
refs: ["services/data.service.ts"]
refs: ["services/data.service.ts"]

View File

@@ -29,14 +29,14 @@ The Test Architect (Quinn) provides comprehensive quality assurance throughout t
### Quick Command Reference
| **Stage** | **Command** | **Purpose** | **Output** | **Priority** |
|-----------|------------|-------------|------------|--------------|
| **After Story Approval** | `*risk` | Identify integration & regression risks | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` | High for complex/brownfield |
| | `*design` | Create test strategy for dev | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` | High for new features |
| **During Development** | `*trace` | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` | Medium |
| | `*nfr` | Validate quality attributes | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` | High for critical features |
| **After Development** | `*review` | Comprehensive assessment | QA Results in story + `docs/qa/gates/{epic}.{story}-{slug}.yml` | **Required** |
| **Post-Review** | `*gate` | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` | As needed |
| **Stage** | **Command** | **Purpose** | **Output** | **Priority** |
| ------------------------ | ----------- | --------------------------------------- | --------------------------------------------------------------- | --------------------------- |
| **After Story Approval** | `*risk` | Identify integration & regression risks | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` | High for complex/brownfield |
| | `*design` | Create test strategy for dev | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` | High for new features |
| **During Development** | `*trace` | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` | Medium |
| | `*nfr` | Validate quality attributes | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` | High for critical features |
| **After Development** | `*review` | Comprehensive assessment | QA Results in story + `docs/qa/gates/{epic}.{story}-{slug}.yml` | **Required** |
| **Post-Review** | `*gate` | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` | As needed |
### Stage 1: After Story Creation (Before Dev Starts)
@@ -134,24 +134,24 @@ The Test Architect (Quinn) provides comprehensive quality assurance throughout t
### Understanding Gate Decisions
| **Status** | **Meaning** | **Action Required** | **Can Proceed?** |
|------------|-------------|-------------------|------------------|
| **PASS** | All critical requirements met | None | ✅ Yes |
| **CONCERNS** | Non-critical issues found | Team review recommended | ⚠️ With caution |
| **FAIL** | Critical issues (security, missing P0 tests) | Must fix | ❌ No |
| **WAIVED** | Issues acknowledged and accepted | Document reasoning | ✅ With approval |
| **Status** | **Meaning** | **Action Required** | **Can Proceed?** |
| ------------ | -------------------------------------------- | ----------------------- | ---------------- |
| **PASS** | All critical requirements met | None | ✅ Yes |
| **CONCERNS** | Non-critical issues found | Team review recommended | ⚠️ With caution |
| **FAIL** | Critical issues (security, missing P0 tests) | Must fix | ❌ No |
| **WAIVED** | Issues acknowledged and accepted | Document reasoning | ✅ With approval |
### Risk-Based Testing Strategy
The Test Architect uses risk scoring to prioritize testing:
| **Risk Score** | **Calculation** | **Testing Priority** | **Gate Impact** |
|---------------|----------------|-------------------|----------------|
| **9** | High probability × High impact | P0 - Must test thoroughly | FAIL if untested |
| **6** | Medium-high combinations | P1 - Should test well | CONCERNS if gaps |
| **4** | Medium combinations | P1 - Should test | CONCERNS if notable gaps |
| **2-3** | Low-medium combinations | P2 - Nice to have | Note in review |
| **1** | Minimal risk | P2 - Minimal | Note in review |
| **Risk Score** | **Calculation** | **Testing Priority** | **Gate Impact** |
| -------------- | ------------------------------ | ------------------------- | ------------------------ |
| **9** | High probability × High impact | P0 - Must test thoroughly | FAIL if untested |
| **6** | Medium-high combinations | P1 - Should test well | CONCERNS if gaps |
| **4** | Medium combinations | P1 - Should test | CONCERNS if notable gaps |
| **2-3** | Low-medium combinations | P2 - Nice to have | Note in review |
| **1** | Minimal risk | P2 - Minimal | Note in review |
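The score bands in this table translate directly into code. A minimal sketch follows, assuming the 1-3 probability and 1-3 impact scale that the scores imply; the function and field names are illustrative.

```js
// Sketch: translate a probability x impact score (each rated 1-3) into the bands above.
function classifyRisk(probability, impact) {
  const score = probability * impact;
  if (score >= 9) return { score, priority: 'P0', gateImpact: 'FAIL if untested' };
  if (score >= 6) return { score, priority: 'P1', gateImpact: 'CONCERNS if gaps' };
  if (score >= 4) return { score, priority: 'P1', gateImpact: 'CONCERNS if notable gaps' };
  return { score, priority: 'P2', gateImpact: 'Note in review' };
}

// Example: classifyRisk(3, 3) -> { score: 9, priority: 'P0', gateImpact: 'FAIL if untested' }
```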
### Special Situations & Best Practices
@@ -227,14 +227,14 @@ All Test Architect activities create permanent records:
**Should I run Test Architect commands?**
| **Scenario** | **Before Dev** | **During Dev** | **After Dev** |
|-------------|---------------|----------------|---------------|
| **Simple bug fix** | Optional | Optional | Required `*review` |
| **New feature** | Recommended `*risk`, `*design` | Optional `*trace` | Required `*review` |
| **Brownfield change** | **Required** `*risk`, `*design` | Recommended `*trace`, `*nfr` | Required `*review` |
| **API modification** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` |
| **Performance-critical** | Recommended `*design` | **Required** `*nfr` | Required `*review` |
| **Data migration** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` + `*gate` |
| **Scenario** | **Before Dev** | **During Dev** | **After Dev** |
| ------------------------ | ------------------------------- | ---------------------------- | ---------------------------- |
| **Simple bug fix** | Optional | Optional | Required `*review` |
| **New feature** | Recommended `*risk`, `*design` | Optional `*trace` | Required `*review` |
| **Brownfield change** | **Required** `*risk`, `*design` | Recommended `*trace`, `*nfr` | Required `*review` |
| **API modification** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` |
| **Performance-critical** | Recommended `*design` | **Required** `*nfr` | Required `*review` |
| **Data migration** | **Required** `*risk`, `*design` | **Required** `*trace` | Required `*review` + `*gate` |
### Success Metrics

View File

@@ -277,7 +277,7 @@ The documentation uses short forms for convenience. Both styles are valid:
```text
*risk → *risk-profile
*design → *test-design
*design → *test-design
*nfr → *nfr-assess
*trace → *trace-requirements (or just *trace)
*review → *review
@@ -376,14 +376,14 @@ Manages quality gate decisions:
The Test Architect provides value throughout the entire development lifecycle. Here's when and how to leverage each capability:
| **Stage** | **Command** | **When to Use** | **Value** | **Output** |
|-----------|------------|-----------------|-----------|------------|
| **Story Drafting** | `*risk` | After SM drafts story | Identify pitfalls early | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` |
| | `*design` | After risk assessment | Guide dev on test strategy | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` |
| **Development** | `*trace` | Mid-implementation | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` |
| | `*nfr` | While building features | Catch quality issues early | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` |
| **Review** | `*review` | Story marked complete | Full quality assessment | QA Results in story + gate file |
| **Post-Review** | `*gate` | After fixing issues | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` |
| **Stage** | **Command** | **When to Use** | **Value** | **Output** |
| ------------------ | ----------- | ----------------------- | -------------------------- | -------------------------------------------------------------- |
| **Story Drafting** | `*risk` | After SM drafts story | Identify pitfalls early | `docs/qa/assessments/{epic}.{story}-risk-{YYYYMMDD}.md` |
| | `*design` | After risk assessment | Guide dev on test strategy | `docs/qa/assessments/{epic}.{story}-test-design-{YYYYMMDD}.md` |
| **Development** | `*trace` | Mid-implementation | Verify test coverage | `docs/qa/assessments/{epic}.{story}-trace-{YYYYMMDD}.md` |
| | `*nfr` | While building features | Catch quality issues early | `docs/qa/assessments/{epic}.{story}-nfr-{YYYYMMDD}.md` |
| **Review** | `*review` | Story marked complete | Full quality assessment | QA Results in story + gate file |
| **Post-Review** | `*gate` | After fixing issues | Update quality decision | Updated `docs/qa/gates/{epic}.{story}-{slug}.yml` |
#### Example Commands

View File

@@ -7,7 +7,7 @@ const yaml = require('js-yaml');
const chalk = require('chalk').default || require('chalk');
const inquirer = require('inquirer').default || require('inquirer');
const semver = require('semver');
const https = require('https');
const https = require('node:https');
// Handle both execution contexts (from root via npx or from installer directory)
let version;
@@ -104,28 +104,30 @@ program
.description('Check for BMad Update')
.action(async () => {
console.log('Checking for updates...');
// Make HTTP request to npm registry for latest version info
const req = https.get(`https://registry.npmjs.org/${packageName}/latest`, res => {
const req = https.get(`https://registry.npmjs.org/${packageName}/latest`, (res) => {
// Check for HTTP errors (non-200 status codes)
if (res.statusCode !== 200) {
console.error(chalk.red(`Update check failed: Received status code ${res.statusCode}`));
return;
}
// Accumulate response data chunks
let data = '';
res.on('data', chunk => data += chunk);
res.on('data', (chunk) => (data += chunk));
// Process complete response
res.on('end', () => {
try {
// Parse npm registry response and extract version
const latest = JSON.parse(data).version;
// Compare versions using semver
if (semver.gt(latest, version)) {
console.log(chalk.bold.blue(`⚠️ ${packageName} update available: ${version}${latest}`));
console.log(
chalk.bold.blue(`⚠️ ${packageName} update available: ${version}${latest}`),
);
console.log(chalk.bold.blue('\nInstall latest by running:'));
console.log(chalk.bold.magenta(` npm install ${packageName}@latest`));
console.log(chalk.dim(' or'));
@@ -139,14 +141,14 @@ program
}
});
});
// Handle network/connection errors
req.on('error', error => {
req.on('error', (error) => {
console.error(chalk.red('Update check failed:'), error.message);
});
// Set 30 second timeout to prevent hanging
req.setTimeout(30000, () => {
req.setTimeout(30_000, () => {
req.destroy();
console.error(chalk.red('Update check timed out'));
});
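To read the flow in one place, here is the update check from this hunk condensed into a self-contained sketch. `packageName` and `currentVersion` stand in for the values the CLI derives from its package.json, chalk styling is omitted, and the up-to-date branch (elided in the hunk) is only noted in a comment.

```js
const https = require('node:https');
const semver = require('semver');

// Sketch: the update-check flow from the hunk above, condensed.
function checkForUpdate(packageName, currentVersion) {
  const req = https.get(`https://registry.npmjs.org/${packageName}/latest`, (res) => {
    // Non-200 responses are treated as a failed check.
    if (res.statusCode !== 200) {
      console.error(`Update check failed: received status code ${res.statusCode}`);
      return;
    }
    let data = '';
    res.on('data', (chunk) => (data += chunk));
    res.on('end', () => {
      try {
        const latest = JSON.parse(data).version;
        if (semver.gt(latest, currentVersion)) {
          console.log(`${packageName} update available: ${currentVersion}${latest}`);
        }
        // else: already up to date (that branch's message is elided in the hunk above)
      } catch (error) {
        console.error('Update check failed:', error.message);
      }
    });
  });
  req.on('error', (error) => console.error('Update check failed:', error.message));
  // 30 second timeout, matching the 30_000 literal above.
  req.setTimeout(30_000, () => {
    req.destroy();
    console.error('Update check timed out');
  });
}
```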

View File

@@ -3,7 +3,7 @@
* Reduces duplication and provides shared methods
*/
const path = require('path');
const path = require('node:path');
const fs = require('fs-extra');
const yaml = require('js-yaml');
const chalk = require('chalk').default || require('chalk');